/*
 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
31
#include "CodeBlock.h"
32
#include "JITInlineMethods.h"
33
#include "JITStubCall.h"
35
#include "JSFunction.h"
36
#include "Interpreter.h"
37
#include "LinkBuffer.h"
38
#include "RepatchBuffer.h"
39
#include "ResultType.h"
40
#include "SamplingTool.h"
52
// put_by_index always goes through the C++ stub: marshal base, the integer
// property index, and the value, then call out.
void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_by_index);
    stubCall.addArgument(base);
    stubCall.addArgument(Imm32(property));
    stubCall.addArgument(value);
    stubCall.call();
}
65
// put_getter always goes through the C++ stub: base, the property Identifier,
// and the getter function are passed through.
void JIT::emit_op_put_getter(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned function = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_getter);
    stubCall.addArgument(base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
    stubCall.addArgument(function);
    stubCall.call();
}
78
// put_setter always goes through the C++ stub: base, the property Identifier,
// and the setter function are passed through.
void JIT::emit_op_put_setter(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned function = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_setter);
    stubCall.addArgument(base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
    stubCall.addArgument(function);
    stubCall.call();
}
91
// del_by_id always goes through the C++ stub; the boolean result of the
// deletion is stored to dst.
void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_del_by_id);
    stubCall.addArgument(base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
    stubCall.call(dst);
}
104
#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
#endif
115
// Generic (non-optimized) get_by_val: always call the C++ stub and store the
// result to dst.
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base);
    stubCall.addArgument(property);
    stubCall.call(dst);
}
127
// The generic path emits no slow cases, so this must never be reached.
void JIT::emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
132
// Generic (non-optimized) put_by_val: always call the C++ stub.
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_by_val);
    stubCall.addArgument(base);
    stubCall.addArgument(property);
    stubCall.addArgument(value);
    stubCall.call();
}
145
// The generic path emits no slow cases, so this must never be reached.
void JIT::emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
150
void JIT::emit_op_get_by_id(Instruction* currentInstruction)
152
int dst = currentInstruction[1].u.operand;
153
int base = currentInstruction[2].u.operand;
154
int ident = currentInstruction[3].u.operand;
156
JITStubCall stubCall(this, cti_op_get_by_id_generic);
157
stubCall.addArgument(base);
158
stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
161
m_propertyAccessInstructionIndex++;
164
// Generic path emits no slow cases; keep the index in step and assert.
void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    m_propertyAccessInstructionIndex++;
    ASSERT_NOT_REACHED();
}
170
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
172
int base = currentInstruction[1].u.operand;
173
int ident = currentInstruction[2].u.operand;
174
int value = currentInstruction[3].u.operand;
176
JITStubCall stubCall(this, cti_op_put_by_id_generic);
177
stubCall.addArgument(base);
178
stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
179
stubCall.addArgument(value);
182
m_propertyAccessInstructionIndex++;
185
// Generic path emits no slow cases; keep the index in step and assert.
void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    m_propertyAccessInstructionIndex++;
    ASSERT_NOT_REACHED();
}
191
#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
193
/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
195
#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
197
// Optimized method_check: inline-cache the (object structure, prototype
// structure, function) triple ahead of the following get_by_id, so a
// monomorphic method load becomes three compares and an immediate move.
void JIT::emit_op_method_check(Instruction* currentInstruction)
{
    // Assert that the following instruction is a get_by_id.
    ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);

    currentInstruction += OPCODE_LENGTH(op_method_check);

    // Do the method check - check the object & its prototype's structure inline (this is the common case).
    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
    MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();

    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;

    emitLoad(base, regT1, regT0);
    emitJumpSlowCaseIfNotJSCell(base, regT1);

    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT2);
    Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));

    // This will be relinked to load the function without doing a load.
    DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
    move(Imm32(JSValue::CellTag), regT1);
    // Skip the fallback get_by_id when the inline check hit.
    Jump match = jump();

    ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
    ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
    ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction);

    // Link the failure cases here.
    structureCheck.link(this);
    protoStructureCheck.link(this);

    // Do a regular(ish) get_by_id (the slow case will be linked to
    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
    compileGetByIdHotPath();

    match.link(this);
    emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
}
243
// Slow path for method_check: fall back to a get_by_id slow case that calls
// cti_op_get_by_id_method_check (isMethodCheck = true).
void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    currentInstruction += OPCODE_LENGTH(op_method_check);

    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int ident = currentInstruction[3].u.operand;

    compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
}
257
#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
259
// Treat these as nops - the call will be handed as a regular get_by_id/op_call pair.
260
void JIT::emit_op_method_check(Instruction*) {}
261
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
265
// Optimized get_by_val fast path: int32 index into a JSArray's vector.
// Slow cases: non-int index, non-cell base, non-array base, out-of-vector
// index, and a hole (EmptyValueTag) in the vector.
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    emitLoad2(base, regT1, regT0, property, regT3, regT2);

    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));

    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
    addSlowCase(branch32(Equal, regT1, Imm32(JSValue::EmptyValueTag)));

    emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
}
288
// Slow path for get_by_val: link the five fast-path bail-outs (in emission
// order) and call the C++ stub, storing the result to dst.
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // base array check
    linkSlowCase(iter); // vector length check
    linkSlowCase(iter); // empty value

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base);
    stubCall.addArgument(property);
    stubCall.call(dst);
}
306
// Optimized put_by_val fast path: int32 index into a JSArray's vector.
// Writing into a hole bumps m_numValuesInVector and, when the index is past
// m_length, grows m_length before looping back to perform the store.
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    emitLoad2(base, regT1, regT0, property, regT3, regT2);

    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);

    Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), Imm32(JSValue::EmptyValueTag));

    Label storeResult(this);
    emitLoad(value, regT1, regT0);
    store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); // payload
    store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4)); // tag
    Jump end = jump();

    empty.link(this);
    add32(Imm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
    branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);

    add32(Imm32(1), regT2, regT0);
    store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)));
    jump().linkTo(storeResult, this);

    end.link(this);
}
340
// Slow path for put_by_val: link the four fast-path bail-outs (in emission
// order) and call the C++ stub.
void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // base not array check
    linkSlowCase(iter); // in vector check

    JITStubCall stubPutByValCall(this, cti_op_put_by_val);
    stubPutByValCall.addArgument(base);
    stubPutByValCall.addArgument(property);
    stubPutByValCall.addArgument(value);
    stubPutByValCall.call();
}
358
// Optimized get_by_id: load the base, bail to the slow case for non-cells,
// then emit the patchable inline-cache hot path and store the result.
void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;

    emitLoad(base, regT1, regT0);
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    compileGetByIdHotPath();
    emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
}
370
void JIT::compileGetByIdHotPath()
372
// As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
373
// Additionally, for get_by_id we need patch the offset of the branch to the slow case (we patch this to jump
374
// to array-length / prototype access tranpolines, and finally we also the the property-map access offset as a label
375
// to jump back to if one of these trampolies finds a match.
376
Label hotPathBegin(this);
377
m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
378
m_propertyAccessInstructionIndex++;
380
DataLabelPtr structureToCompare;
381
Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
382
addSlowCase(structureCheck);
383
ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
384
ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
386
Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT2);
387
Label externalLoadComplete(this);
388
ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
389
ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);
391
DataLabel32 displacementLabel1 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
392
ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetGetByIdPropertyMapOffset1);
393
DataLabel32 displacementLabel2 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
394
ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetGetByIdPropertyMapOffset2);
396
Label putResult(this);
397
ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
400
void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int ident = currentInstruction[3].u.operand;

    compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
}
409
void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
411
// As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
412
// so that we only need track one pointer into the slow case code - we track a pointer to the location
413
// of the call (which we can use to look up the patch information), but should a array-length or
414
// prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
415
// the distance from the call to the head of the slow case.
416
linkSlowCaseIfNotJSCell(iter, base);
419
Label coldPathBegin(this);
421
JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
422
stubCall.addArgument(regT1, regT0);
423
stubCall.addArgument(ImmPtr(ident));
424
Call call = stubCall.call(dst);
426
ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
428
// Track the location of the call; this will be used to recover patch information.
429
m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
430
m_propertyAccessInstructionIndex++;
433
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
435
// In order to be able to patch both the Structure, and the object offset, we store one pointer,
436
// to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
437
// such that the Structure & offset are always at the same distance from this.
439
int base = currentInstruction[1].u.operand;
440
int value = currentInstruction[3].u.operand;
442
emitLoad2(base, regT1, regT0, value, regT3, regT2);
444
emitJumpSlowCaseIfNotJSCell(base, regT1);
446
Label hotPathBegin(this);
447
m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
448
m_propertyAccessInstructionIndex++;
450
// It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
451
DataLabelPtr structureToCompare;
452
addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
453
ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
455
// Plant a load from a bogus ofset in the object's property map; we will patch this later, if it is to be used.
456
Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
457
Label externalLoadComplete(this);
458
ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
459
ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);
461
DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchGetByIdDefaultOffset)); // payload
462
DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchGetByIdDefaultOffset)); // tag
463
ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetPutByIdPropertyMapOffset1);
464
ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetPutByIdPropertyMapOffset2);
467
void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int base = currentInstruction[1].u.operand;
    int ident = currentInstruction[2].u.operand;

    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter); // structure check from the hot path

    JITStubCall stubCall(this, cti_op_put_by_id);
    stubCall.addArgument(regT1, regT0);
    stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
    stubCall.addArgument(regT3, regT2);
    Call call = stubCall.call();

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
    m_propertyAccessInstructionIndex++;
}
486
// Compile a store into an object's property storage. May overwrite base.
487
void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset)
489
int offset = cachedOffset;
490
if (structure->isUsingInlineStorage())
491
offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
493
loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
494
emitStore(offset, valueTag, valuePayload, base);
497
// Compile a load from an object's property storage. May overwrite base.
498
void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset)
500
int offset = cachedOffset;
501
if (structure->isUsingInlineStorage())
502
offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
504
loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
505
emitLoad(offset, resultTag, resultPayload, base);
508
// Compile a load from a known object's property storage, addressing the
// storage absolutely (the object pointer is a compile-time constant).
void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
{
    if (base->isUsingInlineStorage()) {
        load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]), resultPayload);
        load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]) + 4, resultTag);
        return;
    }

    size_t offset = cachedOffset * sizeof(JSValue);

    PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
    loadPtr(static_cast<void*>(protoPropertyStorage), temp);
    load32(Address(temp, offset), resultPayload);
    load32(Address(temp, offset + 4), resultTag);
}
524
// Build a stub that performs a put_by_id structure transition: verify the old
// structure and the prototype chain, optionally realloc property storage,
// swap in the new structure, store the value, and return.
void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
{
    // It is assumed that regT0 contains the basePayload and regT1 contains the baseTag. The value can be found on the stack.

    JumpList failureCases;
    failureCases.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    failureCases.append(branchPtr(NotEqual, regT2, ImmPtr(oldStructure)));

    // Verify that nothing in the prototype chain has a setter for this property.
    for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
        loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
        loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
        failureCases.append(branchPtr(NotEqual, regT2, ImmPtr(it->get())));
    }

    // Reallocate property storage if needed.
    bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
    if (willNeedStorageRealloc) {
        // This trampoline is called like a JIT stub; before we can call again we need to
        // remove the return address from the stack, to prevent the stack from becoming misaligned.
        preserveReturnAddressAfterCall(regT3);

        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
        stubCall.skipArgument(); // base
        stubCall.skipArgument(); // ident
        stubCall.skipArgument(); // value
        stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
        stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
        stubCall.call(regT0);

        restoreReturnAddressBeforeReturn(regT3);
    }

    sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
    add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
    storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));

    // Reload the value (tag/payload) from the stub call's argument slot.
    load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*)), regT3);
    load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*) + 4), regT2);

    // Write the value into the new property slot.
    compilePutDirectOffset(regT0, regT2, regT3, newStructure, cachedOffset);

    ret();

    ASSERT(!failureCases.empty());
    failureCases.link(this);
    restoreArgumentReferenceForTrampoline();
    Call failureCall = tailRecursiveCall();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));

    if (willNeedStorageRealloc) {
        ASSERT(m_calls.size() == 1);
        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
    }

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
}
592
// Repatch an inline get_by_id cache for a self (own-property) hit.
void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));

    int offset = sizeof(JSValue) * cachedOffset;

    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent load's offset automatically correct
    if (structure->isUsingInlineStorage())
        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset1), offset); // payload
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + 4); // tag
}
613
// Fill in a method_check inline cache: record (and ref) the cached structures
// and patch the inline compare/move sequence emitted by emit_op_method_check.
void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    ASSERT(!methodCallLinkInfo.cachedStructure);
    methodCallLinkInfo.cachedStructure = structure;
    structure->ref();

    Structure* prototypeStructure = proto->structure();
    ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
    methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
    prototypeStructure->ref();

    repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);

    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
}
634
// Repatch an inline put_by_id cache for a replace (no transition) hit.
void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));

    int offset = sizeof(JSValue) * cachedOffset;

    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent load's offset automatically correct
    if (structure->isUsingInlineStorage())
        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset); // payload
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + 4); // tag
}
655
// Build a trampoline that services get_by_id "length" on a JSArray: check the
// vptr, load m_length, bail (to the get_by_id slow case) if it exceeds
// INT_MAX, and return the length as an int32 JSValue in regT1:regT0.
void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
{
    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);

    // regT0 holds a JSCell*

    // Check for array
    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));

    // Checks out okay! - get the length from the storage
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
    load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);

    Jump failureCases2 = branch32(Above, regT2, Imm32(INT_MAX));
    move(regT2, regT0);
    move(Imm32(JSValue::Int32Tag), regT1);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}
696
void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
698
// regT0 holds a JSCell*
700
// The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
701
// referencing the prototype object - let's speculatively load it's table nice and early!)
702
JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
704
Jump failureCases1 = checkStructure(regT0, structure);
706
// Check the prototype object's Structure had not changed.
707
Structure** prototypeStructureAddress = &(protoObject->m_structure);
709
move(ImmPtr(prototypeStructure), regT3);
710
Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
712
Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
715
// Checks out okay! - getDirectOffset
716
compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
718
Jump success = jump();
720
LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
722
// Use the patch information to link the failure cases back to the original slow case routine.
723
CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
724
patchBuffer.link(failureCases1, slowCaseBegin);
725
patchBuffer.link(failureCases2, slowCaseBegin);
727
// On success return back to the hot patch code, at a point it will perform the store to dest for us.
728
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
730
// Track the stub we have created so that it will be deleted later.
731
CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
732
stubInfo->stubRoutine = entryLabel;
734
// Finally patch the jump to slow case back in the hot path to jump here instead.
735
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
736
RepatchBuffer repatchBuffer(m_codeBlock);
737
repatchBuffer.relink(jumpLocation, entryLabel);
739
// We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
740
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
744
// Append a self-access case to a polymorphic get_by_id stub list: on Structure
// match, load the property directly from the base object.
void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
{
    // regT0 holds a JSCell*
    Jump failureCase = checkStructure(regT0, structure);
    compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    // The first list entry has no predecessor stub, so fall back to the slow case directly.
    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
    if (!lastProtoBegin)
        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);

    patchBuffer.link(failureCase, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    // Keep the cached Structure alive for as long as the stub list references it.
    structure->ref();
    polymorphicStructures->list[currentIndex].set(entryLabel, structure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}
775
void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
777
// regT0 holds a JSCell*
779
// The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
780
// referencing the prototype object - let's speculatively load it's table nice and early!)
781
JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
783
// Check eax is an object of the right Structure.
784
Jump failureCases1 = checkStructure(regT0, structure);
786
// Check the prototype object's Structure had not changed.
787
Structure** prototypeStructureAddress = &(protoObject->m_structure);
789
move(ImmPtr(prototypeStructure), regT3);
790
Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
792
Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
795
compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
797
Jump success = jump();
799
LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
801
// Use the patch information to link the failure cases back to the original slow case routine.
802
CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
803
patchBuffer.link(failureCases1, lastProtoBegin);
804
patchBuffer.link(failureCases2, lastProtoBegin);
806
// On success return back to the hot patch code, at a point it will perform the store to dest for us.
807
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
809
CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
812
prototypeStructure->ref();
813
prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
815
// Finally patch the jump to slow case back in the hot path to jump here instead.
816
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
817
RepatchBuffer repatchBuffer(m_codeBlock);
818
repatchBuffer.relink(jumpLocation, entryLabel);
821
void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
823
// regT0 holds a JSCell*
827
JumpList bucketsOfFail;
829
// Check eax is an object of the right Structure.
830
bucketsOfFail.append(checkStructure(regT0, structure));
832
Structure* currStructure = structure;
833
RefPtr<Structure>* chainEntries = chain->head();
834
JSObject* protoObject = 0;
835
for (unsigned i = 0; i < count; ++i) {
836
protoObject = asObject(currStructure->prototypeForLookup(callFrame));
837
currStructure = chainEntries[i].get();
839
// Check the prototype object's Structure had not changed.
840
Structure** prototypeStructureAddress = &(protoObject->m_structure);
842
move(ImmPtr(currStructure), regT3);
843
bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
845
bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
850
compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
851
Jump success = jump();
853
LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
855
// Use the patch information to link the failure cases back to the original slow case routine.
856
CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
858
patchBuffer.link(bucketsOfFail, lastProtoBegin);
860
// On success return back to the hot patch code, at a point it will perform the store to dest for us.
861
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
863
CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
865
// Track the stub we have created so that it will be deleted later.
868
prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
870
// Finally patch the jump to slow case back in the hot path to jump here instead.
871
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
872
RepatchBuffer repatchBuffer(m_codeBlock);
873
repatchBuffer.relink(jumpLocation, entryLabel);
876
void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
878
// regT0 holds a JSCell*
882
JumpList bucketsOfFail;
884
// Check eax is an object of the right Structure.
885
bucketsOfFail.append(checkStructure(regT0, structure));
887
Structure* currStructure = structure;
888
RefPtr<Structure>* chainEntries = chain->head();
889
JSObject* protoObject = 0;
890
for (unsigned i = 0; i < count; ++i) {
891
protoObject = asObject(currStructure->prototypeForLookup(callFrame));
892
currStructure = chainEntries[i].get();
894
// Check the prototype object's Structure had not changed.
895
Structure** prototypeStructureAddress = &(protoObject->m_structure);
897
move(ImmPtr(currStructure), regT3);
898
bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
900
bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
905
compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
906
Jump success = jump();
908
LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
910
// Use the patch information to link the failure cases back to the original slow case routine.
911
patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
913
// On success return back to the hot patch code, at a point it will perform the store to dest for us.
914
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
916
// Track the stub we have created so that it will be deleted later.
917
CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
918
stubInfo->stubRoutine = entryLabel;
920
// Finally patch the jump to slow case back in the hot path to jump here instead.
921
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
922
RepatchBuffer repatchBuffer(m_codeBlock);
923
repatchBuffer.relink(jumpLocation, entryLabel);
925
// We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
926
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
929
/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
931
#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
933
#else // USE(JSVALUE32_64)
935
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
937
unsigned dst = currentInstruction[1].u.operand;
938
unsigned base = currentInstruction[2].u.operand;
939
unsigned property = currentInstruction[3].u.operand;
941
emitGetVirtualRegisters(base, regT0, property, regT1);
942
emitJumpSlowCaseIfNotImmediateInteger(regT1);
944
// This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
945
// We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
946
// number was signed since m_vectorLength is always less than intmax (since the total allocation
947
// size is always less than 4Gb). As such zero extending wil have been correct (and extending the value
948
// to 64-bits is necessary since it's used in the address calculation. We zero extend rather than sign
949
// extending since it makes it easier to re-tag the value in the slow case.
950
zeroExtend32ToPtr(regT1, regT1);
952
emitFastArithImmToInt(regT1);
954
emitJumpSlowCaseIfNotJSCell(regT0, base);
955
addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
957
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
958
addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
960
loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
961
addSlowCase(branchTestPtr(Zero, regT0));
963
emitPutVirtualRegister(dst);
966
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
968
unsigned base = currentInstruction[1].u.operand;
969
unsigned property = currentInstruction[2].u.operand;
970
unsigned value = currentInstruction[3].u.operand;
972
emitGetVirtualRegisters(base, regT0, property, regT1);
973
emitJumpSlowCaseIfNotImmediateInteger(regT1);
975
// See comment in op_get_by_val.
976
zeroExtend32ToPtr(regT1, regT1);
978
emitFastArithImmToInt(regT1);
980
emitJumpSlowCaseIfNotJSCell(regT0, base);
981
addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
982
addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
984
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
986
Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
988
Label storeResult(this);
989
emitGetVirtualRegister(value, regT0);
990
storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
994
add32(Imm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
995
branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
998
add32(Imm32(1), regT0);
999
store32(regT0, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
1000
jump().linkTo(storeResult, this);
1005
// put_by_index is always handled by a stub call; no fast path is generated.
void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_by_index);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}
1014
// put_getter is always handled by a stub call; no fast path is generated.
void JIT::emit_op_put_getter(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_getter);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}
1023
// put_setter is always handled by a stub call; no fast path is generated.
void JIT::emit_op_put_setter(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_setter);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}
1032
// del_by_id is always handled by a stub call; the boolean result is stored to
// the destination register by stubCall.call(dst).
void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_del_by_id);
    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}
1041
#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
1043
/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
1045
// Treat these as nops - the call will be handed as a regular get_by_id/op_call pair.
1046
void JIT::emit_op_method_check(Instruction*) {}
1047
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
1048
#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
1049
#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
1052
// Generic (unoptimized) get_by_id: always call through the stub. The property
// access index is still consumed to keep bookkeeping consistent.
void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);
    JITStubCall stubCall(this, cti_op_get_by_id_generic);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    stubCall.call(resultVReg);

    m_propertyAccessInstructionIndex++;
}
1067
// The generic path never registers slow cases, so this must be unreachable.
void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
1072
// Generic (unoptimized) put_by_id: always call through the stub. The property
// access index is still consumed to keep bookkeeping consistent.
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
    unsigned valueVReg = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    JITStubCall stubCall(this, cti_op_put_by_id_generic);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(regT1);
    stubCall.call();

    m_propertyAccessInstructionIndex++;
}
1089
// The generic path never registers slow cases, so this must be unreachable.
void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
1094
#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
1096
/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
1098
#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
1100
// Optimized method_check: inline-cache the callee function by checking both the
// base object's Structure and its prototype's Structure; on a miss, fall back
// to a regular get_by_id whose slow case targets cti_op_get_by_id_method_check.
void JIT::emit_op_method_check(Instruction* currentInstruction)
{
    // Assert that the following instruction is a get_by_id.
    ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);

    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);

    // Do the method check - check the object & its prototype's structure inline (this is the common case).
    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
    MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();

    Jump notCell = emitJumpIfNotJSCell(regT0);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT1);
    Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));

    // This will be relinked to load the function without doing a load.
    DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);

    END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump match = jump();

    ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
    ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
    ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction);

    // Link the failure cases here.
    notCell.link(this);
    structureCheck.link(this);
    protoStructureCheck.link(this);

    // Do a regular(ish) get_by_id (the slow case will be link to
    // cti_op_get_by_id_method_check instead of cti_op_get_by_id.
    compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);

    match.link(this);
    emitPutVirtualRegister(resultVReg);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
}
1151
// Slow path for the optimized method_check: compile the shared get_by_id slow
// case with isMethodCheck = true, then skip the already-generated get_by_id.
void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
}
1164
#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
1166
// Treat these as nops - the call will be handed as a regular get_by_id/op_call pair.
1167
void JIT::emit_op_method_check(Instruction*) {}
1168
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
1172
// Optimized get_by_id: emit the patchable hot path, then store the result.
void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);
    compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
    emitPutVirtualRegister(resultVReg);
}
1183
void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
1185
// As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
1186
// Additionally, for get_by_id we need patch the offset of the branch to the slow case (we patch this to jump
1187
// to array-length / prototype access tranpolines, and finally we also the the property-map access offset as a label
1188
// to jump back to if one of these trampolies finds a match.
1190
emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
1192
BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
1194
Label hotPathBegin(this);
1195
m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
1197
DataLabelPtr structureToCompare;
1198
Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
1199
addSlowCase(structureCheck);
1200
ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
1201
ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
1203
Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
1204
Label externalLoadComplete(this);
1205
ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
1206
ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);
1208
DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
1209
ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetGetByIdPropertyMapOffset);
1211
Label putResult(this);
1213
END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
1215
ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
1218
// Slow path for the optimized get_by_id; delegates to the shared slow-case
// compiler with isMethodCheck = false.
void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
}
1227
void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
1229
// As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
1230
// so that we only need track one pointer into the slow case code - we track a pointer to the location
1231
// of the call (which we can use to look up the patch information), but should a array-length or
1232
// prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
1233
// the distance from the call to the head of the slow case.
1235
linkSlowCaseIfNotJSCell(iter, baseVReg);
1238
BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
1241
Label coldPathBegin(this);
1243
JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
1244
stubCall.addArgument(regT0);
1245
stubCall.addArgument(ImmPtr(ident));
1246
Call call = stubCall.call(resultVReg);
1248
END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
1250
ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
1252
// Track the location of the call; this will be used to recover patch information.
1253
m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
1254
m_propertyAccessInstructionIndex++;
1257
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    unsigned valueVReg = currentInstruction[3].u.operand;

    unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;

    // In order to be able to patch both the Structure, and the object offset, we store one pointer,
    // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
    // such that the Structure & offset are always at the same distance from this.

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
    DataLabelPtr structureToCompare;
    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
    ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);

    // Plant a load from a bogus ofset in the object's property map; we will patch this later, if it is to be used.
    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
    Label externalLoadComplete(this);
    ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
    ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);

    DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));

    END_UNINTERRUPTED_SEQUENCE(sequencePutById);

    ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetPutByIdPropertyMapOffset);
}
1296
// Slow path for the optimized put_by_id: call the stub and record the call's
// return location so the stub can later be repatched.
void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));

    unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_put_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(regT1);
    Call call = stubCall.call();

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
}
1316
// Compile a store into an object's property storage. May overwrite the
1317
// value in objectReg.
1318
void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset)
1320
int offset = cachedOffset * sizeof(JSValue);
1321
if (structure->isUsingInlineStorage())
1322
offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
1324
loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
1325
storePtr(value, Address(base, offset));
1328
// Compile a load from an object's property storage. May overwrite base.
1329
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset)
1331
int offset = cachedOffset * sizeof(JSValue);
1332
if (structure->isUsingInlineStorage())
1333
offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
1335
loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
1336
loadPtr(Address(base, offset), result);
1339
// Compile a load from a known object's property storage; the object pointer is
// a compile-time constant, so inline-storage slots can be addressed absolutely.
void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset)
{
    if (base->isUsingInlineStorage())
        loadPtr(static_cast<void*>(&base->m_inlineStorage[cachedOffset]), result);
    else {
        PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
        loadPtr(static_cast<void*>(protoPropertyStorage), temp);
        loadPtr(Address(temp, cachedOffset * sizeof(JSValue)), result);
    }
}
1350
// Generate a stub for a put_by_id that transitions the object from oldStructure
// to newStructure: verify the whole prototype chain is unchanged, optionally
// realloc the property storage via a stub call, swap the Structure, and store.
void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
{
    JumpList failureCases;
    // Check eax is an object of the right Structure.
    failureCases.append(emitJumpIfNotJSCell(regT0));
    failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
    JumpList successCases;

    // ecx = baseObject
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    // proto(ecx) = baseObject->structure()->prototype()
    failureCases.append(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));

    loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);

    // ecx = baseObject->m_structure
    for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
        // null check the prototype
        successCases.append(branchPtr(Equal, regT2, ImmPtr(JSValue::encode(jsNull()))));

        // Check the structure id
        failureCases.append(branchPtr(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(it->get())));

        loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
        failureCases.append(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
        loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
    }

    successCases.link(this);

    // emit a call only if storage realloc is needed
    bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
    if (willNeedStorageRealloc) {
        // This trampoline was called to like a JIT stub; before we can can call again we need to
        // remove the return address from the stack, to prevent the stack from becoming misaligned.
        preserveReturnAddressAfterCall(regT3);

        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
        stubCall.skipArgument(); // base
        stubCall.skipArgument(); // ident
        stubCall.skipArgument(); // value
        stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
        stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
        stubCall.call(regT0);
        // The realloc call clobbered the value; reload it from the stub arguments.
        emitGetJITStubArg(2, regT1);

        restoreReturnAddressBeforeReturn(regT3);
    }

    // Assumes m_refCount can be decremented easily, refcount decrement is safe as
    // codeblock should ensure oldStructure->m_refCount > 0
    sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
    add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
    storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));

    // write the value
    compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset);

    ret();

    ASSERT(!failureCases.empty());
    failureCases.link(this);
    restoreArgumentReferenceForTrampoline();
    Call failureCall = tailRecursiveCall();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));

    if (willNeedStorageRealloc) {
        ASSERT(m_calls.size() == 1);
        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
    }

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
}
1432
// Patch a get_by_id hot path in place for a self (own-property) access: write
// the expected Structure and the property offset into the inline cache.
void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));

    int offset = sizeof(JSValue) * cachedOffset;

    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent load's offset automatically correct
    if (structure->isUsingInlineStorage())
        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));

    // Patch the offset into the propoerty map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset), offset);
}
1452
// Fill in a linked method-call cache: record the base and prototype
// Structures in the MethodCallLinkInfo, patch the inline comparands
// (base Structure, prototype object, prototype Structure, callee), and
// relink the slow-path call back to the plain cti_op_get_by_id stub.
void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    ASSERT(!methodCallLinkInfo.cachedStructure);
    methodCallLinkInfo.cachedStructure = structure;
    // The cache owns a reference to the cached Structure, mirroring the
    // prototypeStructure->ref() below; without this the Structure could be
    // destroyed while the patched code still compares against it.
    structure->ref();

    Structure* prototypeStructure = proto->structure();
    ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
    methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
    prototypeStructure->ref();

    repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);

    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
}

// Repatch a cached put_by_id fast path in place for a simple replace
// (no Structure transition): rewrite the inline Structure check and the
// property-storage offset baked into the hot path.
void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));

    int offset = sizeof(JSValue) * cachedOffset;

    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent load's offset automatically correct
    if (structure->isUsingInlineStorage())
        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));

    // Patch the offset into the property map to store to, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset);
}

// Compile a stub that inlines array.length access: vptr-check that the base
// is a JSArray, load m_length from its storage, box it as an immediate int,
// and fall back to the original slow case on any failure.
void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
{
    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);

    // Check eax is an array
    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));

    // Checks out okay! - get the length from the storage
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
    load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);

    // Lengths too large to represent as an immediate int take the slow path.
    Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt));

    emitFastArithIntToImmNoCheck(regT2, regT0);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}

// Compile a monomorphic get_by_id stub for a property found on the direct
// prototype: check the base and prototype Structures, then load the
// property straight out of the prototype object.
void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check eax is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure had not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
    // A Structure pointer may not fit in a 32-bit immediate on x86-64, so
    // materialize it in a scratch register before the comparison.
    move(ImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
#endif

    // Checks out okay! - getDirectOffset
    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}

// Add one self-access entry to a polymorphic get_by_id cache: a Structure
// check plus a direct load, chained so that a failing check falls through to
// the previous stub in the list (or the original slow case).
void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
{
    Jump failureCase = checkStructure(regT0, structure);
    compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
    if (!lastProtoBegin)
        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);

    patchBuffer.link(failureCase, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    // The list entry keeps the Structure alive, so take a reference to
    // balance the cached pointer (matches the proto/chain list variants).
    structure->ref();
    polymorphicStructures->list[currentIndex].set(entryLabel, structure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}

// Add one direct-prototype entry to a polymorphic get_by_id cache: check the
// base and prototype Structures, load the property from the prototype, and
// chain failures to the previous stub in the list.
void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check eax is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure had not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
    // A Structure pointer may not fit in a 32-bit immediate on x86-64.
    move(ImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
#endif

    // Checks out okay! - getDirectOffset
    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
    patchBuffer.link(failureCases1, lastProtoBegin);
    patchBuffer.link(failureCases2, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    // The list entry owns a reference to both Structures it caches.
    structure->ref();
    prototypeStructure->ref();
    prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}

// Add one prototype-chain entry to a polymorphic get_by_id cache: check the
// base Structure and every Structure along the chain, then load the property
// directly from the final prototype object.
void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
{
    ASSERT(count);

    JumpList bucketsOfFail;

    // Check eax is an object of the right Structure.
    Jump baseObjectCheck = checkStructure(regT0, structure);
    bucketsOfFail.append(baseObjectCheck);

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
        // A Structure pointer may not fit in a 32-bit immediate on x86-64.
        move(ImmPtr(currStructure), regT3);
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
    }
    ASSERT(protoObject);

    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;

    patchBuffer.link(bucketsOfFail, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    // Track the stub we have created so that it will be deleted later.
    // The list entry owns a reference to the Structure and the chain.
    structure->ref();
    chain->ref();
    prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}

// Compile a monomorphic get_by_id stub for a property found along the
// prototype chain: check the base Structure and every Structure in the
// chain, then load the property from the final prototype object.
void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    ASSERT(count);

    JumpList bucketsOfFail;

    // Check eax is an object of the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
        // A Structure pointer may not fit in a 32-bit immediate on x86-64.
        move(ImmPtr(currStructure), regT3);
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
    }
    ASSERT(protoObject);

    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}

/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

#endif // USE(JSVALUE32_64)

#endif // ENABLE(JIT)