/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExitCompiler.h"

#if ENABLE(DFG_JIT) && USE(JSVALUE64)

#include "DFGOperations.h"
#include <wtf/DataLog.h>

namespace JSC { namespace DFG {

void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
    // 1) Pro-forma stuff.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("OSR exit for Node @%d (", (int)exit.m_nodeIndex);
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        dataLogF("bc#%u", codeOrigin.bytecodeIndex);
        if (!codeOrigin.inlineCallFrame)
            break;
        dataLogF(" -> %p ", codeOrigin.inlineCallFrame->executable.get());
    }
    dataLogF(") ");
    dumpOperands(operands, WTF::dataFile());
#endif

#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
    SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
    debugInfo->codeBlock = m_jit.codeBlock();
    debugInfo->nodeIndex = exit.m_nodeIndex;

    m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
#endif

#if DFG_ENABLE(JIT_BREAK_ON_SPECULATION_FAILURE)
    m_jit.breakpoint();
#endif

#if DFG_ENABLE(SUCCESS_STATS)
    static SamplingCounter counter("SpeculationFailure");
    m_jit.emitCount(counter);
#endif

    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.

    GPRReg alreadyBoxed = InvalidGPRReg;

    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            m_jit.sub32(recovery->src(), recovery->dest());
            m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery->dest());
            alreadyBoxed = recovery->dest();
            break;

        case BooleanSpeculationCheck:
            m_jit.xor64(AssemblyHelpers::TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
            break;

        default:
            break;
        }
    }
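
    // Editorial note (illustrative, not part of the original logic): the or64 with
    // tagTypeNumberRegister above re-boxes an unboxed int32 under the JSVALUE64
    // encoding used on 64-bit platforms. A minimal sketch of that encoding in plain
    // C++, assuming the standard TagTypeNumber / DoubleEncodeOffset constants:
    //
    //     static const uint64_t TagTypeNumberBits = 0xFFFF000000000000ull;
    //
    //     uint64_t boxInt32(int32_t value)
    //     {
    //         // Int32s keep their low 32 bits and get the TagTypeNumber pattern on top.
    //         return TagTypeNumberBits | static_cast<uint32_t>(value);
    //     }
    //
    //     uint64_t boxDouble(uint64_t rawDoubleBits)
    //     {
    //         // Doubles are offset by 2^48 so they never collide with cell pointers.
    //         return rawDoubleBits + 0x0001000000000000ull;
    //     }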

    // 3) Refine some array and/or value profile, if appropriate.

    if (!!exit.m_jsValueSource) {
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            // If the instruction that this originated from has an array profile, then
            // refine it. If it doesn't, then do nothing. The latter could happen for
            // hoisted checks, or checks emitted for operations that didn't have array
            // profiling - either ops that aren't array accesses at all, or weren't
            // known to be array accesses in the bytecode. The latter case is a FIXME
            // while the former case is an outcome of a CheckStructure not knowing why
            // it was emitted (could be either due to an inline cache of a property
            // access, or due to an array profile).

            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                GPRReg usedRegister;
                if (exit.m_jsValueSource.isAddress())
                    usedRegister = exit.m_jsValueSource.base();
                else
                    usedRegister = exit.m_jsValueSource.gpr();

                GPRReg scratch1;
                GPRReg scratch2;
                scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister);
                scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister, scratch1);

                m_jit.push(scratch1);
                m_jit.push(scratch2);

                GPRReg value;
                if (exit.m_jsValueSource.isAddress()) {
                    value = scratch1;
                    m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
                } else
                    value = exit.m_jsValueSource.gpr();

                m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1);
                m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure());
                m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
                m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
                m_jit.lshift32(scratch1, scratch2);
                m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));

                m_jit.pop(scratch2);
                m_jit.pop(scratch1);
            }
        }
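
        // Editorial note: the instructions emitted just above record, in the baseline
        // CodeBlock's ArrayProfile, which array shape caused this exit. Conceptually
        // (illustrative C++ only; the field names are placeholders, not JSC API):
        //
        //     profile->lastSeenStructure = object->structure();
        //     profile->arrayModes |= 1u << object->structure()->indexingType();
        //
        // so the next compilation sees every indexing type that ever flowed through here.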

        if (!!exit.m_valueProfile) {
            EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);

#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
            dataLogF(" (have exit profile, bucket %p) ", bucket);
#endif

            if (exit.m_jsValueSource.isAddress()) {
                // We can't be sure that we have a spare register. So use the tagTypeNumberRegister,
                // since we know how to restore it.
                m_jit.load64(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), GPRInfo::tagTypeNumberRegister);
                m_jit.store64(GPRInfo::tagTypeNumberRegister, bucket);
                m_jit.move(AssemblyHelpers::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
            } else
                m_jit.store64(exit.m_jsValueSource.gpr(), bucket);
        }
    }

    // 4) Figure out how many scratch slots we'll need. We need one for every GPR/FPR
    //    whose destination is now occupied by a DFG virtual register, and we need
    //    one for every displaced virtual register if there are more than
    //    GPRInfo::numberOfRegisters of them. Also see if there are any constants,
    //    any undefined slots, any FPR slots, and any unboxed ints.

    Vector<bool> poisonedVirtualRegisters(operands.numberOfLocals());
    for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i)
        poisonedVirtualRegisters[i] = false;

    unsigned numberOfPoisonedVirtualRegisters = 0;
    unsigned numberOfDisplacedVirtualRegisters = 0;

    // Booleans for fast checks. We expect that most OSR exits do not have to rebox
    // Int32s, have no FPRs, and have no constants. If there are constants, we
    // expect most of them to be jsUndefined(); if that's true then we handle that
    // specially to minimize code size and execution time.
    bool haveUnboxedInt32s = false;
    bool haveUnboxedDoubles = false;
    bool haveFPRs = false;
    bool haveConstants = false;
    bool haveUndefined = false;
    bool haveUInt32s = false;
    bool haveArguments = false;

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        switch (recovery.technique()) {
        case Int32DisplacedInJSStack:
        case DoubleDisplacedInJSStack:
        case DisplacedInJSStack:
            numberOfDisplacedVirtualRegisters++;
            ASSERT((int)recovery.virtualRegister() >= 0);

            // See if we might like to store to this virtual register before doing
            // virtual register shuffling. If so, we say that the virtual register
            // is poisoned: it cannot be stored to until after displaced virtual
            // registers are handled. We track poisoned virtual registers carefully
            // to ensure this happens efficiently. Note that we expect this case
            // to be rare, so the handling of it is optimized for the cases in
            // which it does not happen.
            if (recovery.virtualRegister() < (int)operands.numberOfLocals()) {
                switch (operands.local(recovery.virtualRegister()).technique()) {
                case InGPR:
                case UnboxedInt32InGPR:
                case UInt32InGPR:
                case InFPR:
                    if (!poisonedVirtualRegisters[recovery.virtualRegister()]) {
                        poisonedVirtualRegisters[recovery.virtualRegister()] = true;
                        numberOfPoisonedVirtualRegisters++;
                    }
                    break;
                default:
                    break;
                }
            }
            break;

        case UnboxedInt32InGPR:
        case AlreadyInJSStackAsUnboxedInt32:
            haveUnboxedInt32s = true;
            break;

        case AlreadyInJSStackAsUnboxedDouble:
            haveUnboxedDoubles = true;
            break;

        case UInt32InGPR:
            haveUInt32s = true;
            break;

        case InFPR:
            haveFPRs = true;
            break;

        case Constant:
            haveConstants = true;
            if (recovery.constant().isUndefined())
                haveUndefined = true;
            break;

        case ArgumentsThatWereNotCreated:
            haveArguments = true;
            break;

        default:
            break;
        }
    }

#if DFG_ENABLE(DEBUG_VERBOSE)
    if (numberOfPoisonedVirtualRegisters)
        dataLogF("Poisoned=%u ", numberOfPoisonedVirtualRegisters);
    if (numberOfDisplacedVirtualRegisters)
        dataLogF("Displaced=%u ", numberOfDisplacedVirtualRegisters);
    if (haveUnboxedInt32s)
        dataLogF("UnboxedInt32 ");
    if (haveUnboxedDoubles)
        dataLogF("UnboxedDoubles ");
    if (haveFPRs)
        dataLogF("FPR ");
    if (haveUInt32s)
        dataLogF("UInt32 ");
    if (haveConstants)
        dataLogF("Constants ");
    if (haveUndefined)
        dataLogF("Undefined ");
#endif

    ScratchBuffer* scratchBuffer = m_jit.globalData()->scratchBufferForSize(sizeof(EncodedJSValue) * std::max(haveUInt32s ? 2u : 0u, numberOfPoisonedVirtualRegisters + (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters)));
    EncodedJSValue* scratchDataBuffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;

    // From here on, the code assumes that it is profitable to maximize the distance
    // between when something is computed and when it is stored.

    // 5) Perform all reboxing of integers.

    if (haveUnboxedInt32s || haveUInt32s) {
        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            switch (recovery.technique()) {
            case UnboxedInt32InGPR:
                if (recovery.gpr() != alreadyBoxed)
                    m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery.gpr());
                break;

            case AlreadyInJSStackAsUnboxedInt32:
                m_jit.store32(AssemblyHelpers::TrustedImm32(static_cast<uint32_t>(TagTypeNumber >> 32)), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index))));
                break;

            case UInt32InGPR: {
                // This occurs when the speculative JIT left an unsigned 32-bit integer
                // in a GPR. If it's positive, we can just box the int. Otherwise we
                // need to turn it into a boxed double.
                //
                // We don't try to be clever with register allocation here; we assume
                // that the program is using FPRs and we don't try to figure out which
                // ones it is using. Instead just temporarily save fpRegT0 and then
                // restore it. This makes sense because this path is not cheap to begin
                // with, and should happen very rarely.

                GPRReg addressGPR = GPRInfo::regT0;
                if (addressGPR == recovery.gpr())
                    addressGPR = GPRInfo::regT1;

                m_jit.store64(addressGPR, scratchDataBuffer);
                m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchDataBuffer + 1), addressGPR);
                m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);

                AssemblyHelpers::Jump positive = m_jit.branch32(AssemblyHelpers::GreaterThanOrEqual, recovery.gpr(), AssemblyHelpers::TrustedImm32(0));

                m_jit.convertInt32ToDouble(recovery.gpr(), FPRInfo::fpRegT0);
                m_jit.addDouble(AssemblyHelpers::AbsoluteAddress(&AssemblyHelpers::twoToThe32), FPRInfo::fpRegT0);
                m_jit.boxDouble(FPRInfo::fpRegT0, recovery.gpr());

                AssemblyHelpers::Jump done = m_jit.jump();

                positive.link(&m_jit);

                m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery.gpr());

                done.link(&m_jit);

                m_jit.loadDouble(addressGPR, FPRInfo::fpRegT0);
                m_jit.load64(scratchDataBuffer, addressGPR);
                break;
            }

            default:
                break;
            }
        }
    }
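
    // Editorial note on the UInt32InGPR path above: a uint32 whose top bit is set has no
    // exact int32 representation, so it must be re-materialized as a double. The addDouble
    // of twoToThe32 undoes the sign extension that convertInt32ToDouble introduces. A
    // minimal sketch of the same idea in plain C++ (illustrative only):
    //
    //     double recoverUInt32(int32_t bitsFromGPR)
    //     {
    //         double d = bitsFromGPR;    // wrong for values >= 2^31: comes out negative
    //         if (bitsFromGPR < 0)
    //             d += 4294967296.0;     // add 2^32 to get the intended unsigned value
    //         return d;
    //     }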

    // 6) Dump all non-poisoned GPRs. For poisoned GPRs, save them into the scratch storage.
    //    Note that GPRs do not have a fast check (like haveFPRs) because we expect that
    //    most OSR failure points will have at least one GPR that needs to be dumped.

    initializePoisoned(operands.numberOfLocals());
    unsigned currentPoisonIndex = 0;

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);
        switch (recovery.technique()) {
        case InGPR:
        case UnboxedInt32InGPR:
        case UInt32InGPR:
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.store64(recovery.gpr(), scratchDataBuffer + currentPoisonIndex);
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else
                m_jit.store64(recovery.gpr(), AssemblyHelpers::addressFor((VirtualRegister)operand));
            break;
        default:
            break;
        }
    }

    // At this point all GPRs are available for scratch use.

    if (haveFPRs) {
        // 7) Box all doubles (relies on there being more GPRs than FPRs)

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != InFPR)
                continue;
            FPRReg fpr = recovery.fpr();
            GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(fpr));
            m_jit.boxDouble(fpr, gpr);
        }

        // 8) Dump all doubles into the stack, or to the scratch storage if
        //    the destination virtual register is poisoned.

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != InFPR)
                continue;
            GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(recovery.fpr()));
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.store64(gpr, scratchDataBuffer + currentPoisonIndex);
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else
                m_jit.store64(gpr, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
        }
    }

    // At this point all GPRs and FPRs are available for scratch use.

    // 9) Box all unboxed doubles in the stack.
    if (haveUnboxedDoubles) {
        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != AlreadyInJSStackAsUnboxedDouble)
                continue;
            m_jit.loadDouble(AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)), FPRInfo::fpRegT0);
            m_jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
        }
    }

    ASSERT(currentPoisonIndex == numberOfPoisonedVirtualRegisters);

    // 10) Reshuffle displaced virtual registers. Optimize for the case that
    //     the number of displaced virtual registers is not more than the number
    //     of available physical registers.

    if (numberOfDisplacedVirtualRegisters) {
        if (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters) {
            // So far this appears to be the case that triggers all the time, but
            // that is far from guaranteed.

            unsigned displacementIndex = 0;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                    m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    break;

                case Int32DisplacedInJSStack: {
                    GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
                    m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
                    m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
                    break;
                }

                case DoubleDisplacedInJSStack: {
                    GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
                    m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
                    m_jit.sub64(GPRInfo::tagTypeNumberRegister, gpr);
                    break;
                }

                default:
                    break;
                }
            }

            displacementIndex = 0;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                case Int32DisplacedInJSStack:
                case DoubleDisplacedInJSStack:
                    m_jit.store64(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
                    break;

                default:
                    break;
                }
            }
        } else {
            // FIXME: This should use the shuffling algorithm that we use
            // for speculative->non-speculative jumps, if we ever discover that
            // some hot code with lots of live values that get displaced and
            // spilled really enjoys frequently failing speculation.

            // For now this code is engineered to be correct but probably not
            // optimal. In particular, it correctly handles cases where for example
            // the displacements are a permutation of the destination values
            // (e.g., two locals swapping places).
            //
            // It accomplishes this by simply lifting all of the virtual registers
            // from their old (DFG JIT) locations and dropping them in a scratch
            // location in memory, and then transferring from that scratch location
            // to their new (old JIT) locations.
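
            // Editorial note: the two passes that follow implement the "lift everything
            // out, then drop everything in" strategy described above. In plain C++ the
            // shape is (illustrative only; the names are placeholders):
            //
            //     // Pass 1: read every displaced value out of the DFG frame.
            //     for (size_t i = 0; i < displaced.size(); ++i)
            //         scratch[i] = frame[displaced[i].sourceSlot];
            //     // Pass 2: write them to their baseline slots; a permutation such as a
            //     // swap cannot clobber a source, because every source was copied first.
            //     for (size_t i = 0; i < displaced.size(); ++i)
            //         frame[displaced[i].destinationSlot] = scratch[i];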

            unsigned scratchIndex = numberOfPoisonedVirtualRegisters;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];

                switch (recovery.technique()) {
                case DisplacedInJSStack:
                    m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                    break;

                case Int32DisplacedInJSStack: {
                    m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.or64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
                    m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                    break;
                }

                case DoubleDisplacedInJSStack: {
                    m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.sub64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
                    m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                    break;
                }

                default:
                    break;
                }
            }

            scratchIndex = numberOfPoisonedVirtualRegisters;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                case Int32DisplacedInJSStack:
                case DoubleDisplacedInJSStack:
                    m_jit.load64(scratchDataBuffer + scratchIndex++, GPRInfo::regT0);
                    m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
                    break;

                default:
                    break;
                }
            }

            ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters + numberOfDisplacedVirtualRegisters);
        }
    }

    // 11) Dump all poisoned virtual registers.

    if (numberOfPoisonedVirtualRegisters) {
        for (int virtualRegister = 0; virtualRegister < (int)operands.numberOfLocals(); ++virtualRegister) {
            if (!poisonedVirtualRegisters[virtualRegister])
                continue;

            const ValueRecovery& recovery = operands.local(virtualRegister);
            switch (recovery.technique()) {
            case InGPR:
            case UnboxedInt32InGPR:
            case UInt32InGPR:
            case InFPR:
                m_jit.load64(scratchDataBuffer + poisonIndex(virtualRegister), GPRInfo::regT0);
                m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)virtualRegister));
                break;

            default:
                break;
            }
        }
    }

    // 12) Dump all constants. Optimize for Undefined, since that's a constant we see
    //     frequently.

    if (haveConstants) {
        if (haveUndefined)
            m_jit.move(AssemblyHelpers::TrustedImm64(JSValue::encode(jsUndefined())), GPRInfo::regT0);

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != Constant)
                continue;
            if (recovery.constant().isUndefined())
                m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
            else
                m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(recovery.constant())), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
        }
    }

    // 13) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    //     that all new calls into this code will go to the new JIT, so the execute
    //     counter only affects call frames that performed OSR exit and call frames
    //     that were still executing the old JIT at the time of another call frame's
    //     OSR exit. We want to ensure that the following is true:
    //
    //     (a) Code that performs an OSR exit gets a chance to reenter optimized
    //         code eventually, since optimized code is faster. But we don't
    //         want to do such reentry too aggressively (see (c) below).
    //
    //     (b) If there is code on the call stack that is still running the old
    //         JIT's code and has never OSR'd, then it should get a chance to
    //         perform OSR entry despite the fact that we've exited.
    //
    //     (c) Code that performs an OSR exit should not immediately retry OSR
    //         entry, since both forms of OSR are expensive. OSR entry is
    //         particularly expensive.
    //
    //     (d) Frequent OSR failures, even those that do not result in the code
    //         running in a hot loop, result in recompilation getting triggered.
    //
    //     To ensure (c), we'd like to set the execute counter to
    //     counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    //     (a) and (b), since then every OSR exit would delay the opportunity for
    //     every call frame to perform OSR entry. Essentially, if OSR exit happens
    //     frequently and the function has few loops, then the counter will never
    //     become non-negative and OSR entry will never be triggered. OSR entry
    //     will only happen if a loop gets hot in the old JIT, which does a pretty
    //     good job of ensuring (a) and (b). But that doesn't take care of (d),
    //     since each speculation failure would reset the execute counter.
    //     So we check here if the number of speculation failures is significantly
    //     larger than the number of successes (we want a 90% success rate), and if
    //     there have been a large enough number of failures. If so, we set the
    //     counter to 0; otherwise we set the counter to
    //     counterValueForOptimizeAfterWarmUp().

    handleExitCounts(exit);
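
    // Editorial note: handleExitCounts() (defined elsewhere in the OSR exit compiler)
    // emits the counting logic described above. The policy sketched by that comment
    // boils down to something like this (illustrative C++ only, not the emitted code):
    //
    //     if (failureCount > successCount / 9 && failureCount >= minimumFailureCount)
    //         executeCounter = 0; // retry optimization soon; recompilation may be needed
    //     else
    //         executeCounter = counterValueForOptimizeAfterWarmUp(); // back off for a while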

    // 14) Reify inlined call frames.

    ASSERT(m_jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT);
    m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));

    for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = m_jit.baselineCodeBlockFor(inlineCallFrame->caller);
        Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlockForCaller);
        unsigned returnBytecodeIndex = inlineCallFrame->caller.bytecodeIndex + OPCODE_LENGTH(op_call);
        BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), returnBytecodeIndex);

        ASSERT(mapping);
        ASSERT(mapping->m_bytecodeIndex == returnBytecodeIndex);

        void* jumpTarget = baselineCodeBlockForCaller->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            m_jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;

        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->callee->scope()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        m_jit.store64(callerFrameGPR, AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ReturnPC)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->callee.get()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
    }

    // 15) Create arguments if necessary and place them into the appropriate aliased
    //     registers.

    if (haveArguments) {
        HashSet<InlineCallFrame*, DefaultHash<InlineCallFrame*>::Hash,
            NullableHashTraits<InlineCallFrame*> > didCreateArgumentsObject;

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != ArgumentsThatWereNotCreated)
                continue;
            int operand = operands.operandForIndex(index);
            // Find the right inline call frame.
            InlineCallFrame* inlineCallFrame = 0;
            for (InlineCallFrame* current = exit.m_codeOrigin.inlineCallFrame;
                 current;
                 current = current->caller.inlineCallFrame) {
                if (current->stackOffset <= operand) {
                    inlineCallFrame = current;
                    break;
                }
            }

            if (!m_jit.baselineCodeBlockFor(inlineCallFrame)->usesArguments())
                continue;
            int argumentsRegister = m_jit.argumentsRegisterFor(inlineCallFrame);
            if (didCreateArgumentsObject.add(inlineCallFrame).isNewEntry) {
                // We know this call frame optimized out an arguments object that
                // the baseline JIT would have created. Do that creation now.
                if (inlineCallFrame) {
                    m_jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT0);
                    m_jit.setupArguments(GPRInfo::regT0);
                } else
                    m_jit.setupArgumentsExecState();
                m_jit.move(
                    AssemblyHelpers::TrustedImmPtr(
                        bitwise_cast<void*>(operationCreateArguments)),
                    GPRInfo::nonArgGPR0);
                m_jit.call(GPRInfo::nonArgGPR0);
                m_jit.store64(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(argumentsRegister));
                m_jit.store64(
                    GPRInfo::returnValueGPR,
                    AssemblyHelpers::addressFor(unmodifiedArgumentsRegister(argumentsRegister)));
                m_jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
            }

            m_jit.load64(AssemblyHelpers::addressFor(argumentsRegister), GPRInfo::regT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
        }
    }

    // 16) Load the result of the last bytecode operation into regT0.

    if (exit.m_lastSetOperand != std::numeric_limits<int>::max())
        m_jit.load64(AssemblyHelpers::addressFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);

    // 17) Adjust the call frame pointer.

    if (exit.m_codeOrigin.inlineCallFrame)
        m_jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    // 18) Jump into the corresponding baseline JIT code.

    CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlock);

    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex);

    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = baselineCodeBlock->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

    ASSERT(GPRInfo::regT1 != GPRInfo::cachedResultRegister);

    m_jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT1);
    m_jit.jump(GPRInfo::regT1);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("-> %p\n", jumpTarget);
#endif
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT) && USE(JSVALUE64)