1
// Copyright 2011 the V8 project authors. All rights reserved.
2
// Redistribution and use in source and binary forms, with or without
3
// modification, are permitted provided that the following conditions are
6
// * Redistributions of source code must retain the above copyright
7
// notice, this list of conditions and the following disclaimer.
8
// * Redistributions in binary form must reproduce the above
9
// copyright notice, this list of conditions and the following
10
// disclaimer in the documentation and/or other materials provided
11
// with the distribution.
12
// * Neither the name of Google Inc. nor the names of its
13
// contributors may be used to endorse or promote products derived
14
// from this software without specific prior written permission.
16
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
#include "mips/lithium-codegen-mips.h"
31
#include "mips/lithium-gap-resolver-mips.h"
32
#include "code-stubs.h"
33
#include "stub-cache.h"
39
class SafepointGenerator : public CallWrapper {
41
SafepointGenerator(LCodeGen* codegen,
42
LPointerMap* pointers,
43
Safepoint::DeoptMode mode)
47
virtual ~SafepointGenerator() { }
49
virtual void BeforeCall(int call_size) const { }
51
virtual void AfterCall() const {
52
codegen_->RecordSafepoint(pointers_, deopt_mode_);
57
LPointerMap* pointers_;
58
Safepoint::DeoptMode deopt_mode_;
64
bool LCodeGen::GenerateCode() {
65
HPhase phase("Code generation", chunk());
68
CpuFeatures::Scope scope(FPU);
70
CodeStub::GenerateFPStubs();
72
// Open a frame scope to indicate that there is a frame on the stack. The
73
// NONE indicates that the scope shouldn't actually generate code to set up
74
// the frame (that is done in GeneratePrologue).
75
FrameScope frame_scope(masm_, StackFrame::NONE);
77
return GeneratePrologue() &&
79
GenerateDeferredCode() &&
80
GenerateSafepointTable();
84
void LCodeGen::FinishCode(Handle<Code> code) {
86
code->set_stack_slots(GetStackSlotCount());
87
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
88
PopulateDeoptimizationData(code);
92
void LCodeGen::Abort(const char* format, ...) {
93
if (FLAG_trace_bailout) {
94
SmartArrayPointer<char> name(
95
info()->shared_info()->DebugName()->ToCString());
96
PrintF("Aborting LCodeGen in @\"%s\": ", *name);
98
va_start(arguments, format);
99
OS::VPrint(format, arguments);
107
void LCodeGen::Comment(const char* format, ...) {
108
if (!FLAG_code_comments) return;
110
StringBuilder builder(buffer, ARRAY_SIZE(buffer));
112
va_start(arguments, format);
113
builder.AddFormattedList(format, arguments);
116
// Copy the string before recording it in the assembler to avoid
117
// issues when the stack allocated buffer goes out of scope.
118
size_t length = builder.position();
119
Vector<char> copy = Vector<char>::New(length + 1);
120
memcpy(copy.start(), builder.Finalize(), copy.length());
121
masm()->RecordComment(copy.start());
125
bool LCodeGen::GeneratePrologue() {
126
ASSERT(is_generating());
129
if (strlen(FLAG_stop_at) > 0 &&
130
info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
135
// a1: Callee's JS function.
136
// cp: Callee's context.
137
// fp: Caller's frame pointer.
140
// Strict mode functions and builtins need to replace the receiver
141
// with undefined when called as functions (without an explicit
142
// receiver object). r5 is zero for method calls and non-zero for
144
if (!info_->is_classic_mode() || info_->is_native()) {
146
__ Branch(&ok, eq, t1, Operand(zero_reg));
148
int receiver_offset = scope()->num_parameters() * kPointerSize;
149
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
150
__ sw(a2, MemOperand(sp, receiver_offset));
154
__ Push(ra, fp, cp, a1);
155
__ Addu(fp, sp, Operand(2 * kPointerSize)); // Adj. FP to point to saved FP.
157
// Reserve space for the stack slots needed by the code.
158
int slots = GetStackSlotCount();
160
if (FLAG_debug_code) {
161
__ li(a0, Operand(slots));
162
__ li(a2, Operand(kSlotsZapValue));
167
__ Branch(&loop, ne, a0, Operand(zero_reg));
169
__ Subu(sp, sp, Operand(slots * kPointerSize));
173
// Possibly allocate a local context.
174
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
175
if (heap_slots > 0) {
176
Comment(";;; Allocate local context");
177
// Argument to NewContext is the function, which is in a1.
179
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
180
FastNewContextStub stub(heap_slots);
183
__ CallRuntime(Runtime::kNewFunctionContext, 1);
185
RecordSafepoint(Safepoint::kNoLazyDeopt);
186
// Context is returned in both v0 and cp. It replaces the context
187
// passed to us. It's saved in the stack and kept live in cp.
188
__ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
189
// Copy any necessary parameters into the context.
190
int num_parameters = scope()->num_parameters();
191
for (int i = 0; i < num_parameters; i++) {
192
Variable* var = scope()->parameter(i);
193
if (var->IsContextSlot()) {
194
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
195
(num_parameters - 1 - i) * kPointerSize;
196
// Load parameter from stack.
197
__ lw(a0, MemOperand(fp, parameter_offset));
198
// Store it in the context.
199
MemOperand target = ContextOperand(cp, var->index());
201
// Update the write barrier. This clobbers a3 and a0.
202
__ RecordWriteContextSlot(
203
cp, target.offset(), a0, a3, kRAHasBeenSaved, kSaveFPRegs);
206
Comment(";;; End allocate local context");
211
__ CallRuntime(Runtime::kTraceEnter, 0);
213
EnsureSpaceForLazyDeopt();
214
return !is_aborted();
218
bool LCodeGen::GenerateBody() {
219
ASSERT(is_generating());
220
bool emit_instructions = true;
221
for (current_instruction_ = 0;
222
!is_aborted() && current_instruction_ < instructions_->length();
223
current_instruction_++) {
224
LInstruction* instr = instructions_->at(current_instruction_);
225
if (instr->IsLabel()) {
226
LLabel* label = LLabel::cast(instr);
227
emit_instructions = !label->HasReplacement();
230
if (emit_instructions) {
231
Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
232
instr->CompileToNative(this);
235
return !is_aborted();
239
bool LCodeGen::GenerateDeferredCode() {
240
ASSERT(is_generating());
241
if (deferred_.length() > 0) {
242
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
243
LDeferredCode* code = deferred_[i];
244
__ bind(code->entry());
245
Comment(";;; Deferred code @%d: %s.",
246
code->instruction_index(),
247
code->instr()->Mnemonic());
249
__ jmp(code->exit());
252
// Deferred code is the last part of the instruction sequence. Mark
253
// the generated code as done unless we bailed out.
254
if (!is_aborted()) status_ = DONE;
255
return !is_aborted();
259
bool LCodeGen::GenerateDeoptJumpTable() {
260
// TODO(plind): not clear that this will have advantage for MIPS.
261
// Skipping it for now. Raised issue #100 for this.
262
Abort("Unimplemented: %s", "GenerateDeoptJumpTable");
267
bool LCodeGen::GenerateSafepointTable() {
269
safepoints_.Emit(masm(), GetStackSlotCount());
270
return !is_aborted();
274
Register LCodeGen::ToRegister(int index) const {
275
return Register::FromAllocationIndex(index);
279
DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
280
return DoubleRegister::FromAllocationIndex(index);
284
Register LCodeGen::ToRegister(LOperand* op) const {
285
ASSERT(op->IsRegister());
286
return ToRegister(op->index());
290
Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
291
if (op->IsRegister()) {
292
return ToRegister(op->index());
293
} else if (op->IsConstantOperand()) {
294
__ li(scratch, ToOperand(op));
296
} else if (op->IsStackSlot() || op->IsArgument()) {
297
__ lw(scratch, ToMemOperand(op));
305
DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
306
ASSERT(op->IsDoubleRegister());
307
return ToDoubleRegister(op->index());
311
DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
312
FloatRegister flt_scratch,
313
DoubleRegister dbl_scratch) {
314
if (op->IsDoubleRegister()) {
315
return ToDoubleRegister(op->index());
316
} else if (op->IsConstantOperand()) {
317
LConstantOperand* const_op = LConstantOperand::cast(op);
318
Handle<Object> literal = chunk_->LookupLiteral(const_op);
319
Representation r = chunk_->LookupLiteralRepresentation(const_op);
320
if (r.IsInteger32()) {
321
ASSERT(literal->IsNumber());
322
__ li(at, Operand(static_cast<int32_t>(literal->Number())));
323
__ mtc1(at, flt_scratch);
324
__ cvt_d_w(dbl_scratch, flt_scratch);
326
} else if (r.IsDouble()) {
327
Abort("unsupported double immediate");
328
} else if (r.IsTagged()) {
329
Abort("unsupported tagged immediate");
331
} else if (op->IsStackSlot() || op->IsArgument()) {
332
MemOperand mem_op = ToMemOperand(op);
333
__ ldc1(dbl_scratch, mem_op);
341
int LCodeGen::ToInteger32(LConstantOperand* op) const {
342
Handle<Object> value = chunk_->LookupLiteral(op);
343
ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
344
ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
346
return static_cast<int32_t>(value->Number());
350
double LCodeGen::ToDouble(LConstantOperand* op) const {
351
Handle<Object> value = chunk_->LookupLiteral(op);
352
return value->Number();
356
Operand LCodeGen::ToOperand(LOperand* op) {
357
if (op->IsConstantOperand()) {
358
LConstantOperand* const_op = LConstantOperand::cast(op);
359
Handle<Object> literal = chunk_->LookupLiteral(const_op);
360
Representation r = chunk_->LookupLiteralRepresentation(const_op);
361
if (r.IsInteger32()) {
362
ASSERT(literal->IsNumber());
363
return Operand(static_cast<int32_t>(literal->Number()));
364
} else if (r.IsDouble()) {
365
Abort("ToOperand Unsupported double immediate.");
367
ASSERT(r.IsTagged());
368
return Operand(literal);
369
} else if (op->IsRegister()) {
370
return Operand(ToRegister(op));
371
} else if (op->IsDoubleRegister()) {
372
Abort("ToOperand IsDoubleRegister unimplemented");
375
// Stack slots not implemented, use ToMemOperand instead.
381
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
382
ASSERT(!op->IsRegister());
383
ASSERT(!op->IsDoubleRegister());
384
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
385
int index = op->index();
387
// Local or spill slot. Skip the frame pointer, function, and
388
// context in the fixed part of the frame.
389
return MemOperand(fp, -(index + 3) * kPointerSize);
391
// Incoming parameter. Skip the return address.
392
return MemOperand(fp, -(index - 1) * kPointerSize);
397
MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
398
ASSERT(op->IsDoubleStackSlot());
399
int index = op->index();
401
// Local or spill slot. Skip the frame pointer, function, context,
402
// and the first word of the double in the fixed part of the frame.
403
return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
405
// Incoming parameter. Skip the return address and the first word of
407
return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
412
void LCodeGen::WriteTranslation(LEnvironment* environment,
413
Translation* translation) {
414
if (environment == NULL) return;
416
// The translation includes one command per value in the environment.
417
int translation_size = environment->values()->length();
418
// The output frame height does not include the parameters.
419
int height = translation_size - environment->parameter_count();
421
WriteTranslation(environment->outer(), translation);
422
int closure_id = DefineDeoptimizationLiteral(environment->closure());
423
translation->BeginFrame(environment->ast_id(), closure_id, height);
424
for (int i = 0; i < translation_size; ++i) {
425
LOperand* value = environment->values()->at(i);
426
// spilled_registers_ and spilled_double_registers_ are either
427
// both NULL or both set.
428
if (environment->spilled_registers() != NULL && value != NULL) {
429
if (value->IsRegister() &&
430
environment->spilled_registers()[value->index()] != NULL) {
431
translation->MarkDuplicate();
432
AddToTranslation(translation,
433
environment->spilled_registers()[value->index()],
434
environment->HasTaggedValueAt(i));
436
value->IsDoubleRegister() &&
437
environment->spilled_double_registers()[value->index()] != NULL) {
438
translation->MarkDuplicate();
441
environment->spilled_double_registers()[value->index()],
446
AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
451
void LCodeGen::AddToTranslation(Translation* translation,
455
// TODO(twuerthinger): Introduce marker operands to indicate that this value
456
// is not present and must be reconstructed from the deoptimizer. Currently
457
// this is only used for the arguments object.
458
translation->StoreArgumentsObject();
459
} else if (op->IsStackSlot()) {
461
translation->StoreStackSlot(op->index());
463
translation->StoreInt32StackSlot(op->index());
465
} else if (op->IsDoubleStackSlot()) {
466
translation->StoreDoubleStackSlot(op->index());
467
} else if (op->IsArgument()) {
469
int src_index = GetStackSlotCount() + op->index();
470
translation->StoreStackSlot(src_index);
471
} else if (op->IsRegister()) {
472
Register reg = ToRegister(op);
474
translation->StoreRegister(reg);
476
translation->StoreInt32Register(reg);
478
} else if (op->IsDoubleRegister()) {
479
DoubleRegister reg = ToDoubleRegister(op);
480
translation->StoreDoubleRegister(reg);
481
} else if (op->IsConstantOperand()) {
482
Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
483
int src_index = DefineDeoptimizationLiteral(literal);
484
translation->StoreLiteral(src_index);
491
void LCodeGen::CallCode(Handle<Code> code,
492
RelocInfo::Mode mode,
493
LInstruction* instr) {
494
CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
498
void LCodeGen::CallCodeGeneric(Handle<Code> code,
499
RelocInfo::Mode mode,
501
SafepointMode safepoint_mode) {
502
ASSERT(instr != NULL);
503
LPointerMap* pointers = instr->pointer_map();
504
RecordPosition(pointers->position());
506
RecordSafepointWithLazyDeopt(instr, safepoint_mode);
510
void LCodeGen::CallRuntime(const Runtime::Function* function,
512
LInstruction* instr) {
513
ASSERT(instr != NULL);
514
LPointerMap* pointers = instr->pointer_map();
515
ASSERT(pointers != NULL);
516
RecordPosition(pointers->position());
518
__ CallRuntime(function, num_arguments);
519
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
523
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
525
LInstruction* instr) {
526
__ CallRuntimeSaveDoubles(id);
527
RecordSafepointWithRegisters(
528
instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
532
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
533
Safepoint::DeoptMode mode) {
534
if (!environment->HasBeenRegistered()) {
535
// Physical stack frame layout:
536
// -x ............. -4 0 ..................................... y
537
// [incoming arguments] [spill slots] [pushed outgoing arguments]
539
// Layout of the environment:
540
// 0 ..................................................... size-1
541
// [parameters] [locals] [expression stack including arguments]
543
// Layout of the translation:
544
// 0 ........................................................ size - 1 + 4
545
// [expression stack including arguments] [locals] [4 words] [parameters]
546
// |>------------ translation_size ------------<|
549
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
552
Translation translation(&translations_, frame_count);
553
WriteTranslation(environment, &translation);
554
int deoptimization_index = deoptimizations_.length();
555
int pc_offset = masm()->pc_offset();
556
environment->Register(deoptimization_index,
558
(mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
559
deoptimizations_.Add(environment);
564
void LCodeGen::DeoptimizeIf(Condition cc,
565
LEnvironment* environment,
567
const Operand& src2) {
568
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
569
ASSERT(environment->HasBeenRegistered());
570
int id = environment->deoptimization_index();
571
Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
572
ASSERT(entry != NULL);
574
Abort("bailout was not prepared");
578
ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on MIPS.
580
if (FLAG_deopt_every_n_times == 1 &&
581
info_->shared_info()->opt_count() == id) {
582
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
586
if (FLAG_trap_on_deopt) {
589
__ Branch(&skip, NegateCondition(cc), src1, src2);
591
__ stop("trap_on_deopt");
596
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
598
// TODO(plind): The Arm port is a little different here, due to their
599
// DeOpt jump table, which is not used for Mips yet.
600
__ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
605
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
606
int length = deoptimizations_.length();
607
if (length == 0) return;
609
Handle<DeoptimizationInputData> data =
610
factory()->NewDeoptimizationInputData(length, TENURED);
612
Handle<ByteArray> translations = translations_.CreateByteArray();
613
data->SetTranslationByteArray(*translations);
614
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
616
Handle<FixedArray> literals =
617
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
618
for (int i = 0; i < deoptimization_literals_.length(); i++) {
619
literals->set(i, *deoptimization_literals_[i]);
621
data->SetLiteralArray(*literals);
623
data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
624
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
626
// Populate the deoptimization entries.
627
for (int i = 0; i < length; i++) {
628
LEnvironment* env = deoptimizations_[i];
629
data->SetAstId(i, Smi::FromInt(env->ast_id()));
630
data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
631
data->SetArgumentsStackHeight(i,
632
Smi::FromInt(env->arguments_stack_height()));
633
data->SetPc(i, Smi::FromInt(env->pc_offset()));
635
code->set_deoptimization_data(*data);
639
int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
640
int result = deoptimization_literals_.length();
641
for (int i = 0; i < deoptimization_literals_.length(); ++i) {
642
if (deoptimization_literals_[i].is_identical_to(literal)) return i;
644
deoptimization_literals_.Add(literal);
649
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
650
ASSERT(deoptimization_literals_.length() == 0);
652
const ZoneList<Handle<JSFunction> >* inlined_closures =
653
chunk()->inlined_closures();
655
for (int i = 0, length = inlined_closures->length();
658
DefineDeoptimizationLiteral(inlined_closures->at(i));
661
inlined_function_count_ = deoptimization_literals_.length();
665
void LCodeGen::RecordSafepointWithLazyDeopt(
666
LInstruction* instr, SafepointMode safepoint_mode) {
667
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
668
RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
670
ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
671
RecordSafepointWithRegisters(
672
instr->pointer_map(), 0, Safepoint::kLazyDeopt);
677
void LCodeGen::RecordSafepoint(
678
LPointerMap* pointers,
679
Safepoint::Kind kind,
681
Safepoint::DeoptMode deopt_mode) {
682
ASSERT(expected_safepoint_kind_ == kind);
684
const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
685
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
686
kind, arguments, deopt_mode);
687
for (int i = 0; i < operands->length(); i++) {
688
LOperand* pointer = operands->at(i);
689
if (pointer->IsStackSlot()) {
690
safepoint.DefinePointerSlot(pointer->index());
691
} else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
692
safepoint.DefinePointerRegister(ToRegister(pointer));
695
if (kind & Safepoint::kWithRegisters) {
696
// Register cp always contains a pointer to the context.
697
safepoint.DefinePointerRegister(cp);
702
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
703
Safepoint::DeoptMode deopt_mode) {
704
RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
708
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
709
LPointerMap empty_pointers(RelocInfo::kNoPosition);
710
RecordSafepoint(&empty_pointers, deopt_mode);
714
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
716
Safepoint::DeoptMode deopt_mode) {
718
pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
722
void LCodeGen::RecordSafepointWithRegistersAndDoubles(
723
LPointerMap* pointers,
725
Safepoint::DeoptMode deopt_mode) {
727
pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
731
void LCodeGen::RecordPosition(int position) {
732
if (position == RelocInfo::kNoPosition) return;
733
masm()->positions_recorder()->RecordPosition(position);
737
void LCodeGen::DoLabel(LLabel* label) {
738
if (label->is_loop_header()) {
739
Comment(";;; B%d - LOOP entry", label->block_id());
741
Comment(";;; B%d", label->block_id());
743
__ bind(label->label());
744
current_block_ = label->block_id();
749
void LCodeGen::DoParallelMove(LParallelMove* move) {
750
resolver_.Resolve(move);
754
void LCodeGen::DoGap(LGap* gap) {
755
for (int i = LGap::FIRST_INNER_POSITION;
756
i <= LGap::LAST_INNER_POSITION;
758
LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
759
LParallelMove* move = gap->GetParallelMove(inner_pos);
760
if (move != NULL) DoParallelMove(move);
765
void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
770
void LCodeGen::DoParameter(LParameter* instr) {
775
void LCodeGen::DoCallStub(LCallStub* instr) {
776
ASSERT(ToRegister(instr->result()).is(v0));
777
switch (instr->hydrogen()->major_key()) {
778
case CodeStub::RegExpConstructResult: {
779
RegExpConstructResultStub stub;
780
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
783
case CodeStub::RegExpExec: {
785
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
788
case CodeStub::SubString: {
790
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
793
case CodeStub::NumberToString: {
794
NumberToStringStub stub;
795
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
798
case CodeStub::StringAdd: {
799
StringAddStub stub(NO_STRING_ADD_FLAGS);
800
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
803
case CodeStub::StringCompare: {
804
StringCompareStub stub;
805
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
808
case CodeStub::TranscendentalCache: {
809
__ lw(a0, MemOperand(sp, 0));
810
TranscendentalCacheStub stub(instr->transcendental_type(),
811
TranscendentalCacheStub::TAGGED);
812
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
821
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
826
void LCodeGen::DoModI(LModI* instr) {
827
Register scratch = scratch0();
828
const Register left = ToRegister(instr->InputAt(0));
829
const Register result = ToRegister(instr->result());
833
if (instr->hydrogen()->HasPowerOf2Divisor()) {
834
Register scratch = scratch0();
835
ASSERT(!left.is(scratch));
836
__ mov(scratch, left);
837
int32_t p2constant = HConstant::cast(
838
instr->hydrogen()->right())->Integer32Value();
839
ASSERT(p2constant != 0);
840
// Result always takes the sign of the dividend (left).
841
p2constant = abs(p2constant);
843
Label positive_dividend;
844
__ Branch(USE_DELAY_SLOT, &positive_dividend, ge, left, Operand(zero_reg));
845
__ subu(result, zero_reg, left);
846
__ And(result, result, p2constant - 1);
847
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
848
DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
850
__ Branch(USE_DELAY_SLOT, &done);
851
__ subu(result, zero_reg, result);
852
__ bind(&positive_dividend);
853
__ And(result, scratch, p2constant - 1);
855
// div runs in the background while we check for special cases.
856
Register right = EmitLoadRegister(instr->InputAt(1), scratch);
860
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
861
DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
864
__ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg));
867
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
868
DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
875
void LCodeGen::DoDivI(LDivI* instr) {
876
const Register left = ToRegister(instr->InputAt(0));
877
const Register right = ToRegister(instr->InputAt(1));
878
const Register result = ToRegister(instr->result());
880
// On MIPS div is asynchronous - it will run in the background while we
881
// check for special cases.
885
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
886
DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
889
// Check for (0 / -x) that will produce negative zero.
890
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
892
__ Branch(&left_not_zero, ne, left, Operand(zero_reg));
893
DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
894
__ bind(&left_not_zero);
897
// Check for (-kMinInt / -1).
898
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
899
Label left_not_min_int;
900
__ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
901
DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
902
__ bind(&left_not_min_int);
906
DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
911
void LCodeGen::DoMulI(LMulI* instr) {
912
Register scratch = scratch0();
913
Register result = ToRegister(instr->result());
914
// Note that result may alias left.
915
Register left = ToRegister(instr->InputAt(0));
916
LOperand* right_op = instr->InputAt(1);
918
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
919
bool bailout_on_minus_zero =
920
instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
922
if (right_op->IsConstantOperand() && !can_overflow) {
923
// Use optimized code for specific constants.
924
int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
926
if (bailout_on_minus_zero && (constant < 0)) {
927
// The case of a null constant will be handled separately.
928
// If constant is negative and left is null, the result should be -0.
929
DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
934
__ Subu(result, zero_reg, left);
937
if (bailout_on_minus_zero) {
938
// If left is strictly negative and the constant is null, the
939
// result is -0. Deoptimize if required, otherwise return 0.
940
DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
942
__ mov(result, zero_reg);
946
__ Move(result, left);
949
// Multiplying by powers of two and powers of two plus or minus
950
// one can be done faster with shifted operands.
951
// For other constants we emit standard code.
952
int32_t mask = constant >> 31;
953
uint32_t constant_abs = (constant + mask) ^ mask;
955
if (IsPowerOf2(constant_abs) ||
956
IsPowerOf2(constant_abs - 1) ||
957
IsPowerOf2(constant_abs + 1)) {
958
if (IsPowerOf2(constant_abs)) {
959
int32_t shift = WhichPowerOf2(constant_abs);
960
__ sll(result, left, shift);
961
} else if (IsPowerOf2(constant_abs - 1)) {
962
int32_t shift = WhichPowerOf2(constant_abs - 1);
963
__ sll(result, left, shift);
964
__ Addu(result, result, left);
965
} else if (IsPowerOf2(constant_abs + 1)) {
966
int32_t shift = WhichPowerOf2(constant_abs + 1);
967
__ sll(result, left, shift);
968
__ Subu(result, result, left);
971
// Correct the sign of the result is the constant is negative.
973
__ Subu(result, zero_reg, result);
977
// Generate standard code.
979
__ mul(result, left, at);
984
Register right = EmitLoadRegister(right_op, scratch);
985
if (bailout_on_minus_zero) {
986
__ Or(ToRegister(instr->TempAt(0)), left, right);
990
// hi:lo = left * right.
991
__ mult(left, right);
994
__ sra(at, result, 31);
995
DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
997
__ mul(result, left, right);
1000
if (bailout_on_minus_zero) {
1001
// Bail out if the result is supposed to be negative zero.
1003
__ Branch(&done, ne, result, Operand(zero_reg));
1005
instr->environment(),
1006
ToRegister(instr->TempAt(0)),
1014
void LCodeGen::DoBitI(LBitI* instr) {
1015
LOperand* left_op = instr->InputAt(0);
1016
LOperand* right_op = instr->InputAt(1);
1017
ASSERT(left_op->IsRegister());
1018
Register left = ToRegister(left_op);
1019
Register result = ToRegister(instr->result());
1020
Operand right(no_reg);
1022
if (right_op->IsStackSlot() || right_op->IsArgument()) {
1023
right = Operand(EmitLoadRegister(right_op, at));
1025
ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
1026
right = ToOperand(right_op);
1029
switch (instr->op()) {
1030
case Token::BIT_AND:
1031
__ And(result, left, right);
1034
__ Or(result, left, right);
1036
case Token::BIT_XOR:
1037
__ Xor(result, left, right);
1046
void LCodeGen::DoShiftI(LShiftI* instr) {
1047
// Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1048
// result may alias either of them.
1049
LOperand* right_op = instr->InputAt(1);
1050
Register left = ToRegister(instr->InputAt(0));
1051
Register result = ToRegister(instr->result());
1053
if (right_op->IsRegister()) {
1054
// No need to mask the right operand on MIPS, it is built into the variable
1055
// shift instructions.
1056
switch (instr->op()) {
1058
__ srav(result, left, ToRegister(right_op));
1061
__ srlv(result, left, ToRegister(right_op));
1062
if (instr->can_deopt()) {
1063
DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
1067
__ sllv(result, left, ToRegister(right_op));
1074
// Mask the right_op operand.
1075
int value = ToInteger32(LConstantOperand::cast(right_op));
1076
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1077
switch (instr->op()) {
1079
if (shift_count != 0) {
1080
__ sra(result, left, shift_count);
1082
__ Move(result, left);
1086
if (shift_count != 0) {
1087
__ srl(result, left, shift_count);
1089
if (instr->can_deopt()) {
1090
__ And(at, left, Operand(0x80000000));
1091
DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
1093
__ Move(result, left);
1097
if (shift_count != 0) {
1098
__ sll(result, left, shift_count);
1100
__ Move(result, left);
1111
// Emits integer subtraction: result = left - right. When the hydrogen value
// can overflow, uses the overflow-checking macro and deoptimizes on overflow.
void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    if (right->IsStackSlot() || right->IsArgument()) {
      Register right_reg = EmitLoadRegister(right, at);
      __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
    } else {
      ASSERT(right->IsRegister() || right->IsConstantOperand());
      __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
    }
  } else {  // can_overflow.
    Register overflow = scratch0();
    Register scratch = scratch1();
    if (right->IsStackSlot() ||
        right->IsArgument() ||
        right->IsConstantOperand()) {
      Register right_reg = EmitLoadRegister(right, scratch);
      __ SubuAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 right_reg,
                                 overflow);  // Reg at also used as scratch.
    } else {
      ASSERT(right->IsRegister());
      // Due to overflow check macros not supporting constant operands,
      // handling the IsConstantOperand case was moved to prev if clause.
      __ SubuAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 ToRegister(right),
                                 overflow);  // Reg at also used as scratch.
    }
    // Overflow flag register is negative when an overflow occurred.
    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
  }
}
// Materializes an integer constant into the result register.
void LCodeGen::DoConstantI(LConstantI* instr) {
  ASSERT(instr->result()->IsRegister());
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}
// Materializes a double constant into the result FPU register.
void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  DoubleRegister result = ToDoubleRegister(instr->result());
  double v = instr->value();
  __ Move(result, v);
}
// Materializes a tagged (handle) constant into the result register.
void LCodeGen::DoConstantT(LConstantT* instr) {
  ASSERT(instr->result()->IsRegister());
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}
// Loads the length field of a JSArray into the result register.
void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->InputAt(0));
  __ lw(result, FieldMemOperand(array, JSArray::kLengthOffset));
}
// Loads the length field of a FixedArrayBase into the result register.
void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->InputAt(0));
  __ lw(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
}
// Extracts the elements kind of an object from bit field 2 of its map.
void LCodeGen::DoElementsKind(LElementsKind* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->InputAt(0));

  // Load map into |result|.
  __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
  // Load the map's "bit field 2" into |result|. We only need the first byte,
  // but the following bit field extraction takes care of that anyway.
  __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
}
// Implements Object.prototype.valueOf for JSValue wrappers: unwraps the
// boxed value; smis and non-JSValue objects are returned unchanged.
void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->TempAt(0));
  Label done;

  // If the object is a smi return the object.
  __ Move(result, input);
  __ JumpIfSmi(input, &done);

  // If the object is not a value type, return the object.
  __ GetObjectType(input, map, map);
  __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE));
  __ lw(result, FieldMemOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}
// Emits bitwise NOT: result = ~input (nor with zero).
void LCodeGen::DoBitNotI(LBitNotI* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  __ Nor(result, zero_reg, Operand(input));
}
// Throws the input value via the runtime; control never returns here.
void LCodeGen::DoThrow(LThrow* instr) {
  Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
  __ push(input_reg);
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    __ stop("Unreachable code.");
  }
}
// Emits integer addition: result = left + right. When the hydrogen value
// can overflow, uses the overflow-checking macro and deoptimizes on overflow.
void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    if (right->IsStackSlot() || right->IsArgument()) {
      Register right_reg = EmitLoadRegister(right, at);
      __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
    } else {
      ASSERT(right->IsRegister() || right->IsConstantOperand());
      __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
    }
  } else {  // can_overflow.
    Register overflow = scratch0();
    Register scratch = scratch1();
    if (right->IsStackSlot() ||
        right->IsArgument() ||
        right->IsConstantOperand()) {
      Register right_reg = EmitLoadRegister(right, scratch);
      __ AdduAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 right_reg,
                                 overflow);  // Reg at also used as scratch.
    } else {
      ASSERT(right->IsRegister());
      // Due to overflow check macros not supporting constant operands,
      // handling the IsConstantOperand case was moved to prev if clause.
      __ AdduAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 ToRegister(right),
                                 overflow);  // Reg at also used as scratch.
    }
    // Overflow flag register is negative when an overflow occurred.
    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
  }
}
// Emits double arithmetic. ADD/SUB/MUL/DIV map to single FPU instructions;
// MOD goes through a C library call (fmod) via double_fp_operation.
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
  DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
  DoubleRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      __ add_d(result, left, right);
      break;
    case Token::SUB:
      __ sub_d(result, left, right);
      break;
    case Token::MUL:
      __ mul_d(result, left, right);
      break;
    case Token::DIV:
      __ div_d(result, left, right);
      break;
    case Token::MOD: {
      // Save a0-a3 on the stack.
      RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
      __ MultiPush(saved_regs);

      __ PrepareCallCFunction(0, 2, scratch0());
      __ SetCallCDoubleArguments(left, right);
      __ CallCFunction(
          ExternalReference::double_fp_operation(Token::MOD, isolate()),
          0, 2);
      // Move the result in the double result register.
      __ GetCFunctionDoubleResult(result);

      // Restore saved register.
      __ MultiPop(saved_regs);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}
// Emits generic (tagged) binary arithmetic via the BinaryOpStub.
// Register constraints: left in a1, right in a0, result in v0.
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(a1));
  ASSERT(ToRegister(instr->InputAt(1)).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));

  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  // Other arch use a nop here, to signal that there is no inlined
  // patchable code. Mips does not need the nop, since our marker
  // instruction (andi zero_reg) will never be used in normal code.
}
int LCodeGen::GetNextEmittedBlock(int block) {
1329
for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
1330
LLabel* label = chunk_->GetLabel(i);
1331
if (!label->HasReplacement()) return i;
1337
void LCodeGen::EmitBranch(int left_block, int right_block,
1338
Condition cc, Register src1, const Operand& src2) {
1339
int next_block = GetNextEmittedBlock(current_block_);
1340
right_block = chunk_->LookupDestination(right_block);
1341
left_block = chunk_->LookupDestination(left_block);
1342
if (right_block == left_block) {
1343
EmitGoto(left_block);
1344
} else if (left_block == next_block) {
1345
__ Branch(chunk_->GetAssemblyLabel(right_block),
1346
NegateCondition(cc), src1, src2);
1347
} else if (right_block == next_block) {
1348
__ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
1350
__ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
1351
__ Branch(chunk_->GetAssemblyLabel(right_block));
1356
void LCodeGen::EmitBranchF(int left_block, int right_block,
1357
Condition cc, FPURegister src1, FPURegister src2) {
1358
int next_block = GetNextEmittedBlock(current_block_);
1359
right_block = chunk_->LookupDestination(right_block);
1360
left_block = chunk_->LookupDestination(left_block);
1361
if (right_block == left_block) {
1362
EmitGoto(left_block);
1363
} else if (left_block == next_block) {
1364
__ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
1365
NegateCondition(cc), src1, src2);
1366
} else if (right_block == next_block) {
1367
__ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
1369
__ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
1370
__ Branch(chunk_->GetAssemblyLabel(right_block));
1375
// Emits a branch on the JavaScript truth value of the input. Integer and
// double inputs are tested directly; tagged inputs dispatch on the set of
// types seen so far (ToBooleanStub::Types), deoptimizing on a type that has
// never been observed on this path.
void LCodeGen::DoBranch(LBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32()) {
    Register reg = ToRegister(instr->InputAt(0));
    EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
  } else if (r.IsDouble()) {
    DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
    // Test the double value. Zero and NaN are false.
    EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->InputAt(0));
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      __ LoadRoot(at, Heap::kTrueValueRootIndex);
      EmitBranch(true_block, false_block, eq, reg, Operand(at));
    } else if (type.IsSmi()) {
      EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
    } else {
      Label* true_label = chunk_->GetAssemblyLabel(true_block);
      Label* false_label = chunk_->GetAssemblyLabel(false_block);

      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::all_types();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
        __ Branch(false_label, eq, reg, Operand(at));
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // Boolean -> its value.
        __ LoadRoot(at, Heap::kTrueValueRootIndex);
        __ Branch(true_label, eq, reg, Operand(at));
        __ LoadRoot(at, Heap::kFalseValueRootIndex);
        __ Branch(false_label, eq, reg, Operand(at));
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ LoadRoot(at, Heap::kNullValueRootIndex);
        __ Branch(false_label, eq, reg, Operand(at));
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ Branch(false_label, eq, reg, Operand(zero_reg));
        __ JumpIfSmi(reg, true_label);
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ And(at, reg, Operand(kSmiTagMask));
        DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
      }

      const Register map = scratch0();
      if (expected.NeedsMap()) {
        __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
          __ And(at, at, Operand(1 << Map::kIsUndetectable));
          __ Branch(false_label, ne, at, Operand(zero_reg));
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(true_label, ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
        __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
        __ Branch(true_label, ne, at, Operand(zero_reg));
        __ Branch(false_label);
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        DoubleRegister dbl_scratch = double_scratch0();
        Label not_heap_number;
        __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
        __ Branch(&not_heap_number, ne, map, Operand(at));
        __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
        __ BranchF(true_label, false_label, ne, dbl_scratch, kDoubleRegZero);
        // Falls through if dbl_scratch == 0.
        __ Branch(false_label);
        __ bind(&not_heap_number);
      }

      // We've seen something for the first time -> deopt.
      DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
    }
  }
}
void LCodeGen::EmitGoto(int block) {
1481
block = chunk_->LookupDestination(block);
1482
int next_block = GetNextEmittedBlock(current_block_);
1483
if (block != next_block) {
1484
__ jmp(chunk_->GetAssemblyLabel(block));
1489
// Unconditional control transfer to the instruction's target block.
void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}
// Maps a comparison token to a MIPS condition code, choosing the unsigned
// variant (lo/hi/ls/hs) when |is_unsigned| is set.
Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = kNoCondition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::LT:
      cond = is_unsigned ? lo : lt;
      break;
    case Token::GT:
      cond = is_unsigned ? hi : gt;
      break;
    case Token::LTE:
      cond = is_unsigned ? ls : le;
      break;
    case Token::GTE:
      cond = is_unsigned ? hs : ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}
// Compare-and-branch on integer or double operands. Two constants are folded
// statically; doubles branch to the false block on unordered (NaN) results;
// integer comparisons against a constant keep the constant on the right,
// reversing the condition if the operands had to be transposed.
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());

  Condition cond = TokenToCondition(instr->op(), false);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block =
        EvalComparison(instr->op(), left_val, right_val) ? true_block
                                                         : false_block;
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Compare left and right as doubles and load the
      // resulting flags into the normal status register.
      FPURegister left_reg = ToDoubleRegister(left);
      FPURegister right_reg = ToDoubleRegister(right);

      // If a NaN is involved, i.e. the result is unordered,
      // jump to false block label.
      __ BranchF(NULL, chunk_->GetAssemblyLabel(false_block), eq,
                 left_reg, right_reg);

      EmitBranchF(true_block, false_block, cond, left_reg, right_reg);
    } else {
      Register cmp_left;
      Operand cmp_right = Operand(0);

      if (right->IsConstantOperand()) {
        cmp_left = ToRegister(left);
        cmp_right = Operand(ToInteger32(LConstantOperand::cast(right)));
      } else if (left->IsConstantOperand()) {
        cmp_left = ToRegister(right);
        cmp_right = Operand(ToInteger32(LConstantOperand::cast(left)));
        // We transposed the operands. Reverse the condition.
        cond = ReverseCondition(cond);
      } else {
        cmp_left = ToRegister(left);
        cmp_right = Operand(ToRegister(right));
      }

      EmitBranch(true_block, false_block, cond, cmp_left, cmp_right);
    }
  }
}
// Branches on pointer identity of two tagged values.
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->InputAt(0));
  Register right = ToRegister(instr->InputAt(1));
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());

  EmitBranch(true_block, false_block, eq, left, Operand(right));
}
// Branches on equality of a tagged value with the hydrogen constant.
void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
  Register left = ToRegister(instr->InputAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  EmitBranch(true_block, false_block, eq, left,
             Operand(instr->hydrogen()->right()));
}
// Branches on a null/undefined comparison. Strict equality compares against
// the single nil value; non-strict also accepts the other nil value and
// undetectable objects.
void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
  Register scratch = scratch0();
  Register reg = ToRegister(instr->InputAt(0));
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  // If the expression is known to be untagged or a smi, then it's definitely
  // not null, and it can't be a an undetectable object.
  if (instr->hydrogen()->representation().IsSpecialization() ||
      instr->hydrogen()->type().IsSmi()) {
    EmitGoto(false_block);
    return;
  }

  int true_block = chunk_->LookupDestination(instr->true_block_id());

  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
      Heap::kNullValueRootIndex :
      Heap::kUndefinedValueRootIndex;
  __ LoadRoot(at, nil_value);
  if (instr->kind() == kStrictEquality) {
    EmitBranch(true_block, false_block, eq, reg, Operand(at));
  } else {
    Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
        Heap::kUndefinedValueRootIndex :
        Heap::kNullValueRootIndex;
    Label* true_label = chunk_->GetAssemblyLabel(true_block);
    Label* false_label = chunk_->GetAssemblyLabel(false_block);
    __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
    __ LoadRoot(at, other_nil_value);  // In the delay slot.
    __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
    __ JumpIfSmi(reg, false_label);  // In the delay slot.
    // Check for undetectable objects by looking in the bit field in
    // the map. The object has already been smi checked.
    __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ And(scratch, scratch, 1 << Map::kIsUndetectable);
    EmitBranch(true_block, false_block, ne, scratch, Operand(zero_reg));
  }
}
// Emits the "is object" test: smis and undetectable objects are not objects;
// null is an object. Leaves the instance type in temp2 and returns the
// condition the caller must branch on (le against the last object type).
Condition LCodeGen::EmitIsObject(Register input,
                                 Register temp1,
                                 Register temp2,
                                 Label* is_not_object,
                                 Label* is_object) {
  __ JumpIfSmi(input, is_not_object);

  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
  __ Branch(is_object, eq, input, Operand(temp2));

  // Load map.
  __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
  __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
  __ Branch(is_not_object, ne, temp2, Operand(zero_reg));

  // Load instance type and check that it is in object type range.
  __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
  __ Branch(is_not_object,
            lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));

  return le;
}
// Branches on whether the input is a JS object (see EmitIsObject).
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register temp1 = ToRegister(instr->TempAt(0));
  Register temp2 = scratch0();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition true_cond =
      EmitIsObject(reg, temp1, temp2, false_label, true_label);

  // temp2 holds the instance type after EmitIsObject.
  EmitBranch(true_block, false_block, true_cond, temp2,
             Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
}
// Emits the "is string" test: jumps away for smis, loads the instance type
// into temp1 and returns the condition (lt against FIRST_NONSTRING_TYPE)
// the caller must branch on.
Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string) {
  __ JumpIfSmi(input, is_not_string);
  __ GetObjectType(input, temp1, temp1);

  return lt;
}
// Branches on whether the input is a string (see EmitIsString).
void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register temp1 = ToRegister(instr->TempAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition true_cond =
      EmitIsString(reg, temp1, false_label);

  // temp1 holds the instance type after EmitIsString.
  EmitBranch(true_block, false_block, true_cond, temp1,
             Operand(FIRST_NONSTRING_TYPE));
}
// Branches on whether the input value is a smi (tag bits clear).
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
  __ And(at, input_reg, kSmiTagMask);
  EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
}
// Branches on whether the input is an undetectable object (map bit field
// has kIsUndetectable set); smis are never undetectable.
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
  __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
  EmitBranch(true_block, false_block, ne, at, Operand(zero_reg));
}
// Maps a comparison token to the signed condition used to test the result
// (in v0) of a CompareIC call against zero.
static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}
// String comparison via the CompareIC; branches on the IC's result in v0
// compared against zero.
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  Token::Value op = instr->op();
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);

  EmitBranch(true_block, false_block, condition, v0, Operand(zero_reg));
}
// Returns the single instance type to compare against for a has-instance-type
// branch (the [from, to] range must be expressible as one compare).
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}
// Returns the condition matching TestType(): eq for a single type, hs for an
// open-ended upper range, ls for a range starting at FIRST_TYPE.
static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return eq;
  if (to == LAST_TYPE) return hs;
  if (from == FIRST_TYPE) return ls;
  UNREACHABLE();
  return eq;
}
// Branches on whether the input's instance type is in the instruction's
// type range; smis go to the false block.
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->InputAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  __ JumpIfSmi(input, false_label);

  __ GetObjectType(input, scratch, scratch);
  EmitBranch(true_block,
             false_block,
             BranchCondition(instr->hydrogen()),
             scratch,
             Operand(TestType(instr->hydrogen())));
}
// Extracts the cached array index from a string's hash field.
void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  if (FLAG_debug_code) {
    __ AbortIfNotString(input);
  }

  __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}
void LCodeGen::DoHasCachedArrayIndexAndBranch(
1819
LHasCachedArrayIndexAndBranch* instr) {
1820
Register input = ToRegister(instr->InputAt(0));
1821
Register scratch = scratch0();
1823
int true_block = chunk_->LookupDestination(instr->true_block_id());
1824
int false_block = chunk_->LookupDestination(instr->false_block_id());
1827
FieldMemOperand(input, String::kHashFieldOffset));
1828
__ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
1829
EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
1833
// Branches to a label or falls through with this instance class-name adr
1834
// returned in temp reg, available for comparison by the caller. Trashes the
1835
// temp registers, but not the input. Only input and temp2 may alias.
1836
void LCodeGen::EmitClassOfTest(Label* is_true,
1838
Handle<String>class_name,
1842
ASSERT(!input.is(temp));
1843
ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
1844
__ JumpIfSmi(input, is_false);
1846
if (class_name->IsEqualTo(CStrVector("Function"))) {
1847
// Assuming the following assertions, we can use the same compares to test
1848
// for both being a function type and being in the object type range.
1849
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
1850
STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
1851
FIRST_SPEC_OBJECT_TYPE + 1);
1852
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
1853
LAST_SPEC_OBJECT_TYPE - 1);
1854
STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
1856
__ GetObjectType(input, temp, temp2);
1857
__ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
1858
__ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
1859
__ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
1861
// Faster code path to avoid two compares: subtract lower bound from the
1862
// actual type and do a signed compare with the width of the type range.
1863
__ GetObjectType(input, temp, temp2);
1864
__ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1865
__ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
1866
FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1869
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
1870
// Check if the constructor in the map is a function.
1871
__ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));
1873
// Objects with a non-function constructor have class 'Object'.
1874
__ GetObjectType(temp, temp2, temp2);
1875
if (class_name->IsEqualTo(CStrVector("Object"))) {
1876
__ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
1878
__ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
1881
// temp now contains the constructor function. Grab the
1882
// instance class name from there.
1883
__ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
1884
__ lw(temp, FieldMemOperand(temp,
1885
SharedFunctionInfo::kInstanceClassNameOffset));
1886
// The class name we are testing against is a symbol because it's a literal.
1887
// The name in the constructor is a symbol because of the way the context is
1888
// booted. This routine isn't expected to work for random API-created
1889
// classes and it doesn't have to because you can't access it with natives
1890
// syntax. Since both sides are symbols it is sufficient to use an identity
1893
// End with the address of this class_name instance in temp register.
1894
// On MIPS, the caller must do the comparison with Handle<String>class_name.
1898
// Branches on whether the input's class name equals the literal class name;
// the comparison against the class-name handle is done here (EmitClassOfTest
// leaves the instance class name address in |temp|).
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = scratch0();
  Register temp2 = ToRegister(instr->TempAt(0));
  Handle<String> class_name = instr->hydrogen()->class_name();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);

  EmitBranch(true_block, false_block, eq, temp, Operand(class_name));
}
// Branches on whether the object's map equals the instruction's map.
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));
  int true_block = instr->true_block_id();
  int false_block = instr->false_block_id();

  __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
  EmitBranch(true_block, false_block, eq, temp, Operand(instr->map()));
}
// Emits a generic instanceof via InstanceofStub; the stub returns zero in v0
// on a positive result, which is converted here to true/false objects.
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  Label true_label, done;
  ASSERT(ToRegister(instr->InputAt(0)).is(a0));  // Object is in a0.
  ASSERT(ToRegister(instr->InputAt(1)).is(a1));  // Function is in a1.
  Register result = ToRegister(instr->result());
  ASSERT(result.is(v0));

  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);

  __ Branch(&true_label, eq, result, Operand(zero_reg));
  __ li(result, Operand(factory()->false_value()));
  __ Branch(&done);
  __ bind(&true_label);
  __ li(result, Operand(factory()->true_value()));
  __ bind(&done);
}
// Instanceof against a known global function with an inlined, patchable
// call-site cache: the map and result cells start as the-hole and are
// patched by the instanceof stub; cache misses fall back to deferred code.
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() { return instr_; }
    Label* map_check() { return &map_check_; }

   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));
  Register result = ToRegister(instr->result());

  ASSERT(object.is(a0));
  ASSERT(result.is(v0));

  // A Smi is not instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurences of the
  // hole value will be patched to the last map/result pair generated by the
  // instanceof stub.
  Label cache_miss;
  Register map = temp;
  __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));

  // Block trampoline pool emission so the patched code sequence stays
  // contiguous.
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  __ bind(deferred->map_check());  // Label for calculating code patching.
  // We use Factory::the_hole_value() on purpose instead of loading from the
  // root array to force relocation to be able to later patch with
  // the cached map.
  Handle<JSGlobalPropertyCell> cell =
      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
  __ li(at, Operand(Handle<Object>(cell)));
  __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
  __ Branch(&cache_miss, ne, map, Operand(at));
  // We use Factory::the_hole_value() on purpose instead of loading from the
  // root array to force relocation to be able to later patch
  // with true or false.
  __ li(result, Operand(factory()->the_hole_value()), true);
  __ Branch(&done);

  // The inlined call site cache did not match. Check null and string before
  // calling the deferred code.
  __ bind(&cache_miss);
  // Null is not instance of anything.
  __ LoadRoot(temp, Heap::kNullValueRootIndex);
  __ Branch(&false_result, eq, object, Operand(temp));

  // String values is not instance of anything.
  Condition cc = __ IsObjectStringType(object, temp, temp);
  __ Branch(&false_result, cc, temp, Operand(zero_reg));

  // Go to the deferred code.
  __ Branch(deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);

  // Here result has either true or false. Deferred code also produces true or
  // false object.
  __ bind(deferred->exit());
  __ bind(&done);
}
void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2026
Register result = ToRegister(instr->result());
2027
ASSERT(result.is(v0));
2029
InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2030
flags = static_cast<InstanceofStub::Flags>(
2031
flags | InstanceofStub::kArgsInRegisters);
2032
flags = static_cast<InstanceofStub::Flags>(
2033
flags | InstanceofStub::kCallSiteInlineCheck);
2034
flags = static_cast<InstanceofStub::Flags>(
2035
flags | InstanceofStub::kReturnTrueFalseObject);
2036
InstanceofStub stub(flags);
2038
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2040
// Get the temp register reserved by the instruction. This needs to be t0 as
2041
// its slot of the pushing of safepoint registers is used to communicate the
2042
// offset to the location of the map check.
2043
Register temp = ToRegister(instr->TempAt(0));
2044
ASSERT(temp.is(t0));
2045
__ li(InstanceofStub::right(), Operand(instr->function()));
2046
static const int kAdditionalDelta = 7;
2047
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2048
Label before_push_delta;
2049
__ bind(&before_push_delta);
2051
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2052
__ li(temp, Operand(delta * kPointerSize), true);
2053
__ StoreToSafepointRegisterSlot(temp, temp);
2055
CallCodeGeneric(stub.GetCode(),
2056
RelocInfo::CODE_TARGET,
2058
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2059
ASSERT(instr->HasDeoptimizationEnvironment());
2060
LEnvironment* env = instr->deoptimization_environment();
2061
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2062
// Put the result value into the result register slot and
2063
// restore all registers.
2064
__ StoreToSafepointRegisterSlot(result, result);
2068
// Generic (tagged) comparison via the CompareIC, materializing true/false
// in the result register using a three-instruction delay-slot sequence.
void LCodeGen::DoCmpT(LCmpT* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // On MIPS there is no need for a "no inlined smi code" marker (nop).

  Condition condition = ComputeCompareCondition(op);
  // A minor optimization that relies on LoadRoot always emitting one
  // instruction.
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
  Label done;
  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  ASSERT_EQ(3, masm()->InstructionsGeneratedSince(&done));
  __ bind(&done);
}
// Emits the function epilogue: optional exit tracing, frame teardown, and
// removal of the receiver plus parameters from the stack before returning.
void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in v0.
    __ push(v0);
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  // Pop the frame: parameters plus one slot for the receiver.
  int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
  __ mov(sp, fp);
  __ Pop(ra, fp);
  __ Addu(sp, sp, Operand(sp_delta));
  __ Jump(ra);
}
// Loads the value of a global property cell, deoptimizing if the cell
// holds the hole (i.e. the property was deleted) and a hole check is
// required by the hydrogen instruction.
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell())));
  __ lw(result, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment(), result, Operand(at));
  }
}
// Loads a global through the generic LoadIC. The IC expects the global
// object in a0 and the property name in a2; the result comes back in v0.
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->global_object()).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));

  __ li(a2, Operand(instr->name()));
  // typeof lookups must not be context-dependent, so use a plain
  // CODE_TARGET reloc mode for them.
  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
                                             : RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, mode, instr);
}
// Stores a value into a global property cell, deoptimizing first if the
// cell currently holds the hole (the property was deleted from the
// dictionary and the slow path must run instead).
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->InputAt(0));
  Register scratch = scratch0();
  Register scratch2 = ToRegister(instr->TempAt(0));

  // Load the cell.
  __ li(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ lw(scratch2,
          FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment(), scratch2, Operand(at));
  }

  // Store the value.
  __ sw(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
  // Cells are always rescanned, so no write barrier here.
}
// Stores a global through the generic StoreIC. The IC expects the global
// object in a1, the value in a0, and the property name in a2; the strict
// or non-strict stub is chosen from the instruction's strict mode flag.
void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->global_object()).is(a1));
  ASSERT(ToRegister(instr->value()).is(a0));

  __ li(a2, Operand(instr->name()));
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
}
// Loads a slot from the given context object into the result register.
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ lw(result, ContextOperand(context, instr->slot_index()));
}
// Stores a value into a context slot and emits the write barrier when the
// stored value may be a heap object (the smi check is omitted when the
// hydrogen type proves the value is already a heap object).
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  MemOperand target = ContextOperand(context, instr->slot_index());
  __ sw(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    HType type = instr->hydrogen()->value()->type();
    SmiCheck check_needed =
        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context,
                              target.offset(),
                              value,
                              scratch0(),
                              kRAHasBeenSaved,
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }
}
// Loads a named field with a known map: either directly from the object's
// in-object area or indirectly through its properties backing store.
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  Register object = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  if (instr->hydrogen()->is_in_object()) {
    __ lw(result, FieldMemOperand(object, instr->hydrogen()->offset()));
  } else {
    __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    __ lw(result, FieldMemOperand(result, instr->hydrogen()->offset()));
  }
}
void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
2206
Handle<String> name) {
2207
LookupResult lookup(isolate());
2208
type->LookupInDescriptors(NULL, *name, &lookup);
2209
ASSERT(lookup.IsProperty() &&
2210
(lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
2211
if (lookup.type() == FIELD) {
2212
int index = lookup.GetLocalFieldIndexFromMap(*type);
2213
int offset = index * kPointerSize;
2215
// Negative property indices are in-object properties, indexed
2216
// from the end of the fixed part of the object.
2217
__ lw(result, FieldMemOperand(object, offset + type->instance_size()));
2219
// Non-negative property indices are in the properties array.
2220
__ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2221
__ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
2224
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
2225
LoadHeapObject(result, Handle<HeapObject>::cast(function));
2230
// Polymorphic named load: compares the receiver's map against each
// expected map in turn, emitting a direct field/constant-function load per
// map. The last map either falls back to the generic LoadIC or deoptimizes,
// depending on need_generic().
void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
  Register object = ToRegister(instr->object());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  int map_count = instr->hydrogen()->types()->length();
  Handle<String> name = instr->hydrogen()->name();
  if (map_count == 0) {
    // No maps collected: only the generic path is possible.
    ASSERT(instr->hydrogen()->need_generic());
    __ li(a2, Operand(name));
    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  } else {
    Label done;
    __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    for (int i = 0; i < map_count - 1; ++i) {
      Handle<Map> map = instr->hydrogen()->types()->at(i);
      Label next;
      __ Branch(&next, ne, scratch, Operand(map));
      EmitLoadFieldOrConstantFunction(result, object, map, name);
      __ Branch(&done);
      __ bind(&next);
    }
    Handle<Map> map = instr->hydrogen()->types()->last();
    if (instr->hydrogen()->need_generic()) {
      Label generic;
      __ Branch(&generic, ne, scratch, Operand(map));
      EmitLoadFieldOrConstantFunction(result, object, map, name);
      __ Branch(&done);
      __ bind(&generic);
      __ li(a2, Operand(name));
      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
      CallCode(ic, RelocInfo::CODE_TARGET, instr);
    } else {
      // Map mismatch on the last map means the assumptions are wrong:
      // deoptimize rather than fall through to a generic load.
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
      EmitLoadFieldOrConstantFunction(result, object, map, name);
    }
    __ bind(&done);
  }
}
// Generic named load via the LoadIC. Receiver in a0, name in a2,
// result in v0.
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));

  // Name is always in a2.
  __ li(a2, Operand(instr->name()));
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
// Loads a JSFunction's prototype, handling the three cases: an explicit
// prototype slot, an initial map (whose kPrototypeOffset holds the
// prototype), and the non-instance-prototype case where the prototype is
// fetched from the constructor field of the initial map.
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register scratch = scratch0();
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function. Load map into the
  // result register.
  __ GetObjectType(function, result, scratch);
  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));

  // Make sure that the function has an instance prototype.
  Label non_instance;
  __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
  __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
  __ Branch(&non_instance, ne, scratch, Operand(zero_reg));

  // Get the prototype or initial map from the function.
  __ lw(result,
        FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), result, Operand(at));

  // If the function does not have an initial map, we're done.
  Label done;
  __ GetObjectType(result, scratch, scratch);
  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));

  // Get the prototype from the initial map.
  __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
  __ Branch(&done);

  // Non-instance prototype: Fetch prototype from constructor field
  // in the initial map.
  __ bind(&non_instance);
  __ lw(result, FieldMemOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}
// Loads an object's elements backing store; in debug builds verifies that
// the elements are a FixedArray, a FixedCOWArray, or in the fast/external
// elements-kind range, aborting otherwise.
void LCodeGen::DoLoadElements(LLoadElements* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->InputAt(0));
  Register scratch = scratch0();

  __ lw(result, FieldMemOperand(input, JSObject::kElementsOffset));
  if (FLAG_debug_code) {
    Label done, fail;
    __ lw(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
    __ Branch(USE_DELAY_SLOT, &done, eq, scratch, Operand(at));
    __ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);  // In the delay slot.
    __ Branch(&done, eq, scratch, Operand(at));
    // |scratch| still contains |input|'s map.
    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
    __ Ext(scratch, scratch, Map::kElementsKindShift,
           Map::kElementsKindBitCount);
    __ Branch(&done, eq, scratch,
              Operand(FAST_ELEMENTS));
    __ Branch(&fail, lt, scratch,
              Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
    __ Branch(&done, le, scratch,
              Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
    __ bind(&fail);
    __ Abort("Check for fast or external elements failed.");
    __ bind(&done);
  }
}
void LCodeGen::DoLoadExternalArrayPointer(
2356
LLoadExternalArrayPointer* instr) {
2357
Register to_reg = ToRegister(instr->result());
2358
Register from_reg = ToRegister(instr->InputAt(0));
2359
__ lw(to_reg, FieldMemOperand(from_reg,
2360
ExternalArray::kExternalPointerOffset));
2364
// Loads arguments[index] from an arguments backing area that grows down
// from the frame pointer. Deoptimizes on out-of-range indices.
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register length = ToRegister(instr->length());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());

  // Bailout index is not a valid argument index. Use unsigned check to get
  // negative check for free.

  // TODO(plind): Should be optimized to do the sub before the DeoptimizeIf(),
  // as they do in Arm. It will save us an instruction.
  DeoptimizeIf(ls, instr->environment(), length, Operand(index));

  // There are two words between the frame pointer and the last argument.
  // Subtracting from length accounts for one of them, add one more.
  __ subu(length, length, index);
  __ Addu(length, length, Operand(1));
  __ sll(length, length, kPointerSizeLog2);
  __ Addu(at, arguments, Operand(length));
  __ lw(result, MemOperand(at, 0));
}
// Loads an element from a fast (FixedArray) backing store, optionally
// deoptimizing when the loaded value is the hole.
void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
  Register elements = ToRegister(instr->elements());
  Register key = EmitLoadRegister(instr->key(), scratch0());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Load the result.
  __ sll(scratch, key, kPointerSizeLog2);  // Key indexes words.
  __ addu(scratch, elements, scratch);
  __ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
  }
}
void LCodeGen::DoLoadKeyedFastDoubleElement(
2407
LLoadKeyedFastDoubleElement* instr) {
2408
Register elements = ToRegister(instr->elements());
2409
bool key_is_constant = instr->key()->IsConstantOperand();
2410
Register key = no_reg;
2411
DoubleRegister result = ToDoubleRegister(instr->result());
2412
Register scratch = scratch0();
2415
ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
2416
int constant_key = 0;
2417
if (key_is_constant) {
2418
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2419
if (constant_key & 0xF0000000) {
2420
Abort("array index constant value too big.");
2423
key = ToRegister(instr->key());
2426
if (key_is_constant) {
2427
__ Addu(elements, elements, Operand(constant_key * (1 << shift_size) +
2428
FixedDoubleArray::kHeaderSize - kHeapObjectTag));
2430
__ sll(scratch, key, shift_size);
2431
__ Addu(elements, elements, Operand(scratch));
2432
__ Addu(elements, elements,
2433
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
2436
__ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
2437
DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
2439
__ ldc1(result, MemOperand(elements));
2443
void LCodeGen::DoLoadKeyedSpecializedArrayElement(
2444
LLoadKeyedSpecializedArrayElement* instr) {
2445
Register external_pointer = ToRegister(instr->external_pointer());
2446
Register key = no_reg;
2447
ElementsKind elements_kind = instr->elements_kind();
2448
bool key_is_constant = instr->key()->IsConstantOperand();
2449
int constant_key = 0;
2450
if (key_is_constant) {
2451
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2452
if (constant_key & 0xF0000000) {
2453
Abort("array index constant value too big.");
2456
key = ToRegister(instr->key());
2458
int shift_size = ElementsKindToShiftSize(elements_kind);
2460
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
2461
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
2462
FPURegister result = ToDoubleRegister(instr->result());
2463
if (key_is_constant) {
2464
__ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
2466
__ sll(scratch0(), key, shift_size);
2467
__ Addu(scratch0(), scratch0(), external_pointer);
2470
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
2471
__ lwc1(result, MemOperand(scratch0()));
2472
__ cvt_d_s(result, result);
2473
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
2474
__ ldc1(result, MemOperand(scratch0()));
2477
Register result = ToRegister(instr->result());
2478
Register scratch = scratch0();
2479
MemOperand mem_operand(zero_reg);
2480
if (key_is_constant) {
2481
mem_operand = MemOperand(external_pointer,
2482
constant_key * (1 << shift_size));
2484
__ sll(scratch, key, shift_size);
2485
__ Addu(scratch, scratch, external_pointer);
2486
mem_operand = MemOperand(scratch);
2488
switch (elements_kind) {
2489
case EXTERNAL_BYTE_ELEMENTS:
2490
__ lb(result, mem_operand);
2492
case EXTERNAL_PIXEL_ELEMENTS:
2493
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
2494
__ lbu(result, mem_operand);
2496
case EXTERNAL_SHORT_ELEMENTS:
2497
__ lh(result, mem_operand);
2499
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
2500
__ lhu(result, mem_operand);
2502
case EXTERNAL_INT_ELEMENTS:
2503
__ lw(result, mem_operand);
2505
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
2506
__ lw(result, mem_operand);
2507
// TODO(danno): we could be more clever here, perhaps having a special
2508
// version of the stub that detects if the overflow case actually
2509
// happens, and generate code that returns a double rather than int.
2510
DeoptimizeIf(Ugreater_equal, instr->environment(),
2511
result, Operand(0x80000000));
2513
case EXTERNAL_FLOAT_ELEMENTS:
2514
case EXTERNAL_DOUBLE_ELEMENTS:
2515
case FAST_DOUBLE_ELEMENTS:
2517
case FAST_SMI_ONLY_ELEMENTS:
2518
case DICTIONARY_ELEMENTS:
2519
case NON_STRICT_ARGUMENTS_ELEMENTS:
2527
// Generic keyed load via the KeyedLoadIC. Receiver in a1, key in a0.
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(a1));
  ASSERT(ToRegister(instr->key()).is(a0));

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
// Computes the frame pointer of the frame holding the actual arguments:
// the current frame, or the arguments adaptor frame below it if present.
// Uses branchless conditional moves (movn/movz) on the adaptor test.
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register scratch = scratch0();
  Register temp = scratch1();
  Register result = ToRegister(instr->result());

  // Check if the calling frame is an arguments adaptor frame.
  Label done, adapted;
  __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
  __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

  // Result is the frame pointer for the frame if not adapted and for the real
  // frame below the adaptor frame if adapted.
  __ movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
  __ movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
}
// Computes the number of actual arguments: the static parameter count when
// there is no adaptor frame, otherwise the length stored in the adaptor.
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elem = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
  __ Branch(&done, eq, fp, Operand(elem));

  // Arguments adaptor frame present. Get argument length from there.
  __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ lw(result,
        MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}
// Implements Function.prototype.apply with an arguments object: fixes up
// the receiver per calling convention, copies the arguments from the
// (possibly adapted) caller frame onto the stack, and invokes the function.
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  ASSERT(receiver.is(a0));  // Used for parameter count.
  ASSERT(function.is(a1));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).is(v0));

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, receiver_ok;

  // Do not transform the receiver to object for strict mode
  // functions.
  __ lw(scratch,
        FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
  __ lw(scratch,
        FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));

  // Do not transform the receiver to object for builtins.
  int32_t strict_mode_function_mask =
      1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
  int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
  __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
  __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg));

  // Normal function. Replace undefined or null with global receiver.
  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));

  // Deoptimize if the receiver is not a JS object.
  __ And(scratch, receiver, Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));

  __ GetObjectType(receiver, scratch, scratch);
  DeoptimizeIf(lt, instr->environment(),
               scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ Branch(&receiver_ok);

  __ bind(&global_object);
  __ lw(receiver, GlobalObjectOperand());
  __ lw(receiver,
        FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
  __ bind(&receiver_ok);

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ push(receiver);
  __ Move(receiver, length);
  // The arguments are at a one pointer size offset from elements.
  __ Addu(elements, elements, Operand(1 * kPointerSize));

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
  __ sll(scratch, length, 2);
  __ bind(&loop);
  __ Addu(scratch, elements, scratch);
  __ lw(scratch, MemOperand(scratch));
  __ push(scratch);
  __ Subu(length, length, Operand(1));
  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
  __ sll(scratch, length, 2);

  __ bind(&invoke);
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in receiver which is a0, as expected
  // by InvokeFunction.
  v8::internal::ParameterCount actual(receiver);
  __ InvokeFunction(function, actual, CALL_FUNCTION,
                    safepoint_generator, CALL_AS_METHOD);
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
// Pushes a single (non-double) argument onto the stack for an outgoing call.
void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->InputAt(0);
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
    Abort("DoPushArgument not implemented for double type.");
  } else {
    Register argument_reg = EmitLoadRegister(argument, at);
    __ push(argument_reg);
  }
}
// Materializes the current closure (this function) in the result register.
void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  LoadHeapObject(result, instr->hydrogen()->closure());
}
// Moves the current context (cp) into the result register.
void LCodeGen::DoContext(LContext* instr) {
  Register result = ToRegister(instr->result());
  __ mov(result, cp);
}
// Loads the enclosing (previous) context from the given context's
// PREVIOUS_INDEX slot.
void LCodeGen::DoOuterContext(LOuterContext* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ lw(result,
        MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
// Loads the global object from the current context (cp).
// NOTE(review): the load goes through cp, not the |context| input — this
// matches the original code, which only uses the input for register
// allocation; confirm against the upstream port if changing.
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ lw(result, ContextOperand(cp, Context::GLOBAL_INDEX));
}
// Loads the global receiver (the object used as |this| for non-strict
// calls) from the given global object.
void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
  Register global = ToRegister(instr->global());
  Register result = ToRegister(instr->result());
  __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
}
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
2713
LInstruction* instr,
2714
CallKind call_kind) {
2715
// Change context if needed.
2716
bool change_context =
2717
(info()->closure()->context() != function->context()) ||
2718
scope()->contains_with() ||
2719
(scope()->num_heap_slots() > 0);
2720
if (change_context) {
2721
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
2724
// Set a0 to arguments count if adaption is not needed. Assumes that a0
2725
// is available to write to at this point.
2726
if (!function->NeedsArgumentsAdaption()) {
2727
__ li(a0, Operand(arity));
2730
LPointerMap* pointers = instr->pointer_map();
2731
RecordPosition(pointers->position());
2734
__ SetCallKind(t1, call_kind);
2735
__ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
2738
// Setup deoptimization.
2739
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
2742
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2746
// Calls a compile-time-constant function: loads it into a1 (as
// CallKnownFunction requires) and delegates.
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  __ li(a1, Operand(instr->function()));
  CallKnownFunction(instr->function(), instr->arity(), instr, CALL_AS_METHOD);
}
// Deferred slow path for Math.abs on a tagged value: verifies the input is
// a heap number, returns it unchanged when non-negative, and otherwise
// allocates a fresh heap number holding the value with its sign bit cleared.
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if not a heap number.
  __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));

  Label done;
  Register exponent = scratch0();
  scratch = no_reg;
  __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ Move(result, input);
  __ And(at, exponent, Operand(HeapNumber::kSignMask));
  __ Branch(&done, eq, at, Operand(zero_reg));

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  {
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

    // Registers were saved at the safepoint, so we can use
    // many scratch registers.
    Register tmp1 = input.is(a1) ? a0 : a1;
    Register tmp2 = input.is(a2) ? a0 : a2;
    Register tmp3 = input.is(a3) ? a0 : a3;
    Register tmp4 = input.is(t0) ? a0 : t0;

    // exponent: floating point exponent value.

    Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ Branch(&allocated);

    // Slow case: Call the runtime system to do the number allocation.
    __ bind(&slow);

    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
    // Set the pointer to the new heap number in tmp.
    if (!tmp1.is(v0))
      __ mov(tmp1, v0);
    // Restore input_reg after call to runtime.
    __ LoadFromSafepointRegisterSlot(input, input);
    __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));

    __ bind(&allocated);
    // exponent: floating point exponent value.
    // tmp1: allocated heap number.
    __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
    __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
    __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
    __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));

    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}
// Emits integer Math.abs: copies the input when non-negative (in the
// branch delay slot), otherwise negates it; deoptimizes on INT_MIN
// (0x80000000), whose negation overflows and stays negative.
void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  Label done;
  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
  __ mov(result, input);
  ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done));
  __ subu(result, zero_reg, input);
  // Overflow if result is still negative, ie 0x80000000.
  DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
  __ bind(&done);
}
// Math.abs dispatch by representation: abs_d for doubles, inline integer
// abs for int32, and for tagged values a fast smi path with a deferred
// heap-number slow path.
void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                    LUnaryMathOperation* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LUnaryMathOperation* instr_;
  };

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    FPURegister input = ToDoubleRegister(instr->InputAt(0));
    FPURegister result = ToDoubleRegister(instr->result());
    __ abs_d(result, input);
  } else if (r.IsInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->InputAt(0));
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}
// Math.floor on a double input: truncates toward minus infinity via the
// FPU, deoptimizing if the truncation was inexact/out-of-range or if the
// result is -0 and minus-zero bailout is required.
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  FPURegister single_scratch = double_scratch0().low();
  Register scratch1 = scratch0();
  Register except_flag = ToRegister(instr->TempAt(0));

  __ EmitFPUTruncate(kRoundToMinusInf,
                     single_scratch,
                     input,
                     scratch1,
                     except_flag);

  // Deopt if the operation did not succeed.
  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

  // Load the result.
  __ mfc1(result, single_scratch);

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0: a zero result with the input's sign bit set.
    Label done;
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ mfc1(scratch1, input.high());
    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
    __ bind(&done);
  }
}
// Math.round on a double input: adds 0.5 and truncates toward minus
// infinity, with explicit handling for the ]-0.5, 0.5[ zero range, the
// sign-flip (]-0.5, 0[) case, and optional minus-zero bailout.
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  Label done, check_sign_on_zero;

  // Extract exponent bits.
  __ mfc1(result, input.high());
  __ Ext(scratch,
         result,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
  Label skip1;
  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
  __ mov(result, zero_reg);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Branch(&check_sign_on_zero);
  } else {
    __ Branch(&done);
  }
  __ bind(&skip1);

  // The following conversion will not work with numbers
  // outside of ]-2^32, 2^32[.
  DeoptimizeIf(ge, instr->environment(), scratch,
               Operand(HeapNumber::kExponentBias + 32));

  // Save the original sign for later comparison.
  __ And(scratch, result, Operand(HeapNumber::kSignMask));

  __ Move(double_scratch0(), 0.5);
  __ add_d(double_scratch0(), input, double_scratch0());

  // Check sign of the result: if the sign changed, the input
  // value was in ]0.5, 0[ and the result should be -0.
  __ mfc1(result, double_scratch0().high());
  __ Xor(result, result, Operand(scratch));
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // ARM uses 'mi' here, which is 'lt'.
    DeoptimizeIf(lt, instr->environment(), result,
                 Operand(zero_reg));
  } else {
    Label skip2;
    // ARM uses 'mi' here, which is 'lt'.
    // Negating it results in 'ge'.
    __ Branch(&skip2, ge, result, Operand(zero_reg));
    __ mov(result, zero_reg);
    __ Branch(&done);
    __ bind(&skip2);
  }

  Register except_flag = scratch;

  __ EmitFPUTruncate(kRoundToMinusInf,
                     double_scratch0().low(),
                     double_scratch0(),
                     result,
                     except_flag);

  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

  __ mfc1(result, double_scratch0().low());

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ bind(&check_sign_on_zero);
    __ mfc1(scratch, input.high());
    __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
  }
  __ bind(&done);
}
// Math.sqrt on a double input via the FPU sqrt instruction.
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ sqrt_d(result, input);
}
// Math.pow(x, 0.5) specialization: sqrt(x + 0), where adding +0 first
// normalizes -0 to +0 so the result matches JS semantics.
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister double_scratch = double_scratch0();

  // Add +0 to convert -0 to +0.
  __ mtc1(zero_reg, double_scratch.low());
  __ mtc1(zero_reg, double_scratch.high());
  __ add_d(result, input, double_scratch);
  __ sqrt_d(result, result);
}
// Math.pow: dispatches on the exponent's representation and calls the
// appropriate C helper (double^double or double^int). A tagged exponent is
// first converted to a double (smi untag, or heap-number load with deopt).
void LCodeGen::DoPower(LPower* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  Register scratch = scratch0();
  DoubleRegister result_reg = ToDoubleRegister(instr->result());
  Representation exponent_type = instr->hydrogen()->right()->representation();
  if (exponent_type.IsDouble()) {
    // Prepare arguments and call C function.
    __ PrepareCallCFunction(0, 2, scratch);
    __ SetCallCDoubleArguments(ToDoubleRegister(left),
                               ToDoubleRegister(right));
    __ CallCFunction(
        ExternalReference::power_double_double_function(isolate()), 0, 2);
  } else if (exponent_type.IsInteger32()) {
    ASSERT(ToRegister(right).is(a0));
    // Prepare arguments and call C function.
    __ PrepareCallCFunction(1, 1, scratch);
    __ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right));
    __ CallCFunction(
        ExternalReference::power_double_int_function(isolate()), 1, 1);
  } else {
    ASSERT(exponent_type.IsTagged());
    ASSERT(instr->hydrogen()->left()->representation().IsDouble());

    Register right_reg = ToRegister(right);

    // Check for smi on the right hand side.
    Label non_smi, call;
    __ JumpIfNotSmi(right_reg, &non_smi);

    // Untag smi and convert it to a double.
    __ SmiUntag(right_reg);
    FPURegister single_scratch = double_scratch0();
    __ mtc1(right_reg, single_scratch);
    __ cvt_d_w(result_reg, single_scratch);
    __ Branch(&call);

    __ bind(&non_smi);
    // Heap number map check.
    __ lw(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
    __ ldc1(result_reg, FieldMemOperand(right_reg, HeapNumber::kValueOffset));

    __ bind(&call);
    // Prepare arguments and call C function.
    __ PrepareCallCFunction(0, 2, scratch);
    __ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg);
    __ CallCFunction(
        ExternalReference::power_double_double_function(isolate()), 0, 2);
  }
  // Store the result in the result register.
  __ GetCFunctionDoubleResult(result_reg);
}
// Math.log via the transcendental cache stub; result expected in f4.
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  TranscendentalCacheStub stub(TranscendentalCache::LOG,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}

// Math.tan via the transcendental cache stub; result expected in f4.
void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  TranscendentalCacheStub stub(TranscendentalCache::TAN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}

// Math.cos via the transcendental cache stub; result expected in f4.
void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  TranscendentalCacheStub stub(TranscendentalCache::COS,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}

// Math.sin via the transcendental cache stub; result expected in f4.
void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  TranscendentalCacheStub stub(TranscendentalCache::SIN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}

// Dispatches a unary math LInstruction to the per-operation emitter.
// NOTE(review): the case list is reconstructed; only the kMathPowHalf
// case and the Abort default were visible in the corrupted original —
// confirm the full set against the platform's other back ends.
void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
  switch (instr->op()) {
    case kMathAbs:
      DoMathAbs(instr);
      break;
    case kMathFloor:
      DoMathFloor(instr);
      break;
    case kMathRound:
      DoMathRound(instr);
      break;
    case kMathSqrt:
      DoMathSqrt(instr);
      break;
    case kMathPowHalf:
      DoMathPowHalf(instr);
      break;
    case kMathCos:
      DoMathCos(instr);
      break;
    case kMathSin:
      DoMathSin(instr);
      break;
    case kMathTan:
      DoMathTan(instr);
      break;
    case kMathLog:
      DoMathLog(instr);
      break;
    default:
      Abort("Unimplemented type of LUnaryMathOperation.");
      UNREACHABLE();
  }
}

// Invokes the JSFunction in a1 with a lazy-deopt safepoint, then
// restores the context register clobbered by the call.
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(ToRegister(instr->function()).is(a1));
  ASSERT(instr->HasPointerMap());
  ASSERT(instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
  ParameterCount count(instr->arity());
  __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
  // Restore context register.
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}

// Calls a keyed-load-resolved function through the keyed call IC.
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // Restore context register.
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}

// Calls a named property through the call IC; the name goes in a2.
void LCodeGen::DoCallNamed(LCallNamed* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ li(a2, Operand(instr->name()));
  CallCode(ic, mode, instr);
  // Restore context register.
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}

// Calls the function value in a1 through CallFunctionStub.
void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->function()).is(a1));
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  // Restore context register.
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}

// Calls a global property through the call IC with context-dependent
// relocation mode; the name goes in a2.
void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ li(a2, Operand(instr->name()));
  CallCode(ic, mode, instr);
  // Restore context register.
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}

// Direct call to a statically-known global function; target loaded
// into a1 as required by CallKnownFunction.
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  __ li(a1, Operand(instr->target()));
  CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}

// 'new' expression: constructor in a1, argument count in a0,
// dispatched to the JSConstructCall builtin.
void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(a1));
  ASSERT(ToRegister(instr->result()).is(v0));

  Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
  __ li(a0, Operand(instr->arity()));
  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
}

// Calls into the runtime system for the given function and arity.
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}

// Stores a named field either in-object or in the properties array,
// updating the map on a transition and emitting a write barrier when
// the stored value may be a heap object.
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Register object = ToRegister(instr->object());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  int offset = instr->offset();

  ASSERT(!object.is(value));

  if (!instr->transition().is_null()) {
    __ li(scratch, Operand(instr->transition()));
    __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  }

  // Do the store.
  HType type = instr->hydrogen()->value()->type();
  SmiCheck check_needed =
      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  if (instr->is_in_object()) {
    __ sw(value, FieldMemOperand(object, offset));
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the object for in-object properties.
      __ RecordWriteField(object,
                          offset,
                          value,
                          scratch,
                          kRAHasBeenSaved,
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  } else {
    __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
    __ sw(value, FieldMemOperand(scratch, offset));
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the properties array.
      // object is used as a scratch register.
      __ RecordWriteField(scratch,
                          offset,
                          value,
                          object,
                          kRAHasBeenSaved,
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  }
}

// Generic named store through the StoreIC; picks the strict-mode
// variant of the IC when required.
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(a1));
  ASSERT(ToRegister(instr->value()).is(a0));

  // Name is always in a2.
  __ li(a2, Operand(instr->name()));
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

// Deoptimizes when index >= length (unsigned compare, so negative
// indices also fail).
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  DeoptimizeIf(hs,
               instr->environment(),
               ToRegister(instr->index()),
               Operand(ToRegister(instr->length())));
}

// Stores into a fast-elements FixedArray at a constant or register
// key, with optional smi check and write barrier.
void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->object());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
  Register scratch = scratch0();

  // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
  // conversion, so it deopts in that case.
  if (instr->hydrogen()->ValueNeedsSmiCheck()) {
    __ And(at, value, Operand(kSmiTagMask));
    DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
  }

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    int offset =
        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
    __ sw(value, FieldMemOperand(elements, offset));
  } else {
    __ sll(scratch, key, kPointerSizeLog2);
    __ addu(scratch, elements, scratch);
    __ sw(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    HType type = instr->hydrogen()->value()->type();
    SmiCheck check_needed =
        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ Addu(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
    __ RecordWrite(elements,
                   key,
                   value,
                   kRAHasBeenSaved,
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed);
  }
}

void LCodeGen::DoStoreKeyedFastDoubleElement(
3320
LStoreKeyedFastDoubleElement* instr) {
3321
DoubleRegister value = ToDoubleRegister(instr->value());
3322
Register elements = ToRegister(instr->elements());
3323
Register key = no_reg;
3324
Register scratch = scratch0();
3325
bool key_is_constant = instr->key()->IsConstantOperand();
3326
int constant_key = 0;
3329
// Calculate the effective address of the slot in the array to store the
3331
if (key_is_constant) {
3332
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3333
if (constant_key & 0xF0000000) {
3334
Abort("array index constant value too big.");
3337
key = ToRegister(instr->key());
3339
int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3340
if (key_is_constant) {
3341
__ Addu(scratch, elements, Operand(constant_key * (1 << shift_size) +
3342
FixedDoubleArray::kHeaderSize - kHeapObjectTag));
3344
__ sll(scratch, key, shift_size);
3345
__ Addu(scratch, elements, Operand(scratch));
3346
__ Addu(scratch, scratch,
3347
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
3351
// Check for NaN. All NaNs must be canonicalized.
3352
__ BranchF(NULL, &is_nan, eq, value, value);
3353
__ Branch(¬_nan);
3355
// Only load canonical NaN if the comparison above set the overflow.
3357
__ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
3360
__ sdc1(value, MemOperand(scratch));
3364
void LCodeGen::DoStoreKeyedSpecializedArrayElement(
3365
LStoreKeyedSpecializedArrayElement* instr) {
3367
Register external_pointer = ToRegister(instr->external_pointer());
3368
Register key = no_reg;
3369
ElementsKind elements_kind = instr->elements_kind();
3370
bool key_is_constant = instr->key()->IsConstantOperand();
3371
int constant_key = 0;
3372
if (key_is_constant) {
3373
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3374
if (constant_key & 0xF0000000) {
3375
Abort("array index constant value too big.");
3378
key = ToRegister(instr->key());
3380
int shift_size = ElementsKindToShiftSize(elements_kind);
3382
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
3383
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
3384
FPURegister value(ToDoubleRegister(instr->value()));
3385
if (key_is_constant) {
3386
__ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
3388
__ sll(scratch0(), key, shift_size);
3389
__ Addu(scratch0(), scratch0(), external_pointer);
3392
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3393
__ cvt_s_d(double_scratch0(), value);
3394
__ swc1(double_scratch0(), MemOperand(scratch0()));
3395
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
3396
__ sdc1(value, MemOperand(scratch0()));
3399
Register value(ToRegister(instr->value()));
3400
MemOperand mem_operand(zero_reg);
3401
Register scratch = scratch0();
3402
if (key_is_constant) {
3403
mem_operand = MemOperand(external_pointer,
3404
constant_key * (1 << shift_size));
3406
__ sll(scratch, key, shift_size);
3407
__ Addu(scratch, scratch, external_pointer);
3408
mem_operand = MemOperand(scratch);
3410
switch (elements_kind) {
3411
case EXTERNAL_PIXEL_ELEMENTS:
3412
case EXTERNAL_BYTE_ELEMENTS:
3413
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3414
__ sb(value, mem_operand);
3416
case EXTERNAL_SHORT_ELEMENTS:
3417
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3418
__ sh(value, mem_operand);
3420
case EXTERNAL_INT_ELEMENTS:
3421
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3422
__ sw(value, mem_operand);
3424
case EXTERNAL_FLOAT_ELEMENTS:
3425
case EXTERNAL_DOUBLE_ELEMENTS:
3426
case FAST_DOUBLE_ELEMENTS:
3428
case FAST_SMI_ONLY_ELEMENTS:
3429
case DICTIONARY_ELEMENTS:
3430
case NON_STRICT_ARGUMENTS_ELEMENTS:
3437
// Generic keyed store through the KeyedStoreIC; picks the strict-mode
// variant of the IC when required.
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(a2));
  ASSERT(ToRegister(instr->key()).is(a1));
  ASSERT(ToRegister(instr->value()).is(a0));

  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
      : isolate()->builtins()->KeyedStoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

// Transitions an object's elements kind if its map matches the source
// map: smi-only -> fast is a plain map swap with a write barrier;
// transitions involving double elements call out to builtins.
// (Original text had "&not_applicable" mangled to "¬_applicable" — fixed.)
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register new_map_reg = ToRegister(instr->new_map_reg());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = from_map->elements_kind();
  ElementsKind to_kind = to_map->elements_kind();

  __ mov(ToRegister(instr->result()), object_reg);

  Label not_applicable;
  __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ Branch(&not_applicable, ne, scratch, Operand(from_map));

  __ li(new_map_reg, Operand(to_map));
  if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
    __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                        scratch, kRAHasBeenSaved, kDontSaveFPRegs);
  } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
      to_kind == FAST_DOUBLE_ELEMENTS) {
    Register fixed_object_reg = ToRegister(instr->temp_reg());
    ASSERT(fixed_object_reg.is(a2));
    ASSERT(new_map_reg.is(a3));
    __ mov(fixed_object_reg, object_reg);
    CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
             RelocInfo::CODE_TARGET, instr);
  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
    Register fixed_object_reg = ToRegister(instr->temp_reg());
    ASSERT(fixed_object_reg.is(a2));
    ASSERT(new_map_reg.is(a3));
    __ mov(fixed_object_reg, object_reg);
    CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
             RelocInfo::CODE_TARGET, instr);
  } else {
    UNREACHABLE();
  }
  __ bind(&not_applicable);
}

// String concatenation: push operands and call StringAddStub
// (operands are already known to be strings, so no checks in stub).
void LCodeGen::DoStringAdd(LStringAdd* instr) {
  __ push(ToRegister(instr->left()));
  __ push(ToRegister(instr->right()));
  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}

// String.charCodeAt: inline fast path via StringCharLoadGenerator,
// with a deferred runtime call for the slow cases.
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt: public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new DeferredStringCharCodeAt(this, instr);
  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}

// Slow path for charCodeAt: calls Runtime::kStringCharCodeAt with the
// string and smi-tagged index, then untags the smi result.
void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
  if (FLAG_debug_code) {
    __ AbortIfNotSmi(v0);
  }
  __ SmiUntag(v0);
  __ StoreToSafepointRegisterSlot(v0, result);
}

// String.fromCharCode: fast path looks up the single-character string
// cache for ASCII codes; cache miss or non-ASCII goes to deferred code.
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode: public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  ASSERT(!char_code.is(result));

  __ Branch(deferred->entry(), hi,
            char_code, Operand(String::kMaxAsciiCharCode));
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ sll(scratch, char_code, kPointerSizeLog2);
  __ Addu(result, result, scratch);
  __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  // Undefined in the cache slot means "not cached" — take the slow path.
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(deferred->entry(), eq, result, Operand(scratch));
  __ bind(deferred->exit());
}

// Slow path for fromCharCode: calls Runtime::kCharFromCode with the
// smi-tagged char code.
void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
  __ StoreToSafepointRegisterSlot(v0, result);
}

// Loads a string's length field (a smi) into the result register.
void LCodeGen::DoStringLength(LStringLength* instr) {
  Register string = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  __ lw(result, FieldMemOperand(string, String::kLengthOffset));
}

// Converts an int32 (from a register or a stack slot) to a double in
// the output FPU register via mtc1 + cvt.d.w.
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  ASSERT(output->IsDoubleRegister());
  FPURegister single_scratch = double_scratch0().low();
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ lw(scratch, ToMemOperand(input));
    __ mtc1(scratch, single_scratch);
  } else {
    __ mtc1(ToRegister(input), single_scratch);
  }
  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
}

// Tags an int32 as a smi in place; overflow (value doesn't fit in
// 31 bits) falls through to deferred heap-number allocation.
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI: public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagI* instr_;
  };

  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);
  Register overflow = scratch0();

  DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
  __ SmiTagCheckOverflow(reg, overflow);
  __ BranchOnOverflow(deferred->entry(), overflow);
  __ bind(deferred->exit());
}

// Slow path for NumberTagI: recovers the original int32 from the
// overflowed smi-tag, converts it to a double, allocates a heap number
// (inline or via the runtime), and stores the value into it.
void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
  Label slow;
  Register reg = ToRegister(instr->InputAt(0));
  FPURegister dbl_scratch = double_scratch0();

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

  // There was overflow, so bits 30 and 31 of the original integer
  // disagree. Try to allocate a heap number in new space and store
  // the value in there. If that fails, call the runtime system.
  Label done;
  // Recover the int32 from the overflowed tag and convert to double.
  __ SmiUntag(reg);
  __ Xor(reg, reg, Operand(0x80000000));
  __ mtc1(reg, dbl_scratch);
  __ cvt_d_w(dbl_scratch, dbl_scratch);
  if (FLAG_inline_new) {
    __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(t1, a3, t0, t2, &slow);
    if (!reg.is(t1)) __ mov(reg, t1);
    __ Branch(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);

  // TODO(3095996): Put a valid pointer value in the stack slot where the result
  // register is stored, as this register is in the pointer map, but contains an
  // integer value.
  __ StoreToSafepointRegisterSlot(zero_reg, reg);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
  if (!reg.is(v0)) __ mov(reg, v0);

  // Done. Put the value in dbl_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ sdc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
  __ StoreToSafepointRegisterSlot(reg, reg);
}

// Boxes a double into a freshly allocated heap number; allocation
// failure (or --no-inline-new) goes through the deferred runtime call.
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD: public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->TempAt(0));
  Register temp2 = ToRegister(instr->TempAt(1));

  DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
  } else {
    __ Branch(deferred->entry());
  }
  __ bind(deferred->exit());
  __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
}

// Slow path for NumberTagD: allocates the heap number via the runtime.
void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
  __ StoreToSafepointRegisterSlot(v0, reg);
}

// Smi-tags an int32 in place; only legal when hydrogen proved the
// value cannot overflow the smi range.
void LCodeGen::DoSmiTag(LSmiTag* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
  __ SmiTag(ToRegister(input));
}

// Smi-untags in place, optionally deoptimizing first when the input
// might be a heap object rather than a smi.
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register scratch = scratch0();
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  if (instr->needs_check()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    // If the input is a HeapObject, value of scratch won't be zero.
    __ And(scratch, ToRegister(input), Operand(kHeapObjectTag));
    __ SmiUntag(ToRegister(input));
    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
  } else {
    __ SmiUntag(ToRegister(input));
  }
}

void LCodeGen::EmitNumberUntagD(Register input_reg,
3759
DoubleRegister result_reg,
3760
bool deoptimize_on_undefined,
3761
LEnvironment* env) {
3762
Register scratch = scratch0();
3764
Label load_smi, heap_number, done;
3767
__ JumpIfSmi(input_reg, &load_smi);
3769
// Heap number map check.
3770
__ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
3771
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3772
if (deoptimize_on_undefined) {
3773
DeoptimizeIf(ne, env, scratch, Operand(at));
3776
__ Branch(&heap_number, eq, scratch, Operand(at));
3778
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3779
DeoptimizeIf(ne, env, input_reg, Operand(at));
3781
// Convert undefined to NaN.
3782
__ LoadRoot(at, Heap::kNanValueRootIndex);
3783
__ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
3786
__ bind(&heap_number);
3788
// Heap number to double register conversion.
3789
__ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
3792
// Smi to double register conversion
3794
__ SmiUntag(input_reg); // Untag smi before converting to float.
3795
__ mtc1(input_reg, result_reg);
3796
__ cvt_d_w(result_reg, result_reg);
3797
__ SmiTag(input_reg); // Retag smi.
3802
// Slow path of TaggedToI for heap objects: truncating conversions use
// ECMA ToInt32 semantics (undefined -> 0); exact conversions deopt on
// any inexactness or a -0 result when kBailoutOnMinusZero is set.
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->InputAt(0));
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->TempAt(0));
  DoubleRegister double_scratch = double_scratch0();
  FPURegister single_scratch = double_scratch.low();

  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // The input is a tagged HeapObject.
  // Heap number map check.
  __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  // This 'at' value and scratch1 map value are used for tests in both clauses
  // of the if.

  if (instr->truncating()) {
    Register scratch3 = ToRegister(instr->TempAt(1));
    DoubleRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
    ASSERT(!scratch3.is(input_reg) &&
           !scratch3.is(scratch1) &&
           !scratch3.is(scratch2));
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label heap_number;
    __ Branch(&heap_number, eq, scratch1, Operand(at));  // HeapNumber map?
    // Check for undefined. Undefined is converted to zero for truncating
    // conversions.
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    DeoptimizeIf(ne, instr->environment(), input_reg, Operand(at));
    ASSERT(ToRegister(instr->result()).is(input_reg));
    __ mov(input_reg, zero_reg);
    __ Branch(&done);

    __ bind(&heap_number);
    __ ldc1(double_scratch2,
            FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    __ EmitECMATruncate(input_reg,
                        double_scratch2,
                        single_scratch,
                        scratch1,
                        scratch2,
                        scratch3);
  } else {
    // Deoptimize if we don't have a heap number.
    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));

    // Load the double value.
    __ ldc1(double_scratch,
            FieldMemOperand(input_reg, HeapNumber::kValueOffset));

    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       single_scratch,
                       double_scratch,
                       scratch1,
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed.
    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

    // Load the result.
    __ mfc1(input_reg, single_scratch);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Branch(&done, ne, input_reg, Operand(zero_reg));

      // Result is zero: deopt if the input was -0 (sign bit set).
      __ mfc1(scratch1, double_scratch.high());
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
    }
  }
  __ bind(&done);
}

// Converts a tagged value to int32 in place: smis untag inline, heap
// objects are handled by the deferred code.
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI: public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);

  // Let the deferred code handle the HeapObject case.
  __ JumpIfNotSmi(input_reg, deferred->entry());

  // Smi to int32 conversion.
  __ SmiUntag(input_reg);
  __ bind(deferred->exit());
}

// Converts a tagged number to a double via EmitNumberUntagD.
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DoubleRegister result_reg = ToDoubleRegister(result);

  EmitNumberUntagD(input_reg, result_reg,
                   instr->hydrogen()->deoptimize_on_undefined(),
                   instr->environment());
}

// Converts a double to int32: truncating conversions use ECMA ToInt32;
// exact conversions deopt when the FPU reports any exception.
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->TempAt(0));
  DoubleRegister double_input = ToDoubleRegister(instr->InputAt(0));
  DoubleRegister double_scratch = double_scratch0();
  FPURegister single_scratch = double_scratch0().low();

  if (instr->truncating()) {
    Register scratch3 = ToRegister(instr->TempAt(1));
    __ EmitECMATruncate(result_reg,
                        double_input,
                        single_scratch,
                        scratch1,
                        scratch2,
                        scratch3);
  } else {
    Register except_flag = scratch2;

    __ EmitFPUTruncate(kRoundToMinusInf,
                       single_scratch,
                       double_input,
                       scratch1,
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

    // Load the result.
    __ mfc1(result_reg, single_scratch);
  }
}

// Deoptimizes unless the input is a smi (tag bit clear).
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->InputAt(0);
  __ And(at, ToRegister(input), Operand(kSmiTagMask));
  DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
}

// Deoptimizes if the input IS a smi.
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  LOperand* input = instr->InputAt(0);
  __ And(at, ToRegister(input), Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
}

// Deoptimizes unless the object's instance type passes either an
// interval check [first, last] or a mask-and-tag check.
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register scratch = scratch0();

  __ GetObjectType(input, scratch, scratch);

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
    } else {
      DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      // Single-bit mask: test just that bit against the (0 or bit) tag.
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ And(at, scratch, mask);
      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
          at, Operand(zero_reg));
    } else {
      __ And(scratch, scratch, Operand(mask));
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
    }
  }
}

// Deoptimizes unless the register holds exactly the expected function.
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
  ASSERT(instr->InputAt(0)->IsRegister());
  Register reg = ToRegister(instr->InputAt(0));
  DeoptimizeIf(ne, instr->environment(), reg,
               Operand(instr->hydrogen()->target()));
}

// Deoptimizes unless the object's map is exactly the expected map.
void LCodeGen::DoCheckMap(LCheckMap* instr) {
  Register scratch = scratch0();
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);
  __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
  DeoptimizeIf(ne,
               instr->environment(),
               scratch,
               Operand(instr->hydrogen()->map()));
}

// Clamps a double to the [0, 255] uint8 range.
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
}

// Clamps an int32 to the [0, 255] uint8 range.
void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}

// Clamps a tagged value to uint8: smis and heap numbers are clamped,
// undefined becomes 0, anything else deoptimizes.
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ JumpIfSmi(input_reg, &is_smi);

  // Check for heap number
  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  DeoptimizeIf(ne, instr->environment(), input_reg,
               Operand(factory()->undefined_value()));
  __ mov(result_reg, zero_reg);
  __ Branch(&done);

  // Heap number.
  __ bind(&heap_number);
  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
                                             HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
  __ Branch(&done);

  // Smi.
  __ bind(&is_smi);
  __ SmiUntag(scratch, input_reg);
  __ ClampUint8(result_reg, scratch);

  __ bind(&done);
}

void LCodeGen::LoadHeapObject(Register result,
4087
Handle<HeapObject> object) {
4088
if (heap()->InNewSpace(*object)) {
4089
Handle<JSGlobalPropertyCell> cell =
4090
factory()->NewJSGlobalPropertyCell(object);
4091
__ li(result, Operand(cell));
4092
__ lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
4094
__ li(result, Operand(object));
4099
// Walks the prototype chain from instr->prototype() up to (and including) the
// holder, deoptimizing if any map along the chain has changed.
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
  Register temp1 = ToRegister(instr->TempAt(0));
  Register temp2 = ToRegister(instr->TempAt(1));

  Handle<JSObject> holder = instr->holder();
  Handle<JSObject> current_prototype = instr->prototype();

  // Load prototype object.
  LoadHeapObject(temp1, current_prototype);

  // Check prototype maps up to the holder.
  while (!current_prototype.is_identical_to(holder)) {
    __ lw(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
    DeoptimizeIf(ne,
                 instr->environment(),
                 temp2,
                 Operand(Handle<Map>(current_prototype->map())));
    current_prototype =
        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
    // Load next prototype object.
    LoadHeapObject(temp1, current_prototype);
  }

  // Check the holder map.
  __ lw(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
  DeoptimizeIf(ne,
               instr->environment(),
               temp2,
               Operand(Handle<Map>(current_prototype->map())));
}
// Materializes an array literal. Shallow literals use the fast clone stub;
// deep or oversized literals fall back to the runtime.
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
  Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
  ASSERT_EQ(2, constant_elements->length());
  ElementsKind constant_elements_kind =
      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());

  // Push literals array, literal index and constant elements as arguments.
  __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
  __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ li(a1, Operand(constant_elements));
  __ Push(a3, a2, a1);

  // Pick the right runtime function or stub to call.
  int length = instr->hydrogen()->length();
  if (instr->hydrogen()->IsCopyOnWrite()) {
    ASSERT(instr->hydrogen()->depth() == 1);
    FastCloneShallowArrayStub::Mode mode =
        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
  } else {
    FastCloneShallowArrayStub::Mode mode =
        constant_elements_kind == FAST_DOUBLE_ELEMENTS
        ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
        : FastCloneShallowArrayStub::CLONE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}
void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4170
ASSERT(!source.is(a2));
4171
ASSERT(!result.is(a2));
4173
// Increase the offset so that subsequent objects end up right after
4175
int current_offset = *offset;
4176
int size = object->map()->instance_size();
4179
// Copy object header.
4180
ASSERT(object->properties()->length() == 0);
4181
ASSERT(object->elements()->length() == 0 ||
4182
object->elements()->map() == isolate()->heap()->fixed_cow_array_map());
4183
int inobject_properties = object->map()->inobject_properties();
4184
int header_size = size - inobject_properties * kPointerSize;
4185
for (int i = 0; i < header_size; i += kPointerSize) {
4186
__ lw(a2, FieldMemOperand(source, i));
4187
__ sw(a2, FieldMemOperand(result, current_offset + i));
4190
// Copy in-object properties.
4191
for (int i = 0; i < inobject_properties; i++) {
4192
int total_offset = current_offset + object->GetInObjectPropertyOffset(i);
4193
Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
4194
if (value->IsJSObject()) {
4195
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
4196
__ Addu(a2, result, Operand(*offset));
4197
__ sw(a2, FieldMemOperand(result, total_offset));
4198
LoadHeapObject(source, value_object);
4199
EmitDeepCopy(value_object, result, source, offset);
4200
} else if (value->IsHeapObject()) {
4201
LoadHeapObject(a2, Handle<HeapObject>::cast(value));
4202
__ sw(a2, FieldMemOperand(result, total_offset));
4204
__ li(a2, Operand(value));
4205
__ sw(a2, FieldMemOperand(result, total_offset));
4211
// Fast-path object literal: allocates the whole (possibly nested) literal in
// one new-space allocation and deep-copies the boilerplate into it.
void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
  int size = instr->hydrogen()->total_size();

  // Allocate all objects that are part of the literal in one big
  // allocation. This avoids multiple limit checks.
  Label allocated, runtime_allocate;
  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ li(a0, Operand(Smi::FromInt(size)));
  __ push(a0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);

  __ bind(&allocated);
  int offset = 0;
  LoadHeapObject(a1, instr->hydrogen()->boilerplate());
  EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset);
  // The deep copy must have consumed exactly the allocated space.
  ASSERT_EQ(size, offset);
}
// Generic object literal: pushes the literal arguments and dispatches to the
// fast clone stub or one of the runtime creation functions.
void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));

  Handle<FixedArray> constant_properties =
      instr->hydrogen()->constant_properties();

  // Push literals array, literal index, constant properties and flags.
  __ lw(t0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ lw(t0, FieldMemOperand(t0, JSFunction::kLiteralsOffset));
  __ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ li(a2, Operand(constant_properties));
  int flags = instr->hydrogen()->fast_elements()
      ? ObjectLiteral::kFastElements
      : ObjectLiteral::kNoFlags;
  __ li(a1, Operand(Smi::FromInt(flags)));
  __ Push(t0, a3, a2, a1);

  // Pick the right runtime function to call.
  int properties_count = constant_properties->length() / 2;
  if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
  } else if (flags != ObjectLiteral::kFastElements ||
      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
  } else {
    FastCloneShallowObjectStub stub(properties_count);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}
// Transitions an object to fast-properties mode via the runtime.
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));
  __ push(a0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}
// Materializes a regexp literal. The literal is created lazily through the
// runtime on first use, then each evaluation clones the materialized regexp
// into a fresh allocation.
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  Label materialized;
  // Registers will be used as follows:
  // a3 = JS function.
  // t3 = literals array.
  // a1 = regexp literal.
  // a0 = regexp literal clone.
  // a2 and t0-t2 are used as temporaries.
  __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ lw(t3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
  int literal_offset = FixedArray::kHeaderSize +
      instr->hydrogen()->literal_index() * kPointerSize;
  __ lw(a1, FieldMemOperand(t3, literal_offset));
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ Branch(&materialized, ne, a1, Operand(at));

  // Create regexp literal using runtime function.
  // Result will be in v0.
  __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ li(t1, Operand(instr->hydrogen()->pattern()));
  __ li(t0, Operand(instr->hydrogen()->flags()));
  __ Push(t3, t2, t1, t0);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(a1, v0);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ li(a0, Operand(Smi::FromInt(size)));
  // Preserve the materialized regexp (a1) across the runtime call.
  __ Push(a1, a0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(a1);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ lw(a3, FieldMemOperand(a1, i));
    __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
    __ sw(a3, FieldMemOperand(v0, i));
    __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
    __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
  }
}
// Materializes a function literal (closure). Nested functions without
// literals use the fast stub; otherwise the runtime is called, passing the
// pretenure flag.
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && shared_info->num_literals() == 0) {
    FastNewClosureStub stub(shared_info->language_mode());
    __ li(a1, Operand(shared_info));
    __ push(a1);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ li(a2, Operand(shared_info));
    __ li(a1, Operand(pretenure
                      ? factory()->true_value()
                      : factory()->false_value()));
    __ Push(cp, a2, a1);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}
// Implements the generic typeof operator by calling into the runtime.
void LCodeGen::DoTypeof(LTypeof* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  Register input = ToRegister(instr->InputAt(0));
  __ push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}
// Branches on the result of a `typeof x == "literal"` comparison. The actual
// type test is emitted by EmitTypeofIs, which returns the final condition and
// fills in the operands to compare.
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Register cmp1 = no_reg;
  Operand cmp2 = Operand(no_reg);

  Condition final_branch_condition = EmitTypeofIs(true_label,
                                                  false_label,
                                                  input,
                                                  instr->type_literal(),
                                                  cmp1,
                                                  cmp2);

  ASSERT(cmp1.is_valid());
  ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());

  // kNoCondition means EmitTypeofIs already jumped to the right label.
  if (final_branch_condition != kNoCondition) {
    EmitBranch(true_block, false_block, final_branch_condition, cmp1, cmp2);
  }
}
// Emits the type test for `typeof input == type_name`. Returns the condition
// to branch on and fills cmp1/cmp2 with the operands for that comparison, or
// returns kNoCondition after jumping directly to true_label/false_label.
Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name,
                                 Register& cmp1,
                                 Operand& cmp2) {
  // This function utilizes the delay slot heavily. This is used to load
  // values that are always usable without depending on the type of the input
  // register.
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  if (type_name->Equals(heap()->number_symbol())) {
    __ JumpIfSmi(input, true_label);
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    cmp1 = input;
    cmp2 = Operand(at);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->string_symbol())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    __ Branch(USE_DELAY_SLOT, false_label,
              ge, scratch, Operand(FIRST_NONSTRING_TYPE));
    // input is an object so we can load the BitFieldOffset even if we take the
    // other branch.
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->boolean_symbol())) {
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    cmp1 = at;
    cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    cmp1 = at;
    cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->undefined_symbol())) {
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    // The first instruction of JumpIfSmi is an And - it is safe in the delay
    // slot.
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = ne;

  } else if (type_name->Equals(heap()->function_symbol())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, scratch, input);
    __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
    cmp1 = input;
    cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->object_symbol())) {
    __ JumpIfSmi(input, false_label);
    if (!FLAG_harmony_typeof) {
      __ LoadRoot(at, Heap::kNullValueRootIndex);
      __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    }
    // input is an object, it is safe to use GetObjectType in the delay slot.
    __ GetObjectType(input, input, scratch);
    __ Branch(USE_DELAY_SLOT, false_label,
              lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    // Still an object, so the InstanceType can be loaded.
    __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset));
    __ Branch(USE_DELAY_SLOT, false_label,
              gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
    // Still an object, so the BitField can be loaded.
    // Check for undetectable objects => false.
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else {
    // Unknown type literal: always false.
    cmp1 = at;
    cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
    __ Branch(false_label);
  }

  return final_branch_condition;
}
// Branches on whether the current function was invoked as a constructor
// (i.e. the calling frame carries the CONSTRUCT marker).
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->TempAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  EmitIsConstructCall(temp1, scratch0());

  EmitBranch(true_block, false_block, eq, temp1,
             Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}
// Loads the calling frame's marker into temp1 (skipping an arguments adaptor
// frame if present) so the caller can compare it against CONSTRUCT.
void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  ASSERT(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ Branch(&check_frame_marker, ne, temp2,
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
}
void LCodeGen::EnsureSpaceForLazyDeopt() {
4512
// Ensure that we have enough space after the previous lazy-bailout
4513
// instruction for patching the code here.
4514
int current_pc = masm()->pc_offset();
4515
int patch_size = Deoptimizer::patch_size();
4516
if (current_pc < last_lazy_deopt_pc_ + patch_size) {
4517
int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
4518
ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
4519
while (padding_size > 0) {
4521
padding_size -= Assembler::kInstrSize;
4524
last_lazy_deopt_pc_ = masm()->pc_offset();
4528
// Records a lazy deoptimization point: reserves patch space and registers
// the environment with the safepoint table.
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  EnsureSpaceForLazyDeopt();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
// Unconditional deoptimization (condition `al` always takes the bailout).
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
}
// Implements the delete operator by invoking the DELETE builtin with the
// object, key and the current strict-mode flag.
void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
  Register object = ToRegister(instr->object());
  Register key = ToRegister(instr->key());
  Register strict = scratch0();
  __ li(strict, Operand(Smi::FromInt(strict_mode_flag())));
  __ Push(object, key, strict);
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
}
// Implements the `in` operator by invoking the IN builtin with key and object
// on the stack.
void LCodeGen::DoIn(LIn* instr) {
  Register obj = ToRegister(instr->object());
  Register key = ToRegister(instr->key());
  __ Push(key, obj);
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
}
// Deferred (out-of-line) portion of a stack check: calls the stack guard
// runtime function with all registers saved and records the safepoint.
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
// Emits a stack-overflow check, either at function entry (inline stub call)
// or at a backwards branch (deferred code), preparing lazy deopt bookkeeping
// in both cases.
void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck: public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(&done, hs, sp, Operand(at));
    StackCheckStub stub;
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    EnsureSpaceForLazyDeopt();
    __ bind(&done);
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new DeferredStackCheck(this, instr);
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
    EnsureSpaceForLazyDeopt();
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}
// Pseudo-instruction marking an on-stack-replacement entry point: registers
// the environment (with spill-slot info) and records the PC offset.
void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();
  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
                                   instr->SpilledDoubleRegisterArray());

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(osr_pc_offset_ == -1);
  osr_pc_offset_ = masm()->pc_offset();
}
} } // namespace v8::internal