// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <limits.h>  // For LONG_MIN, LONG_MAX.

#if defined(V8_TARGET_ARCH_ARM)

#include "bootstrapper.h"

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      allow_stub_calls_(true) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}

// We always generate ARM code, never Thumb code, even if V8 is compiled to
// Thumb, so we require inter-working support.
#if defined(__thumb__) && !defined(USE_THUMB_INTERWORK)
#error "flag -mthumb-interwork missing"
#endif

// We do not support thumb inter-working with an arm architecture not
// supporting the blx instruction (below v5t). If you know what CPU you are
// compiling for you can use -march=armv7 or similar.
#if defined(USE_THUMB_INTERWORK) && !defined(CAN_USE_THUMB_INSTRUCTIONS)
# error "For thumb inter-working we require an architecture which supports blx"
#endif

// Using bx does not yield better code, so use it only when required.
#if defined(USE_THUMB_INTERWORK)
#define USE_BX 1
#endif

void MacroAssembler::Jump(Register target, Condition cond) {
#if USE_BX
  bx(target, cond);
#else
  mov(pc, Operand(target), LeaveCC, cond);
#endif
}


void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond) {
#if USE_BX
  mov(ip, Operand(target, rmode));
  bx(ip, cond);
#else
  mov(pc, Operand(target, rmode), LeaveCC, cond);
#endif
}


void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
}

void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  // 'code' is always generated ARM code, never Thumb code.
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}

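// Each Call() below binds a Label at its start and ends with
// ASSERT_EQ(CallSize(...), SizeOfCodeGeneratedSince(&start)), so the
// CallSize() helpers must predict exactly how many bytes the matching
// call sequence emits.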
int MacroAssembler::CallSize(Register target, Condition cond) {
#if USE_BLX
  return kInstrSize;
#else
  return 2 * kInstrSize;
#endif
}


void MacroAssembler::Call(Register target, Condition cond) {
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);
  Label start;
  bind(&start);
#if USE_BLX
  blx(target, cond);
#else
  // Set lr for return at current pc + 8.
  mov(lr, Operand(pc), LeaveCC, cond);
  mov(pc, Operand(target), LeaveCC, cond);
#endif
  ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
}

int MacroAssembler::CallSize(
    Address target, RelocInfo::Mode rmode, Condition cond) {
  int size = 2 * kInstrSize;
  Instr mov_instr = cond | MOV | LeaveCC;
  intptr_t immediate = reinterpret_cast<intptr_t>(target);
  if (!Operand(immediate, rmode).is_single_instruction(mov_instr)) {
    size += kInstrSize;
  }
  return size;
}


void MacroAssembler::Call(Address target,
                          RelocInfo::Mode rmode,
                          Condition cond) {
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);
  Label start;
  bind(&start);
#if USE_BLX
  // On ARMv5 and after the recommended call sequence is:
  //   ldr ip, [pc, #...]
  //   blx ip

  // Statement positions are expected to be recorded when the target
  // address is loaded. The mov method will automatically record
  // positions when pc is the target; since this is not the case here
  // we have to do it explicitly.
  positions_recorder()->WriteRecordedPositions();

  mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
  blx(ip, cond);

  ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
#else
  // Set lr for return at current pc + 8.
  mov(lr, Operand(pc), LeaveCC, cond);
  // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
  mov(pc, Operand(reinterpret_cast<int32_t>(target), rmode), LeaveCC, cond);
  ASSERT(kCallTargetAddressOffset == kInstrSize);
#endif
  ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
}

int MacroAssembler::CallSize(Handle<Code> code,
                             RelocInfo::Mode rmode,
                             unsigned ast_id,
                             Condition cond) {
  return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}


void MacroAssembler::Call(Handle<Code> code,
                          RelocInfo::Mode rmode,
                          unsigned ast_id,
                          Condition cond) {
  Label start;
  bind(&start);
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
    SetRecordedAstId(ast_id);
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
  }
  // 'code' is always generated ARM code, never Thumb code.
  Call(reinterpret_cast<Address>(code.location()), rmode, cond);
  ASSERT_EQ(CallSize(code, rmode, ast_id, cond),
            SizeOfCodeGeneratedSince(&start));
}

void MacroAssembler::Ret(Condition cond) {
#if USE_BX
  bx(lr, cond);
#else
  mov(pc, Operand(lr), LeaveCC, cond);
#endif
}


void MacroAssembler::Drop(int count, Condition cond) {
  if (count > 0) {
    add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
  }
}


void MacroAssembler::Ret(int drop, Condition cond) {
  Drop(drop, cond);
  Ret(cond);
}

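// When no scratch register is given, Swap() uses the classic three-EOR
// trick (a ^= b; b ^= a; a ^= b), which exchanges two registers without
// any temporary storage.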
void MacroAssembler::Swap(Register reg1,
                          Register reg2,
                          Register scratch,
                          Condition cond) {
  if (scratch.is(no_reg)) {
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
    eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
  } else {
    mov(scratch, reg1, LeaveCC, cond);
    mov(reg1, reg2, LeaveCC, cond);
    mov(reg2, scratch, LeaveCC, cond);
  }
}


void MacroAssembler::Call(Label* target) {
  bl(target);
}

void MacroAssembler::Push(Handle<Object> handle) {
  mov(ip, Operand(handle));
  push(ip);
}


void MacroAssembler::Move(Register dst, Handle<Object> value) {
  mov(dst, Operand(value));
}


void MacroAssembler::Move(Register dst, Register src, Condition cond) {
  if (!dst.is(src)) {
    mov(dst, src, LeaveCC, cond);
  }
}

void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
  ASSERT(CpuFeatures::IsSupported(VFP3));
  CpuFeatures::Scope scope(VFP3);
  if (!dst.is(src)) {
    vmov(dst, src);
  }
}

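// And() strength-reduces two special cases of an immediate mask: an AND
// with 0 becomes a plain mov #0, and on ARMv7 an AND with 2^n - 1 becomes
// a ubfx (unsigned bitfield extract) of the low n bits when that is
// cheaper than materializing the constant.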
void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
                         Condition cond) {
  if (!src2.is_reg() &&
      !src2.must_use_constant_pool() &&
      src2.immediate() == 0) {
    mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);

  } else if (!src2.is_single_instruction() &&
             !src2.must_use_constant_pool() &&
             CpuFeatures::IsSupported(ARMv7) &&
             IsPowerOf2(src2.immediate() + 1)) {
    ubfx(dst, src1, 0,
         WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);

  } else {
    and_(dst, src1, src2, LeaveCC, cond);
  }
}

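// Pre-ARMv7 fallbacks: Ubfx is emulated with an AND plus a logical shift
// right, and Sbfx with a shift left followed by an arithmetic shift right,
// which sign-extends the extracted field without a dedicated instruction.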
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  ASSERT(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7)) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    if (lsb != 0) {
      mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
    }
  } else {
    ubfx(dst, src1, lsb, width, cond);
  }
}


void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  ASSERT(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7)) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    int shift_up = 32 - lsb - width;
    int shift_down = lsb + shift_up;
    if (shift_up != 0) {
      mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
    }
    if (shift_down != 0) {
      mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
    }
  } else {
    sbfx(dst, src1, lsb, width, cond);
  }
}

void MacroAssembler::Bfi(Register dst,
                         Register src,
                         Register scratch,
                         int lsb,
                         int width,
                         Condition cond) {
  ASSERT(0 <= lsb && lsb < 32);
  ASSERT(0 <= width && width < 32);
  ASSERT(lsb + width < 32);
  ASSERT(!scratch.is(dst));
  if (width == 0) return;
  if (!CpuFeatures::IsSupported(ARMv7)) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, dst, Operand(mask));
    and_(scratch, src, Operand((1 << width) - 1));
    mov(scratch, Operand(scratch, LSL, lsb));
    orr(dst, dst, scratch);
  } else {
    bfi(dst, src, lsb, width, cond);
  }
}


void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
  ASSERT(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7)) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, dst, Operand(mask));
  } else {
    bfc(dst, lsb, width, cond);
  }
}

void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
                          Condition cond) {
  if (!CpuFeatures::IsSupported(ARMv7)) {
    ASSERT(!dst.is(pc) && !src.rm().is(pc));
    ASSERT((satpos >= 0) && (satpos <= 31));

    // These asserts are required to ensure compatibility with the ARMv7
    // implementation.
    ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
    ASSERT(src.rs().is(no_reg));

    Label done;
    int satval = (1 << satpos) - 1;

    if (cond != al) {
      b(NegateCondition(cond), &done);  // Skip saturate if !condition.
    }
    if (!(src.is_reg() && dst.is(src.rm()))) {
      mov(dst, src);
    }
    tst(dst, Operand(~satval));
    b(eq, &done);
    mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, mi);  // 0 if negative.
    mov(dst, Operand(satval), LeaveCC, pl);  // satval if positive.
    bind(&done);
  } else {
    usat(dst, satpos, src, cond);
  }
}

void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond) {
  ldr(destination, MemOperand(roots, index << kPointerSizeLog2), cond);
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Condition cond) {
  str(source, MemOperand(roots, index << kPointerSizeLog2), cond);
}

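// RecordWriteHelper implements the region-marking write barrier: it masks
// the object pointer down to its page, computes which region of that page
// the store address falls into, and sets the corresponding bit in the
// page's dirty flags.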
void MacroAssembler::RecordWriteHelper(Register object,
                                       Register address,
                                       Register scratch) {
  if (emit_debug_code()) {
    // Check that the object is not in new space.
    Label not_in_new_space;
    InNewSpace(object, scratch, ne, &not_in_new_space);
    Abort("new-space object passed to RecordWriteHelper");
    bind(&not_in_new_space);
  }

  // Calculate page address.
  Bfc(object, 0, kPageSizeBits);

  // Calculate region number.
  Ubfx(address, address, Page::kRegionSizeLog2,
       kPageSizeBits - Page::kRegionSizeLog2);

  // Mark region dirty.
  ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
  mov(ip, Operand(1));
  orr(scratch, scratch, Operand(ip, LSL, address));
  str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
}

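// InNewSpace exploits the fact that the new space is one contiguous,
// aligned reservation: masking an address with new_space_mask and comparing
// it against new_space_start decides membership in two instructions.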
void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cond,
                                Label* branch) {
  ASSERT(cond == eq || cond == ne);
  and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
  cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
  b(cond, branch);
}

// Will clobber 4 registers: object, offset, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
                                 Operand offset,
                                 Register scratch0,
                                 Register scratch1) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are cp.
  ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));

  Label done;

  // First, test that the object is not in the new space. We cannot set
  // region marks for new space pages.
  InNewSpace(object, scratch0, eq, &done);

  // Add offset into the object.
  add(scratch0, object, offset);

  // Record the actual write.
  RecordWriteHelper(object, scratch0, scratch1);

  bind(&done);

  // Clobber all input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(object, Operand(BitCast<int32_t>(kZapValue)));
    mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
    mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
  }
}

// Will clobber 4 registers: object, address, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
                                 Register address,
                                 Register scratch) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are cp.
  ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));

  Label done;

  // First, test that the object is not in the new space. We cannot set
  // region marks for new space pages.
  InNewSpace(object, scratch, eq, &done);

  // Record the actual write.
  RecordWriteHelper(object, address, scratch);

  bind(&done);

  // Clobber all input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(object, Operand(BitCast<int32_t>(kZapValue)));
    mov(address, Operand(BitCast<int32_t>(kZapValue)));
    mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
  }
}

// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of contiguous register values starting with r0:
  ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  ASSERT(num_unsaved >= 0);
  sub(sp, sp, Operand(num_unsaved * kPointerSize));
  stm(db_w, sp, kSafepointSavedRegisters);
}


void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  ldm(ia_w, sp, kSafepointSavedRegisters);
  add(sp, sp, Operand(num_unsaved * kPointerSize));
}

void MacroAssembler::PushSafepointRegistersAndDoubles() {
  PushSafepointRegisters();
  sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
                      kDoubleSize));
  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
    vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
  }
}


void MacroAssembler::PopSafepointRegistersAndDoubles() {
  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
    vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
  }
  add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
                      kDoubleSize));
  PopSafepointRegisters();
}

void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
                                                             Register dst) {
  str(src, SafepointRegistersAndDoublesSlot(dst));
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  str(src, SafepointRegisterSlot(dst));
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  ldr(dst, SafepointRegisterSlot(src));
}

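// stm stores the highest-numbered register at the highest address, so after
// PushSafepointRegisters the lowest register codes sit closest to the stack
// pointer and a register's slot index is simply its code.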
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
  return reg_code;
}


MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}

MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  // General purpose registers are pushed last on the stack.
  int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}

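// Ldrd/Strd fall back to two single-word transfers when ldrd/strd are not
// available. The load order is chosen so the base register is never
// clobbered before it is used: if dst1 aliases the base, the second word is
// loaded first.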
void MacroAssembler::Ldrd(Register dst1, Register dst2,
                          const MemOperand& src, Condition cond) {
  ASSERT(src.rm().is(no_reg));
  ASSERT(!dst1.is(lr));  // r14.
  ASSERT_EQ(0, dst1.code() % 2);
  ASSERT_EQ(dst1.code() + 1, dst2.code());

  // V8 does not use this addressing mode, so the fallback code
  // below doesn't support it yet.
  ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));

  // Generate two ldr instructions if ldrd is not available.
  if (CpuFeatures::IsSupported(ARMv7)) {
    CpuFeatures::Scope scope(ARMv7);
    ldrd(dst1, dst2, src, cond);
  } else {
    if ((src.am() == Offset) || (src.am() == NegOffset)) {
      MemOperand src2(src);
      src2.set_offset(src2.offset() + 4);
      if (dst1.is(src.rn())) {
        ldr(dst2, src2, cond);
        ldr(dst1, src, cond);
      } else {
        ldr(dst1, src, cond);
        ldr(dst2, src2, cond);
      }
    } else {  // PostIndex or NegPostIndex.
      ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex));
      if (dst1.is(src.rn())) {
        ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
        ldr(dst1, src, cond);
      } else {
        MemOperand src2(src);
        src2.set_offset(src2.offset() - 4);
        ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
        ldr(dst2, src2, cond);
      }
    }
  }
}

void MacroAssembler::Strd(Register src1, Register src2,
                          const MemOperand& dst, Condition cond) {
  ASSERT(dst.rm().is(no_reg));
  ASSERT(!src1.is(lr));  // r14.
  ASSERT_EQ(0, src1.code() % 2);
  ASSERT_EQ(src1.code() + 1, src2.code());

  // V8 does not use this addressing mode, so the fallback code
  // below doesn't support it yet.
  ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));

  // Generate two str instructions if strd is not available.
  if (CpuFeatures::IsSupported(ARMv7)) {
    CpuFeatures::Scope scope(ARMv7);
    strd(src1, src2, dst, cond);
  } else {
    MemOperand dst2(dst);
    if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
      dst2.set_offset(dst2.offset() + 4);
      str(src1, dst, cond);
      str(src2, dst2, cond);
    } else {  // PostIndex or NegPostIndex.
      ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
      dst2.set_offset(dst2.offset() - 4);
      str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
      str(src2, dst2, cond);
    }
  }
}

void MacroAssembler::ClearFPSCRBits(const uint32_t bits_to_clear,
                                    const Register scratch,
                                    const Condition cond) {
  vmrs(scratch, cond);
  bic(scratch, scratch, Operand(bits_to_clear), LeaveCC, cond);
  vmsr(scratch, cond);
}


void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const DwVfpRegister src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}


void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const double src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const DwVfpRegister src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}


void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const double src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

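// Vmov special-cases +0.0 and -0.0, producing both from kDoubleRegZero (a
// copy for +0.0, a vneg for -0.0) instead of loading a constant. The
// comparison is done on the raw bit patterns because -0.0 and +0.0 compare
// equal as doubles.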
void MacroAssembler::Vmov(const DwVfpRegister dst,
                          const double imm,
                          const Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  static const DoubleRepresentation minus_zero(-0.0);
  static const DoubleRepresentation zero(0.0);
  DoubleRepresentation value(imm);
  // Handle special values first.
  if (value.bits == zero.bits) {
    vmov(dst, kDoubleRegZero, cond);
  } else if (value.bits == minus_zero.bits) {
    vneg(dst, kDoubleRegZero, cond);
  } else {
    vmov(dst, imm, cond);
  }
}

void MacroAssembler::EnterFrame(StackFrame::Type type) {
  // r0-r3: preserved
  stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
  mov(ip, Operand(Smi::FromInt(type)));
  push(ip);
  mov(ip, Operand(CodeObject()));
  push(ip);
  add(fp, sp, Operand(3 * kPointerSize));  // Adjust FP to point to saved FP.
}


void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  // r0: preserved
  // r1: preserved
  // r2: preserved

  // Drop the execution stack down to the frame pointer and restore
  // the caller frame pointer and return address.
  mov(sp, fp);
  ldm(ia_w, sp, fp.bit() | lr.bit());
}

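// Layout of an exit frame (see the ExitFrameConstants asserts below):
//   fp + 8: caller's stack (kCallerSPDisplacement)
//   fp + 4: return address (lr)
//   fp + 0: caller's fp
//   fp - 4: saved sp (kSPOffset)
//   fp - 8: code object (kCodeOffset)
// followed by the optionally saved double registers and the reserved
// stack space.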
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
  // Set up the frame structure on the stack.
  ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
  ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
  ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
  Push(lr, fp);
  mov(fp, Operand(sp));  // Set up new frame pointer.
  // Reserve room for saved entry sp and code object.
  sub(sp, sp, Operand(2 * kPointerSize));
  if (emit_debug_code()) {
    mov(ip, Operand(0));
    str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
  }
  mov(ip, Operand(CodeObject()));
  str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));

  // Save the frame pointer and the context in top.
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  str(fp, MemOperand(ip));
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  str(cp, MemOperand(ip));

  // Optionally save all double registers.
  if (save_doubles) {
    DwVfpRegister first = d0;
    DwVfpRegister last =
        DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
    vstm(db_w, sp, first, last);
    // Note that d0 will be accessible at
    //   fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
    // since the sp slot and code slot were pushed after the fp.
  }

  // Reserve place for the return address and stack space and align the frame
  // preparing for calling the runtime function.
  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
  if (frame_alignment > 0) {
    ASSERT(IsPowerOf2(frame_alignment));
    and_(sp, sp, Operand(-frame_alignment));
  }

  // Set the exit frame sp value to point just before the return address
  // slot.
  add(ip, sp, Operand(kPointerSize));
  str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}

void MacroAssembler::InitializeNewString(Register string,
                                         Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1,
                                         Register scratch2) {
  mov(scratch1, Operand(length, LSL, kSmiTagSize));
  LoadRoot(scratch2, map_index);
  str(scratch1, FieldMemOperand(string, String::kLengthOffset));
  mov(scratch1, Operand(String::kEmptyHashField));
  str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
  str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
}

int MacroAssembler::ActivationFrameAlignment() {
#if defined(V8_HOST_ARCH_ARM)
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one ARM
  // platform for another ARM platform with a different alignment.
  return OS::ActivationFrameAlignment();
#else  // defined(V8_HOST_ARCH_ARM)
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif  // defined(V8_HOST_ARCH_ARM)
}

void MacroAssembler::LeaveExitFrame(bool save_doubles,
                                    Register argument_count) {
  // Optionally restore all double registers.
  if (save_doubles) {
    // Calculate the stack location of the saved doubles and restore them.
    const int offset = 2 * kPointerSize;
    sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
    DwVfpRegister first = d0;
    DwVfpRegister last =
        DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
    vldm(ia, r3, first, last);
  }

  // Clear top frame.
  mov(r3, Operand(0, RelocInfo::NONE));
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  str(r3, MemOperand(ip));

  // Restore current context from top and clear it in debug mode.
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  ldr(cp, MemOperand(ip));
#ifdef DEBUG
  str(r3, MemOperand(ip));
#endif

  // Tear down the exit frame, pop the arguments, and return.
  mov(sp, Operand(fp));
  ldm(ia_w, sp, fp.bit() | lr.bit());
  if (argument_count.is_valid()) {
    add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
  }
}

void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
  if (use_eabi_hardfloat()) {
    Move(dst, d0);
  } else {
    vmov(dst, r0, r1);
  }
}


void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
  // This macro takes the dst register to make the code more readable
  // at the call sites. However, the dst register has to be r5 to
  // follow the calling convention which requires the call type to be
  // in r5.
  ASSERT(dst.is(r5));
  if (call_kind == CALL_AS_FUNCTION) {
    mov(dst, Operand(Smi::FromInt(1)));
  } else {
    mov(dst, Operand(Smi::FromInt(0)));
  }
}

void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    Register code_reg,
                                    Label* done,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  bool definitely_matches = false;
  Label regular_invoke;

  // Check whether the expected and actual arguments count match. If not,
  // set up registers according to the contract with ArgumentsAdaptorTrampoline:
  //  r0: actual arguments count
  //  r1: function (passed through to callee)
  //  r2: expected arguments count
  //  r3: callee code entry
  //
  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.
  ASSERT(actual.is_immediate() || actual.reg().is(r0));
  ASSERT(expected.is_immediate() || expected.reg().is(r2));
  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));

  if (expected.is_immediate()) {
    ASSERT(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      mov(r0, Operand(actual.immediate()));
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaptation code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        mov(r2, Operand(expected.immediate()));
      }
    }
  } else {
    if (actual.is_immediate()) {
      cmp(expected.reg(), Operand(actual.immediate()));
      b(eq, &regular_invoke);
      mov(r0, Operand(actual.immediate()));
    } else {
      cmp(expected.reg(), Operand(actual.reg()));
      b(eq, &regular_invoke);
    }
  }

  if (!definitely_matches) {
    if (!code_constant.is_null()) {
      mov(r3, Operand(code_constant));
      add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
    }

    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      SetCallKind(r5, call_kind);
      Call(adaptor, RelocInfo::CODE_TARGET);
      call_wrapper.AfterCall();
      b(done);
    } else {
      SetCallKind(r5, call_kind);
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}

void MacroAssembler::InvokeCode(Register code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag,
                                const CallWrapper& call_wrapper,
                                CallKind call_kind) {
  Label done;

  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
                 call_wrapper, call_kind);
  if (flag == CALL_FUNCTION) {
    call_wrapper.BeforeCall(CallSize(code));
    SetCallKind(r5, call_kind);
    Call(code);
    call_wrapper.AfterCall();
  } else {
    ASSERT(flag == JUMP_FUNCTION);
    SetCallKind(r5, call_kind);
    Jump(code);
  }

  // Continue here if InvokePrologue does handle the invocation due to
  // mismatched parameter counts.
  bind(&done);
}


void MacroAssembler::InvokeCode(Handle<Code> code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                RelocInfo::Mode rmode,
                                InvokeFlag flag,
                                CallKind call_kind) {
  Label done;

  InvokePrologue(expected, actual, code, no_reg, &done, flag,
                 NullCallWrapper(), call_kind);
  if (flag == CALL_FUNCTION) {
    SetCallKind(r5, call_kind);
    Call(code, rmode);
  } else {
    SetCallKind(r5, call_kind);
    Jump(code, rmode);
  }

  // Continue here if InvokePrologue does handle the invocation due to
  // mismatched parameter counts.
  bind(&done);
}

void MacroAssembler::InvokeFunction(Register fun,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  // Contract with called JS functions requires that function is passed in r1.
  ASSERT(fun.is(r1));

  Register expected_reg = r2;
  Register code_reg = r3;

  ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
  ldr(expected_reg,
      FieldMemOperand(code_reg,
                      SharedFunctionInfo::kFormalParameterCountOffset));
  mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
  ldr(code_reg,
      FieldMemOperand(r1, JSFunction::kCodeEntryOffset));

  ParameterCount expected(expected_reg);
  InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
}


void MacroAssembler::InvokeFunction(JSFunction* function,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    CallKind call_kind) {
  ASSERT(function->is_compiled());

  // Get the function and set up the context.
  mov(r1, Operand(Handle<JSFunction>(function)));
  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

  // Invoke the cached code.
  Handle<Code> code(function->code());
  ParameterCount expected(function->shared()->formal_parameter_count());
  if (V8::UseCrankshaft()) {
    // TODO(kasperl): For now, we always call indirectly through the
    // code field in the function to allow recompilation to take effect
    // without changing any of the call sites.
    ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
    InvokeCode(r3, expected, actual, flag, NullCallWrapper(), call_kind);
  } else {
    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
  }
}

void MacroAssembler::IsObjectJSObjectType(Register heap_object,
                                          Register map,
                                          Register scratch,
                                          Label* fail) {
  ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
  IsInstanceJSObjectType(map, scratch, fail);
}


void MacroAssembler::IsInstanceJSObjectType(Register map,
                                            Register scratch,
                                            Label* fail) {
  ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  b(lt, fail);
  cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
  b(gt, fail);
}


void MacroAssembler::IsObjectJSStringType(Register object,
                                          Register scratch,
                                          Label* fail) {
  ASSERT(kNotStringTag != 0);

  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  tst(scratch, Operand(kIsNotStringMask));
  b(ne, fail);
}

#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
  ASSERT(allow_stub_calls());
  mov(r0, Operand(0, RelocInfo::NONE));
  mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
  CEntryStub ces(1);
  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
#endif

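// A stack handler occupies five words: next handler, state, context, fp,
// and pc (see the StackHandlerConstants asserts below). PushTryHandler
// links a new handler into the per-isolate handler chain; Throw unwinds to
// the most recently pushed one.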
void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                    HandlerType type) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);

  // The pc (return address) is passed in register lr.
  if (try_location == IN_JAVASCRIPT) {
    if (type == TRY_CATCH_HANDLER) {
      mov(r3, Operand(StackHandler::TRY_CATCH));
    } else {
      mov(r3, Operand(StackHandler::TRY_FINALLY));
    }
    stm(db_w, sp, r3.bit() | cp.bit() | fp.bit() | lr.bit());
    // Save the current handler as the next handler.
    mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
    ldr(r1, MemOperand(r3));
    push(r1);
    // Link this handler as the new current one.
    str(sp, MemOperand(r3));
  } else {
    // Must preserve r0-r4; r5-r7 are available.
    ASSERT(try_location == IN_JS_ENTRY);
    // The frame pointer does not point to a JS frame so we save NULL
    // for fp. We expect the code throwing an exception to check fp
    // before dereferencing it to restore the context.
    mov(r5, Operand(StackHandler::ENTRY));  // State.
    mov(r6, Operand(Smi::FromInt(0)));  // Indicates no context.
    mov(r7, Operand(0, RelocInfo::NONE));  // NULL frame pointer.
    stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | lr.bit());
    // Save the current handler as the next handler.
    mov(r7, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
    ldr(r6, MemOperand(r7));
    push(r6);
    // Link this handler as the new current one.
    str(sp, MemOperand(r7));
  }
}


void MacroAssembler::PopTryHandler() {
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  pop(r1);
  mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
  str(r1, MemOperand(ip));
}

void MacroAssembler::Throw(Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
  // r0 is expected to hold the exception.
  if (!value.is(r0)) {
    mov(r0, value);
  }

  // Drop the sp to the top of the handler.
  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  ldr(sp, MemOperand(r3));

  // Restore the next handler.
  pop(r2);
  str(r2, MemOperand(r3));

  // Restore context and frame pointer, discard state (r3).
  ldm(ia_w, sp, r3.bit() | cp.bit() | fp.bit());

  // If the handler is a JS frame, restore the context to the frame.
  // (r3 == ENTRY) == (fp == 0) == (cp == 0), so we could test any
  // of them.
  cmp(r3, Operand(StackHandler::ENTRY));
  str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);

#ifdef DEBUG
  if (emit_debug_code()) {
    mov(lr, Operand(pc));
  }
#endif
  pop(pc);
}

void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
                                      Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
  // r0 is expected to hold the exception.
  if (!value.is(r0)) {
    mov(r0, value);
  }

  // Drop sp to the top stack handler.
  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  ldr(sp, MemOperand(r3));

  // Unwind the handlers until the ENTRY handler is found.
  Label loop, done;
  bind(&loop);
  // Load the type of the current stack handler.
  const int kStateOffset = StackHandlerConstants::kStateOffset;
  ldr(r2, MemOperand(sp, kStateOffset));
  cmp(r2, Operand(StackHandler::ENTRY));
  b(eq, &done);
  // Fetch the next handler in the list.
  const int kNextOffset = StackHandlerConstants::kNextOffset;
  ldr(sp, MemOperand(sp, kNextOffset));
  jmp(&loop);
  bind(&done);

  // Set the top handler address to next handler past the current ENTRY handler.
  pop(r2);
  str(r2, MemOperand(r3));

  if (type == OUT_OF_MEMORY) {
    // Set external caught exception to false.
    ExternalReference external_caught(
        Isolate::kExternalCaughtExceptionAddress, isolate());
    mov(r0, Operand(false, RelocInfo::NONE));
    mov(r2, Operand(external_caught));
    str(r0, MemOperand(r2));

    // Set pending exception and r0 to out of memory exception.
    Failure* out_of_memory = Failure::OutOfMemoryException();
    mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
    mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                      isolate())));
    str(r0, MemOperand(r2));
  }

  // Stack layout at this point. See also StackHandlerConstants.
  // sp ->   state (ENTRY)
  //         cp
  //         fp
  //         lr

  // Restore context and frame pointer, discard state (r2).
  ldm(ia_w, sp, r2.bit() | cp.bit() | fp.bit());
#ifdef DEBUG
  if (emit_debug_code()) {
    mov(lr, Operand(pc));
  }
#endif
  pop(pc);
}

void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  ASSERT(!holder_reg.is(scratch));
  ASSERT(!holder_reg.is(ip));
  ASSERT(!scratch.is(ip));

  // Load current lexical context from the stack frame.
  ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
#ifdef DEBUG
  cmp(scratch, Operand(0, RelocInfo::NONE));
  Check(ne, "we should not have an empty lexical context");
#endif

  // Load the global context of the current context.
  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
  ldr(scratch, FieldMemOperand(scratch, offset));
  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));

  // Check the context is a global context.
  if (emit_debug_code()) {
    // TODO(119): avoid push(holder_reg)/pop(holder_reg)
    // Cannot use ip as a temporary in this verification code, because
    // ip is clobbered as part of cmp with an object Operand.
    push(holder_reg);  // Temporarily save holder on the stack.
    // Read the first word and compare to the global_context_map.
    ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
    LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
    cmp(holder_reg, ip);
    Check(eq, "JSGlobalObject::global_context should be a global context.");
    pop(holder_reg);  // Restore holder.
  }

  // Check if both contexts are the same.
  ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  cmp(scratch, Operand(ip));
  b(eq, &same_contexts);

  // Check the context is a global context.
  if (emit_debug_code()) {
    // TODO(119): avoid push(holder_reg)/pop(holder_reg)
    // Cannot use ip as a temporary in this verification code, because
    // ip is clobbered as part of cmp with an object Operand.
    push(holder_reg);  // Temporarily save holder on the stack.
    mov(holder_reg, ip);  // Move ip to its holding place.
    LoadRoot(ip, Heap::kNullValueRootIndex);
    cmp(holder_reg, ip);
    Check(ne, "JSGlobalProxy::context() should not be null.");

    ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
    LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
    cmp(holder_reg, ip);
    Check(eq, "JSGlobalObject::global_context should be a global context.");
    // Restoring ip is not needed. ip is reloaded below.
    pop(holder_reg);  // Restore holder.
    // Restore ip to holder's context.
    ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  }

  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;

  ldr(scratch, FieldMemOperand(scratch, token_offset));
  ldr(ip, FieldMemOperand(ip, token_offset));
  cmp(scratch, Operand(ip));
  b(ne, miss);

  bind(&same_contexts);
}

void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register result,
                                              Register t0,
                                              Register t1,
                                              Register t2) {
  // Register use:
  //
  // elements - holds the slow-case elements of the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'key' or 'result'.
  //            Unchanged on bailout so 'key' or 'result' can be used
  //            in further computation.
  //
  // Scratch registers:
  //
  // t0 - holds the untagged key on entry and holds the hash once computed.
  //
  // t1 - used to hold the capacity mask of the dictionary.
  //
  // t2 - used for the index into the dictionary.
  Label done;

  // Compute the hash code from the untagged key. This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  mvn(t1, Operand(t0));
  add(t0, t1, Operand(t0, LSL, 15));
  // hash = hash ^ (hash >> 12);
  eor(t0, t0, Operand(t0, LSR, 12));
  // hash = hash + (hash << 2);
  add(t0, t0, Operand(t0, LSL, 2));
  // hash = hash ^ (hash >> 4);
  eor(t0, t0, Operand(t0, LSR, 4));
  // hash = hash * 2057;
  mov(t1, Operand(2057));
  mul(t0, t0, t1);
  // hash = hash ^ (hash >> 16);
  eor(t0, t0, Operand(t0, LSR, 16));

  // Compute the capacity mask.
  ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
  mov(t1, Operand(t1, ASR, kSmiTagSize));  // Convert smi to int.
  sub(t1, t1, Operand(1));

  // Generate an unrolled loop that performs a few probes before giving up.
  static const int kProbes = 4;
  for (int i = 0; i < kProbes; i++) {
    // Use t2 for index calculations and keep the hash intact in t0.
    mov(t2, t0);
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i)));
    }
    and_(t2, t2, Operand(t1));

    // Scale the index by multiplying by the element size.
    ASSERT(NumberDictionary::kEntrySize == 3);
    add(t2, t2, Operand(t2, LSL, 1));  // t2 = t2 * 3.

    // Check if the key is identical to the name.
    add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
    ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset));
    cmp(key, Operand(ip));
    if (i != kProbes - 1) {
      b(eq, &done);
    } else {
      b(ne, miss);
    }
  }

  bind(&done);
  // Check that the value is a normal property.
  // t2: elements + (index * kPointerSize)
  const int kDetailsOffset =
      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  ldr(t1, FieldMemOperand(t2, kDetailsOffset));
  tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
  b(ne, miss);

  // Get the value at the masked, scaled index and return.
  const int kValueOffset =
      NumberDictionary::kElementsStartOffset + kPointerSize;
  ldr(result, FieldMemOperand(t2, kValueOffset));
}

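// New-space allocation is a bump-pointer scheme: load the current top,
// add the object size, compare against the limit, and write the new top
// back. The top and limit words are adjacent in memory so both can be
// fetched with a single ldm.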
void MacroAssembler::AllocateInNewSpace(int object_size,
                                        Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Operand(0x7091));
      mov(scratch1, Operand(0x7191));
      mov(scratch2, Operand(0x7291));
    }
    jmp(gc_required);
    return;
  }

  ASSERT(!result.is(scratch1));
  ASSERT(!result.is(scratch2));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!scratch1.is(ip));
  ASSERT(!scratch2.is(ip));

  // Make object size into bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    object_size *= kPointerSize;
  }
  ASSERT_EQ(0, object_size & kObjectAlignmentMask);

  // Check relative positions of allocation top and limit addresses.
  // The values must be adjacent in memory to allow the use of LDM.
  // Also, assert that the registers are numbered such that the values
  // are loaded in the correct order.
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address(isolate());
  intptr_t top =
      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
  intptr_t limit =
      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
  ASSERT((limit - top) == kPointerSize);
  ASSERT(result.code() < ip.code());

  // Set up allocation top address and object size registers.
  Register topaddr = scratch1;
  Register obj_size_reg = scratch2;
  mov(topaddr, Operand(new_space_allocation_top));
  mov(obj_size_reg, Operand(object_size));

  // This code stores a temporary value in ip. This is OK, as the code below
  // does not need ip for implicit literal generation.
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    // Load allocation top into result and allocation limit into ip.
    ldm(ia, topaddr, result.bit() | ip.bit());
  } else {
    if (emit_debug_code()) {
      // Assert that result actually contains top on entry. ip is used
      // immediately below so this use of ip does not cause difference with
      // respect to register content between debug and release mode.
      ldr(ip, MemOperand(topaddr));
      cmp(result, ip);
      Check(eq, "Unexpected allocation top");
    }
    // Load allocation limit into ip. Result already contains allocation top.
    ldr(ip, MemOperand(topaddr, limit - top));
  }

  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top.
  add(scratch2, result, Operand(obj_size_reg), SetCC);
  b(cs, gc_required);
  cmp(scratch2, Operand(ip));
  b(hi, gc_required);
  str(scratch2, MemOperand(topaddr));

  // Tag object if requested.
  if ((flags & TAG_OBJECT) != 0) {
    add(result, result, Operand(kHeapObjectTag));
  }
}

void MacroAssembler::AllocateInNewSpace(Register object_size,
                                        Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Operand(0x7091));
      mov(scratch1, Operand(0x7191));
      mov(scratch2, Operand(0x7291));
    }
    jmp(gc_required);
    return;
  }

  // Assert that the register arguments are different and that none of
  // them are ip. ip is used explicitly in the code generated below.
  ASSERT(!result.is(scratch1));
  ASSERT(!result.is(scratch2));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!result.is(ip));
  ASSERT(!scratch1.is(ip));
  ASSERT(!scratch2.is(ip));

  // Check relative positions of allocation top and limit addresses.
  // The values must be adjacent in memory to allow the use of LDM.
  // Also, assert that the registers are numbered such that the values
  // are loaded in the correct order.
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address(isolate());
  intptr_t top =
      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
  intptr_t limit =
      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
  ASSERT((limit - top) == kPointerSize);
  ASSERT(result.code() < ip.code());

  // Set up allocation top address.
  Register topaddr = scratch1;
  mov(topaddr, Operand(new_space_allocation_top));

  // This code stores a temporary value in ip. This is OK, as the code below
  // does not need ip for implicit literal generation.
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    // Load allocation top into result and allocation limit into ip.
    ldm(ia, topaddr, result.bit() | ip.bit());
  } else {
    if (emit_debug_code()) {
      // Assert that result actually contains top on entry. ip is used
      // immediately below so this use of ip does not cause difference with
      // respect to register content between debug and release mode.
      ldr(ip, MemOperand(topaddr));
      cmp(result, ip);
      Check(eq, "Unexpected allocation top");
    }
    // Load allocation limit into ip. Result already contains allocation top.
    ldr(ip, MemOperand(topaddr, limit - top));
  }

  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top. Object size may be in words so a shift is
  // required to get the number of bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
  } else {
    add(scratch2, result, Operand(object_size), SetCC);
  }
  b(cs, gc_required);
  cmp(scratch2, Operand(ip));
  b(hi, gc_required);

  // Update allocation top. scratch2 holds the new top.
  if (emit_debug_code()) {
    tst(scratch2, Operand(kObjectAlignmentMask));
    Check(eq, "Unaligned allocation in new space");
  }
  str(scratch2, MemOperand(topaddr));

  // Tag object if requested.
  if ((flags & TAG_OBJECT) != 0) {
    add(result, result, Operand(kHeapObjectTag));
  }
}

void MacroAssembler::UndoAllocationInNewSpace(Register object,
                                              Register scratch) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Make sure the object has no tag before resetting top.
  and_(object, object, Operand(~kHeapObjectTagMask));
#ifdef DEBUG
  // Check that the object un-allocated is below the current top.
  mov(scratch, Operand(new_space_allocation_top));
  ldr(scratch, MemOperand(scratch));
  cmp(object, scratch);
  Check(lt, "Undo allocation of non allocated memory");
#endif
  // Write the address of the object to un-allocate as the current top.
  mov(scratch, Operand(new_space_allocation_top));
  str(object, MemOperand(scratch));
}

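// The string allocators below round the payload size up to the object
// alignment before allocating, e.g. for a two-byte string:
//   size = (2 * length + SeqTwoByteString::kHeaderSize + kObjectAlignmentMask)
//          & ~kObjectAlignmentMask;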
void MacroAssembler::AllocateTwoByteString(Register result,
                                           Register length,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  mov(scratch1, Operand(length, LSL, 1));  // Length in bytes, not chars.
  add(scratch1, scratch1,
      Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
  and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));

  // Allocate two-byte string in new space.
  AllocateInNewSpace(scratch1,
                     result,
                     scratch2,
                     scratch3,
                     gc_required,
                     TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result,
                      length,
                      Heap::kStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::AllocateAsciiString(Register result,
                                         Register length,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
                                         Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
  ASSERT(kCharSize == 1);
  add(scratch1, length,
      Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
  and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));

  // Allocate ASCII string in new space.
  AllocateInNewSpace(scratch1,
                     result,
                     scratch2,
                     scratch3,
                     gc_required,
                     TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result,
                      length,
                      Heap::kAsciiStringMapRootIndex,
                      scratch1,
                      scratch2);
}

void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Register length,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  AllocateInNewSpace(ConsString::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);

  InitializeNewString(result,
                      length,
                      Heap::kConsStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::AllocateAsciiConsString(Register result,
                                             Register length,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* gc_required) {
  AllocateInNewSpace(ConsString::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);

  InitializeNewString(result,
                      length,
                      Heap::kConsAsciiStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Register length,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  AllocateInNewSpace(SlicedString::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);

  InitializeNewString(result,
                      length,
                      Heap::kSlicedStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::AllocateAsciiSlicedString(Register result,
                                               Register length,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  AllocateInNewSpace(SlicedString::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);

  InitializeNewString(result,
                      length,
                      Heap::kSlicedAsciiStringMapRootIndex,
                      scratch1,
                      scratch2);
}

void MacroAssembler::CompareObjectType(Register object,
                                       Register map,
                                       Register type_reg,
                                       InstanceType type) {
  ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
  CompareInstanceType(map, type_reg, type);
}


void MacroAssembler::CompareInstanceType(Register map,
                                         Register type_reg,
                                         InstanceType type) {
  ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
  cmp(type_reg, Operand(type));
}


void MacroAssembler::CompareRoot(Register obj,
                                 Heap::RootListIndex index) {
  ASSERT(!obj.is(ip));
  LoadRoot(ip, index);
  cmp(obj, ip);
}

void MacroAssembler::CheckFastElements(Register map,
                                       Register scratch,
                                       Label* fail) {
  STATIC_ASSERT(FAST_ELEMENTS == 0);
  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
  b(hi, fail);
}


void MacroAssembler::CheckMap(Register obj,
                              Register scratch,
                              Handle<Map> map,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }
  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  mov(ip, Operand(map));
  cmp(scratch, ip);
  b(ne, fail);
}


void MacroAssembler::CheckMap(Register obj,
                              Register scratch,
                              Heap::RootListIndex index,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }
  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  LoadRoot(ip, index);
  cmp(scratch, ip);
  b(ne, fail);
}


void MacroAssembler::DispatchMap(Register obj,
                                 Register scratch,
                                 Handle<Map> map,
                                 Handle<Code> success,
                                 SmiCheckType smi_check_type) {
  Label fail;
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, &fail);
  }
  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  mov(ip, Operand(map));
  cmp(scratch, ip);
  Jump(success, RelocInfo::CODE_TARGET, eq);
  bind(&fail);
}

void MacroAssembler::TryGetFunctionPrototype(Register function,
                                             Register result,
                                             Register scratch,
                                             Label* miss) {
  // Check that the receiver isn't a smi.
  JumpIfSmi(function, miss);

  // Check that the function really is a function. Load map into result reg.
  CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
  b(ne, miss);

  // Make sure that the function has an instance prototype.
  Label non_instance;
  ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
  tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
  b(ne, &non_instance);

  // Get the prototype or initial map from the function.
  ldr(result,
      FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // If the prototype or initial map is the hole, don't return it and
  // simply miss the cache instead. This will allow us to allocate a
  // prototype object on-demand in the runtime system.
  LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  cmp(result, ip);
  b(eq, miss);

  // If the function does not have an initial map, we're done.
  Label done;
  CompareObjectType(result, scratch, scratch, MAP_TYPE);
  b(ne, &done);

  // Get the prototype from the initial map.
  ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
  jmp(&done);

  // Non-instance prototype: Fetch prototype from constructor field
  // in initial map.
  bind(&non_instance);
  ldr(result, FieldMemOperand(result, Map::kConstructorOffset));

  // All done.
  bind(&done);
}

void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond);
}


MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) {
  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
  Object* result;
  { MaybeObject* maybe_result = stub->TryGetCode();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Handle<Code> code(Code::cast(result));
  Call(code, RelocInfo::CODE_TARGET, kNoASTId, cond);
  return result;
}


void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}


MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
  Object* result;
  { MaybeObject* maybe_result = stub->TryGetCode();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond);
  return result;
}

static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return ref0.address() - ref1.address();
}

MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
1938
ExternalReference function, int stack_space) {
1939
ExternalReference next_address =
1940
ExternalReference::handle_scope_next_address();
1941
const int kNextOffset = 0;
1942
const int kLimitOffset = AddressOffset(
1943
ExternalReference::handle_scope_limit_address(),
1945
const int kLevelOffset = AddressOffset(
1946
ExternalReference::handle_scope_level_address(),
1949
// Allocate HandleScope in callee-save registers.
1950
mov(r7, Operand(next_address));
1951
ldr(r4, MemOperand(r7, kNextOffset));
1952
ldr(r5, MemOperand(r7, kLimitOffset));
1953
ldr(r6, MemOperand(r7, kLevelOffset));
1954
add(r6, r6, Operand(1));
1955
str(r6, MemOperand(r7, kLevelOffset));
1957
// Native call returns to the DirectCEntry stub which redirects to the
1958
// return address pushed on stack (could have moved after GC).
1959
// DirectCEntry stub itself is generated early and never moves.
1960
DirectCEntryStub stub;
1961
stub.GenerateCall(this, function);
1963
Label promote_scheduled_exception;
1964
Label delete_allocated_handles;
1965
Label leave_exit_frame;
1967
// If result is non-zero, dereference to get the result value
1968
// otherwise set it to undefined.
1969
cmp(r0, Operand(0));
1970
LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
1971
ldr(r0, MemOperand(r0), ne);
1973
// No more valid handles (the result handle was the last one). Restore
1974
// previous handle scope.
1975
str(r4, MemOperand(r7, kNextOffset));
1976
if (emit_debug_code()) {
1977
ldr(r1, MemOperand(r7, kLevelOffset));
1979
Check(eq, "Unexpected level after return from api call");
1981
sub(r6, r6, Operand(1));
1982
str(r6, MemOperand(r7, kLevelOffset));
1983
ldr(ip, MemOperand(r7, kLimitOffset));
1985
b(ne, &delete_allocated_handles);
1987
// Check if the function scheduled an exception.
1988
bind(&leave_exit_frame);
1989
LoadRoot(r4, Heap::kTheHoleValueRootIndex);
1990
mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
1991
ldr(r5, MemOperand(ip));
1993
b(ne, &promote_scheduled_exception);
1995
// LeaveExitFrame expects unwind space to be in a register.
1996
mov(r4, Operand(stack_space));
1997
LeaveExitFrame(false, r4);
2000
bind(&promote_scheduled_exception);
2002
= TryTailCallExternalReference(
2003
ExternalReference(Runtime::kPromoteScheduledException, isolate()),
2006
if (result->IsFailure()) {
2010
// HandleScope limit has changed. Delete allocated extensions.
2011
bind(&delete_allocated_handles);
2012
str(r5, MemOperand(r7, kLimitOffset));
2014
PrepareCallCFunction(1, r5);
2015
mov(r0, Operand(ExternalReference::isolate_address()));
2017
ExternalReference::delete_handle_scope_extensions(isolate()), 1);
2019
jmp(&leave_exit_frame);


void MacroAssembler::IllegalOperation(int num_arguments) {
  if (num_arguments > 0) {
    add(sp, sp, Operand(num_arguments * kPointerSize));
  }
  LoadRoot(r0, Heap::kUndefinedValueRootIndex);
}


void MacroAssembler::IndexFromHash(Register hash, Register index) {
  // If the hash field contains an array index pick it out. The assert checks
  // that the constants for the maximum number of digits for an array index
  // cached in the hash field and the number of bits reserved for it do not
  // conflict.
  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
  // the low kHashShift bits.
  STATIC_ASSERT(kSmiTag == 0);
  Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
  mov(index, Operand(hash, LSL, kSmiTagSize));
}
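
// For instance, with kSmiTagSize == 1 and an array index of 7 cached in the
// hash field, Ubfx leaves the raw value 7 in 'hash' and the final mov
// produces 14, i.e. the smi encoding of 7 (the value shifted up by the tag
// size).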


void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
                                                       Register outHighReg,
                                                       Register outLowReg) {
  // ARMv7 VFP3 instructions to implement integer to double conversion.
  mov(r7, Operand(inReg, ASR, kSmiTagSize));
  vmov(s15, r7);
  vcvt_f64_s32(d7, s15);
  vmov(outLowReg, outHighReg, d7);
}


void MacroAssembler::ObjectToDoubleVFPRegister(Register object,
                                               DwVfpRegister result,
                                               Register scratch1,
                                               Register scratch2,
                                               Register heap_number_map,
                                               SwVfpRegister scratch3,
                                               Label* not_number,
                                               ObjectToDoubleFlags flags) {
  Label done;
  if ((flags & OBJECT_NOT_SMI) == 0) {
    Label not_smi;
    JumpIfNotSmi(object, &not_smi);
    // Remove smi tag and convert to double.
    mov(scratch1, Operand(object, ASR, kSmiTagSize));
    vmov(scratch3, scratch1);
    vcvt_f64_s32(result, scratch3);
    b(&done);
    bind(&not_smi);
  }
  // Check for heap number and load double value from it.
  ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
  sub(scratch2, object, Operand(kHeapObjectTag));
  cmp(scratch1, heap_number_map);
  b(ne, not_number);
  if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
    // If exponent is all ones the number is either a NaN or +/-Infinity.
    ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
    Sbfx(scratch1,
         scratch1,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);
    // An all-one value sign extends to -1.
    cmp(scratch1, Operand(-1));
    b(eq, not_number);
  }
  vldr(result, scratch2, HeapNumber::kValueOffset);
  bind(&done);
}


void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
                                            DwVfpRegister value,
                                            Register scratch1,
                                            SwVfpRegister scratch2) {
  mov(scratch1, Operand(smi, ASR, kSmiTagSize));
  vmov(scratch2, scratch1);
  vcvt_f64_s32(value, scratch2);
}


// Tries to get a signed int32 out of a double precision floating point heap
// number. Rounds towards 0. Branches to 'not_int32' if the double is out of
// the 32-bit signed integer range.
void MacroAssembler::ConvertToInt32(Register source,
                                    Register dest,
                                    Register scratch,
                                    Register scratch2,
                                    DwVfpRegister double_scratch,
                                    Label *not_int32) {
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    sub(scratch, source, Operand(kHeapObjectTag));
    vldr(double_scratch, scratch, HeapNumber::kValueOffset);
    vcvt_s32_f64(double_scratch.low(), double_scratch);
    vmov(dest, double_scratch.low());
    // Signed vcvt instruction will saturate to the minimum (0x80000000) or
    // maximum (0x7fffffff) signed 32-bit integer when the double is out of
    // range. When subtracting one, the minimum signed integer becomes the
    // maximum signed integer.
    sub(scratch, dest, Operand(1));
    cmp(scratch, Operand(LONG_MAX - 1));
    // If equal then dest was LONG_MAX, if greater dest was LONG_MIN.
    b(ge, not_int32);
  } else {
    // This code is faster for doubles that are in the ranges -0x7fffffff to
    // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost to
    // the range of signed int32 values that are not Smis. Jumps to the label
    // 'not_int32' if the double isn't in the range -0x80000000.0 to
    // 0x80000000.0 (excluding the endpoints).
    Label right_exponent, done;
    // Get exponent word.
    ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
    // Get exponent alone in scratch2.
    Ubfx(scratch2,
         scratch,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);
    // Load dest with zero. We use this either for the final shift or
    // for the answer.
    mov(dest, Operand(0, RelocInfo::NONE));
    // Check whether the exponent matches a 32 bit signed int that is not a
    // Smi. A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
    // This is the exponent that we are fastest at and also the highest
    // exponent we can handle here.
    const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
    // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
    // split it up to avoid a constant pool entry. You can't do that in
    // general for cmp because of the overflow flag, but we know the exponent
    // is in the range 0-2047 so there is no overflow.
    int fudge_factor = 0x400;
    sub(scratch2, scratch2, Operand(fudge_factor));
    cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
    // If we have a match of the int32-but-not-Smi exponent then skip some
    // logic.
    b(eq, &right_exponent);
    // If the exponent is higher than that then go to slow case. This catches
    // numbers that don't fit in a signed int32, infinities and NaNs.
    b(gt, not_int32);

    // We know the exponent is smaller than 30 (biased). If it is less than
    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
    // it rounds to zero.
    const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
    sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
    // Dest already has a Smi zero.
    b(lt, &done);

    // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to
    // get how much to shift down.
    rsb(dest, scratch2, Operand(30));

    bind(&right_exponent);
    // Get the top bits of the mantissa.
    and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
    // Put back the implicit 1.
    orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
    // Shift up the mantissa bits to take up the space the exponent used to
    // take. We just orred in the implicit bit so that took care of one and
    // we want to leave the sign bit 0 so we subtract 2 bits from the shift
    // distance.
    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
    mov(scratch2, Operand(scratch2, LSL, shift_distance));
    // Put sign in zero flag.
    tst(scratch, Operand(HeapNumber::kSignMask));
    // Get the second half of the double. For some exponents we don't
    // actually need this because the bits get shifted out again, but
    // it's probably slower to test than just to do it.
    ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
    // Shift down 22 bits to get the last 10 bits.
    orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
    // Move down according to the exponent.
    mov(dest, Operand(scratch, LSR, dest));
    // Fix sign if sign bit was set.
    rsb(dest, dest, Operand(0, RelocInfo::NONE), LeaveCC, ne);
    bind(&done);
  }
}
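
// Worked example for the non-VFP path (illustrative): the double 6.0 has a
// biased exponent of 1025 and top mantissa bits 0x80000. After the implicit
// 1 is or'ed in at HeapNumber::kExponentShift (bit 20) the top word holds
// 0x180000; shifting it up by shift_distance (10) gives 0x60000000, and with
// dest = 30 - 2 = 28 the final LSR yields 0x60000000 >> 28 == 6, as expected.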


void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
                                     SwVfpRegister result,
                                     DwVfpRegister double_input,
                                     Register scratch1,
                                     Register scratch2,
                                     CheckForInexactConversion check_inexact) {
  ASSERT(CpuFeatures::IsSupported(VFP3));
  CpuFeatures::Scope scope(VFP3);
  Register prev_fpscr = scratch1;
  Register scratch = scratch2;

  int32_t check_inexact_conversion =
    (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0;

  // Set custom FPSCR:
  //  - Set rounding mode.
  //  - Clear vfp cumulative exception flags.
  //  - Make sure Flush-to-zero mode control bit is unset.
  vmrs(prev_fpscr);
  bic(scratch,
      prev_fpscr,
      Operand(kVFPExceptionMask |
              check_inexact_conversion |
              kVFPRoundingModeMask |
              kVFPFlushToZeroMask));
  // 'Round To Nearest' is encoded by 0b00 so no bits need to be set.
  if (rounding_mode != kRoundToNearest) {
    orr(scratch, scratch, Operand(rounding_mode));
  }
  vmsr(scratch);

  // Convert the argument to an integer.
  vcvt_s32_f64(result,
               double_input,
               (rounding_mode == kRoundToZero) ? kDefaultRoundToZero
                                               : kFPSCRRounding);

  // Retrieve FPSCR.
  vmrs(scratch);
  // Restore FPSCR.
  vmsr(prev_fpscr);
  // Check for vfp exceptions.
  tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion));
}
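
// Typical use (illustrative sketch, labels assumed): callers test the Z flag
// right after this helper returns, e.g.
//   EmitVFPTruncate(kRoundToZero, s0, d0, r0, r1);
//   b(ne, &conversion_failed);  // Some tested exception bit was raised.
// 'eq' here means none of the tested exception bits were set, so the value
// left in 'result' can be trusted.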


void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
                                                 Register input_high,
                                                 Register input_low,
                                                 Register scratch) {
  Label done, normal_exponent, restore_sign;

  // Extract the biased exponent in result.
  Ubfx(result,
       input_high,
       HeapNumber::kExponentShift,
       HeapNumber::kExponentBits);

  // Check for Infinity and NaNs, which should return 0.
  cmp(result, Operand(HeapNumber::kExponentMask));
  mov(result, Operand(0), LeaveCC, eq);
  b(eq, &done);

  // Express exponent as delta to (number of mantissa bits + 31).
  sub(result,
      result,
      Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31),
      SetCC);

  // If the delta is strictly positive, all bits would be shifted away,
  // which means that we can return 0.
  b(le, &normal_exponent);
  mov(result, Operand(0));
  b(&done);

  bind(&normal_exponent);
  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
  // Calculate shift.
  add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits), SetCC);

  // Save the sign.
  Register sign = result;
  result = no_reg;
  and_(sign, input_high, Operand(HeapNumber::kSignMask));

  // Set the implicit 1 before the mantissa part in input_high.
  orr(input_high,
      input_high,
      Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  // Shift the mantissa bits to the correct position.
  // We don't need to clear non-mantissa bits as they will be shifted away.
  // If they weren't, it would mean that the answer is in the 32-bit range.
  mov(input_high, Operand(input_high, LSL, scratch));

  // Replace the shifted bits with bits from the lower mantissa word.
  Label pos_shift, shift_done;
  rsb(scratch, scratch, Operand(32), SetCC);
  b(&pos_shift, ge);

  // Negate scratch.
  rsb(scratch, scratch, Operand(0));
  mov(input_low, Operand(input_low, LSL, scratch));
  b(&shift_done);

  bind(&pos_shift);
  mov(input_low, Operand(input_low, LSR, scratch));

  bind(&shift_done);
  orr(input_high, input_high, Operand(input_low));
  // Restore sign if necessary.
  cmp(sign, Operand(0));
  result = sign;
  sign = no_reg;
  rsb(result, input_high, Operand(0), LeaveCC, ne);
  mov(result, input_high, LeaveCC, eq);
  bind(&done);
}
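
// Example of the semantics implemented above: ECMA-262 ToInt32 keeps the low
// 32 bits of the truncated value, so 2^32 + 3 maps to 3 and 2^31 wraps to
// -2^31 (0x80000000).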


void MacroAssembler::EmitECMATruncate(Register result,
                                      DwVfpRegister double_input,
                                      SwVfpRegister single_scratch,
                                      Register scratch,
                                      Register input_high,
                                      Register input_low) {
  CpuFeatures::Scope scope(VFP3);
  ASSERT(!input_high.is(result));
  ASSERT(!input_low.is(result));
  ASSERT(!input_low.is(input_high));
  ASSERT(!scratch.is(result) &&
         !scratch.is(input_high) &&
         !scratch.is(input_low));
  ASSERT(!single_scratch.is(double_input.low()) &&
         !single_scratch.is(double_input.high()));

  Label done;

  // Clear cumulative exception flags.
  ClearFPSCRBits(kVFPExceptionMask, scratch);
  // Try a conversion to a signed integer.
  vcvt_s32_f64(single_scratch, double_input);
  vmov(result, single_scratch);
  // Retrieve the FPSCR.
  vmrs(scratch);
  // Check for overflow and NaNs.
  tst(scratch, Operand(kVFPOverflowExceptionBit |
                       kVFPUnderflowExceptionBit |
                       kVFPInvalidOpExceptionBit));
  // If we had no exceptions we are done.
  b(eq, &done);

  // Load the double value and perform a manual truncation.
  vmov(input_low, input_high, double_input);
  EmitOutOfInt32RangeTruncate(result,
                              input_high,
                              input_low,
                              scratch);
  bind(&done);
}
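
// Only inputs that raise one of the tested exception bits (typically NaNs
// and values outside the int32 range) take the slow path; everything else is
// handled by the single vcvt above.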


void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                         Register src,
                                         int num_least_bits) {
  if (CpuFeatures::IsSupported(ARMv7)) {
    ubfx(dst, src, kSmiTagSize, num_least_bits);
  } else {
    mov(dst, Operand(src, ASR, kSmiTagSize));
    and_(dst, dst, Operand((1 << num_least_bits) - 1));
  }
}
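
// Example: for the smi 13 (raw bits 0x1A with a one-bit tag) and
// num_least_bits == 3, ubfx extracts bits [1..3] of the raw word, leaving
// 13 & 7 == 5 in dst. The fallback path computes the same value with an
// arithmetic shift followed by a mask.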


void MacroAssembler::GetLeastBitsFromInt32(Register dst,
                                           Register src,
                                           int num_least_bits) {
  and_(dst, src, Operand((1 << num_least_bits) - 1));
}


void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments) {
  // All parameters are on the stack. r0 has the return value after call.

  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  if (f->nargs >= 0 && f->nargs != num_arguments) {
    IllegalOperation(num_arguments);
    return;
  }

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  mov(r0, Operand(num_arguments));
  mov(r1, Operand(ExternalReference(f, isolate())));
  CEntryStub stub(1);
  CallStub(&stub);
}


void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
  CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}


void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
  const Runtime::Function* function = Runtime::FunctionForId(id);
  mov(r0, Operand(function->nargs));
  mov(r1, Operand(ExternalReference(function, isolate())));
  CEntryStub stub(1);
  stub.SaveDoubles();
  CallStub(&stub);
}


void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  mov(r0, Operand(num_arguments));
  mov(r1, Operand(ext));

  CEntryStub stub(1);
  CallStub(&stub);
}


void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  mov(r0, Operand(num_arguments));
  JumpToExternalReference(ext);
}


MaybeObject* MacroAssembler::TryTailCallExternalReference(
    const ExternalReference& ext, int num_arguments, int result_size) {
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  mov(r0, Operand(num_arguments));
  return TryJumpToExternalReference(ext);
}


void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid, isolate()),
                            num_arguments,
                            result_size);
}


void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
#if defined(__thumb__)
  // Thumb mode builtin.
  ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
#endif
  mov(r1, Operand(builtin));
  CEntryStub stub(1);
  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


MaybeObject* MacroAssembler::TryJumpToExternalReference(
    const ExternalReference& builtin) {
#if defined(__thumb__)
  // Thumb mode builtin.
  ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
#endif
  mov(r1, Operand(builtin));
  CEntryStub stub(1);
  return TryTailCallStub(&stub);
}


void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  GetBuiltinEntry(r2, id);
  if (flag == CALL_FUNCTION) {
    call_wrapper.BeforeCall(CallSize(r2));
    SetCallKind(r5, CALL_AS_METHOD);
    Call(r2);
    call_wrapper.AfterCall();
  } else {
    ASSERT(flag == JUMP_FUNCTION);
    SetCallKind(r5, CALL_AS_METHOD);
    Jump(r2);
  }
}


void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the builtins object into target register.
  ldr(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
  // Load the JavaScript builtin function from the builtins object.
  ldr(target, FieldMemOperand(target,
                          JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}


void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  ASSERT(!target.is(r1));
  GetBuiltinFunction(r1, id);
  // Load the code entry point from the builtins object.
  ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
}


void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch1, Operand(value));
    mov(scratch2, Operand(ExternalReference(counter)));
    str(scratch1, MemOperand(scratch2));
  }
}


void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch2, Operand(ExternalReference(counter)));
    ldr(scratch1, MemOperand(scratch2));
    add(scratch1, scratch1, Operand(value));
    str(scratch1, MemOperand(scratch2));
  }
}


void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch2, Operand(ExternalReference(counter)));
    ldr(scratch1, MemOperand(scratch2));
    sub(scratch1, scratch1, Operand(value));
    str(scratch1, MemOperand(scratch2));
  }
}


void MacroAssembler::Assert(Condition cond, const char* msg) {
  if (emit_debug_code())
    Check(cond, msg);
}


void MacroAssembler::AssertRegisterIsRoot(Register reg,
                                          Heap::RootListIndex index) {
  if (emit_debug_code()) {
    LoadRoot(ip, index);
    cmp(reg, ip);
    Check(eq, "Register did not match expected root");
  }
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    ASSERT(!elements.is(ip));
    Label ok;
    push(elements);
    ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
    LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
    cmp(elements, ip);
    b(eq, &ok);
    LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
    cmp(elements, ip);
    b(eq, &ok);
    LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
    cmp(elements, ip);
    b(eq, &ok);
    Abort("JSObject with fast elements map has slow elements");
    bind(&ok);
    pop(elements);
  }
}


void MacroAssembler::Check(Condition cond, const char* msg) {
  Label L;
  b(cond, &L);
  Abort(msg);
  // will not return here
  bind(&L);
}


void MacroAssembler::Abort(const char* msg) {
  Label abort_start;
  bind(&abort_start);
  // We want to pass the msg string like a smi to avoid GC
  // problems, however msg is not guaranteed to be aligned
  // properly. Instead, we pass an aligned pointer that is
  // a proper v8 smi, but also pass the alignment difference
  // from the real pointer as a smi.
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }
#endif
  // Disable stub call restrictions to always allow calls to abort.
  AllowStubCallsScope allow_scope(this, true);

  mov(r0, Operand(p0));
  push(r0);
  mov(r0, Operand(Smi::FromInt(p1 - p0)));
  push(r0);
  CallRuntime(Runtime::kAbort, 2);
  // will not return here
  if (is_const_pool_blocked()) {
    // If the calling code cares about the exact number of
    // instructions generated, we insert padding here to keep the size
    // of the Abort macro constant.
    static const int kExpectedAbortInstructions = 10;
    int abort_instructions = InstructionsGeneratedSince(&abort_start);
    ASSERT(abort_instructions <= kExpectedAbortInstructions);
    while (abort_instructions++ < kExpectedAbortInstructions) {
      nop();
    }
  }
}
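
// The smi trick above in practice: kSmiTagMask is 1, so p0 is 'msg' rounded
// down to an even address (a valid smi encoding) and the Smi::FromInt delta
// is 0 or 1. The runtime recovers the original pointer as p0 + delta.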


void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context. Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in cp).
    mov(dst, cp);
  }
}


void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  ldr(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  // Load the global context from the global or builtins object.
  ldr(function, FieldMemOperand(function,
                                GlobalObject::kGlobalContextOffset));
  // Load the function from the global context.
  ldr(function, MemOperand(function, Context::SlotOffset(index)));
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map,
                                                  Register scratch) {
  // Load the initial map. The global functions all have initial maps.
  ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
    b(&ok);
    bind(&fail);
    Abort("Global functions must have initial map");
    bind(&ok);
  }
}


void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
    Register reg,
    Register scratch,
    Label* not_power_of_two_or_zero) {
  sub(scratch, reg, Operand(1), SetCC);
  b(mi, not_power_of_two_or_zero);
  tst(scratch, reg);
  b(ne, not_power_of_two_or_zero);
}


void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
    Register reg,
    Register scratch,
    Label* zero_and_neg,
    Label* not_power_of_two) {
  sub(scratch, reg, Operand(1), SetCC);
  b(mi, zero_and_neg);
  tst(scratch, reg);
  b(ne, not_power_of_two);
}


void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                      Register reg2,
                                      Label* on_not_both_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  tst(reg1, Operand(kSmiTagMask));
  tst(reg2, Operand(kSmiTagMask), eq);
  b(ne, on_not_both_smi);
}
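
// The predicated second tst only executes when the first one set the Z flag
// (reg1 is a smi); the final ne therefore fires when either register has its
// tag bit set. Example: reg1 == 4 (smi 2) and reg2 == 5 leaves Z clear after
// the second tst, so the branch to on_not_both_smi is taken.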


void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                     Register reg2,
                                     Label* on_either_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  tst(reg1, Operand(kSmiTagMask));
  tst(reg2, Operand(kSmiTagMask), ne);
  b(eq, on_either_smi);
}


void MacroAssembler::AbortIfSmi(Register object) {
  STATIC_ASSERT(kSmiTag == 0);
  tst(object, Operand(kSmiTagMask));
  Assert(ne, "Operand is a smi");
}


void MacroAssembler::AbortIfNotSmi(Register object) {
  STATIC_ASSERT(kSmiTag == 0);
  tst(object, Operand(kSmiTagMask));
  Assert(eq, "Operand is not smi");
}


void MacroAssembler::AbortIfNotString(Register object) {
  STATIC_ASSERT(kSmiTag == 0);
  tst(object, Operand(kSmiTagMask));
  Assert(ne, "Operand is not a string");
  push(object);
  ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
  CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
  pop(object);
  Assert(lo, "Operand is not a string");
}


void MacroAssembler::AbortIfNotRootValue(Register src,
                                         Heap::RootListIndex root_value_index,
                                         const char* message) {
  CompareRoot(src, root_value_index);
  Assert(eq, message);
}


void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                         Register heap_number_map,
                                         Register scratch,
                                         Label* on_not_heap_number) {
  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  cmp(scratch, heap_number_map);
  b(ne, on_not_heap_number);
}


void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
    Register first,
    Register second,
    Register scratch1,
    Register scratch2,
    Label* failure) {
  // Test that both first and second are sequential ASCII strings.
  // Assume that they are non-smis.
  ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
  ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
  ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));

  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
                                               scratch2,
                                               scratch1,
                                               scratch2,
                                               failure);
}


void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
                                                         Register second,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Label* failure) {
  // Check that neither is a smi.
  STATIC_ASSERT(kSmiTag == 0);
  and_(scratch1, first, Operand(second));
  JumpIfSmi(scratch1, failure);
  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
                                             second,
                                             scratch1,
                                             scratch2,
                                             failure);
}


// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed.
void MacroAssembler::AllocateHeapNumber(Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Register heap_number_map,
                                        Label* gc_required) {
  // Allocate an object in the heap for the heap number and tag it as a heap
  // object.
  AllocateInNewSpace(HeapNumber::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);

  // Store heap number map in the allocated object.
  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
}


void MacroAssembler::AllocateHeapNumberWithValue(Register result,
                                                 DwVfpRegister value,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Register heap_number_map,
                                                 Label* gc_required) {
  AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
  sub(scratch1, result, Operand(kHeapObjectTag));
  vstr(value, scratch1, HeapNumber::kValueOffset);
}


// Copies a fixed number of fields of heap objects from src to dst.
void MacroAssembler::CopyFields(Register dst,
                                Register src,
                                RegList temps,
                                int field_count) {
  // At least one bit set in the first 15 registers.
  ASSERT((temps & ((1 << 15) - 1)) != 0);
  ASSERT((temps & dst.bit()) == 0);
  ASSERT((temps & src.bit()) == 0);
  // Primitive implementation using only one temporary register.

  Register tmp = no_reg;
  // Find a temp register in temps list.
  for (int i = 0; i < 15; i++) {
    if ((temps & (1 << i)) != 0) {
      tmp.set_code(i);
      break;
    }
  }
  ASSERT(!tmp.is(no_reg));

  for (int i = 0; i < field_count; i++) {
    ldr(tmp, FieldMemOperand(src, i * kPointerSize));
    str(tmp, FieldMemOperand(dst, i * kPointerSize));
  }
}


void MacroAssembler::CopyBytes(Register src,
                               Register dst,
                               Register length,
                               Register scratch) {
  Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;

  // Align src before copying in word size chunks.
  bind(&align_loop);
  cmp(length, Operand(0));
  b(eq, &done);
  bind(&align_loop_1);
  tst(src, Operand(kPointerSize - 1));
  b(eq, &word_loop);
  ldrb(scratch, MemOperand(src, 1, PostIndex));
  strb(scratch, MemOperand(dst, 1, PostIndex));
  sub(length, length, Operand(1), SetCC);
  b(ne, &byte_loop_1);

  // Copy bytes in word size chunks.
  bind(&word_loop);
  if (emit_debug_code()) {
    tst(src, Operand(kPointerSize - 1));
    Assert(eq, "Expecting alignment for CopyBytes");
  }
  cmp(length, Operand(kPointerSize));
  b(lt, &byte_loop);
  ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
#if CAN_USE_UNALIGNED_ACCESSES
  str(scratch, MemOperand(dst, kPointerSize, PostIndex));
#else
  strb(scratch, MemOperand(dst, 1, PostIndex));
  mov(scratch, Operand(scratch, LSR, 8));
  strb(scratch, MemOperand(dst, 1, PostIndex));
  mov(scratch, Operand(scratch, LSR, 8));
  strb(scratch, MemOperand(dst, 1, PostIndex));
  mov(scratch, Operand(scratch, LSR, 8));
  strb(scratch, MemOperand(dst, 1, PostIndex));
#endif
  sub(length, length, Operand(kPointerSize));
  b(&word_loop);

  // Copy the last bytes if any left.
  bind(&byte_loop);
  cmp(length, Operand(0));
  b(eq, &done);
  bind(&byte_loop_1);
  ldrb(scratch, MemOperand(src, 1, PostIndex));
  strb(scratch, MemOperand(dst, 1, PostIndex));
  sub(length, length, Operand(1), SetCC);
  b(ne, &byte_loop_1);
  bind(&done);
}


void MacroAssembler::CountLeadingZeros(Register zeros,   // Answer.
                                       Register source,  // Input.
                                       Register scratch) {
  ASSERT(!zeros.is(source) || !source.is(scratch));
  ASSERT(!zeros.is(scratch));
  ASSERT(!scratch.is(ip));
  ASSERT(!source.is(ip));
  ASSERT(!zeros.is(ip));
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
  clz(zeros, source);  // This instruction is only supported from ARMv5.
#else
  Move(scratch, source);
  mov(zeros, Operand(0, RelocInfo::NONE));
  // Top 16.
  tst(scratch, Operand(0xffff0000));
  add(zeros, zeros, Operand(16), LeaveCC, eq);
  mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
  // Top 8.
  tst(scratch, Operand(0xff000000));
  add(zeros, zeros, Operand(8), LeaveCC, eq);
  mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
  // Top 4.
  tst(scratch, Operand(0xf0000000));
  add(zeros, zeros, Operand(4), LeaveCC, eq);
  mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
  // Top 2.
  tst(scratch, Operand(0xc0000000));
  add(zeros, zeros, Operand(2), LeaveCC, eq);
  mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
  // Top bit.
  tst(scratch, Operand(0x80000000u));
  add(zeros, zeros, Operand(1), LeaveCC, eq);
#endif
}
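
// Trace of the fallback for source == 0x00012345: the top 16 bits are not
// all zero, so nothing is added; each of the 0xff000000, 0xf0000000,
// 0xc0000000 and 0x80000000 tests then succeeds on the progressively shifted
// word, accumulating 8 + 4 + 2 + 1 == 15, which matches clz(0x00012345).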


void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
    Register first,
    Register second,
    Register scratch1,
    Register scratch2,
    Label* failure) {
  int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
  and_(scratch1, first, Operand(kFlatAsciiStringMask));
  and_(scratch2, second, Operand(kFlatAsciiStringMask));
  cmp(scratch1, Operand(kFlatAsciiStringTag));
  // Ignore second test if first test failed.
  cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
  b(ne, failure);
}


void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
                                                            Register scratch,
                                                            Label* failure) {
  int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
  and_(scratch, type, Operand(kFlatAsciiStringMask));
  cmp(scratch, Operand(kFlatAsciiStringTag));
  b(ne, failure);
}


static const int kRegisterPassedArguments = 4;


int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
                                              int num_double_arguments) {
  int stack_passed_words = 0;
  if (use_eabi_hardfloat()) {
    // In the hard floating point calling convention, we can use
    // all double registers to pass doubles.
    if (num_double_arguments > DoubleRegister::kNumRegisters) {
      stack_passed_words +=
          2 * (num_double_arguments - DoubleRegister::kNumRegisters);
    }
  } else {
    // In the soft floating point calling convention, every double
    // argument is passed using two registers.
    num_reg_arguments += 2 * num_double_arguments;
  }
  // Up to four simple arguments are passed in registers r0..r3.
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }
  return stack_passed_words;
}
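
// Example: five integer arguments plus one double argument need
// 5 + 2 - 4 == 3 stack words under the soft-float convention (the double
// consumes two core registers), but only 5 - 4 == 1 under hard-float, where
// the double travels in a VFP register.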


void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          int num_double_arguments,
                                          Register scratch) {
  int frame_alignment = ActivationFrameAlignment();
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (frame_alignment > kPointerSize) {
    // Make stack end at alignment and make room for num_arguments - 4 words
    // and the original value of sp.
    mov(scratch, sp);
    sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    ASSERT(IsPowerOf2(frame_alignment));
    and_(sp, sp, Operand(-frame_alignment));
    str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}
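
// For instance, with an 8-byte frame alignment and 6 stack words this
// reserves 7 words, rounds sp down to a multiple of 8, and parks the
// original sp in the extra slot so CallCFunctionHelper can restore it with a
// single load after the call.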


void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          Register scratch) {
  PrepareCallCFunction(num_reg_arguments, 0, scratch);
}


void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
  if (use_eabi_hardfloat()) {
    Move(d0, dreg);
  } else {
    vmov(r0, r1, dreg);
  }
}


void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
                                             DoubleRegister dreg2) {
  if (use_eabi_hardfloat()) {
    if (dreg2.is(d0)) {
      ASSERT(!dreg1.is(d1));
      Move(d1, dreg2);
      Move(d0, dreg1);
    } else {
      Move(d0, dreg1);
      Move(d1, dreg2);
    }
  } else {
    vmov(r0, r1, dreg1);
    vmov(r2, r3, dreg2);
  }
}


void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
                                             Register reg) {
  if (use_eabi_hardfloat()) {
    Move(d0, dreg);
    Move(r0, reg);
  } else {
    Move(r2, reg);
    vmov(r0, r1, dreg);
  }
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  CallCFunctionHelper(no_reg,
                      function,
                      ip,
                      num_reg_arguments,
                      num_double_arguments);
}


void MacroAssembler::CallCFunction(Register function,
                                   Register scratch,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  CallCFunctionHelper(function,
                      ExternalReference::the_hole_value_location(isolate()),
                      scratch,
                      num_reg_arguments,
                      num_double_arguments);
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}


void MacroAssembler::CallCFunction(Register function,
                                   Register scratch,
                                   int num_arguments) {
  CallCFunction(function, scratch, num_arguments, 0);
}


void MacroAssembler::CallCFunctionHelper(Register function,
                                         ExternalReference function_reference,
                                         Register scratch,
                                         int num_reg_arguments,
                                         int num_double_arguments) {
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator. The simulator has its own alignment check which
  // provides more information.
#if defined(V8_HOST_ARCH_ARM)
  if (emit_debug_code()) {
    int frame_alignment = OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      ASSERT(IsPowerOf2(frame_alignment));
      Label alignment_as_expected;
      tst(sp, Operand(frame_alignment_mask));
      b(eq, &alignment_as_expected);
      // Don't use Check here, as it will call Runtime_Abort possibly
      // re-entering here.
      stop("Unexpected alignment");
      bind(&alignment_as_expected);
    }
  }
#endif

  // Just call directly. The function called cannot cause a GC, or
  // allow preemption, so the return address in the link register
  // stays correct.
  if (function.is(no_reg)) {
    mov(scratch, Operand(function_reference));
    function = scratch;
  }
  Call(function);
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (ActivationFrameAlignment() > kPointerSize) {
    ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
  }
}


void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
                                               Register result) {
  const uint32_t kLdrOffsetMask = (1 << 12) - 1;
  const int32_t kPCRegOffset = 2 * kPointerSize;
  ldr(result, MemOperand(ldr_location));
  if (emit_debug_code()) {
    // Check that the instruction is a ldr reg, [pc + offset] .
    and_(result, result, Operand(kLdrPCPattern));
    cmp(result, Operand(kLdrPCPattern));
    Check(eq, "The instruction to patch should be a load from pc.");
    // Result was clobbered. Restore it.
    ldr(result, MemOperand(ldr_location));
  }
  // Get the address of the constant.
  and_(result, result, Operand(kLdrOffsetMask));
  add(result, ldr_location, Operand(result));
  add(result, result, Operand(kPCRegOffset));
}
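
// kPCRegOffset accounts for the ARM pipeline: a pc-relative ldr reads pc as
// the instruction address plus 8, so the constant lives at
// ldr_location + 8 + (instruction & kLdrOffsetMask).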


void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
  Usat(output_reg, 8, Operand(input_reg));
}


void MacroAssembler::ClampDoubleToUint8(Register result_reg,
                                        DoubleRegister input_reg,
                                        DoubleRegister temp_double_reg) {
  Label above_zero;
  Label done;
  Label in_bounds;

  Vmov(temp_double_reg, 0.0);
  VFPCompareAndSetFlags(input_reg, temp_double_reg);
  b(gt, &above_zero);

  // Double value is less than zero, NaN or Inf, return 0.
  mov(result_reg, Operand(0));
  b(al, &done);

  // Double value is >= 255, return 255.
  bind(&above_zero);
  Vmov(temp_double_reg, 255.0);
  VFPCompareAndSetFlags(input_reg, temp_double_reg);
  b(le, &in_bounds);
  mov(result_reg, Operand(255));
  b(al, &done);

  // In 0-255 range, round and truncate.
  bind(&in_bounds);
  Vmov(temp_double_reg, 0.5);
  vadd(temp_double_reg, input_reg, temp_double_reg);
  vcvt_u32_f64(s0, temp_double_reg);
  vmov(result_reg, s0);
  bind(&done);
}
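
// Adding 0.5 before the truncating vcvt_u32_f64 implements round-half-up
// within the clamped range: 127.5 becomes 128.0 and truncates to 128, while
// 127.4 becomes 127.9 and truncates to 127.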


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  ldr(descriptors,
      FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
  Label not_smi;
  JumpIfNotSmi(descriptors, &not_smi);
  mov(descriptors, Operand(FACTORY->empty_descriptor_array()));
  bind(&not_smi);
}


CodePatcher::CodePatcher(byte* address, int instructions)
    : address_(address),
      instructions_(instructions),
      size_(instructions * Assembler::kInstrSize),
      masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CPU::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void CodePatcher::Emit(Instr instr) {
  masm()->emit(instr);
}


void CodePatcher::Emit(Address addr) {
  masm()->emit(reinterpret_cast<Instr>(addr));
}


void CodePatcher::EmitCondition(Condition cond) {
  Instr instr = Assembler::instr_at(masm_.pc_);
  instr = (instr & ~kCondMask) | cond;
  masm_.emit(instr);
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM