// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "bootstrapper.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cond, bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
                                    Register rhs, Label* lhs_not_nan,
                                    Label* slow, bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
                                           Register rhs);

// Check if the operand is a heap number.
static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
                                   Register scratch1, Register scratch2,
                                   Label* not_a_heap_number) {
  __ ldr(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch1, scratch2);
  __ b(ne, not_a_heap_number);
}

void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in r0.
  Label check_heap_number, call_builtin;
  __ JumpIfNotSmi(r0, &check_heap_number);

  __ bind(&check_heap_number);
  EmitCheckForHeapNumber(masm, r0, r1, ip, &call_builtin);

  __ bind(&call_builtin);
  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);

void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure from the given function info in new
  // space. Set the context to the current context in cp.

  // Pop the function info from the stack.

  // Attempt to allocate new JSFunction in new space.
  __ AllocateInNewSpace(JSFunction::kSize,

  int map_index = (language_mode_ == CLASSIC_MODE)
      ? Context::FUNCTION_MAP_INDEX
      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;

  // Compute the function map in the current global context and set that
  // as the map of the allocated object.
  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
  __ ldr(r2, MemOperand(r2, Context::SlotOffset(map_index)));
  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
  __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
  __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
  __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
  __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
  __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
  __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
  __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
  __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
  __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
  __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));

  // Return result. The argument function info has been popped already.

  // Create a new closure through the slower runtime call.
  __ LoadRoot(r4, Heap::kFalseValueRootIndex);
  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);

void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.

  int length = slots_ + Context::MIN_CONTEXT_SLOTS;

  // Attempt to allocate the context in new space.
  __ AllocateInNewSpace(FixedArray::SizeFor(length),

  // Load the function from the stack.
  __ ldr(r3, MemOperand(sp, 0));

  // Set up the object header.
  __ LoadRoot(r2, Heap::kFunctionContextMapRootIndex);
  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ mov(r2, Operand(Smi::FromInt(length)));
  __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));

  // Set up the fixed slots.
  __ mov(r1, Operand(Smi::FromInt(0)));
  __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
  __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));

  // Copy the global object from the previous context.
  __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));

  // Initialize the rest of the slots to undefined.
  __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ str(r1, MemOperand(r0, Context::SlotOffset(i)));

  // Remove the on-stack argument and return.

  // Need to collect. Call into runtime system.
  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);

void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  // [sp + kPointerSize]: serialized scope info

  // Try to allocate the context in new space.
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ AllocateInNewSpace(FixedArray::SizeFor(length),
                        r0, r1, r2, &gc, TAG_OBJECT);

  // Load the function from the stack.
  __ ldr(r3, MemOperand(sp, 0));

  // Load the serialized scope info from the stack.
  __ ldr(r1, MemOperand(sp, 1 * kPointerSize));

  // Set up the object header.
  __ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ mov(r2, Operand(Smi::FromInt(length)));
  __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));

  // If this block context is nested in the global context we get a smi
  // sentinel instead of a function. The block context should get the
  // canonical empty function of the global context as its closure which
  // we still have to look up.
  Label after_sentinel;
  __ JumpIfNotSmi(r3, &after_sentinel);
  if (FLAG_debug_code) {
    const char* message = "Expected 0 as a Smi sentinel";
    __ cmp(r3, Operand::Zero());
    __ Assert(eq, message);

  __ ldr(r3, GlobalObjectOperand());
  __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
  __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
  __ bind(&after_sentinel);

  // Set up the fixed slots.
  __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
  __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
  __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));

  // Copy the global object from the previous context.
  __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
  __ str(r1, ContextOperand(r0, Context::GLOBAL_INDEX));

  // Initialize the rest of the slots to the hole value.
  __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
  for (int i = 0; i < slots_; i++) {
    __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));

  // Remove the on-stack argument and return.
  __ add(sp, sp, Operand(2 * kPointerSize));

  // Need to collect. Call into runtime system.
  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);

static void GenerateFastCloneShallowArrayCommon(
    MacroAssembler* masm,
    FastCloneShallowArrayStub::Mode mode,
  // Registers on entry:
  // r3: boilerplate literal array.
  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);

  // All sizes here are multiples of kPointerSize.
  int elements_size = 0;

  elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
      ? FixedDoubleArray::SizeFor(length)
      : FixedArray::SizeFor(length);

  int size = JSArray::kSize + elements_size;

  // Allocate both the JS array and the elements array in one big
  // allocation. This avoids multiple limit checks.
  __ AllocateInNewSpace(size,

  // Copy the JS array part.
  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
    if ((i != JSArray::kElementsOffset) || (length == 0)) {
      __ ldr(r1, FieldMemOperand(r3, i));
      __ str(r1, FieldMemOperand(r0, i));

  // Get hold of the elements array of the boilerplate and set up the
  // elements pointer in the resulting object.
  __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
  __ add(r2, r0, Operand(JSArray::kSize));
  __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));

  // Copy the elements array.
  ASSERT((elements_size % kPointerSize) == 0);
  __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);

void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  // [sp]: constant elements.
  // [sp + kPointerSize]: literal index.
  // [sp + (2 * kPointerSize)]: literals array.

  // Load boilerplate object into r3 and check if we need to create a
  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
  __ b(eq, &slow_case);

  FastCloneShallowArrayStub::Mode mode = mode_;
  if (mode == CLONE_ANY_ELEMENTS) {
    Label double_elements, check_fast_elements;
    __ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset));
    __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
    __ b(ne, &check_fast_elements);
    GenerateFastCloneShallowArrayCommon(masm, 0,
                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
    // Return and remove the on-stack parameters.
    __ add(sp, sp, Operand(3 * kPointerSize));

    __ bind(&check_fast_elements);
    __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
    __ b(ne, &double_elements);
    GenerateFastCloneShallowArrayCommon(masm, length_,
                                        CLONE_ELEMENTS, &slow_case);
    // Return and remove the on-stack parameters.
    __ add(sp, sp, Operand(3 * kPointerSize));

    __ bind(&double_elements);
    mode = CLONE_DOUBLE_ELEMENTS;
    // Fall through to generate the code to handle double elements.

  if (FLAG_debug_code) {
    Heap::RootListIndex expected_map_index;
    if (mode == CLONE_ELEMENTS) {
      message = "Expected (writable) fixed array";
      expected_map_index = Heap::kFixedArrayMapRootIndex;
    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
      message = "Expected (writable) fixed double array";
      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;

      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
      message = "Expected copy-on-write fixed array";
      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;

    __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
    __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
    __ CompareRoot(r3, expected_map_index);
    __ Assert(eq, message);

  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);

  // Return and remove the on-stack parameters.
  __ add(sp, sp, Operand(3 * kPointerSize));

  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);

void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  // [sp]: object literal flags.
  // [sp + kPointerSize]: constant properties.
  // [sp + (2 * kPointerSize)]: literal index.
  // [sp + (3 * kPointerSize)]: literals array.

  // Load boilerplate object into r3 and check if we need to create a
  __ ldr(r3, MemOperand(sp, 3 * kPointerSize));
  __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
  __ b(eq, &slow_case);

  // Check that the boilerplate contains only fast properties and we can
  // statically determine the instance size.
  int size = JSObject::kHeaderSize + length_ * kPointerSize;
  __ ldr(r0, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceSizeOffset));
  __ cmp(r0, Operand(size >> kPointerSizeLog2));
  __ b(ne, &slow_case);

  // Allocate the JS object and copy header together with all in-object
  // properties from the boilerplate.
  __ AllocateInNewSpace(size, r0, r1, r2, &slow_case, TAG_OBJECT);
  for (int i = 0; i < size; i += kPointerSize) {
    __ ldr(r1, FieldMemOperand(r3, i));
    __ str(r1, FieldMemOperand(r0, i));

  // Return and remove the on-stack parameters.
  __ add(sp, sp, Operand(4 * kPointerSize));

  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);

// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
// scratch register. Destroys the source register. No GC occurs during this
// stub so you don't have to set up the frame.
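//
// Worked example (illustrative only, not part of the original comment):
// the Smi 1 converts to the double 1.0, whose two words are 0x3FF00000
// (sign 0, biased exponent 1023, top 20 mantissa bits 0) and 0x00000000
// (low 32 mantissa bits).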
class ConvertToDoubleStub : public CodeStub {
  ConvertToDoubleStub(Register result_reg_1,
                      Register result_reg_2,
                      Register scratch_reg)
      : result1_(result_reg_1),
        result2_(result_reg_2),
        zeros_(scratch_reg) { }

  // Minor key encoding in 16 bits.
  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
  class OpBits: public BitField<Token::Value, 2, 14> {};

  Major MajorKey() { return ConvertToDouble; }
    // Encode the parameters in a unique 16 bit value.
    return result1_.code() +
           (result2_.code() << 4) +
           (source_.code() << 8) +
           (zeros_.code() << 12);

  void Generate(MacroAssembler* masm);

void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
  Register exponent = result1_;
  Register mantissa = result2_;

  // Convert from Smi to integer.
  __ mov(source_, Operand(source_, ASR, kSmiTagSize));
  // Move sign bit from source to destination. This works because the sign bit
  // in the exponent word of the double has the same position and polarity as
  // the 2's complement sign bit in a Smi.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
  // Subtract from 0 if source was negative.
  __ rsb(source_, source_, Operand(0, RelocInfo::NONE), LeaveCC, ne);

  // We have -1, 0 or 1, which we treat specially. Register source_ contains
  // absolute value: it is either equal to 1 (special case of -1 and 1),
  // greater than 1 (not a special case) or less than 1 (special case of 0).
  __ cmp(source_, Operand(1));
  __ b(gt, &not_special);

  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
  static const uint32_t exponent_word_for_1 =
      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
  // 1, 0 and -1 all have 0 for the second word.
  __ mov(mantissa, Operand(0, RelocInfo::NONE));

  __ bind(&not_special);
  // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5.
  // Gets the wrong answer for 0, but we already checked for that case above.
  __ CountLeadingZeros(zeros_, source_, mantissa);
  // Compute exponent and or it into the exponent register.
  // We use mantissa as a scratch register here. Use a fudge factor to
  // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
  // that fit in the ARM's constant field.
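  // (Illustrative note: an ARM data-processing immediate is an 8-bit value
  // rotated right by an even amount, so a constant such as 0x41d spans too
  // many significant bits to be encoded in one instruction, while 0x400 and
  // the small remainder both fit; hence the rsb/add split below.)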
  __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
  __ add(mantissa, mantissa, Operand(fudge));
          Operand(mantissa, LSL, HeapNumber::kExponentShift));
  // Shift up the source chopping the top bit off.
  __ add(zeros_, zeros_, Operand(1));
  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
  __ mov(source_, Operand(source_, LSL, zeros_));
  // Compute lower part of fraction (last 12 bits).
  __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
  // And the top (top 20 bits).
          Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));

void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
                                   FloatingPointHelper::Destination destination,
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
    __ vmov(d7.high(), scratch1);
    __ vcvt_f64_s32(d7, d7.high());
    __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
    __ vmov(d6.high(), scratch1);
    __ vcvt_f64_s32(d6, d6.high());
    if (destination == kCoreRegisters) {

    ASSERT(destination == kCoreRegisters);
    // Write Smi from r0 to r3 and r2 in double format.
    __ mov(scratch1, Operand(r0));
    ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
    __ Call(stub1.GetCode());
    // Write Smi from r1 to r1 and r0 in double format.
    __ mov(scratch1, Operand(r1));
    ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
    __ Call(stub2.GetCode());

void FloatingPointHelper::LoadOperands(
    MacroAssembler* masm,
    FloatingPointHelper::Destination destination,
    Register heap_number_map,

  // Load right operand (r0) to d7 or r2/r3.
  LoadNumber(masm, destination,
             r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);

  // Load left operand (r1) to d6 or r0/r1.
  LoadNumber(masm, destination,
             r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);

void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
                                     Destination destination,
                                     Register heap_number_map,
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");

  __ JumpIfSmi(object, &is_smi);
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);

  // Handle loading a double from a heap number.
  if (CpuFeatures::IsSupported(VFP3) &&
      destination == kVFPRegisters) {
    CpuFeatures::Scope scope(VFP3);
    // Load the double from tagged HeapNumber to double register.
    __ sub(scratch1, object, Operand(kHeapObjectTag));
    __ vldr(dst, scratch1, HeapNumber::kValueOffset);

    ASSERT(destination == kCoreRegisters);
    // Load the double from heap number to dst1 and dst2 in double format.
    __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));

  // Handle loading a double from a smi.
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    // Convert smi to double using VFP instructions.
    __ SmiUntag(scratch1, object);
    __ vmov(dst.high(), scratch1);
    __ vcvt_f64_s32(dst, dst.high());
    if (destination == kCoreRegisters) {
      // Load the converted smi to dst1 and dst2 in double format.
      __ vmov(dst1, dst2, dst);

    ASSERT(destination == kCoreRegisters);
    // Write smi to dst1 and dst2 double format.
    __ mov(scratch1, Operand(object));
    ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
    __ Call(stub.GetCode());

void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
                                               Register heap_number_map,
                                               DwVfpRegister double_scratch,
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");

  Label not_in_int32_range;

  __ JumpIfSmi(object, &is_smi);
  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
  __ cmp(scratch1, heap_number_map);
  __ b(ne, not_number);
  __ ConvertToInt32(object,
                    &not_in_int32_range);

  __ bind(&not_in_int32_range);
  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
  __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));

  __ EmitOutOfInt32RangeTruncate(dst,

  __ SmiUntag(dst, object);

void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
                                             Register int_scratch,
                                             Destination destination,
                                             DwVfpRegister double_dst,
                                             SwVfpRegister single_scratch) {
  ASSERT(!int_scratch.is(scratch2));
  ASSERT(!int_scratch.is(dst1));
  ASSERT(!int_scratch.is(dst2));

  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    __ vmov(single_scratch, int_scratch);
    __ vcvt_f64_s32(double_dst, single_scratch);
    if (destination == kCoreRegisters) {
      __ vmov(dst1, dst2, double_dst);

    Label fewer_than_20_useful_bits;
    // | s | exp | mantissa |
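    // Worked example (illustrative): for int_scratch = 5 (binary 101) the
    // leading-zero count is 29, so the unbiased exponent is 31 - 29 = 2 and
    // the remaining explicit mantissa bits are 01, i.e. 5.0 = 1.25 * 2^2.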
    __ cmp(int_scratch, Operand::Zero());
    __ mov(dst2, int_scratch);
    __ mov(dst1, int_scratch);

    // Preload the sign of the value.
    __ and_(dst2, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
    // Get the absolute value of the object (as an unsigned integer).
    __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi);

    // Get mantissa[51:20].

    // Get the position of the first set bit.
    __ CountLeadingZeros(dst1, int_scratch, scratch2);
    __ rsb(dst1, dst1, Operand(31));

    __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias));
    __ Bfi(dst2, scratch2, scratch2,
           HeapNumber::kExponentShift, HeapNumber::kExponentBits);

    // Clear the first non null bit.
    __ mov(scratch2, Operand(1));
    __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst1));

    __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
    // Get the number of bits to set in the lower part of the mantissa.
    __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
    __ b(mi, &fewer_than_20_useful_bits);
    // Set the higher 20 bits of the mantissa.
    __ orr(dst2, dst2, Operand(int_scratch, LSR, scratch2));
    __ rsb(scratch2, scratch2, Operand(32));
    __ mov(dst1, Operand(int_scratch, LSL, scratch2));

    __ bind(&fewer_than_20_useful_bits);
    __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
    __ mov(scratch2, Operand(int_scratch, LSL, scratch2));
    __ orr(dst2, dst2, scratch2);
    __ mov(dst1, Operand::Zero());

void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
                                                  Destination destination,
                                                  DwVfpRegister double_dst,
                                                  Register heap_number_map,
                                                  SwVfpRegister single_scratch,
  ASSERT(!scratch1.is(object) && !scratch2.is(object));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!heap_number_map.is(object) &&
         !heap_number_map.is(scratch1) &&
         !heap_number_map.is(scratch2));

  Label done, obj_is_not_smi;

  __ JumpIfNotSmi(object, &obj_is_not_smi);
  __ SmiUntag(scratch1, object);
  ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
                     scratch2, single_scratch);

  __ bind(&obj_is_not_smi);
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");

  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);

  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    // Load the double value.
    __ sub(scratch1, object, Operand(kHeapObjectTag));
    __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);

    __ EmitVFPTruncate(kRoundToZero,
                       kCheckForInexactConversion);

    // Jump to not_int32 if the operation did not succeed.

    if (destination == kCoreRegisters) {
      __ vmov(dst1, dst2, double_dst);

    ASSERT(!scratch1.is(object) && !scratch2.is(object));
    // Load the double value in the destination registers.
    __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));

    // Check for 0 and -0.
    __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask));
    __ orr(scratch1, scratch1, Operand(dst2));
    __ cmp(scratch1, Operand::Zero());

    // Check that the value can be exactly represented by a 32-bit integer.
    // Jump to not_int32 if that's not the case.
    DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);

    // dst1 and dst2 were trashed. Reload the double value.
    __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));

void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
                                            Register heap_number_map,
                                            DwVfpRegister double_scratch,
  ASSERT(!dst.is(object));
  ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
  ASSERT(!scratch1.is(scratch2) &&
         !scratch1.is(scratch3) &&
         !scratch2.is(scratch3));

  // Untag the object into the destination register.
  __ SmiUntag(dst, object);
  // Just return if the object is a smi.
  __ JumpIfSmi(object, &done);

  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");

  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);

  // Object is a heap number.
  // Convert the floating point value to a 32-bit integer.
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    SwVfpRegister single_scratch = double_scratch.low();
    // Load the double value.
    __ sub(scratch1, object, Operand(kHeapObjectTag));
    __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset);

    __ EmitVFPTruncate(kRoundToZero,
                       kCheckForInexactConversion);

    // Jump to not_int32 if the operation did not succeed.

    // Get the result in the destination register.
    __ vmov(dst, single_scratch);

    // Load the double value in the destination registers.
    __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));

    // Check for 0 and -0.
    __ bic(dst, scratch1, Operand(HeapNumber::kSignMask));
    __ orr(dst, scratch2, Operand(dst));
    __ cmp(dst, Operand::Zero());

    DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);

    // Registers state after DoubleIs32BitInteger.
    // dst: mantissa[51:20].

    // Shift back the higher bits of the mantissa.
    __ mov(dst, Operand(dst, LSR, scratch3));
    // Set the implicit first bit.
    __ rsb(scratch3, scratch3, Operand(32));
    __ orr(dst, dst, Operand(scratch2, LSL, scratch3));

    __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ tst(scratch1, Operand(HeapNumber::kSignMask));
    __ rsb(dst, dst, Operand::Zero(), LeaveCC, mi);

void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
  // Get exponent alone in scratch.
          HeapNumber::kExponentShift,
          HeapNumber::kExponentBits);

  // Subtract the bias from the exponent.
  __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC);

  // src1: higher (exponent) part of the double value.
  // src2: lower (mantissa) part of the double value.
  // scratch: unbiased exponent.

  // Fast cases. Check for obvious non 32-bit integer values.
  // Negative exponent cannot yield 32-bit integers.

  // Exponent greater than 31 cannot yield 32-bit integers.
  // Also, a positive value with an exponent equal to 31 is outside of the
  // signed 32-bit integer range.
  // Another way to put it is that if (exponent - signbit) > 30 then the
  // number cannot be represented as an int32.
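  // (Illustrative: with sign bit 0 an exponent of 31 means the value is at
  // least 2^31, which overflows int32; with sign bit 1 an exponent of 31 can
  // still be exactly -2^31, which fits. Hence the threshold of 30 below.)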
  __ sub(tmp, scratch, Operand(src1, LSR, 31));
  __ cmp(tmp, Operand(30));

  // - Bits [21:0] in the mantissa are not null.
  __ tst(src2, Operand(0x3fffff));

  // Otherwise the exponent needs to be big enough to shift left all the
  // non zero bits left. So we need the (30 - exponent) last bits of the
  // 31 higher bits of the mantissa to be null.
  // Because bits [21:0] are null, we can check instead that the
  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.

  // Get the 32 higher bits of the mantissa in dst.
          HeapNumber::kMantissaBitsInTopWord,
          32 - HeapNumber::kMantissaBitsInTopWord);
          Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord));

  // Create the mask and test the lower bits (of the higher bits).
  __ rsb(scratch, scratch, Operand(32));
  __ mov(src2, Operand(1));
  __ mov(src1, Operand(src2, LSL, scratch));
  __ sub(src1, src1, Operand(1));

void FloatingPointHelper::CallCCodeForDoubleOperation(
    MacroAssembler* masm,
    Register heap_number_result,
  // Using core registers:
  // r0: Left value (least significant part of mantissa).
  // r1: Left value (sign, exponent, top of mantissa).
  // r2: Right value (least significant part of mantissa).
  // r3: Right value (sign, exponent, top of mantissa).
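  //
  // (Illustrative note: in the softfloat ARM EABI each double occupies a
  // core-register pair, which is why the operands arrive in r0/r1 and r2/r3;
  // the hardfloat branches below move them into VFP registers instead.)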

  // Assert that heap_number_result is callee-saved.
  // We currently always use r5 to pass it.
  ASSERT(heap_number_result.is(r5));

  // Push the current return address before the C call. Return will be
  // through pop(pc) below.
  __ PrepareCallCFunction(0, 2, scratch);
  if (masm->use_eabi_hardfloat()) {
    CpuFeatures::Scope scope(VFP3);

  AllowExternalCallThatCantCauseGC scope(masm);
      ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);

  // Store answer in the overwritable heap number. Double returned in
  // registers r0 and r1 or in d0.
  if (masm->use_eabi_hardfloat()) {
    CpuFeatures::Scope scope(VFP3);
            FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));

    __ Strd(r0, r1, FieldMemOperand(heap_number_result,
                                    HeapNumber::kValueOffset));

  // Place heap_number_result in r0 and return to the pushed return address.
  __ mov(r0, Operand(heap_number_result));

bool WriteInt32ToHeapNumberStub::IsPregenerated() {
  // These variants are compiled ahead of time. See next method.
  if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {

  if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {

  // Other register combinations are generated as and when they are needed,
  // so it is unsafe to call them from stubs (we can't generate a stub while
  // we are generating a stub).


void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
  WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
  WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
  stub1.GetCode()->set_is_pregenerated(true);
  stub2.GetCode()->set_is_pregenerated(true);

// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
  Label max_negative_int;
  // the_int_ has the answer which is a signed int32 but not a Smi.
  // We test for the special value that has a different exponent. This test
  // has the neat side effect of setting the flags according to the sign.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ cmp(the_int_, Operand(0x80000000u));
  __ b(eq, &max_negative_int);
  // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
  uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
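  // (Illustrative: (1023 + 30) << 20 is 0x41D00000, the sign/exponent word
  // shared by every non-Smi int32 magnitude; the sign bit is OR'd in
  // separately below for negative values.)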
  __ mov(scratch_, Operand(non_smi_exponent));
  // Set the sign bit in scratch_ if the value was negative.
  __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
  // Subtract from 0 if the value was negative.
  __ rsb(the_int_, the_int_, Operand(0, RelocInfo::NONE), LeaveCC, cs);
  // We should be masking the implicit first digit of the mantissa away here,
  // but it just ends up combining harmlessly with the last digit of the
  // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
  __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
  __ str(scratch_, FieldMemOperand(the_heap_number_,
                                   HeapNumber::kExponentOffset));
  __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
  __ str(scratch_, FieldMemOperand(the_heap_number_,
                                   HeapNumber::kMantissaOffset));

  __ bind(&max_negative_int);
  // The max negative int32 is stored as a positive number in the mantissa of
  // a double because it uses a sign bit instead of using two's complement.
  // The actual mantissa bits stored are all 0 because the implicit most
  // significant 1 bit is not stored.
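  // (Illustrative: the stored bit pattern for -2^31 is therefore
  // 0xC1E00000 00000000 -- sign 1, biased exponent 1023 + 31 = 1054, all
  // explicit mantissa bits zero -- which is exactly -2147483648.0.)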
  non_smi_exponent += 1 << HeapNumber::kExponentShift;
  __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
  __ mov(ip, Operand(0, RelocInfo::NONE));
  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));

// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cond,
                                          bool never_nan_nan) {
  Label not_identical;
  Label heap_number, return_equal;
  __ b(ne, &not_identical);

  // The two objects are identical. If we know that one of them isn't NaN then
  // we now know they test equal.
  if (cond != eq || !never_nan_nan) {
    // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
    // so we do the second best thing - test it ourselves.
    // They are both equal and they are not both Smis so both of them are not
    // Smis. If it's not a heap number, then return equal.
    if (cond == lt || cond == gt) {
      __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);

      __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
      __ b(eq, &heap_number);
      // Comparing JS objects with <=, >= is complicated.

      __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));

      // Normally here we fall through to return_equal, but undefined is
      // special: (undefined == undefined) == true, but
      // (undefined <= undefined) == false! See ECMAScript 11.8.5.
      if (cond == le || cond == ge) {
        __ cmp(r4, Operand(ODDBALL_TYPE));
        __ b(ne, &return_equal);
        __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
        __ b(ne, &return_equal);

        // undefined <= undefined should fail.
        __ mov(r0, Operand(GREATER));

        // undefined >= undefined should fail.
        __ mov(r0, Operand(LESS));

  __ bind(&return_equal);
    __ mov(r0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cond == gt) {
    __ mov(r0, Operand(LESS));     // Things aren't greater than themselves.

    __ mov(r0, Operand(EQUAL));    // Things are <=, >=, ==, === themselves.

  if (cond != eq || !never_nan_nan) {
    // For less and greater we don't have to check for NaN since the result of
    // x < x is false regardless. For the others here is some code to check
    if (cond != lt && cond != gt) {
      __ bind(&heap_number);
      // It is a heap number, so return non-equal if it's NaN and equal if it's
      // The representation of NaN values has all exponent bits (52..62) set,
      // and not all mantissa bits (0..51) clear.
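      // (Illustrative: 0x7FF00000 00000000 is +Infinity -- exponent all ones,
      // mantissa zero -- while any pattern with an all-ones exponent and a
      // non-zero mantissa, e.g. 0x7FF80000 00000000, is a NaN.)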
      // Read top bits of double representation (second word of value).
      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
      // Test that exponent bits are all set.
      __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
      // NaNs have all-one exponents so they sign extend to -1.
      __ cmp(r3, Operand(-1));
      __ b(ne, &return_equal);

      // Shift out flag and all exponent bits, retaining only mantissa.
      __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
      // Or with all low-bits of mantissa.
      __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
      __ orr(r0, r3, Operand(r2), SetCC);
      // For equal we already have the right value in r0: Return zero (equal)
      // if all bits in mantissa are zero (it's an Infinity) and non-zero if
      // not (it's a NaN). For <= and >= we need to load r0 with the failing
      // value if it's a NaN.

        // All-zero means Infinity means equal.

        __ mov(r0, Operand(GREATER));  // NaN <= NaN should fail.

        __ mov(r0, Operand(LESS));     // NaN >= NaN should fail.

    // No fall through here.

  __ bind(&not_identical);

// See comment at call site.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  __ JumpIfSmi(rhs, &rhs_is_smi);

  // Lhs is a Smi. Check whether the rhs is a heap number.
  __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
  // If rhs is not a number and lhs is a Smi then strict equality cannot
  // succeed. Return non-equal.
  // If rhs is r0 then there is already a non zero value in it.
  __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);

  // Smi compared non-strictly with a non-Smi non-heap-number. Call

  // Lhs is a smi, rhs is a number.
  if (CpuFeatures::IsSupported(VFP3)) {
    // Convert lhs to a double in d7.
    CpuFeatures::Scope scope(VFP3);
    __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
    // Load the double from rhs, tagged HeapNumber r0, to d6.
    __ sub(r7, rhs, Operand(kHeapObjectTag));
    __ vldr(d6, r7, HeapNumber::kValueOffset);

    // Convert lhs to a double in r2, r3.
    __ mov(r7, Operand(lhs));
    ConvertToDoubleStub stub1(r3, r2, r7, r6);
    __ Call(stub1.GetCode());
    // Load rhs to a double in r0, r1.
    __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));

  // We now have both loaded as doubles but we can skip the lhs nan check
  // since it's a smi.
  __ jmp(lhs_not_nan);

  __ bind(&rhs_is_smi);
  // Rhs is a smi. Check whether the non-smi lhs is a heap number.
  __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
  // If lhs is not a number and rhs is a smi then strict equality cannot
  // succeed. Return non-equal.
  // If lhs is r0 then there is already a non zero value in it.
  __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);

  // Smi compared non-strictly with a non-smi non-heap-number. Call

  // Rhs is a smi, lhs is a heap number.
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    // Load the double from lhs, tagged HeapNumber r1, to d7.
    __ sub(r7, lhs, Operand(kHeapObjectTag));
    __ vldr(d7, r7, HeapNumber::kValueOffset);
    // Convert rhs to a double in d6.
    __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);

    // Load lhs to a double in r2, r3.
    __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
    // Convert rhs to a double in r0, r1.
    __ mov(r7, Operand(rhs));
    ConvertToDoubleStub stub2(r1, r0, r7, r6);
    __ Call(stub2.GetCode());

  // Fall through to both_loaded_as_doubles.

void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
  Register rhs_exponent = exp_first ? r0 : r1;
  Register lhs_exponent = exp_first ? r2 : r3;
  Register rhs_mantissa = exp_first ? r1 : r0;
  Register lhs_mantissa = exp_first ? r3 : r2;
  Label one_is_nan, neither_is_nan;

          HeapNumber::kExponentShift,
          HeapNumber::kExponentBits);
  // NaNs have all-one exponents so they sign extend to -1.
  __ cmp(r4, Operand(-1));
  __ b(ne, lhs_not_nan);
          Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
  __ b(ne, &one_is_nan);
  __ cmp(lhs_mantissa, Operand(0, RelocInfo::NONE));
  __ b(ne, &one_is_nan);

  __ bind(lhs_not_nan);
          HeapNumber::kExponentShift,
          HeapNumber::kExponentBits);
  // NaNs have all-one exponents so they sign extend to -1.
  __ cmp(r4, Operand(-1));
  __ b(ne, &neither_is_nan);
          Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
  __ b(ne, &one_is_nan);
  __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE));
  __ b(eq, &neither_is_nan);

  __ bind(&one_is_nan);
  // NaN comparisons always fail.
  // Load whatever we need in r0 to make the comparison fail.
  if (cond == lt || cond == le) {
    __ mov(r0, Operand(GREATER));

    __ mov(r0, Operand(LESS));

  __ bind(&neither_is_nan);

// See comment at call site.
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
                                          Condition cond) {
  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
  Register rhs_exponent = exp_first ? r0 : r1;
  Register lhs_exponent = exp_first ? r2 : r3;
  Register rhs_mantissa = exp_first ? r1 : r0;
  Register lhs_mantissa = exp_first ? r3 : r2;

  // r0, r1, r2, r3 have the two doubles. Neither is a NaN.

  // Doubles are not equal unless they have the same bit pattern.
  // Exception: 0 and -0.
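  // (Illustrative: +0.0 is the bit pattern 0x00000000 00000000 and -0.0 is
  // 0x80000000 00000000, yet the two must compare equal.)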
  __ cmp(rhs_mantissa, Operand(lhs_mantissa));
  __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
  // Return non-zero if the numbers are unequal.

  __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
  // If exponents are equal then return 0.

  // Exponents are unequal. The only way we can return that the numbers
  // are equal is if one is -0 and the other is 0. We already dealt
  // with the case where both are -0 or both are 0.
  // We start by seeing if the mantissas (that are equal) or the bottom
  // 31 bits of the rhs exponent are non-zero. If so we return not
  __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
  __ mov(r0, Operand(r4), LeaveCC, ne);

  // Now they are equal if and only if the lhs exponent is zero in its
  __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));

  // Call a native function to do a comparison between two non-NaNs.
  // Call C routine that may not cause GC or other trouble.
  __ PrepareCallCFunction(0, 2, r5);
  if (masm->use_eabi_hardfloat()) {
    CpuFeatures::Scope scope(VFP3);
    __ vmov(d0, r0, r1);
    __ vmov(d1, r2, r3);

  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
  __ pop(pc);  // Return.

// See comment at call site.
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs) {
  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  // If either operand is a JS object or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
  Label first_non_object;
  // Get the type of the first operand into r2 and compare it with
  // FIRST_SPEC_OBJECT_TYPE.
  __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
  __ b(lt, &first_non_object);

  // Return non-zero (r0 is not zero)
  Label return_not_equal;
  __ bind(&return_not_equal);

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ cmp(r2, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
  __ b(ge, &return_not_equal);

  // Check for oddballs: true, false, null, undefined.
  __ cmp(r3, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  // Now that we have the types we might as well check for symbol-symbol.
  // Ensure that no non-strings have the symbol bit set.
  STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
  STATIC_ASSERT(kSymbolTag != 0);
  __ and_(r2, r2, Operand(r3));
  __ tst(r2, Operand(kIsSymbolMask));
  __ b(ne, &return_not_equal);

// See comment at call site.
static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
  __ b(ne, not_heap_numbers);
  __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ b(ne, slow);  // First was a heap number, second wasn't. Go slow case.

  // Both are heap numbers. Load them up then jump to the code we have
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    __ sub(r7, rhs, Operand(kHeapObjectTag));
    __ vldr(d6, r7, HeapNumber::kValueOffset);
    __ sub(r7, lhs, Operand(kHeapObjectTag));
    __ vldr(d7, r7, HeapNumber::kValueOffset);

    __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
    __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));

  __ jmp(both_loaded_as_doubles);

// Fast negative check for symbol-to-symbol equality.
static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
                                         Label* possible_strings,
                                         Label* not_both_strings) {
  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  // r2 is object type of rhs.
  // Ensure that no non-strings have the symbol bit set.
  STATIC_ASSERT(kSymbolTag != 0);
  __ tst(r2, Operand(kIsNotStringMask));
  __ b(ne, &object_test);
  __ tst(r2, Operand(kIsSymbolMask));
  __ b(eq, possible_strings);
  __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
  __ b(ge, not_both_strings);
  __ tst(r3, Operand(kIsSymbolMask));
  __ b(eq, possible_strings);

  // Both are symbols. We already checked they weren't the same pointer
  // so they are not equal.
  __ mov(r0, Operand(NOT_EQUAL));

  __ bind(&object_test);
  __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ b(lt, not_both_strings);
  __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
  __ b(lt, not_both_strings);
  // If both objects are undetectable, they are equal. Otherwise, they
  // are not equal, since they are different objects and an object is not
  // equal to undefined.
  __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
  __ and_(r0, r2, Operand(r3));
  __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
  __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));

void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch3;

  // Load the number string cache.
  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
  // Divide length by two (length is a smi).
  __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
  __ sub(mask, mask, Operand(1));  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
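  // (Illustrative: for the double 1.0 the two words are 0x3FF00000 and
  // 0x00000000, so the hash is their xor, 0x3FF00000, before masking.)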
  Isolate* isolate = masm->isolate();
  Label load_result_from_cache;
  if (!object_is_smi) {
    __ JumpIfSmi(object, &is_smi);
    if (CpuFeatures::IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);
                  Heap::kHeapNumberMapRootIndex,

      STATIC_ASSERT(8 == kDoubleSize);
             Operand(HeapNumber::kValueOffset - kHeapObjectTag));
      __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
      __ eor(scratch1, scratch1, Operand(scratch2));
      __ and_(scratch1, scratch1, Operand(mask));

      // Calculate address of entry in string cache: each entry consists
      // of two pointer sized fields.
             number_string_cache,
             Operand(scratch1, LSL, kPointerSizeLog2 + 1));

      Register probe = mask;
             FieldMemOperand(scratch1, FixedArray::kHeaderSize));
      __ JumpIfSmi(probe, not_found);
      __ sub(scratch2, object, Operand(kHeapObjectTag));
      __ vldr(d0, scratch2, HeapNumber::kValueOffset);
      __ sub(probe, probe, Operand(kHeapObjectTag));
      __ vldr(d1, probe, HeapNumber::kValueOffset);
      __ VFPCompareAndSetFlags(d0, d1);
      __ b(ne, not_found);  // The cache did not contain this value.
      __ b(&load_result_from_cache);

  Register scratch = scratch1;
  __ and_(scratch, mask, Operand(object, ASR, 1));
  // Calculate address of entry in string cache: each entry consists
  // of two pointer sized fields.
         number_string_cache,
         Operand(scratch, LSL, kPointerSizeLog2 + 1));

  // Check if the entry is the smi we are looking for.
  Register probe = mask;
  __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
  __ cmp(object, probe);
  __ b(ne, not_found);

  // Get the result from the cache.
  __ bind(&load_result_from_cache);
         FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
  __ IncrementCounter(isolate->counters()->number_to_string_native(),

void NumberToStringStub::Generate(MacroAssembler* masm) {
  __ ldr(r1, MemOperand(sp, 0));

  // Generate code to look up the number in the number string cache.
  GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
  __ add(sp, sp, Operand(1 * kPointerSize));

  // Handle number to string in the runtime system if not found in the cache.
  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);

// On entry lhs_ and rhs_ are the values to be compared.
// On exit r0 is 0, positive or negative to indicate the result of
void CompareStub::Generate(MacroAssembler* masm) {
  ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
         (lhs_.is(r1) && rhs_.is(r0)));

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles, lhs_not_nan;

  if (include_smi_compare_) {
    Label not_two_smis, smi_done;
    __ JumpIfNotSmi(r2, &not_two_smis);
    __ mov(r1, Operand(r1, ASR, 1));
    __ sub(r0, r1, Operand(r0, ASR, 1));
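    // The two instructions above rely on the one-bit Smi tag: ASR #1 untags,
    // e.g. Smi 3 is encoded as 6 and Smi 5 as 10, and (6 >> 1) - (10 >> 1)
    // = -2, whose sign already encodes the comparison result. (Illustrative
    // note; which register holds lhs depends on the stub's configuration.)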
    __ bind(&not_two_smis);
  } else if (FLAG_debug_code) {
    __ tst(r2, Operand(kSmiTagMask));
    __ Assert(ne, "CompareStub: unexpected smi operands.");

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical. Either returns the answer
  // or goes to slow. Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  ASSERT_EQ(0, Smi::FromInt(0));
  __ and_(r2, lhs_, Operand(rhs_));
  __ JumpIfNotSmi(r2, &not_smis);
  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 3) Fall through to both_loaded_as_doubles.
  // 4) Jump to lhs_not_nan.
  // In cases 3 and 4 we have found out we were dealing with a number-number
  // comparison. If VFP3 is supported the double values of the numbers have
  // been loaded into d7 and d6. Otherwise, the double values have been loaded
  // into r0, r1, r2, and r3.
  EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);

  __ bind(&both_loaded_as_doubles);
  // The arguments have been converted to doubles and stored in d6 and d7, if
  // VFP3 is supported, or in r0, r1, r2, and r3.
  Isolate* isolate = masm->isolate();
  if (CpuFeatures::IsSupported(VFP3)) {
    __ bind(&lhs_not_nan);
    CpuFeatures::Scope scope(VFP3);

    // ARMv7 VFP3 instructions to implement double precision comparison.
    __ VFPCompareAndSetFlags(d7, d6);

    __ mov(r0, Operand(EQUAL), LeaveCC, eq);
    __ mov(r0, Operand(LESS), LeaveCC, lt);
    __ mov(r0, Operand(GREATER), LeaveCC, gt);

    // If one of the sides was a NaN then the v flag is set. Load r0 with
    // whatever it takes to make the comparison fail, since comparisons with NaN
    if (cc_ == lt || cc_ == le) {
      __ mov(r0, Operand(GREATER));

      __ mov(r0, Operand(LESS));

    // Checks for NaN in the doubles we have loaded. Can return the answer or
    // fall through if neither is a NaN. Also binds lhs_not_nan.
    EmitNanCheck(masm, &lhs_not_nan, cc_);
    // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
    // answer. Never falls through.
    EmitTwoNonNanDoubleComparison(masm, cc_);

  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi. The objects are in rhs_ and lhs_.

  // This returns non-equal for some object types, or falls through if it
  EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);

  Label check_for_symbols;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison. Can jump to slow case,
  // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
  // that case. If the inputs are not doubles then jumps to check_for_symbols.
  // In this case r2 will contain the type of rhs_. Never falls through.
  EmitCheckForTwoHeapNumbers(masm,
                             &both_loaded_as_doubles,
                             &flat_string_check);

  __ bind(&check_for_symbols);
  // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
  if (cc_ == eq && !strict_) {
    // Returns an answer for two symbols or two detectable objects.
    // Otherwise jumps to string case or not both strings case.
    // Assumes that r2 is the type of rhs_ on entry.
    EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);

  // Check for both being sequential ASCII strings, and inline if that is the
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);

  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);

    StringCompareStub::GenerateFlatAsciiStringEquals(masm,

    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,

  // Never falls through to here.

  __ Push(lhs_, rhs_);
  // Figure out which native to call and set up the arguments.
  Builtins::JavaScript native;

  native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;

  native = Builtins::COMPARE;
  int ncr;  // NaN compare result
  if (cc_ == lt || cc_ == le) {

    ASSERT(cc_ == gt || cc_ == ge);  // remaining cases

  __ mov(r0, Operand(Smi::FromInt(ncr)));

  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ InvokeBuiltin(native, JUMP_FUNCTION);

// The stub expects its argument in the tos_ register and returns its result in
// it, too: zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
  // This stub overrides SometimesSetsUpAFrame() to return false. That means
  // we cannot call anything that could cause a GC from this stub.
  // This stub uses VFP3 instructions.
  CpuFeatures::Scope scope(VFP3);

  const Register map = r9.is(tos_) ? r7 : r9;

  // undefined -> false.
  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);

  // Boolean -> its value.
  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);

  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);

  if (types_.Contains(SMI)) {
    // Smis: 0 -> false, all other -> true
    __ tst(tos_, Operand(kSmiTagMask));
    // tos_ contains the correct return value already
  } else if (types_.NeedsMap()) {
    // If we need a map later and have a Smi -> patch.
    __ JumpIfSmi(tos_, &patch);

  if (types_.NeedsMap()) {
    __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset));

    if (types_.CanBeUndetectable()) {
      __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
      __ tst(ip, Operand(1 << Map::kIsUndetectable));
      // Undetectable -> false.
      __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);

  if (types_.Contains(SPEC_OBJECT)) {
    // Spec object -> true.
    __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
    // tos_ contains the correct non-zero return value already.

  if (types_.Contains(STRING)) {
    // String value -> false iff empty.
    __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
    __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset), lt);
    __ Ret(lt);  // the string length is OK as the return value
1848
if (types_.Contains(HEAP_NUMBER)) {
1849
// Heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
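// For reference, a plain C++ sketch of the double-to-boolean rule implemented
// below (an illustrative helper, not part of this stub):
//   bool HeapNumberToBoolean(double value) {
//     // +0, -0 and NaN map to false; every other double maps to true.
//     return value == value && value != 0.0;
//   }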
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
__ b(ne, &not_heap_number);
__ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
__ VFPCompareAndSetFlags(d1, 0.0);
// "tos_" is a register, and contains a non-zero value by default.
// Hence we only need to overwrite "tos_" with zero to return false for
// FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
__ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for FP_ZERO
__ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN
__ bind(&not_heap_number);
GenerateTypeTransition(masm);
1869
void ToBooleanStub::CheckOddball(MacroAssembler* masm,
1871
Heap::RootListIndex value,
1873
if (types_.Contains(type)) {
1874
// If we see an expected oddball, return its ToBoolean value tos_.
1875
__ LoadRoot(ip, value);
1877
// The value of a root is never NULL, so we can avoid loading a non-null
1878
// value into tos_ when we want to return 'true'.
1880
__ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq);
1887
void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
1889
__ mov(r3, Operand(tos_));
1891
__ mov(r2, Operand(Smi::FromInt(tos_.code())));
1892
__ mov(r1, Operand(Smi::FromInt(types_.ToByte())));
1893
__ Push(r3, r2, r1);
1894
// Patch the caller to an appropriate specialized stub and return the
1895
// operation result to the caller of the stub.
1896
__ TailCallExternalReference(
1897
ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
1903
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
// restore them.
__ stm(db_w, sp, kCallerSaved | lr.bit());
1908
if (save_doubles_ == kSaveFPRegs) {
1909
CpuFeatures::Scope scope(VFP3);
1910
__ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
1911
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
1912
DwVfpRegister reg = DwVfpRegister::from_code(i);
1913
__ vstr(reg, MemOperand(sp, i * kDoubleSize));
1916
const int argument_count = 1;
1917
const int fp_argument_count = 0;
1918
const Register scratch = r1;
1920
AllowExternalCallThatCantCauseGC scope(masm);
1921
__ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
1922
__ mov(r0, Operand(ExternalReference::isolate_address()));
1924
ExternalReference::store_buffer_overflow_function(masm->isolate()),
1926
if (save_doubles_ == kSaveFPRegs) {
1927
CpuFeatures::Scope scope(VFP3);
1928
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
1929
DwVfpRegister reg = DwVfpRegister::from_code(i);
1930
__ vldr(reg, MemOperand(sp, i * kDoubleSize));
1932
__ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
1934
__ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
1938
void UnaryOpStub::PrintName(StringStream* stream) {
1939
const char* op_name = Token::Name(op_);
1940
const char* overwrite_name = NULL; // Make g++ happy.
1942
case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
1943
case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
1945
stream->Add("UnaryOpStub_%s_%s_%s",
1948
UnaryOpIC::GetName(operand_type_));
1952
// TODO(svenpanne): Use virtual functions instead of switch.
1953
void UnaryOpStub::Generate(MacroAssembler* masm) {
1954
switch (operand_type_) {
1955
case UnaryOpIC::UNINITIALIZED:
1956
GenerateTypeTransition(masm);
1958
case UnaryOpIC::SMI:
1959
GenerateSmiStub(masm);
1961
case UnaryOpIC::HEAP_NUMBER:
1962
GenerateHeapNumberStub(masm);
1964
case UnaryOpIC::GENERIC:
1965
GenerateGenericStub(masm);
1971
void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
1972
__ mov(r3, Operand(r0)); // the operand
1973
__ mov(r2, Operand(Smi::FromInt(op_)));
1974
__ mov(r1, Operand(Smi::FromInt(mode_)));
1975
__ mov(r0, Operand(Smi::FromInt(operand_type_)));
1976
__ Push(r3, r2, r1, r0);
1978
__ TailCallExternalReference(
1979
ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
1983
// TODO(svenpanne): Use virtual functions instead of switch.
1984
void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
1987
GenerateSmiStubSub(masm);
1989
case Token::BIT_NOT:
1990
GenerateSmiStubBitNot(masm);
1998
void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
1999
Label non_smi, slow;
2000
GenerateSmiCodeSub(masm, &non_smi, &slow);
2003
GenerateTypeTransition(masm);
2007
void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
2009
GenerateSmiCodeBitNot(masm, &non_smi);
2011
GenerateTypeTransition(masm);
2015
void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
2018
__ JumpIfNotSmi(r0, non_smi);
2020
// The result of negating zero or the smallest negative smi is not a smi.
2021
__ bic(ip, r0, Operand(0x80000000), SetCC);
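// A rough sketch of the test above (hypothetical helper, not in this file):
// the bic clears the sign bit and sets the flags, so the slow path is taken
// exactly for the tagged values 0 (whose negation is -0, a heap number) and
// 0x80000000 (the most negative smi, whose negation overflows the smi range).
//   bool NegationStaysSmi(uint32_t tagged_smi) {
//     return (tagged_smi & 0x7fffffffu) != 0;
//   }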
// Return '0 - value'.
2025
__ rsb(r0, r0, Operand(0, RelocInfo::NONE));
2030
void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
2032
__ JumpIfNotSmi(r0, non_smi);
2034
// Flip bits and revert inverted smi-tag.
2035
__ mvn(r0, Operand(r0));
2036
__ bic(r0, r0, Operand(kSmiTagMask));
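// Why this works, assuming the usual one-bit smi tag of 0 in the least
// significant bit: for a tagged smi t == (v << 1),
//   ~t == (~v << 1) | 1,
// so clearing the tag bit afterwards leaves (~v << 1), which is exactly the
// tagged representation of ~v, with no untagging needed.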
// TODO(svenpanne): Use virtual functions instead of switch.
2042
void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
2045
GenerateHeapNumberStubSub(masm);
2047
case Token::BIT_NOT:
2048
GenerateHeapNumberStubBitNot(masm);
2056
void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
2057
Label non_smi, slow, call_builtin;
2058
GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
2060
GenerateHeapNumberCodeSub(masm, &slow);
2062
GenerateTypeTransition(masm);
2063
__ bind(&call_builtin);
2064
GenerateGenericCodeFallback(masm);
2068
void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
2069
Label non_smi, slow;
2070
GenerateSmiCodeBitNot(masm, &non_smi);
2072
GenerateHeapNumberCodeBitNot(masm, &slow);
2074
GenerateTypeTransition(masm);
2077
void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
2079
EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
2080
// r0 is a heap number. Get a new heap number in r1.
2081
if (mode_ == UNARY_OVERWRITE) {
2082
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
2083
__ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
2084
__ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
2086
Label slow_allocate_heapnumber, heapnumber_allocated;
2087
__ AllocateHeapNumber(r1, r2, r3, r6, &slow_allocate_heapnumber);
2088
__ jmp(&heapnumber_allocated);
2090
__ bind(&slow_allocate_heapnumber);
2092
FrameScope scope(masm, StackFrame::INTERNAL);
2094
__ CallRuntime(Runtime::kNumberAlloc, 0);
2095
__ mov(r1, Operand(r0));
2099
__ bind(&heapnumber_allocated);
2100
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
2101
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
2102
__ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
2103
__ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
2104
__ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
2105
__ mov(r0, Operand(r1));
2111
void UnaryOpStub::GenerateHeapNumberCodeBitNot(
2112
MacroAssembler* masm, Label* slow) {
2115
EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
// Convert the heap number in r0 to an untagged integer in r1.
__ ConvertToInt32(r0, r1, r2, r3, d0, slow);
2119
// Do the bitwise operation and check if the result fits in a smi.
2121
__ mvn(r1, Operand(r1));
2122
__ add(r2, r1, Operand(0x40000000), SetCC);
2123
__ b(mi, &try_float);
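// The add-with-flags above is the usual smi range check: a 32-bit value v can
// be smi-tagged iff v is in [-2^30, 2^30 - 1], which holds exactly when
// v + 0x40000000 does not come out negative. A C++ sketch (illustrative only):
//   bool FitsInSmi(int32_t v) {
//     return static_cast<uint32_t>(v) + 0x40000000u < 0x80000000u;
//   }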
// Tag the result as a smi and we're done.
2126
__ mov(r0, Operand(r1, LSL, kSmiTagSize));
2129
// Try to store the result in a heap number.
2130
__ bind(&try_float);
2131
if (mode_ == UNARY_NO_OVERWRITE) {
2132
Label slow_allocate_heapnumber, heapnumber_allocated;
2133
// Allocate a new heap number without zapping r0, which we need if it fails.
2134
__ AllocateHeapNumber(r2, r3, r4, r6, &slow_allocate_heapnumber);
2135
__ jmp(&heapnumber_allocated);
2137
__ bind(&slow_allocate_heapnumber);
2139
FrameScope scope(masm, StackFrame::INTERNAL);
2140
__ push(r0); // Push the heap number, not the untagged int32.
2141
__ CallRuntime(Runtime::kNumberAlloc, 0);
2142
__ mov(r2, r0); // Move the new heap number into r2.
2143
// Get the heap number into r0, now that the new heap number is in r2.
2147
// Convert the heap number in r0 to an untagged integer in r1.
// This can't go slow-case because it's the same number we already
// converted once.
__ ConvertToInt32(r0, r1, r3, r4, d0, &impossible);
2151
__ mvn(r1, Operand(r1));
2153
__ bind(&heapnumber_allocated);
2154
__ mov(r0, r2); // Move newly allocated heap number to r0.
2157
if (CpuFeatures::IsSupported(VFP3)) {
2158
// Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
2159
CpuFeatures::Scope scope(VFP3);
2161
__ vcvt_f64_s32(d0, s0);
2162
__ sub(r2, r0, Operand(kHeapObjectTag));
2163
__ vstr(d0, r2, HeapNumber::kValueOffset);
2166
// WriteInt32ToHeapNumberStub does not trigger GC, so we do not
2167
// have to set up a frame.
2168
WriteInt32ToHeapNumberStub stub(r1, r0, r2);
2169
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2172
__ bind(&impossible);
2173
if (FLAG_debug_code) {
2174
__ stop("Incorrect assumption in bit-not stub");
2179
// TODO(svenpanne): Use virtual functions instead of switch.
2180
void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
2183
GenerateGenericStubSub(masm);
2185
case Token::BIT_NOT:
2186
GenerateGenericStubBitNot(masm);
2194
void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
2195
Label non_smi, slow;
2196
GenerateSmiCodeSub(masm, &non_smi, &slow);
2198
GenerateHeapNumberCodeSub(masm, &slow);
2200
GenerateGenericCodeFallback(masm);
2204
void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
2205
Label non_smi, slow;
2206
GenerateSmiCodeBitNot(masm, &non_smi);
2208
GenerateHeapNumberCodeBitNot(masm, &slow);
2210
GenerateGenericCodeFallback(masm);
2214
void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
2215
// Handle the slow case by jumping to the JavaScript builtin.
2219
__ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
2221
case Token::BIT_NOT:
2222
__ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
2230
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2235
__ mov(r2, Operand(Smi::FromInt(MinorKey())));
2236
__ mov(r1, Operand(Smi::FromInt(op_)));
2237
__ mov(r0, Operand(Smi::FromInt(operands_type_)));
2238
__ Push(r2, r1, r0);
2240
__ TailCallExternalReference(
2241
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
2248
void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
2249
MacroAssembler* masm) {
2254
void BinaryOpStub::Generate(MacroAssembler* masm) {
2255
// Explicitly allow generation of nested stubs. It is safe here because
2256
// generation code does not use any raw pointers.
2257
AllowStubCallsScope allow_stub_calls(masm, true);
2259
switch (operands_type_) {
2260
case BinaryOpIC::UNINITIALIZED:
2261
GenerateTypeTransition(masm);
2263
case BinaryOpIC::SMI:
2264
GenerateSmiStub(masm);
2266
case BinaryOpIC::INT32:
2267
GenerateInt32Stub(masm);
2269
case BinaryOpIC::HEAP_NUMBER:
2270
GenerateHeapNumberStub(masm);
2272
case BinaryOpIC::ODDBALL:
2273
GenerateOddballStub(masm);
2275
case BinaryOpIC::BOTH_STRING:
2276
GenerateBothStringStub(masm);
2278
case BinaryOpIC::STRING:
2279
GenerateStringStub(masm);
2281
case BinaryOpIC::GENERIC:
2282
GenerateGeneric(masm);
2290
void BinaryOpStub::PrintName(StringStream* stream) {
2291
const char* op_name = Token::Name(op_);
2292
const char* overwrite_name;
2294
case NO_OVERWRITE: overwrite_name = "Alloc"; break;
2295
case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
2296
case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
2297
default: overwrite_name = "UnknownOverwrite"; break;
2299
stream->Add("BinaryOpStub_%s_%s_%s",
2302
BinaryOpIC::GetName(operands_type_));
2306
void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
2308
Register right = r0;
2309
Register scratch1 = r7;
2310
Register scratch2 = r9;
2312
ASSERT(right.is(r0));
2313
STATIC_ASSERT(kSmiTag == 0);
2315
Label not_smi_result;
2318
__ add(right, left, Operand(right), SetCC); // Add optimistically.
2320
__ sub(right, right, Operand(left)); // Revert optimistic add.
2323
__ sub(right, left, Operand(right), SetCC); // Subtract optimistically.
2325
__ sub(right, left, Operand(right)); // Revert optimistic subtract.
2328
// Remove tag from one of the operands. This way the multiplication result
2329
// will be a smi if it fits the smi range.
2330
__ SmiUntag(ip, right);
2331
// Do multiplication
2332
// scratch1 = lower 32 bits of ip * left.
2333
// scratch2 = higher 32 bits of ip * left.
2334
__ smull(scratch1, scratch2, left, ip);
2335
// Check for overflowing the smi range - no overflow if higher 33 bits of
// the result are identical.
__ mov(ip, Operand(scratch1, ASR, 31));
__ cmp(ip, Operand(scratch2));
__ b(ne, &not_smi_result);
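// Illustrative C++ sketch of the overflow test above (hypothetical helper,
// not part of this file): the 64-bit product fits in 32 signed bits iff the
// high word equals the sign extension of bit 31 of the low word, i.e. the top
// 33 bits are all identical.
//   bool ProductFits32Bits(int32_t a, int32_t b) {
//     int64_t product = static_cast<int64_t>(a) * b;
//     int32_t lo = static_cast<int32_t>(product);
//     int32_t hi = static_cast<int32_t>(product >> 32);
//     return hi == (lo >> 31);
//   }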
// Go slow on zero result to handle -0.
2341
__ tst(scratch1, Operand(scratch1));
2342
__ mov(right, Operand(scratch1), LeaveCC, ne);
2344
// We need -0 if we were multiplying a negative number with 0 to get 0.
2345
// We know one of them was zero.
2346
__ add(scratch2, right, Operand(left), SetCC);
2347
__ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
2348
__ Ret(pl); // Return smi 0 if the non-zero one was positive.
2349
// We fall through here if we multiplied a negative number with 0, because
2350
// that would mean we should produce -0.
2353
// Check for power of two on the right hand side.
__ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
// Check for positive and no remainder (scratch1 contains right - 1).
2356
__ orr(scratch2, scratch1, Operand(0x80000000u));
2357
__ tst(left, scratch2);
__ b(ne, &not_smi_result);
// Perform division by shifting.
2361
__ CountLeadingZeros(scratch1, scratch1, scratch2);
2362
__ rsb(scratch1, scratch1, Operand(31));
2363
__ mov(right, Operand(left, LSR, scratch1));
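// Sketch of the shift-based division above (illustrative only): both operands
// are tagged as (value << 1), and the untagged divisor is 2^m, so the tagged
// divisor is 2^(m+1) and m == 31 - clz(tagged_divisor - 1). Since the dividend
// is known to be positive and evenly divisible, shifting the tagged dividend
// right by m yields the tagged quotient directly:
//   tagged_quotient = tagged_dividend >> (31 - clz(tagged_divisor - 1));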
// Check for two positive smis.
2368
__ orr(scratch1, left, Operand(right));
2369
__ tst(scratch1, Operand(0x80000000u | kSmiTagMask));
__ b(ne, &not_smi_result);
// Check for power of two on the right hand side.
__ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
// Perform modulus by masking.
2376
__ and_(right, left, Operand(scratch1));
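// This relies on the usual power-of-two identity, x % d == x & (d - 1) for
// non-negative x and d a power of two; it carries over to the tagged values
// because the smi tag occupies the (zero) low bit on both sides.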
__ orr(right, left, Operand(right));
2383
case Token::BIT_AND:
2384
__ and_(right, left, Operand(right));
2387
case Token::BIT_XOR:
2388
__ eor(right, left, Operand(right));
2392
// Remove tags from right operand.
2393
__ GetLeastBitsFromSmi(scratch1, right, 5);
2394
__ mov(right, Operand(left, ASR, scratch1));
2396
__ bic(right, right, Operand(kSmiTagMask));
2400
// Remove tags from operands. We can't do this on a 31 bit number
2401
// because then the 0s get shifted into bit 30 instead of bit 31.
2402
__ SmiUntag(scratch1, left);
2403
__ GetLeastBitsFromSmi(scratch2, right, 5);
2404
__ mov(scratch1, Operand(scratch1, LSR, scratch2));
2405
// Unsigned shift is not allowed to produce a negative number, so
2406
// check the sign bit and the sign bit after Smi tagging.
2407
__ tst(scratch1, Operand(0xc0000000));
__ b(ne, &not_smi_result);
__ SmiTag(right, scratch1);
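// The 0xc0000000 test above covers both constraints on a logical-shift result
// at once: bit 31 must be clear because an unsigned shift may not produce a
// negative number, and bit 30 must be clear so the value still fits in a smi
// after tagging. Roughly (illustrative only):
//   bool ShrResultFitsSmi(uint32_t v) { return (v & 0xc0000000u) == 0; }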
// Remove tags from operands.
2415
__ SmiUntag(scratch1, left);
2416
__ GetLeastBitsFromSmi(scratch2, right, 5);
2417
__ mov(scratch1, Operand(scratch1, LSL, scratch2));
2418
// Check that the signed result fits in a Smi.
2419
__ add(scratch2, scratch1, Operand(0x40000000), SetCC);
__ b(mi, &not_smi_result);
__ SmiTag(right, scratch1);
__ bind(&not_smi_result);
void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2434
Label* gc_required) {
2436
Register right = r0;
2437
Register scratch1 = r7;
2438
Register scratch2 = r9;
2439
Register scratch3 = r4;
2441
ASSERT(smi_operands || (not_numbers != NULL));
2442
if (smi_operands && FLAG_debug_code) {
2443
__ AbortIfNotSmi(left);
2444
__ AbortIfNotSmi(right);
2447
Register heap_number_map = r6;
2448
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2456
// Load left and right operands into d6 and d7 or r0/r1 and r2/r3
2457
// depending on whether VFP3 is available or not.
2458
FloatingPointHelper::Destination destination =
2459
CpuFeatures::IsSupported(VFP3) &&
2461
FloatingPointHelper::kVFPRegisters :
2462
FloatingPointHelper::kCoreRegisters;
2464
// Allocate new heap number for result.
2465
Register result = r5;
2466
GenerateHeapResultAllocation(
2467
masm, result, heap_number_map, scratch1, scratch2, gc_required);
2469
// Load the operands.
2471
FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
2473
FloatingPointHelper::LoadOperands(masm,
2481
// Calculate the result.
2482
if (destination == FloatingPointHelper::kVFPRegisters) {
2483
// Using VFP registers:
2486
CpuFeatures::Scope scope(VFP3);
2489
__ vadd(d5, d6, d7);
2492
__ vsub(d5, d6, d7);
2495
__ vmul(d5, d6, d7);
2498
__ vdiv(d5, d6, d7);
2504
__ sub(r0, result, Operand(kHeapObjectTag));
2505
__ vstr(d5, r0, HeapNumber::kValueOffset);
2506
__ add(r0, r0, Operand(kHeapObjectTag));
2509
// Call the C function to handle the double operation.
2510
FloatingPointHelper::CallCCodeForDoubleOperation(masm,
2514
if (FLAG_debug_code) {
2515
__ stop("Unreachable code.");
2521
case Token::BIT_XOR:
2522
case Token::BIT_AND:
2527
__ SmiUntag(r3, left);
2528
__ SmiUntag(r2, right);
2530
// Convert operands to 32-bit integers. Right in r2 and left in r3.
2531
FloatingPointHelper::ConvertNumberToInt32(masm,
2540
FloatingPointHelper::ConvertNumberToInt32(masm,
2551
Label result_not_a_smi;
2554
__ orr(r2, r3, Operand(r2));
2556
case Token::BIT_XOR:
2557
__ eor(r2, r3, Operand(r2));
2559
case Token::BIT_AND:
2560
__ and_(r2, r3, Operand(r2));
2563
// Use only the 5 least significant bits of the shift count.
2564
__ GetLeastBitsFromInt32(r2, r2, 5);
2565
__ mov(r2, Operand(r3, ASR, r2));
2568
// Use only the 5 least significant bits of the shift count.
2569
__ GetLeastBitsFromInt32(r2, r2, 5);
2570
__ mov(r2, Operand(r3, LSR, r2), SetCC);
2571
// SHR is special because it is required to produce a positive answer.
2572
// The code below for writing into heap numbers isn't capable of
2573
// writing the register as an unsigned int so we go to slow case if we
2575
if (CpuFeatures::IsSupported(VFP3)) {
2576
__ b(mi, &result_not_a_smi);
2578
__ b(mi, not_numbers);
2582
// Use only the 5 least significant bits of the shift count.
2583
__ GetLeastBitsFromInt32(r2, r2, 5);
2584
__ mov(r2, Operand(r3, LSL, r2));
2590
// Check that the *signed* result fits in a smi.
2591
__ add(r3, r2, Operand(0x40000000), SetCC);
2592
__ b(mi, &result_not_a_smi);
2596
// Allocate new heap number for result.
2597
__ bind(&result_not_a_smi);
2598
Register result = r5;
2600
__ AllocateHeapNumber(
2601
result, scratch1, scratch2, heap_number_map, gc_required);
2603
GenerateHeapResultAllocation(
2604
masm, result, heap_number_map, scratch1, scratch2, gc_required);
2607
// r2: Answer as signed int32.
2608
// r5: Heap number to write answer into.
2610
// Nothing can go wrong now, so move the heap number to r0, which is the
2612
__ mov(r0, Operand(r5));
2614
if (CpuFeatures::IsSupported(VFP3)) {
2615
// Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
2616
// mentioned above SHR needs to always produce a positive result.
2617
CpuFeatures::Scope scope(VFP3);
2619
if (op_ == Token::SHR) {
2620
__ vcvt_f64_u32(d0, s0);
2622
__ vcvt_f64_s32(d0, s0);
2624
__ sub(r3, r0, Operand(kHeapObjectTag));
2625
__ vstr(d0, r3, HeapNumber::kValueOffset);
2628
// Tail call that writes the int32 in r2 to the heap number in r0, using
2629
// r3 as scratch. r0 is preserved and returned.
2630
WriteInt32ToHeapNumberStub stub(r2, r0, r3);
2631
__ TailCallStub(&stub);
// Generate the smi code. If the operation on smis is successful, a return is
// generated. If the result is not a smi and heap number allocation is not
// requested the code falls through. If number allocation is requested but a
// heap number cannot be allocated the code jumps to the label gc_required.
void BinaryOpStub::GenerateSmiCode(
2646
MacroAssembler* masm,
2649
SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
2653
Register right = r0;
2654
Register scratch1 = r7;
2656
// Perform combined smi check on both operands.
2657
__ orr(scratch1, left, Operand(right));
2658
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfNotSmi(scratch1, &not_smis);
// If the smi-smi operation results in a smi, a return is generated.
GenerateSmiSmiOperation(masm);
2664
// If heap number results are possible generate the result in an allocated
// heap number.
if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
2667
GenerateFPOperation(masm, true, use_runtime, gc_required);
2673
void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2674
Label not_smis, call_runtime;
2676
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
2677
result_type_ == BinaryOpIC::SMI) {
2678
// Only allow smi results.
2679
GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
2681
// Allow heap number result and don't make a transition if a heap number
2682
// cannot be allocated.
2683
GenerateSmiCode(masm,
2686
ALLOW_HEAPNUMBER_RESULTS);
2689
// Code falls through if the result is not returned as either a smi or heap
// number.
GenerateTypeTransition(masm);
2693
__ bind(&call_runtime);
2694
GenerateCallRuntime(masm);
2698
void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
2699
ASSERT(operands_type_ == BinaryOpIC::STRING);
2700
ASSERT(op_ == Token::ADD);
2701
// Try to add arguments as strings, otherwise, transition to the generic
2703
GenerateAddStrings(masm);
2704
GenerateTypeTransition(masm);
2708
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
2710
ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
2711
ASSERT(op_ == Token::ADD);
2712
// If both arguments are strings, call the string add stub.
2713
// Otherwise, do a transition.
2715
// Registers containing left and right operands respectively.
2717
Register right = r0;
2719
// Test if left operand is a string.
2720
__ JumpIfSmi(left, &call_runtime);
2721
__ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
2722
__ b(ge, &call_runtime);
2724
// Test if right operand is a string.
2725
__ JumpIfSmi(right, &call_runtime);
2726
__ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
2727
__ b(ge, &call_runtime);
2729
StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
2730
GenerateRegisterArgsPush(masm);
2731
__ TailCallStub(&string_add_stub);
2733
__ bind(&call_runtime);
2734
GenerateTypeTransition(masm);
2738
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2739
ASSERT(operands_type_ == BinaryOpIC::INT32);
2742
Register right = r0;
2743
Register scratch1 = r7;
2744
Register scratch2 = r9;
2745
DwVfpRegister double_scratch = d0;
2746
SwVfpRegister single_scratch = s3;
2748
Register heap_number_result = no_reg;
2749
Register heap_number_map = r6;
2750
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2753
// Labels for type transition, used for wrong input or output types.
// Both labels are currently bound to the same position. We use two different
// labels to differentiate the cause leading to the type transition.
// Smi-smi fast case.
2760
__ orr(scratch1, left, right);
2761
__ JumpIfNotSmi(scratch1, &skip);
2762
GenerateSmiSmiOperation(masm);
2763
// Fall through if the result is not a smi.
2772
// Load both operands and check that they are 32-bit integers.
2773
// Jump to type transition if they are not. The registers r0 and r1 (right
2774
// and left) are preserved for the runtime call.
2775
FloatingPointHelper::Destination destination =
2776
(CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD)
2777
? FloatingPointHelper::kVFPRegisters
2778
: FloatingPointHelper::kCoreRegisters;
2780
FloatingPointHelper::LoadNumberAsInt32Double(masm,
2791
FloatingPointHelper::LoadNumberAsInt32Double(masm,
2803
if (destination == FloatingPointHelper::kVFPRegisters) {
2804
CpuFeatures::Scope scope(VFP3);
2805
Label return_heap_number;
2808
__ vadd(d5, d6, d7);
2811
__ vsub(d5, d6, d7);
2814
__ vmul(d5, d6, d7);
2817
__ vdiv(d5, d6, d7);
2823
if (op_ != Token::DIV) {
2824
// These operations produce an integer result.
2825
// Try to return a smi if we can.
2826
// Otherwise return a heap number if allowed, or jump to type
2829
__ EmitVFPTruncate(kRoundToZero,
2835
if (result_type_ <= BinaryOpIC::INT32) {
2836
// If the ne condition is set, result does
2837
// not fit in a 32-bit integer.
2838
__ b(ne, &transition);
2841
// Check if the result fits in a smi.
2842
__ vmov(scratch1, single_scratch);
2843
__ add(scratch2, scratch1, Operand(0x40000000), SetCC);
2844
// If not try to return a heap number.
2845
__ b(mi, &return_heap_number);
2846
// Check for minus zero. Return heap number for minus zero.
2848
__ cmp(scratch1, Operand::Zero());
__ b(ne, &not_zero);
__ vmov(scratch2, d5.high());
2851
__ tst(scratch2, Operand(HeapNumber::kSignMask));
2852
__ b(ne, &return_heap_number);
2855
// Tag the result and return.
2856
__ SmiTag(r0, scratch1);
2859
// DIV just falls through to allocating a heap number.
2862
__ bind(&return_heap_number);
2863
// Return a heap number, or fall through to type transition or runtime
2864
// call if we can't.
2865
if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
2866
: BinaryOpIC::INT32)) {
2867
// We are using vfp registers so r5 is available.
2868
heap_number_result = r5;
2869
GenerateHeapResultAllocation(masm,
2875
__ sub(r0, heap_number_result, Operand(kHeapObjectTag));
2876
__ vstr(d5, r0, HeapNumber::kValueOffset);
2877
__ mov(r0, heap_number_result);
2881
// A DIV operation expecting an integer result falls through
2882
// to type transition.
2885
// We preserved r0 and r1 to be able to call runtime.
2886
// Save the left value on the stack.
2889
Label pop_and_call_runtime;
2891
// Allocate a heap number to store the result.
2892
heap_number_result = r5;
2893
GenerateHeapResultAllocation(masm,
2898
&pop_and_call_runtime);
2900
// Load the left value from the value saved on the stack.
2903
// Call the C function to handle the double operation.
2904
FloatingPointHelper::CallCCodeForDoubleOperation(
2905
masm, op_, heap_number_result, scratch1);
2906
if (FLAG_debug_code) {
2907
__ stop("Unreachable code.");
2910
__ bind(&pop_and_call_runtime);
2912
__ b(&call_runtime);
2919
case Token::BIT_XOR:
2920
case Token::BIT_AND:
2924
Label return_heap_number;
2925
Register scratch3 = r5;
2926
// Convert operands to 32-bit integers. Right in r2 and left in r3. The
2927
// registers r0 and r1 (right and left) are preserved for the runtime
2929
FloatingPointHelper::LoadNumberAsInt32(masm,
2938
FloatingPointHelper::LoadNumberAsInt32(masm,
2948
// The ECMA-262 standard specifies that, for shift operations, only the
2949
// 5 least significant bits of the shift value should be used.
2952
__ orr(r2, r3, Operand(r2));
2954
case Token::BIT_XOR:
2955
__ eor(r2, r3, Operand(r2));
2957
case Token::BIT_AND:
2958
__ and_(r2, r3, Operand(r2));
2961
__ and_(r2, r2, Operand(0x1f));
2962
__ mov(r2, Operand(r3, ASR, r2));
2965
__ and_(r2, r2, Operand(0x1f));
2966
__ mov(r2, Operand(r3, LSR, r2), SetCC);
2967
// SHR is special because it is required to produce a positive answer.
2968
// We only get a negative result if the shift value (r2) is 0.
// This result cannot be represented as a signed 32-bit integer, try
// to return a heap number if we can.
2971
// The non vfp3 code does not support this special case, so jump to
2972
// runtime if we don't support it.
2973
if (CpuFeatures::IsSupported(VFP3)) {
2974
__ b(mi, (result_type_ <= BinaryOpIC::INT32)
2976
: &return_heap_number);
2978
__ b(mi, (result_type_ <= BinaryOpIC::INT32)
2984
__ and_(r2, r2, Operand(0x1f));
2985
__ mov(r2, Operand(r3, LSL, r2));
2991
// Check if the result fits in a smi.
2992
__ add(scratch1, r2, Operand(0x40000000), SetCC);
2993
// If not try to return a heap number. (We know the result is an int32.)
2994
__ b(mi, &return_heap_number);
2995
// Tag the result and return.
2999
__ bind(&return_heap_number);
3000
heap_number_result = r5;
3001
GenerateHeapResultAllocation(masm,
3008
if (CpuFeatures::IsSupported(VFP3)) {
3009
CpuFeatures::Scope scope(VFP3);
3010
if (op_ != Token::SHR) {
3011
// Convert the result to a floating point value.
3012
__ vmov(double_scratch.low(), r2);
3013
__ vcvt_f64_s32(double_scratch, double_scratch.low());
3015
// The result must be interpreted as an unsigned 32-bit integer.
3016
__ vmov(double_scratch.low(), r2);
3017
__ vcvt_f64_u32(double_scratch, double_scratch.low());
3020
// Store the result.
3021
__ sub(r0, heap_number_result, Operand(kHeapObjectTag));
3022
__ vstr(double_scratch, r0, HeapNumber::kValueOffset);
3023
__ mov(r0, heap_number_result);
3026
// Tail call that writes the int32 in r2 to the heap number in r0, using
3027
// r3 as scratch. r0 is preserved and returned.
3029
WriteInt32ToHeapNumberStub stub(r2, r0, r3);
3030
__ TailCallStub(&stub);
3040
// We never expect DIV to yield an integer result, so we always generate
3041
// type transition code for DIV operations expecting an integer result: the
3042
// code will fall through to this type transition.
3043
if (transition.is_linked() ||
3044
((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
3045
__ bind(&transition);
3046
GenerateTypeTransition(masm);
3049
__ bind(&call_runtime);
3050
GenerateCallRuntime(masm);
3054
void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
3057
if (op_ == Token::ADD) {
3058
// Handle string addition here, because it is the only operation
3059
// that does not do a ToNumber conversion on the operands.
3060
GenerateAddStrings(masm);
3063
// Convert oddball arguments to numbers.
3065
__ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
3067
if (Token::IsBitOp(op_)) {
3068
__ mov(r1, Operand(Smi::FromInt(0)));
3070
__ LoadRoot(r1, Heap::kNanValueRootIndex);
3074
__ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
3076
if (Token::IsBitOp(op_)) {
3077
__ mov(r0, Operand(Smi::FromInt(0)));
3079
__ LoadRoot(r0, Heap::kNanValueRootIndex);
3083
GenerateHeapNumberStub(masm);
3087
void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
3089
GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
3091
__ bind(&call_runtime);
3092
GenerateCallRuntime(masm);
3096
void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
3097
Label call_runtime, call_string_add_or_runtime;
3099
GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
3101
GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
3103
__ bind(&call_string_add_or_runtime);
3104
if (op_ == Token::ADD) {
3105
GenerateAddStrings(masm);
3108
__ bind(&call_runtime);
3109
GenerateCallRuntime(masm);
3113
void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
3114
ASSERT(op_ == Token::ADD);
3115
Label left_not_string, call_runtime;
3118
Register right = r0;
3120
// Check if left argument is a string.
3121
__ JumpIfSmi(left, &left_not_string);
3122
__ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
3123
__ b(ge, &left_not_string);
3125
StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
3126
GenerateRegisterArgsPush(masm);
3127
__ TailCallStub(&string_add_left_stub);
3129
// Left operand is not a string, test right.
3130
__ bind(&left_not_string);
3131
__ JumpIfSmi(right, &call_runtime);
3132
__ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
3133
__ b(ge, &call_runtime);
3135
StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
3136
GenerateRegisterArgsPush(masm);
3137
__ TailCallStub(&string_add_right_stub);
3139
// At least one argument is not a string.
3140
__ bind(&call_runtime);
3144
void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
3145
GenerateRegisterArgsPush(masm);
3148
__ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
3151
__ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
3154
__ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
3157
__ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
3160
__ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
3163
__ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
3165
case Token::BIT_AND:
3166
__ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
3168
case Token::BIT_XOR:
3169
__ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
3172
__ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
3175
__ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
3178
__ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
3186
void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
3188
Register heap_number_map,
3191
Label* gc_required) {
3192
// Code below will scratch result if allocation fails. To keep both arguments
3193
// intact for the runtime call result cannot be one of these.
3194
ASSERT(!result.is(r0) && !result.is(r1));
3196
if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
3197
Label skip_allocation, allocated;
3198
Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0;
3199
// If the overwritable operand is already an object, we skip the
3200
// allocation of a heap number.
3201
__ JumpIfNotSmi(overwritable_operand, &skip_allocation);
3202
// Allocate a heap number for the result.
3203
__ AllocateHeapNumber(
3204
result, scratch1, scratch2, heap_number_map, gc_required);
3206
__ bind(&skip_allocation);
3207
// Use object holding the overwritable operand for result.
3208
__ mov(result, Operand(overwritable_operand));
3209
__ bind(&allocated);
3211
ASSERT(mode_ == NO_OVERWRITE);
3212
__ AllocateHeapNumber(
3213
result, scratch1, scratch2, heap_number_map, gc_required);
3218
void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
3223
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
3224
// Untagged case: double input in d2, double result goes
3226
// Tagged case: tagged input on top of stack and in r0,
3227
// tagged result (heap number) goes into r0.
3229
Label input_not_smi;
3232
Label invalid_cache;
3233
const Register scratch0 = r9;
3234
const Register scratch1 = r7;
3235
const Register cache_entry = r0;
3236
const bool tagged = (argument_type_ == TAGGED);
3238
if (CpuFeatures::IsSupported(VFP3)) {
3239
CpuFeatures::Scope scope(VFP3);
3241
// Argument is a number and is on stack and in r0.
3242
// Load argument and check if it is a smi.
3243
__ JumpIfNotSmi(r0, &input_not_smi);
3245
// Input is a smi. Convert to double and load the low and high words
3246
// of the double into r2, r3.
3247
__ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
3250
__ bind(&input_not_smi);
3251
// Check if input is a HeapNumber.
3254
Heap::kHeapNumberMapRootIndex,
3257
// Input is a HeapNumber. Load it to a double register and store the
3258
// low and high words into r2, r3.
3259
__ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
3260
__ vmov(r2, r3, d0);
3262
// Input is untagged double in d2. Output goes to d2.
3263
__ vmov(r2, r3, d2);
3266
// r2 = low 32 bits of double value
3267
// r3 = high 32 bits of double value
3268
// Compute hash (the shifts are arithmetic):
3269
// h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
3270
__ eor(r1, r2, Operand(r3));
3271
__ eor(r1, r1, Operand(r1, ASR, 16));
3272
__ eor(r1, r1, Operand(r1, ASR, 8));
3273
ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
3274
__ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
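// A plain C++ sketch of the hash computed above (illustrative only; as noted,
// the shifts are arithmetic and kCacheSize is a power of two):
//   uint32_t Hash(int32_t lo, int32_t hi) {
//     int32_t h = lo ^ hi;
//     h ^= h >> 16;
//     h ^= h >> 8;
//     return h & (TranscendentalCache::SubCache::kCacheSize - 1);
//   }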
// r2 = low 32 bits of double value.
3277
// r3 = high 32 bits of double value.
3278
// r1 = TranscendentalCache::hash(double value).
3279
Isolate* isolate = masm->isolate();
3280
ExternalReference cache_array =
3281
ExternalReference::transcendental_cache_array_address(isolate);
3282
__ mov(cache_entry, Operand(cache_array));
3283
// cache_entry points to cache array.
3284
int cache_array_index
3285
= type_ * sizeof(isolate->transcendental_cache()->caches_[0]);
3286
__ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
3287
// r0 points to the cache for the type type_.
3288
// If NULL, the cache hasn't been initialized yet, so go through runtime.
3289
__ cmp(cache_entry, Operand(0, RelocInfo::NONE));
3290
__ b(eq, &invalid_cache);
3293
// Check that the layout of cache elements match expectations.
3294
{ TranscendentalCache::SubCache::Element test_elem[2];
3295
char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
3296
char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
3297
char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
3298
char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
3299
char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
3300
CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
3301
CHECK_EQ(0, elem_in0 - elem_start);
3302
CHECK_EQ(kIntSize, elem_in1 - elem_start);
3303
CHECK_EQ(2 * kIntSize, elem_out - elem_start);
3307
// Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
3308
__ add(r1, r1, Operand(r1, LSL, 1));
3309
__ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
3310
// Check if cache matches: Double value is stored in uint32_t[2] array.
3311
__ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
3313
__ b(ne, &calculate);
3315
__ b(ne, &calculate);
3316
// Cache hit. Load result, cleanup and return.
3317
Counters* counters = masm->isolate()->counters();
3318
__ IncrementCounter(
3319
counters->transcendental_cache_hit(), 1, scratch0, scratch1);
3321
// Pop input value from stack and load result into r0.
3323
__ mov(r0, Operand(r6));
3325
// Load result into d2.
3326
__ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
3329
} // if (CpuFeatures::IsSupported(VFP3))
3331
__ bind(&calculate);
3332
Counters* counters = masm->isolate()->counters();
3333
__ IncrementCounter(
3334
counters->transcendental_cache_miss(), 1, scratch0, scratch1);
3336
__ bind(&invalid_cache);
3337
ExternalReference runtime_function =
3338
ExternalReference(RuntimeFunction(), masm->isolate());
3339
__ TailCallExternalReference(runtime_function, 1, 1);
3341
if (!CpuFeatures::IsSupported(VFP3)) UNREACHABLE();
3342
CpuFeatures::Scope scope(VFP3);
3347
// Call C function to calculate the result and update the cache.
3348
// Register r0 holds precalculated cache entry address; preserve
3349
// it on the stack and pop it into register cache_entry after the
3351
__ push(cache_entry);
3352
GenerateCallCFunction(masm, scratch0);
3353
__ GetCFunctionDoubleResult(d2);
3355
// Try to update the cache. If we cannot allocate a
3356
// heap number, we return the result without updating.
3357
__ pop(cache_entry);
3358
__ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
3359
__ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
3360
__ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
3361
__ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit());
3364
__ bind(&invalid_cache);
// The cache is invalid. Call runtime which will recreate the
// cache.
__ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
3368
__ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
3369
__ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
3371
FrameScope scope(masm, StackFrame::INTERNAL);
3373
__ CallRuntime(RuntimeFunction(), 1);
3375
__ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
3378
__ bind(&skip_cache);
3379
// Call C function to calculate the result and answer directly
3380
// without updating the cache.
3381
GenerateCallCFunction(masm, scratch0);
3382
__ GetCFunctionDoubleResult(d2);
3383
__ bind(&no_update);
3385
// We return the value in d2 without adding it to the cache, but
3386
// we cause a scavenging GC so that future allocations will succeed.
3388
FrameScope scope(masm, StackFrame::INTERNAL);
3390
// Allocate an aligned object larger than a HeapNumber.
3391
ASSERT(4 * kPointerSize >= HeapNumber::kSize);
3392
__ mov(scratch0, Operand(4 * kPointerSize));
3394
__ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
3401
void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
3403
Isolate* isolate = masm->isolate();
3406
__ PrepareCallCFunction(0, 1, scratch);
3407
if (masm->use_eabi_hardfloat()) {
3410
__ vmov(r0, r1, d2);
3412
AllowExternalCallThatCantCauseGC scope(masm);
3414
case TranscendentalCache::SIN:
3415
__ CallCFunction(ExternalReference::math_sin_double_function(isolate),
3418
case TranscendentalCache::COS:
3419
__ CallCFunction(ExternalReference::math_cos_double_function(isolate),
3422
case TranscendentalCache::TAN:
3423
__ CallCFunction(ExternalReference::math_tan_double_function(isolate),
3426
case TranscendentalCache::LOG:
3427
__ CallCFunction(ExternalReference::math_log_double_function(isolate),
3438
Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
3440
// Add more cases when necessary.
3441
case TranscendentalCache::SIN: return Runtime::kMath_sin;
3442
case TranscendentalCache::COS: return Runtime::kMath_cos;
3443
case TranscendentalCache::TAN: return Runtime::kMath_tan;
3444
case TranscendentalCache::LOG: return Runtime::kMath_log;
3447
return Runtime::kAbort;
3452
void StackCheckStub::Generate(MacroAssembler* masm) {
3453
__ TailCallRuntime(Runtime::kStackGuard, 0, 1);
3457
void MathPowStub::Generate(MacroAssembler* masm) {
3458
CpuFeatures::Scope vfp3_scope(VFP3);
3459
const Register base = r1;
3460
const Register exponent = r2;
3461
const Register heapnumbermap = r5;
3462
const Register heapnumber = r0;
3463
const DoubleRegister double_base = d1;
3464
const DoubleRegister double_exponent = d2;
3465
const DoubleRegister double_result = d3;
3466
const DoubleRegister double_scratch = d0;
3467
const SwVfpRegister single_scratch = s0;
3468
const Register scratch = r9;
3469
const Register scratch2 = r7;
3471
Label call_runtime, done, exponent_not_smi, int_exponent;
3472
if (exponent_type_ == ON_STACK) {
3473
Label base_is_smi, unpack_exponent;
3474
// The exponent and base are supplied as arguments on the stack.
3475
// This can only happen if the stub is called from non-optimized code.
3476
// Load input parameters from stack to double registers.
3477
__ ldr(base, MemOperand(sp, 1 * kPointerSize));
3478
__ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
3480
__ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
3482
__ JumpIfSmi(base, &base_is_smi);
3483
__ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
3484
__ cmp(scratch, heapnumbermap);
3485
__ b(ne, &call_runtime);
3487
__ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
3488
__ jmp(&unpack_exponent);
3490
__ bind(&base_is_smi);
3492
__ vmov(single_scratch, base);
3493
__ vcvt_f64_s32(double_base, single_scratch);
3494
__ bind(&unpack_exponent);
3496
__ JumpIfNotSmi(exponent, &exponent_not_smi);
3497
__ SmiUntag(exponent);
3498
__ jmp(&int_exponent);
3500
__ bind(&exponent_not_smi);
3501
__ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
3502
__ cmp(scratch, heapnumbermap);
3503
__ b(ne, &call_runtime);
3504
__ vldr(double_exponent,
3505
FieldMemOperand(exponent, HeapNumber::kValueOffset));
3506
} else if (exponent_type_ == TAGGED) {
3507
// Base is already in double_base.
3508
__ JumpIfNotSmi(exponent, &exponent_not_smi);
3509
__ SmiUntag(exponent);
3510
__ jmp(&int_exponent);
3512
__ bind(&exponent_not_smi);
3513
__ vldr(double_exponent,
3514
FieldMemOperand(exponent, HeapNumber::kValueOffset));
3517
if (exponent_type_ != INTEGER) {
3518
Label int_exponent_convert;
3519
// Detect integer exponents stored as double.
3520
__ vcvt_u32_f64(single_scratch, double_exponent);
3521
// We do not check for NaN or Infinity here because comparing numbers on
3522
// ARM correctly distinguishes NaNs. We end up calling the built-in.
3523
__ vcvt_f64_u32(double_scratch, single_scratch);
3524
__ VFPCompareAndSetFlags(double_scratch, double_exponent);
3525
__ b(eq, &int_exponent_convert);
3527
if (exponent_type_ == ON_STACK) {
3528
// Detect square root case. Crankshaft detects constant +/-0.5 at
3529
// compile time and uses DoMathPowHalf instead. We then skip this check
3530
// for non-constant cases of +/-0.5 as these hardly occur.
3531
Label not_plus_half;
3534
__ vmov(double_scratch, 0.5);
3535
__ VFPCompareAndSetFlags(double_exponent, double_scratch);
__ b(ne, &not_plus_half);
// Calculates square root of base. Check for the special case of
3539
// Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
3540
__ vmov(double_scratch, -V8_INFINITY);
3541
__ VFPCompareAndSetFlags(double_base, double_scratch);
3542
__ vneg(double_result, double_scratch, eq);
3545
// Add +0 to convert -0 to +0.
3546
__ vadd(double_scratch, double_base, kDoubleRegZero);
3547
__ vsqrt(double_result, double_scratch);
__ bind(&not_plus_half);
__ vmov(double_scratch, -0.5);
3552
__ VFPCompareAndSetFlags(double_exponent, double_scratch);
3553
__ b(ne, &call_runtime);
3555
// Calculates square root of base. Check for the special case of
3556
// Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
3557
__ vmov(double_scratch, -V8_INFINITY);
3558
__ VFPCompareAndSetFlags(double_base, double_scratch);
3559
__ vmov(double_result, kDoubleRegZero, eq);
3562
// Add +0 to convert -0 to +0.
3563
__ vadd(double_scratch, double_base, kDoubleRegZero);
3564
__ vmov(double_result, 1);
3565
__ vsqrt(double_scratch, double_scratch);
3566
__ vdiv(double_result, double_result, double_scratch);
3572
AllowExternalCallThatCantCauseGC scope(masm);
3573
__ PrepareCallCFunction(0, 2, scratch);
3574
__ SetCallCDoubleArguments(double_base, double_exponent);
3576
ExternalReference::power_double_double_function(masm->isolate()),
3580
__ GetCFunctionDoubleResult(double_result);
3583
__ bind(&int_exponent_convert);
3584
__ vcvt_u32_f64(single_scratch, double_exponent);
3585
__ vmov(exponent, single_scratch);
3588
// Calculate power with integer exponent.
3589
__ bind(&int_exponent);
3591
__ mov(scratch, exponent); // Back up exponent.
3592
__ vmov(double_scratch, double_base); // Back up base.
3593
__ vmov(double_result, 1.0);
3595
// Get absolute value of exponent.
3596
__ cmp(scratch, Operand(0));
3597
__ mov(scratch2, Operand(0), LeaveCC, mi);
3598
__ sub(scratch, scratch2, scratch, LeaveCC, mi);
3601
__ bind(&while_true);
3602
__ mov(scratch, Operand(scratch, ASR, 1), SetCC);
3603
__ vmul(double_result, double_result, double_scratch, cs);
3604
__ vmul(double_scratch, double_scratch, double_scratch, ne);
3605
__ b(ne, &while_true);
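// The loop above is binary exponentiation (square-and-multiply). A rough C++
// equivalent of the integer-exponent path (illustrative only; a negative
// exponent is handled by the reciprocal taken just below):
//   double PowInt(double base, int exponent) {
//     unsigned bits = exponent < 0 ? -exponent : exponent;
//     double result = 1.0;
//     while (bits != 0) {
//       if (bits & 1) result *= base;
//       base *= base;
//       bits >>= 1;
//     }
//     return exponent < 0 ? 1.0 / result : result;
//   }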
__ cmp(exponent, Operand(0));
3609
__ vmov(double_scratch, 1.0);
3610
__ vdiv(double_result, double_scratch, double_result);
3611
// Test whether result is zero. Bail out to check for subnormal result.
3612
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
3613
__ VFPCompareAndSetFlags(double_result, 0.0);
// double_exponent may not contain the exponent value if the input was a
// smi. We set it with exponent value before bailing out.
3617
__ vmov(single_scratch, exponent);
3618
__ vcvt_f64_s32(double_exponent, single_scratch);
3620
// Returning or bailing out.
3621
Counters* counters = masm->isolate()->counters();
3622
if (exponent_type_ == ON_STACK) {
3623
// The arguments are still on the stack.
3624
__ bind(&call_runtime);
3625
__ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
3627
// The stub is called from non-optimized code, which expects the result
3628
// as heap number in exponent.
3630
__ AllocateHeapNumber(
3631
heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
3632
__ vstr(double_result,
3633
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
3634
ASSERT(heapnumber.is(r0));
3635
__ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
3640
AllowExternalCallThatCantCauseGC scope(masm);
3641
__ PrepareCallCFunction(0, 2, scratch);
3642
__ SetCallCDoubleArguments(double_base, double_exponent);
3644
ExternalReference::power_double_double_function(masm->isolate()),
3648
__ GetCFunctionDoubleResult(double_result);
3651
__ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
3657
bool CEntryStub::NeedsImmovableCode() {
3662
bool CEntryStub::IsPregenerated() {
3663
return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
3668
void CodeStub::GenerateStubsAheadOfTime() {
3669
CEntryStub::GenerateAheadOfTime();
3670
WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
3671
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
3672
RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
3676
void CodeStub::GenerateFPStubs() {
3677
CEntryStub save_doubles(1, kSaveFPRegs);
3678
Handle<Code> code = save_doubles.GetCode();
3679
code->set_is_pregenerated(true);
3680
StoreBufferOverflowStub stub(kSaveFPRegs);
3681
stub.GetCode()->set_is_pregenerated(true);
3682
code->GetIsolate()->set_fp_stubs_generated(true);
3686
void CEntryStub::GenerateAheadOfTime() {
3687
CEntryStub stub(1, kDontSaveFPRegs);
3688
Handle<Code> code = stub.GetCode();
3689
code->set_is_pregenerated(true);
3693
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
3698
void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
3699
UncatchableExceptionType type) {
3700
__ ThrowUncatchable(type, r0);
3704
void CEntryStub::GenerateCore(MacroAssembler* masm,
3705
Label* throw_normal_exception,
3706
Label* throw_termination_exception,
3707
Label* throw_out_of_memory_exception,
3709
bool always_allocate) {
3710
// r0: result parameter for PerformGC, if any
3711
// r4: number of arguments including receiver (C callee-saved)
3712
// r5: pointer to builtin function (C callee-saved)
3713
// r6: pointer to the first argument (C callee-saved)
3714
Isolate* isolate = masm->isolate();
3718
__ PrepareCallCFunction(1, 0, r1);
3719
__ CallCFunction(ExternalReference::perform_gc_function(isolate),
3723
ExternalReference scope_depth =
3724
ExternalReference::heap_always_allocate_scope_depth(isolate);
3725
if (always_allocate) {
3726
__ mov(r0, Operand(scope_depth));
3727
__ ldr(r1, MemOperand(r0));
3728
__ add(r1, r1, Operand(1));
3729
__ str(r1, MemOperand(r0));
3733
// r0 = argc, r1 = argv
3734
__ mov(r0, Operand(r4));
3735
__ mov(r1, Operand(r6));
3737
#if defined(V8_HOST_ARCH_ARM)
3738
int frame_alignment = MacroAssembler::ActivationFrameAlignment();
3739
int frame_alignment_mask = frame_alignment - 1;
3740
if (FLAG_debug_code) {
3741
if (frame_alignment > kPointerSize) {
3742
Label alignment_as_expected;
3743
ASSERT(IsPowerOf2(frame_alignment));
3744
__ tst(sp, Operand(frame_alignment_mask));
3745
__ b(eq, &alignment_as_expected);
3746
// Don't use Check here, as it will call Runtime_Abort re-entering here.
3747
__ stop("Unexpected alignment");
3748
__ bind(&alignment_as_expected);
3753
__ mov(r2, Operand(ExternalReference::isolate_address()));
3755
// To let the GC traverse the return address of the exit frames, we need to
3756
// know where the return address is. The CEntryStub is unmovable, so
3757
// we can store the address on the stack to be able to find it again and
3758
// we never have to restore it, because it will not change.
3759
// Compute the return address in lr to return to after the jump below. Pc is
3760
// already at '+ 8' from the current instruction but return is after three
3761
// instructions so add another 4 to pc to get the return address.
3762
masm->add(lr, pc, Operand(4));
3763
__ str(lr, MemOperand(sp, 0));
3766
if (always_allocate) {
3767
// It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
3768
// though (contain the result).
3769
__ mov(r2, Operand(scope_depth));
3770
__ ldr(r3, MemOperand(r2));
3771
__ sub(r3, r3, Operand(1));
3772
__ str(r3, MemOperand(r2));
3775
// check for failure result
3776
Label failure_returned;
3777
STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
3778
// Lower 2 bits of r2 are 0 iff r0 has failure tag.
3779
__ add(r2, r0, Operand(1));
3780
__ tst(r2, Operand(kFailureTagMask));
3781
__ b(eq, &failure_returned);
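// Illustrative reading of the check above (sketch, not part of the original):
// failure objects carry a tag whose low bits are all ones within
// kFailureTagMask, so adding 1 clears exactly those bits.
//   bool LooksLikeFailure(uint32_t value) {
//     return ((value + 1) & kFailureTagMask) == 0;
//   }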
// Exit C frame and return.
3785
// sp: stack pointer
3786
// fp: frame pointer
3787
// Callee-saved register r4 still holds argc.
3788
__ LeaveExitFrame(save_doubles_, r4);
3791
// check if we should retry or throw exception
3793
__ bind(&failure_returned);
3794
STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
3795
__ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
3798
// Special handling of out of memory exceptions.
3799
Failure* out_of_memory = Failure::OutOfMemoryException();
3800
__ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
3801
__ b(eq, throw_out_of_memory_exception);
3803
// Retrieve the pending exception and clear the variable.
3804
__ mov(r3, Operand(isolate->factory()->the_hole_value()));
3805
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3807
__ ldr(r0, MemOperand(ip));
3808
__ str(r3, MemOperand(ip));
3810
// Special handling of termination exceptions which are uncatchable
3811
// by javascript code.
3812
__ cmp(r0, Operand(isolate->factory()->termination_exception()));
3813
__ b(eq, throw_termination_exception);
3815
// Handle normal exception.
3816
__ jmp(throw_normal_exception);
3818
__ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
3822
void CEntryStub::Generate(MacroAssembler* masm) {
3823
// Called from JavaScript; parameters are on stack as if calling JS function
3824
// r0: number of arguments including receiver
3825
// r1: pointer to builtin function
3826
// fp: frame pointer (restored after C call)
3827
// sp: stack pointer (restored as callee's sp after C call)
3828
// cp: current context (C callee-saved)
3830
// Result returned in r0 or r0+r1 by default.
3832
// NOTE: Invocations of builtins may return failure objects
3833
// instead of a proper result. The builtin entry handles
3834
// this by performing a garbage collection and retrying the
3837
// Compute the argv pointer in a callee-saved register.
3838
__ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
3839
__ sub(r6, r6, Operand(kPointerSize));
3841
// Enter the exit frame that transitions from JavaScript to C++.
3842
FrameScope scope(masm, StackFrame::MANUAL);
3843
__ EnterExitFrame(save_doubles_);
3845
// Set up argc and the builtin function in callee-saved registers.
3846
__ mov(r4, Operand(r0));
3847
__ mov(r5, Operand(r1));
3849
// r4: number of arguments (C callee-saved)
3850
// r5: pointer to builtin function (C callee-saved)
3851
// r6: pointer to first argument (C callee-saved)
3853
Label throw_normal_exception;
3854
Label throw_termination_exception;
3855
Label throw_out_of_memory_exception;
3857
// Call into the runtime system.
3859
&throw_normal_exception,
3860
&throw_termination_exception,
3861
&throw_out_of_memory_exception,
3865
// Do space-specific GC and retry runtime call.
3867
&throw_normal_exception,
3868
&throw_termination_exception,
3869
&throw_out_of_memory_exception,
3873
// Do full GC and retry runtime call one final time.
3874
Failure* failure = Failure::InternalError();
3875
__ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
3877
&throw_normal_exception,
3878
&throw_termination_exception,
3879
&throw_out_of_memory_exception,
3883
__ bind(&throw_out_of_memory_exception);
3884
GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
3886
__ bind(&throw_termination_exception);
3887
GenerateThrowUncatchable(masm, TERMINATION);
3889
__ bind(&throw_normal_exception);
3890
GenerateThrowTOS(masm);
3894
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
3901
Label invoke, handler_entry, exit;
3903
// Called from C, so do not pop argc and args on exit (preserve sp)
3904
// No need to save register-passed args
3905
// Save callee-saved registers (incl. cp and fp), sp, and lr
3906
__ stm(db_w, sp, kCalleeSaved | lr.bit());
3908
if (CpuFeatures::IsSupported(VFP3)) {
3909
CpuFeatures::Scope scope(VFP3);
3910
// Save callee-saved vfp registers.
3911
__ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
3912
// Set up the reserved register for 0.0.
3913
__ vmov(kDoubleRegZero, 0.0);
3916
// Get address of argv, see stm above.
3922
// Set up argv in r4.
3923
int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
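// The "+ 1" accounts for the lr slot pushed together with the callee-saved
// registers by the stm above.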
if (CpuFeatures::IsSupported(VFP3)) {
3925
offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
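// When VFP3 is available, the callee-saved doubles pushed by the vstm above
// also lie between sp and argv, so they are added to the offset.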
__ ldr(r4, MemOperand(sp, offset_to_argv));
3929
// Push a frame with special values setup to mark it as an entry frame.
3935
Isolate* isolate = masm->isolate();
3936
__ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
3937
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
3938
__ mov(r7, Operand(Smi::FromInt(marker)));
3939
__ mov(r6, Operand(Smi::FromInt(marker)));
3941
Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
3942
__ ldr(r5, MemOperand(r5));
3943
__ Push(r8, r7, r6, r5);
3945
// Set up frame pointer for the frame to be pushed.
3946
__ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
3948
// If this is the outermost JS call, set js_entry_sp value.
3949
Label non_outermost_js;
3950
ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
3951
__ mov(r5, Operand(ExternalReference(js_entry_sp)));
3952
__ ldr(r6, MemOperand(r5));
3953
__ cmp(r6, Operand::Zero());
3954
__ b(ne, &non_outermost_js);
3955
__ str(fp, MemOperand(r5));
3956
__ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
3959
__ bind(&non_outermost_js);
3960
__ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
3964
// Jump to a faked try block that does the invoke, with a faked catch
3965
// block that sets the pending exception.
3967
__ bind(&handler_entry);
3968
handler_offset_ = handler_entry.pos();
3969
// Caught exception: Store result (exception) in the pending exception
3970
// field in the JSEnv and return a failure sentinel. Coming in here the
3971
// fp will be invalid because the PushTryHandler below sets it to 0 to
3972
// signal the existence of the JSEntry frame.
3973
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3975
__ str(r0, MemOperand(ip));
3976
__ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
3979
// Invoke: Link this frame into the handler chain. There's only one
3980
// handler block in this code object, so its index is 0.
3982
// Must preserve r0-r4, r5-r7 are available.
3983
__ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER, 0);
3984
// If an exception not caught by another handler occurs, this handler
3985
// returns control to the code after the bl(&invoke) above, which
3986
// restores all kCalleeSaved registers (including cp and fp) to their
3987
// saved values before returning a failure to C.
3989
// Clear any pending exceptions.
3990
__ mov(r5, Operand(isolate->factory()->the_hole_value()));
3991
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3993
__ str(r5, MemOperand(ip));
3995
// Invoke the function by calling through JS entry trampoline builtin.
3996
// Notice that we cannot store a reference to the trampoline code directly in
3997
// this stub, because runtime stubs are not traversed when doing GC.
3999
// Expected registers by Builtins::JSEntryTrampoline
4006
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
4008
__ mov(ip, Operand(construct_entry));
4010
ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
4011
__ mov(ip, Operand(entry));
4013
__ ldr(ip, MemOperand(ip)); // deref address
4015
// Branch and link to JSEntryTrampoline. We don't use the double underscore
4016
// macro for the add instruction because we don't want the coverage tool
4017
// inserting instructions here after we read the pc.
4018
__ mov(lr, Operand(pc));
4019
masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
4021
// Unlink this frame from the handler chain.
4024
__ bind(&exit); // r0 holds result
4025
// Check if the current stack frame is marked as the outermost JS frame.
4026
Label non_outermost_js_2;
4028
__ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
4029
__ b(ne, &non_outermost_js_2);
4030
__ mov(r6, Operand::Zero());
4031
__ mov(r5, Operand(ExternalReference(js_entry_sp)));
4032
__ str(r6, MemOperand(r5));
4033
__ bind(&non_outermost_js_2);
4035
// Restore the top frame descriptors from the stack.
4038
Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
4039
__ str(r3, MemOperand(ip));
4041
// Reset the stack to the callee saved registers.
4042
__ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
4044
// Restore callee-saved registers and return.
4046
if (FLAG_debug_code) {
4047
__ mov(lr, Operand(pc));
4051
if (CpuFeatures::IsSupported(VFP3)) {
4052
CpuFeatures::Scope scope(VFP3);
4053
// Restore callee-saved vfp registers.
4054
__ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
4057
__ ldm(ia_w, sp, kCalleeSaved | pc.bit());
4061
// Uses registers r0 to r4.
4062
// Expected input (depending on whether args are in registers or on the stack):
4063
// * object: r0 or at sp + 1 * kPointerSize.
4064
// * function: r1 or at sp.
4066
// An inlined call site may have been generated before calling this stub.
4067
// In this case the offset to the inline site to patch is passed on the stack,
4068
// in the safepoint slot for register r4.
4069
// (See LCodeGen::DoInstanceOfKnownGlobal)
4070
void InstanceofStub::Generate(MacroAssembler* masm) {
4071
// Call site inlining and patching implies arguments in registers.
4072
ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
4073
// ReturnTrueFalse is only implemented for inlined call sites.
4074
ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
4076
// Fixed register usage throughout the stub:
4077
const Register object = r0; // Object (lhs).
4078
Register map = r3; // Map of the object.
4079
const Register function = r1; // Function (rhs).
4080
const Register prototype = r4; // Prototype of the function.
4081
const Register inline_site = r9;
4082
const Register scratch = r2;
4084
const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
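// Offset from the patched map-check constant at an inlined call site to the
// spot where the boolean result is patched in (see the patching code below).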
Label slow, loop, is_instance, is_not_instance, not_js_object;
4088
if (!HasArgsInRegisters()) {
4089
__ ldr(object, MemOperand(sp, 1 * kPointerSize));
4090
__ ldr(function, MemOperand(sp, 0));
4093
// Check that the left hand is a JS object and load map.
4094
__ JumpIfSmi(object, &not_js_object);
__ IsObjectJSObjectType(object, map, scratch, &not_js_object);
4097
// If there is a call site cache, don't look in the global cache, but do the
// real lookup and update the call site cache.
4099
if (!HasCallSiteInlineCheck()) {
4101
__ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
4102
__ cmp(function, ip);
4104
__ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
4107
__ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
4108
__ Ret(HasArgsInRegisters() ? 0 : 2);
4113
// Get the prototype of the function.
4114
__ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
4116
// Check that the function prototype is a JS object.
4117
__ JumpIfSmi(prototype, &slow);
4118
__ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
4120
// Update the global instanceof or call site inlined cache with the current
4121
// map and function. The cached answer will be set when it is known below.
4122
if (!HasCallSiteInlineCheck()) {
4123
__ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
4124
__ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
4126
ASSERT(HasArgsInRegisters());
4127
// Patch the (relocated) inlined map check.
4129
// The offset was stored in r4 safepoint slot.
4130
// (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
4131
__ LoadFromSafepointRegisterSlot(scratch, r4);
4132
__ sub(inline_site, lr, scratch);
4133
// Get the map location in scratch and patch it.
4134
__ GetRelocatedValueLocation(inline_site, scratch);
4135
__ ldr(scratch, MemOperand(scratch));
4136
__ str(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
4139
// Register mapping: r3 is object map and r4 is function prototype.
4140
// Get prototype of object into r2.
4141
__ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
4143
// We don't need map any more. Use it as a scratch register.
4144
Register scratch2 = map;
4147
// Loop through the prototype chain looking for the function prototype.
4148
__ LoadRoot(scratch2, Heap::kNullValueRootIndex);
4150
__ cmp(scratch, Operand(prototype));
4151
__ b(eq, &is_instance);
4152
__ cmp(scratch, scratch2);
4153
__ b(eq, &is_not_instance);
4154
__ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
4155
__ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
4158
__ bind(&is_instance);
4159
if (!HasCallSiteInlineCheck()) {
4160
__ mov(r0, Operand(Smi::FromInt(0)));
4161
__ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
4163
// Patch the call site to return true.
4164
__ LoadRoot(r0, Heap::kTrueValueRootIndex);
4165
__ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4166
// Get the boolean result location in scratch and patch it.
4167
__ GetRelocatedValueLocation(inline_site, scratch);
4168
__ str(r0, MemOperand(scratch));
4170
if (!ReturnTrueFalseObject()) {
4171
__ mov(r0, Operand(Smi::FromInt(0)));
4174
__ Ret(HasArgsInRegisters() ? 0 : 2);
4176
__ bind(&is_not_instance);
4177
if (!HasCallSiteInlineCheck()) {
4178
__ mov(r0, Operand(Smi::FromInt(1)));
4179
__ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
4181
// Patch the call site to return false.
4182
__ LoadRoot(r0, Heap::kFalseValueRootIndex);
4183
__ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4184
// Get the boolean result location in scratch and patch it.
4185
__ GetRelocatedValueLocation(inline_site, scratch);
4186
__ str(r0, MemOperand(scratch));
4188
if (!ReturnTrueFalseObject()) {
4189
__ mov(r0, Operand(Smi::FromInt(1)));
4192
__ Ret(HasArgsInRegisters() ? 0 : 2);
4194
Label object_not_null, object_not_null_or_smi;
4195
__ bind(&not_js_object);
4196
// Before null, smi and string value checks, check that the rhs is a function,
// as an exception needs to be thrown for a non-function rhs.
4198
__ JumpIfSmi(function, &slow);
4199
__ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
4202
// Null is not instance of anything.
4203
__ cmp(scratch, Operand(masm->isolate()->factory()->null_value()));
4204
__ b(ne, &object_not_null);
4205
__ mov(r0, Operand(Smi::FromInt(1)));
4206
__ Ret(HasArgsInRegisters() ? 0 : 2);
4208
__ bind(&object_not_null);
4209
// Smi values are not instances of anything.
4210
__ JumpIfNotSmi(object, &object_not_null_or_smi);
4211
__ mov(r0, Operand(Smi::FromInt(1)));
4212
__ Ret(HasArgsInRegisters() ? 0 : 2);
4214
__ bind(&object_not_null_or_smi);
4215
// String values are not instances of anything.
4216
__ IsObjectJSStringType(object, scratch, &slow);
4217
__ mov(r0, Operand(Smi::FromInt(1)));
4218
__ Ret(HasArgsInRegisters() ? 0 : 2);
4220
// Slow-case. Tail call builtin.
4222
if (!ReturnTrueFalseObject()) {
4223
if (HasArgsInRegisters()) {
4226
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
4229
FrameScope scope(masm, StackFrame::INTERNAL);
4231
__ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
4233
__ cmp(r0, Operand::Zero());
4234
__ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
4235
__ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
4236
__ Ret(HasArgsInRegisters() ? 0 : 2);
4241
Register InstanceofStub::left() { return r0; }
4244
Register InstanceofStub::right() { return r1; }
4247
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
4248
// The displacement is the offset of the last parameter (if any)
4249
// relative to the frame pointer.
4250
static const int kDisplacement =
4251
StandardFrameConstants::kCallerSPOffset - kPointerSize;
4253
// Check that the key is a smi.
4255
__ JumpIfNotSmi(r1, &slow);
4257
// Check if the calling frame is an arguments adaptor frame.
4259
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4260
__ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
4261
__ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4264
// Check index against formal parameters count limit passed in
4265
// through register r0. Use unsigned comparison to get negative
4270
// Read the argument from the stack and return it.
4272
__ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
4273
__ ldr(r0, MemOperand(r3, kDisplacement));
4276
// Arguments adaptor case: Check index against actual arguments
4277
// limit found in the arguments adaptor frame. Use unsigned
4278
// comparison to get negative check for free.
4280
__ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4284
// Read the argument from the adaptor frame and return it.
4286
__ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
4287
__ ldr(r0, MemOperand(r3, kDisplacement));
4290
// Slow-case: Handle non-smi or out-of-bounds access to arguments
4291
// by calling the runtime system.
4294
__ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
4298
void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
4299
// sp[0] : number of parameters
4300
// sp[4] : receiver displacement
4303
// Check if the calling frame is an arguments adaptor frame.
4305
__ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4306
__ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
4307
__ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4310
// Patch the arguments.length and the parameters pointer in the current frame.
4311
__ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4312
__ str(r2, MemOperand(sp, 0 * kPointerSize));
4313
__ add(r3, r3, Operand(r2, LSL, 1));
4314
__ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
4315
__ str(r3, MemOperand(sp, 1 * kPointerSize));
4318
__ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4322
void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
4324
// sp[0] : number of parameters (tagged)
4325
// sp[4] : address of receiver argument
4327
// Registers used over whole function:
4328
// r6 : allocated object (tagged)
4329
// r9 : mapped parameter count (tagged)
4331
__ ldr(r1, MemOperand(sp, 0 * kPointerSize));
4332
// r1 = parameter count (tagged)
4334
// Check if the calling frame is an arguments adaptor frame.
4336
Label adaptor_frame, try_allocate;
4337
__ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4338
__ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
4339
__ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4340
__ b(eq, &adaptor_frame);
4342
// No adaptor, parameter count = argument count.
4344
__ b(&try_allocate);
4346
// We have an adaptor frame. Patch the parameters pointer.
4347
__ bind(&adaptor_frame);
4348
__ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4349
__ add(r3, r3, Operand(r2, LSL, 1));
4350
__ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
4351
__ str(r3, MemOperand(sp, 1 * kPointerSize));
4353
// r1 = parameter count (tagged)
4354
// r2 = argument count (tagged)
4355
// Compute the mapped parameter count = min(r1, r2) in r1.
4356
__ cmp(r1, Operand(r2));
4357
__ mov(r1, Operand(r2), LeaveCC, gt);
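// The conditional mov only fires when r1 > r2, leaving r1 holding
// min(parameter count, argument count).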
__ bind(&try_allocate);
4361
// Compute the sizes of backing store, parameter map, and arguments object.
4362
// 1. Parameter map, has 2 extra words containing context and backing store.
4363
const int kParameterMapHeaderSize =
4364
FixedArray::kHeaderSize + 2 * kPointerSize;
4365
// If there are no mapped parameters, we do not need the parameter_map.
4366
__ cmp(r1, Operand(Smi::FromInt(0)));
4367
__ mov(r9, Operand::Zero(), LeaveCC, eq);
4368
__ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne);
4369
__ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
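// r9 now holds the parameter map size in bytes: zero when there are no
// mapped parameters, otherwise parameter_count pointers (r1 is a smi, so
// "LSL 1" scales it to bytes) plus kParameterMapHeaderSize.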
// 2. Backing store.
4372
__ add(r9, r9, Operand(r2, LSL, 1));
4373
__ add(r9, r9, Operand(FixedArray::kHeaderSize));
4375
// 3. Arguments object.
4376
__ add(r9, r9, Operand(Heap::kArgumentsObjectSize));
4378
// Do the allocation of all three objects in one go.
4379
__ AllocateInNewSpace(r9, r0, r3, r4, &runtime, TAG_OBJECT);
4381
// r0 = address of new object(s) (tagged)
4382
// r2 = argument count (tagged)
4383
// Get the arguments boilerplate from the current (global) context into r4.
4384
const int kNormalOffset =
4385
Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
4386
const int kAliasedOffset =
4387
Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
4389
__ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_INDEX)));
4390
__ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
4391
__ cmp(r1, Operand::Zero());
4392
__ ldr(r4, MemOperand(r4, kNormalOffset), eq);
4393
__ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
4395
// r0 = address of new object (tagged)
4396
// r1 = mapped parameter count (tagged)
4397
// r2 = argument count (tagged)
4398
// r4 = address of boilerplate object (tagged)
4399
// Copy the JS object part.
4400
for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
4401
__ ldr(r3, FieldMemOperand(r4, i));
4402
__ str(r3, FieldMemOperand(r0, i));
4405
// Set up the callee in-object property.
4406
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
4407
__ ldr(r3, MemOperand(sp, 2 * kPointerSize));
4408
const int kCalleeOffset = JSObject::kHeaderSize +
4409
Heap::kArgumentsCalleeIndex * kPointerSize;
4410
__ str(r3, FieldMemOperand(r0, kCalleeOffset));
4412
// Use the length (smi tagged) and set that as an in-object property too.
4413
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4414
const int kLengthOffset = JSObject::kHeaderSize +
4415
Heap::kArgumentsLengthIndex * kPointerSize;
4416
__ str(r2, FieldMemOperand(r0, kLengthOffset));
4418
// Set up the elements pointer in the allocated arguments object.
4419
// If we allocated a parameter map, r4 will point there, otherwise
4420
// it will point to the backing store.
4421
__ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
4422
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
4424
// r0 = address of new object (tagged)
4425
// r1 = mapped parameter count (tagged)
4426
// r2 = argument count (tagged)
4427
// r4 = address of parameter map or backing store (tagged)
4428
// Initialize parameter map. If there are no mapped arguments, we're done.
4429
Label skip_parameter_map;
4430
__ cmp(r1, Operand(Smi::FromInt(0)));
4431
// Move backing store address to r3, because it is
4432
// expected there when filling in the unmapped arguments.
4433
__ mov(r3, r4, LeaveCC, eq);
4434
__ b(eq, &skip_parameter_map);
4436
__ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex);
4437
__ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
4438
__ add(r6, r1, Operand(Smi::FromInt(2)));
4439
__ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
4440
__ str(r8, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
4441
__ add(r6, r4, Operand(r1, LSL, 1));
4442
__ add(r6, r6, Operand(kParameterMapHeaderSize));
4443
__ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
4445
// Copy the parameter slots and the holes in the arguments.
4446
// We need to fill in mapped_parameter_count slots. They index the context,
4447
// where parameters are stored in reverse order, at
4448
// MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
// The mapped parameters thus need to get indices
4450
// MIN_CONTEXT_SLOTS+parameter_count-1 ..
4451
// MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
4452
// We loop from right to left.
4453
Label parameters_loop, parameters_test;
4455
__ ldr(r9, MemOperand(sp, 0 * kPointerSize));
4456
__ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
4457
__ sub(r9, r9, Operand(r1));
4458
__ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
4459
__ add(r3, r4, Operand(r6, LSL, 1));
4460
__ add(r3, r3, Operand(kParameterMapHeaderSize));
4462
// r6 = loop variable (tagged)
4463
// r1 = mapping index (tagged)
4464
// r3 = address of backing store (tagged)
4465
// r4 = address of parameter map (tagged)
4466
// r5 = temporary scratch (a.o., for address calculation)
4467
// r7 = the hole value
4468
__ jmp(&parameters_test);
__ bind(&parameters_loop);
4471
__ sub(r6, r6, Operand(Smi::FromInt(1)));
4472
__ mov(r5, Operand(r6, LSL, 1));
4473
__ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
4474
__ str(r9, MemOperand(r4, r5));
4475
__ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
4476
__ str(r7, MemOperand(r3, r5));
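// Each iteration stores the current context slot index (r9) into the
// parameter map and the hole (r7) into the matching backing store slot.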
__ add(r9, r9, Operand(Smi::FromInt(1)));
4478
__ bind(&parameters_test);
__ cmp(r6, Operand(Smi::FromInt(0)));
__ b(ne, &parameters_loop);
4482
__ bind(&skip_parameter_map);
4483
// r2 = argument count (tagged)
4484
// r3 = address of backing store (tagged)
4486
// Copy arguments header and remaining slots (if there are any).
4487
__ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
4488
__ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset));
4489
__ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset));
4491
Label arguments_loop, arguments_test;
4493
__ ldr(r4, MemOperand(sp, 1 * kPointerSize));
4494
__ sub(r4, r4, Operand(r9, LSL, 1));
4495
__ jmp(&arguments_test);
4497
__ bind(&arguments_loop);
4498
__ sub(r4, r4, Operand(kPointerSize));
4499
__ ldr(r6, MemOperand(r4, 0));
4500
__ add(r5, r3, Operand(r9, LSL, 1));
4501
__ str(r6, FieldMemOperand(r5, FixedArray::kHeaderSize));
4502
__ add(r9, r9, Operand(Smi::FromInt(1)));
4504
__ bind(&arguments_test);
4505
__ cmp(r9, Operand(r2));
4506
__ b(lt, &arguments_loop);
4508
// Return and remove the on-stack parameters.
4509
__ add(sp, sp, Operand(3 * kPointerSize));
4512
// Do the runtime call to allocate the arguments object.
4513
// r2 = argument count (tagged)
4515
__ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
4516
__ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4520
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
4521
// sp[0] : number of parameters
4522
// sp[4] : receiver displacement
4524
// Check if the calling frame is an arguments adaptor frame.
4525
Label adaptor_frame, try_allocate, runtime;
4526
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4527
__ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
4528
__ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4529
__ b(eq, &adaptor_frame);
4531
// Get the length from the frame.
4532
__ ldr(r1, MemOperand(sp, 0));
4533
__ b(&try_allocate);
4535
// Patch the arguments.length and the parameters pointer.
4536
__ bind(&adaptor_frame);
4537
__ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4538
__ str(r1, MemOperand(sp, 0));
4539
__ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
4540
__ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
4541
__ str(r3, MemOperand(sp, 1 * kPointerSize));
4543
// Try the new space allocation. Start out with computing the size
4544
// of the arguments object and the elements array in words.
4545
Label add_arguments_object;
4546
__ bind(&try_allocate);
4547
__ cmp(r1, Operand(0, RelocInfo::NONE));
4548
__ b(eq, &add_arguments_object);
4549
__ mov(r1, Operand(r1, LSR, kSmiTagSize));
4550
__ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
4551
__ bind(&add_arguments_object);
4552
__ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
4554
// Do the allocation of both objects in one go.
4555
__ AllocateInNewSpace(r1,
4560
static_cast<AllocationFlags>(TAG_OBJECT |
4563
// Get the arguments boilerplate from the current (global) context.
4564
__ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4565
__ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
4566
__ ldr(r4, MemOperand(r4, Context::SlotOffset(
4567
Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
4569
// Copy the JS object part.
4570
__ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
4572
// Get the length (smi tagged) and set that as an in-object property too.
4573
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4574
__ ldr(r1, MemOperand(sp, 0 * kPointerSize));
4575
__ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
4576
Heap::kArgumentsLengthIndex * kPointerSize));
4578
// If there are no actual arguments, we're done.
4580
__ cmp(r1, Operand(0, RelocInfo::NONE));
4583
// Get the parameters pointer from the stack.
4584
__ ldr(r2, MemOperand(sp, 1 * kPointerSize));
4586
// Set up the elements pointer in the allocated arguments object and
4587
// initialize the header in the elements fixed array.
4588
__ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
4589
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
4590
__ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
4591
__ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
4592
__ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
4593
// Untag the length for the loop.
4594
__ mov(r1, Operand(r1, LSR, kSmiTagSize));
4596
// Copy the fixed array slots.
4598
// Set up r4 to point to the first array slot.
4599
__ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4601
// Pre-decrement r2 with kPointerSize on each iteration.
4602
// Pre-decrement in order to skip receiver.
4603
__ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
4604
// Post-increment r4 with kPointerSize on each iteration.
4605
__ str(r3, MemOperand(r4, kPointerSize, PostIndex));
4606
__ sub(r1, r1, Operand(1));
4607
__ cmp(r1, Operand(0, RelocInfo::NONE));
4610
// Return and remove the on-stack parameters.
4612
__ add(sp, sp, Operand(3 * kPointerSize));
4615
// Do the runtime call to allocate the arguments object.
4617
__ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
4621
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time, or if the regexp entry in generated code is turned off by a runtime
// switch or at compilation.
4625
#ifdef V8_INTERPRETED_REGEXP
4626
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4627
#else // V8_INTERPRETED_REGEXP
4629
// Stack frame on entry.
4630
// sp[0]: last_match_info (expected JSArray)
4631
// sp[4]: previous index
4632
// sp[8]: subject string
4633
// sp[12]: JSRegExp object
4635
static const int kLastMatchInfoOffset = 0 * kPointerSize;
4636
static const int kPreviousIndexOffset = 1 * kPointerSize;
4637
static const int kSubjectOffset = 2 * kPointerSize;
4638
static const int kJSRegExpOffset = 3 * kPointerSize;
4640
Label runtime, invoke_regexp;
// Allocation of registers for this function. These are in callee-saved
// registers and will be preserved by the call to the native RegExp code, as
// this code is called using the normal C calling convention. When calling
// directly from generated code the native RegExp code will not do a GC and
// therefore the contents of these registers are safe to use after the call.
4647
Register subject = r4;
4648
Register regexp_data = r5;
4649
Register last_match_info_elements = r6;
4651
// Ensure that a RegExp stack is allocated.
4652
Isolate* isolate = masm->isolate();
4653
ExternalReference address_of_regexp_stack_memory_address =
4654
ExternalReference::address_of_regexp_stack_memory_address(isolate);
4655
ExternalReference address_of_regexp_stack_memory_size =
4656
ExternalReference::address_of_regexp_stack_memory_size(isolate);
4657
__ mov(r0, Operand(address_of_regexp_stack_memory_size));
4658
__ ldr(r0, MemOperand(r0, 0));
4659
__ tst(r0, Operand(r0));
4662
// Check that the first argument is a JSRegExp object.
4663
__ ldr(r0, MemOperand(sp, kJSRegExpOffset));
4664
STATIC_ASSERT(kSmiTag == 0);
4665
__ JumpIfSmi(r0, &runtime);
4666
__ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
4669
// Check that the RegExp has been compiled (data contains a fixed array).
4670
__ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
4671
if (FLAG_debug_code) {
4672
__ tst(regexp_data, Operand(kSmiTagMask));
4673
__ Check(ne, "Unexpected type for RegExp data, FixedArray expected");
4674
__ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
4675
__ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
4678
// regexp_data: RegExp data (FixedArray)
4679
// Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
4680
__ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
4681
__ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
4684
// regexp_data: RegExp data (FixedArray)
4685
// Check that the number of captures fit in the static offsets vector buffer.
4687
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
4688
// Calculate number of capture registers (number_of_captures + 1) * 2. This
// uses the assumption that smis are 2 * their untagged value.
4690
STATIC_ASSERT(kSmiTag == 0);
4691
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
4692
__ add(r2, r2, Operand(2)); // r2 was a smi.
4693
// Check that the static offsets vector buffer is large enough.
4694
__ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
4697
// r2: Number of capture registers
4698
// regexp_data: RegExp data (FixedArray)
4699
// Check that the second argument is a string.
4700
__ ldr(subject, MemOperand(sp, kSubjectOffset));
4701
__ JumpIfSmi(subject, &runtime);
4702
Condition is_string = masm->IsObjectStringType(subject, r0);
4703
__ b(NegateCondition(is_string), &runtime);
4704
// Get the length of the string to r3.
4705
__ ldr(r3, FieldMemOperand(subject, String::kLengthOffset));
4707
// r2: Number of capture registers
4708
// r3: Length of subject string as a smi
4709
// subject: Subject string
4710
// regexp_data: RegExp data (FixedArray)
4711
// Check that the third argument is a positive smi less than the subject
4712
// string length. A negative value will be greater (unsigned comparison).
4713
__ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
4714
__ JumpIfNotSmi(r0, &runtime);
4715
__ cmp(r3, Operand(r0));
4718
// r2: Number of capture registers
4719
// subject: Subject string
4720
// regexp_data: RegExp data (FixedArray)
4721
// Check that the fourth object is a JSArray object.
4722
__ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
4723
__ JumpIfSmi(r0, &runtime);
4724
__ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
4726
// Check that the JSArray is in fast case.
4727
__ ldr(last_match_info_elements,
4728
FieldMemOperand(r0, JSArray::kElementsOffset));
4729
__ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
4730
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
4733
// Check that the last match info has space for the capture registers and the
4734
// additional information.
4736
FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
4737
__ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
4738
__ cmp(r2, Operand(r0, ASR, kSmiTagSize));
4741
// Reset offset for possibly sliced string.
4742
__ mov(r9, Operand(0));
4743
// subject: Subject string
4744
// regexp_data: RegExp data (FixedArray)
4745
// Check the representation and encoding of the subject string.
4747
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
4748
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
4749
// First check for flat string. None of the following string type tests will
4750
// succeed if subject is not a string or a short external string.
4753
Operand(kIsNotStringMask |
4754
kStringRepresentationMask |
4755
kShortExternalStringMask),
4757
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
4758
__ b(eq, &seq_string);
4760
// subject: Subject string
4761
// regexp_data: RegExp data (FixedArray)
4762
// r1: whether subject is a string and if yes, its string representation
4763
// Check for flat cons string or sliced string.
4764
// A flat cons string is a cons string where the second part is the empty
4765
// string. In that case the subject string is just the first part of the cons
4766
// string. Also in this case the first part of the cons string is known to be
4767
// a sequential string or an external string.
4768
// In the case of a sliced string its offset has to be taken into account.
4769
Label cons_string, external_string, check_encoding;
4770
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
4771
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
4772
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
4773
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
4774
__ cmp(r1, Operand(kExternalStringTag));
4775
__ b(lt, &cons_string);
4776
__ b(eq, &external_string);
4778
// Catch non-string subject or short external string.
STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
4780
__ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
4783
// String is sliced.
4784
__ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
4785
__ mov(r9, Operand(r9, ASR, kSmiTagSize));
4786
__ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
4787
// r9: offset of sliced string, smi-tagged.
4788
__ jmp(&check_encoding);
4789
// String is a cons string, check whether it is flat.
4790
__ bind(&cons_string);
4791
__ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
4792
__ CompareRoot(r0, Heap::kEmptyStringRootIndex);
4794
__ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
4795
// Is first part of cons or parent of slice a flat string?
4796
__ bind(&check_encoding);
4797
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
4798
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
4799
STATIC_ASSERT(kSeqStringTag == 0);
4800
__ tst(r0, Operand(kStringRepresentationMask));
4801
__ b(ne, &external_string);
4803
__ bind(&seq_string);
4804
// subject: Subject string
4805
// regexp_data: RegExp data (FixedArray)
4806
// r0: Instance type of subject string
4807
STATIC_ASSERT(4 == kAsciiStringTag);
4808
STATIC_ASSERT(kTwoByteStringTag == 0);
4809
// Find the code object based on the assumptions above.
4810
__ and_(r0, r0, Operand(kStringEncodingMask));
4811
__ mov(r3, Operand(r0, ASR, 2), SetCC);
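// r0 held the encoding bit (4 for ASCII, 0 for two-byte); the shift leaves
// 1 or 0 in r3 and sets the flags used by the conditional loads below.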
__ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
4813
__ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
4815
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object; otherwise it
// contains a smi (code flushing support).
4818
__ JumpIfSmi(r7, &runtime);
4820
// r3: encoding of subject string (1 if ASCII, 0 if two_byte);
4822
// subject: Subject string
4823
// regexp_data: RegExp data (FixedArray)
4824
// Load used arguments before starting to push arguments for call to native
4825
// RegExp code to avoid handling changing stack height.
4826
__ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
4827
__ mov(r1, Operand(r1, ASR, kSmiTagSize));
4829
// r1: previous index
4830
// r3: encoding of subject string (1 if ASCII, 0 if two_byte);
4832
// subject: Subject string
4833
// regexp_data: RegExp data (FixedArray)
4834
// All checks done. Now push arguments for native regexp code.
4835
__ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
4837
// Isolates: note we add an additional parameter here (isolate pointer).
4838
static const int kRegExpExecuteArguments = 8;
4839
static const int kParameterRegisters = 4;
4840
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
4842
// Stack pointer now points to cell where return address is to be written.
4843
// Arguments are before that on the stack or in registers.
4845
// Argument 8 (sp[16]): Pass current isolate address.
4846
__ mov(r0, Operand(ExternalReference::isolate_address()));
4847
__ str(r0, MemOperand(sp, 4 * kPointerSize));
4849
// Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript.
4850
__ mov(r0, Operand(1));
4851
__ str(r0, MemOperand(sp, 3 * kPointerSize));
4853
// Argument 6 (sp[8]): Start (high end) of backtracking stack memory area.
4854
__ mov(r0, Operand(address_of_regexp_stack_memory_address));
4855
__ ldr(r0, MemOperand(r0, 0));
4856
__ mov(r2, Operand(address_of_regexp_stack_memory_size));
4857
__ ldr(r2, MemOperand(r2, 0));
4858
__ add(r0, r0, Operand(r2));
4859
__ str(r0, MemOperand(sp, 2 * kPointerSize));
4861
// Argument 5 (sp[4]): static offsets vector buffer.
4863
Operand(ExternalReference::address_of_static_offsets_vector(isolate)));
4864
__ str(r0, MemOperand(sp, 1 * kPointerSize));
4866
// For arguments 4 and 3 get string length, calculate start of string data and
4867
// calculate the shift of the index (0 for ASCII and 1 for two byte).
4868
__ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
4869
__ eor(r3, r3, Operand(1));
4870
// Load the length from the original subject string from the previous stack
4871
// frame. Therefore we have to use fp, which points exactly to two pointer
4872
// sizes below the previous sp. (Because creating a new stack frame pushes
4873
// the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
4874
__ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
4875
// If slice offset is not 0, load the length from the original sliced string.
4876
// Argument 4, r3: End of string data
4877
// Argument 3, r2: Start of string data
4878
// Prepare start and end index of the input.
4879
__ add(r9, r8, Operand(r9, LSL, r3));
4880
__ add(r2, r9, Operand(r1, LSL, r3));
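// r9 now points at the first character of the (possibly sliced) subject data
// and r2 at the character selected by the previous index, both scaled by the
// encoding shift in r3.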
__ ldr(r8, FieldMemOperand(subject, String::kLengthOffset));
4883
__ mov(r8, Operand(r8, ASR, kSmiTagSize));
4884
__ add(r3, r9, Operand(r8, LSL, r3));
4886
// Argument 2 (r1): Previous index.
4889
// Argument 1 (r0): Subject string.
4890
__ mov(r0, subject);
4892
// Locate the code entry and call it.
4893
__ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
4894
DirectCEntryStub stub;
4895
stub.GenerateCall(masm, r7);
4897
__ LeaveExitFrame(false, no_reg);
4900
// subject: subject string (callee saved)
4901
// regexp_data: RegExp data (callee saved)
4902
// last_match_info_elements: Last match info elements (callee saved)
4904
// Check the result.
4907
__ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
4910
__ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
4912
__ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
// If not exception, it can only be retry. Handle that in the runtime system.
// Result must now be exception. If there is no pending exception already, a
// stack overflow (on the backtrack stack) was detected in RegExp code, but
// the exception has not been created yet. Handle that in the runtime system.
4918
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
4919
__ mov(r1, Operand(isolate->factory()->the_hole_value()));
4920
__ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4922
__ ldr(r0, MemOperand(r2, 0));
4926
__ str(r1, MemOperand(r2, 0)); // Clear pending exception.
4928
// Check if the exception is a termination. If so, throw as uncatchable.
4929
__ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
4931
Label termination_exception;
4932
__ b(eq, &termination_exception);
4934
__ Throw(r0); // Expects thrown value in r0.
4936
__ bind(&termination_exception);
4937
__ ThrowUncatchable(TERMINATION, r0); // Expects thrown value in r0.
4940
// For failure and exception return null.
4941
__ mov(r0, Operand(masm->isolate()->factory()->null_value()));
4942
__ add(sp, sp, Operand(4 * kPointerSize));
4945
// Process the result from the native regexp code.
4948
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
4949
// Calculate number of capture registers (number_of_captures + 1) * 2.
4950
STATIC_ASSERT(kSmiTag == 0);
4951
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
4952
__ add(r1, r1, Operand(2)); // r1 was a smi.
4954
// r1: number of capture registers
4955
// r4: subject string
4956
// Store the capture count.
4957
__ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi.
4958
__ str(r2, FieldMemOperand(last_match_info_elements,
4959
RegExpImpl::kLastCaptureCountOffset));
4960
// Store last subject and last input.
4962
FieldMemOperand(last_match_info_elements,
4963
RegExpImpl::kLastSubjectOffset));
4964
__ mov(r2, subject);
4965
__ RecordWriteField(last_match_info_elements,
4966
RegExpImpl::kLastSubjectOffset,
4972
FieldMemOperand(last_match_info_elements,
4973
RegExpImpl::kLastInputOffset));
4974
__ RecordWriteField(last_match_info_elements,
4975
RegExpImpl::kLastInputOffset,
4981
// Get the static offsets vector filled by the native regexp code.
4982
ExternalReference address_of_static_offsets_vector =
4983
ExternalReference::address_of_static_offsets_vector(isolate);
4984
__ mov(r2, Operand(address_of_static_offsets_vector));
4986
// r1: number of capture registers
4987
// r2: offsets vector
4988
Label next_capture, done;
4989
// Capture register counter starts from number of capture registers and
// counts down until wrapping after zero.
4992
last_match_info_elements,
4993
Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
4994
__ bind(&next_capture);
4995
__ sub(r1, r1, Operand(1), SetCC);
4997
// Read the value from the static offsets vector buffer.
4998
__ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
4999
// Store the smi value in the last match info.
5000
__ mov(r3, Operand(r3, LSL, kSmiTagSize));
5001
__ str(r3, MemOperand(r0, kPointerSize, PostIndex));
5002
__ jmp(&next_capture);
5005
// Return last match info.
5006
__ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
5007
__ add(sp, sp, Operand(4 * kPointerSize));
5010
// External string. Short external strings have already been ruled out.
5012
__ bind(&external_string);
5013
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
5014
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
5015
if (FLAG_debug_code) {
5016
// Assert that we do not have a cons or slice (indirect strings) here.
5017
// Sequential strings have already been ruled out.
5018
__ tst(r0, Operand(kIsIndirectStringMask));
5019
__ Assert(eq, "external string expected, but not found");
5022
FieldMemOperand(subject, ExternalString::kResourceDataOffset));
5023
// Move the pointer so that offset-wise, it looks like a sequential string.
5024
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
5027
Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5028
__ jmp(&seq_string);
5030
// Do the runtime call to execute the regexp.
5032
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
5033
#endif // V8_INTERPRETED_REGEXP
5037
void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
5038
const int kMaxInlineLength = 100;
5041
Factory* factory = masm->isolate()->factory();
5043
__ ldr(r1, MemOperand(sp, kPointerSize * 2));
5044
STATIC_ASSERT(kSmiTag == 0);
5045
STATIC_ASSERT(kSmiTagSize == 1);
5046
__ JumpIfNotSmi(r1, &slowcase);
5047
__ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
5048
__ b(hi, &slowcase);
5049
// Smi-tagging is equivalent to multiplying by 2.
5050
// Allocate RegExpResult followed by FixedArray with size in ebx.
5051
// JSArray: [Map][empty properties][Elements][Length-smi][index][input]
5052
// Elements: [Map][Length][..elements..]
5053
// Size of JSArray with two in-object properties and the header of a
5056
(JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
5057
__ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
5058
__ add(r2, r5, Operand(objects_size));
5059
__ AllocateInNewSpace(
5060
r2, // In: Size, in words.
5061
r0, // Out: Start of allocation (tagged).
5062
r3, // Scratch register.
5063
r4, // Scratch register.
5065
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
5066
// r0: Start of allocated area, object-tagged.
5067
// r1: Number of elements in array, as smi.
5068
// r5: Number of elements, untagged.
5070
// Set JSArray map to global.regexp_result_map().
5071
// Set empty properties FixedArray.
5072
// Set elements to point to FixedArray allocated right after the JSArray.
5073
// Interleave operations for better latency.
5074
__ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
5075
__ add(r3, r0, Operand(JSRegExpResult::kSize));
5076
__ mov(r4, Operand(factory->empty_fixed_array()));
5077
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
5078
__ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
5079
__ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
5080
__ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
5081
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
5083
// Set input, index and length fields from arguments.
5084
__ ldr(r1, MemOperand(sp, kPointerSize * 0));
5085
__ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
5086
__ ldr(r1, MemOperand(sp, kPointerSize * 1));
5087
__ str(r1, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
5088
__ ldr(r1, MemOperand(sp, kPointerSize * 2));
5089
__ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
5091
// Fill out the elements FixedArray.
5092
// r0: JSArray, tagged.
5093
// r3: FixedArray, tagged.
5094
// r5: Number of elements in array, untagged.
5097
__ mov(r2, Operand(factory->fixed_array_map()));
5098
__ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
5099
// Set FixedArray length.
5100
__ mov(r6, Operand(r5, LSL, kSmiTagSize));
5101
__ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
5102
// Fill contents of fixed-array with the-hole.
5103
__ mov(r2, Operand(factory->the_hole_value()));
5104
__ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5105
// Fill fixed array elements with hole.
5106
// r0: JSArray, tagged.
5108
// r3: Start of elements in FixedArray.
5109
// r5: Number of elements to fill.
5111
__ tst(r5, Operand(r5));
5113
__ b(le, &done);  // Jump if r5 is negative or zero.
5114
__ sub(r5, r5, Operand(1), SetCC);
5115
__ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
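// The index is decremented before each store, so the array is filled with
// the hole from the last element down to element zero.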
__ add(sp, sp, Operand(3 * kPointerSize));
5123
__ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
5127
void CallFunctionStub::FinishCode(Handle<Code> code) {
5128
code->set_has_function_cache(false);
5132
void CallFunctionStub::Clear(Heap* heap, Address address) {
5137
Object* CallFunctionStub::GetCachedValue(Address address) {
5143
void CallFunctionStub::Generate(MacroAssembler* masm) {
5144
// r1 : the function to call
5145
Label slow, non_function;
5147
// The receiver might implicitly be the global object. This is
5148
// indicated by passing the hole as the receiver to the call
5150
if (ReceiverMightBeImplicit()) {
5152
// Get the receiver from the stack.
5153
// function, receiver [, arguments]
5154
__ ldr(r4, MemOperand(sp, argc_ * kPointerSize));
5155
// Call as function is indicated with the hole.
5156
__ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
5158
// Patch the receiver on the stack with the global receiver object.
5159
__ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
5160
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
5161
__ str(r2, MemOperand(sp, argc_ * kPointerSize));
5165
// Check that the function is really a JavaScript function.
5166
// r1: pushed function (to be verified)
5167
__ JumpIfSmi(r1, &non_function);
5168
// Get the map of the function object.
5169
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
5172
// Fast-case: Invoke the function now.
5173
// r1: pushed function
5174
ParameterCount actual(argc_);
5176
if (ReceiverMightBeImplicit()) {
5177
Label call_as_function;
5178
__ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
5179
__ b(eq, &call_as_function);
5180
__ InvokeFunction(r1,
5185
__ bind(&call_as_function);
5187
__ InvokeFunction(r1,
5193
// Slow-case: Non-function called.
5195
// Check for function proxy.
5196
__ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
5197
__ b(ne, &non_function);
5198
__ push(r1); // put proxy as additional argument
5199
__ mov(r0, Operand(argc_ + 1, RelocInfo::NONE));
5200
__ mov(r2, Operand(0, RelocInfo::NONE));
5201
__ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
5202
__ SetCallKind(r5, CALL_AS_METHOD);
5204
Handle<Code> adaptor =
5205
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
5206
__ Jump(adaptor, RelocInfo::CODE_TARGET);
5209
// CALL_NON_FUNCTION expects the non-function callee as receiver (instead
5210
// of the original receiver from the call site).
5211
__ bind(&non_function);
5212
__ str(r1, MemOperand(sp, argc_ * kPointerSize));
5213
__ mov(r0, Operand(argc_)); // Set up the number of arguments.
5214
__ mov(r2, Operand(0, RelocInfo::NONE));
5215
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
5216
__ SetCallKind(r5, CALL_AS_METHOD);
5217
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5218
RelocInfo::CODE_TARGET);
5222
// Unfortunately you have to run without snapshots to see most of these
5223
// names in the profile since most compare stubs end up in the snapshot.
5224
void CompareStub::PrintName(StringStream* stream) {
5225
ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
5226
(lhs_.is(r1) && rhs_.is(r0)));
5227
const char* cc_name;
5229
case lt: cc_name = "LT"; break;
5230
case gt: cc_name = "GT"; break;
5231
case le: cc_name = "LE"; break;
5232
case ge: cc_name = "GE"; break;
5233
case eq: cc_name = "EQ"; break;
5234
case ne: cc_name = "NE"; break;
5235
default: cc_name = "UnknownCondition"; break;
5237
bool is_equality = cc_ == eq || cc_ == ne;
5238
stream->Add("CompareStub_%s", cc_name);
5239
stream->Add(lhs_.is(r0) ? "_r0" : "_r1");
5240
stream->Add(rhs_.is(r0) ? "_r0" : "_r1");
5241
if (strict_ && is_equality) stream->Add("_STRICT");
5242
if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
5243
if (!include_number_compare_) stream->Add("_NO_NUMBER");
5244
if (!include_smi_compare_) stream->Add("_NO_SMI");
5248
int CompareStub::MinorKey() {
5249
// Encode the three parameters in a unique 16 bit value. To avoid duplicate
5250
// stubs the never NaN NaN condition is only taken into account if the
5251
// condition is equals.
5252
ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
5253
ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
5254
(lhs_.is(r1) && rhs_.is(r0)));
5255
return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
5256
| RegisterField::encode(lhs_.is(r0))
5257
| StrictField::encode(strict_)
5258
| NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
5259
| IncludeNumberCompareField::encode(include_number_compare_)
5260
| IncludeSmiCompareField::encode(include_smi_compare_);
5264
// StringCharCodeAtGenerator
5265
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
5268
Label got_char_code;
5269
Label sliced_string;
5271
// If the receiver is a smi trigger the non-string case.
5272
__ JumpIfSmi(object_, receiver_not_string_);
5274
// Fetch the instance type of the receiver into result register.
5275
__ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5276
__ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5277
// If the receiver is not a string trigger the non-string case.
5278
__ tst(result_, Operand(kIsNotStringMask));
5279
__ b(ne, receiver_not_string_);
5281
// If the index is non-smi trigger the non-smi case.
5282
__ JumpIfNotSmi(index_, &index_not_smi_);
5283
__ bind(&got_smi_index_);
5285
// Check for index out of range.
5286
__ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
5287
__ cmp(ip, Operand(index_));
5288
__ b(ls, index_out_of_range_);
5290
__ mov(index_, Operand(index_, ASR, kSmiTagSize));
5292
StringCharLoadGenerator::Generate(masm,
5298
__ mov(result_, Operand(result_, LSL, kSmiTagSize));
5303
void StringCharCodeAtGenerator::GenerateSlow(
5304
MacroAssembler* masm,
5305
const RuntimeCallHelper& call_helper) {
5306
__ Abort("Unexpected fallthrough to CharCodeAt slow case");
5308
// Index is not a smi.
5309
__ bind(&index_not_smi_);
5310
// If index is a heap number, try converting it to an integer.
5313
Heap::kHeapNumberMapRootIndex,
5316
call_helper.BeforeCall(masm);
5318
__ push(index_); // Consumed by runtime conversion function.
5319
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
5320
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
5322
ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
5323
// NumberToSmi discards numbers that are not exact integers.
5324
__ CallRuntime(Runtime::kNumberToSmi, 1);
5326
// Save the conversion result before the pop instructions below
5327
// have a chance to overwrite it.
5328
__ Move(index_, r0);
5330
// Reload the instance type.
5331
__ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5332
__ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5333
call_helper.AfterCall(masm);
5334
// If index is still not a smi, it must be out of range.
5335
__ JumpIfNotSmi(index_, index_out_of_range_);
5336
// Otherwise, return to the fast path.
5337
__ jmp(&got_smi_index_);
5339
// Call runtime. We get here when the receiver is a string and the
5340
// index is a number, but the code of getting the actual character
5341
// is too complex (e.g., when the string needs to be flattened).
5342
__ bind(&call_runtime_);
5343
call_helper.BeforeCall(masm);
5344
__ mov(index_, Operand(index_, LSL, kSmiTagSize));
5345
__ Push(object_, index_);
5346
__ CallRuntime(Runtime::kStringCharCodeAt, 2);
5347
__ Move(result_, r0);
5348
call_helper.AfterCall(masm);
5351
__ Abort("Unexpected fallthrough from CharCodeAt slow case");
5355
// -------------------------------------------------------------------------
5356
// StringCharFromCodeGenerator
5358
void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
5359
// Fast case of Heap::LookupSingleCharacterStringFromCode.
5360
STATIC_ASSERT(kSmiTag == 0);
5361
STATIC_ASSERT(kSmiShiftSize == 0);
5362
ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
5364
Operand(kSmiTagMask |
5365
((~String::kMaxAsciiCharCode) << kSmiTagSize)));
5366
__ b(ne, &slow_case_);
5368
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
5369
// At this point code register contains smi tagged ASCII char code.
5370
STATIC_ASSERT(kSmiTag == 0);
5371
__ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
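// code_ is a smi, so the shift scales the character code to a byte offset
// into the single character string cache loaded above.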
__ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
5373
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5374
__ cmp(result_, Operand(ip));
5375
__ b(eq, &slow_case_);
5380
void StringCharFromCodeGenerator::GenerateSlow(
5381
MacroAssembler* masm,
5382
const RuntimeCallHelper& call_helper) {
5383
__ Abort("Unexpected fallthrough to CharFromCode slow case");
5385
__ bind(&slow_case_);
5386
call_helper.BeforeCall(masm);
5388
__ CallRuntime(Runtime::kCharFromCode, 1);
5389
__ Move(result_, r0);
5390
call_helper.AfterCall(masm);
5393
__ Abort("Unexpected fallthrough from CharFromCode slow case");
5397
// -------------------------------------------------------------------------
5398
// StringCharAtGenerator
5400
void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
5401
char_code_at_generator_.GenerateFast(masm);
5402
char_from_code_generator_.GenerateFast(masm);
5406
void StringCharAtGenerator::GenerateSlow(
5407
MacroAssembler* masm,
5408
const RuntimeCallHelper& call_helper) {
5409
char_code_at_generator_.GenerateSlow(masm, call_helper);
5410
char_from_code_generator_.GenerateSlow(masm, call_helper);
5414
void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
5422
// This loop just copies one character at a time, as it is only used for very
5425
__ add(count, count, Operand(count), SetCC);
5427
__ cmp(count, Operand(0, RelocInfo::NONE));
5432
__ ldrb(scratch, MemOperand(src, 1, PostIndex));
5433
// Perform sub between load and dependent store to get the load time to
5435
__ sub(count, count, Operand(1), SetCC);
5436
__ strb(scratch, MemOperand(dest, 1, PostIndex));
5444
enum CopyCharactersFlags {
5446
DEST_ALWAYS_ALIGNED = 2
5450
void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
5460
bool ascii = (flags & COPY_ASCII) != 0;
5461
bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
5463
if (dest_always_aligned && FLAG_debug_code) {
5464
// Check that destination is actually word aligned if the flag says
5466
__ tst(dest, Operand(kPointerAlignmentMask));
5467
__ Check(eq, "Destination of copy not aligned.");
5470
const int kReadAlignment = 4;
5471
const int kReadAlignmentMask = kReadAlignment - 1;
5472
// Ensure that reading an entire aligned word containing the last character
5473
// of a string will not read outside the allocated area (because we pad up
5474
// to kObjectAlignment).
5475
STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
5476
// Assumes word reads and writes are little endian.
5477
// Nothing to do for zero characters.
5480
__ add(count, count, Operand(count), SetCC);
5482
__ cmp(count, Operand(0, RelocInfo::NONE));
5486
// Assume that you cannot read (or write) unaligned.
5488
// Must copy at least eight bytes, otherwise just do it one byte at a time.
5489
__ cmp(count, Operand(8));
5490
__ add(count, dest, Operand(count));
5491
Register limit = count; // Read until src equals this.
5492
__ b(lt, &byte_loop);
5494
if (!dest_always_aligned) {
5495
// Align dest by byte copying. Copies between zero and three bytes.
5496
__ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
5498
__ b(eq, &dest_aligned);
5499
__ cmp(scratch4, Operand(2));
5500
__ ldrb(scratch1, MemOperand(src, 1, PostIndex));
5501
__ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
5502
__ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
5503
__ strb(scratch1, MemOperand(dest, 1, PostIndex));
5504
__ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
5505
__ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
5506
__ bind(&dest_aligned);
5511
__ sub(scratch4, dest, Operand(src));
5512
__ and_(scratch4, scratch4, Operand(0x03), SetCC);
5513
__ b(eq, &simple_loop);
5514
// Shift register is number of bits in a source word that
5515
// must be combined with bits in the next source word in order
5516
// to create a destination word.
5518
// Complex loop for src/dst that are not aligned the same way.
5521
__ mov(scratch4, Operand(scratch4, LSL, 3));
5522
Register left_shift = scratch4;
5523
__ and_(src, src, Operand(~3)); // Round down to load previous word.
5524
__ ldr(scratch1, MemOperand(src, 4, PostIndex));
5525
// Store the "shift" most significant bits of scratch in the least
5526
// significant bits (i.e., shift down by (32 - shift)).
5527
__ rsb(scratch2, left_shift, Operand(32));
5528
Register right_shift = scratch2;
5529
__ mov(scratch1, Operand(scratch1, LSR, right_shift));
5532
__ ldr(scratch3, MemOperand(src, 4, PostIndex));
5533
__ sub(scratch5, limit, Operand(dest));
5534
__ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
5535
__ str(scratch1, MemOperand(dest, 4, PostIndex));
5536
__ mov(scratch1, Operand(scratch3, LSR, right_shift));
5537
// Loop if four or more bytes left to copy.
5538
// Compare to eight, because we did the subtract before increasing dst.
5539
__ sub(scratch5, scratch5, Operand(8), SetCC);
5542
// There is now between zero and three bytes left to copy (negative that
5543
// number is in scratch5), and between one and three bytes already read into
5544
// scratch1 (eight times that number in scratch4). We may have read past
5545
// the end of the string, but because objects are aligned, we have not read
5546
// past the end of the object.
5547
// Find the minimum of remaining characters to move and preloaded characters
5548
// and write those as bytes.
5549
__ add(scratch5, scratch5, Operand(4), SetCC);
5551
__ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
5552
// Move minimum of bytes read and bytes left to copy to scratch4.
5553
__ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
5554
// Between one and three (value in scratch5) characters already read into
5555
// scratch ready to write.
5556
__ cmp(scratch5, Operand(2));
5557
__ strb(scratch1, MemOperand(dest, 1, PostIndex));
5558
__ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
5559
__ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
5560
__ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
5561
__ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
5562
// Copy any remaining bytes.
5566
// Copy words from src to dst, until less than four bytes left.
5567
// Both src and dest are word aligned.
5568
__ bind(&simple_loop);
5572
__ ldr(scratch1, MemOperand(src, 4, PostIndex));
5573
__ sub(scratch3, limit, Operand(dest));
5574
__ str(scratch1, MemOperand(dest, 4, PostIndex));
5575
// Compare to 8, not 4, because we do the subtraction before increasing
// dst.
5577
__ cmp(scratch3, Operand(8));
5581
// Copy bytes from src to dst until dst hits limit.
5582
__ bind(&byte_loop);
5583
__ cmp(dest, Operand(limit));
5584
__ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
5586
__ strb(scratch1, MemOperand(dest, 1, PostIndex));
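// --------------------------------------------------------------------------
// Illustrative C++ sketch (not emitted by the stub) of the word-combining
// scheme the unaligned loop above implements: when src and dest disagree in
// word alignment, each destination word is built from the remaining bits of
// the previously loaded source word and the low bits of the next one.
// Assumes a little-endian, 32-bit target, like the generated code. The
// function name and signature are hypothetical.
static void CopyToAlignedDestSketch(unsigned* dest,
                                    const unsigned char* src,
                                    int n_words) {
  uintptr_t misalign = reinterpret_cast<uintptr_t>(src) & 3;
  if (misalign == 0) {
    // Same alignment: plain word copy (the &simple_loop case).
    const unsigned* s = reinterpret_cast<const unsigned*>(src);
    for (int i = 0; i < n_words; i++) dest[i] = s[i];
    return;
  }
  // drop_bits plays the role of right_shift above, keep_shift of left_shift.
  const int drop_bits = static_cast<int>(misalign) * 8;
  const int keep_shift = 32 - drop_bits;
  // Round src down to a word boundary and pre-load, discarding the bytes
  // that precede the real start (the stub relies on object padding to make
  // the rounded-down and trailing word reads safe).
  const unsigned* s = reinterpret_cast<const unsigned*>(src - misalign);
  unsigned carry = *s++ >> drop_bits;
  for (int i = 0; i < n_words; i++) {
    unsigned next = *s++;
    dest[i] = carry | (next << keep_shift);  // combine two source words.
    carry = next >> drop_bits;               // bits left over for next word.
  }
}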
void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5602
// Register scratch3 is the general scratch register in this function.
5603
Register scratch = scratch3;
5605
// Make sure that neither of the characters is a digit, as such strings use a
// different hash algorithm. Don't try to look for these in the symbol table.
5607
Label not_array_index;
5608
__ sub(scratch, c1, Operand(static_cast<int>('0')));
5609
__ cmp(scratch, Operand(static_cast<int>('9' - '0')));
5610
__ b(hi, &not_array_index);
5611
__ sub(scratch, c2, Operand(static_cast<int>('0')));
5612
__ cmp(scratch, Operand(static_cast<int>('9' - '0')));
5614
// If the check failed, combine both characters into a single halfword.
// This is required by the contract of the method: code at the
// not_found branch expects this combination in the c1 register.
5617
__ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
5618
__ b(ls, not_found);
5620
__ bind(&not_array_index);
5621
// Calculate the two character string hash.
5622
Register hash = scratch1;
5623
StringHelper::GenerateHashInit(masm, hash, c1);
5624
StringHelper::GenerateHashAddCharacter(masm, hash, c2);
5625
StringHelper::GenerateHashGetHash(masm, hash);
5627
// Collect the two characters in a register.
5628
Register chars = c1;
5629
__ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
5631
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5632
// hash: hash of two character string.
5634
// Load symbol table
5635
// Load address of first element of the symbol table.
5636
Register symbol_table = c2;
5637
__ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
5639
Register undefined = scratch4;
5640
__ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
5642
// Calculate capacity mask from the symbol table capacity.
5643
Register mask = scratch2;
5644
__ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
5645
__ mov(mask, Operand(mask, ASR, 1));
5646
__ sub(mask, mask, Operand(1));
5648
// Calculate untagged address of the first element of the symbol table.
5649
Register first_symbol_table_element = symbol_table;
5650
__ add(first_symbol_table_element, symbol_table,
5651
Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
5654
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5655
// hash: hash of two character string
5656
// mask: capacity mask
5657
// first_symbol_table_element: address of the first element of
5659
// undefined: the undefined object
5662
// Perform a number of probes in the symbol table.
5663
static const int kProbes = 4;
5664
Label found_in_symbol_table;
5665
Label next_probe[kProbes];
5666
Register candidate = scratch5; // Scratch register contains candidate.
5667
for (int i = 0; i < kProbes; i++) {
5668
// Calculate entry in symbol table.
5670
__ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
5672
__ mov(candidate, hash);
5675
__ and_(candidate, candidate, Operand(mask));
5677
// Load the entry from the symbol table.
5678
STATIC_ASSERT(SymbolTable::kEntrySize == 1);
5680
MemOperand(first_symbol_table_element,
5685
// If entry is undefined no string with this hash can be found.
5687
__ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE);
5688
__ b(ne, &is_string);
5690
__ cmp(undefined, candidate);
5691
__ b(eq, not_found);
5692
// Must be the hole (deleted entry).
5693
if (FLAG_debug_code) {
5694
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
5695
__ cmp(ip, candidate);
5696
__ Assert(eq, "oddball in symbol table is not undefined or the hole");
5698
__ jmp(&next_probe[i]);
5700
__ bind(&is_string);
5702
// Check that the candidate is a non-external ASCII string. The instance
5703
// type is still in the scratch register from the CompareObjectType
5705
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
5707
// If length is not 2 the string is not a candidate.
5708
__ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
5709
__ cmp(scratch, Operand(Smi::FromInt(2)));
5710
__ b(ne, &next_probe[i]);
5712
// Check if the two characters match.
5713
// Assumes that word load is little endian.
5714
__ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
5715
__ cmp(chars, scratch);
5716
__ b(eq, &found_in_symbol_table);
5717
__ bind(&next_probe[i]);
5720
// No matching 2 character string found by probing.
5723
// Scratch register contains result when we fall through to here.
5724
Register result = candidate;
5725
__ bind(&found_in_symbol_table);
5726
__ Move(r0, result);
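// --------------------------------------------------------------------------
// Illustrative sketch of the probe sequence generated above, written as plain
// C++. Assumptions of the sketch: an open-addressed table whose capacity is a
// power of two and probe offsets of the form i + i * i (the masked-index
// formula quoted in the dictionary lookup code further down); entries and the
// "undefined" sentinel are modelled as plain ints rather than heap objects,
// and the helper name is made up for illustration.
static int TwoCharSymbolProbeSketch(const int* entries, int capacity,
                                    unsigned hash, int key,
                                    int undefined_sentinel) {
  const unsigned mask = static_cast<unsigned>(capacity) - 1;  // capacity = 2^n.
  static const int kProbes = 4;  // same inline probe limit as the stub.
  for (int i = 0; i < kProbes; i++) {
    unsigned index = (hash + static_cast<unsigned>(i + i * i)) & mask;
    int candidate = entries[index];
    if (candidate == undefined_sentinel) return -1;  // not_found: impossible.
    if (candidate == key) return static_cast<int>(index);  // found_in_symbol_table.
    // A hole or a non-matching entry falls through to the next probe.
  }
  return -1;  // Give up after kProbes probes, as the stub does (not_found).
}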
void StringHelper::GenerateHashInit(MacroAssembler* masm,
5732
Register character) {
5733
// hash = seed + character;
5734
__ LoadRoot(hash, Heap::kHashSeedRootIndex);
5735
// Untag smi seed and add the character.
5736
__ add(hash, character, Operand(hash, LSR, kSmiTagSize));
5737
// hash += hash << 10;
5738
__ add(hash, hash, Operand(hash, LSL, 10));
5739
// hash ^= hash >> 6;
5740
__ eor(hash, hash, Operand(hash, LSR, 6));
5744
void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
5746
Register character) {
5747
// hash += character;
5748
__ add(hash, hash, Operand(character));
5749
// hash += hash << 10;
5750
__ add(hash, hash, Operand(hash, LSL, 10));
5751
// hash ^= hash >> 6;
5752
__ eor(hash, hash, Operand(hash, LSR, 6));
5756
void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
5758
// hash += hash << 3;
5759
__ add(hash, hash, Operand(hash, LSL, 3));
5760
// hash ^= hash >> 11;
5761
__ eor(hash, hash, Operand(hash, LSR, 11));
5762
// hash += hash << 15;
5763
__ add(hash, hash, Operand(hash, LSL, 15));
5765
__ and_(hash, hash, Operand(String::kHashBitMask), SetCC);
5767
// if (hash == 0) hash = 27;
5768
__ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq);
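// --------------------------------------------------------------------------
// Illustrative C++ sketch of the hash that GenerateHashInit,
// GenerateHashAddCharacter and GenerateHashGetHash compute together. The
// seed, the hash bit mask and the zero-hash replacement are parameters here
// because the stub reads them from the heap roots and from String /
// StringHasher constants; the concrete values are this sketch's assumptions.
static unsigned StringHashSketch(const unsigned char* chars, int length,
                                 unsigned seed, unsigned hash_bit_mask,
                                 unsigned zero_hash) {
  unsigned hash = seed;  // GenerateHashInit starts from the untagged seed...
  for (int i = 0; i < length; i++) {
    hash += chars[i];    // ...and folds in one character per step.
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  // GenerateHashGetHash: final mixing, then mask down to the hash field bits.
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= hash_bit_mask;
  // Zero is reserved, so a zero result is replaced (kZeroHash, 27 above).
  return hash == 0 ? zero_hash : hash;
}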
void SubStringStub::Generate(MacroAssembler* masm) {
5775
// Stack frame on entry.
5776
// lr: return address
5781
// This stub is called from the native-call %_SubString(...), so
5782
// nothing can be assumed about the arguments. It is tested that:
5783
// "string" is a sequential string,
5784
// both "from" and "to" are smis, and
5785
// 0 <= from <= to <= string.length.
5786
// If any of these assumptions fail, we call the runtime system.
5788
static const int kToOffset = 0 * kPointerSize;
5789
static const int kFromOffset = 1 * kPointerSize;
5790
static const int kStringOffset = 2 * kPointerSize;
5792
__ Ldrd(r2, r3, MemOperand(sp, kToOffset));
5793
STATIC_ASSERT(kFromOffset == kToOffset + 4);
5794
STATIC_ASSERT(kSmiTag == 0);
5795
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
5797
// I.e., arithmetic shift right by one un-smi-tags.
5798
__ mov(r2, Operand(r2, ASR, 1), SetCC);
5799
__ mov(r3, Operand(r3, ASR, 1), SetCC, cc);
5800
// If either to or from had the smi tag bit set, then carry is set now.
5801
__ b(cs, &runtime); // Either "from" or "to" is not a smi.
5802
__ b(mi, &runtime); // From is negative.
5804
// Both r2 and r3 are untagged integers.
5805
__ sub(r2, r2, Operand(r3), SetCC);
5806
__ b(mi, &runtime); // Fail if from > to.
5808
// Make sure first argument is a string.
5809
__ ldr(r0, MemOperand(sp, kStringOffset));
5810
STATIC_ASSERT(kSmiTag == 0);
5811
__ JumpIfSmi(r0, &runtime);
5812
Condition is_string = masm->IsObjectStringType(r0, r1);
5813
__ b(NegateCondition(is_string), &runtime);
5815
// Short-cut for the case of trivial substring.
5817
// r0: original string
5818
// r2: result string length
5819
__ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
5820
__ cmp(r2, Operand(r4, ASR, 1));
5821
__ b(eq, &return_r0);
5823
Label result_longer_than_two;
5824
// Check for special case of two character ASCII string, in which case
5825
// we do a lookup in the symbol table first.
5826
__ cmp(r2, Operand(2));
5827
__ b(gt, &result_longer_than_two);
5830
__ JumpIfInstanceTypeIsNotSequentialAscii(r1, r1, &runtime);
5832
// Get the two characters forming the sub string.
5833
__ add(r0, r0, Operand(r3));
5834
__ ldrb(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
5835
__ ldrb(r4, FieldMemOperand(r0, SeqAsciiString::kHeaderSize + 1));
5837
// Try to lookup two character string in symbol table.
5838
Label make_two_character_string;
5839
StringHelper::GenerateTwoCharacterSymbolTableProbe(
5840
masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
5843
// r2: result string length.
5844
// r3: two characters combined into halfword in little endian byte order.
5845
__ bind(&make_two_character_string);
5846
__ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
5847
__ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
5850
__ bind(&result_longer_than_two);
5851
// Deal with different string types: update the index if necessary
5852
// and put the underlying string into r5.
5853
// r0: original string
5854
// r1: instance type
5856
// r3: from index (untagged)
5857
Label underlying_unpacked, sliced_string, seq_or_external_string;
5858
// If the string is not indirect, it can only be sequential or external.
5859
STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
5860
STATIC_ASSERT(kIsIndirectStringMask != 0);
5861
__ tst(r1, Operand(kIsIndirectStringMask));
5862
__ b(eq, &seq_or_external_string);
5864
__ tst(r1, Operand(kSlicedNotConsMask));
5865
__ b(ne, &sliced_string);
5866
// Cons string. Check whether it is flat, then fetch first part.
5867
__ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
5868
__ CompareRoot(r5, Heap::kEmptyStringRootIndex);
5870
__ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
5871
// Update instance type.
5872
__ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
5873
__ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
5874
__ jmp(&underlying_unpacked);
5876
__ bind(&sliced_string);
5877
// Sliced string. Fetch parent and correct start index by offset.
5878
__ ldr(r5, FieldMemOperand(r0, SlicedString::kOffsetOffset));
5879
__ add(r3, r3, Operand(r5, ASR, 1));
5880
__ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
5881
// Update instance type.
5882
__ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
5883
__ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
5884
__ jmp(&underlying_unpacked);
5886
__ bind(&seq_or_external_string);
5887
// Sequential or external string. Just move string to the expected register.
5890
__ bind(&underlying_unpacked);
5892
if (FLAG_string_slices) {
5894
// r5: underlying subject string
5895
// r1: instance type of underlying subject string
5897
// r3: adjusted start index (untagged)
5898
__ cmp(r2, Operand(SlicedString::kMinLength));
5899
// Short slice. Copy instead of slicing.
5900
__ b(lt, &copy_routine);
5901
// Allocate new sliced string. At this point we do not reload the instance
5902
// type including the string encoding because we simply rely on the info
5903
// provided by the original string. It does not matter if the original
5904
// string's encoding is wrong because we always have to recheck encoding of
5905
// the newly created string's parent anyways due to externalized strings.
5906
Label two_byte_slice, set_slice_header;
5907
STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
5908
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
5909
__ tst(r1, Operand(kStringEncodingMask));
5910
__ b(eq, &two_byte_slice);
5911
__ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime);
5912
__ jmp(&set_slice_header);
5913
__ bind(&two_byte_slice);
5914
__ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime);
5915
__ bind(&set_slice_header);
5916
__ mov(r3, Operand(r3, LSL, 1));
5917
__ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
5918
__ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
5921
__ bind(&copy_routine);
5924
// r5: underlying subject string
5925
// r1: instance type of underlying subject string
5927
// r3: adjusted start index (untagged)
5928
Label two_byte_sequential, sequential_string, allocate_result;
5929
STATIC_ASSERT(kExternalStringTag != 0);
5930
STATIC_ASSERT(kSeqStringTag == 0);
5931
__ tst(r1, Operand(kExternalStringTag));
5932
__ b(eq, &sequential_string);
5934
// Handle external string.
5935
// Rule out short external strings.
5936
STATIC_CHECK(kShortExternalStringTag != 0);
5937
__ tst(r1, Operand(kShortExternalStringTag));
5939
__ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset));
5940
// r5 already points to the first character of underlying string.
5941
__ jmp(&allocate_result);
5943
__ bind(&sequential_string);
5944
// Locate first character of underlying subject string.
5945
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
5946
__ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5948
__ bind(&allocate_result);
5949
// Sequential ASCII string. Allocate the result.
5950
STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
5951
__ tst(r1, Operand(kStringEncodingMask));
5952
__ b(eq, &two_byte_sequential);
5954
// Allocate and copy the resulting ASCII string.
5955
__ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime);
5957
// Locate first character of substring to copy.
5959
// Locate first character of result.
5960
__ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5962
// r0: result string
5963
// r1: first character of result string
5964
// r2: result string length
5965
// r5: first character of substring to copy
5966
STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
5967
StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
5968
COPY_ASCII | DEST_ALWAYS_ALIGNED);
5971
// Allocate and copy the resulting two-byte string.
5972
__ bind(&two_byte_sequential);
5973
__ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime);
5975
// Locate first character of substring to copy.
5976
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
5977
__ add(r5, r5, Operand(r3, LSL, 1));
5978
// Locate first character of result.
5979
__ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5981
// r0: result string.
5982
// r1: first character of result.
5983
// r2: result length.
5984
// r5: first character of substring to copy.
5985
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
5986
StringHelper::GenerateCopyCharactersLong(
5987
masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
5989
__ bind(&return_r0);
5990
Counters* counters = masm->isolate()->counters();
5991
__ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
5992
__ add(sp, sp, Operand(3 * kPointerSize));
5995
// Just jump to runtime to create the sub string.
5997
__ TailCallRuntime(Runtime::kSubString, 3, 1);
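// --------------------------------------------------------------------------
// Plain C++ sketch of the argument validation at the top of this stub: both
// smi tags are exposed by the arithmetic shift that also untags, "from" must
// be non-negative, and from <= to. Assumes 32-bit smis with a zero tag in the
// low bit, as the STATIC_ASSERTs above state; the helper is hypothetical.
static bool SubStringArgsValidSketch(int to_tagged, int from_tagged,
                                     int* to, int* from) {
  if (((to_tagged | from_tagged) & 1) != 0) return false;  // not both smis.
  *to = to_tagged >> 1;      // arithmetic shift right by one un-smi-tags.
  *from = from_tagged >> 1;
  if (*from < 0) return false;       // "from" is negative.
  if (*to < *from) return false;     // from > to.
  return true;                       // 0 <= from <= to; length = to - from.
}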
void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
6006
Register scratch3) {
6007
Register length = scratch1;
6010
Label strings_not_equal, check_zero_length;
6011
__ ldr(length, FieldMemOperand(left, String::kLengthOffset));
6012
__ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
6013
__ cmp(length, scratch2);
6014
__ b(eq, &check_zero_length);
6015
__ bind(&strings_not_equal);
6016
__ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
6019
// Check if the length is zero.
6020
Label compare_chars;
6021
__ bind(&check_zero_length);
6022
STATIC_ASSERT(kSmiTag == 0);
6023
__ tst(length, Operand(length));
6024
__ b(ne, &compare_chars);
6025
__ mov(r0, Operand(Smi::FromInt(EQUAL)));
6028
// Compare characters.
6029
__ bind(&compare_chars);
6030
GenerateAsciiCharsCompareLoop(masm,
6031
left, right, length, scratch2, scratch3,
6032
&strings_not_equal);
6034
// Characters are equal.
6035
__ mov(r0, Operand(Smi::FromInt(EQUAL)));
6040
void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
6046
Register scratch4) {
6047
Label result_not_equal, compare_lengths;
6048
// Find minimum length and length difference.
6049
__ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
6050
__ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
6051
__ sub(scratch3, scratch1, Operand(scratch2), SetCC);
6052
Register length_delta = scratch3;
6053
__ mov(scratch1, scratch2, LeaveCC, gt);
6054
Register min_length = scratch1;
6055
STATIC_ASSERT(kSmiTag == 0);
6056
__ tst(min_length, Operand(min_length));
6057
__ b(eq, &compare_lengths);
6060
GenerateAsciiCharsCompareLoop(masm,
6061
left, right, min_length, scratch2, scratch4,
6064
// Compare lengths - strings up to min-length are equal.
6065
__ bind(&compare_lengths);
6066
ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
6067
// Use length_delta as result if it's zero.
6068
__ mov(r0, Operand(length_delta), SetCC);
6069
__ bind(&result_not_equal);
6070
// Conditionally update the result based either on length_delta or
6071
// the last comparison performed in the loop above.
6072
__ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
6073
__ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
6078
void StringCompareStub::GenerateAsciiCharsCompareLoop(
6079
MacroAssembler* masm,
6085
Label* chars_not_equal) {
6086
// Change index to run from -length to -1 by adding length to string
6087
// start. This means that loop ends when index reaches zero, which
6088
// doesn't need an additional compare.
6089
__ SmiUntag(length);
6090
__ add(scratch1, length,
6091
Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6092
__ add(left, left, Operand(scratch1));
6093
__ add(right, right, Operand(scratch1));
6094
__ rsb(length, length, Operand::Zero());
6095
Register index = length; // index = -length;
6100
__ ldrb(scratch1, MemOperand(left, index));
6101
__ ldrb(scratch2, MemOperand(right, index));
6102
__ cmp(scratch1, scratch2);
6103
__ b(ne, chars_not_equal);
6104
__ add(index, index, Operand(1), SetCC);
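// --------------------------------------------------------------------------
// Illustrative C++ sketch of the loop structure above: both pointers are
// advanced to one past the last character and the index runs from -length up
// to 0, so the flag-setting increment doubles as the termination test and no
// separate compare of the index is needed. Helper name is made up.
static bool AsciiCharsEqualSketch(const char* left, const char* right,
                                  int length) {
  left += length;                    // point just past the last character.
  right += length;
  for (int index = -length; index != 0; index++) {
    if (left[index] != right[index]) return false;  // chars_not_equal.
  }
  return true;                       // all characters matched.
}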
void StringCompareStub::Generate(MacroAssembler* masm) {
6112
Counters* counters = masm->isolate()->counters();
6114
// Stack frame on entry.
6115
// sp[0]: right string
6116
// sp[4]: left string
6117
__ Ldrd(r0 , r1, MemOperand(sp)); // Load right in r0, left in r1.
6121
__ b(ne, &not_same);
6122
STATIC_ASSERT(EQUAL == 0);
6123
STATIC_ASSERT(kSmiTag == 0);
6124
__ mov(r0, Operand(Smi::FromInt(EQUAL)));
6125
__ IncrementCounter(counters->string_compare_native(), 1, r1, r2);
6126
__ add(sp, sp, Operand(2 * kPointerSize));
6131
// Check that both objects are sequential ASCII strings.
6132
__ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);
6134
// Compare flat ASCII strings natively. Remove arguments from stack first.
6135
__ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
6136
__ add(sp, sp, Operand(2 * kPointerSize));
6137
GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);
6139
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
6140
// tagged as a small integer.
6142
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6146
void StringAddStub::Generate(MacroAssembler* masm) {
6147
Label call_runtime, call_builtin;
6148
Builtins::JavaScript builtin_id = Builtins::ADD;
6150
Counters* counters = masm->isolate()->counters();
6153
// sp[0]: second argument (right).
6154
// sp[4]: first argument (left).
6156
// Load the two arguments.
6157
__ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument.
6158
__ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
6160
// Make sure that both arguments are strings if not known in advance.
6161
if (flags_ == NO_STRING_ADD_FLAGS) {
6162
__ JumpIfEitherSmi(r0, r1, &call_runtime);
6163
// Load instance types.
6164
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
6165
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
6166
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
6167
__ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
6168
STATIC_ASSERT(kStringTag == 0);
6169
// If either is not a string, go to runtime.
6170
__ tst(r4, Operand(kIsNotStringMask));
6171
__ tst(r5, Operand(kIsNotStringMask), eq);
6172
__ b(ne, &call_runtime);
6174
// Here at least one of the arguments is definitely a string.
6175
// We convert the one that is not known to be a string.
6176
if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
6177
ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
6178
GenerateConvertArgument(
6179
masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
6180
builtin_id = Builtins::STRING_ADD_RIGHT;
6181
} else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
6182
ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
6183
GenerateConvertArgument(
6184
masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
6185
builtin_id = Builtins::STRING_ADD_LEFT;
6189
// Both arguments are strings.
6191
// r1: second string
6192
// r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6193
// r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6195
Label strings_not_empty;
6196
// Check if either of the strings are empty. In that case return the other.
6197
__ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
6198
__ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
6199
STATIC_ASSERT(kSmiTag == 0);
6200
__ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty.
6201
__ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second.
6202
STATIC_ASSERT(kSmiTag == 0);
6203
// Else test if second string is empty.
6204
__ cmp(r3, Operand(Smi::FromInt(0)), ne);
6205
__ b(ne, &strings_not_empty); // If either string was empty, return r0.
6207
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
6208
__ add(sp, sp, Operand(2 * kPointerSize));
6211
__ bind(&strings_not_empty);
6214
__ mov(r2, Operand(r2, ASR, kSmiTagSize));
6215
__ mov(r3, Operand(r3, ASR, kSmiTagSize));
6216
// Both strings are non-empty.
6218
// r1: second string
6219
// r2: length of first string
6220
// r3: length of second string
6221
// r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6222
// r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6223
// Look at the length of the result of adding the two strings.
6224
Label string_add_flat_result, longer_than_two;
6225
// Adding two lengths can't overflow.
6226
STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
6227
__ add(r6, r2, Operand(r3));
6228
// Use the symbol table when adding two one-character strings, as it
6229
// helps later optimizations to return a symbol here.
6230
__ cmp(r6, Operand(2));
6231
__ b(ne, &longer_than_two);
6233
// Check that both strings are non-external ASCII strings.
6234
if (flags_ != NO_STRING_ADD_FLAGS) {
6235
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
6236
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
6237
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
6238
__ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
6240
__ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
6243
// Get the two characters forming the sub string.
6244
__ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
6245
__ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
6247
// Try to lookup two character string in symbol table. If it is not found
6248
// just allocate a new one.
6249
Label make_two_character_string;
6250
StringHelper::GenerateTwoCharacterSymbolTableProbe(
6251
masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
6252
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
6253
__ add(sp, sp, Operand(2 * kPointerSize));
6256
__ bind(&make_two_character_string);
6257
// Resulting string has length 2 and first chars of two strings
6258
// are combined into single halfword in r2 register.
6259
// So we can fill resulting string without two loops by a single
6260
// halfword store instruction (which assumes that processor is
6261
// in a little endian mode)
6262
__ mov(r6, Operand(2));
6263
__ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
6264
__ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
6265
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
6266
__ add(sp, sp, Operand(2 * kPointerSize));
6269
__ bind(&longer_than_two);
6270
// Check if resulting string will be flat.
6271
__ cmp(r6, Operand(ConsString::kMinLength));
6272
__ b(lt, &string_add_flat_result);
6273
// Handle exceptionally long strings in the runtime system.
6274
STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
6275
ASSERT(IsPowerOf2(String::kMaxLength + 1));
6276
// kMaxLength + 1 is representable as a shifted literal, kMaxLength is not.
6277
__ cmp(r6, Operand(String::kMaxLength + 1));
6278
__ b(hs, &call_runtime);
6280
// If result is not supposed to be flat, allocate a cons string object.
6281
// If both strings are ASCII the result is an ASCII cons string.
6282
if (flags_ != NO_STRING_ADD_FLAGS) {
6283
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
6284
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
6285
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
6286
__ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
6288
Label non_ascii, allocated, ascii_data;
6289
STATIC_ASSERT(kTwoByteStringTag == 0);
6290
__ tst(r4, Operand(kStringEncodingMask));
6291
__ tst(r5, Operand(kStringEncodingMask), ne);
6292
__ b(eq, &non_ascii);
6294
// Allocate an ASCII cons string.
6295
__ bind(&ascii_data);
6296
__ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime);
6297
__ bind(&allocated);
6298
// Fill the fields of the cons string.
6299
__ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
6300
__ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
6301
__ mov(r0, Operand(r7));
6302
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
6303
__ add(sp, sp, Operand(2 * kPointerSize));
6306
__ bind(&non_ascii);
6307
// At least one of the strings is two-byte. Check whether it happens
6308
// to contain only ASCII characters.
6309
// r4: first instance type.
6310
// r5: second instance type.
6311
__ tst(r4, Operand(kAsciiDataHintMask));
6312
__ tst(r5, Operand(kAsciiDataHintMask), ne);
6313
__ b(ne, &ascii_data);
6314
__ eor(r4, r4, Operand(r5));
6315
STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
6316
__ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
6317
__ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
6318
__ b(eq, &ascii_data);
6320
// Allocate a two byte cons string.
6321
__ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime);
6324
// We cannot encounter sliced strings or cons strings here since:
6325
STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
6326
// Handle creating a flat result from either external or sequential strings.
6327
// Locate the first characters' locations.
6329
// r1: second string
6330
// r2: length of first string
6331
// r3: length of second string
6332
// r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6333
// r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6334
// r6: sum of lengths.
6335
Label first_prepared, second_prepared;
6336
__ bind(&string_add_flat_result);
6337
if (flags_ != NO_STRING_ADD_FLAGS) {
6338
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
6339
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
6340
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
6341
__ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
6344
// Check whether both strings have same encoding
6345
__ eor(r7, r4, Operand(r5));
6346
__ tst(r7, Operand(kStringEncodingMask));
6347
__ b(ne, &call_runtime);
6349
STATIC_ASSERT(kSeqStringTag == 0);
6350
__ tst(r4, Operand(kStringRepresentationMask));
6351
STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
6354
Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
6357
__ b(eq, &first_prepared);
6358
// External string: rule out short external string and load string resource.
6359
STATIC_ASSERT(kShortExternalStringTag != 0);
6360
__ tst(r4, Operand(kShortExternalStringMask));
6361
__ b(ne, &call_runtime);
6362
__ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
6363
__ bind(&first_prepared);
6365
STATIC_ASSERT(kSeqStringTag == 0);
6366
__ tst(r5, Operand(kStringRepresentationMask));
6367
STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
6370
Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
6373
__ b(eq, &second_prepared);
6374
// External string: rule out short external string and load string resource.
6375
STATIC_ASSERT(kShortExternalStringTag != 0);
6376
__ tst(r5, Operand(kShortExternalStringMask));
6377
__ b(ne, &call_runtime);
6378
__ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset));
6379
__ bind(&second_prepared);
6381
Label non_ascii_string_add_flat_result;
6382
// r7: first character of first string
6383
// r1: first character of second string
6384
// r2: length of first string.
6385
// r3: length of second string.
6386
// r6: sum of lengths.
6387
// Both strings have the same encoding.
6388
STATIC_ASSERT(kTwoByteStringTag == 0);
6389
__ tst(r5, Operand(kStringEncodingMask));
6390
__ b(eq, &non_ascii_string_add_flat_result);
6392
__ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
6393
__ add(r6, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6394
// r0: result string.
6395
// r7: first character of first string.
6396
// r1: first character of second string.
6397
// r2: length of first string.
6398
// r3: length of second string.
6399
// r6: first character of result.
6400
StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, true);
6401
// r6: next character of result.
6402
StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
6403
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
6404
__ add(sp, sp, Operand(2 * kPointerSize));
6407
__ bind(&non_ascii_string_add_flat_result);
6408
__ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime);
6409
__ add(r6, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6410
// r0: result string.
6411
// r7: first character of first string.
6412
// r1: first character of second string.
6413
// r2: length of first string.
6414
// r3: length of second string.
6415
// r6: first character of result.
6416
StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false);
6417
// r6: next character of result.
6418
StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
6419
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
6420
__ add(sp, sp, Operand(2 * kPointerSize));
6423
// Just jump to runtime to add the two strings.
6424
__ bind(&call_runtime);
6425
__ TailCallRuntime(Runtime::kStringAdd, 2, 1);
6427
if (call_builtin.is_linked()) {
6428
__ bind(&call_builtin);
6429
__ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
6434
void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
6442
// First check if the argument is already a string.
6443
Label not_string, done;
6444
__ JumpIfSmi(arg, &not_string);
6445
__ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE);
6448
// Check the number to string cache.
6450
__ bind(&not_string);
6451
// Puts the cached result into scratch1.
6452
NumberToStringStub::GenerateLookupNumberStringCache(masm,
6460
__ mov(arg, scratch1);
6461
__ str(arg, MemOperand(sp, stack_offset));
6464
// Check if the argument is a safe string wrapper.
6465
__ bind(&not_cached);
6466
__ JumpIfSmi(arg, slow);
6467
__ CompareObjectType(
6468
arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1.
6470
__ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
6472
scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
6474
Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
6476
__ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset));
6477
__ str(arg, MemOperand(sp, stack_offset));
6483
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
6484
ASSERT(state_ == CompareIC::SMIS);
6487
__ JumpIfNotSmi(r2, &miss);
6489
if (GetCondition() == eq) {
6490
// For equality we do not care about the sign of the result.
6491
__ sub(r0, r0, r1, SetCC);
6493
// Untag before subtracting to avoid handling overflow.
6495
__ sub(r0, r1, SmiUntagOperand(r0));
6504
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
6505
ASSERT(state_ == CompareIC::HEAP_NUMBERS);
6510
__ and_(r2, r1, Operand(r0));
6511
__ JumpIfSmi(r2, &generic_stub);
6513
__ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
6515
__ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
6518
// Inlining the double comparison and falling back to the general compare
6519
// stub if NaN is involved or VFP3 is unsupported.
6520
if (CpuFeatures::IsSupported(VFP3)) {
6521
CpuFeatures::Scope scope(VFP3);
6523
// Load left and right operand
6524
__ sub(r2, r1, Operand(kHeapObjectTag));
6525
__ vldr(d0, r2, HeapNumber::kValueOffset);
6526
__ sub(r2, r0, Operand(kHeapObjectTag));
6527
__ vldr(d1, r2, HeapNumber::kValueOffset);
6530
__ VFPCompareAndSetFlags(d0, d1);
6532
// Don't base result on status bits when a NaN is involved.
6533
__ b(vs, &unordered);
6535
// Return a result of -1, 0, or 1, based on status bits.
6536
__ mov(r0, Operand(EQUAL), LeaveCC, eq);
6537
__ mov(r0, Operand(LESS), LeaveCC, lt);
6538
__ mov(r0, Operand(GREATER), LeaveCC, gt);
6541
__ bind(&unordered);
6544
CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
6545
__ bind(&generic_stub);
6546
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
6553
void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
6554
ASSERT(state_ == CompareIC::SYMBOLS);
6557
// Registers containing left and right operands respectively.
6559
Register right = r0;
6563
// Check that both operands are heap objects.
6564
__ JumpIfEitherSmi(left, right, &miss);
6566
// Check that both operands are symbols.
6567
__ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6568
__ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6569
__ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6570
__ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6571
STATIC_ASSERT(kSymbolTag != 0);
6572
__ and_(tmp1, tmp1, Operand(tmp2));
6573
__ tst(tmp1, Operand(kIsSymbolMask));
6576
// Symbols are compared by identity.
6577
__ cmp(left, right);
6578
// Make sure r0 is non-zero. At this point input operands are
6579
// guaranteed to be non-zero.
6580
ASSERT(right.is(r0));
6581
STATIC_ASSERT(EQUAL == 0);
6582
STATIC_ASSERT(kSmiTag == 0);
6583
__ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
6591
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
6592
ASSERT(state_ == CompareIC::STRINGS);
6595
// Registers containing left and right operands respectively.
6597
Register right = r0;
6603
// Check that both operands are heap objects.
6604
__ JumpIfEitherSmi(left, right, &miss);
6606
// Check that both operands are strings. This leaves the instance
6607
// types loaded in tmp1 and tmp2.
6608
__ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6609
__ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6610
__ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6611
__ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6612
STATIC_ASSERT(kNotStringTag != 0);
6613
__ orr(tmp3, tmp1, tmp2);
6614
__ tst(tmp3, Operand(kIsNotStringMask));
6617
// Fast check for identical strings.
6618
__ cmp(left, right);
6619
STATIC_ASSERT(EQUAL == 0);
6620
STATIC_ASSERT(kSmiTag == 0);
6621
__ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
6624
// Handle not identical strings.
6626
// Check that both strings are symbols. If they are, we're done
6627
// because we already know they are not identical.
6628
ASSERT(GetCondition() == eq);
6629
STATIC_ASSERT(kSymbolTag != 0);
6630
__ and_(tmp3, tmp1, Operand(tmp2));
6631
__ tst(tmp3, Operand(kIsSymbolMask));
6632
// Make sure r0 is non-zero. At this point input operands are
6633
// guaranteed to be non-zero.
6634
ASSERT(right.is(r0));
6637
// Check that both strings are sequential ASCII.
6639
__ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
6642
// Compare flat ASCII strings. Returns when done.
6643
StringCompareStub::GenerateFlatAsciiStringEquals(
6644
masm, left, right, tmp1, tmp2, tmp3);
6646
// Handle more complex cases in runtime.
6648
__ Push(left, right);
6649
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
6656
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
6657
ASSERT(state_ == CompareIC::OBJECTS);
6659
__ and_(r2, r1, Operand(r0));
6660
__ JumpIfSmi(r2, &miss);
6662
__ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
6664
__ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
6667
ASSERT(GetCondition() == eq);
6668
__ sub(r0, r0, Operand(r1));
6676
void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
6678
__ and_(r2, r1, Operand(r0));
6679
__ JumpIfSmi(r2, &miss);
6680
__ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
6681
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
6682
__ cmp(r2, Operand(known_map_));
6684
__ cmp(r3, Operand(known_map_));
6687
__ sub(r0, r0, Operand(r1));
6696
void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
6698
// Call the runtime system in a fresh internal frame.
6699
ExternalReference miss =
6700
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
6702
FrameScope scope(masm, StackFrame::INTERNAL);
6706
__ mov(ip, Operand(Smi::FromInt(op_)));
6708
__ CallExternalReference(miss, 3);
6709
// Compute the entry point of the rewritten stub.
6710
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
6711
// Restore registers.
6721
void DirectCEntryStub::Generate(MacroAssembler* masm) {
6722
__ ldr(pc, MemOperand(sp, 0));
6726
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
6727
ExternalReference function) {
6728
__ mov(r2, Operand(function));
6729
GenerateCall(masm, r2);
6733
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
6735
__ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
6736
RelocInfo::CODE_TARGET));
6737
// Push return address (accessible to GC through exit frame pc).
6738
// Note that using pc with str is deprecated.
6741
__ add(ip, pc, Operand(Assembler::kInstrSize));
6742
__ str(ip, MemOperand(sp, 0));
6743
__ Jump(target); // Call the C++ function.
6744
ASSERT_EQ(Assembler::kInstrSize + Assembler::kPcLoadDelta,
6745
masm->SizeOfCodeGeneratedSince(&start));
6749
void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
6753
Register properties,
6754
Handle<String> name,
6755
Register scratch0) {
6756
// If the names of the slots in the range from 1 to kProbes - 1 for the hash
// value are not equal to the name and the kProbes-th slot is not used (its
// name is the undefined value), the hash table is guaranteed not to contain
// the property. This holds even if some slots represent deleted properties
// (their names are the null value).
6761
for (int i = 0; i < kInlinedProbes; i++) {
6762
// scratch0 points to properties hash.
6763
// Compute the masked index: (hash + i + i * i) & mask.
6764
Register index = scratch0;
6765
// Capacity is smi 2^n.
6766
__ ldr(index, FieldMemOperand(properties, kCapacityOffset));
6767
__ sub(index, index, Operand(1));
6768
__ and_(index, index, Operand(
6769
Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
6771
// Scale the index by multiplying by the entry size.
6772
ASSERT(StringDictionary::kEntrySize == 3);
6773
__ add(index, index, Operand(index, LSL, 1)); // index *= 3.
6775
Register entity_name = scratch0;
6776
// Having undefined at this place means the name is not contained.
6777
ASSERT_EQ(kSmiTagSize, 1);
6778
Register tmp = properties;
6779
__ add(tmp, properties, Operand(index, LSL, 1));
6780
__ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
6782
ASSERT(!tmp.is(entity_name));
6783
__ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
6784
__ cmp(entity_name, tmp);
6787
if (i != kInlinedProbes - 1) {
6788
// Stop if found the property.
6789
__ cmp(entity_name, Operand(Handle<String>(name)));
6792
// Check if the entry name is not a symbol.
6793
__ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
6794
__ ldrb(entity_name,
6795
FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
6796
__ tst(entity_name, Operand(kIsSymbolMask));
6799
// Restore the properties.
6801
FieldMemOperand(receiver, JSObject::kPropertiesOffset));
6805
const int spill_mask =
6806
(lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
6807
r2.bit() | r1.bit() | r0.bit());
6809
__ stm(db_w, sp, spill_mask);
6810
__ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
6811
__ mov(r1, Operand(Handle<String>(name)));
6812
StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
6814
__ tst(r0, Operand(r0));
6815
__ ldm(ia_w, sp, spill_mask);
6822
// Probe the string dictionary in the |elements| register. Jump to the
6823
// |done| label if a property with the given name is found. Jump to
6824
// the |miss| label otherwise.
6825
// If lookup was successful |scratch2| will be equal to elements + 4 * index.
6826
void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
6832
Register scratch2) {
6833
ASSERT(!elements.is(scratch1));
6834
ASSERT(!elements.is(scratch2));
6835
ASSERT(!name.is(scratch1));
6836
ASSERT(!name.is(scratch2));
6838
// Assert that name contains a string.
6839
if (FLAG_debug_code) __ AbortIfNotString(name);
6841
// Compute the capacity mask.
6842
__ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
6843
__ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize)); // convert smi to int
6844
__ sub(scratch1, scratch1, Operand(1));
6846
// Generate an unrolled loop that performs a few probes before
6847
// giving up. Measurements done on Gmail indicate that 2 probes
6848
// cover ~93% of loads from dictionaries.
6849
for (int i = 0; i < kInlinedProbes; i++) {
6850
// Compute the masked index: (hash + i + i * i) & mask.
6851
__ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
6853
// Add the probe offset (i + i * i) left shifted to avoid right shifting
6854
// the hash in a separate instruction. The value hash + i + i * i is right
6855
// shifted in the following and instruction.
6856
ASSERT(StringDictionary::GetProbeOffset(i) <
6857
1 << (32 - String::kHashFieldOffset));
6858
__ add(scratch2, scratch2, Operand(
6859
StringDictionary::GetProbeOffset(i) << String::kHashShift));
6861
__ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift));
6863
// Scale the index by multiplying by the element size.
6864
ASSERT(StringDictionary::kEntrySize == 3);
6865
// scratch2 = scratch2 * 3.
6866
__ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
6868
// Check if the key is identical to the name.
6869
__ add(scratch2, elements, Operand(scratch2, LSL, 2));
6870
__ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
6871
__ cmp(name, Operand(ip));
6875
const int spill_mask =
6876
(lr.bit() | r6.bit() | r5.bit() | r4.bit() |
6877
r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
6878
~(scratch1.bit() | scratch2.bit());
6880
__ stm(db_w, sp, spill_mask);
6882
ASSERT(!elements.is(r1));
6884
__ Move(r0, elements);
6886
__ Move(r0, elements);
6889
StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
6891
__ tst(r0, Operand(r0));
6892
__ mov(scratch2, Operand(r2));
6893
__ ldm(ia_w, sp, spill_mask);
6900
void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
6901
// This stub overrides SometimesSetsUpAFrame() to return false. That means
6902
// we cannot call anything that could cause a GC from this stub.
6904
// result: StringDictionary to probe
6906
// : StringDictionary to probe.
6907
// index_: will hold an index of entry if lookup is successful.
6908
// might alias with result_.
6910
// result_ is zero if lookup failed, non zero otherwise.
6912
Register result = r0;
6913
Register dictionary = r0;
6915
Register index = r2;
6918
Register undefined = r5;
6919
Register entry_key = r6;
6921
Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
6923
__ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
6924
__ mov(mask, Operand(mask, ASR, kSmiTagSize));
6925
__ sub(mask, mask, Operand(1));
6927
__ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
6929
__ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
6931
for (int i = kInlinedProbes; i < kTotalProbes; i++) {
6932
// Compute the masked index: (hash + i + i * i) & mask.
6933
// Capacity is smi 2^n.
6935
// Add the probe offset (i + i * i) left shifted to avoid right shifting
6936
// the hash in a separate instruction. The value hash + i + i * i is right
6937
// shifted in the following and instruction.
6938
ASSERT(StringDictionary::GetProbeOffset(i) <
6939
1 << (32 - String::kHashFieldOffset));
6940
__ add(index, hash, Operand(
6941
StringDictionary::GetProbeOffset(i) << String::kHashShift));
6943
__ mov(index, Operand(hash));
6945
__ and_(index, mask, Operand(index, LSR, String::kHashShift));
6947
// Scale the index by multiplying by the entry size.
6948
ASSERT(StringDictionary::kEntrySize == 3);
6949
__ add(index, index, Operand(index, LSL, 1)); // index *= 3.
6951
ASSERT_EQ(kSmiTagSize, 1);
6952
__ add(index, dictionary, Operand(index, LSL, 2));
6953
__ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
6955
// Having undefined at this place means the name is not contained.
6956
__ cmp(entry_key, Operand(undefined));
6957
__ b(eq, &not_in_dictionary);
6959
// Stop if found the property.
6960
__ cmp(entry_key, Operand(key));
6961
__ b(eq, &in_dictionary);
6963
if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
6964
// Check if the entry name is not a symbol.
6965
__ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
6967
FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
6968
__ tst(entry_key, Operand(kIsSymbolMask));
6969
__ b(eq, &maybe_in_dictionary);
6973
__ bind(&maybe_in_dictionary);
6974
// If we are doing negative lookup then probing failure should be
6975
// treated as a lookup success. For positive lookup probing failure
6976
// should be treated as lookup failure.
6977
if (mode_ == POSITIVE_LOOKUP) {
6978
__ mov(result, Operand::Zero());
6982
__ bind(&in_dictionary);
6983
__ mov(result, Operand(1));
6986
__ bind(&not_in_dictionary);
6987
__ mov(result, Operand::Zero());
6992
struct AheadOfTimeWriteBarrierStubList {
6993
Register object, value, address;
6994
RememberedSetAction action;
6998
struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
6999
// Used in RegExpExecStub.
7000
{ r6, r4, r7, EMIT_REMEMBERED_SET },
7001
{ r6, r2, r7, EMIT_REMEMBERED_SET },
7002
// Used in CompileArrayPushCall.
7003
// Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
7004
// Also used in KeyedStoreIC::GenerateGeneric.
7005
{ r3, r4, r5, EMIT_REMEMBERED_SET },
7006
// Used in CompileStoreGlobal.
7007
{ r4, r1, r2, OMIT_REMEMBERED_SET },
7008
// Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
7009
{ r1, r2, r3, EMIT_REMEMBERED_SET },
7010
{ r3, r2, r1, EMIT_REMEMBERED_SET },
7011
// Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
7012
{ r2, r1, r3, EMIT_REMEMBERED_SET },
7013
{ r3, r1, r2, EMIT_REMEMBERED_SET },
7014
// KeyedStoreStubCompiler::GenerateStoreFastElement.
7015
{ r4, r2, r3, EMIT_REMEMBERED_SET },
7016
// ElementsTransitionGenerator::GenerateSmiOnlyToObject
7017
// and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
7018
// and ElementsTransitionGenerator::GenerateDoubleToObject
7019
{ r2, r3, r9, EMIT_REMEMBERED_SET },
7020
// ElementsTransitionGenerator::GenerateDoubleToObject
7021
{ r6, r2, r0, EMIT_REMEMBERED_SET },
7022
{ r2, r6, r9, EMIT_REMEMBERED_SET },
7023
// StoreArrayLiteralElementStub::Generate
7024
{ r5, r0, r6, EMIT_REMEMBERED_SET },
7025
// Null termination.
7026
{ no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
7030
bool RecordWriteStub::IsPregenerated() {
7031
for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7032
!entry->object.is(no_reg);
7034
if (object_.is(entry->object) &&
7035
value_.is(entry->value) &&
7036
address_.is(entry->address) &&
7037
remembered_set_action_ == entry->action &&
7038
save_fp_regs_mode_ == kDontSaveFPRegs) {
7046
bool StoreBufferOverflowStub::IsPregenerated() {
7047
return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
7051
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
7052
StoreBufferOverflowStub stub1(kDontSaveFPRegs);
7053
stub1.GetCode()->set_is_pregenerated(true);
7057
void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
7058
for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7059
!entry->object.is(no_reg);
7061
RecordWriteStub stub(entry->object,
7066
stub.GetCode()->set_is_pregenerated(true);
7071
// Takes the input in 3 registers: address_, value_ and object_. A pointer to
7072
// the value has just been written into the object, now this stub makes sure
7073
// we keep the GC informed. The word in the object where the value has been
7074
// written is in the address register.
7075
void RecordWriteStub::Generate(MacroAssembler* masm) {
7076
Label skip_to_incremental_noncompacting;
7077
Label skip_to_incremental_compacting;
7079
// The first two instructions are generated with labels so as to get the
7080
// offset fixed up correctly by the bind(Label*) call. We patch it back and
7081
// forth between a compare instruction (a nop in this position) and the
7082
// real branch when we start and stop incremental heap marking.
7083
// See RecordWriteStub::Patch for details.
7084
__ b(&skip_to_incremental_noncompacting);
7085
__ b(&skip_to_incremental_compacting);
7087
if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7088
__ RememberedSetHelper(object_,
7092
MacroAssembler::kReturnAtEnd);
7096
__ bind(&skip_to_incremental_noncompacting);
7097
GenerateIncremental(masm, INCREMENTAL);
7099
__ bind(&skip_to_incremental_compacting);
7100
GenerateIncremental(masm, INCREMENTAL_COMPACTION);
7102
// Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
7103
// Will be checked in IncrementalMarking::ActivateGeneratedStub.
7104
ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
7105
ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
7106
PatchBranchIntoNop(masm, 0);
7107
PatchBranchIntoNop(masm, Assembler::kInstrSize);
7111
void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
7114
if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7115
Label dont_need_remembered_set;
7117
__ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
7118
__ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
7120
&dont_need_remembered_set);
7122
__ CheckPageFlag(regs_.object(),
7124
1 << MemoryChunk::SCAN_ON_SCAVENGE,
7126
&dont_need_remembered_set);
7128
// First notify the incremental marker if necessary, then update the
7130
CheckNeedsToInformIncrementalMarker(
7131
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
7132
InformIncrementalMarker(masm, mode);
7133
regs_.Restore(masm);
7134
__ RememberedSetHelper(object_,
7138
MacroAssembler::kReturnAtEnd);
7140
__ bind(&dont_need_remembered_set);
7143
CheckNeedsToInformIncrementalMarker(
7144
masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
7145
InformIncrementalMarker(masm, mode);
7146
regs_.Restore(masm);
7151
void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
7152
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
7153
int argument_count = 3;
7154
__ PrepareCallCFunction(argument_count, regs_.scratch0());
7156
r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
7157
ASSERT(!address.is(regs_.object()));
7158
ASSERT(!address.is(r0));
7159
__ Move(address, regs_.address());
7160
__ Move(r0, regs_.object());
7161
if (mode == INCREMENTAL_COMPACTION) {
7162
__ Move(r1, address);
7164
ASSERT(mode == INCREMENTAL);
7165
__ ldr(r1, MemOperand(address, 0));
7167
__ mov(r2, Operand(ExternalReference::isolate_address()));
7169
AllowExternalCallThatCantCauseGC scope(masm);
7170
if (mode == INCREMENTAL_COMPACTION) {
7172
ExternalReference::incremental_evacuation_record_write_function(
7176
ASSERT(mode == INCREMENTAL);
7178
ExternalReference::incremental_marking_record_write_function(
7182
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
    MacroAssembler* masm,
    OnNoNeedToInformIncrementalMarker on_no_need,
    Mode mode) {
  Label on_black;
  Label need_incremental;
  Label need_incremental_pop_scratch;

  // Let's look at the color of the object:  If it is not black we don't have
  // to inform the incremental marker.
  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&on_black);

  // Get the value from the slot.
  __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));

  if (mode == INCREMENTAL_COMPACTION) {
    Label ensure_not_white;

    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kEvacuationCandidateMask,
                     eq,
                     &ensure_not_white);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
                     eq,
                     &need_incremental);

    __ bind(&ensure_not_white);
  }

  // We need extra registers for this, so we push the object and the address
  // register temporarily.
  __ Push(regs_.object(), regs_.address());
  __ EnsureNotWhite(regs_.scratch0(),  // The value.
                    regs_.scratch1(),  // Scratch.
                    regs_.object(),  // Scratch.
                    regs_.address(),  // Scratch.
                    &need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  __ bind(&need_incremental);

  // Fall through when we need to inform the incremental marker.
}

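// Background for the checks above, in standard tri-colour marking terms: a
// "black" object has already been marked and its fields scanned, so a new
// pointer written into it could otherwise be missed; a "white" value has not
// been marked at all.  EnsureNotWhite greys the value when that can be done
// locally; only when it cannot does the stub fall through via
// need_incremental so that the caller (GenerateIncremental) invokes
// InformIncrementalMarker.
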
void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0    : element value to store
  //  -- r1    : array literal
  //  -- r2    : map of array literal
  //  -- r3    : element index as smi
  //  -- r4    : array literal index in function as smi
  // -----------------------------------

  Label double_elements;
  Label smi_element;
  Label slow_elements;
  Label fast_elements;

  __ CheckFastElements(r2, r5, &double_elements);
  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
  __ JumpIfSmi(r0, &smi_element);
  __ CheckFastSmiOnlyElements(r2, r5, &fast_elements);

  // Store into the array literal requires an elements transition.  Call into
  // the runtime.
  __ bind(&slow_elements);
  __ Push(r1, r3, r0);
  __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
  __ Push(r5, r4);
  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
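  // The runtime call above receives five stack arguments, pushed oldest
  // first: the array literal (r1), the element index (r3), the value (r0),
  // the function's literals array (r5) and the literal index (r4), which
  // matches the argument count of 5 passed to TailCallRuntime.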

  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
  __ bind(&fast_elements);
  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
  __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ str(r0, MemOperand(r6, 0));
  // Update the write barrier for the array store.
  __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret();
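  // RecordWrite here is MacroAssembler::RecordWrite(object, address, value,
  // lr_status, fp_mode, remembered_set_action, smi_check): r5 is the elements
  // backing store, r6 the slot address and r0 the stored value, so this is
  // exactly the path that ends up in the RecordWriteStub generated earlier in
  // this file.  OMIT_SMI_CHECK is safe because the smi case was already
  // dispatched to &smi_element above.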

  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
  // FAST_ELEMENTS, and value is Smi.
  __ bind(&smi_element);
  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
  __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
  __ Ret();
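  // Note on the smi path above: no write barrier was needed because a smi is
  // an immediate, not a heap pointer, so the store cannot create a reference
  // the GC needs to track.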

  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
  __ bind(&double_elements);
  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
  __ StoreNumberToDoubleElements(r0, r3, r1, r5, r6, r7, r9, r10,
                                 &slow_elements);
  __ Ret();
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM