// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
#if defined(V8_TARGET_ARCH_IA32)
34
#include "macro-assembler.h"
40
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
43
// Sets up an INTERNAL stack frame before a call into a stub, and records on
// the assembler that a frame is now active (checked by code that requires a
// frame, e.g. runtime calls).
// NOTE(review): this extract is garbled — the file's original line numbers
// are interleaved as bare tokens and the closing brace (orig. lines 47-49)
// is missing. Restore from the upstream V8 ia32 codegen source.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
44
masm->EnterFrame(StackFrame::INTERNAL);
45
// Sanity check: no frame must be active yet before we flag one.
ASSERT(!masm->has_frame());
46
masm->set_has_frame(true);
50
// Tears down the INTERNAL frame created by BeforeCall and clears the
// assembler's has-frame flag. Mirror image of BeforeCall above.
// NOTE(review): closing brace (orig. lines 54-55) is missing from this
// extract; interleaved bare numbers are extraction residue.
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
51
masm->LeaveFrame(StackFrame::INTERNAL);
52
// A frame must have been active (set by BeforeCall) when we get here.
ASSERT(masm->has_frame());
53
masm->set_has_frame(false);
60
// Generates a native double->double stub for the given transcendental
// operation (sin/cos/tan/log) into freshly allocated executable memory,
// falling back to the libc function when the buffer cannot be allocated.
// NOTE(review): this extract is truncated — per the embedded original line
// numbering, the OS::Allocate argument tail, the `if (buffer == NULL)` /
// `switch (type)` wrappers, the register push/pop + Ret sequence, and the
// CodeDesc/GetCode lines are missing. Do not compile as-is; restore from
// upstream V8 ia32/codegen-ia32.cc.
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
62
// Allocate buffer in executable space.
63
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
67
// Fallback to library function if function cannot be created.
69
case TranscendentalCache::SIN: return &sin;
70
case TranscendentalCache::COS: return &cos;
71
case TranscendentalCache::TAN: return &tan;
72
case TranscendentalCache::LOG: return &log;
73
default: UNIMPLEMENTED();
77
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
78
// esp[1 * kPointerSize]: raw double input
79
// esp[0 * kPointerSize]: return address
80
// Move double input into registers.
85
__ fld_d(Operand(esp, 4 * kPointerSize));
86
__ mov(ebx, Operand(esp, 4 * kPointerSize));
87
__ mov(edx, Operand(esp, 5 * kPointerSize));
88
// Emit the type-specific computation (shared with the stub generator).
TranscendentalCacheStub::GenerateOperation(&masm, type);
89
// The return value is expected to be on ST(0) of the FPU stack.
97
ASSERT(desc.reloc_size == 0);
99
// Make the freshly generated code visible to the instruction stream,
// then write-protect the buffer before handing it out as a function.
CPU::FlushICache(buffer, actual_size);
100
OS::ProtectCode(buffer, actual_size);
101
return FUNCTION_CAST<UnaryMathFunction>(buffer);
105
// Generates a native sqrt stub (SSE2 sqrtsd) into executable memory.
// Falls back to libc sqrt when allocation fails or SSE2 is unavailable,
// which keeps results consistent with the runtime path used by fullcodegen.
// NOTE(review): extract is truncated — the OS::Allocate argument tail, the
// `size_t actual_size` declaration, the Ret/CodeDesc/GetCode lines, and the
// closing brace are missing per the embedded original line numbering.
UnaryMathFunction CreateSqrtFunction() {
107
// Allocate buffer in executable space.
108
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
111
// If SSE2 is not available, we can use libc's implementation to ensure
112
// consistency since code by fullcodegen's calls into runtime in that case.
113
if (buffer == NULL || !CpuFeatures::IsSupported(SSE2)) return &sqrt;
114
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
115
// esp[1 * kPointerSize]: raw double input
116
// esp[0 * kPointerSize]: return address
117
// Move double input into registers.
119
CpuFeatures::Scope use_sse2(SSE2);
120
__ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
121
__ sqrtsd(xmm0, xmm0);
122
// Store the result back to the stack slot so it can be reloaded onto the
// x87 stack — doubles are returned in ST(0) under the ia32 calling
// convention.
__ movdbl(Operand(esp, 1 * kPointerSize), xmm0);
123
// Load result into floating point register as return value.
124
__ fld_d(Operand(esp, 1 * kPointerSize));
130
ASSERT(desc.reloc_size == 0);
132
CPU::FlushICache(buffer, actual_size);
133
OS::ProtectCode(buffer, actual_size);
134
return FUNCTION_CAST<UnaryMathFunction>(buffer);
138
// Fallback memory-copy routine, returned by CreateMemCopyFunction when a
// specialized copy stub cannot be generated: simply defers to libc memcpy.
// As with memcpy, the source and destination regions must not overlap.
//
// dest: destination buffer (at least `size` bytes).
// src:  source buffer (at least `size` bytes).
// size: number of bytes to copy; a size of 0 is a no-op.
static void MemCopyWrapper(void* dest, const void* src, size_t size) {
  memcpy(dest, src, size);
}
143
// Generates an optimized memcpy routine into executable memory and returns
// it as an OS::MemCopyFunction. Uses 32-byte SSE2 copy loops (with separate
// aligned- and unaligned-source paths) when SSE2 is available, and a plain
// word-copy loop otherwise. Falls back to MemCopyWrapper when the executable
// buffer cannot be allocated.
// NOTE(review): this extract is heavily truncated — per the embedded
// original line numbering, many lines are missing (OS::Allocate tail, label
// declarations such as `ok`/`loop`, dst/src register aliasing, the small-
// copy alignment preamble, loop decrements, Ret/pop sequences, CodeDesc/
// GetCode, and closing braces). Restore from upstream before building.
OS::MemCopyFunction CreateMemCopyFunction() {
145
// Allocate buffer in executable space.
146
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
149
if (buffer == NULL) return &MemCopyWrapper;
150
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
152
// Generated code is put into a fixed, unmovable, buffer, and not into
153
// the V8 heap. We can't, and don't, refer to any relocatable addresses
154
// (e.g. the JavaScript nan-object).
156
// 32-bit C declaration function calls pass arguments on stack.
159
// esp[12]: Third argument, size.
160
// esp[8]: Second argument, source pointer.
161
// esp[4]: First argument, destination pointer.
162
// esp[0]: return address
164
const int kDestinationOffset = 1 * kPointerSize;
165
const int kSourceOffset = 2 * kPointerSize;
166
const int kSizeOffset = 3 * kPointerSize;
168
int stack_offset = 0; // Update if we change the stack height.
170
// Debug builds verify callers honor the minimum-size contract for this
// complex copy routine.
if (FLAG_debug_code) {
171
__ cmp(Operand(esp, kSizeOffset + stack_offset),
172
Immediate(OS::kMinComplexMemCopy));
174
__ j(greater_equal, &ok);
178
if (CpuFeatures::IsSupported(SSE2)) {
179
CpuFeatures::Scope enable(SSE2);
182
// Account for the two registers pushed above (missing from extract)
// when addressing the stack arguments.
stack_offset += 2 * kPointerSize;
185
Register count = ecx;
186
__ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
187
__ mov(src, Operand(esp, stack_offset + kSourceOffset));
188
__ mov(count, Operand(esp, stack_offset + kSizeOffset));
191
// Copy the first 16 bytes unaligned, then align dst to a 16-byte
// boundary for the main loop.
__ movdqu(xmm0, Operand(src, 0));
192
__ movdqu(Operand(dst, 0), xmm0);
196
__ add(edx, Immediate(16));
201
// edi is now aligned. Check if esi is also aligned.
202
Label unaligned_source;
203
__ test(src, Immediate(0x0F));
204
__ j(not_zero, &unaligned_source);
206
// Copy loop for aligned source and destination.
208
Register loop_count = ecx;
209
Register count = edx;
210
__ shr(loop_count, 5);
215
// Prefetch the next 32 bytes while copying the current 32 with two
// aligned 16-byte moves.
__ prefetch(Operand(src, 0x20), 1);
216
__ movdqa(xmm0, Operand(src, 0x00));
217
__ movdqa(xmm1, Operand(src, 0x10));
218
__ add(src, Immediate(0x20));
220
__ movdqa(Operand(dst, 0x00), xmm0);
221
__ movdqa(Operand(dst, 0x10), xmm1);
222
__ add(dst, Immediate(0x20));
225
__ j(not_zero, &loop);
228
// At most 31 bytes to copy.
230
__ test(count, Immediate(0x10));
231
__ j(zero, &move_less_16);
232
__ movdqa(xmm0, Operand(src, 0));
233
__ add(src, Immediate(0x10));
234
__ movdqa(Operand(dst, 0), xmm0);
235
__ add(dst, Immediate(0x10));
236
__ bind(&move_less_16);
238
// At most 15 bytes to copy. Copy 16 bytes at end of string.
240
__ movdqu(xmm0, Operand(src, count, times_1, -0x10));
241
__ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
243
// Return the original destination pointer in eax, per memcpy contract.
__ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
250
// Copy loop for unaligned source and aligned destination.
251
// If source is not aligned, we can't read it as efficiently.
252
__ bind(&unaligned_source);
254
Register loop_count = ecx;
255
Register count = edx;
256
__ shr(loop_count, 5);
261
__ prefetch(Operand(src, 0x20), 1);
262
// Unaligned loads from src, aligned stores to dst.
__ movdqu(xmm0, Operand(src, 0x00));
263
__ movdqu(xmm1, Operand(src, 0x10));
264
__ add(src, Immediate(0x20));
266
__ movdqa(Operand(dst, 0x00), xmm0);
267
__ movdqa(Operand(dst, 0x10), xmm1);
268
__ add(dst, Immediate(0x20));
271
__ j(not_zero, &loop);
274
// At most 31 bytes to copy.
276
__ test(count, Immediate(0x10));
277
__ j(zero, &move_less_16);
278
__ movdqu(xmm0, Operand(src, 0));
279
__ add(src, Immediate(0x10));
280
__ movdqa(Operand(dst, 0), xmm0);
281
__ add(dst, Immediate(0x10));
282
__ bind(&move_less_16);
284
// At most 15 bytes to copy. Copy 16 bytes at end of string.
285
__ and_(count, 0x0F);
286
__ movdqu(xmm0, Operand(src, count, times_1, -0x10));
287
__ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
289
__ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
296
// SSE2 not supported. Unlikely to happen in practice.
299
stack_offset += 2 * kPointerSize;
303
Register count = ecx;
304
__ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
305
__ mov(src, Operand(esp, stack_offset + kSourceOffset));
306
__ mov(count, Operand(esp, stack_offset + kSizeOffset));
308
// Copy the first word.
309
__ mov(eax, Operand(src, 0));
310
__ mov(Operand(dst, 0), eax);
312
// Increment src, dst so that dst is aligned.
316
__ add(edx, Immediate(4)); // edx = 4 - (dst & 3)
320
// edi is now aligned, ecx holds number of remaining bytes to copy.
324
__ shr(ecx, 2); // Make word count instead of byte count.
327
// At most 3 bytes left to copy. Copy 4 bytes at end of string.
329
__ mov(eax, Operand(src, count, times_1, -4));
330
__ mov(Operand(dst, count, times_1, -4), eax);
332
__ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
340
ASSERT(desc.reloc_size == 0);
342
CPU::FlushICache(buffer, actual_size);
343
OS::ProtectCode(buffer, actual_size);
344
return FUNCTION_CAST<OS::MemCopyFunction>(buffer);
349
// -------------------------------------------------------------------------
// Code generators
#define __ ACCESS_MASM(masm)
354
// Performs an elements-kind transition that needs only a map change (no
// backing-store conversion): stores the target map into the receiver and
// records the write for the incremental/generational write barrier.
// NOTE(review): extract is truncated — the state comment is missing the
// receiver register line (presumably edx, as the code below writes edx's
// map slot — confirm against upstream), and the RecordWriteField call is
// cut off mid-argument-list (orig. lines 367-372 missing).
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
355
MacroAssembler* masm) {
356
// ----------- S t a t e -------------
358
// -- ebx : target map
361
// -- esp[0] : return address
362
// -----------------------------------
363
// Set transitioned map.
364
__ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
365
// Write barrier for the map store above (argument list truncated here).
__ RecordWriteField(edx,
366
HeapObject::kMapOffset,
375
// Transitions a smi-only elements backing store to a FixedDoubleArray:
// allocates a new double array, converts each smi element to a double
// (via SSE2 cvtsi2sd, or the x87 path otherwise), writes the canonical
// hole NaN for holes, installs the new backing store with write barriers,
// and finally stores the transitioned map. Jumps to `fail`/runtime when
// allocation requires GC.
// NOTE(review): extract is truncated throughout (per embedded numbering):
// state-comment register lines, the double-alignment `or`/`and` preamble,
// the `jmp(fail)` in gc_required, loop decrement/bind lines, the
// RecordWriteField argument tails, and closing braces are missing.
void ElementsTransitionGenerator::GenerateSmiToDouble(
376
MacroAssembler* masm, Label* fail) {
377
// ----------- S t a t e -------------
379
// -- ebx : target map
382
// -- esp[0] : return address
383
// -----------------------------------
384
Label loop, entry, convert_hole, gc_required, only_change_map;
386
// Check for empty arrays, which only require a map transition and no changes
387
// to the backing store.
388
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
389
__ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
390
__ j(equal, &only_change_map);
395
__ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));
397
// Allocate new FixedDoubleArray.
399
// edi: length of source FixedArray (smi-tagged)
400
// Size = header + one extra pointer of slack for double alignment.
__ lea(esi, Operand(edi,
402
FixedDoubleArray::kHeaderSize + kPointerSize));
403
__ AllocateInNewSpace(esi, eax, ebx, no_reg, &gc_required, TAG_OBJECT);
405
Label aligned, aligned_done;
406
// If the allocation is not double-aligned, plug the first word with a
// one-pointer filler and bump the result pointer to the aligned slot.
__ test(eax, Immediate(kDoubleAlignmentMask - kHeapObjectTag));
407
__ j(zero, &aligned, Label::kNear);
408
__ mov(FieldOperand(eax, 0),
409
Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
410
__ add(eax, Immediate(kPointerSize));
411
__ jmp(&aligned_done);
414
// Aligned case: fill the unused trailing word instead.
__ mov(Operand(eax, esi, times_1, -kPointerSize-1),
415
Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
417
__ bind(&aligned_done);
419
// eax: destination FixedDoubleArray
420
// edi: number of elements
422
__ mov(FieldOperand(eax, HeapObject::kMapOffset),
423
Immediate(masm->isolate()->factory()->fixed_double_array_map()));
424
__ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
425
__ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
426
// Replace receiver's backing store with newly created FixedDoubleArray.
427
__ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
429
// Write barrier for the elements-pointer store (arguments truncated).
__ RecordWriteField(edx,
430
JSObject::kElementsOffset,
437
__ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));
439
// Prepare for conversion loop.
440
ExternalReference canonical_the_hole_nan_reference =
441
ExternalReference::address_of_the_hole_nan();
442
XMMRegister the_hole_nan = xmm1;
443
// Preload the canonical hole NaN once when SSE2 is available.
if (CpuFeatures::IsSupported(SSE2)) {
444
CpuFeatures::Scope use_sse2(SSE2);
445
__ movdbl(the_hole_nan,
446
Operand::StaticVariable(canonical_the_hole_nan_reference));
450
// Call into runtime if GC is required.
451
__ bind(&gc_required);
452
// Restore registers before jumping into runtime.
453
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
458
// Convert and copy elements
459
// esi: source FixedArray
461
__ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
462
// ebx: current element from source
463
// edi: index of current element
464
__ JumpIfNotSmi(ebx, &convert_hole);
466
// Normal smi, convert it to double and store.
468
if (CpuFeatures::IsSupported(SSE2)) {
469
CpuFeatures::Scope fscope(SSE2);
470
__ cvtsi2sd(xmm0, ebx);
471
__ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
475
// x87 fallback: integer-load from the stack, store as double.
__ fild_s(Operand(esp, 0));
477
__ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
481
// Found hole, store hole_nan_as_double instead.
482
__ bind(&convert_hole);
484
if (FLAG_debug_code) {
485
__ cmp(ebx, masm->isolate()->factory()->the_hole_value());
486
__ Assert(equal, "object found in smi-only array");
489
if (CpuFeatures::IsSupported(SSE2)) {
490
CpuFeatures::Scope use_sse2(SSE2);
491
__ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
494
__ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
495
__ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
499
// Loop over elements from the last index down to 0 (smi-tagged counter).
__ sub(edi, Immediate(Smi::FromInt(1)));
500
__ j(not_sign, &loop);
506
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
508
__ bind(&only_change_map);
511
// Set transitioned map.
512
__ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
513
__ RecordWriteField(edx,
514
HeapObject::kMapOffset,
523
// Transitions a FixedDoubleArray backing store to a FixedArray of objects:
// allocates the new array, boxes each non-hole double into a HeapNumber
// (recording writes for the barrier), replaces holes with the-hole
// sentinel, then installs the new backing store and the transitioned map.
// NOTE(review): extract is truncated throughout (per embedded numbering):
// state-comment register lines, push/pop of clobbered registers, loop
// bind/entry lines, the SSE2 movdbl load head (line 648 is a dangling
// argument), RecordWrite argument tails, and closing braces are missing.
void ElementsTransitionGenerator::GenerateDoubleToObject(
524
MacroAssembler* masm, Label* fail) {
525
// ----------- S t a t e -------------
527
// -- ebx : target map
530
// -- esp[0] : return address
531
// -----------------------------------
532
Label loop, entry, convert_hole, gc_required, only_change_map, success;
534
// Check for empty arrays, which only require a map transition and no changes
535
// to the backing store.
536
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
537
__ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
538
__ j(equal, &only_change_map);
544
__ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
546
// Allocate new FixedArray.
547
// ebx: length of source FixedDoubleArray (smi-tagged)
548
__ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
549
__ AllocateInNewSpace(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);
551
// eax: destination FixedArray
552
// ebx: number of elements
553
__ mov(FieldOperand(eax, HeapObject::kMapOffset),
554
Immediate(masm->isolate()->factory()->fixed_array_map()));
555
__ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
556
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
562
// Set transitioned map.
563
__ bind(&only_change_map);
564
__ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
565
// Write barrier for the map store (argument list truncated here).
__ RecordWriteField(edx,
566
HeapObject::kMapOffset,
574
// Call into runtime if GC is required.
575
__ bind(&gc_required);
576
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
582
// Box doubles into heap numbers.
583
// edi: source FixedDoubleArray
584
// eax: destination FixedArray
586
// ebx: index of current element (smi-tagged)
587
// Detect the hole by comparing the upper 32 bits of the double against
// the canonical hole-NaN upper word.
uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
588
__ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
589
__ j(equal, &convert_hole);
591
// Non-hole double, copy value into a heap number.
592
__ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
593
// edx: new heap number
594
if (CpuFeatures::IsSupported(SSE2)) {
595
CpuFeatures::Scope fscope(SSE2);
597
FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
598
__ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
600
// Non-SSE2 path: copy the double as two 32-bit words.
__ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
601
__ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
602
__ mov(esi, FieldOperand(edi, ebx, times_4, offset));
603
__ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi);
605
__ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
607
// Write barrier for storing the heap number into the new FixedArray
// (argument list truncated here).
__ RecordWriteArray(eax,
613
__ jmp(&entry, Label::kNear);
615
// Replace the-hole NaN with the-hole pointer.
616
__ bind(&convert_hole);
617
__ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
618
masm->isolate()->factory()->the_hole_value());
621
// Loop from the last index down to 0 (smi-tagged counter).
__ sub(ebx, Immediate(Smi::FromInt(1)));
622
__ j(not_sign, &loop);
628
// Set transitioned map.
629
__ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
630
__ RecordWriteField(edx,
631
HeapObject::kMapOffset,
637
// Replace receiver's backing store with newly created and filled FixedArray.
638
__ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
639
__ RecordWriteField(edx,
640
JSObject::kElementsOffset,
647
// Restore registers.
649
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
655
// Emits code that loads the character at `index` of `string` into `result`,
// handling sliced, cons, sequential and external string representations,
// and both ASCII and two-byte encodings. Bails out to `call_runtime` for
// cases that need flattening or short external strings.
// NOTE(review): extract is truncated — the parameter list (orig. lines
// 656-659: presumably factory/string/index/result registers, judging by the
// names used in the body — confirm against upstream), several label binds
// (`ascii`, `done`), index-scaling lines, and closing braces are missing
// per the embedded original line numbering.
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
660
Label* call_runtime) {
661
// Fetch the instance type of the receiver into result register.
662
__ mov(result, FieldOperand(string, HeapObject::kMapOffset));
663
__ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
665
// We need special handling for indirect strings.
666
Label check_sequential;
667
__ test(result, Immediate(kIsIndirectStringMask));
668
__ j(zero, &check_sequential, Label::kNear);
670
// Dispatch on the indirect string shape: slice or cons.
672
__ test(result, Immediate(kSlicedNotConsMask));
673
__ j(zero, &cons_string, Label::kNear);
676
// Sliced string: adjust the index by the slice offset and continue with
// the parent string.
Label indirect_string_loaded;
677
__ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
679
__ add(index, result);
680
__ mov(string, FieldOperand(string, SlicedString::kParentOffset));
681
__ jmp(&indirect_string_loaded, Label::kNear);
683
// Handle cons strings.
684
// Check whether the right hand side is the empty string (i.e. if
685
// this is really a flat string in a cons string). If that is not
686
// the case we would rather go to the runtime system now to flatten
688
__ bind(&cons_string);
689
__ cmp(FieldOperand(string, ConsString::kSecondOffset),
690
Immediate(factory->empty_string()));
691
__ j(not_equal, call_runtime);
692
__ mov(string, FieldOperand(string, ConsString::kFirstOffset));
694
// Reload the instance type of the (possibly replaced) string.
__ bind(&indirect_string_loaded);
695
__ mov(result, FieldOperand(string, HeapObject::kMapOffset));
696
__ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
698
// Distinguish sequential and external strings. Only these two string
699
// representations can reach here (slices and flat cons strings have been
700
// reduced to the underlying sequential or external string).
702
__ bind(&check_sequential);
703
STATIC_ASSERT(kSeqStringTag == 0);
704
__ test(result, Immediate(kStringRepresentationMask));
705
__ j(zero, &seq_string, Label::kNear);
707
// Handle external strings.
708
Label ascii_external, done;
709
if (FLAG_debug_code) {
710
// Assert that we do not have a cons or slice (indirect strings) here.
711
// Sequential strings have already been ruled out.
712
__ test(result, Immediate(kIsIndirectStringMask));
713
__ Assert(zero, "external string expected, but not found");
715
// Rule out short external strings.
716
STATIC_CHECK(kShortExternalStringTag != 0);
717
__ test_b(result, kShortExternalStringMask);
718
__ j(not_zero, call_runtime);
720
STATIC_ASSERT(kTwoByteStringTag == 0);
721
// Test encoding before clobbering result with the resource data pointer;
// the flags from test_b are consumed by the j(not_equal) below.
__ test_b(result, kStringEncodingMask);
722
__ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
723
__ j(not_equal, &ascii_external, Label::kNear);
725
// Two-byte external: load a 16-bit character.
__ movzx_w(result, Operand(result, index, times_2, 0));
726
__ jmp(&done, Label::kNear);
727
__ bind(&ascii_external);
729
__ movzx_b(result, Operand(result, index, times_1, 0));
730
__ jmp(&done, Label::kNear);
732
// Dispatch on the encoding: ASCII or two-byte.
734
__ bind(&seq_string);
735
STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
736
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
737
__ test(result, Immediate(kStringEncodingMask));
738
__ j(not_zero, &ascii, Label::kNear);
741
// Load the two-byte character code into the result register.
742
__ movzx_w(result, FieldOperand(string,
745
SeqTwoByteString::kHeaderSize));
746
__ jmp(&done, Label::kNear);
749
// Load the byte into the result register.
751
__ movzx_b(result, FieldOperand(string,
754
SeqAsciiString::kHeaderSize));
760
} } // namespace v8::internal
762
#endif // V8_TARGET_ARCH_IA32