/*
 * Copyright (C) 2009 University of Szeged
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
29
#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
31
#include "ARMAssembler.h"
37
// NOTE(review): this extraction is corrupted — the bare numeric lines are
// original line numbers, and the non-contiguous numbering shows that
// statements (braces, else-branches, declarations) are missing. Restore
// this file from upstream before building; comments describe only what
// is visible here.
//
// Patches a pc-relative load so it addresses its slot in the constant
// pool placed after the code: the word distance from the instruction to
// the pool, combined with the slot index stashed in the instruction,
// is written back into the instruction's 12-bit offset field.
void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
39
ARMWord *ldr = reinterpret_cast<ARMWord*>(loadAddr);
40
// Pointer subtraction yields the distance in ARMWords, not bytes.
ARMWord diff = reinterpret_cast<ARMWord*>(constPoolAddr) - ldr;
41
// The slot index was pre-stored, shifted left by 1, in the low 12 bits.
ARMWord index = (*ldr & 0xfff) >> 1;
44
if (diff >= 2 || index > 0) {
45
// Convert the combined word offset to bytes; the -2 presumably accounts
// for the ARM pc-ahead bias (2 words) — TODO confirm once the missing
// surrounding lines are restored.
diff = (diff + index - 2) * sizeof(ARMWord);
46
// LDR/STR immediate offsets are limited to 12 bits.
ASSERT(diff <= 0xfff);
47
*ldr = (*ldr & ~0xfff) | diff;
49
// Alternate patch path (its enclosing context is missing here): clear
// the offset field and the U (add/up) bit, store a one-word offset.
*ldr = (*ldr & ~(0xfff | ARMAssembler::DataTransferUp)) | sizeof(ARMWord);
54
// NOTE(review): corrupted extraction — the numeric lines are original
// line numbers; the shift/rotation-update statements between the visible
// tests are missing. Restore from upstream before building.
//
// Attempts to encode `imm` as an ARM data-processing operand 2: an 8-bit
// value combined with a 4-bit even rotation. The visible tests narrow
// the leading-zero run until a single significant byte remains; returns
// InvalidImmediate when no such encoding exists.
ARMWord ARMAssembler::getOp2(ARMWord imm)
59
// Values already fitting in 8 bits need no rotation.
return Op2Immediate | imm;
61
if ((imm & 0xff000000) == 0) {
66
// Rotate the significant byte toward the top for inspection.
imm = (imm << 24) | (imm >> 8);
70
if ((imm & 0xff000000) == 0) {
75
if ((imm & 0xf0000000) == 0) {
80
if ((imm & 0xc0000000) == 0) {
85
// Encodable only if a single byte of significance remains.
if ((imm & 0x00ffffff) == 0)
86
return Op2Immediate | (imm >> 24) | (rol << 8);
88
return InvalidImmediate;
91
// NOTE(review): corrupted extraction — numeric lines are original line
// numbers; declarations of `mask`, `rol`, `imm1`, `imm2`, the emit
// calls, and several branch bodies are missing. Restore from upstream.
//
// Tries to materialise `imm` into `reg` with two data-processing
// instructions (visible here: splitting the value into two rotated
// 8-bit operand-2 immediates, imm1 and imm2). The `positive` flag
// presumably selects mov/orr vs mvn/bic emission — TODO confirm when
// the missing emit lines are restored.
int ARMAssembler::genInt(int reg, ARMWord imm, bool positive)
93
// Step1: Search a non-immediate part
102
if ((imm & mask) == 0) {
103
// Rotate the significant bits into a canonical position.
imm = (imm << rol) | (imm >> (32 - rol));
104
rol = 4 + (rol >> 1);
111
imm = (imm << 8) | (imm >> 24);
115
if ((imm & mask) == 0) {
116
imm = (imm << rol) | (imm >> (32 - rol));
117
rol = (rol >> 1) - 8;
129
// After normalisation the low byte must be clear.
ASSERT((imm & 0xff) == 0);
131
if ((imm & 0xff000000) == 0) {
132
// Two middle bytes: encode each with its own rotation.
imm1 = Op2Immediate | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
133
imm2 = Op2Immediate | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
134
} else if (imm & 0xc0000000) {
135
imm1 = Op2Immediate | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
139
if ((imm & 0xff000000) == 0) {
144
if ((imm & 0xf0000000) == 0) {
149
if ((imm & 0xc0000000) == 0) {
154
if ((imm & 0x00ffffff) == 0)
155
imm2 = Op2Immediate | (imm >> 24) | ((rol & 0xf) << 8);
159
if ((imm & 0xf0000000) == 0) {
164
if ((imm & 0xc0000000) == 0) {
169
imm1 = Op2Immediate | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
173
if ((imm & 0xf0000000) == 0) {
178
if ((imm & 0xc0000000) == 0) {
183
if ((imm & 0x00ffffff) == 0)
184
imm2 = Op2Immediate | (imm >> 24) | ((rol & 0xf) << 8);
200
// NOTE(review): corrupted extraction — the getOp2 calls that populate
// `tmp` are among the missing lines. Restore from upstream.
//
// Returns an operand-2 encoding for `imm`: first tries a direct
// encoding, then an inverted one (flagged via Op2InvertedImmediate so
// the caller can emit the complementary instruction), and finally falls
// back to materialising the constant into tmpReg via encodeComplexImm.
ARMWord ARMAssembler::getImm(ARMWord imm, int tmpReg, bool invert)
204
// Do it by 1 instruction
206
if (tmp != InvalidImmediate)
210
if (tmp != InvalidImmediate) {
212
// Tell the caller the encoding is of the bitwise-inverted value.
return tmp | Op2InvertedImmediate;
217
return encodeComplexImm(imm, tmpReg);
220
// NOTE(review): corrupted extraction — the getOp2 calls and the mov/mvn
// emissions inside the visible branches are missing. Restore upstream.
//
// Loads `imm` into register `dest`, preferring a single instruction
// (direct or inverted operand-2 encoding, per the two visible tests)
// and falling back to encodeComplexImm for values neither encodes.
void ARMAssembler::moveImm(ARMWord imm, int dest)
224
// Do it by 1 instruction
226
if (tmp != InvalidImmediate) {
232
if (tmp != InvalidImmediate) {
237
encodeComplexImm(imm, dest);
240
// NOTE(review): corrupted extraction — braces, returns, and the #else/
// #endif of the visible preprocessor conditional are missing. Restore
// from upstream before building.
//
// Materialises a constant that has no single operand-2 encoding.
// On ARMv7+ it uses a movw (and movt for the high half if needed);
// otherwise it tries a two-instruction sequence via genInt (direct,
// then inverted), and as a last resort loads the value from the
// constant pool with ldrImmediate.
ARMWord ARMAssembler::encodeComplexImm(ARMWord imm, int dest)
242
#if WTF_ARM_ARCH_AT_LEAST(7)
243
ARMWord tmp = getImm16Op2(imm);
244
if (tmp != InvalidImmediate) {
248
// Low 16 bits via movw, high 16 via movt.
movw(dest, getImm16Op2(imm & 0xffff));
249
movt(dest, getImm16Op2(imm >> 16));
252
// Do it by 2 instruction
253
if (genInt(dest, imm, true))
255
if (genInt(dest, ~imm, false))
258
// Fallback: constant-pool load.
ldrImmediate(dest, imm);
263
// Memory load/store helpers
265
// NOTE(review): corrupted extraction — the leading `if (offset >= 0)`/
// `else` scaffolding and closing braces are missing (numbering gaps).
// Restore from upstream; comments describe only visible lines.
//
// 32-bit load/store of srcDst at [base + offset]. Visible strategy:
// offsets fitting in 12 bits go directly into the instruction; offsets
// up to 20 bits are split — the high 8 bits are added/subtracted into
// scratch S0 first (the `(10 << 8)` rotation presumably shifts the
// immediate into bits 12..19 — TODO confirm against the ARM ARM) —
// and anything larger is loaded whole into S0 and used as a register
// offset.
void ARMAssembler::dataTransfer32(DataTransferTypeA transferType, RegisterID srcDst, RegisterID base, int32_t offset)
269
dtrUp(transferType, srcDst, base, offset);
270
else if (offset <= 0xfffff) {
271
// Fold the high bits of the offset into scratch register S0.
add(ARMRegisters::S0, base, Op2Immediate | (offset >> 12) | (10 << 8));
272
dtrUp(transferType, srcDst, ARMRegisters::S0, (offset & 0xfff));
274
moveImm(offset, ARMRegisters::S0);
275
dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
278
// Negative offsets mirror the above using the down (subtract) forms.
if (offset >= -0xfff)
279
dtrDown(transferType, srcDst, base, -offset);
280
else if (offset >= -0xfffff) {
281
sub(ARMRegisters::S0, base, Op2Immediate | (-offset >> 12) | (10 << 8));
282
dtrDown(transferType, srcDst, ARMRegisters::S0, (-offset & 0xfff));
284
// Large negative offset: S0 holds the (negative) value; the register
// add wraps modulo 2^32 to the right address.
moveImm(offset, ARMRegisters::S0);
285
dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
290
// NOTE(review): corrupted extraction — the branch selecting the direct
// register-offset form (and its condition) is partly missing.
//
// 32-bit load/store at [base + (index << scale) + offset]. When the
// shifted index can be used directly it is (dtrUpRegister); otherwise
// base + shifted index is computed into scratch S1 and the offset is
// handled by dataTransfer32.
void ARMAssembler::baseIndexTransfer32(DataTransferTypeA transferType, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
292
ASSERT(scale >= 0 && scale <= 3);
293
ARMWord op2 = lsl(index, scale);
296
dtrUpRegister(transferType, srcDst, base, op2);
300
add(ARMRegisters::S1, base, op2);
301
dataTransfer32(transferType, srcDst, ARMRegisters::S1, offset);
304
// NOTE(review): corrupted extraction — the sign-selecting `if`/`else`
// scaffolding and closing braces are missing. Restore from upstream.
//
// 16-bit (halfword) load/store. LDRH/STRH immediates are only 8 bits
// (via getOp2Half), so the split thresholds are smaller than the
// 32-bit path: up to 8 bits direct, up to 16 bits via an add/sub of
// the high byte into S0 (the `(12 << 8)` rotation presumably places
// the immediate at bits 8..15 — TODO confirm), else a full register
// offset in S0.
void ARMAssembler::dataTransfer16(DataTransferTypeB transferType, RegisterID srcDst, RegisterID base, int32_t offset)
308
halfDtrUp(transferType, srcDst, base, getOp2Half(offset));
309
else if (offset <= 0xffff) {
310
// Fold the high byte of the offset into scratch register S0.
add(ARMRegisters::S0, base, Op2Immediate | (offset >> 8) | (12 << 8));
311
halfDtrUp(transferType, srcDst, ARMRegisters::S0, getOp2Half(offset & 0xff));
313
moveImm(offset, ARMRegisters::S0);
314
halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
318
// Negative offsets mirror the above with the down (subtract) forms.
halfDtrDown(transferType, srcDst, base, getOp2Half(-offset));
319
else if (offset >= -0xffff) {
320
sub(ARMRegisters::S0, base, Op2Immediate | (-offset >> 8) | (12 << 8));
321
halfDtrDown(transferType, srcDst, ARMRegisters::S0, getOp2Half(-offset & 0xff));
323
// Large negative offset: register-offset form, wrapping modulo 2^32.
moveImm(offset, ARMRegisters::S0);
324
halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
329
// NOTE(review): corrupted extraction — the `return` after the fast path
// and the closing braces are missing.
//
// Halfword load/store at [base + (index << scale) + offset]. With no
// scale and no offset the index register is used directly; otherwise
// base + shifted index goes into scratch S1 and dataTransfer16 handles
// the remaining offset.
void ARMAssembler::baseIndexTransfer16(DataTransferTypeB transferType, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
331
if (!scale && !offset) {
332
halfDtrUpRegister(transferType, srcDst, base, index);
336
add(ARMRegisters::S1, base, lsl(index, scale));
337
dataTransfer16(transferType, srcDst, ARMRegisters::S1, offset);
340
// NOTE(review): corrupted extraction — `return` statements, the
// negative-offset scaffolding, and closing braces are missing; the two
// visible `offset >= 0` branches presumably sit under opposite signs of
// an outer test — TODO confirm against upstream.
//
// VFP load/store of srcDst at [base + offset]. VLDR/VSTR take an 8-bit
// word-scaled immediate, so four-byte-aligned offsets up to 0x3ff are
// encoded directly (offset >> 2); up to 0x3ffff the high bits are
// added/subtracted into S0 first; anything else (including unaligned
// offsets) computes the full address in S0 with a zero displacement.
void ARMAssembler::dataTransferFloat(DataTransferTypeFloat transferType, FPRegisterID srcDst, RegisterID base, int32_t offset)
342
// VFP cannot directly access memory that is not four-byte-aligned
343
if (!(offset & 0x3)) {
344
if (offset <= 0x3ff && offset >= 0) {
345
doubleDtrUp(transferType, srcDst, base, offset >> 2);
348
if (offset <= 0x3ffff && offset >= 0) {
349
// Fold the high bits into S0; `(11 << 8)` presumably rotates the
// immediate into bits 10..17 — TODO confirm.
add(ARMRegisters::S0, base, Op2Immediate | (offset >> 10) | (11 << 8));
350
doubleDtrUp(transferType, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
355
if (offset <= 0x3ff && offset >= 0) {
356
doubleDtrDown(transferType, srcDst, base, offset >> 2);
359
if (offset <= 0x3ffff && offset >= 0) {
360
sub(ARMRegisters::S0, base, Op2Immediate | (offset >> 10) | (11 << 8));
361
doubleDtrDown(transferType, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
367
// General fallback: compute the full effective address in S0.
moveImm(offset, ARMRegisters::S0);
368
add(ARMRegisters::S0, ARMRegisters::S0, base);
369
doubleDtrUp(transferType, srcDst, ARMRegisters::S0, 0);
372
// NOTE(review): corrupted extraction — braces are missing, but the two
// visible statements form the whole strategy.
//
// VFP load/store at [base + (index << scale) + offset]: compute
// base + shifted index into scratch S1, then delegate the offset
// handling to dataTransferFloat.
void ARMAssembler::baseIndexTransferFloat(DataTransferTypeFloat transferType, FPRegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
374
add(ARMRegisters::S1, base, lsl(index, scale));
375
dataTransferFloat(transferType, srcDst, ARMRegisters::S1, offset);
378
// NOTE(review): corrupted extraction — the alignment padding statement,
// several else-branches, closing braces, and the final return are
// missing (numbering gaps). Restore from upstream before building.
//
// Copies the assembled buffer into executable memory and relinks jumps.
// Visible steps: flush the pending constant pool, require 8-byte
// alignment, copy the buffer, then for each recorded jump locate its
// load instruction and either rewrite it as a direct conditional branch
// (when the displacement fits the 24-bit branch range) or patch the
// absolute target address into its constant-pool slot.
PassRefPtr<ExecutableMemoryHandle> ARMAssembler::executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
380
// 64-bit alignment is required for next constant pool and JIT code as well
381
m_buffer.flushWithoutBarrier(true);
382
if (!m_buffer.isAligned(8))
385
RefPtr<ExecutableMemoryHandle> result = m_buffer.executableCopy(globalData, ownerUID, effort);
386
char* data = reinterpret_cast<char*>(result->start());
388
for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
389
// The last bit is set if the constant must be placed on constant pool.
390
int pos = (iter->m_offset) & (~0x1);
391
ARMWord* ldrAddr = reinterpret_cast_ptr<ARMWord*>(data + pos);
392
ARMWord* addr = getLdrImmAddress(ldrAddr);
393
if (*addr != InvalidBranchTarget) {
394
if (!(iter->m_offset & 1)) {
395
// Displacement in words, relative to pc (DefaultPrefetchOffset ahead).
intptr_t difference = reinterpret_cast_ptr<ARMWord*>(data + *addr) - (ldrAddr + DefaultPrefetchOffset);
397
if ((difference <= MaximumBranchOffsetDistance && difference >= MinimumBranchOffsetDistance)) {
398
// Replace the load with a direct B, preserving its condition field.
*ldrAddr = B | getConditionalField(*ldrAddr) | (difference & BranchOffsetMask);
402
// Out-of-range (or pool-forced) target: store the absolute address.
*addr = reinterpret_cast<ARMWord>(data + *addr);
409
#if OS(LINUX) && COMPILER(RVCT)
411
// NOTE(review): only the signature survives in this extraction — the
// entire RVCT embedded-assembly body (presumably a cacheflush syscall;
// original lines ~412-427) is missing. Restore from upstream.
// Flushes the instruction cache for the freshly written code region.
__asm void ARMAssembler::cacheFlush(void* code, size_t size)
428
#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)