/*
 * Copyright (C) 2009 University of Szeged
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
29
#if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
31
#include "ARMAssembler.h"
37
// Returns the address of the 32-bit literal that the given pc-relative
// "ldr rX, [pc, #imm]" instruction loads. When the instruction is still
// tagged as pointing into a not-yet-flushed constant pool (low bit of the
// encoding set) and constPool is supplied, the slot inside that pool is
// returned instead; the offset field then holds a pool index shifted left
// by one rather than a byte offset.
ARMWord* ARMAssembler::getLdrImmAddress(ARMWord* insn, uint32_t* constPool)
{
    // Must be an ldr ..., [pc +/- imm]
    ASSERT((*insn & 0x0f7f0000) == 0x051f0000);

    // Low bit set: the "offset" is an index into the pending constant pool.
    if (constPool && (*insn & 0x1))
        return reinterpret_cast<ARMWord*>(constPool + ((*insn & SDT_OFFSET_MASK) >> 1));

    // pc reads as the instruction address plus 8 bytes (two ARM words).
    ARMWord addr = reinterpret_cast<ARMWord>(insn) + 2 * sizeof(ARMWord);
    // DT_UP selects whether the 12-bit offset is added to or subtracted from pc.
    if (*insn & DT_UP)
        return reinterpret_cast<ARMWord*>(addr + (*insn & SDT_OFFSET_MASK));
    return reinterpret_cast<ARMWord*>(addr - (*insn & SDT_OFFSET_MASK));
}
void ARMAssembler::linkBranch(void* code, JmpSrc from, void* to, int useConstantPool)
54
ARMWord* insn = reinterpret_cast<ARMWord*>(code) + (from.m_offset / sizeof(ARMWord));
56
if (!useConstantPool) {
57
int diff = reinterpret_cast<ARMWord*>(to) - reinterpret_cast<ARMWord*>(insn + 2);
59
if ((diff <= BOFFSET_MAX && diff >= BOFFSET_MIN)) {
60
*insn = B | getConditionalField(*insn) | (diff & BRANCH_MASK);
61
ExecutableAllocator::cacheFlush(insn, sizeof(ARMWord));
65
ARMWord* addr = getLdrImmAddress(insn);
66
*addr = reinterpret_cast<ARMWord>(to);
67
ExecutableAllocator::cacheFlush(addr, sizeof(ARMWord));
70
void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
72
ARMWord *ldr = reinterpret_cast<ARMWord*>(loadAddr);
73
ARMWord diff = reinterpret_cast<ARMWord*>(constPoolAddr) - ldr;
74
ARMWord index = (*ldr & 0xfff) >> 1;
77
if (diff >= 2 || index > 0) {
78
diff = (diff + index - 2) * sizeof(ARMWord);
79
ASSERT(diff <= 0xfff);
80
*ldr = (*ldr & ~0xfff) | diff;
82
*ldr = (*ldr & ~(0xfff | ARMAssembler::DT_UP)) | sizeof(ARMWord);
87
// Tries to encode 'imm' as an ARM data-processing immediate operand: an
// 8-bit value rotated right by an even amount (rotate field = bits 8-11).
// Returns the encoded operand with OP2_IMM set, or 0 if 'imm' is not
// representable (0 is safe as the failure sentinel because every valid
// result has OP2_IMM set).
ARMWord ARMAssembler::getOp2(ARMWord imm)
{
    int rol;

    // Fits in 8 bits with no rotation.
    if (imm <= 0xff)
        return OP2_IMM | imm;

    // Normalize: rotate the significant byte toward the top of the word,
    // tracking the rotate-right field value in 'rol'. Invariant after each
    // step: encoding the top byte with rotate field 'rol' reproduces the
    // original value (rol = totalLeftRotation / 2 + 4, mod 16).
    if ((imm & 0xff000000) == 0) {
        imm = (imm << 8) | (imm >> 24);
        rol = 8;
    }
    else {
        imm = (imm << 24) | (imm >> 8);
        rol = 0;
    }

    if ((imm & 0xff000000) == 0) {
        imm = (imm << 8) | (imm >> 24);
        rol += 4;
    }

    if ((imm & 0xf0000000) == 0) {
        imm = (imm << 4) | (imm >> 28);
        rol += 2;
    }

    if ((imm & 0xc0000000) == 0) {
        imm = (imm << 2) | (imm >> 30);
        rol += 1;
    }

    // Encodable only if everything now fits in the top byte.
    if ((imm & 0x00ffffff) == 0)
        return OP2_IMM | (imm >> 24) | (rol << 8);

    return 0;
}
int ARMAssembler::genInt(int reg, ARMWord imm, bool positive)
126
// Step1: Search a non-immediate part
135
if ((imm & mask) == 0) {
136
imm = (imm << rol) | (imm >> (32 - rol));
137
rol = 4 + (rol >> 1);
144
imm = (imm << 8) | (imm >> 24);
148
if ((imm & mask) == 0) {
149
imm = (imm << rol) | (imm >> (32 - rol));
150
rol = (rol >> 1) - 8;
162
ASSERT((imm & 0xff) == 0);
164
if ((imm & 0xff000000) == 0) {
165
imm1 = OP2_IMM | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
166
imm2 = OP2_IMM | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
167
} else if (imm & 0xc0000000) {
168
imm1 = OP2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
172
if ((imm & 0xff000000) == 0) {
177
if ((imm & 0xf0000000) == 0) {
182
if ((imm & 0xc0000000) == 0) {
187
if ((imm & 0x00ffffff) == 0)
188
imm2 = OP2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
192
if ((imm & 0xf0000000) == 0) {
197
if ((imm & 0xc0000000) == 0) {
202
imm1 = OP2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
206
if ((imm & 0xf0000000) == 0) {
211
if ((imm & 0xc0000000) == 0) {
216
if ((imm & 0x00ffffff) == 0)
217
imm2 = OP2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
224
orr_r(reg, reg, imm2);
227
bic_r(reg, reg, imm2);
233
// Produces an operand for 'imm': a rotated-immediate encoding when one
// exists (possibly of ~imm, tagged OP2_INV_IMM if the caller accepts an
// inverted operand), otherwise materializes the value into tmpReg — by two
// instructions when possible, else via a constant-pool load — and returns
// the register. getOp2 returning 0 means "not encodable".
ARMWord ARMAssembler::getImm(ARMWord imm, int tmpReg, bool invert)
{
    ARMWord tmp;

    // Do it by 1 instruction
    tmp = getOp2(imm);
    if (tmp)
        return tmp;

    tmp = getOp2(~imm);
    if (tmp) {
        if (invert)
            return tmp | OP2_INV_IMM;
        // NOTE(review): reconstructed from fragmentary source — the
        // non-invert path loads the complement with mvn; confirm upstream.
        mvn_r(tmpReg, tmp);
        return tmpReg;
    }

    // Do it by 2 instruction
    if (genInt(tmpReg, imm, true))
        return tmpReg;
    if (genInt(tmpReg, ~imm, false))
        return tmpReg;

    // Fall back to a constant-pool load.
    ldr_imm(tmpReg, imm);
    return tmpReg;
}
// Loads the constant 'imm' into register 'dest' using the cheapest
// available sequence: one mov/mvn when the value (or its complement) is a
// rotated immediate, two instructions via genInt, else a constant-pool load.
void ARMAssembler::moveImm(ARMWord imm, int dest)
{
    ARMWord tmp;

    // Do it by 1 instruction
    tmp = getOp2(imm);
    if (tmp) {
        mov_r(dest, tmp);
        return;
    }

    tmp = getOp2(~imm);
    if (tmp) {
        mvn_r(dest, tmp);
        return;
    }

    // Do it by 2 instruction
    if (genInt(dest, imm, true))
        return;
    if (genInt(dest, ~imm, false))
        return;

    // Fall back to a constant-pool load.
    ldr_imm(dest, imm);
}
// Memory load/store helpers
288
void ARMAssembler::dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset)
292
dtr_u(isLoad, srcDst, base, offset);
293
else if (offset <= 0xfffff) {
294
add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
295
dtr_u(isLoad, srcDst, ARMRegisters::S0, offset & 0xfff);
297
ARMWord reg = getImm(offset, ARMRegisters::S0);
298
dtr_ur(isLoad, srcDst, base, reg);
303
dtr_d(isLoad, srcDst, base, offset);
304
else if (offset <= 0xfffff) {
305
sub_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
306
dtr_d(isLoad, srcDst, ARMRegisters::S0, offset & 0xfff);
308
ARMWord reg = getImm(offset, ARMRegisters::S0);
309
dtr_dr(isLoad, srcDst, base, reg);
314
void ARMAssembler::baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
318
ASSERT(scale >= 0 && scale <= 3);
319
op2 = lsl(index, scale);
321
if (offset >= 0 && offset <= 0xfff) {
322
add_r(ARMRegisters::S0, base, op2);
323
dtr_u(isLoad, srcDst, ARMRegisters::S0, offset);
326
if (offset <= 0 && offset >= -0xfff) {
327
add_r(ARMRegisters::S0, base, op2);
328
dtr_d(isLoad, srcDst, ARMRegisters::S0, -offset);
332
ldr_un_imm(ARMRegisters::S0, offset);
333
add_r(ARMRegisters::S0, ARMRegisters::S0, op2);
334
dtr_ur(isLoad, srcDst, base, ARMRegisters::S0);
337
void ARMAssembler::doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID base, int32_t offset)
340
if (offset <= 0x3ff && offset >= 0) {
341
fdtr_u(isLoad, srcDst, base, offset >> 2);
344
if (offset <= 0x3ffff && offset >= 0) {
345
add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 10) | (11 << 8));
346
fdtr_u(isLoad, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
351
if (offset <= 0x3ff && offset >= 0) {
352
fdtr_d(isLoad, srcDst, base, offset >> 2);
355
if (offset <= 0x3ffff && offset >= 0) {
356
sub_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 10) | (11 << 8));
357
fdtr_d(isLoad, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
363
ldr_un_imm(ARMRegisters::S0, offset);
364
add_r(ARMRegisters::S0, ARMRegisters::S0, base);
365
fdtr_u(isLoad, srcDst, ARMRegisters::S0, 0);
368
// Flushes the pending constant pool, copies the generated code into
// executable memory from 'allocator', resolves every recorded intra-buffer
// jump, and returns the base address of the executable copy.
void* ARMAssembler::executableCopy(ExecutablePool* allocator)
{
    // 64-bit alignment is required for next constant pool and JIT code as well
    m_buffer.flushWithoutBarrier(true);
    if (m_buffer.uncheckedSize() & 0x7)
        // NOTE(review): alignment-failure handling reconstructed from
        // fragmentary source (upstream emits a breakpoint); confirm.
        bkpt(0);

    char* data = reinterpret_cast<char*>(m_buffer.executableCopy(allocator));

    for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
        // The last bit is set if the constant must be placed on constant pool.
        int pos = (*iter) & (~0x1);
        ARMWord* ldrAddr = reinterpret_cast<ARMWord*>(data + pos);
        ARMWord offset = *getLdrImmAddress(ldrAddr);
        // 0xffffffff marks a jump whose target was never set; leave it.
        if (offset != 0xffffffff) {
            JmpSrc jmpSrc(pos);
            linkBranch(data, jmpSrc, data + offset, ((*iter) & 1));
        }
    }

    return data;
}
#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)