/****************************************************************************
**
** Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
** Contact: http://www.qt-project.org/legal
**
** This file is part of the QtQml module of the Qt Toolkit.
**
** $QT_BEGIN_LICENSE:LGPL$
** Commercial License Usage
** Licensees holding valid commercial Qt licenses may use this file in
** accordance with the commercial license agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and Digia. For licensing terms and
** conditions see http://qt.digia.com/licensing. For further information
** use the contact form at http://qt.digia.com/contact-us.
**
** GNU Lesser General Public License Usage
** Alternatively, this file may be used under the terms of the GNU Lesser
** General Public License version 2.1 as published by the Free Software
** Foundation and appearing in the file LICENSE.LGPL included in the
** packaging of this file. Please review the following information to
** ensure the GNU Lesser General Public License version 2.1 requirements
** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
**
** In addition, as a special exception, Digia gives you certain additional
** rights. These rights are described in the Digia Qt LGPL Exception
** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
**
** GNU General Public License Usage
** Alternatively, this file may be used under the terms of the GNU
** General Public License version 3.0 as published by the Free Software
** Foundation and appearing in the file LICENSE.GPL included in the
** packaging of this file. Please review the following information to
** ensure the GNU General Public License version 3.0 requirements will be
** met: http://www.gnu.org/copyleft/gpl.html.
**
** $QT_END_LICENSE$
**
****************************************************************************/
42
#include "qv4isel_masm_p.h"
43
#include "qv4runtime_p.h"
44
#include "qv4object_p.h"
45
#include "qv4functionobject_p.h"
46
#include "qv4regexpobject_p.h"
47
#include "qv4lookup_p.h"
48
#include "qv4function_p.h"
50
#include "qv4regalloc_p.h"
51
#include "qv4assembler_p.h"
53
#include <assembler/LinkBuffer.h>
65
using namespace QV4::JIT;
67
// Destructor for the JIT compilation unit.
// NOTE(review): the body was lost to extraction garbling; the surviving line
// numbering indicates an empty body here — verify against upstream before
// relying on this (e.g. whether generated code must be unregistered).
CompilationUnit::~CompilationUnit()
{
}
// Creates one QV4::Function wrapper per compiled function in this unit and
// points each wrapper's code entry at the corresponding JIT-generated machine
// code (codeRefs[i]). Called once when the unit is attached to an engine.
void CompilationUnit::linkBackendToEngine(ExecutionEngine *engine)
{
    runtimeFunctions.resize(data->functionTableSize);
    runtimeFunctions.fill(0);
    for (int i = 0; i < runtimeFunctions.size(); ++i) {
        const CompiledData::Function *compiledFunction = data->functionAt(i);

        // The cast re-types the raw executable address as the engine's
        // JIT-ted function entry-point signature.
        QV4::Function *runtimeFunction = new QV4::Function(engine, this, compiledFunction,
                (ReturnedValue (*)(QV4::ExecutionContext *, const uchar *)) codeRefs[i].code().executableAddress());
        runtimeFunctions[i] = runtimeFunction;
    }
}
// Returns the executable-allocator chunk that holds the generated code for
// the given function index, or 0 when the index is out of range or the code
// has no executable memory handle.
QV4::ExecutableAllocator::ChunkOfPages *CompilationUnit::chunkForFunction(int functionIndex)
{
    if (functionIndex < 0 || functionIndex >= codeRefs.count())
        return 0;
    JSC::ExecutableMemoryHandle *handle = codeRefs[functionIndex].executableMemory();
    if (!handle)
        return 0;
    return handle->chunk();
}
/* Platform/Calling convention/Architecture specific section */
99
# if OS(LINUX) || OS(MAC_OS_X)
100
static const Assembler::RegisterID calleeSavedRegisters[] = {
101
JSC::X86Registers::ebx,
102
JSC::X86Registers::r12, // LocalsRegister
103
JSC::X86Registers::r13,
104
JSC::X86Registers::r14, // ContextRegister
105
JSC::X86Registers::r15
108
static const Assembler::RegisterID calleeSavedRegisters[] = {
109
JSC::X86Registers::ebx,
110
JSC::X86Registers::esi,
111
JSC::X86Registers::edi,
112
JSC::X86Registers::r12, // LocalsRegister
113
JSC::X86Registers::r13,
114
JSC::X86Registers::r14, // ContextRegister
115
JSC::X86Registers::r15
121
static const Assembler::RegisterID calleeSavedRegisters[] = {
122
JSC::X86Registers::ebx, // temporary register
123
JSC::X86Registers::esi, // ContextRegister
124
JSC::X86Registers::edi // LocalsRegister
129
static const Assembler::RegisterID calleeSavedRegisters[] = {
130
JSC::ARMRegisters::r11,
131
JSC::ARMRegisters::r10,
132
JSC::ARMRegisters::r9,
133
JSC::ARMRegisters::r8,
134
JSC::ARMRegisters::r7,
135
JSC::ARMRegisters::r6,
136
JSC::ARMRegisters::r5,
137
JSC::ARMRegisters::r4
141
const int Assembler::calleeSavedRegisterCount = sizeof(calleeSavedRegisters) / sizeof(calleeSavedRegisters[0]);
143
/* End of platform/calling convention/architecture specific section */
146
// Out-of-line definition of the static Void marker declared in the Assembler
// class — presumably used as a "no result" tag for generated calls; confirm
// against qv4assembler_p.h.
const Assembler::VoidType Assembler::Void;
148
// Sets up an assembler for generating code for one IR function.
// 'maxArgCountForBuiltins' sizes the stack area reserved for built-in calls.
// NOTE(review): three initializer-list lines were lost to garbling; the
// _constTable/_nextBlock/_isel entries below were reconstructed (corroborated
// by the surviving line numbering) — verify member names and order against
// the class declaration in qv4assembler_p.h.
Assembler::Assembler(InstructionSelection *isel, IR::Function* function, QV4::ExecutableAllocator *executableAllocator,
                     int maxArgCountForBuiltins)
    : _stackLayout(function, maxArgCountForBuiltins)
    , _constTable(this)
    , _function(function)
    , _nextBlock(0)
    , _executableAllocator(executableAllocator)
    , _isel(isel)
{
}
// Marks the start of a basic block: records the current assembler label for
// 'block', remembers its exception handler, and notes which block follows in
// layout order so jumpToBlock() can elide fall-through jumps.
void Assembler::registerBlock(IR::BasicBlock* block, IR::BasicBlock *nextBlock)
{
    _addrs[block] = label();
    catchBlock = block->catchBlock;
    _nextBlock = nextBlock;
}
// Emits an unconditional jump to 'target', unless 'target' is laid out
// immediately after the current block, in which case execution falls through
// and no jump is needed. The jump is recorded for later patching.
void Assembler::jumpToBlock(IR::BasicBlock* current, IR::BasicBlock *target)
{
    Q_UNUSED(current);

    if (target != _nextBlock)
        _patches[target].append(jump());
}
// Queues 'targetJump' to be linked to the label of 'targetBlock' once all
// block labels are known (done when the function is finalized).
void Assembler::addPatch(IR::BasicBlock* targetBlock, Jump targetJump)
{
    _patches[targetBlock].append(targetJump);
}
// Queues a pointer-sized data slot ('patch') to be filled with the final
// address of 'target' at link time.
// NOTE(review): the three lines building the DataLabelPatch record were
// reconstructed (only the append survived garbling) — the use of 'p' by the
// surviving append line corroborates them.
void Assembler::addPatch(DataLabelPtr patch, Label target)
{
    DataLabelPatch p;
    p.dataLabel = patch;
    p.target = target;
    _dataLabelPatches.append(p);
}
// Queues a pointer-sized data slot ('patch') to be filled with the final
// address of basic block 'target' at link time.
void Assembler::addPatch(DataLabelPtr patch, IR::BasicBlock *target)
{
    _labelPatches[target].append(patch);
}
void Assembler::generateCJumpOnNonZero(RegisterID reg, IR::BasicBlock *currentBlock,
193
IR::BasicBlock *trueBlock, IR::BasicBlock *falseBlock)
195
generateCJumpOnCompare(NotEqual, reg, TrustedImm32(0), currentBlock, trueBlock, falseBlock);
198
void Assembler::generateCJumpOnCompare(RelationalCondition cond, RegisterID left,TrustedImm32 right,
199
IR::BasicBlock *currentBlock, IR::BasicBlock *trueBlock,
200
IR::BasicBlock *falseBlock)
202
if (trueBlock == _nextBlock) {
203
Jump target = branch32(invert(cond), left, right);
204
addPatch(falseBlock, target);
206
Jump target = branch32(cond, left, right);
207
addPatch(trueBlock, target);
208
jumpToBlock(currentBlock, falseBlock);
212
void Assembler::generateCJumpOnCompare(RelationalCondition cond, RegisterID left, RegisterID right,
213
IR::BasicBlock *currentBlock, IR::BasicBlock *trueBlock,
214
IR::BasicBlock *falseBlock)
216
if (trueBlock == _nextBlock) {
217
Jump target = branch32(invert(cond), left, right);
218
addPatch(falseBlock, target);
220
Jump target = branch32(cond, left, right);
221
addPatch(trueBlock, target);
222
jumpToBlock(currentBlock, falseBlock);
226
// Computes the memory address of IR temp 't', possibly emitting loads that
// walk up the scope chain for scoped temps. 'baseReg' may be clobbered as the
// base of the returned Pointer.
// NOTE(review): the scope-walk loop, the switch header and the break/default
// lines were reconstructed from the surviving case bodies and original line
// numbering — verify against upstream.
Assembler::Pointer Assembler::loadTempAddress(RegisterID baseReg, IR::Temp *t)
{
    int32_t offset = 0;
    int scope = t->scope;
    RegisterID context = ContextRegister;
    if (scope) {
        // Walk 'scope' levels up the context chain into baseReg.
        loadPtr(Address(ContextRegister, qOffsetOf(ExecutionContext, outer)), baseReg);
        --scope;
        context = baseReg;
        while (scope) {
            loadPtr(Address(context, qOffsetOf(ExecutionContext, outer)), context);
            --scope;
        }
    }
    switch (t->kind) {
    case IR::Temp::Formal:
    case IR::Temp::ScopedFormal: {
        // Formals live in the CallData; index 0 is the this-object, hence -1.
        loadPtr(Address(context, qOffsetOf(ExecutionContext, callData)), baseReg);
        offset = sizeof(CallData) + (t->index - 1) * sizeof(Value);
    } break;
    case IR::Temp::Local:
    case IR::Temp::ScopedLocal: {
        loadPtr(Address(context, qOffsetOf(CallContext, locals)), baseReg);
        offset = t->index * sizeof(Value);
    } break;
    case IR::Temp::StackSlot: {
        // Stack slots are addressed off the frame directly; no loads needed.
        return stackSlotPointer(t);
    } break;
    default:
        Q_UNREACHABLE();
    }
    return Pointer(baseReg, offset);
}
// Returns a Pointer to the runtime string for 'string' by indexing the
// compilation unit's runtimeStrings table. Clobbers ScratchRegister and
// 'reg'; the string is interned via the instruction selection's string pool.
Assembler::Pointer Assembler::loadStringAddress(RegisterID reg, const QString &string)
{
    loadPtr(Address(Assembler::ContextRegister, qOffsetOf(QV4::ExecutionContext, compilationUnit)), Assembler::ScratchRegister);
    loadPtr(Address(Assembler::ScratchRegister, qOffsetOf(QV4::CompiledData::CompilationUnit, runtimeStrings)), reg);
    const int id = _isel->registerString(string);
    return Pointer(reg, id * sizeof(QV4::StringValue));
}
// Like loadStringAddress(), but leaves the absolute address of the runtime
// string entry in 'reg' (base + offset folded into the register) instead of
// returning a base+offset Pointer.
void Assembler::loadStringRef(RegisterID reg, const QString &string)
{
    loadPtr(Address(Assembler::ContextRegister, qOffsetOf(QV4::ExecutionContext, compilationUnit)), reg);
    loadPtr(Address(reg, qOffsetOf(QV4::CompiledData::CompilationUnit, runtimeStrings)), reg);
    const int id = _isel->registerString(string);
    addPtr(TrustedImmPtr(id * sizeof(QV4::StringValue)), reg);
}
// Stores the constant 'value' into the memory location of IR temp
// 'destination'. Clobbers ScratchRegister via loadTempAddress().
void Assembler::storeValue(QV4::Primitive value, IR::Temp* destination)
{
    Address addr = loadTempAddress(ScratchRegister, destination);
    storeValue(value, addr);
}
void Assembler::enterStandardStackFrame()
284
platformEnterStandardStackFrame();
286
// ### FIXME: Handle through calleeSavedRegisters mechanism
287
// or eliminate StackFrameRegister altogether.
288
push(StackFrameRegister);
289
move(StackPointerRegister, StackFrameRegister);
291
int frameSize = _stackLayout.calculateStackFrameSize();
293
subPtr(TrustedImm32(frameSize), StackPointerRegister);
295
for (int i = 0; i < calleeSavedRegisterCount; ++i)
296
storePtr(calleeSavedRegisters[i], Address(StackFrameRegister, -(i + 1) * sizeof(void*)));
300
void Assembler::leaveStandardStackFrame()
302
// restore the callee saved registers
303
for (int i = calleeSavedRegisterCount - 1; i >= 0; --i)
304
loadPtr(Address(StackFrameRegister, -(i + 1) * sizeof(void*)), calleeSavedRegisters[i]);
306
int frameSize = _stackLayout.calculateStackFrameSize();
307
// Work around bug in ARMv7Assembler.h where add32(imm, sp, sp) doesn't
308
// work well for large immediates.
310
move(TrustedImm32(frameSize), JSC::ARMRegisters::r3);
311
add32(JSC::ARMRegisters::r3, StackPointerRegister);
313
addPtr(TrustedImm32(frameSize), StackPointerRegister);
316
pop(StackFrameRegister);
317
platformLeaveStandardStackFrame();
323
// Try to load the source expression into the destination FP register. This assumes that two
324
// general purpose (integer) registers are available: the ScratchRegister and the
325
// ReturnValueRegister. It returns a Jump if no conversion can be performed.
326
Assembler::Jump Assembler::genTryDoubleConversion(IR::Expr *src, Assembler::FPRegisterID dest)
330
moveDouble(toDoubleRegister(src, dest), dest);
331
return Assembler::Jump();
333
convertInt32ToDouble(toInt32Register(src, Assembler::ScratchRegister),
335
return Assembler::Jump();
337
convertUInt32ToDouble(toUInt32Register(src, Assembler::ScratchRegister),
338
dest, Assembler::ReturnValueRegister);
339
return Assembler::Jump();
341
case IR::UndefinedType:
350
IR::Temp *sourceTemp = src->asTemp();
351
Q_ASSERT(sourceTemp);
353
// It's not a number type, so it cannot be in a register.
354
Q_ASSERT(sourceTemp->kind != IR::Temp::PhysicalRegister || sourceTemp->type == IR::BoolType);
356
Assembler::Pointer tagAddr = loadTempAddress(Assembler::ScratchRegister, sourceTemp);
358
load32(tagAddr, Assembler::ScratchRegister);
360
// check if it's an int32:
361
Assembler::Jump isNoInt = branch32(Assembler::NotEqual, Assembler::ScratchRegister,
362
Assembler::TrustedImm32(Value::_Integer_Type));
363
convertInt32ToDouble(toInt32Register(src, Assembler::ScratchRegister), dest);
364
Assembler::Jump intDone = jump();
366
// not an int, check if it's a double:
368
#if QT_POINTER_SIZE == 8
369
and32(Assembler::TrustedImm32(Value::IsDouble_Mask), Assembler::ScratchRegister);
370
Assembler::Jump isNoDbl = branch32(Assembler::Equal, Assembler::ScratchRegister,
371
Assembler::TrustedImm32(0));
373
and32(Assembler::TrustedImm32(Value::NotDouble_Mask), Assembler::ScratchRegister);
374
Assembler::Jump isNoDbl = branch32(Assembler::Equal, Assembler::ScratchRegister,
375
Assembler::TrustedImm32(Value::NotDouble_Mask));
377
toDoubleRegister(src, dest);
383
#if !defined(QT_NO_DEBUG) || defined(QT_FORCE_ASSERTS)
385
inline bool isPregOrConst(IR::Expr *e)
387
if (IR::Temp *t = e->asTemp())
388
return t->kind == IR::Temp::PhysicalRegister;
389
return e->asConst() != 0;
391
} // anonymous namespace
394
// Emits a floating-point compare-and-branch for the IR comparison 'op' on
// 'left'/'right' (each a physical-register temp or a constant, at most one
// constant), optionally inverting the condition. Returns the branch to be
// patched by the caller.
// NOTE(review): the switch header, OpEqual/OpNotEqual labels and default case
// were reconstructed from the surviving cases and line numbering — verify.
Assembler::Jump Assembler::branchDouble(bool invertCondition, IR::AluOp op,
                                        IR::Expr *left, IR::Expr *right)
{
    Q_ASSERT(isPregOrConst(left));
    Q_ASSERT(isPregOrConst(right));
    Q_ASSERT(left->asConst() == 0 || right->asConst() == 0);

    Assembler::DoubleCondition cond;
    switch (op) {
    case IR::OpGt: cond = Assembler::DoubleGreaterThan; break;
    case IR::OpLt: cond = Assembler::DoubleLessThan; break;
    case IR::OpGe: cond = Assembler::DoubleGreaterThanOrEqual; break;
    case IR::OpLe: cond = Assembler::DoubleLessThanOrEqual; break;
    case IR::OpEqual:
    case IR::OpStrictEqual: cond = Assembler::DoubleEqual; break;
    case IR::OpNotEqual:
    case IR::OpStrictNotEqual: cond = Assembler::DoubleNotEqualOrUnordered; break; // No, the inversion of DoubleEqual is NOT DoubleNotEqual.
    default:
        Q_UNREACHABLE();
    }
    if (invertCondition)
        cond = JSC::MacroAssembler::invert(cond);

    return JSC::MacroAssembler::branchDouble(cond, toDoubleRegister(left), toDoubleRegister(right));
}