2
* Copyright (C) 2009, 2010, 2012, 2013 Apple Inc. All rights reserved.
3
* Copyright (C) 2010 University of Szeged
5
* Redistribution and use in source and binary forms, with or without
6
* modification, are permitted provided that the following conditions
8
* 1. Redistributions of source code must retain the above copyright
9
* notice, this list of conditions and the following disclaimer.
10
* 2. Redistributions in binary form must reproduce the above copyright
11
* notice, this list of conditions and the following disclaimer in the
12
* documentation and/or other materials provided with the distribution.
14
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
#ifndef ARMAssembler_h
28
#define ARMAssembler_h
30
#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
32
#include "AssemblerBuffer.h"
33
#include "MacroAssemblerCodeRef.h"
34
#include <wtf/Assertions.h>
35
#include <wtf/Vector.h>
39
#include <libkern/OSCacheControl.h>
44
namespace ARMRegisters {
53
r7, wr = r7, // thumb work register
55
r9, sb = r9, // static base
56
r10, sl = r10, // stack limit
57
r11, fp = r11, // frame pointer
132
} FPDoubleRegisterID;
169
inline FPSingleRegisterID asSingle(FPDoubleRegisterID reg)
172
return (FPSingleRegisterID)(reg << 1);
175
inline FPDoubleRegisterID asDouble(FPSingleRegisterID reg)
178
return (FPDoubleRegisterID)(reg >> 1);
182
class ARMv7Assembler;
183
class ARMThumbImmediate {
184
friend class ARMv7Assembler;
186
typedef uint8_t ThumbImmediateType;
187
static const ThumbImmediateType TypeInvalid = 0;
188
static const ThumbImmediateType TypeEncoded = 1;
189
static const ThumbImmediateType TypeUInt16 = 2;
199
// If this is an encoded immediate, then it may describe a shift, or a pattern.
201
unsigned shiftValue7 : 7;
202
unsigned shiftAmount : 5;
205
unsigned immediate : 8;
206
unsigned pattern : 4;
208
} ThumbImmediateValue;
210
// byte0 contains least significant bit; not using an array to make client code endian agnostic.
221
ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
223
if (value & ~((1 << N) - 1)) /* check for any of the top N bits (of 2N bits) are set */
224
value >>= N; /* if any were set, lose the bottom N */
225
else /* if none of the top N bits are set, */
226
zeros += N; /* then we have identified N leading zeros */
229
static int32_t countLeadingZeros(uint32_t value)
235
countLeadingZerosPartial(value, zeros, 16);
236
countLeadingZerosPartial(value, zeros, 8);
237
countLeadingZerosPartial(value, zeros, 4);
238
countLeadingZerosPartial(value, zeros, 2);
239
countLeadingZerosPartial(value, zeros, 1);
244
: m_type(TypeInvalid)
249
ARMThumbImmediate(ThumbImmediateType type, ThumbImmediateValue value)
255
ARMThumbImmediate(ThumbImmediateType type, uint16_t value)
258
// Make sure this constructor is only reached with type TypeUInt16;
259
// this extra parameter makes the code a little clearer by making it
260
// explicit at call sites which type is being constructed
261
ASSERT_UNUSED(type, type == TypeUInt16);
263
m_value.asInt = value;
267
static ARMThumbImmediate makeEncodedImm(uint32_t value)
269
ThumbImmediateValue encoding;
272
// okay, these are easy.
274
encoding.immediate = value;
275
encoding.pattern = 0;
276
return ARMThumbImmediate(TypeEncoded, encoding);
279
int32_t leadingZeros = countLeadingZeros(value);
280
// if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
281
ASSERT(leadingZeros < 24);
283
// Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
284
// Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
285
// zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z).
286
int32_t rightShiftAmount = 24 - leadingZeros;
287
if (value == ((value >> rightShiftAmount) << rightShiftAmount)) {
288
// Shift the value down to the low byte position. The assign to
289
// shiftValue7 drops the implicit top bit.
290
encoding.shiftValue7 = value >> rightShiftAmount;
291
// The endoded shift amount is the magnitude of a right rotate.
292
encoding.shiftAmount = 8 + leadingZeros;
293
return ARMThumbImmediate(TypeEncoded, encoding);
299
if ((bytes.byte0 == bytes.byte1) && (bytes.byte0 == bytes.byte2) && (bytes.byte0 == bytes.byte3)) {
300
encoding.immediate = bytes.byte0;
301
encoding.pattern = 3;
302
return ARMThumbImmediate(TypeEncoded, encoding);
305
if ((bytes.byte0 == bytes.byte2) && !(bytes.byte1 | bytes.byte3)) {
306
encoding.immediate = bytes.byte0;
307
encoding.pattern = 1;
308
return ARMThumbImmediate(TypeEncoded, encoding);
311
if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) {
312
encoding.immediate = bytes.byte1;
313
encoding.pattern = 2;
314
return ARMThumbImmediate(TypeEncoded, encoding);
317
return ARMThumbImmediate();
320
static ARMThumbImmediate makeUInt12(int32_t value)
322
return (!(value & 0xfffff000))
323
? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
324
: ARMThumbImmediate();
327
static ARMThumbImmediate makeUInt12OrEncodedImm(int32_t value)
329
// If this is not a 12-bit unsigned it, try making an encoded immediate.
330
return (!(value & 0xfffff000))
331
? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
332
: makeEncodedImm(value);
335
// The 'make' methods, above, return a !isValid() value if the argument
336
// cannot be represented as the requested type. This methods is called
337
// 'get' since the argument can always be represented.
338
static ARMThumbImmediate makeUInt16(uint16_t value)
340
return ARMThumbImmediate(TypeUInt16, value);
345
return m_type != TypeInvalid;
348
uint16_t asUInt16() const { return m_value.asInt; }
350
// These methods rely on the format of encoded byte values.
351
bool isUInt3() { return !(m_value.asInt & 0xfff8); }
352
bool isUInt4() { return !(m_value.asInt & 0xfff0); }
353
bool isUInt5() { return !(m_value.asInt & 0xffe0); }
354
bool isUInt6() { return !(m_value.asInt & 0xffc0); }
355
bool isUInt7() { return !(m_value.asInt & 0xff80); }
356
bool isUInt8() { return !(m_value.asInt & 0xff00); }
357
bool isUInt9() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfe00); }
358
bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); }
359
bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); }
360
bool isUInt16() { return m_type == TypeUInt16; }
361
uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; }
362
uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; }
363
uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; }
364
uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; }
365
uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; }
366
uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; }
367
uint16_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; }
368
uint16_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; }
369
uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; }
370
uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; }
372
bool isEncodedImm() { return m_type == TypeEncoded; }
375
ThumbImmediateType m_type;
376
ThumbImmediateValue m_value;
385
SRType_RRX = SRType_ROR
388
class ShiftTypeAndAmount {
389
friend class ARMv7Assembler;
394
m_u.type = (ARMShiftType)0;
398
ShiftTypeAndAmount(ARMShiftType type, unsigned amount)
401
m_u.amount = amount & 31;
404
unsigned lo4() { return m_u.lo4; }
405
unsigned hi4() { return m_u.hi4; }
420
class ARMv7Assembler {
422
typedef ARMRegisters::RegisterID RegisterID;
423
typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID;
424
typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID;
425
typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID;
427
// (HS, LO, HI, LS) -> (AE, B, A, BE)
428
// (VS, VC) -> (O, NO)
432
ConditionHS, ConditionCS = ConditionHS,
433
ConditionLO, ConditionCC = ConditionLO,
448
#define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 3) | (index))
449
#define JUMP_ENUM_SIZE(jump) ((jump) >> 3)
450
enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0),
451
JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 5 * sizeof(uint16_t)),
452
JumpCondition = JUMP_ENUM_WITH_SIZE(2, 6 * sizeof(uint16_t)),
453
JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(3, 5 * sizeof(uint16_t)),
454
JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(4, 6 * sizeof(uint16_t))
457
LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
458
LinkJumpT1 = JUMP_ENUM_WITH_SIZE(1, sizeof(uint16_t)),
459
LinkJumpT2 = JUMP_ENUM_WITH_SIZE(2, sizeof(uint16_t)),
460
LinkJumpT3 = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint16_t)),
461
LinkJumpT4 = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint16_t)),
462
LinkConditionalJumpT4 = JUMP_ENUM_WITH_SIZE(5, 3 * sizeof(uint16_t)),
463
LinkBX = JUMP_ENUM_WITH_SIZE(6, 5 * sizeof(uint16_t)),
464
LinkConditionalBX = JUMP_ENUM_WITH_SIZE(7, 6 * sizeof(uint16_t))
469
LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
471
data.realTypes.m_from = from;
472
data.realTypes.m_to = to;
473
data.realTypes.m_type = type;
474
data.realTypes.m_linkType = LinkInvalid;
475
data.realTypes.m_condition = condition;
477
void operator=(const LinkRecord& other)
479
data.copyTypes.content[0] = other.data.copyTypes.content[0];
480
data.copyTypes.content[1] = other.data.copyTypes.content[1];
481
data.copyTypes.content[2] = other.data.copyTypes.content[2];
483
intptr_t from() const { return data.realTypes.m_from; }
484
void setFrom(intptr_t from) { data.realTypes.m_from = from; }
485
intptr_t to() const { return data.realTypes.m_to; }
486
JumpType type() const { return data.realTypes.m_type; }
487
JumpLinkType linkType() const { return data.realTypes.m_linkType; }
488
void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
489
Condition condition() const { return data.realTypes.m_condition; }
493
intptr_t m_from : 31;
496
JumpLinkType m_linkType : 8;
497
Condition m_condition : 16;
502
COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct);
507
: m_indexOfLastWatchpoint(INT_MIN)
508
, m_indexOfTailOfLastWatchpoint(INT_MIN)
515
static bool BadReg(RegisterID reg)
517
return (reg == ARMRegisters::sp) || (reg == ARMRegisters::pc);
520
uint32_t singleRegisterMask(FPSingleRegisterID rdNum, int highBitsShift, int lowBitShift)
522
uint32_t rdMask = (rdNum >> 1) << highBitsShift;
524
rdMask |= 1 << lowBitShift;
528
uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum, int highBitShift, int lowBitsShift)
530
uint32_t rdMask = (rdNum & 0xf) << lowBitsShift;
532
rdMask |= 1 << highBitShift;
537
OP_ADD_reg_T1 = 0x1800,
538
OP_SUB_reg_T1 = 0x1A00,
539
OP_ADD_imm_T1 = 0x1C00,
540
OP_SUB_imm_T1 = 0x1E00,
541
OP_MOV_imm_T1 = 0x2000,
542
OP_CMP_imm_T1 = 0x2800,
543
OP_ADD_imm_T2 = 0x3000,
544
OP_SUB_imm_T2 = 0x3800,
545
OP_AND_reg_T1 = 0x4000,
546
OP_EOR_reg_T1 = 0x4040,
547
OP_TST_reg_T1 = 0x4200,
548
OP_RSB_imm_T1 = 0x4240,
549
OP_CMP_reg_T1 = 0x4280,
550
OP_ORR_reg_T1 = 0x4300,
551
OP_MVN_reg_T1 = 0x43C0,
552
OP_ADD_reg_T2 = 0x4400,
553
OP_MOV_reg_T1 = 0x4600,
556
OP_STR_reg_T1 = 0x5000,
557
OP_STRH_reg_T1 = 0x5200,
558
OP_STRB_reg_T1 = 0x5400,
559
OP_LDRSB_reg_T1 = 0x5600,
560
OP_LDR_reg_T1 = 0x5800,
561
OP_LDRH_reg_T1 = 0x5A00,
562
OP_LDRB_reg_T1 = 0x5C00,
563
OP_LDRSH_reg_T1 = 0x5E00,
564
OP_STR_imm_T1 = 0x6000,
565
OP_LDR_imm_T1 = 0x6800,
566
OP_STRB_imm_T1 = 0x7000,
567
OP_LDRB_imm_T1 = 0x7800,
568
OP_STRH_imm_T1 = 0x8000,
569
OP_LDRH_imm_T1 = 0x8800,
570
OP_STR_imm_T2 = 0x9000,
571
OP_LDR_imm_T2 = 0x9800,
572
OP_ADD_SP_imm_T1 = 0xA800,
573
OP_ADD_SP_imm_T2 = 0xB000,
574
OP_SUB_SP_imm_T1 = 0xB080,
583
OP_AND_reg_T2 = 0xEA00,
584
OP_TST_reg_T2 = 0xEA10,
585
OP_ORR_reg_T2 = 0xEA40,
586
OP_ORR_S_reg_T2 = 0xEA50,
587
OP_ASR_imm_T1 = 0xEA4F,
588
OP_LSL_imm_T1 = 0xEA4F,
589
OP_LSR_imm_T1 = 0xEA4F,
590
OP_ROR_imm_T1 = 0xEA4F,
591
OP_MVN_reg_T2 = 0xEA6F,
592
OP_EOR_reg_T2 = 0xEA80,
593
OP_ADD_reg_T3 = 0xEB00,
594
OP_ADD_S_reg_T3 = 0xEB10,
595
OP_SUB_reg_T2 = 0xEBA0,
596
OP_SUB_S_reg_T2 = 0xEBB0,
597
OP_CMP_reg_T2 = 0xEBB0,
598
OP_VMOV_CtoD = 0xEC00,
599
OP_VMOV_DtoC = 0xEC10,
604
OP_VMOV_CtoS = 0xEE00,
605
OP_VMOV_StoC = 0xEE10,
612
OP_VCVT_FPIVFP = 0xEEB0,
614
OP_VMOV_IMM_T2 = 0xEEB0,
617
OP_VSQRT_T1 = 0xEEB0,
618
OP_VCVTSD_T1 = 0xEEB0,
619
OP_VCVTDS_T1 = 0xEEB0,
622
OP_AND_imm_T1 = 0xF000,
624
OP_ORR_imm_T1 = 0xF040,
625
OP_MOV_imm_T2 = 0xF040,
627
OP_EOR_imm_T1 = 0xF080,
628
OP_ADD_imm_T3 = 0xF100,
629
OP_ADD_S_imm_T3 = 0xF110,
632
OP_SUB_imm_T3 = 0xF1A0,
633
OP_SUB_S_imm_T3 = 0xF1B0,
634
OP_CMP_imm_T2 = 0xF1B0,
635
OP_RSB_imm_T2 = 0xF1C0,
636
OP_RSB_S_imm_T2 = 0xF1D0,
637
OP_ADD_imm_T4 = 0xF200,
638
OP_MOV_imm_T3 = 0xF240,
639
OP_SUB_imm_T4 = 0xF2A0,
643
OP_STRB_imm_T3 = 0xF800,
644
OP_STRB_reg_T2 = 0xF800,
645
OP_LDRB_imm_T3 = 0xF810,
646
OP_LDRB_reg_T2 = 0xF810,
647
OP_STRH_imm_T3 = 0xF820,
648
OP_STRH_reg_T2 = 0xF820,
649
OP_LDRH_reg_T2 = 0xF830,
650
OP_LDRH_imm_T3 = 0xF830,
651
OP_STR_imm_T4 = 0xF840,
652
OP_STR_reg_T2 = 0xF840,
653
OP_LDR_imm_T4 = 0xF850,
654
OP_LDR_reg_T2 = 0xF850,
655
OP_STRB_imm_T2 = 0xF880,
656
OP_LDRB_imm_T2 = 0xF890,
657
OP_STRH_imm_T2 = 0xF8A0,
658
OP_LDRH_imm_T2 = 0xF8B0,
659
OP_STR_imm_T3 = 0xF8C0,
660
OP_LDR_imm_T3 = 0xF8D0,
661
OP_LDRSB_reg_T2 = 0xF910,
662
OP_LDRSH_reg_T2 = 0xF930,
663
OP_LSL_reg_T2 = 0xFA00,
664
OP_LSR_reg_T2 = 0xFA20,
665
OP_ASR_reg_T2 = 0xFA40,
666
OP_ROR_reg_T2 = 0xFA60,
668
OP_SMULL_T1 = 0xFB80,
669
#if CPU(APPLE_ARMV7S)
676
OP_VADD_T2b = 0x0A00,
680
OP_VMOV_IMM_T2b = 0x0A00,
681
OP_VMOV_T2b = 0x0A40,
682
OP_VMUL_T2b = 0x0A00,
685
OP_VMOV_StoCb = 0x0A10,
686
OP_VMOV_CtoSb = 0x0A10,
687
OP_VMOV_DtoCb = 0x0A10,
688
OP_VMOV_CtoDb = 0x0A10,
690
OP_VABS_T2b = 0x0A40,
692
OP_VCVT_FPIVFPb = 0x0A40,
693
OP_VNEG_T2b = 0x0A40,
694
OP_VSUB_T2b = 0x0A40,
695
OP_VSQRT_T1b = 0x0A40,
696
OP_VCVTSD_T1b = 0x0A40,
697
OP_VCVTDS_T1b = 0x0A40,
704
FourFours(unsigned f3, unsigned f2, unsigned f1, unsigned f0)
723
class ARMInstructionFormatter;
726
bool ifThenElseConditionBit(Condition condition, bool isIf)
728
return isIf ? (condition & 1) : !(condition & 1);
730
uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
732
int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
733
| (ifThenElseConditionBit(condition, inst3if) << 2)
734
| (ifThenElseConditionBit(condition, inst4if) << 1)
736
ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
737
return (condition << 4) | mask;
739
uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
741
int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
742
| (ifThenElseConditionBit(condition, inst3if) << 2)
744
ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
745
return (condition << 4) | mask;
747
uint8_t ifThenElse(Condition condition, bool inst2if)
749
int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
751
ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
752
return (condition << 4) | mask;
755
uint8_t ifThenElse(Condition condition)
758
return (condition << 4) | mask;
763
void adc(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
765
// Rd can only be SP if Rn is also SP.
766
ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
767
ASSERT(rd != ARMRegisters::pc);
768
ASSERT(rn != ARMRegisters::pc);
769
ASSERT(imm.isEncodedImm());
771
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADC_imm, rn, rd, imm);
774
void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
776
// Rd can only be SP if Rn is also SP.
777
ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
778
ASSERT(rd != ARMRegisters::pc);
779
ASSERT(rn != ARMRegisters::pc);
780
ASSERT(imm.isValid());
782
if (rn == ARMRegisters::sp) {
783
ASSERT(!(imm.getUInt16() & 3));
784
if (!(rd & 8) && imm.isUInt10()) {
785
m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, static_cast<uint8_t>(imm.getUInt10() >> 2));
787
} else if ((rd == ARMRegisters::sp) && imm.isUInt9()) {
788
m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, static_cast<uint8_t>(imm.getUInt9() >> 2));
791
} else if (!((rd | rn) & 8)) {
793
m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
795
} else if ((rd == rn) && imm.isUInt8()) {
796
m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
801
if (imm.isEncodedImm())
802
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm);
804
ASSERT(imm.isUInt12());
805
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm);
809
ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
811
ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
812
ASSERT(rd != ARMRegisters::pc);
813
ASSERT(rn != ARMRegisters::pc);
815
m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
818
// NOTE: In an IT block, add doesn't modify the flags register.
819
ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
822
m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
824
m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rn, rd);
825
else if (!((rd | rn | rm) & 8))
826
m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
828
add(rd, rn, rm, ShiftTypeAndAmount());
831
// Not allowed in an IT (if then) block.
832
ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
834
// Rd can only be SP if Rn is also SP.
835
ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
836
ASSERT(rd != ARMRegisters::pc);
837
ASSERT(rn != ARMRegisters::pc);
838
ASSERT(imm.isEncodedImm());
840
if (!((rd | rn) & 8)) {
842
m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
844
} else if ((rd == rn) && imm.isUInt8()) {
845
m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
850
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3, rn, rd, imm);
853
// Not allowed in an IT (if then) block?
854
ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
856
ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
857
ASSERT(rd != ARMRegisters::pc);
858
ASSERT(rn != ARMRegisters::pc);
860
m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
863
// Not allowed in an IT (if then) block.
864
ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm)
866
if (!((rd | rn | rm) & 8))
867
m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
869
add_S(rd, rn, rm, ShiftTypeAndAmount());
872
ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
876
ASSERT(imm.isEncodedImm());
877
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm);
880
ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
885
m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
888
ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm)
890
if ((rd == rn) && !((rd | rm) & 8))
891
m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd);
892
else if ((rd == rm) && !((rd | rn) & 8))
893
m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rn, rd);
895
ARM_and(rd, rn, rm, ShiftTypeAndAmount());
898
ALWAYS_INLINE void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
902
ShiftTypeAndAmount shift(SRType_ASR, shiftAmount);
903
m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
906
ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
911
m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
914
// Only allowed in IT (if then) block if last instruction.
915
ALWAYS_INLINE AssemblerLabel b()
917
m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
918
return m_formatter.label();
921
// Only allowed in IT (if then) block if last instruction.
922
ALWAYS_INLINE AssemblerLabel blx(RegisterID rm)
924
ASSERT(rm != ARMRegisters::pc);
925
m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
926
return m_formatter.label();
929
// Only allowed in IT (if then) block if last instruction.
930
ALWAYS_INLINE AssemblerLabel bx(RegisterID rm)
932
m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
933
return m_formatter.label();
936
void bkpt(uint8_t imm = 0)
938
m_formatter.oneWordOp8Imm8(OP_BKPT, imm);
941
ALWAYS_INLINE void clz(RegisterID rd, RegisterID rm)
945
m_formatter.twoWordOp12Reg4FourFours(OP_CLZ, rm, FourFours(0xf, rd, 8, rm));
948
ALWAYS_INLINE void cmn(RegisterID rn, ARMThumbImmediate imm)
950
ASSERT(rn != ARMRegisters::pc);
951
ASSERT(imm.isEncodedImm());
953
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm);
956
ALWAYS_INLINE void cmp(RegisterID rn, ARMThumbImmediate imm)
958
ASSERT(rn != ARMRegisters::pc);
959
ASSERT(imm.isEncodedImm());
961
if (!(rn & 8) && imm.isUInt8())
962
m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8());
964
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm);
967
ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
969
ASSERT(rn != ARMRegisters::pc);
971
m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
974
ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm)
977
cmp(rn, rm, ShiftTypeAndAmount());
979
m_formatter.oneWordOp10Reg3Reg3(OP_CMP_reg_T1, rm, rn);
982
// xor is not spelled with an 'e'. :-(
983
ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
987
ASSERT(imm.isEncodedImm());
988
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm);
991
// xor is not spelled with an 'e'. :-(
992
ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
997
m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1000
// xor is not spelled with an 'e'. :-(
1001
void eor(RegisterID rd, RegisterID rn, RegisterID rm)
1003
if ((rd == rn) && !((rd | rm) & 8))
1004
m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rm, rd);
1005
else if ((rd == rm) && !((rd | rn) & 8))
1006
m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rn, rd);
1008
eor(rd, rn, rm, ShiftTypeAndAmount());
1011
ALWAYS_INLINE void it(Condition cond)
1013
m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond));
1016
ALWAYS_INLINE void it(Condition cond, bool inst2if)
1018
m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if));
1021
ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if)
1023
m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if));
1026
ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if, bool inst4if)
1028
m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if));
1031
// rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1032
ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1034
ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1035
ASSERT(imm.isUInt12());
1037
if (!((rt | rn) & 8) && imm.isUInt7())
1038
m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
1039
else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
1040
m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
1042
m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12());
1045
ALWAYS_INLINE void ldrWide8BitImmediate(RegisterID rt, RegisterID rn, uint8_t immediate)
1047
ASSERT(rn != ARMRegisters::pc);
1048
m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, immediate);
1051
ALWAYS_INLINE void ldrCompact(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1053
ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1054
ASSERT(imm.isUInt7());
1055
ASSERT(!((rt | rn) & 8));
1056
m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
1059
// If index is set, this is a regular offset or a pre-indexed load;
1060
// if index is not set then is is a post-index load.
1062
// If wback is set rn is updated - this is a pre or post index load,
1063
// if wback is not set this is a regular offset memory access.
1065
// (-255 <= offset <= 255)
1067
// _tmp = _reg + offset
1068
// MEM[index ? _tmp : _reg] = REG[rt]
1069
// if (wback) REG[rn] = _tmp
1070
ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1072
ASSERT(rt != ARMRegisters::pc);
1073
ASSERT(rn != ARMRegisters::pc);
1074
ASSERT(index || wback);
1075
ASSERT(!wback | (rt != rn));
1082
ASSERT((offset & ~0xff) == 0);
1084
offset |= (wback << 8);
1085
offset |= (add << 9);
1086
offset |= (index << 10);
1087
offset |= (1 << 11);
1089
m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset);
1092
// rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1093
ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1095
ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1096
ASSERT(!BadReg(rm));
1099
if (!shift && !((rt | rn | rm) & 8))
1100
m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt);
1102
m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm));
1105
// rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1106
ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1108
ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1109
ASSERT(imm.isUInt12());
1111
if (!((rt | rn) & 8) && imm.isUInt6())
1112
m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt);
1114
m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
1117
// If index is set, this is a regular offset or a pre-indexed load;
1118
// if index is not set then is is a post-index load.
1120
// If wback is set rn is updated - this is a pre or post index load,
1121
// if wback is not set this is a regular offset memory access.
1123
// (-255 <= offset <= 255)
1125
// _tmp = _reg + offset
1126
// MEM[index ? _tmp : _reg] = REG[rt]
1127
// if (wback) REG[rn] = _tmp
1128
ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1130
ASSERT(rt != ARMRegisters::pc);
1131
ASSERT(rn != ARMRegisters::pc);
1132
ASSERT(index || wback);
1133
ASSERT(!wback | (rt != rn));
1140
ASSERT((offset & ~0xff) == 0);
1142
offset |= (wback << 8);
1143
offset |= (add << 9);
1144
offset |= (index << 10);
1145
offset |= (1 << 11);
1147
m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset);
1150
ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1152
ASSERT(!BadReg(rt)); // Memory hint
1153
ASSERT(rn != ARMRegisters::pc); // LDRH (literal)
1154
ASSERT(!BadReg(rm));
1157
if (!shift && !((rt | rn | rm) & 8))
1158
m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt);
1160
m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
1163
void ldrb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1165
ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1166
ASSERT(imm.isUInt12());
1168
if (!((rt | rn) & 8) && imm.isUInt5())
1169
m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1, imm.getUInt5(), rn, rt);
1171
m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2, rn, rt, imm.getUInt12());
1174
void ldrb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1176
ASSERT(rt != ARMRegisters::pc);
1177
ASSERT(rn != ARMRegisters::pc);
1178
ASSERT(index || wback);
1179
ASSERT(!wback | (rt != rn));
1187
ASSERT(!(offset & ~0xff));
1189
offset |= (wback << 8);
1190
offset |= (add << 9);
1191
offset |= (index << 10);
1192
offset |= (1 << 11);
1194
m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3, rn, rt, offset);
1197
ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1199
ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1200
ASSERT(!BadReg(rm));
1203
if (!shift && !((rt | rn | rm) & 8))
1204
m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1, rm, rn, rt);
1206
m_formatter.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
1209
void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1211
ASSERT(rn != ARMRegisters::pc);
1212
ASSERT(!BadReg(rm));
1215
if (!shift && !((rt | rn | rm) & 8))
1216
m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSB_reg_T1, rm, rn, rt);
1218
m_formatter.twoWordOp12Reg4FourFours(OP_LDRSB_reg_T2, rn, FourFours(rt, 0, shift, rm));
1221
void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1223
ASSERT(rn != ARMRegisters::pc);
1224
ASSERT(!BadReg(rm));
1227
if (!shift && !((rt | rn | rm) & 8))
1228
m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSH_reg_T1, rm, rn, rt);
1230
m_formatter.twoWordOp12Reg4FourFours(OP_LDRSH_reg_T2, rn, FourFours(rt, 0, shift, rm));
1233
void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
1235
ASSERT(!BadReg(rd));
1236
ASSERT(!BadReg(rm));
1237
ShiftTypeAndAmount shift(SRType_LSL, shiftAmount);
1238
m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1241
ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
1243
ASSERT(!BadReg(rd));
1244
ASSERT(!BadReg(rn));
1245
ASSERT(!BadReg(rm));
1246
m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm));
1249
ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
1251
ASSERT(!BadReg(rd));
1252
ASSERT(!BadReg(rm));
1253
ShiftTypeAndAmount shift(SRType_LSR, shiftAmount);
1254
m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1257
ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
1259
ASSERT(!BadReg(rd));
1260
ASSERT(!BadReg(rn));
1261
ASSERT(!BadReg(rm));
1262
m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
1265
ALWAYS_INLINE void movT3(RegisterID rd, ARMThumbImmediate imm)
1267
ASSERT(imm.isValid());
1268
ASSERT(!imm.isEncodedImm());
1269
ASSERT(!BadReg(rd));
1271
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm);
1274
#if OS(LINUX) || OS(QNX)
1275
static void revertJumpTo_movT3movtcmpT2(void* instructionStart, RegisterID left, RegisterID right, uintptr_t imm)
1277
uint16_t* address = static_cast<uint16_t*>(instructionStart);
1278
ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm));
1279
ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm >> 16));
1280
address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
1281
address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, lo16);
1282
address[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
1283
address[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, hi16);
1284
address[4] = OP_CMP_reg_T2 | left;
1285
cacheFlush(address, sizeof(uint16_t) * 5);
1288
static void revertJumpTo_movT3(void* instructionStart, RegisterID rd, ARMThumbImmediate imm)
1290
ASSERT(imm.isValid());
1291
ASSERT(!imm.isEncodedImm());
1292
ASSERT(!BadReg(rd));
1294
uint16_t* address = static_cast<uint16_t*>(instructionStart);
1295
address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, imm);
1296
address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, imm);
1297
cacheFlush(address, sizeof(uint16_t) * 2);
1301
// MOV immediate: picks the shortest encoding — 16-bit T1 for low
// registers with a uint8 immediate, 32-bit T2 for encodable immediates,
// otherwise falls back to the full 16-bit-payload T3 form.
ALWAYS_INLINE void mov(RegisterID rd, ARMThumbImmediate imm)
{
    ASSERT(imm.isValid());
    ASSERT(!BadReg(rd));

    if ((rd < 8) && imm.isUInt8())
        m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8());
    else if (imm.isEncodedImm())
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2, 0xf, rd, imm);
    else
        movT3(rd, imm);
}

ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm)
{
    m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd);
}

// MOVT: writes a 16-bit immediate into the top half of rd, leaving the
// bottom half unchanged.
ALWAYS_INLINE void movt(RegisterID rd, ARMThumbImmediate imm)
{
    ASSERT(imm.isUInt16());
    ASSERT(!BadReg(rd));
    m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm);
}

ALWAYS_INLINE void mvn(RegisterID rd, ARMThumbImmediate imm)
{
    ASSERT(imm.isEncodedImm());
    ASSERT(!BadReg(rd));

    m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm);
}

ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift)
{
    ASSERT(!BadReg(rd));
    ASSERT(!BadReg(rm));
    m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm));
}

// Register MVN: use the 16-bit T1 encoding when both registers are low,
// otherwise the 32-bit shifted-register form with a zero shift.
ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm)
{
    if (!((rd | rm) & 8))
        m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd);
    else
        mvn(rd, rm, ShiftTypeAndAmount());
}

// neg rd, rm  ==>  rsb rd, rm, #0 (reverse subtract from zero).
ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm)
{
    ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
    sub(rd, zero, rm);
}
1355
ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
1357
ASSERT(!BadReg(rd));
1358
ASSERT(!BadReg(rn));
1359
ASSERT(imm.isEncodedImm());
1360
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm);
1363
ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1365
ASSERT(!BadReg(rd));
1366
ASSERT(!BadReg(rn));
1367
ASSERT(!BadReg(rm));
1368
m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1371
void orr(RegisterID rd, RegisterID rn, RegisterID rm)
1373
if ((rd == rn) && !((rd | rm) & 8))
1374
m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
1375
else if ((rd == rm) && !((rd | rn) & 8))
1376
m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
1378
orr(rd, rn, rm, ShiftTypeAndAmount());
1381
ALWAYS_INLINE void orr_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1383
ASSERT(!BadReg(rd));
1384
ASSERT(!BadReg(rn));
1385
ASSERT(!BadReg(rm));
1386
m_formatter.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1389
void orr_S(RegisterID rd, RegisterID rn, RegisterID rm)
1391
if ((rd == rn) && !((rd | rm) & 8))
1392
m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
1393
else if ((rd == rm) && !((rd | rn) & 8))
1394
m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
1396
orr_S(rd, rn, rm, ShiftTypeAndAmount());
1399
// Rotate-right by immediate / by register.
ALWAYS_INLINE void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount)
{
    ASSERT(!BadReg(rd));
    ASSERT(!BadReg(rm));
    ShiftTypeAndAmount shift(SRType_ROR, shiftAmount);
    m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
}

ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm)
{
    ASSERT(!BadReg(rd));
    ASSERT(!BadReg(rn));
    ASSERT(!BadReg(rm));
    m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
}

#if CPU(APPLE_ARMV7S)
// Integer divide is only available on cores that implement it (ARMv7-S here).
ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm)
{
    ASSERT(!BadReg(rd));
    ASSERT(!BadReg(rn));
    ASSERT(!BadReg(rm));
    m_formatter.twoWordOp12Reg4FourFours(OP_SDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
}
#endif

// Signed 32x32 -> 64 multiply; low half to rdLo, high half to rdHi.
ALWAYS_INLINE void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm)
{
    ASSERT(!BadReg(rdLo));
    ASSERT(!BadReg(rdHi));
    ASSERT(!BadReg(rn));
    ASSERT(!BadReg(rm));
    ASSERT(rdLo != rdHi);
    m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm));
}
1435
// rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1436
ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1438
ASSERT(rt != ARMRegisters::pc);
1439
ASSERT(rn != ARMRegisters::pc);
1440
ASSERT(imm.isUInt12());
1442
if (!((rt | rn) & 8) && imm.isUInt7())
1443
m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt);
1444
else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
1445
m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
1447
m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12());
1450
// If index is set, this is a regular offset or a pre-indexed store;
1451
// if index is not set then is is a post-index store.
1453
// If wback is set rn is updated - this is a pre or post index store,
1454
// if wback is not set this is a regular offset memory access.
1456
// (-255 <= offset <= 255)
1458
// _tmp = _reg + offset
1459
// MEM[index ? _tmp : _reg] = REG[rt]
1460
// if (wback) REG[rn] = _tmp
1461
ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1463
ASSERT(rt != ARMRegisters::pc);
1464
ASSERT(rn != ARMRegisters::pc);
1465
ASSERT(index || wback);
1466
ASSERT(!wback | (rt != rn));
1473
ASSERT((offset & ~0xff) == 0);
1475
offset |= (wback << 8);
1476
offset |= (add << 9);
1477
offset |= (index << 10);
1478
offset |= (1 << 11);
1480
m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset);
1483
// rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1484
ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1486
ASSERT(rn != ARMRegisters::pc);
1487
ASSERT(!BadReg(rm));
1490
if (!shift && !((rt | rn | rm) & 8))
1491
m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
1493
m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm));
1496
// rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1497
ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1499
ASSERT(rt != ARMRegisters::pc);
1500
ASSERT(rn != ARMRegisters::pc);
1501
ASSERT(imm.isUInt12());
1503
if (!((rt | rn) & 8) && imm.isUInt7())
1504
m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRB_imm_T1, imm.getUInt7() >> 2, rn, rt);
1506
m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T2, rn, rt, imm.getUInt12());
1509
// If index is set, this is a regular offset or a pre-indexed store;
1510
// if index is not set then is is a post-index store.
1512
// If wback is set rn is updated - this is a pre or post index store,
1513
// if wback is not set this is a regular offset memory access.
1515
// (-255 <= offset <= 255)
1517
// _tmp = _reg + offset
1518
// MEM[index ? _tmp : _reg] = REG[rt]
1519
// if (wback) REG[rn] = _tmp
1520
ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1522
ASSERT(rt != ARMRegisters::pc);
1523
ASSERT(rn != ARMRegisters::pc);
1524
ASSERT(index || wback);
1525
ASSERT(!wback | (rt != rn));
1532
ASSERT((offset & ~0xff) == 0);
1534
offset |= (wback << 8);
1535
offset |= (add << 9);
1536
offset |= (index << 10);
1537
offset |= (1 << 11);
1539
m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T3, rn, rt, offset);
1542
// rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1543
ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1545
ASSERT(rn != ARMRegisters::pc);
1546
ASSERT(!BadReg(rm));
1549
if (!shift && !((rt | rn | rm) & 8))
1550
m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRB_reg_T1, rm, rn, rt);
1552
m_formatter.twoWordOp12Reg4FourFours(OP_STRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
1555
// rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1556
ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1558
ASSERT(rt != ARMRegisters::pc);
1559
ASSERT(rn != ARMRegisters::pc);
1560
ASSERT(imm.isUInt12());
1562
if (!((rt | rn) & 8) && imm.isUInt7())
1563
m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt7() >> 2, rn, rt);
1565
m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2, rn, rt, imm.getUInt12());
1568
// If index is set, this is a regular offset or a pre-indexed store;
1569
// if index is not set then is is a post-index store.
1571
// If wback is set rn is updated - this is a pre or post index store,
1572
// if wback is not set this is a regular offset memory access.
1574
// (-255 <= offset <= 255)
1576
// _tmp = _reg + offset
1577
// MEM[index ? _tmp : _reg] = REG[rt]
1578
// if (wback) REG[rn] = _tmp
1579
ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1581
ASSERT(rt != ARMRegisters::pc);
1582
ASSERT(rn != ARMRegisters::pc);
1583
ASSERT(index || wback);
1584
ASSERT(!wback | (rt != rn));
1591
ASSERT(!(offset & ~0xff));
1593
offset |= (wback << 8);
1594
offset |= (add << 9);
1595
offset |= (index << 10);
1596
offset |= (1 << 11);
1598
m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T3, rn, rt, offset);
1601
// rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1602
ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1604
ASSERT(rn != ARMRegisters::pc);
1605
ASSERT(!BadReg(rm));
1608
if (!shift && !((rt | rn | rm) & 8))
1609
m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRH_reg_T1, rm, rn, rt);
1611
m_formatter.twoWordOp12Reg4FourFours(OP_STRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
1614
ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
1616
// Rd can only be SP if Rn is also SP.
1617
ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1618
ASSERT(rd != ARMRegisters::pc);
1619
ASSERT(rn != ARMRegisters::pc);
1620
ASSERT(imm.isValid());
1622
if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
1623
ASSERT(!(imm.getUInt16() & 3));
1624
m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
1626
} else if (!((rd | rn) & 8)) {
1627
if (imm.isUInt3()) {
1628
m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
1630
} else if ((rd == rn) && imm.isUInt8()) {
1631
m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
1636
if (imm.isEncodedImm())
1637
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
1639
ASSERT(imm.isUInt12());
1640
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
1644
ALWAYS_INLINE void sub(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
1646
ASSERT(rd != ARMRegisters::pc);
1647
ASSERT(rn != ARMRegisters::pc);
1648
ASSERT(imm.isValid());
1649
ASSERT(imm.isUInt12());
1651
if (!((rd | rn) & 8) && !imm.getUInt12())
1652
m_formatter.oneWordOp10Reg3Reg3(OP_RSB_imm_T1, rn, rd);
1654
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2, rn, rd, imm);
1657
ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1659
ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1660
ASSERT(rd != ARMRegisters::pc);
1661
ASSERT(rn != ARMRegisters::pc);
1662
ASSERT(!BadReg(rm));
1663
m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1666
// NOTE: In an IT block, add doesn't modify the flags register.
1667
ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
1669
if (!((rd | rn | rm) & 8))
1670
m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
1672
sub(rd, rn, rm, ShiftTypeAndAmount());
1675
// Not allowed in an IT (if then) block.
1676
void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
1678
// Rd can only be SP if Rn is also SP.
1679
ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1680
ASSERT(rd != ARMRegisters::pc);
1681
ASSERT(rn != ARMRegisters::pc);
1682
ASSERT(imm.isValid());
1684
if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
1685
ASSERT(!(imm.getUInt16() & 3));
1686
m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
1688
} else if (!((rd | rn) & 8)) {
1689
if (imm.isUInt3()) {
1690
m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
1692
} else if ((rd == rn) && imm.isUInt8()) {
1693
m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
1698
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
1701
ALWAYS_INLINE void sub_S(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
1703
ASSERT(rd != ARMRegisters::pc);
1704
ASSERT(rn != ARMRegisters::pc);
1705
ASSERT(imm.isValid());
1706
ASSERT(imm.isUInt12());
1708
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_S_imm_T2, rn, rd, imm);
1711
// Not allowed in an IT (if then) block?
1712
ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1714
ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1715
ASSERT(rd != ARMRegisters::pc);
1716
ASSERT(rn != ARMRegisters::pc);
1717
ASSERT(!BadReg(rm));
1718
m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1721
// Not allowed in an IT (if then) block.
1722
ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm)
1724
if (!((rd | rn | rm) & 8))
1725
m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
1727
sub_S(rd, rn, rm, ShiftTypeAndAmount());
1730
// TST immediate: rn AND imm, flags only (Rd field is 0xf).
ALWAYS_INLINE void tst(RegisterID rn, ARMThumbImmediate imm)
{
    ASSERT(!BadReg(rn));
    ASSERT(imm.isEncodedImm());

    m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
}

ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
{
    ASSERT(!BadReg(rn));
    ASSERT(!BadReg(rm));
    m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
}

// Register TST: 16-bit T1 when both registers are low, else the 32-bit
// shifted-register form with a zero shift.
ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
{
    if ((rn | rm) & 8)
        tst(rn, rm, ShiftTypeAndAmount());
    else
        m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn);
}

// Unsigned bitfield extract: rd = (rn >> lsb) & ((1 << width) - 1).
ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, unsigned lsb, unsigned width)
{
    ASSERT(lsb < 32);
    ASSERT((width >= 1) && (width <= 32));
    ASSERT((lsb + width) <= 32);
    // lsb is split across imm3 (bits 12-14) and imm2 (bits 6-7); width is
    // encoded as (width - 1) in the low 5 bits.
    m_formatter.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1, rd, rn, (lsb & 0x1c) << 10, (lsb & 0x3) << 6, (width - 1) & 0x1f);
}
1761
#if CPU(APPLE_ARMV7S)
// Unsigned integer divide; only available on cores implementing the
// hardware divider (ARMv7-S here).
ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
{
    ASSERT(!BadReg(rd));
    ASSERT(!BadReg(rn));
    ASSERT(!BadReg(rm));
    m_formatter.twoWordOp12Reg4FourFours(OP_UDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
}
#endif
1771
void vadd(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
1773
m_formatter.vfpOp(OP_VADD_T2, OP_VADD_T2b, true, rn, rd, rm);
1776
void vcmp(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
1778
m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(4), rd, rm);
1781
void vcmpz(FPDoubleRegisterID rd)
1783
m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(5), rd, VFPOperand(0));
1786
void vcvt_signedToFloatingPoint(FPDoubleRegisterID rd, FPSingleRegisterID rm)
1788
// boolean values are 64bit (toInt, unsigned, roundZero)
1789
m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(false, false, false), rd, rm);
1792
void vcvt_floatingPointToSigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
1794
// boolean values are 64bit (toInt, unsigned, roundZero)
1795
m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, false, true), rd, rm);
1798
void vcvt_unsignedToFloatingPoint(FPDoubleRegisterID rd, FPSingleRegisterID rm)
1800
// boolean values are 64bit (toInt, unsigned, roundZero)
1801
m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(false, true, false), rd, rm);
1804
void vcvt_floatingPointToUnsigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
1806
// boolean values are 64bit (toInt, unsigned, roundZero)
1807
m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, true, true), rd, rm);
1810
void vdiv(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
1812
m_formatter.vfpOp(OP_VDIV, OP_VDIVb, true, rn, rd, rm);
1815
void vldr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
1817
m_formatter.vfpMemOp(OP_VLDR, OP_VLDRb, true, rn, rd, imm);
1820
void flds(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
1822
m_formatter.vfpMemOp(OP_FLDS, OP_FLDSb, false, rn, rd, imm);
1825
void vmov(RegisterID rd, FPSingleRegisterID rn)
1827
ASSERT(!BadReg(rd));
1828
m_formatter.vfpOp(OP_VMOV_StoC, OP_VMOV_StoCb, false, rn, rd, VFPOperand(0));
1831
void vmov(FPSingleRegisterID rd, RegisterID rn)
1833
ASSERT(!BadReg(rn));
1834
m_formatter.vfpOp(OP_VMOV_CtoS, OP_VMOV_CtoSb, false, rd, rn, VFPOperand(0));
1837
void vmov(RegisterID rd1, RegisterID rd2, FPDoubleRegisterID rn)
1839
ASSERT(!BadReg(rd1));
1840
ASSERT(!BadReg(rd2));
1841
m_formatter.vfpOp(OP_VMOV_DtoC, OP_VMOV_DtoCb, true, rd2, VFPOperand(rd1 | 16), rn);
1844
void vmov(FPDoubleRegisterID rd, RegisterID rn1, RegisterID rn2)
1846
ASSERT(!BadReg(rn1));
1847
ASSERT(!BadReg(rn2));
1848
m_formatter.vfpOp(OP_VMOV_CtoD, OP_VMOV_CtoDb, true, rn2, VFPOperand(rn1 | 16), rd);
1851
void vmov(FPDoubleRegisterID rd, FPDoubleRegisterID rn)
1853
m_formatter.vfpOp(OP_VMOV_T2, OP_VMOV_T2b, true, VFPOperand(0), rd, rn);
1856
void vmrs(RegisterID reg = ARMRegisters::pc)
1858
ASSERT(reg != ARMRegisters::sp);
1859
m_formatter.vfpOp(OP_VMRS, OP_VMRSb, false, VFPOperand(1), VFPOperand(0x10 | reg), VFPOperand(0));
1862
void vmul(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
1864
m_formatter.vfpOp(OP_VMUL_T2, OP_VMUL_T2b, true, rn, rd, rm);
1867
void vstr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
1869
m_formatter.vfpMemOp(OP_VSTR, OP_VSTRb, true, rn, rd, imm);
1872
void fsts(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
1874
m_formatter.vfpMemOp(OP_FSTS, OP_FSTSb, false, rn, rd, imm);
1877
void vsub(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
1879
m_formatter.vfpOp(OP_VSUB_T2, OP_VSUB_T2b, true, rn, rd, rm);
1882
void vabs(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
1884
m_formatter.vfpOp(OP_VABS_T2, OP_VABS_T2b, true, VFPOperand(16), rd, rm);
1887
void vneg(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
1889
m_formatter.vfpOp(OP_VNEG_T2, OP_VNEG_T2b, true, VFPOperand(1), rd, rm);
1892
void vsqrt(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
1894
m_formatter.vfpOp(OP_VSQRT_T1, OP_VSQRT_T1b, true, VFPOperand(17), rd, rm);
1897
void vcvtds(FPDoubleRegisterID rd, FPSingleRegisterID rm)
1899
m_formatter.vfpOp(OP_VCVTDS_T1, OP_VCVTDS_T1b, false, VFPOperand(23), rd, rm);
1902
void vcvtsd(FPSingleRegisterID rd, FPDoubleRegisterID rm)
1904
m_formatter.vfpOp(OP_VCVTSD_T1, OP_VCVTSD_T1b, true, VFPOperand(23), rd, rm);
1909
m_formatter.oneWordOp8Imm8(OP_NOP_T1, 0);
1914
m_formatter.twoWordOp16Op16(OP_NOP_T2a, OP_NOP_T2b);
1917
// Raw label at the current write position, with no watchpoint padding.
AssemblerLabel labelIgnoringWatchpoints()
{
    return m_formatter.label();
}

// Label for a watchpoint site: records where the watchpoint starts and
// reserves maxJumpReplacementSize() bytes after it so the site can later
// be patched with a jump.
AssemblerLabel labelForWatchpoint()
{
    AssemblerLabel result = m_formatter.label();
    if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
        result = label();
    m_indexOfLastWatchpoint = result.m_offset;
    m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
    return result;
}

// Label that is guaranteed not to fall inside the previous watchpoint's
// reserved region: pads with nops (4-byte nopw while at least 4 bytes
// remain, otherwise 2-byte nop) until clear of the tail.
AssemblerLabel label()
{
    AssemblerLabel result = m_formatter.label();
    while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
        if (UNLIKELY(static_cast<int>(result.m_offset) + 4 <= m_indexOfTailOfLastWatchpoint))
            nopw();
        else
            nop();
        result = m_formatter.label();
    }
    return result;
}

// Pad to the requested alignment. The filler emitted while unaligned was
// stripped by the extraction; reconstructed as bkpt() (a trap, so falling
// into padding faults loudly) — confirm against upstream.
AssemblerLabel align(int alignment)
{
    while (!m_formatter.isAligned(alignment))
        bkpt();

    return label();
}
1953
static void* getRelocatedAddress(void* code, AssemblerLabel label)
1955
ASSERT(label.isSet());
1956
return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
1959
static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
1961
return b.m_offset - a.m_offset;
1964
int executableOffsetFor(int location)
1968
return static_cast<int32_t*>(m_formatter.data())[location / sizeof(int32_t) - 1];
1971
int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
1973
// Assembler admin methods:
1975
static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
1977
return a.from() < b.from();
1980
bool canCompact(JumpType jumpType)
1982
// The following cannot be compacted:
1983
// JumpFixed: represents custom jump sequence
1984
// JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
1985
// JumpConditionFixedSize: represents conditional jump that must remain a fixed size
1986
return (jumpType == JumpNoCondition) || (jumpType == JumpCondition);
1989
JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
1991
if (jumpType == JumpFixed)
1994
// for patchable jump we must leave space for the longest code sequence
1995
if (jumpType == JumpNoConditionFixedSize)
1997
if (jumpType == JumpConditionFixedSize)
1998
return LinkConditionalBX;
2000
const int paddingSize = JUMP_ENUM_SIZE(jumpType);
2002
if (jumpType == JumpCondition) {
2003
// 2-byte conditional T1
2004
const uint16_t* jumpT1Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT1)));
2005
if (canBeJumpT1(jumpT1Location, to))
2007
// 4-byte conditional T3
2008
const uint16_t* jumpT3Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT3)));
2009
if (canBeJumpT3(jumpT3Location, to))
2011
// 4-byte conditional T4 with IT
2012
const uint16_t* conditionalJumpT4Location =
2013
reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkConditionalJumpT4)));
2014
if (canBeJumpT4(conditionalJumpT4Location, to))
2015
return LinkConditionalJumpT4;
2017
// 2-byte unconditional T2
2018
const uint16_t* jumpT2Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT2)));
2019
if (canBeJumpT2(jumpT2Location, to))
2021
// 4-byte unconditional T4
2022
const uint16_t* jumpT4Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT4)));
2023
if (canBeJumpT4(jumpT4Location, to))
2025
// use long jump sequence
2029
ASSERT(jumpType == JumpCondition);
2030
return LinkConditionalBX;
2033
JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
2035
JumpLinkType linkType = computeJumpType(record.type(), from, to);
2036
record.setLinkType(linkType);
2040
void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
2042
int32_t ptr = regionStart / sizeof(int32_t);
2043
const int32_t end = regionEnd / sizeof(int32_t);
2044
int32_t* offsets = static_cast<int32_t*>(m_formatter.data());
2046
offsets[ptr++] = offset;
2049
Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
2051
std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
2052
return m_jumpsToLink;
2055
void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
2057
switch (record.linkType()) {
2059
linkJumpT1(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
2062
linkJumpT2(reinterpret_cast_ptr<uint16_t*>(from), to);
2065
linkJumpT3(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
2068
linkJumpT4(reinterpret_cast_ptr<uint16_t*>(from), to);
2070
case LinkConditionalJumpT4:
2071
linkConditionalJumpT4(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
2073
case LinkConditionalBX:
2074
linkConditionalBX(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
2077
linkBX(reinterpret_cast_ptr<uint16_t*>(from), to);
2080
RELEASE_ASSERT_NOT_REACHED();
2085
void* unlinkedCode() { return m_formatter.data(); }
2086
size_t codeSize() const { return m_formatter.codeSize(); }
2088
static unsigned getCallReturnOffset(AssemblerLabel call)
2090
ASSERT(call.isSet());
2091
return call.m_offset;
2094
// Linking & patching:
2096
// 'link' and 'patch' methods are for use on unprotected code - such as the code
2097
// within the AssemblerBuffer, and code being patched by the patch buffer. Once
2098
// code has been finalized it is (platform support permitting) within a non-
2099
// writable region of memory; to modify the code in an execute-only execuable
2100
// pool the 'repatch' and 'relink' methods should be used.
2102
void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
2105
ASSERT(from.isSet());
2106
m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition));
2109
static void linkJump(void* code, AssemblerLabel from, void* to)
2111
ASSERT(from.isSet());
2113
uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
2114
linkJumpAbsolute(location, to);
2117
static void linkCall(void* code, AssemblerLabel from, void* to)
2119
ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
2120
ASSERT(from.isSet());
2121
ASSERT_VALID_CODE_POINTER(to);
2123
setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to, false);
2126
static void linkPointer(void* code, AssemblerLabel where, void* value)
2128
setPointer(reinterpret_cast<char*>(code) + where.m_offset, value, false);
2131
static void relinkJump(void* from, void* to)
2133
ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
2134
ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));
2136
linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), to);
2138
cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t));
2141
static void relinkCall(void* from, void* to)
2143
ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
2144
ASSERT(reinterpret_cast<intptr_t>(to) & 1);
2146
setPointer(reinterpret_cast<uint16_t*>(from) - 1, to, true);
2149
static void* readCallTarget(void* from)
2151
return readPointer(reinterpret_cast<uint16_t*>(from) - 1);
2154
static void repatchInt32(void* where, int32_t value)
2156
ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
2158
setInt32(where, value, true);
2161
static void repatchCompact(void* where, int32_t offset)
2163
ASSERT(offset >= -255 && offset <= 255);
2171
offset |= (add << 9);
2172
offset |= (1 << 10);
2173
offset |= (1 << 11);
2175
uint16_t* location = reinterpret_cast<uint16_t*>(where);
2176
location[1] &= ~((1 << 12) - 1);
2177
location[1] |= offset;
2178
cacheFlush(location, sizeof(uint16_t) * 2);
2181
static void repatchPointer(void* where, void* value)
2183
ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
2185
setPointer(where, value, true);
2188
static void* readPointer(void* where)
2190
return reinterpret_cast<void*>(readInt32(where));
2193
static void replaceWithJump(void* instructionStart, void* to)
2195
ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
2196
ASSERT(!(bitwise_cast<uintptr_t>(to) & 1));
2198
#if OS(LINUX) || OS(QNX)
2199
if (canBeJumpT4(reinterpret_cast<uint16_t*>(instructionStart), to)) {
2200
uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
2201
linkJumpT4(ptr, to);
2202
cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
2204
uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 5;
2206
cacheFlush(ptr - 5, sizeof(uint16_t) * 5);
2209
uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
2210
linkJumpT4(ptr, to);
2211
cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
2215
// Bytes to reserve so any patchable site can later hold a jump.
// NOTE(review): the return values were stripped by the extraction;
// reconstructed to match replaceWithJump() above — 5 halfwords (10 bytes)
// for the Linux/QNX long sequence, 4 bytes for a T4 branch elsewhere.
// Confirm against upstream.
static ptrdiff_t maxJumpReplacementSize()
{
#if OS(LINUX) || OS(QNX)
    return 10;
#else
    return 4;
#endif
}
2224
static void replaceWithLoad(void* instructionStart)
2226
ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
2227
uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
2228
switch (ptr[0] & 0xFFF0) {
2232
ASSERT(!(ptr[1] & 0xF000));
2234
ptr[0] |= OP_LDR_imm_T3;
2235
ptr[1] |= (ptr[1] & 0x0F00) << 4;
2237
cacheFlush(ptr, sizeof(uint16_t) * 2);
2240
RELEASE_ASSERT_NOT_REACHED();
2244
static void replaceWithAddressComputation(void* instructionStart)
2246
ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
2247
uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
2248
switch (ptr[0] & 0xFFF0) {
2250
ASSERT(!(ptr[1] & 0x0F00));
2252
ptr[0] |= OP_ADD_imm_T3;
2253
ptr[1] |= (ptr[1] & 0xF000) >> 4;
2255
cacheFlush(ptr, sizeof(uint16_t) * 2);
2260
RELEASE_ASSERT_NOT_REACHED();
2264
// Current buffer offset, for debugging only.
unsigned debugOffset() { return m_formatter.debugOffset(); }
2267
// Flush one page-bounded range of the instruction cache via the private
// ARM cacheflush syscall (0xf0002), invoked through inline assembly.
// NOTE(review): the asm body was stripped by the extraction; only the
// input/clobber lists were visible. Reconstructed per the standard
// WebKit sequence — confirm against upstream.
static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
{
    asm volatile(
        "push    {r7}\n"
        "mov     r0, %0\n"
        "mov     r1, %1\n"
        "movw    r7, #0x2\n"
        "movt    r7, #0xf\n"
        "movs    r2, #0x0\n"
        "svc     0x0\n"
        "pop     {r7}\n"
        :
        : "r" (begin), "r" (end)
        : "r0", "r1", "r2");
}
2284
static void cacheFlush(void* code, size_t size)
2287
sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
2289
size_t page = pageSize();
2290
uintptr_t current = reinterpret_cast<uintptr_t>(code);
2291
uintptr_t end = current + size;
2292
uintptr_t firstPageEnd = (current & ~(page - 1)) + page;
2294
if (end <= firstPageEnd) {
2295
linuxPageFlush(current, end);
2299
linuxPageFlush(current, firstPageEnd);
2301
for (current = firstPageEnd; current + page < end; current += page)
2302
linuxPageFlush(current, current + page);
2304
linuxPageFlush(current, end);
2306
CacheRangeFlush(code, size, CACHE_SYNC_ALL);
2308
#if !ENABLE(ASSEMBLER_WX_EXCLUSIVE)
2309
msync(code, size, MS_INVALIDATE_ICACHE);
2315
#error "The cacheFlush support is missing on this platform."
2320
// VFP operations commonly take one or more 5-bit operands, typically representing a
2321
// floating point register number. This will commonly be encoded in the instruction
2322
// in two parts, with one single bit field, and one 4-bit field. In the case of
2323
// double precision operands the high bit of the register number will be encoded
2324
// separately, and for single precision operands the high bit of the register number
2325
// will be encoded individually.
2326
// VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit
2327
// field to be encoded together in the instruction (the low 4-bits of a double
2328
// register number, or the high 4-bits of a single register number), and bit 4
2329
// contains the bit value to be encoded individually.
2331
explicit VFPOperand(uint32_t value)
2334
ASSERT(!(m_value & ~0x1f));
2337
VFPOperand(FPDoubleRegisterID reg)
2342
VFPOperand(RegisterID reg)
2347
VFPOperand(FPSingleRegisterID reg)
2348
: m_value(((reg & 1) << 4) | (reg >> 1)) // rotate the lowest bit of 'reg' to the top.
2354
return m_value >> 4;
2359
return m_value & 0xf;
2365
// Build the VCVT opc2/op operand selecting direction (to/from integer),
// signedness, and round-to-zero mode.
VFPOperand vcvtOp(bool toInteger, bool isUnsigned, bool isRoundZero)
{
    // Cannot specify rounding when converting to float.
    ASSERT(toInteger || !isRoundZero);

    uint32_t op = 0x8;
    if (toInteger) {
        // opc2 indicates both toInteger & isUnsigned.
        op |= isUnsigned ? 0x4 : 0x5;
        // 'op' field in instruction is isRoundZero
        if (isRoundZero)
            op |= 0x10;
    } else {
        ASSERT(!isRoundZero);
        // 'op' field in instruction is isUnsigned
        if (!isUnsigned)
            op |= 0x10;
    }
    return VFPOperand(op);
}
2386
static void setInt32(void* code, uint32_t value, bool flush)
2388
uint16_t* location = reinterpret_cast<uint16_t*>(code);
2389
ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
2391
ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value));
2392
ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16));
2393
location[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
2394
location[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
2395
location[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
2396
location[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);
2399
cacheFlush(location - 4, 4 * sizeof(uint16_t));
2402
static int32_t readInt32(void* code)
2404
uint16_t* location = reinterpret_cast<uint16_t*>(code);
2405
ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
2407
ARMThumbImmediate lo16;
2408
ARMThumbImmediate hi16;
2409
decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(lo16, location[-4]);
2410
decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(lo16, location[-3]);
2411
decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(hi16, location[-2]);
2412
decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(hi16, location[-1]);
2413
uint32_t result = hi16.asUInt16();
2415
result |= lo16.asUInt16();
2416
return static_cast<int32_t>(result);
2419
static void setUInt7ForLoad(void* code, ARMThumbImmediate imm)
2421
// Requires us to have planted a LDR_imm_T1
2422
ASSERT(imm.isValid());
2423
ASSERT(imm.isUInt7());
2424
uint16_t* location = reinterpret_cast<uint16_t*>(code);
2425
location[0] &= ~((static_cast<uint16_t>(0x7f) >> 2) << 6);
2426
location[0] |= (imm.getUInt7() >> 2) << 6;
2427
cacheFlush(location, sizeof(uint16_t));
2430
static void setPointer(void* code, void* value, bool flush)
2432
setInt32(code, reinterpret_cast<uint32_t>(value), flush);
2435
static bool isB(void* address)
2437
uint16_t* instruction = static_cast<uint16_t*>(address);
2438
return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b);
2441
static bool isBX(void* address)
2443
uint16_t* instruction = static_cast<uint16_t*>(address);
2444
return (instruction[0] & 0xff87) == OP_BX;
2447
static bool isMOV_imm_T3(void* address)
2449
uint16_t* instruction = static_cast<uint16_t*>(address);
2450
return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0);
2453
static bool isMOVT(void* address)
2455
uint16_t* instruction = static_cast<uint16_t*>(address);
2456
return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0);
2459
static bool isNOP_T1(void* address)
2461
uint16_t* instruction = static_cast<uint16_t*>(address);
2462
return instruction[0] == OP_NOP_T1;
2465
static bool isNOP_T2(void* address)
2467
uint16_t* instruction = static_cast<uint16_t*>(address);
2468
return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
2471
static bool canBeJumpT1(const uint16_t* instruction, const void* target)
2473
ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2474
ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2476
intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2477
// It does not appear to be documented in the ARM ARM (big surprise), but
2478
// for OP_B_T1 the branch displacement encoded in the instruction is 2
2479
// less than the actual displacement.
2481
return ((relative << 23) >> 23) == relative;
2484
static bool canBeJumpT2(const uint16_t* instruction, const void* target)
2486
ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2487
ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2489
intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2490
// It does not appear to be documented in the ARM ARM (big surprise), but
2491
// for OP_B_T2 the branch displacement encoded in the instruction is 2
2492
// less than the actual displacement.
2494
return ((relative << 20) >> 20) == relative;
2497
static bool canBeJumpT3(const uint16_t* instruction, const void* target)
2499
ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2500
ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2502
intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2503
return ((relative << 11) >> 11) == relative;
2506
static bool canBeJumpT4(const uint16_t* instruction, const void* target)
2508
ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2509
ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2511
intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2512
return ((relative << 7) >> 7) == relative;
2515
// Plants a T1 conditional branch to 'target' in the halfword immediately
// before 'instruction'. The caller must have verified canBeJumpT1.
static void linkJumpT1(Condition cond, uint16_t* instruction, void* target)
{
    // FIXME: this should be up in the MacroAssembler layer. :-(
    ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
    ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
    ASSERT(canBeJumpT1(instruction, target));

    intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
    // It does not appear to be documented in the ARM ARM (big surprise), but
    // for OP_B_T1 the branch displacement encoded in the instruction is 2
    // less than the actual displacement.
    relative -= 2;

    // All branch offsets should be an even distance.
    ASSERT(!(relative & 1));
    instruction[-1] = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
}
static void linkJumpT2(uint16_t* instruction, void* target)
2535
// FIMXE: this should be up in the MacroAssembler layer. :-(
2536
ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2537
ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2538
ASSERT(canBeJumpT2(instruction, target));
2540
intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2541
// It does not appear to be documented in the ARM ARM (big surprise), but
2542
// for OP_B_T2 the branch displacement encoded in the instruction is 2
2543
// less than the actual displacement.
2546
// All branch offsets should be an even distance.
2547
ASSERT(!(relative & 1));
2548
instruction[-1] = OP_B_T2 | ((relative & 0xffe) >> 1);
2551
// Plants a T3 (32-bit) conditional branch to 'target' in the two halfwords
// immediately before 'instruction'. The caller must have verified canBeJumpT3.
static void linkJumpT3(Condition cond, uint16_t* instruction, void* target)
{
    // FIXME: this should be up in the MacroAssembler layer. :-(
    ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
    ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
    ASSERT(canBeJumpT3(instruction, target));

    intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));

    // All branch offsets should be an even distance.
    ASSERT(!(relative & 1));
    instruction[-2] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
    instruction[-1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
}
static void linkJumpT4(uint16_t* instruction, void* target)
2568
// FIMXE: this should be up in the MacroAssembler layer. :-(
2569
ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2570
ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2571
ASSERT(canBeJumpT4(instruction, target));
2573
intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2574
// ARM encoding for the top two bits below the sign bit is 'peculiar'.
2576
relative ^= 0xC00000;
2578
// All branch offsets should be an even distance.
2579
ASSERT(!(relative & 1));
2580
instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
2581
instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
2584
// Plants a conditional jump as an IT instruction predicating a T4 branch:
// IT occupies the halfword at instruction[-3], the branch the two after it.
static void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target)
{
    // FIXME: this should be up in the MacroAssembler layer. :-(
    ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
    ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

    instruction[-3] = ifThenElse(cond) | OP_IT;
    linkJumpT4(instruction, target);
}
static void linkBX(uint16_t* instruction, void* target)
2596
// FIMXE: this should be up in the MacroAssembler layer. :-(
2597
ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2598
ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2600
const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
2601
ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
2602
ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
2603
instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
2604
instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
2605
instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
2606
instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
2607
instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
2610
// Plants a conditional absolute jump: a linkBX sequence predicated by an IT
// instruction placed in the halfword at instruction[-6].
static void linkConditionalBX(Condition cond, uint16_t* instruction, void* target)
{
    // FIXME: this should be up in the MacroAssembler layer. :-(
    ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
    ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

    linkBX(instruction, target);
    // ifThenElse(cond, true, true) predicates all three instructions of the
    // BX sequence (MOVW, MOVT, BX).
    instruction[-6] = ifThenElse(cond, true, true) | OP_IT;
}
static void linkJumpAbsolute(uint16_t* instruction, void* target)
2622
// FIMXE: this should be up in the MacroAssembler layer. :-(
2623
ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2624
ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2626
ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
2627
|| (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)));
2629
if (canBeJumpT4(instruction, target)) {
2630
// There may be a better way to fix this, but right now put the NOPs first, since in the
2631
// case of an conditional branch this will be coming after an ITTT predicating *three*
2632
// instructions! Looking backwards to modify the ITTT to an IT is not easy, due to
2633
// variable wdith encoding - the previous instruction might *look* like an ITTT but
2634
// actually be the second half of a 2-word op.
2635
instruction[-5] = OP_NOP_T1;
2636
instruction[-4] = OP_NOP_T2a;
2637
instruction[-3] = OP_NOP_T2b;
2638
linkJumpT4(instruction, target);
2640
const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
2641
ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
2642
ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
2643
instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
2644
instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
2645
instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
2646
instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
2647
instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
2651
static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm)
2653
return op | (imm.m_value.i << 10) | imm.m_value.imm4;
2656
// Splits the first halfword of a MOVW/MOVT-style encoding back into the
// 'i' bit and imm4 field of an ARMThumbImmediate. Inverse of the encoder above.
static void decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(ARMThumbImmediate& result, uint16_t value)
{
    result.m_value.i = (value >> 10) & 1;
    result.m_value.imm4 = value & 15;
}
static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm)
2664
return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8;
2667
// Splits the second halfword of a MOVW/MOVT-style encoding back into the
// imm3 and imm8 fields of an ARMThumbImmediate. Inverse of the encoder above.
static void decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(ARMThumbImmediate& result, uint16_t value)
{
    result.m_value.imm3 = (value >> 12) & 7;
    result.m_value.imm8 = value & 255;
}
class ARMInstructionFormatter {
2675
ALWAYS_INLINE void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm)
2677
m_buffer.putShort(op | (rd << 8) | imm);
2680
ALWAYS_INLINE void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2)
2682
m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2);
2685
ALWAYS_INLINE void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3)
2687
m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
2690
ALWAYS_INLINE void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
2692
m_buffer.putShort(op | imm);
2695
ALWAYS_INLINE void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2)
2697
m_buffer.putShort(op | ((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7));
2700
ALWAYS_INLINE void oneWordOp9Imm7(OpcodeID op, uint8_t imm)
2702
m_buffer.putShort(op | imm);
2705
ALWAYS_INLINE void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2)
2707
m_buffer.putShort(op | (reg1 << 3) | reg2);
2710
ALWAYS_INLINE void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff)
2712
m_buffer.putShort(op | reg);
2713
m_buffer.putShort(ff.m_u.value);
2716
ALWAYS_INLINE void twoWordOp16FourFours(OpcodeID1 op, FourFours ff)
2718
m_buffer.putShort(op);
2719
m_buffer.putShort(ff.m_u.value);
2722
ALWAYS_INLINE void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2)
2724
m_buffer.putShort(op1);
2725
m_buffer.putShort(op2);
2728
ALWAYS_INLINE void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
2730
ARMThumbImmediate newImm = imm;
2731
newImm.m_value.imm4 = imm4;
2733
m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op, newImm));
2734
m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, newImm));
2737
ALWAYS_INLINE void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
2739
m_buffer.putShort(op | reg1);
2740
m_buffer.putShort((reg2 << 12) | imm);
2743
ALWAYS_INLINE void twoWordOp12Reg40Imm3Reg4Imm20Imm5(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm1, uint16_t imm2, uint16_t imm3)
2745
m_buffer.putShort(op | reg1);
2746
m_buffer.putShort((imm1 << 12) | (reg2 << 8) | (imm2 << 6) | imm3);
2749
// Formats up instructions of the pattern:
2750
// 111111111B11aaaa:bbbb222SA2C2cccc
2751
// Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit.
2752
// Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc.
2753
ALWAYS_INLINE void vfpOp(OpcodeID1 op1, OpcodeID2 op2, bool size, VFPOperand a, VFPOperand b, VFPOperand c)
2755
ASSERT(!(op1 & 0x004f));
2756
ASSERT(!(op2 & 0xf1af));
2757
m_buffer.putShort(op1 | b.bits1() << 6 | a.bits4());
2758
m_buffer.putShort(op2 | b.bits4() << 12 | size << 8 | a.bits1() << 7 | c.bits1() << 5 | c.bits4());
2761
// Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
2762
// (i.e. +/-(0..255) 32-bit words)
2763
ALWAYS_INLINE void vfpMemOp(OpcodeID1 op1, OpcodeID2 op2, bool size, RegisterID rn, VFPOperand rd, int32_t imm)
2771
uint32_t offset = imm;
2772
ASSERT(!(offset & ~0x3fc));
2775
m_buffer.putShort(op1 | (up << 7) | rd.bits1() << 6 | rn);
2776
m_buffer.putShort(op2 | rd.bits4() << 12 | size << 8 | offset);
2779
// Administrative methods:
2781
size_t codeSize() const { return m_buffer.codeSize(); }
2782
AssemblerLabel label() const { return m_buffer.label(); }
2783
bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
2784
void* data() const { return m_buffer.data(); }
2786
unsigned debugOffset() { return m_buffer.debugOffset(); }
2789
AssemblerBuffer m_buffer;
2792
Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink;
2793
int m_indexOfLastWatchpoint;
2794
int m_indexOfTailOfLastWatchpoint;
2799
#endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
2801
#endif // ARMAssembler_h