2
* Tiny Code Generator for QEMU
4
* Copyright (c) 2008 Andrzej Zaborowski
6
* Permission is hereby granted, free of charge, to any person obtaining a copy
7
* of this software and associated documentation files (the "Software"), to deal
8
* in the Software without restriction, including without limitation the rights
9
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10
* copies of the Software, and to permit persons to whom the Software is
11
* furnished to do so, subject to the following conditions:
13
* The above copyright notice and this permission notice shall be included in
14
* all copies or substantial portions of the Software.
16
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24
const char *tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
42
int tcg_target_reg_alloc_order[] = {
60
const int tcg_target_call_iarg_regs[4] = {
61
TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
63
const int tcg_target_call_oarg_regs[2] = {
64
TCG_REG_R0, TCG_REG_R1
67
static void patch_reloc(uint8_t *code_ptr, int type,
68
tcg_target_long value, tcg_target_long addend)
72
*(uint32_t *) code_ptr = value;
81
*(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & 0xff000000) |
82
(((value - ((tcg_target_long) code_ptr + 8)) >> 2) & 0xffffff);
87
/* maximum number of register used for input function arguments */
88
static inline int tcg_target_get_call_iarg_regs_count(int flags)
93
/* parse target specific constraints */
94
int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
101
#ifndef CONFIG_SOFTMMU
107
ct->ct |= TCG_CT_REG;
108
tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
111
#ifdef CONFIG_SOFTMMU
112
/* qemu_ld/st inputs (unless 'X', 'd' or 'D') */
114
ct->ct |= TCG_CT_REG;
115
tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
116
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
117
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
120
/* qemu_ld64 data_reg */
122
ct->ct |= TCG_CT_REG;
123
tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
124
/* r1 is still needed to load data_reg2, so don't use it. */
125
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
128
/* qemu_ld/st64 data_reg2 */
130
ct->ct |= TCG_CT_REG;
131
tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
132
/* r0, r1 and optionally r2 will be overwritten by the address
133
* and the low word of data, so don't use these. */
134
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
135
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
136
# if TARGET_LONG_BITS == 64
137
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
141
# if TARGET_LONG_BITS == 64
142
/* qemu_ld/st addr_reg2 */
144
ct->ct |= TCG_CT_REG;
145
tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
146
/* r0 will be overwritten by the low word of base, so don't use it. */
147
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
148
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
154
ct->ct |= TCG_CT_REG;
155
tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
156
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
160
ct->ct |= TCG_CT_REG;
161
tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
162
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
163
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
175
/* Test if a constant matches the constraint.
176
* TODO: define constraints for:
178
* ldr/str offset: between -0xfff and 0xfff
179
* ldrh/strh offset: between -0xff and 0xff
180
* mov operand2: values represented with x << (2 * y), x < 0x100
181
* add, sub, eor...: ditto
183
static inline int tcg_target_const_match(tcg_target_long val,
184
const TCGArgConstraint *arg_ct)
188
if (ct & TCG_CT_CONST)
194
enum arm_data_opc_e {
212
/* Set the S bit for the comparison-style data-processing ops, which have
 * no destination register and only update CPSR. */
#define TO_CPSR(opc) \
  (((opc) == ARITH_CMP || (opc) == ARITH_CMN || (opc) == ARITH_TST) << 20)

/* ARM operand-2 shift field encodings: shift-by-immediate and
 * shift-by-register forms (bits [11:4] of a data-processing insn). */
#define SHIFT_IMM_LSL(im)	(((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)	(((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)	(((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)	(((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)	(((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)	(((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)	(((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)	(((rs) << 8) | 0x70)
224
/* ARM condition codes (instruction bits [31:28]).  Entries not visible in
 * the damaged source are reconstructed from the architectural encoding. */
enum arm_cond_code_e {
    COND_EQ = 0x0,	/* Equal */
    COND_NE = 0x1,	/* Not equal */
    COND_CS = 0x2,	/* Unsigned greater or equal */
    COND_CC = 0x3,	/* Unsigned less than */
    COND_MI = 0x4,	/* Negative */
    COND_PL = 0x5,	/* Zero or greater */
    COND_VS = 0x6,	/* Overflow */
    COND_VC = 0x7,	/* No overflow */
    COND_HI = 0x8,	/* Unsigned greater than */
    COND_LS = 0x9,	/* Unsigned less or equal */
    COND_GE = 0xa,	/* Signed greater or equal */
    COND_LT = 0xb,	/* Signed less than */
    COND_GT = 0xc,	/* Signed greater than */
    COND_LE = 0xd,	/* Signed less or equal */
    COND_AL = 0xe,	/* Always */
};
242
static const uint8_t tcg_cond_to_arm_cond[10] = {
243
[TCG_COND_EQ] = COND_EQ,
244
[TCG_COND_NE] = COND_NE,
245
[TCG_COND_LT] = COND_LT,
246
[TCG_COND_GE] = COND_GE,
247
[TCG_COND_LE] = COND_LE,
248
[TCG_COND_GT] = COND_GT,
250
[TCG_COND_LTU] = COND_CC,
251
[TCG_COND_GEU] = COND_CS,
252
[TCG_COND_LEU] = COND_LS,
253
[TCG_COND_GTU] = COND_HI,
256
/* Emit "bx<cond> rn": branch to the address in rn (with ISA exchange). */
static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}
261
/* Emit "b<cond> offset".  The byte offset is relative to this instruction;
 * the -8 accounts for the ARM pipeline PC bias before the >>2 encoding. */
static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}
267
static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
269
#ifdef WORDS_BIGENDIAN
270
tcg_out8(s, (cond << 4) | 0x0a);
274
tcg_out8(s, (cond << 4) | 0x0a);
278
/* Emit "bl<cond> offset": branch-and-link, same PC-bias handling as
 * tcg_out_b above. */
static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}
284
/* Emit a register-operand data-processing insn:
 * <opc><cond> rd, rn, rm <shift>.  TO_CPSR sets the S bit for the
 * comparison opcodes that only update flags. */
static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | shift | rm);
}
291
/* Emit a 64-bit data-processing pair (e.g. adds/adc): the first insn sets
 * the flags (explicit S bit, 1 << 20) so the second can consume the carry. */
static inline void tcg_out_dat_reg2(TCGContext *s,
                int cond, int opc0, int opc1, int rd0, int rd1,
                int rn0, int rn1, int rm0, int rm1, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                    (rn0 << 16) | (rd0 << 12) | shift | rm0);
    tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                    (rn1 << 16) | (rd1 << 12) | shift | rm1);
}
301
/* Emit an immediate-operand data-processing insn (I bit, 1 << 25, set).
 * im must already be a valid ARM operand-2 immediate encoding. */
static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | im);
}
308
static inline void tcg_out_movi32(TCGContext *s,
309
int cond, int rd, int32_t arg)
311
int offset = (uint32_t) arg - ((uint32_t) s->code_ptr + 8);
313
/* TODO: This is very suboptimal, we can easily have a constant
314
* pool somewhere after all the instructions. */
316
if (arg < 0 && arg > -0x100)
317
return tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, (~arg) & 0xff);
319
if (offset < 0x100 && offset > -0x100)
321
tcg_out_dat_imm(s, cond, ARITH_ADD, rd, 15, offset) :
322
tcg_out_dat_imm(s, cond, ARITH_SUB, rd, 15, -offset);
324
tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, arg & 0xff);
325
if (arg & 0x0000ff00)
326
tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
327
((arg >> 8) & 0xff) | 0xc00);
328
if (arg & 0x00ff0000)
329
tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
330
((arg >> 16) & 0xff) | 0x800);
331
if (arg & 0xff000000)
332
tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
333
((arg >> 24) & 0xff) | 0x400);
336
static inline void tcg_out_mul32(TCGContext *s,
337
int cond, int rd, int rs, int rm)
340
tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
341
(rs << 8) | 0x90 | rm);
343
tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
344
(rm << 8) | 0x90 | rs);
346
tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) |
347
(rs << 8) | 0x90 | rm);
348
tcg_out_dat_reg(s, cond, ARITH_MOV,
349
rd, 0, 8, SHIFT_IMM_LSL(0));
353
/* Emit "umull<cond> rd0, rd1, rs, rm".  UMULL requires the destination
 * registers to differ from one source; if neither operand order avoids the
 * overlap, copy rm into the scratch register r8 first. */
static inline void tcg_out_umull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0x800098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}
370
/* Emit "smull<cond> rd0, rd1, rs, rm"; same operand-overlap workaround via
 * scratch r8 as tcg_out_umull32. */
static inline void tcg_out_smull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0xc00098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}
387
static inline void tcg_out_ld32_12(TCGContext *s, int cond,
388
int rd, int rn, tcg_target_long im)
391
tcg_out32(s, (cond << 28) | 0x05900000 |
392
(rn << 16) | (rd << 12) | (im & 0xfff));
394
tcg_out32(s, (cond << 28) | 0x05100000 |
395
(rn << 16) | (rd << 12) | ((-im) & 0xfff));
398
static inline void tcg_out_st32_12(TCGContext *s, int cond,
399
int rd, int rn, tcg_target_long im)
402
tcg_out32(s, (cond << 28) | 0x05800000 |
403
(rn << 16) | (rd << 12) | (im & 0xfff));
405
tcg_out32(s, (cond << 28) | 0x05000000 |
406
(rn << 16) | (rd << 12) | ((-im) & 0xfff));
409
/* Emit "ldr<cond> rd, [rn, rm]": word load, register offset. */
static inline void tcg_out_ld32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07900000 |
                    (rn << 16) | (rd << 12) | rm);
}
416
/* Emit "str<cond> rd, [rn, rm]": word store, register offset. */
static inline void tcg_out_st32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07800000 |
                    (rn << 16) | (rd << 12) | rm);
}
423
/* Register pre-increment with base writeback. */
424
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond,
425
int rd, int rn, int rm)
427
tcg_out32(s, (cond << 28) | 0x07b00000 |
428
(rn << 16) | (rd << 12) | rm);
431
/* Emit "str<cond> rd, [rn, rm]!": word store with base writeback. */
static inline void tcg_out_st32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07a00000 |
                    (rn << 16) | (rd << 12) | rm);
}
438
static inline void tcg_out_ld16u_8(TCGContext *s, int cond,
439
int rd, int rn, tcg_target_long im)
442
tcg_out32(s, (cond << 28) | 0x01d000b0 |
443
(rn << 16) | (rd << 12) |
444
((im & 0xf0) << 4) | (im & 0xf));
446
tcg_out32(s, (cond << 28) | 0x015000b0 |
447
(rn << 16) | (rd << 12) |
448
(((-im) & 0xf0) << 4) | ((-im) & 0xf));
451
static inline void tcg_out_st16u_8(TCGContext *s, int cond,
452
int rd, int rn, tcg_target_long im)
455
tcg_out32(s, (cond << 28) | 0x01c000b0 |
456
(rn << 16) | (rd << 12) |
457
((im & 0xf0) << 4) | (im & 0xf));
459
tcg_out32(s, (cond << 28) | 0x014000b0 |
460
(rn << 16) | (rd << 12) |
461
(((-im) & 0xf0) << 4) | ((-im) & 0xf));
464
/* Emit "ldrh<cond> rd, [rn, rm]": unsigned halfword load, register offset. */
static inline void tcg_out_ld16u_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000b0 |
                    (rn << 16) | (rd << 12) | rm);
}
471
/* Emit "strh<cond> rd, [rn, rm]": halfword store, register offset. */
static inline void tcg_out_st16u_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x018000b0 |
                    (rn << 16) | (rd << 12) | rm);
}
478
static inline void tcg_out_ld16s_8(TCGContext *s, int cond,
479
int rd, int rn, tcg_target_long im)
482
tcg_out32(s, (cond << 28) | 0x01d000f0 |
483
(rn << 16) | (rd << 12) |
484
((im & 0xf0) << 4) | (im & 0xf));
486
tcg_out32(s, (cond << 28) | 0x015000f0 |
487
(rn << 16) | (rd << 12) |
488
(((-im) & 0xf0) << 4) | ((-im) & 0xf));
491
static inline void tcg_out_st16s_8(TCGContext *s, int cond,
492
int rd, int rn, tcg_target_long im)
495
tcg_out32(s, (cond << 28) | 0x01c000f0 |
496
(rn << 16) | (rd << 12) |
497
((im & 0xf0) << 4) | (im & 0xf));
499
tcg_out32(s, (cond << 28) | 0x014000f0 |
500
(rn << 16) | (rd << 12) |
501
(((-im) & 0xf0) << 4) | ((-im) & 0xf));
504
/* Emit "ldrsh<cond> rd, [rn, rm]": signed halfword load, register offset. */
static inline void tcg_out_ld16s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000f0 |
                    (rn << 16) | (rd << 12) | rm);
}
511
/* Emit the "signed halfword store" form (same store data as strh). */
static inline void tcg_out_st16s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x018000f0 |
                    (rn << 16) | (rd << 12) | rm);
}
518
static inline void tcg_out_ld8_12(TCGContext *s, int cond,
519
int rd, int rn, tcg_target_long im)
522
tcg_out32(s, (cond << 28) | 0x05d00000 |
523
(rn << 16) | (rd << 12) | (im & 0xfff));
525
tcg_out32(s, (cond << 28) | 0x05500000 |
526
(rn << 16) | (rd << 12) | ((-im) & 0xfff));
529
static inline void tcg_out_st8_12(TCGContext *s, int cond,
530
int rd, int rn, tcg_target_long im)
533
tcg_out32(s, (cond << 28) | 0x05c00000 |
534
(rn << 16) | (rd << 12) | (im & 0xfff));
536
tcg_out32(s, (cond << 28) | 0x05400000 |
537
(rn << 16) | (rd << 12) | ((-im) & 0xfff));
540
/* Emit "ldrb<cond> rd, [rn, rm]": unsigned byte load, register offset. */
static inline void tcg_out_ld8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07d00000 |
                    (rn << 16) | (rd << 12) | rm);
}
547
/* Emit "strb<cond> rd, [rn, rm]": byte store, register offset. */
static inline void tcg_out_st8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07c00000 |
                    (rn << 16) | (rd << 12) | rm);
}
554
static inline void tcg_out_ld8s_8(TCGContext *s, int cond,
555
int rd, int rn, tcg_target_long im)
558
tcg_out32(s, (cond << 28) | 0x01d000d0 |
559
(rn << 16) | (rd << 12) |
560
((im & 0xf0) << 4) | (im & 0xf));
562
tcg_out32(s, (cond << 28) | 0x015000d0 |
563
(rn << 16) | (rd << 12) |
564
(((-im) & 0xf0) << 4) | ((-im) & 0xf));
567
static inline void tcg_out_st8s_8(TCGContext *s, int cond,
568
int rd, int rn, tcg_target_long im)
571
tcg_out32(s, (cond << 28) | 0x01c000d0 |
572
(rn << 16) | (rd << 12) |
573
((im & 0xf0) << 4) | (im & 0xf));
575
tcg_out32(s, (cond << 28) | 0x014000d0 |
576
(rn << 16) | (rd << 12) |
577
(((-im) & 0xf0) << 4) | ((-im) & 0xf));
580
/* Emit "ldrsb<cond> rd, [rn, rm]": signed byte load, register offset. */
static inline void tcg_out_ld8s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000d0 |
                    (rn << 16) | (rd << 12) | rm);
}
587
/* Emit the "signed byte store" form (same store data as strb). */
static inline void tcg_out_st8s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x018000d0 |
                    (rn << 16) | (rd << 12) | rm);
}
594
/* Word load with arbitrary offset: use the 12-bit immediate form when the
 * offset fits, otherwise materialize it in scratch r8 and use the
 * register-offset form. */
static inline void tcg_out_ld32u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld32_12(s, cond, rd, rn, offset);
}
604
/* Word store with arbitrary offset; falls back to a scratch-r8
 * register-offset store when the offset exceeds the 12-bit immediate. */
static inline void tcg_out_st32(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st32_12(s, cond, rd, rn, offset);
}
614
/* Unsigned halfword load with arbitrary offset; ldrh only has an 8-bit
 * immediate, so spill larger offsets to scratch r8. */
static inline void tcg_out_ld16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
}
624
/* Signed halfword load with arbitrary offset (8-bit immediate range). */
static inline void tcg_out_ld16s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
}
634
/* Halfword store with arbitrary offset (8-bit immediate range). */
static inline void tcg_out_st16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st16u_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st16u_8(s, cond, rd, rn, offset);
}
644
/* Unsigned byte load with arbitrary offset; ldrb has a 12-bit immediate. */
static inline void tcg_out_ld8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8_12(s, cond, rd, rn, offset);
}
654
/* Signed byte load with arbitrary offset; ldrsb only has an 8-bit
 * immediate, hence the tighter range check than tcg_out_ld8u. */
static inline void tcg_out_ld8s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
}
664
/* Byte store with arbitrary offset; strb has a 12-bit immediate. */
static inline void tcg_out_st8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st8_12(s, cond, rd, rn, offset);
}
674
static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
678
val = addr - (tcg_target_long) s->code_ptr;
679
if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
680
tcg_out_b(s, cond, val);
685
if (cond == COND_AL) {
686
tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
687
tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
689
tcg_out_movi32(s, cond, TCG_REG_R8, val - 8);
690
tcg_out_dat_reg(s, cond, ARITH_ADD,
691
15, 15, TCG_REG_R8, SHIFT_IMM_LSL(0));
697
static inline void tcg_out_call(TCGContext *s, int cond, uint32_t addr)
702
tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R8, 0, 14, SHIFT_IMM_LSL(0));
705
val = addr - (tcg_target_long) s->code_ptr;
706
if (val < 0x01fffffd && val > -0x01fffffd)
707
tcg_out_bl(s, cond, val);
712
if (cond == COND_AL) {
713
tcg_out_dat_imm(s, cond, ARITH_ADD, 14, 15, 4);
714
tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
715
tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
717
tcg_out_movi32(s, cond, TCG_REG_R9, addr);
718
tcg_out_dat_imm(s, cond, ARITH_MOV, 14, 0, 15);
719
tcg_out_bx(s, cond, TCG_REG_R9);
725
tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
729
static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
732
tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R8, 0, 14, SHIFT_IMM_LSL(0));
734
/* TODO: on ARMv5 and ARMv6 replace with tcg_out_blx(s, cond, arg); */
735
tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 15, SHIFT_IMM_LSL(0));
736
tcg_out_bx(s, cond, arg);
738
tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
742
static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
744
TCGLabel *l = &s->labels[label_index];
747
tcg_out_goto(s, cond, l->u.value);
748
else if (cond == COND_AL) {
749
tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
750
tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
753
/* Probably this should be preferred even for COND_AL... */
754
tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
755
tcg_out_b_noaddr(s, cond);
759
static void tcg_out_div_helper(TCGContext *s, int cond, const TCGArg *args,
760
void *helper_div, void *helper_rem, int shift)
762
int div_reg = args[0];
763
int rem_reg = args[1];
765
/* stmdb sp!, { r0 - r3, ip, lr } */
766
/* (Note that we need an even number of registers as per EABI) */
767
tcg_out32(s, (cond << 28) | 0x092d500f);
769
tcg_out_dat_reg(s, cond, ARITH_MOV, 0, 0, args[2], SHIFT_IMM_LSL(0));
770
tcg_out_dat_reg(s, cond, ARITH_MOV, 1, 0, args[3], SHIFT_IMM_LSL(0));
771
tcg_out_dat_reg(s, cond, ARITH_MOV, 2, 0, args[4], SHIFT_IMM_LSL(0));
772
tcg_out_dat_reg(s, cond, ARITH_MOV, 3, 0, 2, shift);
774
tcg_out_call(s, cond, (uint32_t) helper_div);
775
tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 0, SHIFT_IMM_LSL(0));
777
/* ldmia sp, { r0 - r3, fp, lr } */
778
tcg_out32(s, (cond << 28) | 0x089d500f);
780
tcg_out_dat_reg(s, cond, ARITH_MOV, 0, 0, args[2], SHIFT_IMM_LSL(0));
781
tcg_out_dat_reg(s, cond, ARITH_MOV, 1, 0, args[3], SHIFT_IMM_LSL(0));
782
tcg_out_dat_reg(s, cond, ARITH_MOV, 2, 0, args[4], SHIFT_IMM_LSL(0));
783
tcg_out_dat_reg(s, cond, ARITH_MOV, 3, 0, 2, shift);
785
tcg_out_call(s, cond, (uint32_t) helper_rem);
787
tcg_out_dat_reg(s, cond, ARITH_MOV, rem_reg, 0, 0, SHIFT_IMM_LSL(0));
788
tcg_out_dat_reg(s, cond, ARITH_MOV, div_reg, 0, 8, SHIFT_IMM_LSL(0));
790
/* ldr r0, [sp], #4 */
791
if (rem_reg != 0 && div_reg != 0)
792
tcg_out32(s, (cond << 28) | 0x04bd0004);
793
/* ldr r1, [sp], #4 */
794
if (rem_reg != 1 && div_reg != 1)
795
tcg_out32(s, (cond << 28) | 0x04bd1004);
796
/* ldr r2, [sp], #4 */
797
if (rem_reg != 2 && div_reg != 2)
798
tcg_out32(s, (cond << 28) | 0x04bd2004);
799
/* ldr r3, [sp], #4 */
800
if (rem_reg != 3 && div_reg != 3)
801
tcg_out32(s, (cond << 28) | 0x04bd3004);
802
/* ldr ip, [sp], #4 */
803
if (rem_reg != 12 && div_reg != 12)
804
tcg_out32(s, (cond << 28) | 0x04bdc004);
805
/* ldr lr, [sp], #4 */
806
if (rem_reg != 14 && div_reg != 14)
807
tcg_out32(s, (cond << 28) | 0x04bde004);
810
#ifdef CONFIG_SOFTMMU
811
extern void __ldb_mmu(void);
812
extern void __ldw_mmu(void);
813
extern void __ldl_mmu(void);
814
extern void __ldq_mmu(void);
816
extern void __stb_mmu(void);
817
extern void __stw_mmu(void);
818
extern void __stl_mmu(void);
819
extern void __stq_mmu(void);
821
static void *qemu_ld_helpers[4] = {
828
static void *qemu_st_helpers[4] = {
836
#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
838
static inline void tcg_out_qemu_ld(TCGContext *s, int cond,
839
const TCGArg *args, int opc)
841
int addr_reg, data_reg, data_reg2;
842
#ifdef CONFIG_SOFTMMU
843
int mem_index, s_bits;
844
# if TARGET_LONG_BITS == 64
854
data_reg2 = 0; /* surpress warning */
856
#if TARGET_LONG_BITS == 64
859
#ifdef CONFIG_SOFTMMU
863
/* Should generate something like the following:
864
* shr r8, addr_reg, #TARGET_PAGE_BITS
865
* and r0, r8, #(CPU_TLB_SIZE - 1) @ Assumption: CPU_TLB_BITS <= 8
866
* add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
868
# if CPU_TLB_BITS > 8
871
tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
872
8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
873
tcg_out_dat_imm(s, COND_AL, ARITH_AND,
874
0, 8, CPU_TLB_SIZE - 1);
875
tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
876
0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
878
* ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
879
* below, the offset is likely to exceed 12 bits if mem_index != 0 and
880
* not exceed otherwise, so use an
881
* add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
885
tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 0, 0,
886
(mem_index << (TLB_SHIFT & 1)) |
887
((16 - (TLB_SHIFT >> 1)) << 8));
888
tcg_out_ld32_12(s, COND_AL, 1, 0,
889
offsetof(CPUState, tlb_table[0][0].addr_read));
890
tcg_out_dat_reg(s, COND_AL, ARITH_CMP,
891
0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
892
/* Check alignment. */
894
tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
895
0, addr_reg, (1 << s_bits) - 1);
896
# if TARGET_LONG_BITS == 64
897
/* XXX: possibly we could use a block data load or writeback in
898
* the first access. */
899
tcg_out_ld32_12(s, COND_EQ, 1, 0,
900
offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
901
tcg_out_dat_reg(s, COND_EQ, ARITH_CMP,
902
0, 1, addr_reg2, SHIFT_IMM_LSL(0));
904
tcg_out_ld32_12(s, COND_EQ, 1, 0,
905
offsetof(CPUState, tlb_table[0][0].addend));
909
tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, 1);
912
tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, 1);
915
tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, 1);
918
tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, 1);
922
tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, 1);
925
tcg_out_ld32_rwb(s, COND_EQ, data_reg, 1, addr_reg);
926
tcg_out_ld32_12(s, COND_EQ, data_reg2, 1, 4);
930
label_ptr = (void *) s->code_ptr;
931
tcg_out_b(s, COND_EQ, 8);
934
tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 14, SHIFT_IMM_LSL(0));
937
/* TODO: move this code to where the constants pool will be */
939
tcg_out_dat_reg(s, cond, ARITH_MOV,
940
0, 0, addr_reg, SHIFT_IMM_LSL(0));
941
# if TARGET_LONG_BITS == 32
942
tcg_out_dat_imm(s, cond, ARITH_MOV, 1, 0, mem_index);
945
tcg_out_dat_reg(s, cond, ARITH_MOV,
946
1, 0, addr_reg2, SHIFT_IMM_LSL(0));
947
tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
949
tcg_out_bl(s, cond, (tcg_target_long) qemu_ld_helpers[s_bits] -
950
(tcg_target_long) s->code_ptr);
954
tcg_out_dat_reg(s, cond, ARITH_MOV,
955
0, 0, 0, SHIFT_IMM_LSL(24));
956
tcg_out_dat_reg(s, cond, ARITH_MOV,
957
data_reg, 0, 0, SHIFT_IMM_ASR(24));
960
tcg_out_dat_reg(s, cond, ARITH_MOV,
961
0, 0, 0, SHIFT_IMM_LSL(16));
962
tcg_out_dat_reg(s, cond, ARITH_MOV,
963
data_reg, 0, 0, SHIFT_IMM_ASR(16));
970
tcg_out_dat_reg(s, cond, ARITH_MOV,
971
data_reg, 0, 0, SHIFT_IMM_LSL(0));
975
tcg_out_dat_reg(s, cond, ARITH_MOV,
976
data_reg, 0, 0, SHIFT_IMM_LSL(0));
978
tcg_out_dat_reg(s, cond, ARITH_MOV,
979
data_reg2, 0, 1, SHIFT_IMM_LSL(0));
984
tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 8, SHIFT_IMM_LSL(0));
987
*label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
991
tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
994
tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
997
tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
1000
tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
1004
tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
1007
/* TODO: use block load -
1008
* check that data_reg2 > data_reg or the other way */
1009
tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
1010
tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, 4);
1016
static inline void tcg_out_qemu_st(TCGContext *s, int cond,
1017
const TCGArg *args, int opc)
1019
int addr_reg, data_reg, data_reg2;
1020
#ifdef CONFIG_SOFTMMU
1021
int mem_index, s_bits;
1022
# if TARGET_LONG_BITS == 64
1025
uint32_t *label_ptr;
1030
data_reg2 = *args++;
1032
data_reg2 = 0; /* surpress warning */
1034
#if TARGET_LONG_BITS == 64
1035
addr_reg2 = *args++;
1037
#ifdef CONFIG_SOFTMMU
1041
/* Should generate something like the following:
1042
* shr r8, addr_reg, #TARGET_PAGE_BITS
1043
* and r0, r8, #(CPU_TLB_SIZE - 1) @ Assumption: CPU_TLB_BITS <= 8
1044
* add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
1046
tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1047
8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
1048
tcg_out_dat_imm(s, COND_AL, ARITH_AND,
1049
0, 8, CPU_TLB_SIZE - 1);
1050
tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
1051
0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
1053
* ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
1054
* below, the offset is likely to exceed 12 bits if mem_index != 0 and
1055
* not exceed otherwise, so use an
1056
* add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
1060
tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 0, 0,
1061
(mem_index << (TLB_SHIFT & 1)) |
1062
((16 - (TLB_SHIFT >> 1)) << 8));
1063
tcg_out_ld32_12(s, COND_AL, 1, 0,
1064
offsetof(CPUState, tlb_table[0][0].addr_write));
1065
tcg_out_dat_reg(s, COND_AL, ARITH_CMP,
1066
0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
1067
/* Check alignment. */
1069
tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
1070
0, addr_reg, (1 << s_bits) - 1);
1071
# if TARGET_LONG_BITS == 64
1072
/* XXX: possibly we could use a block data load or writeback in
1073
* the first access. */
1074
tcg_out_ld32_12(s, COND_EQ, 1, 0,
1075
offsetof(CPUState, tlb_table[0][0].addr_write)
1077
tcg_out_dat_reg(s, COND_EQ, ARITH_CMP,
1078
0, 1, addr_reg2, SHIFT_IMM_LSL(0));
1080
tcg_out_ld32_12(s, COND_EQ, 1, 0,
1081
offsetof(CPUState, tlb_table[0][0].addend));
1085
tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, 1);
1088
tcg_out_st8s_r(s, COND_EQ, data_reg, addr_reg, 1);
1091
tcg_out_st16u_r(s, COND_EQ, data_reg, addr_reg, 1);
1094
tcg_out_st16s_r(s, COND_EQ, data_reg, addr_reg, 1);
1098
tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, 1);
1101
tcg_out_st32_rwb(s, COND_EQ, data_reg, 1, addr_reg);
1102
tcg_out_st32_12(s, COND_EQ, data_reg2, 1, 4);
1106
label_ptr = (void *) s->code_ptr;
1107
tcg_out_b(s, COND_EQ, 8);
1109
/* TODO: move this code to where the constants pool will be */
1111
tcg_out_dat_reg(s, cond, ARITH_MOV,
1112
0, 0, addr_reg, SHIFT_IMM_LSL(0));
1113
# if TARGET_LONG_BITS == 32
1116
tcg_out_dat_imm(s, cond, ARITH_AND, 1, data_reg, 0xff);
1117
tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
1120
tcg_out_dat_reg(s, cond, ARITH_MOV,
1121
1, 0, data_reg, SHIFT_IMM_LSL(16));
1122
tcg_out_dat_reg(s, cond, ARITH_MOV,
1123
1, 0, 1, SHIFT_IMM_LSR(16));
1124
tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
1128
tcg_out_dat_reg(s, cond, ARITH_MOV,
1129
1, 0, data_reg, SHIFT_IMM_LSL(0));
1130
tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
1134
tcg_out_dat_reg(s, cond, ARITH_MOV,
1135
1, 0, data_reg, SHIFT_IMM_LSL(0));
1137
tcg_out_dat_reg(s, cond, ARITH_MOV,
1138
2, 0, data_reg2, SHIFT_IMM_LSL(0));
1139
tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
1144
tcg_out_dat_reg(s, cond, ARITH_MOV,
1145
1, 0, addr_reg2, SHIFT_IMM_LSL(0));
1148
tcg_out_dat_imm(s, cond, ARITH_AND, 2, data_reg, 0xff);
1149
tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
1152
tcg_out_dat_reg(s, cond, ARITH_MOV,
1153
2, 0, data_reg, SHIFT_IMM_LSL(16));
1154
tcg_out_dat_reg(s, cond, ARITH_MOV,
1155
2, 0, 2, SHIFT_IMM_LSR(16));
1156
tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
1160
tcg_out_dat_reg(s, cond, ARITH_MOV,
1161
2, 0, data_reg, SHIFT_IMM_LSL(0));
1162
tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
1165
tcg_out_dat_imm(s, cond, ARITH_MOV, 8, 0, mem_index);
1166
tcg_out32(s, (cond << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
1168
tcg_out_dat_reg(s, cond, ARITH_MOV,
1169
2, 0, data_reg, SHIFT_IMM_LSL(0));
1171
tcg_out_dat_reg(s, cond, ARITH_MOV,
1172
3, 0, data_reg2, SHIFT_IMM_LSL(0));
1178
tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 14, SHIFT_IMM_LSL(0));
1181
tcg_out_bl(s, cond, (tcg_target_long) qemu_st_helpers[s_bits] -
1182
(tcg_target_long) s->code_ptr);
1183
# if TARGET_LONG_BITS == 64
1185
tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 0x10);
1189
tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 8, SHIFT_IMM_LSL(0));
1192
*label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
1196
tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
1199
tcg_out_st8s_8(s, COND_AL, data_reg, addr_reg, 0);
1202
tcg_out_st16u_8(s, COND_AL, data_reg, addr_reg, 0);
1205
tcg_out_st16s_8(s, COND_AL, data_reg, addr_reg, 0);
1209
tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
1212
/* TODO: use block store -
1213
* check that data_reg2 > data_reg or the other way */
1214
tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
1215
tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
1221
static uint8_t *tb_ret_addr;
1223
/* tcg_out_op: emit ARM host instructions for a single TCG opcode.
 *
 * NOTE(review): this extraction is mangled.  The bare numeric lines below
 * are extraction artifacts (original source line numbers), and many
 * original lines are missing: the switch statement itself, braces,
 * break statements, some case labels (call/jmp/br) and the #ifdef
 * alternatives that select between the duplicate instruction sequences
 * visible under exit_tb.  Code lines are preserved byte-for-byte; this
 * block is NOT compilable as shown — restore it from the original
 * source before building. */
static inline void tcg_out_op(TCGContext *s, int opc,
1224
const TCGArg *args, const int *const_args)
1229
/* exit_tb: several alternative return sequences are visible below;
 * presumably selected by #ifdefs lost in extraction — TODO confirm. */
case INDEX_op_exit_tb:
1232
tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, 15, 0);
1234
tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
1235
tcg_out_dat_reg(s, COND_AL, ARITH_MOV, 15, 0, 14, SHIFT_IMM_LSL(0));
1237
tcg_out32(s, args[0]);
1240
tcg_out_ld32_12(s, COND_AL, 0, 15, 0);
1242
tcg_out_dat_imm(s, COND_AL, ARITH_MOV, 0, 0, args[0]);
1243
/* return to the epilogue recorded by tcg_target_qemu_prologue */
tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
1245
tcg_out32(s, args[0]);
1248
case INDEX_op_goto_tb:
1249
if (s->tb_jmp_offset) {
1250
/* Direct jump method */
1252
s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1253
tcg_out_b(s, COND_AL, 8);
1255
tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
1256
s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1260
/* Indirect jump method */
1262
c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
1263
/* +/-0xfff is the reach of the ldr 12-bit offset; go via R0 beyond it */
if (c > 0xfff || c < -0xfff) {
1264
tcg_out_movi32(s, COND_AL, TCG_REG_R0,
1265
(tcg_target_long) (s->tb_next + args[0]));
1266
tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0);
1268
tcg_out_ld32_12(s, COND_AL, 15, 15, c);
1270
tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, 15, 0);
1271
tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0);
1272
tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
1275
s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1279
/* NOTE(review): the case labels for call/jmp/br/br-reg below were lost
 * in extraction; only the emission calls remain. */
tcg_out_call(s, COND_AL, args[0]);
1281
tcg_out_callr(s, COND_AL, args[0]);
1285
tcg_out_goto(s, COND_AL, args[0]);
1287
tcg_out_bx(s, COND_AL, args[0]);
1290
tcg_out_goto_label(s, COND_AL, args[0]);
1293
case INDEX_op_ld8u_i32:
1294
tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
1296
case INDEX_op_ld8s_i32:
1297
tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
1299
case INDEX_op_ld16u_i32:
1300
tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
1302
case INDEX_op_ld16s_i32:
1303
tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
1305
case INDEX_op_ld_i32:
1306
tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
1308
case INDEX_op_st8_i32:
1309
tcg_out_st8u(s, COND_AL, args[0], args[1], args[2]);
1311
case INDEX_op_st16_i32:
1312
tcg_out_st16u(s, COND_AL, args[0], args[1], args[2]);
1314
case INDEX_op_st_i32:
1315
tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
1318
case INDEX_op_mov_i32:
1319
tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1320
args[0], 0, args[1], SHIFT_IMM_LSL(0));
1322
case INDEX_op_movi_i32:
1323
tcg_out_movi32(s, COND_AL, args[0], args[1]);
1325
/* NOTE(review): the 'c = ARITH_xxx;' assignments for the fall-through
 * arithmetic cases were lost in extraction. */
case INDEX_op_add_i32:
1328
case INDEX_op_sub_i32:
1331
case INDEX_op_and_i32:
1334
case INDEX_op_or_i32:
1337
case INDEX_op_xor_i32:
1341
tcg_out_dat_reg(s, COND_AL, c,
1342
args[0], args[1], args[2], SHIFT_IMM_LSL(0));
1344
case INDEX_op_add2_i32:
1345
tcg_out_dat_reg2(s, COND_AL, ARITH_ADD, ARITH_ADC,
1346
args[0], args[1], args[2], args[3],
1347
args[4], args[5], SHIFT_IMM_LSL(0));
1349
case INDEX_op_sub2_i32:
1350
tcg_out_dat_reg2(s, COND_AL, ARITH_SUB, ARITH_SBC,
1351
args[0], args[1], args[2], args[3],
1352
args[4], args[5], SHIFT_IMM_LSL(0));
1354
case INDEX_op_neg_i32:
1355
/* neg is rsb #0: args[0] = 0 - args[1] */
tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
1357
case INDEX_op_mul_i32:
1358
tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
1360
case INDEX_op_mulu2_i32:
1361
tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
1363
case INDEX_op_div2_i32:
1364
tcg_out_div_helper(s, COND_AL, args,
1365
tcg_helper_div_i64, tcg_helper_rem_i64,
1368
case INDEX_op_divu2_i32:
1369
tcg_out_div_helper(s, COND_AL, args,
1370
tcg_helper_divu_i64, tcg_helper_remu_i64,
1373
/* XXX: Perhaps args[2] & 0x1f is wrong */
1374
case INDEX_op_shl_i32:
1376
SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
1378
case INDEX_op_shr_i32:
1379
/* LSR/ASR #0 would mean shift-by-32 in ARM encoding, so a constant
 * shift of 0 is emitted as LSL #0 (plain mov) instead. */
c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
1380
SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
1382
case INDEX_op_sar_i32:
1383
c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
1384
SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
1387
tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
1390
case INDEX_op_brcond_i32:
1391
tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
1392
args[0], args[1], SHIFT_IMM_LSL(0));
1393
tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
1395
case INDEX_op_brcond2_i32:
1396
/* The resulting conditions are:
1397
* TCG_COND_EQ --> a0 == a2 && a1 == a3,
1398
* TCG_COND_NE --> (a0 != a2 && a1 == a3) || a1 != a3,
1399
* TCG_COND_LT(U) --> (a0 < a2 && a1 == a3) || a1 < a3,
1400
* TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
1401
* TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
1402
* TCG_COND_GT(U) --> (a0 > a2 && a1 == a3) || a1 > a3,
1404
tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
1405
args[1], args[3], SHIFT_IMM_LSL(0));
1406
tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
1407
args[0], args[2], SHIFT_IMM_LSL(0));
1408
tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
1411
case INDEX_op_qemu_ld8u:
1412
tcg_out_qemu_ld(s, COND_AL, args, 0);
1414
case INDEX_op_qemu_ld8s:
1415
/* bit 2 of the opc argument requests sign extension */
tcg_out_qemu_ld(s, COND_AL, args, 0 | 4);
1417
case INDEX_op_qemu_ld16u:
1418
tcg_out_qemu_ld(s, COND_AL, args, 1);
1420
case INDEX_op_qemu_ld16s:
1421
tcg_out_qemu_ld(s, COND_AL, args, 1 | 4);
1423
case INDEX_op_qemu_ld32u:
1424
tcg_out_qemu_ld(s, COND_AL, args, 2);
1426
case INDEX_op_qemu_ld64:
1427
tcg_out_qemu_ld(s, COND_AL, args, 3);
1430
case INDEX_op_qemu_st8:
1431
tcg_out_qemu_st(s, COND_AL, args, 0);
1433
case INDEX_op_qemu_st16:
1434
tcg_out_qemu_st(s, COND_AL, args, 1);
1436
case INDEX_op_qemu_st32:
1437
tcg_out_qemu_st(s, COND_AL, args, 2);
1439
case INDEX_op_qemu_st64:
1440
tcg_out_qemu_st(s, COND_AL, args, 3);
1443
/* ext8s/ext16s: no sxtb/sxth here — sign-extend via shift left then
 * arithmetic shift right by the same amount. */
case INDEX_op_ext8s_i32:
1444
tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1445
args[0], 0, args[1], SHIFT_IMM_LSL(24));
1446
tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1447
args[0], 0, args[0], SHIFT_IMM_ASR(24));
1449
case INDEX_op_ext16s_i32:
1450
tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1451
args[0], 0, args[1], SHIFT_IMM_LSL(16));
1452
tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1453
args[0], 0, args[0], SHIFT_IMM_ASR(16));
1461
static const TCGTargetOpDef arm_op_defs[] = {
1462
{ INDEX_op_exit_tb, { } },
1463
{ INDEX_op_goto_tb, { } },
1464
{ INDEX_op_call, { "ri" } },
1465
{ INDEX_op_jmp, { "ri" } },
1466
{ INDEX_op_br, { } },
1468
{ INDEX_op_mov_i32, { "r", "r" } },
1469
{ INDEX_op_movi_i32, { "r" } },
1471
{ INDEX_op_ld8u_i32, { "r", "r" } },
1472
{ INDEX_op_ld8s_i32, { "r", "r" } },
1473
{ INDEX_op_ld16u_i32, { "r", "r" } },
1474
{ INDEX_op_ld16s_i32, { "r", "r" } },
1475
{ INDEX_op_ld_i32, { "r", "r" } },
1476
{ INDEX_op_st8_i32, { "r", "r" } },
1477
{ INDEX_op_st16_i32, { "r", "r" } },
1478
{ INDEX_op_st_i32, { "r", "r" } },
1480
/* TODO: "r", "r", "ri" */
1481
{ INDEX_op_add_i32, { "r", "r", "r" } },
1482
{ INDEX_op_sub_i32, { "r", "r", "r" } },
1483
{ INDEX_op_mul_i32, { "r", "r", "r" } },
1484
{ INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
1485
{ INDEX_op_div2_i32, { "r", "r", "r", "1", "2" } },
1486
{ INDEX_op_divu2_i32, { "r", "r", "r", "1", "2" } },
1487
{ INDEX_op_and_i32, { "r", "r", "r" } },
1488
{ INDEX_op_or_i32, { "r", "r", "r" } },
1489
{ INDEX_op_xor_i32, { "r", "r", "r" } },
1490
{ INDEX_op_neg_i32, { "r", "r" } },
1492
{ INDEX_op_shl_i32, { "r", "r", "ri" } },
1493
{ INDEX_op_shr_i32, { "r", "r", "ri" } },
1494
{ INDEX_op_sar_i32, { "r", "r", "ri" } },
1496
{ INDEX_op_brcond_i32, { "r", "r" } },
1498
/* TODO: "r", "r", "r", "r", "ri", "ri" */
1499
{ INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } },
1500
{ INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } },
1501
{ INDEX_op_brcond2_i32, { "r", "r", "r", "r" } },
1503
{ INDEX_op_qemu_ld8u, { "r", "x", "X" } },
1504
{ INDEX_op_qemu_ld8s, { "r", "x", "X" } },
1505
{ INDEX_op_qemu_ld16u, { "r", "x", "X" } },
1506
{ INDEX_op_qemu_ld16s, { "r", "x", "X" } },
1507
{ INDEX_op_qemu_ld32u, { "r", "x", "X" } },
1508
{ INDEX_op_qemu_ld64, { "d", "r", "x", "X" } },
1510
{ INDEX_op_qemu_st8, { "x", "x", "X" } },
1511
{ INDEX_op_qemu_st16, { "x", "x", "X" } },
1512
{ INDEX_op_qemu_st32, { "x", "x", "X" } },
1513
{ INDEX_op_qemu_st64, { "x", "D", "x", "X" } },
1515
{ INDEX_op_ext8s_i32, { "r", "r" } },
1516
{ INDEX_op_ext16s_i32, { "r", "r" } },
1521
/* One-time backend initialisation: declares which host registers the
 * allocator may use, which are clobbered across calls, which are
 * reserved, and registers the opcode constraint table.
 * NOTE(review): braces and the body of the fail-safe check were lost in
 * the mangled extraction; they are reconstructed here — confirm against
 * the original source. */
void tcg_target_init(TCGContext *s)
{
    /* fail safe: the generated TLB-lookup code depends on this size */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();

    /* R0..R14 are allocatable, except R8 which is reserved below. */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0,
                     ((2 << TCG_REG_R14) - 1) & ~(1 << TCG_REG_R8));
    /* Call-clobbered per AAPCS: R0-R3, plus R12 (ip) and R14 (lr). */
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     ((2 << TCG_REG_R3) - 1) |
                     (1 << TCG_REG_R12) | (1 << TCG_REG_R14));

    tcg_regset_clear(s->reserved_regs);
    /* NOTE(review): the original may guard the R14 reservation with an
     * #ifdef (link-register saving mode); kept unconditional as in the
     * visible text — verify. */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R14);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R8);

    tcg_add_target_add_op_defs(arm_op_defs);
}
1543
/* TCG backend hook: load into register 'arg' from arg1 + arg2.
 * This target only implements 32-bit values, so 'type' is not consulted
 * and the load is always the 32-bit form (tcg_out_ld32u).
 * NOTE(review): the function braces were lost in the mangled extraction
 * and are restored here. */
static inline void tcg_out_ld(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}
1549
/* TCG backend hook: store register 'arg' to arg1 + arg2.
 * This target only implements 32-bit values, so 'type' is not consulted
 * and the store is always the 32-bit form (tcg_out_st32).
 * NOTE(review): the function braces were lost in the mangled extraction
 * and are restored here. */
static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}
1555
/* Add the constant 'val' to register 'reg' in place.
 * NOTE(review): the control flow around the two emitted instructions was
 * lost in the mangled extraction and is reconstructed here.  The
 * rationale: tcg_out_dat_imm encodes only a small immediate, so values
 * outside the directly encodable range abort; a negative value is
 * emitted as SUB of its magnitude.  Confirm the exact range checks
 * against the original source. */
void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val > 0) {
        if (val < 0x100)
            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, reg, reg, val);
        else
            tcg_abort();
    } else if (val < 0) {
        if (val > -0x100)
            tcg_out_dat_imm(s, COND_AL, ARITH_SUB, reg, reg, -val);
        else
            tcg_abort();
    }
    /* val == 0: nothing to emit */
}
1570
/* TCG backend hook: register-to-register move (ret = arg), emitted as
 * MOV ret, arg with a null LSL #0 shift.
 * NOTE(review): the function braces were lost in the mangled extraction
 * and are restored here. */
static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
}
1575
/* TCG backend hook: load constant 'arg' into register 'ret'.
 * This target only implements 32-bit values, so 'type' is not consulted.
 * NOTE(review): the function braces were lost in the mangled extraction
 * and are restored here. */
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                int ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}
1581
/* Emit the prologue/epilogue pair that brackets all generated code.
 * Prologue: push callee-saved R9-R11 plus LR, then branch to the
 * translation block whose address the caller passed in R0.  The
 * epilogue, whose address is recorded in tb_ret_addr for
 * INDEX_op_exit_tb, pops the saved registers with PC in place of LR,
 * returning to the C caller.
 * NOTE(review): the closing brace was lost in the mangled extraction and
 * is restored; confirm no further statements followed in the original. */
void tcg_target_qemu_prologue(TCGContext *s)
{
    /* stmdb sp!, { r9 - r11, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d4e00);

    tcg_out_bx(s, COND_AL, TCG_REG_R0);
    tb_ret_addr = s->code_ptr;

    /* ldmia sp!, { r9 - r11, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8e00);
}