/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%rp", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
    "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
    "%r24", "%r25", "%r26", "%dp", "%ret0", "%ret1", "%sp", "%r31",
};
/* This is an 8 byte temp slot in the stack frame. */
#define STACK_TEMP_OFS -16

#ifdef CONFIG_USE_GUEST_BASE
#define TCG_GUEST_BASE_REG TCG_REG_R16
#else
#define TCG_GUEST_BASE_REG TCG_REG_R0
#endif
static const int tcg_target_reg_alloc_order[] = {
    /* ... */
};

/* The PA-RISC calling convention passes the first four argument words
   in %r26..%r23 and returns results in %ret0/%ret1, which is also how
   the slow-path helper calls below hand over their operands.  */
static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R26,
    TCG_REG_R25,
    TCG_REG_R24,
    TCG_REG_R23,
};

static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_RET0,
    TCG_REG_RET1,
};
/* True iff val fits a signed field of width BITS. */
static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
{
    return (val << ((sizeof(tcg_target_long) * 8 - bits))
            >> (sizeof(tcg_target_long) * 8 - bits)) == val;
}
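
/* Example: with bits = 11 the pair of shifts keeps only the low 11 bits
   and sign-extends from bit 10, so check_fit_tl(1023, 11) holds while
   check_fit_tl(1024, 11) does not; the signed 11-bit range being
   -1024..1023.  */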
/* True iff depi can be used to compute (reg | MASK).
   Accept a bit pattern like:
        0....01....1
        1....10....0
        0..01..10..0
   Copied from gcc sources. */
static inline int or_mask_p(tcg_target_ulong mask)
{
    if (mask == 0 || mask == -1) {
        return 0;
    }
    mask += mask & -mask;
    return (mask & (mask - 1)) == 0;
}
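
/* Adding the lowest set bit to a contiguous run of ones collapses the
   run into a single bit: for mask = 0x0ff0, mask & -mask = 0x0010 and
   0x0ff0 + 0x0010 = 0x1000, a power of two, so the test passes.  A
   split mask such as 0x0f0f fails the power-of-two check.  */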
/* True iff depi or extru can be used to compute (reg & mask).
   Accept a bit pattern like these:
        0....01....1
        1....10....0
        1..10..01..1
   Copied from gcc sources. */
static inline int and_mask_p(tcg_target_ulong mask)
{
    return or_mask_p(~mask);
}
static int low_sign_ext(int val, int len)
{
    return (((val << 1) & ~(-1u << len)) | ((val >> (len - 1)) & 1));
}
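
/* PA-RISC immediates are "low sign extended": the sign bit is stored in
   the least significant bit of the field.  E.g. low_sign_ext(5, 5) is
   0x0a and low_sign_ext(-1, 5) is 0x1f; the INSN_IM5/IM11/IM14 macros
   below rely on this encoding.  */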
static int reassemble_12(int as12)
{
    return (((as12 & 0x800) >> 11) |
            ((as12 & 0x400) >> 8) |
            ((as12 & 0x3ff) << 3));
}
static int reassemble_17(int as17)
{
    return (((as17 & 0x10000) >> 16) |
            ((as17 & 0x0f800) << 5) |
            ((as17 & 0x00400) >> 8) |
            ((as17 & 0x003ff) << 3));
}
static int reassemble_21(int as21)
{
    return (((as21 & 0x100000) >> 20) |
            ((as21 & 0x0ffe00) >> 8) |
            ((as21 & 0x000180) << 7) |
            ((as21 & 0x00007c) << 14) |
            ((as21 & 0x000003) << 12));
}
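
/* The PA-RISC branch and LDIL formats do not hold their immediates as
   contiguous fields; reassemble_12/17/21 scatter a plain displacement
   into the permuted bit positions the hardware expects, the same
   shuffling an assembler performs when encoding B,L or LDIL.  */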
/* ??? Bizarrely, there is no PCREL12F relocation type.  I guess all
   such relocations are simply fully handled by the assembler. */
#define R_PARISC_PCREL12F R_PARISC_NONE
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    uint32_t *insn_ptr = (uint32_t *)code_ptr;
    uint32_t insn = *insn_ptr;
    tcg_target_long pcrel;

    value += addend;
    pcrel = (value - ((tcg_target_long)code_ptr + 8)) >> 2;

    switch (type) {
    case R_PARISC_PCREL12F:
        assert(check_fit_tl(pcrel, 12));
        /* ??? We assume all patches are forward.  See tcg_out_brcond
           re setting the NUL bit on the branch and eliding the nop. */
        insn &= ~0x1ffdu;
        insn |= reassemble_12(pcrel);
        break;
    case R_PARISC_PCREL17F:
        assert(check_fit_tl(pcrel, 17));
        insn &= ~0x1f1ffdu;
        insn |= reassemble_17(pcrel);
        break;
    default:
        tcg_abort();
    }

    *insn_ptr = insn;
}
/* maximum number of registers used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    return 4;
}
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L': /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R26);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R25);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R24);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R23);
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_0;
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S5;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_MS11;
        break;
    case 'M':
        ct->ct |= TCG_CT_CONST_AND;
        break;
    case 'O':
        ct->ct |= TCG_CT_CONST_OR;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
/* test if a constant matches the constraint */
static int tcg_target_const_match(tcg_target_long val,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if (ct & TCG_CT_CONST_0) {
        return val == 0;
    } else if (ct & TCG_CT_CONST_S5) {
        return check_fit_tl(val, 5);
    } else if (ct & TCG_CT_CONST_S11) {
        return check_fit_tl(val, 11);
    } else if (ct & TCG_CT_CONST_MS11) {
        return check_fit_tl(-val, 11);
    } else if (ct & TCG_CT_CONST_AND) {
        return and_mask_p(val);
    } else if (ct & TCG_CT_CONST_OR) {
        return or_mask_p(val);
    }
    return 0;
}
#define INSN_OP(x)       ((x) << 26)
#define INSN_EXT3BR(x)   ((x) << 13)
#define INSN_EXT3SH(x)   ((x) << 10)
#define INSN_EXT4(x)     ((x) << 6)
#define INSN_EXT5(x)     (x)
#define INSN_EXT6(x)     ((x) << 6)
#define INSN_EXT7(x)     ((x) << 6)
#define INSN_EXT8A(x)    ((x) << 6)
#define INSN_EXT8B(x)    ((x) << 5)
#define INSN_T(x)        (x)
#define INSN_R1(x)       ((x) << 16)
#define INSN_R2(x)       ((x) << 21)
#define INSN_DEP_LEN(x)  (32 - (x))
#define INSN_SHDEP_CP(x) ((31 - (x)) << 5)
#define INSN_SHDEP_P(x)  ((x) << 5)
#define INSN_COND(x)     ((x) << 13)
#define INSN_IM11(x)     low_sign_ext(x, 11)
#define INSN_IM14(x)     low_sign_ext(x, 14)
#define INSN_IM5(x)      (low_sign_ext(x, 5) << 16)
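
/* As a sanity check of the field layout: "ldo 64(%r1),%r2" assembles to
   INSN_LDO | INSN_R2(1) | INSN_R1(2) | INSN_IM14(64)
     = (0x0d << 26) | (1 << 21) | (2 << 16) | low_sign_ext(64, 14)
     = 0x34220080,
   i.e. opcode, base, target and low-sign immediate land in the op, b,
   t and im14 fields of the PA-RISC load/store format.  */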
/* PA-RISC compare condition codes; COND_FALSE selects the negated sense. */
#define COND_NEVER   0
#define COND_EQ      1
#define COND_LT      2
#define COND_LE      3
#define COND_LTU     4
#define COND_LEU     5
#define COND_SV      6
#define COND_OD      7
#define COND_FALSE   8

#define INSN_ADD        (INSN_OP(0x02) | INSN_EXT6(0x18))
#define INSN_ADDC       (INSN_OP(0x02) | INSN_EXT6(0x1c))
#define INSN_ADDI       (INSN_OP(0x2d))
#define INSN_ADDIL      (INSN_OP(0x0a))
#define INSN_ADDL       (INSN_OP(0x02) | INSN_EXT6(0x28))
#define INSN_AND        (INSN_OP(0x02) | INSN_EXT6(0x08))
#define INSN_ANDCM      (INSN_OP(0x02) | INSN_EXT6(0x00))
#define INSN_COMCLR     (INSN_OP(0x02) | INSN_EXT6(0x22))
#define INSN_COMICLR    (INSN_OP(0x24))
#define INSN_DEP        (INSN_OP(0x35) | INSN_EXT3SH(3))
#define INSN_DEPI       (INSN_OP(0x35) | INSN_EXT3SH(7))
#define INSN_EXTRS      (INSN_OP(0x34) | INSN_EXT3SH(7))
#define INSN_EXTRU      (INSN_OP(0x34) | INSN_EXT3SH(6))
#define INSN_LDIL       (INSN_OP(0x08))
#define INSN_LDO        (INSN_OP(0x0d))
#define INSN_MTCTL      (INSN_OP(0x00) | INSN_EXT8B(0xc2))
#define INSN_OR         (INSN_OP(0x02) | INSN_EXT6(0x09))
#define INSN_SHD        (INSN_OP(0x34) | INSN_EXT3SH(2))
#define INSN_SUB        (INSN_OP(0x02) | INSN_EXT6(0x10))
#define INSN_SUBB       (INSN_OP(0x02) | INSN_EXT6(0x14))
#define INSN_SUBI       (INSN_OP(0x25))
#define INSN_VEXTRS     (INSN_OP(0x34) | INSN_EXT3SH(5))
#define INSN_VEXTRU     (INSN_OP(0x34) | INSN_EXT3SH(4))
#define INSN_VSHD       (INSN_OP(0x34) | INSN_EXT3SH(0))
#define INSN_XOR        (INSN_OP(0x02) | INSN_EXT6(0x0a))
#define INSN_ZDEP       (INSN_OP(0x35) | INSN_EXT3SH(2))
#define INSN_ZVDEP      (INSN_OP(0x35) | INSN_EXT3SH(0))

#define INSN_BL         (INSN_OP(0x3a) | INSN_EXT3BR(0))
#define INSN_BL_N       (INSN_OP(0x3a) | INSN_EXT3BR(0) | 2)
#define INSN_BLR        (INSN_OP(0x3a) | INSN_EXT3BR(2))
#define INSN_BV         (INSN_OP(0x3a) | INSN_EXT3BR(6))
#define INSN_BV_N       (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)
#define INSN_BLE_SR4    (INSN_OP(0x39) | (1 << 13))

#define INSN_LDB        (INSN_OP(0x10))
#define INSN_LDH        (INSN_OP(0x11))
#define INSN_LDW        (INSN_OP(0x12))
#define INSN_LDWM       (INSN_OP(0x13))
#define INSN_FLDDS      (INSN_OP(0x0b) | INSN_EXT4(0) | (1 << 12))

#define INSN_LDBX       (INSN_OP(0x03) | INSN_EXT4(0))
#define INSN_LDHX       (INSN_OP(0x03) | INSN_EXT4(1))
#define INSN_LDWX       (INSN_OP(0x03) | INSN_EXT4(2))

#define INSN_STB        (INSN_OP(0x18))
#define INSN_STH        (INSN_OP(0x19))
#define INSN_STW        (INSN_OP(0x1a))
#define INSN_STWM       (INSN_OP(0x1b))
#define INSN_FSTDS      (INSN_OP(0x0b) | INSN_EXT4(8) | (1 << 12))

#define INSN_COMBT      (INSN_OP(0x20))
#define INSN_COMBF      (INSN_OP(0x22))
#define INSN_COMIBT     (INSN_OP(0x21))
#define INSN_COMIBF     (INSN_OP(0x23))
/* supplied by libgcc */
extern void *__canonicalize_funcptr_for_compare(void *);

static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    /* PA1.1 defines COPY as OR r,0,t; PA2.0 defines COPY as LDO 0(r),t
       but hppa-dis.c is unaware of this definition */
    if (ret != arg) {
        tcg_out32(s, INSN_OR | INSN_T(ret) | INSN_R1(arg)
                  | INSN_R2(TCG_REG_R0));
    }
}
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    if (check_fit_tl(arg, 14)) {
        tcg_out32(s, INSN_LDO | INSN_R1(ret)
                  | INSN_R2(TCG_REG_R0) | INSN_IM14(arg));
    } else {
        tcg_target_ulong hi, lo;

        hi = arg >> 11;
        lo = arg & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(ret) | reassemble_21(hi));
        if (lo) {
            tcg_out32(s, INSN_LDO | INSN_R1(ret)
                      | INSN_R2(ret) | INSN_IM14(lo));
        }
    }
}
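
/* LDIL materializes the high 21 bits of a word and LDO adds a signed
   14-bit displacement.  Splitting at bit 11 keeps the low part in
   0..0x7ff, so it can never be seen as negative and no correction of
   the LDIL value is needed: (hi << 11) + lo == arg exactly.  */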
static void tcg_out_ldst(TCGContext *s, int ret, int addr,
                         tcg_target_long offset, int op)
{
    if (!check_fit_tl(offset, 14)) {
        /* Note that this "op" shadows the opcode parameter; the final
           store below still uses the caller's opcode.  */
        uint32_t hi, lo, op;

        hi = offset >> 11;
        lo = offset & 0x7ff;

        if (addr == TCG_REG_R0) {
            op = INSN_LDIL | INSN_R2(TCG_REG_R1);
        } else {
            op = INSN_ADDIL | INSN_R2(addr);
        }
        tcg_out32(s, op | reassemble_21(hi));

        addr = TCG_REG_R1;
        offset = lo;
    }

    /* Elide no-op moves (ldo 0(ret),ret).  */
    if (ret != addr || offset != 0 || op != INSN_LDO) {
        tcg_out32(s, op | INSN_R1(ret) | INSN_R2(addr) | INSN_IM14(offset));
    }
}
/* This function is required by tcg.c. */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, INSN_LDW);
}

/* This function is required by tcg.c. */
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, INSN_STW);
}
static void tcg_out_ldst_index(TCGContext *s, int data,
                               int base, int index, int op)
{
    tcg_out32(s, op | INSN_T(data) | INSN_R1(index) | INSN_R2(base));
}

static inline void tcg_out_addi2(TCGContext *s, int ret, int arg1,
                                 tcg_target_long val)
{
    tcg_out_ldst(s, ret, arg1, val, INSN_LDO);
}

/* This function is required by tcg.c. */
static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    tcg_out_addi2(s, reg, reg, val);
}
static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op)
{
    tcg_out32(s, op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2));
}

static inline void tcg_out_arithi(TCGContext *s, int t, int r1,
                                  tcg_target_long val, int op)
{
    assert(check_fit_tl(val, 11));
    tcg_out32(s, op | INSN_R1(t) | INSN_R2(r1) | INSN_IM11(val));
}

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_arith(s, TCG_REG_R0, TCG_REG_R0, TCG_REG_R0, INSN_OR);
}

static inline void tcg_out_mtctl_sar(TCGContext *s, int arg)
{
    tcg_out32(s, INSN_MTCTL | INSN_R2(11) | INSN_R1(arg));
}
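
/* Control register %cr11 is the Shift Amount Register (SAR); the
   variable shift, extract and deposit sequences below load it via
   MTCTL and then use the "variable" instruction forms (VSHD, VEXTR,
   ZVDEP).  */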
/* Extract LEN bits at position OFS from ARG and place in RET.
   Note that here the bit ordering is reversed from the PA-RISC
   standard, such that the right-most bit is 0. */
static inline void tcg_out_extr(TCGContext *s, int ret, int arg,
                                unsigned ofs, unsigned len, int sign)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, (sign ? INSN_EXTRS : INSN_EXTRU)
              | INSN_R1(ret) | INSN_R2(arg)
              | INSN_SHDEP_P(31 - ofs) | INSN_DEP_LEN(len));
}

/* Likewise with OFS interpreted little-endian. */
static inline void tcg_out_dep(TCGContext *s, int ret, int arg,
                               unsigned ofs, unsigned len)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, INSN_DEP | INSN_R2(ret) | INSN_R1(arg)
              | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
}

static inline void tcg_out_depi(TCGContext *s, int ret, int arg,
                                unsigned ofs, unsigned len)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(arg)
              | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
}
static inline void tcg_out_shd(TCGContext *s, int ret, int hi, int lo,
                               unsigned count)
{
    tcg_out32(s, INSN_SHD | INSN_R1(hi) | INSN_R2(lo) | INSN_T(ret)
              | INSN_SHDEP_CP(count));
}

static void tcg_out_vshd(TCGContext *s, int ret, int hi, int lo, int creg)
{
    tcg_out_mtctl_sar(s, creg);
    tcg_out32(s, INSN_VSHD | INSN_T(ret) | INSN_R1(hi) | INSN_R2(lo));
}
static void tcg_out_ori(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    int bs0, bs1;

    /* Note that the argument is constrained to match or_mask_p. */
    for (bs0 = 0; bs0 < 32; bs0++) {
        if ((m & (1u << bs0)) != 0) {
            break;
        }
    }
    for (bs1 = bs0; bs1 < 32; bs1++) {
        if ((m & (1u << bs1)) == 0) {
            break;
        }
    }
    assert(bs1 == 32 || (1ul << bs1) > m);

    tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
    tcg_out_depi(s, ret, -1, bs0, bs1 - bs0);
}
static void tcg_out_andi(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    int ls0, ls1, ms0;

    /* Note that the argument is constrained to match and_mask_p. */
    for (ls0 = 0; ls0 < 32; ls0++) {
        if ((m & (1u << ls0)) == 0) {
            break;
        }
    }
    for (ls1 = ls0; ls1 < 32; ls1++) {
        if ((m & (1u << ls1)) != 0) {
            break;
        }
    }
    for (ms0 = ls1; ms0 < 32; ms0++) {
        if ((m & (1u << ms0)) == 0) {
            break;
        }
    }
    assert(ms0 == 32);

    if (ls1 == 32) {
        tcg_out_extr(s, ret, arg, 0, ls0, 0);
    } else {
        tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
        tcg_out_depi(s, ret, 0, ls0, ls1 - ls0);
    }
}
static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 8, 1);
}

static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 16, 1);
}
static void tcg_out_shli(TCGContext *s, int ret, int arg, int count)
{
    assert(count < 32);
    tcg_out32(s, INSN_ZDEP | INSN_R2(ret) | INSN_R1(arg)
              | INSN_SHDEP_CP(31 - count) | INSN_DEP_LEN(32 - count));
}

static void tcg_out_shl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_ZVDEP | INSN_R2(ret) | INSN_R1(arg) | INSN_DEP_LEN(32));
}
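
/* There is no direct variable left shift.  SUBI computes 31 - creg
   into R20 (SUBI is a reversed subtract: t = im11 - r), that value is
   loaded into SAR, and ZVDEP with a 32-bit field deposits ARG so that
   it ends up shifted left by CREG bits.  */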
static void tcg_out_shri(TCGContext *s, int ret, int arg, int count)
{
    assert(count < 32);
    tcg_out_extr(s, ret, arg, count, 32 - count, 0);
}

static void tcg_out_shr(TCGContext *s, int ret, int arg, int creg)
{
    /* A double-word shift with %r0 (zero) as the high word shifts
       zeros in from the left.  */
    tcg_out_vshd(s, ret, TCG_REG_R0, arg, creg);
}

static void tcg_out_sari(TCGContext *s, int ret, int arg, int count)
{
    assert(count < 32);
    tcg_out_extr(s, ret, arg, count, 32 - count, 1);
}

static void tcg_out_sar(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_VEXTRS | INSN_R1(ret) | INSN_R2(arg) | INSN_DEP_LEN(32));
}
static void tcg_out_rotli(TCGContext *s, int ret, int arg, int count)
{
    assert(count < 32);
    tcg_out_shd(s, ret, arg, arg, 32 - count);
}

static void tcg_out_rotl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 32, INSN_SUBI);
    tcg_out_vshd(s, ret, arg, arg, TCG_REG_R20);
}

static void tcg_out_rotri(TCGContext *s, int ret, int arg, int count)
{
    assert(count < 32);
    tcg_out_shd(s, ret, arg, arg, count);
}

static void tcg_out_rotr(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_vshd(s, ret, arg, arg, creg);
}
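
/* Rotates come for free from the double-word shift: with ARG in both
   halves of the 64-bit pair, shifting the pair right by COUNT is a
   rotate right of ARG, and a rotate left by N is a rotate right by
   32 - N.  */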
static void tcg_out_bswap16(TCGContext *s, int ret, int arg, int sign)
{
    if (ret != arg) {
        tcg_out_mov(s, TCG_TYPE_I32, ret, arg);  /* arg =  xxAB */
    }
    tcg_out_dep(s, ret, ret, 16, 8);             /* ret =  xBAB */
    tcg_out_extr(s, ret, ret, 8, 16, sign);      /* ret =  ..BA */
}

static void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp)
{
                                          /* arg =  ABCD */
    tcg_out_rotri(s, temp, arg, 16);      /* temp = CDAB */
    tcg_out_dep(s, temp, temp, 16, 8);    /* temp = CBAB */
    tcg_out_shd(s, ret, arg, temp, 8);    /* ret =  DCBA */
}
static void tcg_out_call(TCGContext *s, void *func)
{
    tcg_target_long val, hi, lo, disp;

    val = (uint32_t)__canonicalize_funcptr_for_compare(func);
    disp = (val - ((tcg_target_long)s->code_ptr + 8)) >> 2;

    if (check_fit_tl(disp, 17)) {
        tcg_out32(s, INSN_BL_N | INSN_R2(TCG_REG_RP) | reassemble_17(disp));
    } else {
        hi = val >> 11;
        lo = val & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(TCG_REG_R20) | reassemble_21(hi));
        tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R20)
                  | reassemble_17(lo >> 2));
        tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_RP, TCG_REG_R31);
    }
}
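
/* BLE branches to an offset from a space register and leaves the
   return address in %r31 rather than %rp, hence the copy back into
   %rp after the long-distance call sequence.  */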
static void tcg_out_xmpyu(TCGContext *s, int retl, int reth,
                          int arg1, int arg2)
{
    /* Store both words into the stack for copy to the FPU. */
    tcg_out_ldst(s, arg1, TCG_REG_CALL_STACK, STACK_TEMP_OFS, INSN_STW);
    tcg_out_ldst(s, arg2, TCG_REG_CALL_STACK, STACK_TEMP_OFS + 4, INSN_STW);

    /* Load both words into the FPU at the same time.  We get away
       with this because we can address the left and right half of the
       FPU registers individually once loaded. */
    /* fldds stack_temp(sp),fr22 */
    tcg_out32(s, INSN_FLDDS | INSN_R2(TCG_REG_CALL_STACK)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* xmpyu fr22r,fr22,fr22 */
    tcg_out32(s, 0x3ad64796);

    /* Store the 64-bit result back into the stack. */
    /* fstds stack_temp(sp),fr22 */
    tcg_out32(s, INSN_FSTDS | INSN_R2(TCG_REG_CALL_STACK)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* Load the pieces of the result that the caller requested. */
    if (reth) {
        tcg_out_ldst(s, reth, TCG_REG_CALL_STACK, STACK_TEMP_OFS, INSN_LDW);
    }
    if (retl) {
        tcg_out_ldst(s, retl, TCG_REG_CALL_STACK, STACK_TEMP_OFS + 4,
                     INSN_LDW);
    }
}
static void tcg_out_add2(TCGContext *s, int destl, int desth,
                         int al, int ah, int bl, int bh, int blconst)
{
    int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);

    if (blconst) {
        tcg_out_arithi(s, tmp, al, bl, INSN_ADDI);
    } else {
        tcg_out_arith(s, tmp, al, bl, INSN_ADD);
    }
    tcg_out_arith(s, desth, ah, bh, INSN_ADDC);

    tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
}
static void tcg_out_sub2(TCGContext *s, int destl, int desth, int al, int ah,
                         int bl, int bh, int alconst, int blconst)
{
    int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);

    if (alconst) {
        if (blconst) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, bl);
            bl = TCG_REG_R20;
        }
        /* Recall that SUBI is a reversed subtract: tmp = al - bl. */
        tcg_out_arithi(s, tmp, bl, al, INSN_SUBI);
    } else if (blconst) {
        tcg_out_arithi(s, tmp, al, -bl, INSN_ADDI);
    } else {
        tcg_out_arith(s, tmp, al, bl, INSN_SUB);
    }
    tcg_out_arith(s, desth, ah, bh, INSN_SUBB);

    tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
}
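
/* ADDC and SUBB consume the carry/borrow left in the PSW by the
   low-part ADD/SUB, so each pair must stay back to back with nothing
   emitted in between that could clobber the carry.  */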
static void tcg_out_branch(TCGContext *s, int label_index, int nul)
{
    TCGLabel *l = &s->labels[label_index];
    uint32_t op = nul ? INSN_BL_N : INSN_BL;

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 17));

        tcg_out32(s, op | reassemble_17(val));
    } else {
        /* We need to keep the offset unchanged for retranslation. */
        uint32_t old_insn = *(uint32_t *)s->code_ptr;

        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL17F, label_index, 0);
        tcg_out32(s, op | (old_insn & 0x1f1ffdu));
    }
}
static const uint8_t tcg_cond_to_cmp_cond[10] =
{
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_EQ | COND_FALSE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_LT | COND_FALSE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_LE | COND_FALSE,
    [TCG_COND_LTU] = COND_LTU,
    [TCG_COND_GEU] = COND_LTU | COND_FALSE,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_LEU | COND_FALSE,
};
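
/* The PA-RISC compares encode only EQ/LT/LE/LTU plus a negation
   sense, so the remaining TCG conditions map to a base condition with
   COND_FALSE set; the emitters below turn that bit into the COMB,F /
   COMIB,F opcodes or the f bit of COMCLR.  */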
static void tcg_out_brcond(TCGContext *s, int cond, TCGArg c1,
                           TCGArg c2, int c2const, int label_index)
{
    TCGLabel *l = &s->labels[label_index];
    int op, pacond;

    /* Note that COMIB operates as if the immediate is the first
       operand.  We model brcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMB with reversed operands as well. */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = (pacond & COND_FALSE ? INSN_COMIBF : INSN_COMIBT);
        op |= INSN_IM5(c2);
    } else {
        op = (pacond & COND_FALSE ? INSN_COMBF : INSN_COMBT);
        op |= INSN_R1(c2);
    }
    op |= INSN_R2(c1);
    op |= INSN_COND(pacond & 7);

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 12));

        /* ??? Assume that all branches to defined labels are backward.
           Which means that if the nul bit is set, the delay slot is
           executed if the branch is taken, and not executed in fallthru. */
        tcg_out32(s, op | reassemble_12(val));
        tcg_out_nop(s);
    } else {
        /* We need to keep the offset unchanged for retranslation. */
        uint32_t old_insn = *(uint32_t *)s->code_ptr;

        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL12F, label_index, 0);
        /* ??? Assume that all branches to undefined labels are forward.
           Which means that if the nul bit is set, the delay slot is
           not executed if the branch is taken, which is what we want. */
        tcg_out32(s, op | 2 | (old_insn & 0x1ffdu));
    }
}
static void tcg_out_comclr(TCGContext *s, int cond, TCGArg ret,
                           TCGArg c1, TCGArg c2, int c2const)
{
    int op, pacond;

    /* Note that COMICLR operates as if the immediate is the first
       operand.  We model setcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMCLR with reversed operands as well. */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = INSN_COMICLR | INSN_R2(c1) | INSN_R1(ret) | INSN_IM11(c2);
    } else {
        op = INSN_COMCLR | INSN_R2(c1) | INSN_R1(c2) | INSN_T(ret);
    }
    op |= INSN_COND(pacond & 7);
    op |= pacond & COND_FALSE ? 1 << 12 : 0;

    tcg_out32(s, op);
}
static void tcg_out_brcond2(TCGContext *s, int cond, TCGArg al, TCGArg ah,
                            TCGArg bl, int blconst, TCGArg bh, int bhconst,
                            int label_index)
{
    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tcg_out_comclr(s, tcg_invert_cond(cond), TCG_REG_R0, al, bl, blconst);
        tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
        break;

    default:
        tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
        tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_brcond(s, tcg_unsigned_cond(cond),
                       al, bl, blconst, label_index);
        break;
    }
}
static void tcg_out_setcond(TCGContext *s, int cond, TCGArg ret,
                            TCGArg c1, TCGArg c2, int c2const)
{
    tcg_out_comclr(s, tcg_invert_cond(cond), ret, c1, c2, c2const);
    tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
}
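
/* COMCLR zeroes RET and nullifies the following instruction when the
   inverted condition holds, so RET ends up 1 exactly when COND is
   true: a branch-free setcond in two instructions.  */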
static void tcg_out_setcond2(TCGContext *s, int cond, TCGArg ret,
                             TCGArg al, TCGArg ah, TCGArg bl, int blconst,
                             TCGArg bh, int bhconst)
{
    int scratch = TCG_REG_R20;

    /* Note that the low parts are fully consumed before scratch is set. */
    if (ret != al && ret != ah
        && (blconst || ret != bl)
        && (bhconst || ret != bh)) {
        scratch = ret;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tcg_out_setcond(s, cond, scratch, al, bl, blconst);
        tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, cond == TCG_COND_NE);
        break;

    default:
        tcg_out_setcond(s, tcg_unsigned_cond(cond), scratch, al, bl, blconst);
        tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, 0);
        tcg_out_comclr(s, cond, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, 1);
        break;
    }

    tcg_out_mov(s, TCG_TYPE_I32, ret, scratch);
}
#if defined(CONFIG_SOFTMMU)
#include "../../softmmu_defs.h"

static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
/* Load and compare a TLB entry, and branch if TLB miss.  OFFSET is set to
   the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
   TLB for the memory index.  The return value is the offset from ENV
   contained in R1 afterward (to be used when loading ADDEND); if the
   return value is 0, R1 is not used. */

static int tcg_out_tlb_read(TCGContext *s, int r0, int r1, int addrlo,
                            int addrhi, int s_bits, int lab_miss, int offset)
{
    int ret;

    /* Extracting the index into the TLB.  The "normal C operation" is
         r1 = addr_reg >> TARGET_PAGE_BITS;
         r1 &= CPU_TLB_SIZE - 1;
         r1 <<= CPU_TLB_ENTRY_BITS;
       What this does is extract CPU_TLB_BITS beginning at TARGET_PAGE_BITS
       and place them at CPU_TLB_ENTRY_BITS.  We can combine the first two
       operations with an EXTRU.  Unfortunately, the current value of
       CPU_TLB_ENTRY_BITS is > 3, so we can't merge that shift with the
       add that follows.  */
    tcg_out_extr(s, r1, addrlo, TARGET_PAGE_BITS, CPU_TLB_BITS, 0);
    tcg_out_shli(s, r1, r1, CPU_TLB_ENTRY_BITS);
    tcg_out_arith(s, r1, r1, TCG_AREG0, INSN_ADDL);

    /* Make sure that both the addr_{read,write} and addend can be
       read with a 14-bit offset from the same base register. */
    if (check_fit_tl(offset + CPU_TLB_SIZE, 14)) {
        ret = 0;
    } else {
        ret = (offset + 0x400) & ~0x7ff;
        offset = ret - offset;
        tcg_out_addi2(s, TCG_REG_R1, r1, ret);
        r1 = TCG_REG_R1;
    }

    /* Load the entry from the computed slot. */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R23, r1, offset);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset + 4);
    } else {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset);
    }

    /* Compute the value that ought to appear in the TLB for a hit,
       namely, the page of the address.  We include the low N bits of
       the address to catch unaligned accesses and force them onto the
       slow path.  Do this computation after having issued the load
       from the TLB slot to give the load time to complete. */
    tcg_out_andi(s, r0, addrlo, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* If not equal, jump to lab_miss. */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_brcond2(s, TCG_COND_NE, TCG_REG_R20, TCG_REG_R23,
                        r0, 0, addrhi, 0, lab_miss);
    } else {
        tcg_out_brcond(s, TCG_COND_NE, TCG_REG_R20, r0, 0, lab_miss);
    }

    return ret;
}
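
/* With the usual TARGET_PAGE_BITS = 12 and CPU_TLB_BITS = 8, the EXTRU
   above pulls address bits 19..12 down to form the TLB index, and the
   SHLI scales it by the entry size; only because CPU_TLB_ENTRY_BITS
   exceeds the 3-bit shift available in SHxADD does the scaling need a
   separate shift before the add.  */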
static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo_reg,
                                   int datahi_reg, int addr_reg,
                                   int addend_reg, int opc)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif

    switch (opc) {
    case 0:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
        break;
    case 0 | 4:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
        tcg_out_ext8s(s, datalo_reg, datalo_reg);
        break;
    case 1:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
        if (bswap) {
            tcg_out_bswap16(s, datalo_reg, datalo_reg, 0);
        }
        break;
    case 1 | 4:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
        if (bswap) {
            tcg_out_bswap16(s, datalo_reg, datalo_reg, 1);
        } else {
            tcg_out_ext16s(s, datalo_reg, datalo_reg);
        }
        break;
    case 2:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDWX);
        if (bswap) {
            tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
        }
        break;
    case 3:
        if (bswap) {
            int t = datahi_reg;
            datahi_reg = datalo_reg;
            datalo_reg = t;
        }
        /* We can't access the low-part with a reg+reg addressing mode,
           so perform the addition now and use reg_ofs addressing mode. */
        if (addend_reg != TCG_REG_R0) {
            tcg_out_arith(s, TCG_REG_R20, addr_reg, addend_reg, INSN_ADD);
            addr_reg = TCG_REG_R20;
        }
        /* Make sure not to clobber the base register. */
        if (datahi_reg == addr_reg) {
            tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
            tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
        } else {
            tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
            tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
        }
        if (bswap) {
            tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
            tcg_out_bswap32(s, datahi_reg, datahi_reg, TCG_REG_R20);
        }
        break;
    default:
        tcg_abort();
    }
}
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    int datalo_reg = *args++;
    /* Note that datahi_reg is only used for 64-bit loads. */
    int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
    int addrlo_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    /* Note that addrhi_reg is only used for 64-bit guests. */
    int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    int mem_index = *args;
    int lab1, lab2, argreg, offset;

    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = offsetof(CPUState, tlb_table[mem_index][0].addr_read);
    offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg,
                              addrhi_reg, opc & 3, lab1, offset);

    /* TLB Hit. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20,
               (offset ? TCG_REG_R1 : TCG_REG_R25),
               offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);
    tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
                           TCG_REG_R20, opc);
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss. */
    tcg_out_label(s, lab1, (tcg_target_long)s->code_ptr);

    argreg = TCG_REG_R26;
    tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrlo_reg);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrhi_reg);
    }
    tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);

    tcg_out_call(s, qemu_ld_helpers[opc & 3]);

    switch (opc) {
    case 0:
        tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xff);
        break;
    case 0 | 4:
        tcg_out_ext8s(s, datalo_reg, TCG_REG_RET0);
        break;
    case 1:
        tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xffff);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, datalo_reg, TCG_REG_RET0);
        break;
    case 2:
        tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET0);
        break;
    case 3:
        tcg_out_mov(s, TCG_TYPE_I32, datahi_reg, TCG_REG_RET0);
        tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET1);
        break;
    default:
        tcg_abort();
    }

    /* label2: */
    tcg_out_label(s, lab2, (tcg_target_long)s->code_ptr);
#else
    tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
                           (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_R0),
                           opc);
#endif
}
static void tcg_out_qemu_st_direct(TCGContext *s, int datalo_reg,
                                   int datahi_reg, int addr_reg, int opc)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif

    switch (opc) {
    case 0:
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STB);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, TCG_REG_R20, datalo_reg, 0);
            datalo_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STH);
        break;
    case 2:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
            datalo_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STW);
        break;
    case 3:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
            tcg_out_bswap32(s, TCG_REG_R23, datahi_reg, TCG_REG_R23);
            datahi_reg = TCG_REG_R20;
            datalo_reg = TCG_REG_R23;
        }
        tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_STW);
        tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_STW);
        break;
    default:
        tcg_abort();
    }
}
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    int datalo_reg = *args++;
    /* Note that datahi_reg is only used for 64-bit stores. */
    int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
    int addrlo_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    /* Note that addrhi_reg is only used for 64-bit guests. */
    int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    int mem_index = *args;
    int lab1, lab2, argreg, offset;

    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = offsetof(CPUState, tlb_table[mem_index][0].addr_write);
    offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg,
                              addrhi_reg, opc, lab1, offset);

    /* TLB Hit. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20,
               (offset ? TCG_REG_R1 : TCG_REG_R25),
               offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);

    /* There are no indexed stores, so we must do this addition explicitly.
       Careful to avoid R20, which is used for the bswaps to follow. */
    tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_REG_R20, INSN_ADDL);
    tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, TCG_REG_R31, opc);
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss. */
    tcg_out_label(s, lab1, (tcg_target_long)s->code_ptr);

    argreg = TCG_REG_R26;
    tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrlo_reg);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrhi_reg);
    }

    switch (opc) {
    case 0:
        tcg_out_andi(s, argreg--, datalo_reg, 0xff);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 1:
        tcg_out_andi(s, argreg--, datalo_reg, 0xffff);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 2:
        tcg_out_mov(s, TCG_TYPE_I32, argreg--, datalo_reg);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 3:
        /* Because of the alignment required by the 64-bit data argument,
           we will always use R23/R24.  Also, we will always run out of
           argument registers for storing mem_index, so that will have
           to go on the stack. */
        if (mem_index == 0) {
            argreg = TCG_REG_R0;
        } else {
            argreg = TCG_REG_R20;
            tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        }
        tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R23, datahi_reg);
        tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R24, datalo_reg);
        tcg_out_st(s, TCG_TYPE_I32, argreg, TCG_REG_CALL_STACK,
                   TCG_TARGET_CALL_STACK_OFFSET - 4);
        break;
    default:
        tcg_abort();
    }

    tcg_out_call(s, qemu_st_helpers[opc]);

    /* label2: */
    tcg_out_label(s, lab2, (tcg_target_long)s->code_ptr);
#else
    /* There are no indexed stores, so if GUEST_BASE is set we must do
       the add explicitly.  Careful to avoid R20, which is used for the
       bswaps to follow. */
    if (GUEST_BASE != 0) {
        tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_GUEST_BASE_REG,
                      INSN_ADDL);
        addrlo_reg = TCG_REG_R31;
    }
    tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, addrlo_reg, opc);
#endif
}
static void tcg_out_exit_tb(TCGContext *s, TCGArg arg)
{
    if (!check_fit_tl(arg, 14)) {
        tcg_target_long hi, lo;

        hi = arg & ~0x7ff;
        lo = arg & 0x7ff;

        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, hi);
        tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
        tcg_out_addi(s, TCG_REG_RET0, lo);
    } else {
        tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, arg);
    }
}
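
/* BV has a delay slot, so the value exit_tb returns in %ret0 is
   (partly) materialized after the branch issues: either the whole
   14-bit constant via a single LDO, or just its low 11 bits via ADDI
   after the LDIL that tcg_out_movi emitted for the high part.  */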
static void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
{
    if (s->tb_jmp_offset) {
        /* direct jump method */
        fprintf(stderr, "goto_tb direct\n");
        tcg_abort();
    } else {
        /* indirect jump method */
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, TCG_REG_R0,
                   (tcg_target_long)(s->tb_next + arg));
        tcg_out32(s, INSN_BV_N | INSN_R2(TCG_REG_R20));
    }
    s->tb_next_offset[arg] = s->code_ptr - s->code_buf;
}
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                              const int *const_args)
{
    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_exit_tb(s, args[0]);
        break;
    case INDEX_op_goto_tb:
        tcg_out_goto_tb(s, args[0]);
        break;

    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out_call(s, (void *)args[0]);
        } else {
            /* ??? FIXME: the value in the register in args[0] is almost
               certainly a procedure descriptor, not a code address.  We
               probably need to use the millicode $$dyncall routine. */
            tcg_abort();
        }
        break;

    case INDEX_op_jmp:
        fprintf(stderr, "unimplemented jmp\n");
        tcg_abort();
        break;

    case INDEX_op_br:
        tcg_out_branch(s, args[0], 1);
        break;

    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        tcg_out_ext8s(s, args[0], args[0]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        tcg_out_ext16s(s, args[0], args[0]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDW);
        break;

    case INDEX_op_st8_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STB);
        break;
    case INDEX_op_st16_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STH);
        break;
    case INDEX_op_st_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STW);
        break;

    case INDEX_op_add_i32:
        if (const_args[2]) {
            tcg_out_addi2(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ADDL);
        }
        break;

    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1] - args[2]);
            } else {
                /* Recall that SUBI is a reversed subtract. */
                tcg_out_arithi(s, args[0], args[2], args[1], INSN_SUBI);
            }
        } else if (const_args[2]) {
            tcg_out_addi2(s, args[0], args[1], -args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_SUB);
        }
        break;

    case INDEX_op_and_i32:
        if (const_args[2]) {
            tcg_out_andi(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_AND);
        }
        break;

    case INDEX_op_or_i32:
        if (const_args[2]) {
            tcg_out_ori(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_OR);
        }
        break;

    case INDEX_op_xor_i32:
        tcg_out_arith(s, args[0], args[1], args[2], INSN_XOR);
        break;

    case INDEX_op_andc_i32:
        if (const_args[2]) {
            tcg_out_andi(s, args[0], args[1], ~args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ANDCM);
        }
        break;

    case INDEX_op_shl_i32:
        if (const_args[2]) {
            tcg_out_shli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_shr_i32:
        if (const_args[2]) {
            tcg_out_shri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shr(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_sar_i32:
        if (const_args[2]) {
            tcg_out_sari(s, args[0], args[1], args[2]);
        } else {
            tcg_out_sar(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_rotli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_rotri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotr(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mul_i32:
        tcg_out_xmpyu(s, args[0], TCG_REG_R0, args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_xmpyu(s, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, args[0], args[1], 0);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, args[0], args[1], TCG_REG_R20);
        break;

    case INDEX_op_not_i32:
        /* ~x == -1 - x, so a reversed subtract with immediate -1. */
        tcg_out_arithi(s, args[0], args[1], -1, INSN_SUBI);
        break;
    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, args[0], args[1]);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], args[0], args[1],
                        args[2], const_args[2],
                        args[3], const_args[3], args[5]);
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], args[0], args[1], args[2],
                         args[3], const_args[3], args[4], const_args[4]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_add2(s, args[0], args[1], args[2], args[3],
                     args[4], args[5], const_args[4]);
        break;

    case INDEX_op_sub2_i32:
        tcg_out_sub2(s, args[0], args[1], args[2], args[3],
                     args[4], args[5], const_args[2], const_args[4]);
        break;

    case INDEX_op_deposit_i32:
        if (const_args[2]) {
            tcg_out_depi(s, args[0], args[2], args[3], args[4]);
        } else {
            tcg_out_dep(s, args[0], args[2], args[3], args[4]);
        }
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    default:
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
        tcg_abort();
    }
}
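
/* In the qemu_ld/st cases above, bits 0-1 of the constant passed down
   encode the access size (0..3 for 8..64 bits) and bit 2 requests sign
   extension, matching the "0 | 4", "1 | 4" spellings.  */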
static const TCGTargetOpDef hppa_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },

    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "r" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "ri" } },
    { INDEX_op_sub_i32, { "r", "rI", "ri" } },
    { INDEX_op_and_i32, { "r", "rZ", "rM" } },
    { INDEX_op_or_i32, { "r", "rZ", "rO" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rZ" } },
    /* Note that the second argument will be inverted, which means
       we want a constant whose inversion matches M, and that O = ~M.
       See the implementation of and_mask_p. */
    { INDEX_op_andc_i32, { "r", "rZ", "rO" } },

    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },

    { INDEX_op_setcond_i32, { "r", "rZ", "rI" } },
    { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rI", "rI" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rI", "rZ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rI", "rZ", "rK", "rZ" } },

    { INDEX_op_deposit_i32, { "r", "0", "rJ" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L", "L" } },
#endif

    { -1 },
};
static int tcg_target_callee_save_regs[] = {
    /* R2, the return address register, is saved specially
       in the caller's frame. */
    /* R3, the frame pointer, is not currently modified. */
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    TCG_REG_R17, /* R17 is the global env. */
    TCG_REG_R18
};
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int frame_size, i;

    /* Allocate space for the fixed frame marker. */
    frame_size = -TCG_TARGET_CALL_STACK_OFFSET;
    frame_size += TCG_TARGET_STATIC_CALL_ARGS_SIZE;

    /* Allocate space for the saved registers. */
    frame_size += ARRAY_SIZE(tcg_target_callee_save_regs) * 4;

    /* Allocate space for the TCG temps. */
    frame_size += CPU_TEMP_BUF_NLONGS * sizeof(long);

    /* Align the allocated space. */
    frame_size = ((frame_size + TCG_TARGET_STACK_ALIGN - 1)
                  & -TCG_TARGET_STACK_ALIGN);

    /* The return address is stored in the caller's frame. */
    tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK, -20);

    /* Allocate stack frame, saving the first register at the same time. */
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_CALL_STACK, frame_size, INSN_STWM);

    /* Save all callee saved registers. */
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_CALL_STACK, -frame_size + i * 4);
    }

    /* Record the location of the TCG temps. */
    tcg_set_frame(s, TCG_REG_CALL_STACK, -frame_size + i * 4,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

#ifdef CONFIG_USE_GUEST_BASE
    if (GUEST_BASE != 0) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    /* Jump to TB, and adjust R18 to be the return address. */
    tcg_out32(s, INSN_BLE_SR4 | INSN_R2(tcg_target_call_iarg_regs[1]));
    tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R18, TCG_REG_R31);

    /* Restore callee saved registers. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK,
               -frame_size - 20);
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_CALL_STACK, -frame_size + i * 4);
    }

    /* Deallocate stack frame and return. */
    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_RP));
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_CALL_STACK, -frame_size, INSN_LDWM);
}
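
/* STWM (store word and modify) saves the first register and bumps %sp
   in a single instruction; the matching LDWM sits in the BV delay slot
   above, restoring that register and deallocating the frame on the way
   out.  */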
static void tcg_target_init(TCGContext *s)
{
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R20);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R21);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R22);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R23);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R24);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R25);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R26);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET1);

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);   /* hardwired to zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);   /* addil target */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RP);   /* link register */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3);   /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R18);  /* return pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R19);  /* clobbered w/o pic */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R20);  /* reserved */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_DP);   /* data pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R31);  /* ble link reg */

    tcg_add_target_add_op_defs(hppa_op_defs);
}