4
* Copyright (c) 2015 Chen Gang
6
* This library is free software; you can redistribute it and/or
7
* modify it under the terms of the GNU Lesser General Public
8
* License as published by the Free Software Foundation; either
9
* version 2.1 of the License, or (at your option) any later version.
11
* This library is distributed in the hope that it will be useful,
12
* but WITHOUT ANY WARRANTY; without even the implied warranty of
13
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14
* Lesser General Public License for more details.
16
* You should have received a copy of the GNU Lesser General Public
17
* License along with this library; if not, see
18
* <http://www.gnu.org/licenses/lgpl-2.1.html>
21
#include "qemu/osdep.h"
25
#include "disas/disas.h"
26
#include "exec/exec-all.h"
28
#include "exec/cpu_ldst.h"
29
#include "linux-user/syscall_defs.h"
31
#include "opcode_tilegx.h"
32
#include "spr_def_64.h"
34
#define FMT64X "%016" PRIx64
36
static TCGv_env cpu_env;
38
static TCGv cpu_regs[TILEGX_R_COUNT];
40
/* Disassembly names for the 64 architectural registers.
   r0-r51 are general, then bp/tp/sp/lr, then the special network
   registers.  Note the last row is sn, idn0-1, udn0-3, zero.  */
static const char * const reg_names[64] = {
    "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
    "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
    "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
    "r48", "r49", "r50", "r51", "bp",  "tp",  "sp",  "lr",
    "sn",  "idn0", "idn1", "udn0", "udn1", "udn2", "udn3", "zero"
};
51
/* Modified registers are cached in temporaries until the end of the bundle. */
57
#define MAX_WRITEBACK 4
59
/* This is the state at translation time. */
61
uint64_t pc; /* Current pc */
63
TCGv zero; /* For zero register */
65
DisasContextTemp wb[MAX_WRITEBACK];
72
TCGCond cond; /* branch condition */
73
TCGv dest; /* branch destination */
74
TCGv val1; /* value to be compared against zero, for cond */
75
} jmp; /* Jump object, only once in each TB block */
78
#include "exec/gen-icount.h"
80
/* Differentiate the various pipe encodings. */
86
/* Remerge the base opcode and extension fields for switching.
87
The X opcode fields are 3 bits; Y0/Y1 opcode fields are 4 bits;
88
Y2 opcode field is 2 bits. */
89
#define OE(OP, EXT, XY) (TY_##XY + OP * 4 + EXT * 64)
91
/* Similar, but for Y2 only. */
92
#define OEY2(OP, MODE) (OP + MODE * 4)
94
/* Similar, but make sure opcode names match up. */
95
#define OE_RR_X0(E) OE(RRR_0_OPCODE_X0, E##_UNARY_OPCODE_X0, X0)
96
#define OE_RR_X1(E) OE(RRR_0_OPCODE_X1, E##_UNARY_OPCODE_X1, X1)
97
#define OE_RR_Y0(E) OE(RRR_1_OPCODE_Y0, E##_UNARY_OPCODE_Y0, Y0)
98
#define OE_RR_Y1(E) OE(RRR_1_OPCODE_Y1, E##_UNARY_OPCODE_Y1, Y1)
99
#define OE_RRR(E,N,XY) OE(RRR_##N##_OPCODE_##XY, E##_RRR_##N##_OPCODE_##XY, XY)
100
#define OE_IM(E,XY) OE(IMM8_OPCODE_##XY, E##_IMM8_OPCODE_##XY, XY)
101
#define OE_SH(E,XY) OE(SHIFT_OPCODE_##XY, E##_SHIFT_OPCODE_##XY, XY)
103
#define V1_IMM(X) (((X) & 0xff) * 0x0101010101010101ull)
104
#define V2_IMM(X) (((X) & 0xffff) * 0x0001000100010001ull)
107
/* Raise exception NUM, with the pc already advanced past the
   current bundle so the handler resumes at the next one.  */
static void gen_exception(DisasContext *dc, TileExcp num)
{
    TCGv_i32 tmp;

    tcg_gen_movi_tl(cpu_pc, dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);

    tmp = tcg_const_i32(num);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
119
/* Return true if REG is a general register backed by cpu_regs[].
   Accesses to the network registers raise the appropriate exception
   and return false so the caller substitutes the zero register.  */
static bool check_gr(DisasContext *dc, uint8_t reg)
{
    if (likely(reg < TILEGX_R_COUNT)) {
        return true;
    }

    switch (reg) {
    case TILEGX_R_SN:
    case TILEGX_R_ZERO:
        break;
    case TILEGX_R_IDN0:
    case TILEGX_R_IDN1:
        gen_exception(dc, TILEGX_EXCP_REG_IDN_ACCESS);
        break;
    case TILEGX_R_UDN0:
    case TILEGX_R_UDN1:
    case TILEGX_R_UDN2:
    case TILEGX_R_UDN3:
        gen_exception(dc, TILEGX_EXCP_REG_UDN_ACCESS);
        break;
    default:
        g_assert_not_reached();
    }
    return false;
}
145
/* Return a temporary holding constant zero, allocated lazily
   once per bundle and cached in the translation context.  */
static TCGv load_zero(DisasContext *dc)
{
    if (TCGV_IS_UNUSED_I64(dc->zero)) {
        dc->zero = tcg_const_i64(0);
    }
    return dc->zero;
}
153
static TCGv load_gr(DisasContext *dc, unsigned reg)
155
if (check_gr(dc, reg)) {
156
return cpu_regs[reg];
158
return load_zero(dc);
161
static TCGv dest_gr(DisasContext *dc, unsigned reg)
165
/* Skip the result, mark the exception if necessary, and continue */
170
return dc->wb[n].val = tcg_temp_new_i64();
173
/* Apply OPERATE to the sign-extended low 32 bits of the sources,
   then saturate the result into the signed 32-bit range.  */
static void gen_saturate_op(TCGv tdest, TCGv tsrca, TCGv tsrcb,
                            void (*operate)(TCGv, TCGv, TCGv))
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32s_tl(tdest, tsrca);
    tcg_gen_ext32s_tl(t0, tsrcb);
    operate(tdest, tdest, t0);

    /* Clamp to [INT32_MIN, INT32_MAX] with two movconds.  */
    tcg_gen_movi_tl(t0, 0x7fffffff);
    tcg_gen_movcond_tl(TCG_COND_GT, tdest, tdest, t0, t0, tdest);
    tcg_gen_movi_tl(t0, -0x80000000LL);
    tcg_gen_movcond_tl(TCG_COND_LT, tdest, tdest, t0, t0, tdest);

    tcg_temp_free(t0);
}
190
/* Stage the operands of an atomic instruction in env and arrange for
   EXCP to be raised after the bundle completes; the helper performing
   the actual atomic operation runs in the exception path.  */
static void gen_atomic_excp(DisasContext *dc, unsigned dest, TCGv tdest,
                            TCGv tsrca, TCGv tsrcb, TileExcp excp)
{
#ifdef CONFIG_USER_ONLY
    TCGv_i32 t;

    tcg_gen_st_tl(tsrca, cpu_env, offsetof(CPUTLGState, atomic_srca));
    tcg_gen_st_tl(tsrcb, cpu_env, offsetof(CPUTLGState, atomic_srcb));
    t = tcg_const_i32(dest);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUTLGState, atomic_dstr));
    tcg_temp_free_i32(t);

    /* We're going to write the real result in the exception.  But in
       the meantime we've already created a writeback register, and
       we don't want that to remain uninitialized.  */
    tcg_gen_movi_tl(tdest, 0);

    /* Note that we need to delay issuing the exception that implements
       the atomic operation until after writing back the results of the
       instruction occupying the X0 pipe.  */
    dc->atomic_excp = excp;
#else
    gen_exception(dc, TILEGX_EXCP_OPCODE_UNIMPLEMENTED);
#endif
}
216
/* Shift the 128-bit value TSRCA:TSRCD right by the number of bytes
217
specified by the bottom 3 bits of TSRCB, and set TDEST to the
218
low 64 bits of the resulting value. */
219
static void gen_dblalign(TCGv tdest, TCGv tsrcd, TCGv tsrca, TCGv tsrcb)
221
TCGv t0 = tcg_temp_new();
223
tcg_gen_andi_tl(t0, tsrcb, 7);
224
tcg_gen_shli_tl(t0, t0, 3);
225
tcg_gen_shr_tl(tdest, tsrcd, t0);
227
/* We want to do "t0 = tsrca << (64 - t0)". Two's complement
228
arithmetic on a 6-bit field tells us that 64 - t0 is equal
229
to (t0 ^ 63) + 1. So we can do the shift in two parts,
230
neither of which will be an invalid shift by 64. */
231
tcg_gen_xori_tl(t0, t0, 63);
232
tcg_gen_shl_tl(t0, tsrca, t0);
233
tcg_gen_shli_tl(t0, t0, 1);
234
tcg_gen_or_tl(tdest, tdest, t0);
239
/* Similarly, except that the 128-bit value is TSRCA:TSRCB, and the
240
right shift is an immediate. */
241
static void gen_dblaligni(TCGv tdest, TCGv tsrca, TCGv tsrcb, int shr)
243
TCGv t0 = tcg_temp_new();
245
tcg_gen_shri_tl(t0, tsrcb, shr);
246
tcg_gen_shli_tl(tdest, tsrca, 64 - shr);
247
tcg_gen_or_tl(tdest, tdest, t0);
256
/* Extract the 32-bit half of S selected by H into D: low/high,
   unsigned/signed.  */
static void gen_ext_half(TCGv d, TCGv s, MulHalf h)
{
    switch (h) {
    case LU:
        tcg_gen_ext32u_tl(d, s);
        break;
    case LS:
        tcg_gen_ext32s_tl(d, s);
        break;
    case HU:
        tcg_gen_shri_tl(d, s, 32);
        break;
    case HS:
        tcg_gen_sari_tl(d, s, 32);
        break;
    }
}
274
/* 32x32->64 multiply of the selected halves of the two sources.  */
static void gen_mul_half(TCGv tdest, TCGv tsrca, TCGv tsrcb,
                         MulHalf ha, MulHalf hb)
{
    TCGv t = tcg_temp_new();

    gen_ext_half(t, tsrca, ha);
    gen_ext_half(tdest, tsrcb, hb);
    tcg_gen_mul_tl(tdest, tdest, t);

    tcg_temp_free(t);
}
284
/* Complex multiply via helper, with shift amount SH and rounding
   constant RD distinguishing the cmulf/cmulh/rounding variants.  */
static void gen_cmul2(TCGv tdest, TCGv tsrca, TCGv tsrcb, int sh, int rd)
{
    TCGv_i32 tsh = tcg_const_i32(sh);
    TCGv_i32 trd = tcg_const_i32(rd);

    gen_helper_cmul2(tdest, tsrca, tsrcb, tsh, trd);

    tcg_temp_free_i32(tsh);
    tcg_temp_free_i32(trd);
}
293
/* Translate a plain store: *(srca) = srcb with width MEMOP.
   Store encodings have no destination, so a nonzero dest field
   means the bundle is malformed.  */
static TileExcp gen_st_opcode(DisasContext *dc, unsigned dest, unsigned srca,
                              unsigned srcb, TCGMemOp memop, const char *name)
{
    if (dest) {
        return TILEGX_EXCP_OPCODE_UNKNOWN;
    }

    tcg_gen_qemu_st_tl(load_gr(dc, srcb), load_gr(dc, srca),
                       dc->mmuidx, memop);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", name,
                  reg_names[srca], reg_names[srcb]);
    return TILEGX_EXCP_NONE;
}
308
/* Translate a store-and-add: *(srca) = srcb; srca += imm.  */
static TileExcp gen_st_add_opcode(DisasContext *dc, unsigned srca, unsigned srcb,
                                  int imm, TCGMemOp memop, const char *name)
{
    TCGv tsrca = load_gr(dc, srca);
    TCGv tsrcb = load_gr(dc, srcb);

    tcg_gen_qemu_st_tl(tsrcb, tsrca, dc->mmuidx, memop);
    tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %d", name,
                  reg_names[srca], reg_names[srcb], imm);
    return TILEGX_EXCP_NONE;
}
322
/* Equality comparison with zero can be done quickly and efficiently. */
323
static void gen_v1cmpeq0(TCGv v)
325
TCGv m = tcg_const_tl(V1_IMM(0x7f));
326
TCGv c = tcg_temp_new();
328
/* ~(((v & m) + m) | m | v). Sets the msb for each byte == 0. */
329
tcg_gen_and_tl(c, v, m);
330
tcg_gen_add_tl(c, c, m);
331
tcg_gen_or_tl(c, c, m);
332
tcg_gen_nor_tl(c, c, v);
335
/* Shift the msb down to form the lsb boolean result. */
336
tcg_gen_shri_tl(v, c, 7);
340
/* Per-byte v1cmpne against zero: replace each byte of V with a 0/1
   boolean indicating whether it was nonzero.  */
static void gen_v1cmpne0(TCGv v)
{
    TCGv m = tcg_const_tl(V1_IMM(0x7f));
    TCGv c = tcg_temp_new();

    /* (((v & m) + m) | v) & ~m.  Sets the msb for each byte != 0.  */
    tcg_gen_and_tl(c, v, m);
    tcg_gen_add_tl(c, c, m);
    tcg_gen_or_tl(c, c, v);
    tcg_gen_andc_tl(c, c, m);
    tcg_temp_free(m);

    /* Shift the msb down to form the lsb boolean result.  */
    tcg_gen_shri_tl(v, c, 7);
    tcg_temp_free(c);
}
357
/* Vector addition can be performed via arithmetic plus masking. It is
358
efficient this way only for 4 or more elements. */
359
static void gen_v12add(TCGv tdest, TCGv tsrca, TCGv tsrcb, uint64_t sign)
361
TCGv tmask = tcg_const_tl(~sign);
362
TCGv t0 = tcg_temp_new();
363
TCGv t1 = tcg_temp_new();
365
/* ((a & ~sign) + (b & ~sign)) ^ ((a ^ b) & sign). */
366
tcg_gen_and_tl(t0, tsrca, tmask);
367
tcg_gen_and_tl(t1, tsrcb, tmask);
368
tcg_gen_add_tl(tdest, t0, t1);
369
tcg_gen_xor_tl(t0, tsrca, tsrcb);
370
tcg_gen_andc_tl(t0, t0, tmask);
371
tcg_gen_xor_tl(tdest, tdest, t0);
375
tcg_temp_free(tmask);
378
/* Similarly for vector subtraction. */
379
static void gen_v12sub(TCGv tdest, TCGv tsrca, TCGv tsrcb, uint64_t sign)
381
TCGv tsign = tcg_const_tl(sign);
382
TCGv t0 = tcg_temp_new();
383
TCGv t1 = tcg_temp_new();
385
/* ((a | sign) - (b & ~sign)) ^ ((a ^ ~b) & sign). */
386
tcg_gen_or_tl(t0, tsrca, tsign);
387
tcg_gen_andc_tl(t1, tsrcb, tsign);
388
tcg_gen_sub_tl(tdest, t0, t1);
389
tcg_gen_eqv_tl(t0, tsrca, tsrcb);
390
tcg_gen_and_tl(t0, t0, tsign);
391
tcg_gen_xor_tl(tdest, tdest, t0);
395
tcg_temp_free(tsign);
398
/* Apply the 32-bit shift GENERATE to both halves of A64, with a
   shared shift count taken from the low 5 bits of B64.  */
static void gen_v4sh(TCGv d64, TCGv a64, TCGv b64,
                     void (*generate)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 al = tcg_temp_new_i32();
    TCGv_i32 ah = tcg_temp_new_i32();
    TCGv_i32 bl = tcg_temp_new_i32();

    tcg_gen_extr_i64_i32(al, ah, a64);
    tcg_gen_extrl_i64_i32(bl, b64);
    tcg_gen_andi_i32(bl, bl, 31);
    generate(al, al, bl);
    generate(ah, ah, bl);
    tcg_gen_concat_i32_i64(d64, al, ah);

    tcg_temp_free_i32(al);
    tcg_temp_free_i32(ah);
    tcg_temp_free_i32(bl);
}
417
/* Apply the 32-bit operation GENERATE lane-wise to the two halves
   of A64 and B64, concatenating the results into D64.  */
static void gen_v4op(TCGv d64, TCGv a64, TCGv b64,
                     void (*generate)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 al = tcg_temp_new_i32();
    TCGv_i32 ah = tcg_temp_new_i32();
    TCGv_i32 bl = tcg_temp_new_i32();
    TCGv_i32 bh = tcg_temp_new_i32();

    tcg_gen_extr_i64_i32(al, ah, a64);
    tcg_gen_extr_i64_i32(bl, bh, b64);
    generate(al, al, bl);
    generate(ah, ah, bh);
    tcg_gen_concat_i32_i64(d64, al, ah);

    tcg_temp_free_i32(al);
    tcg_temp_free_i32(ah);
    tcg_temp_free_i32(bl);
    tcg_temp_free_i32(bh);
}
437
/* Store SIGNO/SIGCODE into env and report a signal-raising exception,
   logging MNEMONIC for the disassembly trace.  */
static TileExcp gen_signal(DisasContext *dc, int signo, int sigcode,
                           const char *mnemonic)
{
    TCGv_i32 t0 = tcg_const_i32(signo);
    TCGv_i32 t1 = tcg_const_i32(sigcode);

    tcg_gen_st_i32(t0, cpu_env, offsetof(CPUTLGState, signo));
    tcg_gen_st_i32(t1, cpu_env, offsetof(CPUTLGState, sigcode));

    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s", mnemonic);
    return TILEGX_EXCP_SIGNAL;
}
453
static bool parse_from_addli(uint64_t bundle, int *signo, int *sigcode)
457
if ((get_Opcode_X0(bundle) != ADDLI_OPCODE_X0)
458
|| (get_Dest_X0(bundle) != TILEGX_R_ZERO)
459
|| (get_SrcA_X0(bundle) != TILEGX_R_ZERO)) {
463
imm = get_Imm16_X0(bundle);
465
*sigcode = (imm >> 6) & 0xf;
467
/* ??? The linux kernel validates both signo and the sigcode vs the
468
known max for each signal. Don't bother here. */
472
/* Translate the special ill pseudo-instructions: "bpt" (breakpoint)
   and "raise" are encoded as ill with magic dest/srca fields; any
   other encoding is a genuine illegal opcode.  */
static TileExcp gen_specill(DisasContext *dc, unsigned dest, unsigned srca,
                            uint64_t bundle)
{
    const char *mnemonic;
    int signo;
    int sigcode;

    if (dest == 0x1c && srca == 0x25) {
        signo = TARGET_SIGTRAP;
        sigcode = TARGET_TRAP_BRKPT;
        mnemonic = "bpt";
    } else if (dest == 0x1d && srca == 0x25
               && parse_from_addli(bundle, &signo, &sigcode)) {
        mnemonic = "raise";
    } else {
        signo = TARGET_SIGILL;
        sigcode = TARGET_ILL_ILLOPC;
        mnemonic = "ill";
    }

    return gen_signal(dc, signo, sigcode, mnemonic);
}
495
static TileExcp gen_rr_opcode(DisasContext *dc, unsigned opext,
496
unsigned dest, unsigned srca, uint64_t bundle)
499
const char *mnemonic;
501
TileExcp ret = TILEGX_EXCP_NONE;
502
bool prefetch_nofault = false;
504
/* Eliminate instructions with no output before doing anything else. */
518
case OE_RR_X1(DRAIN):
521
case OE_RR_X1(FLUSHWB):
522
mnemonic = "flushwb";
525
return gen_specill(dc, dest, srca, bundle);
527
return gen_signal(dc, TARGET_SIGILL, TARGET_ILL_ILLOPC, "ill");
532
/* ??? This should yield, especially in system mode. */
536
gen_helper_ext01_ics(cpu_env);
537
dc->jmp.cond = TCG_COND_ALWAYS;
538
dc->jmp.dest = tcg_temp_new();
539
tcg_gen_ld_tl(dc->jmp.dest, cpu_env,
540
offsetof(CPUTLGState, spregs[TILEGX_SPR_EX_CONTEXT_0_0]));
541
tcg_gen_andi_tl(dc->jmp.dest, dc->jmp.dest, ~7);
544
case OE_RR_X1(SWINT0):
545
case OE_RR_X1(SWINT2):
546
case OE_RR_X1(SWINT3):
547
return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
548
case OE_RR_X1(SWINT1):
549
ret = TILEGX_EXCP_SYSCALL;
553
return TILEGX_EXCP_OPCODE_UNKNOWN;
555
qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s", mnemonic);
558
case OE_RR_X1(DTLBPR):
559
return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
563
case OE_RR_X1(FLUSH):
583
case OE_RR_X1(JALRP):
584
case OE_RR_Y1(JALRP):
591
tcg_gen_movi_tl(dest_gr(dc, TILEGX_R_LR),
592
dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
594
dc->jmp.cond = TCG_COND_ALWAYS;
595
dc->jmp.dest = tcg_temp_new();
596
tcg_gen_andi_tl(dc->jmp.dest, load_gr(dc, srca), ~7);
599
return TILEGX_EXCP_OPCODE_UNKNOWN;
601
qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s", mnemonic, reg_names[srca]);
605
tdest = dest_gr(dc, dest);
606
tsrca = load_gr(dc, srca);
609
case OE_RR_X0(CNTLZ):
610
case OE_RR_Y0(CNTLZ):
611
gen_helper_cntlz(tdest, tsrca);
614
case OE_RR_X0(CNTTZ):
615
case OE_RR_Y0(CNTTZ):
616
gen_helper_cnttz(tdest, tsrca);
619
case OE_RR_X0(FSINGLE_PACK1):
620
case OE_RR_Y0(FSINGLE_PACK1):
621
return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
624
mnemonic = "ld1s"; /* prefetch_l1_fault */
628
mnemonic = "ld1u"; /* prefetch, prefetch_l1 */
629
prefetch_nofault = (dest == TILEGX_R_ZERO);
633
mnemonic = "ld2s"; /* prefetch_l2_fault */
637
mnemonic = "ld2u"; /* prefetch_l2 */
638
prefetch_nofault = (dest == TILEGX_R_ZERO);
642
mnemonic = "ld4s"; /* prefetch_l3_fault */
646
mnemonic = "ld4u"; /* prefetch_l3 */
647
prefetch_nofault = (dest == TILEGX_R_ZERO);
649
case OE_RR_X1(LDNT1S):
653
case OE_RR_X1(LDNT1U):
657
case OE_RR_X1(LDNT2S):
661
case OE_RR_X1(LDNT2U):
665
case OE_RR_X1(LDNT4S):
669
case OE_RR_X1(LDNT4U):
681
if (!prefetch_nofault) {
682
tcg_gen_qemu_ld_tl(tdest, tsrca, dc->mmuidx, memop);
686
tcg_gen_andi_tl(tdest, tsrca, ~7);
687
tcg_gen_qemu_ld_tl(tdest, tdest, dc->mmuidx, MO_TEQ);
693
return TILEGX_EXCP_OPCODE_UNKNOWN;
695
tcg_gen_movi_tl(tdest, dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
700
gen_helper_pcnt(tdest, tsrca);
703
case OE_RR_X0(REVBITS):
704
case OE_RR_Y0(REVBITS):
705
gen_helper_revbits(tdest, tsrca);
706
mnemonic = "revbits";
708
case OE_RR_X0(REVBYTES):
709
case OE_RR_Y0(REVBYTES):
710
tcg_gen_bswap64_tl(tdest, tsrca);
711
mnemonic = "revbytes";
713
case OE_RR_X0(TBLIDXB0):
714
case OE_RR_Y0(TBLIDXB0):
715
tcg_gen_deposit_tl(tdest, load_gr(dc, dest), tsrca, 2, 8);
716
mnemonic = "tblidxb0";
718
case OE_RR_X0(TBLIDXB1):
719
case OE_RR_Y0(TBLIDXB1):
720
tcg_gen_shri_tl(tdest, tsrca, 8);
721
tcg_gen_deposit_tl(tdest, load_gr(dc, dest), tdest, 2, 8);
722
mnemonic = "tblidxb1";
724
case OE_RR_X0(TBLIDXB2):
725
case OE_RR_Y0(TBLIDXB2):
726
tcg_gen_shri_tl(tdest, tsrca, 16);
727
tcg_gen_deposit_tl(tdest, load_gr(dc, dest), tdest, 2, 8);
728
mnemonic = "tblidxb2";
730
case OE_RR_X0(TBLIDXB3):
731
case OE_RR_Y0(TBLIDXB3):
732
tcg_gen_shri_tl(tdest, tsrca, 24);
733
tcg_gen_deposit_tl(tdest, load_gr(dc, dest), tdest, 2, 8);
734
mnemonic = "tblidxb3";
737
return TILEGX_EXCP_OPCODE_UNKNOWN;
740
qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", mnemonic,
741
reg_names[dest], reg_names[srca]);
745
static TileExcp gen_rrr_opcode(DisasContext *dc, unsigned opext,
746
unsigned dest, unsigned srca, unsigned srcb)
748
TCGv tdest = dest_gr(dc, dest);
749
TCGv tsrca = load_gr(dc, srca);
750
TCGv tsrcb = load_gr(dc, srcb);
752
const char *mnemonic;
755
case OE_RRR(ADDXSC, 0, X0):
756
case OE_RRR(ADDXSC, 0, X1):
757
gen_saturate_op(tdest, tsrca, tsrcb, tcg_gen_add_tl);
760
case OE_RRR(ADDX, 0, X0):
761
case OE_RRR(ADDX, 0, X1):
762
case OE_RRR(ADDX, 0, Y0):
763
case OE_RRR(ADDX, 0, Y1):
764
tcg_gen_add_tl(tdest, tsrca, tsrcb);
765
tcg_gen_ext32s_tl(tdest, tdest);
768
case OE_RRR(ADD, 0, X0):
769
case OE_RRR(ADD, 0, X1):
770
case OE_RRR(ADD, 0, Y0):
771
case OE_RRR(ADD, 0, Y1):
772
tcg_gen_add_tl(tdest, tsrca, tsrcb);
775
case OE_RRR(AND, 0, X0):
776
case OE_RRR(AND, 0, X1):
777
case OE_RRR(AND, 5, Y0):
778
case OE_RRR(AND, 5, Y1):
779
tcg_gen_and_tl(tdest, tsrca, tsrcb);
782
case OE_RRR(CMOVEQZ, 0, X0):
783
case OE_RRR(CMOVEQZ, 4, Y0):
784
tcg_gen_movcond_tl(TCG_COND_EQ, tdest, tsrca, load_zero(dc),
785
tsrcb, load_gr(dc, dest));
786
mnemonic = "cmoveqz";
788
case OE_RRR(CMOVNEZ, 0, X0):
789
case OE_RRR(CMOVNEZ, 4, Y0):
790
tcg_gen_movcond_tl(TCG_COND_NE, tdest, tsrca, load_zero(dc),
791
tsrcb, load_gr(dc, dest));
792
mnemonic = "cmovnez";
794
case OE_RRR(CMPEQ, 0, X0):
795
case OE_RRR(CMPEQ, 0, X1):
796
case OE_RRR(CMPEQ, 3, Y0):
797
case OE_RRR(CMPEQ, 3, Y1):
798
tcg_gen_setcond_tl(TCG_COND_EQ, tdest, tsrca, tsrcb);
801
case OE_RRR(CMPEXCH4, 0, X1):
802
gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
803
TILEGX_EXCP_OPCODE_CMPEXCH4);
804
mnemonic = "cmpexch4";
806
case OE_RRR(CMPEXCH, 0, X1):
807
gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
808
TILEGX_EXCP_OPCODE_CMPEXCH);
809
mnemonic = "cmpexch";
811
case OE_RRR(CMPLES, 0, X0):
812
case OE_RRR(CMPLES, 0, X1):
813
case OE_RRR(CMPLES, 2, Y0):
814
case OE_RRR(CMPLES, 2, Y1):
815
tcg_gen_setcond_tl(TCG_COND_LE, tdest, tsrca, tsrcb);
818
case OE_RRR(CMPLEU, 0, X0):
819
case OE_RRR(CMPLEU, 0, X1):
820
case OE_RRR(CMPLEU, 2, Y0):
821
case OE_RRR(CMPLEU, 2, Y1):
822
tcg_gen_setcond_tl(TCG_COND_LEU, tdest, tsrca, tsrcb);
825
case OE_RRR(CMPLTS, 0, X0):
826
case OE_RRR(CMPLTS, 0, X1):
827
case OE_RRR(CMPLTS, 2, Y0):
828
case OE_RRR(CMPLTS, 2, Y1):
829
tcg_gen_setcond_tl(TCG_COND_LT, tdest, tsrca, tsrcb);
832
case OE_RRR(CMPLTU, 0, X0):
833
case OE_RRR(CMPLTU, 0, X1):
834
case OE_RRR(CMPLTU, 2, Y0):
835
case OE_RRR(CMPLTU, 2, Y1):
836
tcg_gen_setcond_tl(TCG_COND_LTU, tdest, tsrca, tsrcb);
839
case OE_RRR(CMPNE, 0, X0):
840
case OE_RRR(CMPNE, 0, X1):
841
case OE_RRR(CMPNE, 3, Y0):
842
case OE_RRR(CMPNE, 3, Y1):
843
tcg_gen_setcond_tl(TCG_COND_NE, tdest, tsrca, tsrcb);
846
case OE_RRR(CMULAF, 0, X0):
847
gen_helper_cmulaf(tdest, load_gr(dc, dest), tsrca, tsrcb);
850
case OE_RRR(CMULA, 0, X0):
851
gen_helper_cmula(tdest, load_gr(dc, dest), tsrca, tsrcb);
854
case OE_RRR(CMULFR, 0, X0):
855
gen_cmul2(tdest, tsrca, tsrcb, 15, 1 << 14);
858
case OE_RRR(CMULF, 0, X0):
859
gen_cmul2(tdest, tsrca, tsrcb, 15, 0);
862
case OE_RRR(CMULHR, 0, X0):
863
gen_cmul2(tdest, tsrca, tsrcb, 16, 1 << 15);
866
case OE_RRR(CMULH, 0, X0):
867
gen_cmul2(tdest, tsrca, tsrcb, 16, 0);
870
case OE_RRR(CMUL, 0, X0):
871
gen_helper_cmula(tdest, load_zero(dc), tsrca, tsrcb);
874
case OE_RRR(CRC32_32, 0, X0):
875
gen_helper_crc32_32(tdest, tsrca, tsrcb);
876
mnemonic = "crc32_32";
878
case OE_RRR(CRC32_8, 0, X0):
879
gen_helper_crc32_8(tdest, tsrca, tsrcb);
880
mnemonic = "crc32_8";
882
case OE_RRR(DBLALIGN2, 0, X0):
883
case OE_RRR(DBLALIGN2, 0, X1):
884
gen_dblaligni(tdest, tsrca, tsrcb, 16);
885
mnemonic = "dblalign2";
887
case OE_RRR(DBLALIGN4, 0, X0):
888
case OE_RRR(DBLALIGN4, 0, X1):
889
gen_dblaligni(tdest, tsrca, tsrcb, 32);
890
mnemonic = "dblalign4";
892
case OE_RRR(DBLALIGN6, 0, X0):
893
case OE_RRR(DBLALIGN6, 0, X1):
894
gen_dblaligni(tdest, tsrca, tsrcb, 48);
895
mnemonic = "dblalign6";
897
case OE_RRR(DBLALIGN, 0, X0):
898
gen_dblalign(tdest, load_gr(dc, dest), tsrca, tsrcb);
899
mnemonic = "dblalign";
901
case OE_RRR(EXCH4, 0, X1):
902
gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
903
TILEGX_EXCP_OPCODE_EXCH4);
906
case OE_RRR(EXCH, 0, X1):
907
gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
908
TILEGX_EXCP_OPCODE_EXCH);
911
case OE_RRR(FDOUBLE_ADDSUB, 0, X0):
912
case OE_RRR(FDOUBLE_ADD_FLAGS, 0, X0):
913
case OE_RRR(FDOUBLE_MUL_FLAGS, 0, X0):
914
case OE_RRR(FDOUBLE_PACK1, 0, X0):
915
case OE_RRR(FDOUBLE_PACK2, 0, X0):
916
case OE_RRR(FDOUBLE_SUB_FLAGS, 0, X0):
917
case OE_RRR(FDOUBLE_UNPACK_MAX, 0, X0):
918
case OE_RRR(FDOUBLE_UNPACK_MIN, 0, X0):
919
return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
920
case OE_RRR(FETCHADD4, 0, X1):
921
gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
922
TILEGX_EXCP_OPCODE_FETCHADD4);
923
mnemonic = "fetchadd4";
925
case OE_RRR(FETCHADDGEZ4, 0, X1):
926
gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
927
TILEGX_EXCP_OPCODE_FETCHADDGEZ4);
928
mnemonic = "fetchaddgez4";
930
case OE_RRR(FETCHADDGEZ, 0, X1):
931
gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
932
TILEGX_EXCP_OPCODE_FETCHADDGEZ);
933
mnemonic = "fetchaddgez";
935
case OE_RRR(FETCHADD, 0, X1):
936
gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
937
TILEGX_EXCP_OPCODE_FETCHADD);
938
mnemonic = "fetchadd";
940
case OE_RRR(FETCHAND4, 0, X1):
941
gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
942
TILEGX_EXCP_OPCODE_FETCHAND4);
943
mnemonic = "fetchand4";
945
case OE_RRR(FETCHAND, 0, X1):
946
gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
947
TILEGX_EXCP_OPCODE_FETCHAND);
948
mnemonic = "fetchand";
950
case OE_RRR(FETCHOR4, 0, X1):
951
gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
952
TILEGX_EXCP_OPCODE_FETCHOR4);
953
mnemonic = "fetchor4";
955
case OE_RRR(FETCHOR, 0, X1):
956
gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
957
TILEGX_EXCP_OPCODE_FETCHOR);
958
mnemonic = "fetchor";
960
case OE_RRR(FSINGLE_ADD1, 0, X0):
961
case OE_RRR(FSINGLE_ADDSUB2, 0, X0):
962
case OE_RRR(FSINGLE_MUL1, 0, X0):
963
case OE_RRR(FSINGLE_MUL2, 0, X0):
964
case OE_RRR(FSINGLE_PACK2, 0, X0):
965
case OE_RRR(FSINGLE_SUB1, 0, X0):
966
return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
967
case OE_RRR(MNZ, 0, X0):
968
case OE_RRR(MNZ, 0, X1):
969
case OE_RRR(MNZ, 4, Y0):
970
case OE_RRR(MNZ, 4, Y1):
972
tcg_gen_movcond_tl(TCG_COND_NE, tdest, tsrca, t0, tsrcb, t0);
975
case OE_RRR(MULAX, 0, X0):
976
case OE_RRR(MULAX, 3, Y0):
977
tcg_gen_mul_tl(tdest, tsrca, tsrcb);
978
tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
979
tcg_gen_ext32s_tl(tdest, tdest);
982
case OE_RRR(MULA_HS_HS, 0, X0):
983
case OE_RRR(MULA_HS_HS, 9, Y0):
984
gen_mul_half(tdest, tsrca, tsrcb, HS, HS);
985
tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
986
mnemonic = "mula_hs_hs";
988
case OE_RRR(MULA_HS_HU, 0, X0):
989
gen_mul_half(tdest, tsrca, tsrcb, HS, HU);
990
tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
991
mnemonic = "mula_hs_hu";
993
case OE_RRR(MULA_HS_LS, 0, X0):
994
gen_mul_half(tdest, tsrca, tsrcb, HS, LS);
995
tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
996
mnemonic = "mula_hs_ls";
998
case OE_RRR(MULA_HS_LU, 0, X0):
999
gen_mul_half(tdest, tsrca, tsrcb, HS, LU);
1000
tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
1001
mnemonic = "mula_hs_lu";
1003
case OE_RRR(MULA_HU_HU, 0, X0):
1004
case OE_RRR(MULA_HU_HU, 9, Y0):
1005
gen_mul_half(tdest, tsrca, tsrcb, HU, HU);
1006
tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
1007
mnemonic = "mula_hu_hu";
1009
case OE_RRR(MULA_HU_LS, 0, X0):
1010
gen_mul_half(tdest, tsrca, tsrcb, HU, LS);
1011
tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
1012
mnemonic = "mula_hu_ls";
1014
case OE_RRR(MULA_HU_LU, 0, X0):
1015
gen_mul_half(tdest, tsrca, tsrcb, HU, LU);
1016
tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
1017
mnemonic = "mula_hu_lu";
1019
case OE_RRR(MULA_LS_LS, 0, X0):
1020
case OE_RRR(MULA_LS_LS, 9, Y0):
1021
gen_mul_half(tdest, tsrca, tsrcb, LS, LS);
1022
tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
1023
mnemonic = "mula_ls_ls";
1025
case OE_RRR(MULA_LS_LU, 0, X0):
1026
gen_mul_half(tdest, tsrca, tsrcb, LS, LU);
1027
tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
1028
mnemonic = "mula_ls_lu";
1030
case OE_RRR(MULA_LU_LU, 0, X0):
1031
case OE_RRR(MULA_LU_LU, 9, Y0):
1032
gen_mul_half(tdest, tsrca, tsrcb, LU, LU);
1033
tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
1034
mnemonic = "mula_lu_lu";
1036
case OE_RRR(MULX, 0, X0):
1037
case OE_RRR(MULX, 3, Y0):
1038
tcg_gen_mul_tl(tdest, tsrca, tsrcb);
1039
tcg_gen_ext32s_tl(tdest, tdest);
1042
case OE_RRR(MUL_HS_HS, 0, X0):
1043
case OE_RRR(MUL_HS_HS, 8, Y0):
1044
gen_mul_half(tdest, tsrca, tsrcb, HS, HS);
1045
mnemonic = "mul_hs_hs";
1047
case OE_RRR(MUL_HS_HU, 0, X0):
1048
gen_mul_half(tdest, tsrca, tsrcb, HS, HU);
1049
mnemonic = "mul_hs_hu";
1051
case OE_RRR(MUL_HS_LS, 0, X0):
1052
gen_mul_half(tdest, tsrca, tsrcb, HS, LS);
1053
mnemonic = "mul_hs_ls";
1055
case OE_RRR(MUL_HS_LU, 0, X0):
1056
gen_mul_half(tdest, tsrca, tsrcb, HS, LU);
1057
mnemonic = "mul_hs_lu";
1059
case OE_RRR(MUL_HU_HU, 0, X0):
1060
case OE_RRR(MUL_HU_HU, 8, Y0):
1061
gen_mul_half(tdest, tsrca, tsrcb, HU, HU);
1062
mnemonic = "mul_hu_hu";
1064
case OE_RRR(MUL_HU_LS, 0, X0):
1065
gen_mul_half(tdest, tsrca, tsrcb, HU, LS);
1066
mnemonic = "mul_hu_ls";
1068
case OE_RRR(MUL_HU_LU, 0, X0):
1069
gen_mul_half(tdest, tsrca, tsrcb, HU, LU);
1070
mnemonic = "mul_hu_lu";
1072
case OE_RRR(MUL_LS_LS, 0, X0):
1073
case OE_RRR(MUL_LS_LS, 8, Y0):
1074
gen_mul_half(tdest, tsrca, tsrcb, LS, LS);
1075
mnemonic = "mul_ls_ls";
1077
case OE_RRR(MUL_LS_LU, 0, X0):
1078
gen_mul_half(tdest, tsrca, tsrcb, LS, LU);
1079
mnemonic = "mul_ls_lu";
1081
case OE_RRR(MUL_LU_LU, 0, X0):
1082
case OE_RRR(MUL_LU_LU, 8, Y0):
1083
gen_mul_half(tdest, tsrca, tsrcb, LU, LU);
1084
mnemonic = "mul_lu_lu";
1086
case OE_RRR(MZ, 0, X0):
1087
case OE_RRR(MZ, 0, X1):
1088
case OE_RRR(MZ, 4, Y0):
1089
case OE_RRR(MZ, 4, Y1):
1091
tcg_gen_movcond_tl(TCG_COND_EQ, tdest, tsrca, t0, tsrcb, t0);
1094
case OE_RRR(NOR, 0, X0):
1095
case OE_RRR(NOR, 0, X1):
1096
case OE_RRR(NOR, 5, Y0):
1097
case OE_RRR(NOR, 5, Y1):
1098
tcg_gen_nor_tl(tdest, tsrca, tsrcb);
1101
case OE_RRR(OR, 0, X0):
1102
case OE_RRR(OR, 0, X1):
1103
case OE_RRR(OR, 5, Y0):
1104
case OE_RRR(OR, 5, Y1):
1105
tcg_gen_or_tl(tdest, tsrca, tsrcb);
1108
case OE_RRR(ROTL, 0, X0):
1109
case OE_RRR(ROTL, 0, X1):
1110
case OE_RRR(ROTL, 6, Y0):
1111
case OE_RRR(ROTL, 6, Y1):
1112
tcg_gen_andi_tl(tdest, tsrcb, 63);
1113
tcg_gen_rotl_tl(tdest, tsrca, tdest);
1116
case OE_RRR(SHL1ADDX, 0, X0):
1117
case OE_RRR(SHL1ADDX, 0, X1):
1118
case OE_RRR(SHL1ADDX, 7, Y0):
1119
case OE_RRR(SHL1ADDX, 7, Y1):
1120
tcg_gen_shli_tl(tdest, tsrca, 1);
1121
tcg_gen_add_tl(tdest, tdest, tsrcb);
1122
tcg_gen_ext32s_tl(tdest, tdest);
1123
mnemonic = "shl1addx";
1125
case OE_RRR(SHL1ADD, 0, X0):
1126
case OE_RRR(SHL1ADD, 0, X1):
1127
case OE_RRR(SHL1ADD, 1, Y0):
1128
case OE_RRR(SHL1ADD, 1, Y1):
1129
tcg_gen_shli_tl(tdest, tsrca, 1);
1130
tcg_gen_add_tl(tdest, tdest, tsrcb);
1131
mnemonic = "shl1add";
1133
case OE_RRR(SHL2ADDX, 0, X0):
1134
case OE_RRR(SHL2ADDX, 0, X1):
1135
case OE_RRR(SHL2ADDX, 7, Y0):
1136
case OE_RRR(SHL2ADDX, 7, Y1):
1137
tcg_gen_shli_tl(tdest, tsrca, 2);
1138
tcg_gen_add_tl(tdest, tdest, tsrcb);
1139
tcg_gen_ext32s_tl(tdest, tdest);
1140
mnemonic = "shl2addx";
1142
case OE_RRR(SHL2ADD, 0, X0):
1143
case OE_RRR(SHL2ADD, 0, X1):
1144
case OE_RRR(SHL2ADD, 1, Y0):
1145
case OE_RRR(SHL2ADD, 1, Y1):
1146
tcg_gen_shli_tl(tdest, tsrca, 2);
1147
tcg_gen_add_tl(tdest, tdest, tsrcb);
1148
mnemonic = "shl2add";
1150
case OE_RRR(SHL3ADDX, 0, X0):
1151
case OE_RRR(SHL3ADDX, 0, X1):
1152
case OE_RRR(SHL3ADDX, 7, Y0):
1153
case OE_RRR(SHL3ADDX, 7, Y1):
1154
tcg_gen_shli_tl(tdest, tsrca, 3);
1155
tcg_gen_add_tl(tdest, tdest, tsrcb);
1156
tcg_gen_ext32s_tl(tdest, tdest);
1157
mnemonic = "shl3addx";
1159
case OE_RRR(SHL3ADD, 0, X0):
1160
case OE_RRR(SHL3ADD, 0, X1):
1161
case OE_RRR(SHL3ADD, 1, Y0):
1162
case OE_RRR(SHL3ADD, 1, Y1):
1163
tcg_gen_shli_tl(tdest, tsrca, 3);
1164
tcg_gen_add_tl(tdest, tdest, tsrcb);
1165
mnemonic = "shl3add";
1167
case OE_RRR(SHLX, 0, X0):
1168
case OE_RRR(SHLX, 0, X1):
1169
tcg_gen_andi_tl(tdest, tsrcb, 31);
1170
tcg_gen_shl_tl(tdest, tsrca, tdest);
1171
tcg_gen_ext32s_tl(tdest, tdest);
1174
case OE_RRR(SHL, 0, X0):
1175
case OE_RRR(SHL, 0, X1):
1176
case OE_RRR(SHL, 6, Y0):
1177
case OE_RRR(SHL, 6, Y1):
1178
tcg_gen_andi_tl(tdest, tsrcb, 63);
1179
tcg_gen_shl_tl(tdest, tsrca, tdest);
1182
case OE_RRR(SHRS, 0, X0):
1183
case OE_RRR(SHRS, 0, X1):
1184
case OE_RRR(SHRS, 6, Y0):
1185
case OE_RRR(SHRS, 6, Y1):
1186
tcg_gen_andi_tl(tdest, tsrcb, 63);
1187
tcg_gen_sar_tl(tdest, tsrca, tdest);
1190
case OE_RRR(SHRUX, 0, X0):
1191
case OE_RRR(SHRUX, 0, X1):
1192
t0 = tcg_temp_new();
1193
tcg_gen_andi_tl(t0, tsrcb, 31);
1194
tcg_gen_ext32u_tl(tdest, tsrca);
1195
tcg_gen_shr_tl(tdest, tdest, t0);
1196
tcg_gen_ext32s_tl(tdest, tdest);
1200
case OE_RRR(SHRU, 0, X0):
1201
case OE_RRR(SHRU, 0, X1):
1202
case OE_RRR(SHRU, 6, Y0):
1203
case OE_RRR(SHRU, 6, Y1):
1204
tcg_gen_andi_tl(tdest, tsrcb, 63);
1205
tcg_gen_shr_tl(tdest, tsrca, tdest);
1208
case OE_RRR(SHUFFLEBYTES, 0, X0):
1209
gen_helper_shufflebytes(tdest, load_gr(dc, dest), tsrca, tsrca);
1210
mnemonic = "shufflebytes";
1212
case OE_RRR(SUBXSC, 0, X0):
1213
case OE_RRR(SUBXSC, 0, X1):
1214
gen_saturate_op(tdest, tsrca, tsrcb, tcg_gen_sub_tl);
1215
mnemonic = "subxsc";
1217
case OE_RRR(SUBX, 0, X0):
1218
case OE_RRR(SUBX, 0, X1):
1219
case OE_RRR(SUBX, 0, Y0):
1220
case OE_RRR(SUBX, 0, Y1):
1221
tcg_gen_sub_tl(tdest, tsrca, tsrcb);
1222
tcg_gen_ext32s_tl(tdest, tdest);
1225
case OE_RRR(SUB, 0, X0):
1226
case OE_RRR(SUB, 0, X1):
1227
case OE_RRR(SUB, 0, Y0):
1228
case OE_RRR(SUB, 0, Y1):
1229
tcg_gen_sub_tl(tdest, tsrca, tsrcb);
1232
case OE_RRR(V1ADDUC, 0, X0):
1233
case OE_RRR(V1ADDUC, 0, X1):
1234
return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1235
case OE_RRR(V1ADD, 0, X0):
1236
case OE_RRR(V1ADD, 0, X1):
1237
gen_v12add(tdest, tsrca, tsrcb, V1_IMM(0x80));
1240
case OE_RRR(V1ADIFFU, 0, X0):
1241
case OE_RRR(V1AVGU, 0, X0):
1242
return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1243
case OE_RRR(V1CMPEQ, 0, X0):
1244
case OE_RRR(V1CMPEQ, 0, X1):
1245
tcg_gen_xor_tl(tdest, tsrca, tsrcb);
1246
gen_v1cmpeq0(tdest);
1247
mnemonic = "v1cmpeq";
1249
case OE_RRR(V1CMPLES, 0, X0):
1250
case OE_RRR(V1CMPLES, 0, X1):
1251
case OE_RRR(V1CMPLEU, 0, X0):
1252
case OE_RRR(V1CMPLEU, 0, X1):
1253
case OE_RRR(V1CMPLTS, 0, X0):
1254
case OE_RRR(V1CMPLTS, 0, X1):
1255
case OE_RRR(V1CMPLTU, 0, X0):
1256
case OE_RRR(V1CMPLTU, 0, X1):
1257
return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1258
case OE_RRR(V1CMPNE, 0, X0):
1259
case OE_RRR(V1CMPNE, 0, X1):
1260
tcg_gen_xor_tl(tdest, tsrca, tsrcb);
1261
gen_v1cmpne0(tdest);
1262
mnemonic = "v1cmpne";
1264
case OE_RRR(V1DDOTPUA, 0, X0):
1265
case OE_RRR(V1DDOTPUSA, 0, X0):
1266
case OE_RRR(V1DDOTPUS, 0, X0):
1267
case OE_RRR(V1DDOTPU, 0, X0):
1268
case OE_RRR(V1DOTPA, 0, X0):
1269
case OE_RRR(V1DOTPUA, 0, X0):
1270
case OE_RRR(V1DOTPUSA, 0, X0):
1271
case OE_RRR(V1DOTPUS, 0, X0):
1272
case OE_RRR(V1DOTPU, 0, X0):
1273
case OE_RRR(V1DOTP, 0, X0):
1274
return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1275
case OE_RRR(V1INT_H, 0, X0):
1276
case OE_RRR(V1INT_H, 0, X1):
1277
gen_helper_v1int_h(tdest, tsrca, tsrcb);
1278
mnemonic = "v1int_h";
1280
case OE_RRR(V1INT_L, 0, X0):
1281
case OE_RRR(V1INT_L, 0, X1):
1282
gen_helper_v1int_l(tdest, tsrca, tsrcb);
1283
mnemonic = "v1int_l";
1285
case OE_RRR(V1MAXU, 0, X0):
1286
case OE_RRR(V1MAXU, 0, X1):
1287
case OE_RRR(V1MINU, 0, X0):
1288
case OE_RRR(V1MINU, 0, X1):
1289
case OE_RRR(V1MNZ, 0, X0):
1290
case OE_RRR(V1MNZ, 0, X1):
1291
return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1292
case OE_RRR(V1MULTU, 0, X0):
1293
gen_helper_v1multu(tdest, tsrca, tsrcb);
1294
mnemonic = "v1multu";
1296
case OE_RRR(V1MULUS, 0, X0):
1297
case OE_RRR(V1MULU, 0, X0):
1298
case OE_RRR(V1MZ, 0, X0):
1299
case OE_RRR(V1MZ, 0, X1):
1300
case OE_RRR(V1SADAU, 0, X0):
1301
case OE_RRR(V1SADU, 0, X0):
1302
return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1303
case OE_RRR(V1SHL, 0, X0):
1304
case OE_RRR(V1SHL, 0, X1):
1305
gen_helper_v1shl(tdest, tsrca, tsrcb);
1308
case OE_RRR(V1SHRS, 0, X0):
1309
case OE_RRR(V1SHRS, 0, X1):
1310
gen_helper_v1shrs(tdest, tsrca, tsrcb);
1311
mnemonic = "v1shrs";
1313
case OE_RRR(V1SHRU, 0, X0):
1314
case OE_RRR(V1SHRU, 0, X1):
1315
gen_helper_v1shru(tdest, tsrca, tsrcb);
1316
mnemonic = "v1shru";
1318
case OE_RRR(V1SUBUC, 0, X0):
1319
case OE_RRR(V1SUBUC, 0, X1):
1320
return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1321
case OE_RRR(V1SUB, 0, X0):
1322
case OE_RRR(V1SUB, 0, X1):
1323
gen_v12sub(tdest, tsrca, tsrcb, V1_IMM(0x80));
1326
case OE_RRR(V2ADDSC, 0, X0):
1327
case OE_RRR(V2ADDSC, 0, X1):
1328
return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1329
case OE_RRR(V2ADD, 0, X0):
1330
case OE_RRR(V2ADD, 0, X1):
1331
gen_v12add(tdest, tsrca, tsrcb, V2_IMM(0x8000));
1334
case OE_RRR(V2ADIFFS, 0, X0):
1335
case OE_RRR(V2AVGS, 0, X0):
1336
case OE_RRR(V2CMPEQ, 0, X0):
1337
case OE_RRR(V2CMPEQ, 0, X1):
1338
case OE_RRR(V2CMPLES, 0, X0):
1339
case OE_RRR(V2CMPLES, 0, X1):
1340
case OE_RRR(V2CMPLEU, 0, X0):
1341
case OE_RRR(V2CMPLEU, 0, X1):
1342
case OE_RRR(V2CMPLTS, 0, X0):
1343
case OE_RRR(V2CMPLTS, 0, X1):
1344
case OE_RRR(V2CMPLTU, 0, X0):
1345
case OE_RRR(V2CMPLTU, 0, X1):
1346
case OE_RRR(V2CMPNE, 0, X0):
1347
case OE_RRR(V2CMPNE, 0, X1):
1348
case OE_RRR(V2DOTPA, 0, X0):
1349
case OE_RRR(V2DOTP, 0, X0):
1350
return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1351
case OE_RRR(V2INT_H, 0, X0):
1352
case OE_RRR(V2INT_H, 0, X1):
1353
gen_helper_v2int_h(tdest, tsrca, tsrcb);
1354
mnemonic = "v2int_h";
1356
case OE_RRR(V2INT_L, 0, X0):
1357
case OE_RRR(V2INT_L, 0, X1):
1358
gen_helper_v2int_l(tdest, tsrca, tsrcb);
1359
mnemonic = "v2int_l";
1361
case OE_RRR(V2MAXS, 0, X0):
1362
case OE_RRR(V2MAXS, 0, X1):
1363
case OE_RRR(V2MINS, 0, X0):
1364
case OE_RRR(V2MINS, 0, X1):
1365
case OE_RRR(V2MNZ, 0, X0):
1366
case OE_RRR(V2MNZ, 0, X1):
1367
case OE_RRR(V2MULFSC, 0, X0):
1368
case OE_RRR(V2MULS, 0, X0):
1369
return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1370
case OE_RRR(V2MULTS, 0, X0):
1371
gen_helper_v2mults(tdest, tsrca, tsrcb);
1372
mnemonic = "v2mults";
1374
case OE_RRR(V2MZ, 0, X0):
1375
case OE_RRR(V2MZ, 0, X1):
1376
case OE_RRR(V2PACKH, 0, X0):
1377
case OE_RRR(V2PACKH, 0, X1):
1378
case OE_RRR(V2PACKL, 0, X0):
1379
case OE_RRR(V2PACKL, 0, X1):
1380
case OE_RRR(V2PACKUC, 0, X0):
1381
case OE_RRR(V2PACKUC, 0, X1):
1382
case OE_RRR(V2SADAS, 0, X0):
1383
case OE_RRR(V2SADAU, 0, X0):
1384
case OE_RRR(V2SADS, 0, X0):
1385
case OE_RRR(V2SADU, 0, X0):
1386
case OE_RRR(V2SHLSC, 0, X0):
1387
case OE_RRR(V2SHLSC, 0, X1):
1388
return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1389
case OE_RRR(V2SHL, 0, X0):
1390
case OE_RRR(V2SHL, 0, X1):
1391
gen_helper_v2shl(tdest, tsrca, tsrcb);
1394
case OE_RRR(V2SHRS, 0, X0):
1395
case OE_RRR(V2SHRS, 0, X1):
1396
gen_helper_v2shrs(tdest, tsrca, tsrcb);
1397
mnemonic = "v2shrs";
1399
case OE_RRR(V2SHRU, 0, X0):
1400
case OE_RRR(V2SHRU, 0, X1):
1401
gen_helper_v2shru(tdest, tsrca, tsrcb);
1402
mnemonic = "v2shru";
1404
case OE_RRR(V2SUBSC, 0, X0):
1405
case OE_RRR(V2SUBSC, 0, X1):
1406
return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1407
case OE_RRR(V2SUB, 0, X0):
1408
case OE_RRR(V2SUB, 0, X1):
1409
gen_v12sub(tdest, tsrca, tsrcb, V2_IMM(0x8000));
1412
case OE_RRR(V4ADDSC, 0, X0):
1413
case OE_RRR(V4ADDSC, 0, X1):
1414
return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1415
case OE_RRR(V4ADD, 0, X0):
1416
case OE_RRR(V4ADD, 0, X1):
1417
gen_v4op(tdest, tsrca, tsrcb, tcg_gen_add_i32);
1420
case OE_RRR(V4INT_H, 0, X0):
1421
case OE_RRR(V4INT_H, 0, X1):
1422
tcg_gen_shri_tl(tdest, tsrcb, 32);
1423
tcg_gen_deposit_tl(tdest, tsrca, tdest, 0, 32);
1424
mnemonic = "v4int_h";
1426
case OE_RRR(V4INT_L, 0, X0):
1427
case OE_RRR(V4INT_L, 0, X1):
1428
tcg_gen_deposit_tl(tdest, tsrcb, tsrca, 32, 32);
1429
mnemonic = "v4int_l";
1431
case OE_RRR(V4PACKSC, 0, X0):
1432
case OE_RRR(V4PACKSC, 0, X1):
1433
case OE_RRR(V4SHLSC, 0, X0):
1434
case OE_RRR(V4SHLSC, 0, X1):
1435
return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1436
case OE_RRR(V4SHL, 0, X0):
1437
case OE_RRR(V4SHL, 0, X1):
1438
gen_v4sh(tdest, tsrca, tsrcb, tcg_gen_shl_i32);
1441
case OE_RRR(V4SHRS, 0, X0):
1442
case OE_RRR(V4SHRS, 0, X1):
1443
gen_v4sh(tdest, tsrca, tsrcb, tcg_gen_sar_i32);
1444
mnemonic = "v4shrs";
1446
case OE_RRR(V4SHRU, 0, X0):
1447
case OE_RRR(V4SHRU, 0, X1):
1448
gen_v4sh(tdest, tsrca, tsrcb, tcg_gen_shr_i32);
1449
mnemonic = "v4shru";
1451
case OE_RRR(V4SUBSC, 0, X0):
1452
case OE_RRR(V4SUBSC, 0, X1):
1453
return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1454
case OE_RRR(V4SUB, 0, X0):
1455
case OE_RRR(V4SUB, 0, X1):
1456
gen_v4op(tdest, tsrca, tsrcb, tcg_gen_sub_i32);
1459
case OE_RRR(XOR, 0, X0):
1460
case OE_RRR(XOR, 0, X1):
1461
case OE_RRR(XOR, 5, Y0):
1462
case OE_RRR(XOR, 5, Y1):
1463
tcg_gen_xor_tl(tdest, tsrca, tsrcb);
1467
return TILEGX_EXCP_OPCODE_UNKNOWN;
1470
qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %s", mnemonic,
1471
reg_names[dest], reg_names[srca], reg_names[srcb]);
1472
return TILEGX_EXCP_NONE;
1475
/* Translate a register-register-immediate instruction from either the
 * X or Y pipes.  @opext encodes opcode+extension+pipe (OE/OE_IM/OE_SH),
 * @imm is the already sign-extended immediate.  Returns TILEGX_EXCP_NONE
 * on success, or an UNKNOWN/UNIMPLEMENTED exception code.
 * NOTE(review): structural lines (switch skeleton, breaks, memop and
 * mnemonic assignments) were missing from this copy and have been
 * reconstructed — verify against upstream qemu target-tilegx/translate.c.
 */
static TileExcp gen_rri_opcode(DisasContext *dc, unsigned opext,
                               unsigned dest, unsigned srca, int imm)
{
    TCGv tdest = dest_gr(dc, dest);
    TCGv tsrca = load_gr(dc, srca);
    bool prefetch_nofault = false;
    const char *mnemonic;
    TCGMemOp memop;
    unsigned i2, i3;
    TCGv t0;

    switch (opext) {
    case OE(ADDI_OPCODE_Y0, 0, Y0):
    case OE(ADDI_OPCODE_Y1, 0, Y1):
    case OE_IM(ADDI, X0):
    case OE_IM(ADDI, X1):
        tcg_gen_addi_tl(tdest, tsrca, imm);
        mnemonic = "addi";
        break;
    case OE(ADDXI_OPCODE_Y0, 0, Y0):
    case OE(ADDXI_OPCODE_Y1, 0, Y1):
    case OE_IM(ADDXI, X0):
    case OE_IM(ADDXI, X1):
        /* "addx" ops sign-extend the 32-bit result.  */
        tcg_gen_addi_tl(tdest, tsrca, imm);
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "addxi";
        break;
    case OE(ANDI_OPCODE_Y0, 0, Y0):
    case OE(ANDI_OPCODE_Y1, 0, Y1):
    case OE_IM(ANDI, X0):
    case OE_IM(ANDI, X1):
        tcg_gen_andi_tl(tdest, tsrca, imm);
        mnemonic = "andi";
        break;
    case OE(CMPEQI_OPCODE_Y0, 0, Y0):
    case OE(CMPEQI_OPCODE_Y1, 0, Y1):
    case OE_IM(CMPEQI, X0):
    case OE_IM(CMPEQI, X1):
        tcg_gen_setcondi_tl(TCG_COND_EQ, tdest, tsrca, imm);
        mnemonic = "cmpeqi";
        break;
    case OE(CMPLTSI_OPCODE_Y0, 0, Y0):
    case OE(CMPLTSI_OPCODE_Y1, 0, Y1):
    case OE_IM(CMPLTSI, X0):
    case OE_IM(CMPLTSI, X1):
        tcg_gen_setcondi_tl(TCG_COND_LT, tdest, tsrca, imm);
        mnemonic = "cmpltsi";
        break;
    case OE_IM(CMPLTUI, X0):
    case OE_IM(CMPLTUI, X1):
        tcg_gen_setcondi_tl(TCG_COND_LTU, tdest, tsrca, imm);
        mnemonic = "cmpltui";
        break;
    case OE_IM(LD1S_ADD, X1):
        memop = MO_SB;
        mnemonic = "ld1s_add"; /* prefetch_add_l1_fault */
        goto do_load_add;
    case OE_IM(LD1U_ADD, X1):
        memop = MO_UB;
        mnemonic = "ld1u_add"; /* prefetch_add_l1 */
        /* Loads to the zero register are the prefetch encodings;
           they must not fault.  */
        prefetch_nofault = (dest == TILEGX_R_ZERO);
        goto do_load_add;
    case OE_IM(LD2S_ADD, X1):
        memop = MO_TESW;
        mnemonic = "ld2s_add"; /* prefetch_add_l2_fault */
        goto do_load_add;
    case OE_IM(LD2U_ADD, X1):
        memop = MO_TEUW;
        mnemonic = "ld2u_add"; /* prefetch_add_l2 */
        prefetch_nofault = (dest == TILEGX_R_ZERO);
        goto do_load_add;
    case OE_IM(LD4S_ADD, X1):
        memop = MO_TESL;
        mnemonic = "ld4s_add"; /* prefetch_add_l3_fault */
        goto do_load_add;
    case OE_IM(LD4U_ADD, X1):
        memop = MO_TEUL;
        mnemonic = "ld4u_add"; /* prefetch_add_l3 */
        prefetch_nofault = (dest == TILEGX_R_ZERO);
        goto do_load_add;
    case OE_IM(LDNT1S_ADD, X1):
        memop = MO_SB;
        mnemonic = "ldnt1s_add";
        goto do_load_add;
    case OE_IM(LDNT1U_ADD, X1):
        memop = MO_UB;
        mnemonic = "ldnt1u_add";
        goto do_load_add;
    case OE_IM(LDNT2S_ADD, X1):
        memop = MO_TESW;
        mnemonic = "ldnt2s_add";
        goto do_load_add;
    case OE_IM(LDNT2U_ADD, X1):
        memop = MO_TEUW;
        mnemonic = "ldnt2u_add";
        goto do_load_add;
    case OE_IM(LDNT4S_ADD, X1):
        memop = MO_TESL;
        mnemonic = "ldnt4s_add";
        goto do_load_add;
    case OE_IM(LDNT4U_ADD, X1):
        memop = MO_TEUL;
        mnemonic = "ldnt4u_add";
        goto do_load_add;
    case OE_IM(LDNT_ADD, X1):
        memop = MO_TEQ;
        mnemonic = "ldnt_add";
        goto do_load_add;
    case OE_IM(LD_ADD, X1):
        memop = MO_TEQ;
        mnemonic = "ld_add";
    do_load_add:
        if (!prefetch_nofault) {
            tcg_gen_qemu_ld_tl(tdest, tsrca, dc->mmuidx, memop);
        }
        /* Post-increment the address register.  */
        tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);
        break;
    case OE_IM(LDNA_ADD, X1):
        /* Load no-align: force 8-byte alignment of the address.  */
        tcg_gen_andi_tl(tdest, tsrca, ~7);
        tcg_gen_qemu_ld_tl(tdest, tdest, dc->mmuidx, MO_TEQ);
        tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);
        mnemonic = "ldna_add";
        break;
    case OE_IM(ORI, X0):
    case OE_IM(ORI, X1):
        tcg_gen_ori_tl(tdest, tsrca, imm);
        mnemonic = "ori";
        break;
    case OE_IM(V1ADDI, X0):
    case OE_IM(V1ADDI, X1):
        t0 = tcg_const_tl(V1_IMM(imm));
        gen_v12add(tdest, tsrca, t0, V1_IMM(0x80));
        tcg_temp_free(t0);
        mnemonic = "v1addi";
        break;
    case OE_IM(V1CMPEQI, X0):
    case OE_IM(V1CMPEQI, X1):
        tcg_gen_xori_tl(tdest, tsrca, V1_IMM(imm));
        gen_v1cmpeq0(tdest);
        mnemonic = "v1cmpeqi";
        break;
    case OE_IM(V1CMPLTSI, X0):
    case OE_IM(V1CMPLTSI, X1):
    case OE_IM(V1CMPLTUI, X0):
    case OE_IM(V1CMPLTUI, X1):
    case OE_IM(V1MAXUI, X0):
    case OE_IM(V1MAXUI, X1):
    case OE_IM(V1MINUI, X0):
    case OE_IM(V1MINUI, X1):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_IM(V2ADDI, X0):
    case OE_IM(V2ADDI, X1):
        t0 = tcg_const_tl(V2_IMM(imm));
        gen_v12add(tdest, tsrca, t0, V2_IMM(0x8000));
        tcg_temp_free(t0);
        mnemonic = "v2addi";
        break;
    case OE_IM(V2CMPEQI, X0):
    case OE_IM(V2CMPEQI, X1):
    case OE_IM(V2CMPLTSI, X0):
    case OE_IM(V2CMPLTSI, X1):
    case OE_IM(V2CMPLTUI, X0):
    case OE_IM(V2CMPLTUI, X1):
    case OE_IM(V2MAXSI, X0):
    case OE_IM(V2MAXSI, X1):
    case OE_IM(V2MINSI, X0):
    case OE_IM(V2MINSI, X1):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_IM(XORI, X0):
    case OE_IM(XORI, X1):
        tcg_gen_xori_tl(tdest, tsrca, imm);
        mnemonic = "xori";
        break;

    case OE_SH(ROTLI, X0):
    case OE_SH(ROTLI, X1):
    case OE_SH(ROTLI, Y0):
    case OE_SH(ROTLI, Y1):
        tcg_gen_rotli_tl(tdest, tsrca, imm);
        mnemonic = "rotli";
        break;
    case OE_SH(SHLI, X0):
    case OE_SH(SHLI, X1):
    case OE_SH(SHLI, Y0):
    case OE_SH(SHLI, Y1):
        tcg_gen_shli_tl(tdest, tsrca, imm);
        mnemonic = "shli";
        break;
    case OE_SH(SHLXI, X0):
    case OE_SH(SHLXI, X1):
        /* 32-bit shift: only the low 5 immediate bits matter.  */
        tcg_gen_shli_tl(tdest, tsrca, imm & 31);
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "shlxi";
        break;
    case OE_SH(SHRSI, X0):
    case OE_SH(SHRSI, X1):
    case OE_SH(SHRSI, Y0):
    case OE_SH(SHRSI, Y1):
        tcg_gen_sari_tl(tdest, tsrca, imm);
        mnemonic = "shrsi";
        break;
    case OE_SH(SHRUI, X0):
    case OE_SH(SHRUI, X1):
    case OE_SH(SHRUI, Y0):
    case OE_SH(SHRUI, Y1):
        tcg_gen_shri_tl(tdest, tsrca, imm);
        mnemonic = "shrui";
        break;
    case OE_SH(SHRUXI, X0):
    case OE_SH(SHRUXI, X1):
        /* Zero shift amount still sign-extends the 32-bit result.  */
        if ((imm & 31) == 0) {
            tcg_gen_ext32s_tl(tdest, tsrca);
        } else {
            tcg_gen_ext32u_tl(tdest, tsrca);
            tcg_gen_shri_tl(tdest, tdest, imm & 31);
        }
        mnemonic = "shruxi";
        break;
    case OE_SH(V1SHLI, X0):
    case OE_SH(V1SHLI, X1):
        /* Mask off the bits that would shift out of each byte lane,
           then do one 64-bit shift for all lanes.  */
        i2 = imm & 7;
        i3 = 0xff >> i2;
        tcg_gen_andi_tl(tdest, tsrca, V1_IMM(i3));
        tcg_gen_shli_tl(tdest, tdest, i2);
        mnemonic = "v1shli";
        break;
    case OE_SH(V1SHRSI, X0):
    case OE_SH(V1SHRSI, X1):
        t0 = tcg_const_tl(imm & 7);
        gen_helper_v1shrs(tdest, tsrca, t0);
        tcg_temp_free(t0);
        mnemonic = "v1shrsi";
        break;
    case OE_SH(V1SHRUI, X0):
    case OE_SH(V1SHRUI, X1):
        i2 = imm & 7;
        i3 = (0xff << i2) & 0xff;
        tcg_gen_andi_tl(tdest, tsrca, V1_IMM(i3));
        tcg_gen_shri_tl(tdest, tdest, i2);
        mnemonic = "v1shrui";
        break;
    case OE_SH(V2SHLI, X0):
    case OE_SH(V2SHLI, X1):
        i2 = imm & 15;
        i3 = 0xffff >> i2;
        tcg_gen_andi_tl(tdest, tsrca, V2_IMM(i3));
        tcg_gen_shli_tl(tdest, tdest, i2);
        mnemonic = "v2shli";
        break;
    case OE_SH(V2SHRSI, X0):
    case OE_SH(V2SHRSI, X1):
        t0 = tcg_const_tl(imm & 15);
        gen_helper_v2shrs(tdest, tsrca, t0);
        tcg_temp_free(t0);
        mnemonic = "v2shrsi";
        break;
    case OE_SH(V2SHRUI, X0):
    case OE_SH(V2SHRUI, X1):
        i2 = imm & 15;
        i3 = (0xffff << i2) & 0xffff;
        tcg_gen_andi_tl(tdest, tsrca, V2_IMM(i3));
        tcg_gen_shri_tl(tdest, tdest, i2);
        mnemonic = "v2shrui";
        break;

    case OE(ADDLI_OPCODE_X0, 0, X0):
    case OE(ADDLI_OPCODE_X1, 0, X1):
        tcg_gen_addi_tl(tdest, tsrca, imm);
        mnemonic = "addli";
        break;
    case OE(ADDXLI_OPCODE_X0, 0, X0):
    case OE(ADDXLI_OPCODE_X1, 0, X1):
        tcg_gen_addi_tl(tdest, tsrca, imm);
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "addxli";
        break;
    case OE(SHL16INSLI_OPCODE_X0, 0, X0):
    case OE(SHL16INSLI_OPCODE_X1, 0, X1):
        tcg_gen_shli_tl(tdest, tsrca, 16);
        tcg_gen_ori_tl(tdest, tdest, imm & 0xffff);
        mnemonic = "shl16insli";
        break;

    default:
        return TILEGX_EXCP_OPCODE_UNKNOWN;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %d", mnemonic,
                  reg_names[dest], reg_names[srca], imm);
    return TILEGX_EXCP_NONE;
}
/* Translate the X0 bitfield instructions (bfextu/bfexts/bfins/mm).
 * @bfs/@bfe are the start/end bit positions; when bfs > bfe the field
 * wraps around bit 63.
 * NOTE(review): control-flow lines were elided in this copy and have
 * been reconstructed — verify against upstream.
 */
static TileExcp gen_bf_opcode_x0(DisasContext *dc, unsigned ext,
                                 unsigned dest, unsigned srca,
                                 unsigned bfs, unsigned bfe)
{
    TCGv tdest = dest_gr(dc, dest);
    TCGv tsrca = load_gr(dc, srca);
    TCGv tsrcd;
    int len;
    const char *mnemonic;

    /* The bitfield is either between E and S inclusive,
       or up from S and down from E inclusive.  */
    if (bfs <= bfe) {
        len = bfe - bfs + 1;
    } else {
        len = (64 - bfs) + (bfe + 1);
    }

    switch (ext) {
    case BFEXTU_BF_OPCODE_X0:
        /* Fast paths for byte/word/long zero-extension.  */
        if (bfs == 0 && bfe == 7) {
            tcg_gen_ext8u_tl(tdest, tsrca);
        } else if (bfs == 0 && bfe == 15) {
            tcg_gen_ext16u_tl(tdest, tsrca);
        } else if (bfs == 0 && bfe == 31) {
            tcg_gen_ext32u_tl(tdest, tsrca);
        } else {
            /* Rotate the end of the field up to bit 63, then shift
               right to extract it.  */
            int rol = 63 - bfe;
            if (bfs <= bfe) {
                tcg_gen_shli_tl(tdest, tsrca, rol);
            } else {
                tcg_gen_rotli_tl(tdest, tsrca, rol);
            }
            tcg_gen_shri_tl(tdest, tdest, (bfs + rol) & 63);
        }
        mnemonic = "bfextu";
        break;

    case BFEXTS_BF_OPCODE_X0:
        /* Fast paths for byte/word/long sign-extension.  */
        if (bfs == 0 && bfe == 7) {
            tcg_gen_ext8s_tl(tdest, tsrca);
        } else if (bfs == 0 && bfe == 15) {
            tcg_gen_ext16s_tl(tdest, tsrca);
        } else if (bfs == 0 && bfe == 31) {
            tcg_gen_ext32s_tl(tdest, tsrca);
        } else {
            int rol = 63 - bfe;
            if (bfs <= bfe) {
                tcg_gen_shli_tl(tdest, tsrca, rol);
            } else {
                tcg_gen_rotli_tl(tdest, tsrca, rol);
            }
            tcg_gen_sari_tl(tdest, tdest, (bfs + rol) & 63);
        }
        mnemonic = "bfexts";
        break;

    case BFINS_BF_OPCODE_X0:
        tsrcd = load_gr(dc, dest);
        if (bfs <= bfe) {
            tcg_gen_deposit_tl(tdest, tsrcd, tsrca, bfs, len);
        } else {
            /* Wrapping field: rotate the destination so the field is
               contiguous at bit 0, deposit, and rotate back.  */
            tcg_gen_rotri_tl(tdest, tsrcd, bfs);
            tcg_gen_deposit_tl(tdest, tdest, tsrca, 0, len);
            tcg_gen_rotli_tl(tdest, tdest, bfs);
        }
        mnemonic = "bfins";
        break;

    case MM_BF_OPCODE_X0:
        tsrcd = load_gr(dc, dest);
        if (bfs == 0) {
            tcg_gen_deposit_tl(tdest, tsrca, tsrcd, 0, len);
        } else {
            uint64_t mask = len == 64 ? -1 : rol64((1ULL << len) - 1, bfs);
            TCGv tmp = tcg_const_tl(mask);

            /* Merge bits from the old destination inside the mask with
               bits from the source outside it.  */
            tcg_gen_and_tl(tdest, tsrcd, tmp);
            tcg_gen_andc_tl(tmp, tsrca, tmp);
            tcg_gen_or_tl(tdest, tdest, tmp);
            tcg_temp_free(tmp);
        }
        mnemonic = "mm";
        break;

    default:
        return TILEGX_EXCP_OPCODE_UNKNOWN;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %u, %u", mnemonic,
                  reg_names[dest], reg_names[srca], bfs, bfe);
    return TILEGX_EXCP_NONE;
}
/* Translate an X1 conditional branch; @off is in bundles.  The branch
 * condition is recorded in dc->jmp and emitted at end-of-bundle.
 * NOTE(review): the switch skeleton, mnemonics and breaks were elided
 * in this copy and have been reconstructed — verify against upstream.
 */
static TileExcp gen_branch_opcode_x1(DisasContext *dc, unsigned ext,
                                     unsigned srca, int off)
{
    target_ulong tgt = dc->pc + off * TILEGX_BUNDLE_SIZE_IN_BYTES;
    const char *mnemonic;

    dc->jmp.dest = tcg_const_tl(tgt);
    dc->jmp.val1 = tcg_temp_new();
    tcg_gen_mov_tl(dc->jmp.val1, load_gr(dc, srca));

    /* Note that the "predict taken" opcodes have bit 0 clear.
       Therefore, fold the two cases together by setting bit 0. */
    switch (ext | 1) {
    case BEQZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_EQ;
        mnemonic = "beqz";
        break;
    case BNEZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_NE;
        mnemonic = "bnez";
        break;
    case BGEZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_GE;
        mnemonic = "bgez";
        break;
    case BGTZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_GT;
        mnemonic = "bgtz";
        break;
    case BLEZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_LE;
        mnemonic = "blez";
        break;
    case BLTZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_LT;
        mnemonic = "bltz";
        break;
    case BLBC_BRANCH_OPCODE_X1:
        /* Branch on low bit clear: test bit 0 against zero.  */
        dc->jmp.cond = TCG_COND_EQ;
        tcg_gen_andi_tl(dc->jmp.val1, dc->jmp.val1, 1);
        mnemonic = "blbc";
        break;
    case BLBS_BRANCH_OPCODE_X1:
        /* Branch on low bit set.  */
        dc->jmp.cond = TCG_COND_NE;
        tcg_gen_andi_tl(dc->jmp.val1, dc->jmp.val1, 1);
        mnemonic = "blbs";
        break;
    default:
        return TILEGX_EXCP_OPCODE_UNKNOWN;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("%s%s %s, " TARGET_FMT_lx " <%s>",
                 mnemonic, ext & 1 ? "" : "t",
                 reg_names[srca], tgt, lookup_symbol(tgt));
    }
    return TILEGX_EXCP_NONE;
}
/* Translate an X1 unconditional jump (j/jal); @off is in bundles.
 * JAL additionally writes the return address into the link register.
 */
static TileExcp gen_jump_opcode_x1(DisasContext *dc, unsigned ext, int off)
{
    target_ulong tgt = dc->pc + off * TILEGX_BUNDLE_SIZE_IN_BYTES;
    const char *mnemonic = "j";

    /* The extension field is 1 bit, therefore we only have JAL and J.  */
    if (ext == JAL_JUMP_OPCODE_X1) {
        tcg_gen_movi_tl(dest_gr(dc, TILEGX_R_LR),
                        dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
        mnemonic = "jal";
    }
    dc->jmp.cond = TCG_COND_ALWAYS;
    dc->jmp.dest = tcg_const_tl(tgt);

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("%s " TARGET_FMT_lx " <%s>",
                 mnemonic, tgt, lookup_symbol(tgt));
    }
    return TILEGX_EXCP_NONE;
}
1944
void (*get)(TCGv, TCGv_ptr);
1945
void (*put)(TCGv_ptr, TCGv);
1948
static const TileSPR *find_spr(unsigned spr)
1950
/* Allow the compiler to construct the binary search tree. */
1951
#define D(N, O, G, P) \
1952
case SPR_##N: { static const TileSPR x = { #N, O, G, P }; return &x; }
1956
offsetof(CPUTLGState, spregs[TILEGX_SPR_CMPEXCH]), 0, 0)
1957
D(INTERRUPT_CRITICAL_SECTION,
1958
offsetof(CPUTLGState, spregs[TILEGX_SPR_CRITICAL_SEC]), 0, 0)
1960
offsetof(CPUTLGState, spregs[TILEGX_SPR_SIM_CONTROL]), 0, 0)
1962
offsetof(CPUTLGState, spregs[TILEGX_SPR_EX_CONTEXT_0_0]), 0, 0)
1964
offsetof(CPUTLGState, spregs[TILEGX_SPR_EX_CONTEXT_0_1]), 0, 0)
1969
qemu_log_mask(LOG_UNIMP, "UNIMP SPR %u\n", spr);
1973
/* Move a general register into a special-purpose register.  Unknown
 * SPRs log and raise UNIMPLEMENTED; known ones either call a custom
 * put() hook or store directly into the env slot.
 */
static TileExcp gen_mtspr_x1(DisasContext *dc, unsigned spr, unsigned srca)
{
    const TileSPR *def = find_spr(spr);
    TCGv tsrca;

    if (def == NULL) {
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "mtspr spr[%u], %s", spr, reg_names[srca]);
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }

    tsrca = load_gr(dc, srca);
    if (def->put) {
        def->put(cpu_env, tsrca);
    } else {
        tcg_gen_st_tl(tsrca, cpu_env, def->offset);
    }
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "mtspr %s, %s", def->name, reg_names[srca]);
    return TILEGX_EXCP_NONE;
}
/* Move a special-purpose register into a general register.  Unknown
 * SPRs log and raise UNIMPLEMENTED; known ones either call a custom
 * get() hook or load directly from the env slot.
 * Fix: the unimplemented-SPR trace said "mtspr" in this mfspr path —
 * corrected to "mfspr" so the disas log matches the instruction.
 */
static TileExcp gen_mfspr_x1(DisasContext *dc, unsigned dest, unsigned spr)
{
    const TileSPR *def = find_spr(spr);
    TCGv tdest;

    if (def == NULL) {
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "mfspr %s, spr[%u]", reg_names[dest], spr);
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }

    tdest = dest_gr(dc, dest);
    if (def->get) {
        def->get(tdest, cpu_env);
    } else {
        tcg_gen_ld_tl(tdest, cpu_env, def->offset);
    }
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "mfspr %s, %s", reg_names[dest], def->name);
    return TILEGX_EXCP_NONE;
}
/* Decode and translate the Y0 slot of a bundle. */
static TileExcp decode_y0(DisasContext *dc, tilegx_bundle_bits bundle)
{
    unsigned opc = get_Opcode_Y0(bundle);
    unsigned ext = get_RRROpcodeExtension_Y0(bundle);
    unsigned dest = get_Dest_Y0(bundle);
    unsigned srca = get_SrcA_Y0(bundle);
    unsigned srcb;
    int imm;

    switch (opc) {
    case RRR_1_OPCODE_Y0:
        if (ext == UNARY_RRR_1_OPCODE_Y0) {
            ext = get_UnaryOpcodeExtension_Y0(bundle);
            return gen_rr_opcode(dc, OE(opc, ext, Y0), dest, srca, bundle);
        }
        /* fallthrough */
    case RRR_0_OPCODE_Y0:
    case RRR_2_OPCODE_Y0:
    case RRR_3_OPCODE_Y0:
    case RRR_4_OPCODE_Y0:
    case RRR_5_OPCODE_Y0:
    case RRR_6_OPCODE_Y0:
    case RRR_7_OPCODE_Y0:
    case RRR_8_OPCODE_Y0:
    case RRR_9_OPCODE_Y0:
        srcb = get_SrcB_Y0(bundle);
        return gen_rrr_opcode(dc, OE(opc, ext, Y0), dest, srca, srcb);

    case SHIFT_OPCODE_Y0:
        ext = get_ShiftOpcodeExtension_Y0(bundle);
        imm = get_ShAmt_Y0(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, Y0), dest, srca, imm);

    case ADDI_OPCODE_Y0:
    case ADDXI_OPCODE_Y0:
    case ANDI_OPCODE_Y0:
    case CMPEQI_OPCODE_Y0:
    case CMPLTSI_OPCODE_Y0:
        imm = (int8_t)get_Imm8_Y0(bundle);
        return gen_rri_opcode(dc, OE(opc, 0, Y0), dest, srca, imm);

    default:
        return TILEGX_EXCP_OPCODE_UNKNOWN;
    }
}
/* Decode and translate the Y1 slot of a bundle.
 * Fix: the unary-extension test used UNARY_RRR_1_OPCODE_Y0 inside this
 * Y1 decoder; use the Y1 constant for consistency with decode_y0.
 * (The Y0/Y1 unary constants appear to share a value — confirm against
 * opcode_tilegx.h before relying on this.)
 */
static TileExcp decode_y1(DisasContext *dc, tilegx_bundle_bits bundle)
{
    unsigned opc = get_Opcode_Y1(bundle);
    unsigned ext = get_RRROpcodeExtension_Y1(bundle);
    unsigned dest = get_Dest_Y1(bundle);
    unsigned srca = get_SrcA_Y1(bundle);
    unsigned srcb;
    int imm;

    switch (get_Opcode_Y1(bundle)) {
    case RRR_1_OPCODE_Y1:
        if (ext == UNARY_RRR_1_OPCODE_Y1) {
            ext = get_UnaryOpcodeExtension_Y1(bundle);
            return gen_rr_opcode(dc, OE(opc, ext, Y1), dest, srca, bundle);
        }
        /* fallthrough */
    case RRR_0_OPCODE_Y1:
    case RRR_2_OPCODE_Y1:
    case RRR_3_OPCODE_Y1:
    case RRR_4_OPCODE_Y1:
    case RRR_5_OPCODE_Y1:
    case RRR_6_OPCODE_Y1:
    case RRR_7_OPCODE_Y1:
        srcb = get_SrcB_Y1(bundle);
        return gen_rrr_opcode(dc, OE(opc, ext, Y1), dest, srca, srcb);

    case SHIFT_OPCODE_Y1:
        ext = get_ShiftOpcodeExtension_Y1(bundle);
        imm = get_ShAmt_Y1(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, Y1), dest, srca, imm);

    case ADDI_OPCODE_Y1:
    case ADDXI_OPCODE_Y1:
    case ANDI_OPCODE_Y1:
    case CMPEQI_OPCODE_Y1:
    case CMPLTSI_OPCODE_Y1:
        imm = (int8_t)get_Imm8_Y1(bundle);
        return gen_rri_opcode(dc, OE(opc, 0, Y1), dest, srca, imm);

    default:
        return TILEGX_EXCP_OPCODE_UNKNOWN;
    }
}
/* Decode and translate the Y2 (memory) slot of a bundle.
 * NOTE(review): the memop assignments and do_load label were elided in
 * this copy and have been reconstructed — verify against upstream.
 */
static TileExcp decode_y2(DisasContext *dc, tilegx_bundle_bits bundle)
{
    unsigned mode = get_Mode(bundle);
    unsigned opc = get_Opcode_Y2(bundle);
    unsigned srca = get_SrcA_Y2(bundle);
    unsigned srcbdest = get_SrcBDest_Y2(bundle);
    const char *mnemonic;
    TCGMemOp memop;
    bool prefetch_nofault = false;

    switch (OEY2(opc, mode)) {
    case OEY2(LD1S_OPCODE_Y2, MODE_OPCODE_YA2):
        memop = MO_SB;
        mnemonic = "ld1s"; /* prefetch_l1_fault */
        goto do_load;
    case OEY2(LD1U_OPCODE_Y2, MODE_OPCODE_YA2):
        memop = MO_UB;
        mnemonic = "ld1u"; /* prefetch, prefetch_l1 */
        /* Loads to the zero register are prefetches; must not fault.  */
        prefetch_nofault = (srcbdest == TILEGX_R_ZERO);
        goto do_load;
    case OEY2(LD2S_OPCODE_Y2, MODE_OPCODE_YA2):
        memop = MO_TESW;
        mnemonic = "ld2s"; /* prefetch_l2_fault */
        goto do_load;
    case OEY2(LD2U_OPCODE_Y2, MODE_OPCODE_YA2):
        memop = MO_TEUW;
        mnemonic = "ld2u"; /* prefetch_l2 */
        prefetch_nofault = (srcbdest == TILEGX_R_ZERO);
        goto do_load;
    case OEY2(LD4S_OPCODE_Y2, MODE_OPCODE_YB2):
        memop = MO_TESL;
        mnemonic = "ld4s"; /* prefetch_l3_fault */
        goto do_load;
    case OEY2(LD4U_OPCODE_Y2, MODE_OPCODE_YB2):
        memop = MO_TEUL;
        mnemonic = "ld4u"; /* prefetch_l3 */
        prefetch_nofault = (srcbdest == TILEGX_R_ZERO);
        goto do_load;
    case OEY2(LD_OPCODE_Y2, MODE_OPCODE_YB2):
        memop = MO_TEQ;
        mnemonic = "ld";
    do_load:
        if (!prefetch_nofault) {
            tcg_gen_qemu_ld_tl(dest_gr(dc, srcbdest), load_gr(dc, srca),
                               dc->mmuidx, memop);
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", mnemonic,
                      reg_names[srcbdest], reg_names[srca]);
        return TILEGX_EXCP_NONE;

    case OEY2(ST1_OPCODE_Y2, MODE_OPCODE_YC2):
        return gen_st_opcode(dc, 0, srca, srcbdest, MO_UB, "st1");
    case OEY2(ST2_OPCODE_Y2, MODE_OPCODE_YC2):
        return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEUW, "st2");
    case OEY2(ST4_OPCODE_Y2, MODE_OPCODE_YC2):
        return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEUL, "st4");
    case OEY2(ST_OPCODE_Y2, MODE_OPCODE_YC2):
        return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEQ, "st");

    default:
        return TILEGX_EXCP_OPCODE_UNKNOWN;
    }
}
/* Decode and translate the X0 slot of a bundle. */
static TileExcp decode_x0(DisasContext *dc, tilegx_bundle_bits bundle)
{
    unsigned opc = get_Opcode_X0(bundle);
    unsigned dest = get_Dest_X0(bundle);
    unsigned srca = get_SrcA_X0(bundle);
    unsigned ext, srcb, bfs, bfe;
    int imm;

    switch (opc) {
    case RRR_0_OPCODE_X0:
        ext = get_RRROpcodeExtension_X0(bundle);
        if (ext == UNARY_RRR_0_OPCODE_X0) {
            ext = get_UnaryOpcodeExtension_X0(bundle);
            return gen_rr_opcode(dc, OE(opc, ext, X0), dest, srca, bundle);
        }
        srcb = get_SrcB_X0(bundle);
        return gen_rrr_opcode(dc, OE(opc, ext, X0), dest, srca, srcb);

    case SHIFT_OPCODE_X0:
        ext = get_ShiftOpcodeExtension_X0(bundle);
        imm = get_ShAmt_X0(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, X0), dest, srca, imm);

    case IMM8_OPCODE_X0:
        ext = get_Imm8OpcodeExtension_X0(bundle);
        imm = (int8_t)get_Imm8_X0(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, X0), dest, srca, imm);

    case BF_OPCODE_X0:
        ext = get_BFOpcodeExtension_X0(bundle);
        bfs = get_BFStart_X0(bundle);
        bfe = get_BFEnd_X0(bundle);
        return gen_bf_opcode_x0(dc, ext, dest, srca, bfs, bfe);

    case ADDLI_OPCODE_X0:
    case SHL16INSLI_OPCODE_X0:
    case ADDXLI_OPCODE_X0:
        imm = (int16_t)get_Imm16_X0(bundle);
        return gen_rri_opcode(dc, OE(opc, 0, X0), dest, srca, imm);

    default:
        return TILEGX_EXCP_OPCODE_UNKNOWN;
    }
}
/* Decode and translate the X1 slot of a bundle (memory, branch, jump
 * and SPR instructions live here).
 * NOTE(review): the nested switch skeletons were elided in this copy
 * and have been reconstructed — verify against upstream.
 */
static TileExcp decode_x1(DisasContext *dc, tilegx_bundle_bits bundle)
{
    unsigned opc = get_Opcode_X1(bundle);
    unsigned dest = get_Dest_X1(bundle);
    unsigned srca = get_SrcA_X1(bundle);
    unsigned ext, srcb;
    int imm;

    switch (opc) {
    case RRR_0_OPCODE_X1:
        ext = get_RRROpcodeExtension_X1(bundle);
        srcb = get_SrcB_X1(bundle);
        switch (ext) {
        case UNARY_RRR_0_OPCODE_X1:
            ext = get_UnaryOpcodeExtension_X1(bundle);
            return gen_rr_opcode(dc, OE(opc, ext, X1), dest, srca, bundle);
        case ST1_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_UB, "st1");
        case ST2_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEUW, "st2");
        case ST4_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEUL, "st4");
        case STNT1_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_UB, "stnt1");
        case STNT2_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEUW, "stnt2");
        case STNT4_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEUL, "stnt4");
        case STNT_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEQ, "stnt");
        case ST_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEQ, "st");
        }
        return gen_rrr_opcode(dc, OE(opc, ext, X1), dest, srca, srcb);

    case SHIFT_OPCODE_X1:
        ext = get_ShiftOpcodeExtension_X1(bundle);
        imm = get_ShAmt_X1(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, X1), dest, srca, imm);

    case IMM8_OPCODE_X1:
        ext = get_Imm8OpcodeExtension_X1(bundle);
        imm = (int8_t)get_Dest_Imm8_X1(bundle);
        srcb = get_SrcB_X1(bundle);
        switch (ext) {
        case ST1_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_UB, "st1_add");
        case ST2_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUW, "st2_add");
        case ST4_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUL, "st4_add");
        case STNT1_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_UB, "stnt1_add");
        case STNT2_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUW, "stnt2_add");
        case STNT4_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUL, "stnt4_add");
        case STNT_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEQ, "stnt_add");
        case ST_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEQ, "st_add");
        case MFSPR_IMM8_OPCODE_X1:
            return gen_mfspr_x1(dc, dest, get_MF_Imm14_X1(bundle));
        case MTSPR_IMM8_OPCODE_X1:
            return gen_mtspr_x1(dc, get_MT_Imm14_X1(bundle), srca);
        }
        /* Not a store or SPR op: re-extract the immediate from the
           normal Imm8 field and handle as a generic RRI op.  */
        imm = (int8_t)get_Imm8_X1(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, X1), dest, srca, imm);

    case BRANCH_OPCODE_X1:
        ext = get_BrType_X1(bundle);
        imm = sextract32(get_BrOff_X1(bundle), 0, 17);
        return gen_branch_opcode_x1(dc, ext, srca, imm);

    case JUMP_OPCODE_X1:
        ext = get_JumpOpcodeExtension_X1(bundle);
        imm = sextract32(get_JumpOff_X1(bundle), 0, 27);
        return gen_jump_opcode_x1(dc, ext, imm);

    case ADDLI_OPCODE_X1:
    case SHL16INSLI_OPCODE_X1:
    case ADDXLI_OPCODE_X1:
        imm = (int16_t)get_Imm16_X1(bundle);
        return gen_rri_opcode(dc, OE(opc, 0, X1), dest, srca, imm);

    default:
        return TILEGX_EXCP_OPCODE_UNKNOWN;
    }
}
/* If a slot decoder reported an exception, emit it and log the
 * offending bundle for the UNIMP/UNKNOWN cases.  @type names the slot
 * ("x0", "y2", ...) for the log message.
 */
static void notice_excp(DisasContext *dc, uint64_t bundle,
                        const char *type, TileExcp excp)
{
    if (likely(excp == TILEGX_EXCP_NONE)) {
        return;
    }
    gen_exception(dc, excp);
    switch (excp) {
    case TILEGX_EXCP_OPCODE_UNIMPLEMENTED:
        qemu_log_mask(LOG_UNIMP, "UNIMP %s, [" FMT64X "]\n", type, bundle);
        break;
    case TILEGX_EXCP_OPCODE_UNKNOWN:
        qemu_log_mask(LOG_UNIMP, "UNKNOWN %s, [" FMT64X "]\n", type, bundle);
        break;
    default:
        break;
    }
}
/* Translate one 64-bit bundle: decode each slot, then flush the cached
 * register writebacks and emit any pending branch or pending atomic
 * exception.  Writebacks are applied in reverse allocation order.
 * NOTE(review): structural lines were elided in this copy and have been
 * reconstructed (in particular the exit_tb tail) — verify against
 * upstream.
 */
static void translate_one_bundle(DisasContext *dc, uint64_t bundle)
{
    int i;

    /* Reset the per-bundle writeback cache.  */
    for (i = 0; i < ARRAY_SIZE(dc->wb); i++) {
        DisasContextTemp *wb = &dc->wb[i];
        wb->reg = TILEGX_R_NOREG;
        TCGV_UNUSED_I64(wb->val);
    }
    dc->num_wb = 0;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "  %" PRIx64 ":  { ", dc->pc);
    if (get_Mode(bundle)) {
        notice_excp(dc, bundle, "y0", decode_y0(dc, bundle));
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
        notice_excp(dc, bundle, "y1", decode_y1(dc, bundle));
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
        notice_excp(dc, bundle, "y2", decode_y2(dc, bundle));
    } else {
        notice_excp(dc, bundle, "x0", decode_x0(dc, bundle));
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
        notice_excp(dc, bundle, "x1", decode_x1(dc, bundle));
    }
    qemu_log_mask(CPU_LOG_TB_IN_ASM, " }\n");

    /* Commit the cached writebacks; writes to the zero register
       (reg >= TILEGX_R_COUNT) are discarded.  */
    for (i = dc->num_wb - 1; i >= 0; --i) {
        DisasContextTemp *wb = &dc->wb[i];
        if (wb->reg < TILEGX_R_COUNT) {
            tcg_gen_mov_i64(cpu_regs[wb->reg], wb->val);
        }
        tcg_temp_free_i64(wb->val);
    }

    if (dc->jmp.cond != TCG_COND_NEVER) {
        if (dc->jmp.cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(cpu_pc, dc->jmp.dest);
        } else {
            TCGv next = tcg_const_i64(dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
            tcg_gen_movcond_i64(dc->jmp.cond, cpu_pc,
                                dc->jmp.val1, load_zero(dc),
                                dc->jmp.dest, next);
            tcg_temp_free_i64(dc->jmp.val1);
            tcg_temp_free_i64(next);
        }
        tcg_temp_free_i64(dc->jmp.dest);
        tcg_gen_exit_tb(0);
        dc->exit_tb = true;
    } else if (dc->atomic_excp != TILEGX_EXCP_NONE) {
        gen_exception(dc, dc->atomic_excp);
    }
}
/* Translate a block of bundles starting at tb->pc into TCG ops,
 * stopping at a control-flow change, the instruction budget, a page
 * boundary, or a full op buffer.
 * NOTE(review): the loop skeleton and epilogue were elided in this
 * copy and have been reconstructed — verify against upstream.
 */
void gen_intermediate_code(CPUTLGState *env, struct TranslationBlock *tb)
{
    TileGXCPU *cpu = tilegx_env_get_cpu(env);
    DisasContext ctx;
    DisasContext *dc = &ctx;
    CPUState *cs = CPU(cpu);
    uint64_t pc_start = tb->pc;
    uint64_t next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    int num_insns = 0;
    int max_insns = tb->cflags & CF_COUNT_MASK;

    dc->pc = pc_start;
    dc->mmuidx = 0;
    dc->exit_tb = false;
    dc->atomic_excp = TILEGX_EXCP_NONE;
    dc->jmp.cond = TCG_COND_NEVER;
    TCGV_UNUSED_I64(dc->jmp.dest);
    TCGV_UNUSED_I64(dc->jmp.val1);
    TCGV_UNUSED_I64(dc->zero);

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
    }
    if (!max_insns) {
        max_insns = CF_COUNT_MASK;
    }
    if (cs->singlestep_enabled || singlestep) {
        max_insns = 1;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }
    gen_tb_start(tb);

    while (1) {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

        translate_one_bundle(dc, cpu_ldq_data(env, dc->pc));

        if (dc->exit_tb) {
            /* PC updated and EXIT_TB/GOTO_TB/exception emitted.  */
            break;
        }
        dc->pc += TILEGX_BUNDLE_SIZE_IN_BYTES;
        if (num_insns >= max_insns
            || dc->pc >= next_page_start
            || tcg_op_buf_full()) {
            /* Ending the TB due to TB size or page boundary.  Set PC.  */
            tcg_gen_movi_tl(cpu_pc, dc->pc);
            tcg_gen_exit_tb(0);
            break;
        }
    }

    gen_tb_end(tb, num_insns);
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
    }
}
void restore_state_to_opc(CPUTLGState *env, TranslationBlock *tb,
2445
void tilegx_tcg_init(void)
2449
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
2450
tcg_ctx.tcg_env = cpu_env;
2451
cpu_pc = tcg_global_mem_new_i64(cpu_env, offsetof(CPUTLGState, pc), "pc");
2452
for (i = 0; i < TILEGX_R_COUNT; i++) {
2453
cpu_regs[i] = tcg_global_mem_new_i64(cpu_env,
2454
offsetof(CPUTLGState, regs[i]),