/*
 *  SH4 translation
 *
 *  Copyright (c) 2005 Samuel Tardieu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
24
#include "disas/disas.h"
25
#include "exec/exec-all.h"
27
#include "exec/cpu_ldst.h"
29
#include "exec/helper-proto.h"
30
#include "exec/helper-gen.h"
32
#include "trace-tcg.h"
36
typedef struct DisasContext {
37
struct TranslationBlock *tb;
44
int singlestep_enabled;
49
#if defined(CONFIG_USER_ONLY)
50
#define IS_USER(ctx) 1
52
#define IS_USER(ctx) (!(ctx->flags & (1u << SR_MD)))
56
BS_NONE = 0, /* We go out of the TB without reaching a branch or an
59
BS_STOP = 1, /* We want to stop translation for any reason */
60
BS_BRANCH = 2, /* We reached a branch condition */
61
BS_EXCP = 3, /* We reached an exception condition */
64
/* global register indexes */
65
static TCGv_env cpu_env;
66
static TCGv cpu_gregs[24];
67
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
68
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
69
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
70
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
71
static TCGv cpu_fregs[32];
73
/* internal register indexes */
74
static TCGv cpu_flags, cpu_delayed_pc;
76
#include "exec/gen-icount.h"
78
void sh4_translate_init(void)
81
static int done_init = 0;
82
static const char * const gregnames[24] = {
83
"R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
84
"R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
85
"R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
86
"R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
87
"R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
89
static const char * const fregnames[32] = {
90
"FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
91
"FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
92
"FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
93
"FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
94
"FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
95
"FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
96
"FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
97
"FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
103
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
104
tcg_ctx.tcg_env = cpu_env;
106
for (i = 0; i < 24; i++)
107
cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
108
offsetof(CPUSH4State, gregs[i]),
111
cpu_pc = tcg_global_mem_new_i32(cpu_env,
112
offsetof(CPUSH4State, pc), "PC");
113
cpu_sr = tcg_global_mem_new_i32(cpu_env,
114
offsetof(CPUSH4State, sr), "SR");
115
cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
116
offsetof(CPUSH4State, sr_m), "SR_M");
117
cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
118
offsetof(CPUSH4State, sr_q), "SR_Q");
119
cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
120
offsetof(CPUSH4State, sr_t), "SR_T");
121
cpu_ssr = tcg_global_mem_new_i32(cpu_env,
122
offsetof(CPUSH4State, ssr), "SSR");
123
cpu_spc = tcg_global_mem_new_i32(cpu_env,
124
offsetof(CPUSH4State, spc), "SPC");
125
cpu_gbr = tcg_global_mem_new_i32(cpu_env,
126
offsetof(CPUSH4State, gbr), "GBR");
127
cpu_vbr = tcg_global_mem_new_i32(cpu_env,
128
offsetof(CPUSH4State, vbr), "VBR");
129
cpu_sgr = tcg_global_mem_new_i32(cpu_env,
130
offsetof(CPUSH4State, sgr), "SGR");
131
cpu_dbr = tcg_global_mem_new_i32(cpu_env,
132
offsetof(CPUSH4State, dbr), "DBR");
133
cpu_mach = tcg_global_mem_new_i32(cpu_env,
134
offsetof(CPUSH4State, mach), "MACH");
135
cpu_macl = tcg_global_mem_new_i32(cpu_env,
136
offsetof(CPUSH4State, macl), "MACL");
137
cpu_pr = tcg_global_mem_new_i32(cpu_env,
138
offsetof(CPUSH4State, pr), "PR");
139
cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
140
offsetof(CPUSH4State, fpscr), "FPSCR");
141
cpu_fpul = tcg_global_mem_new_i32(cpu_env,
142
offsetof(CPUSH4State, fpul), "FPUL");
144
cpu_flags = tcg_global_mem_new_i32(cpu_env,
145
offsetof(CPUSH4State, flags), "_flags_");
146
cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
147
offsetof(CPUSH4State, delayed_pc),
149
cpu_ldst = tcg_global_mem_new_i32(cpu_env,
150
offsetof(CPUSH4State, ldst), "_ldst_");
152
for (i = 0; i < 32; i++)
153
cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
154
offsetof(CPUSH4State, fregs[i]),
160
void superh_cpu_dump_state(CPUState *cs, FILE *f,
161
fprintf_function cpu_fprintf, int flags)
163
SuperHCPU *cpu = SUPERH_CPU(cs);
164
CPUSH4State *env = &cpu->env;
166
cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
167
env->pc, cpu_read_sr(env), env->pr, env->fpscr);
168
cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
169
env->spc, env->ssr, env->gbr, env->vbr);
170
cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
171
env->sgr, env->dbr, env->delayed_pc, env->fpul);
172
for (i = 0; i < 24; i += 4) {
173
cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
174
i, env->gregs[i], i + 1, env->gregs[i + 1],
175
i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
177
if (env->flags & DELAY_SLOT) {
178
cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
180
} else if (env->flags & DELAY_SLOT_CONDITIONAL) {
181
cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
186
/* Reassemble the full architectural SR into dst.  cpu_sr holds every
   bit except Q, M and T, which live in their own TCG globals.
   BUG FIX: the first OR must take cpu_sr as its source operand (dst has
   no defined value yet), and the remaining ORs accumulate into dst; the
   original had these swapped, reading uninitialized dst and then
   discarding the Q and M bits in the final OR.  */
static void gen_read_sr(TCGv dst)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
    tcg_gen_or_i32(dst, cpu_sr, t0);
    tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_temp_free_i32(t0);
}
198
/* Scatter a full SR value into the split representation: everything
   except Q, M and T goes to cpu_sr; each of Q, M, T is extracted into
   its own single-bit global.  */
static void gen_write_sr(TCGv src)
{
    tcg_gen_andi_i32(cpu_sr, src,
                     ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
    tcg_gen_shri_i32(cpu_sr_q, src, SR_Q);
    tcg_gen_andi_i32(cpu_sr_q, cpu_sr_q, 1);
    tcg_gen_shri_i32(cpu_sr_m, src, SR_M);
    tcg_gen_andi_i32(cpu_sr_m, cpu_sr_m, 1);
    tcg_gen_shri_i32(cpu_sr_t, src, SR_T);
    tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
}
210
/* Direct TB chaining is allowed only when not single-stepping and (in
   system mode) when the destination lies in the same guest page as the
   current TB, so page-protection changes invalidate the link.  */
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    if (unlikely(ctx->singlestep_enabled)) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
223
/* Emit a jump to dest, chaining TBs directly when permitted, otherwise
   falling back to setting PC and exiting to the main loop (raising a
   debug exception first when single-stepping).  */
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        /* Use a direct jump if in same page and singlestep not enabled */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (ctx->singlestep_enabled) {
            gen_helper_debug(cpu_env);
        }
        tcg_gen_exit_tb(0);
    }
}
238
/* Emit the jump at the end of a TB.  delayed_pc == (uint32_t)-1 marks a
   dynamically-computed target (register-indirect delayed branch), which
   must go through cpu_delayed_pc; otherwise the target is static and we
   can chain TBs.  */
static void gen_jump(DisasContext * ctx)
{
    if (ctx->delayed_pc == (uint32_t) - 1) {
        /* Target is not statically known, it comes necessarily from a
           delayed jump as immediate jump are conditinal jumps */
        tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
        if (ctx->singlestep_enabled) {
            gen_helper_debug(cpu_env);
        }
        tcg_gen_exit_tb(0);
    } else {
        gen_goto_tb(ctx, 0, ctx->delayed_pc);
    }
}
252
/* Prepare a conditional delayed branch: record the target in
   cpu_delayed_pc and, when T matches the branch sense t, set
   DELAY_SLOT_TRUE in cpu_flags so the slot instruction's epilogue takes
   the branch.  */
static inline void gen_branch_slot(uint32_t delayed_pc, int t)
{
    TCGLabel *label = gen_new_label();
    tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
    /* Skip the flag update when T does not match the branch sense.  */
    tcg_gen_brcondi_i32(t ? TCG_COND_EQ : TCG_COND_NE, cpu_sr_t, 0, label);
    tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
    gen_set_label(label);
}
261
/* Immediate conditional jump (bt or bf) */
262
static void gen_conditional_jump(DisasContext * ctx,
263
target_ulong ift, target_ulong ifnott)
265
TCGLabel *l1 = gen_new_label();
266
tcg_gen_brcondi_i32(TCG_COND_NE, cpu_sr_t, 0, l1);
267
gen_goto_tb(ctx, 0, ifnott);
269
gen_goto_tb(ctx, 1, ift);
272
/* Delayed conditional jump (bt or bf) */
273
static void gen_delayed_conditional_jump(DisasContext * ctx)
278
l1 = gen_new_label();
280
tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
281
tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
282
gen_goto_tb(ctx, 1, ctx->pc + 2);
284
tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
288
/* Replace the env flags with the given value, preserving only the
   DELAY_SLOT_TRUE bit that may have been set by a branch-slot setup.  */
static inline void gen_store_flags(uint32_t flags)
{
    tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
    tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
}
294
/* Read a double-precision register pair into a 64-bit temp: DRn is the
   even register (high word) concatenated with the odd one (low word).  */
static inline void gen_load_fpr64(TCGv_i64 t, int reg)
{
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}
299
/* Write a 64-bit value back to a double-precision register pair:
   low 32 bits to the odd register, high 32 bits to the even one.
   NOTE: clobbers the low half of t by shifting it right 32.  */
static inline void gen_store_fpr64(TCGv_i64 t, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, t);
    tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
    tcg_gen_shri_i64(t, t, 32);
    tcg_gen_extrl_i64_i32(tmp, t);
    tcg_gen_mov_i32(cpu_fregs[reg], tmp);
    tcg_temp_free_i32(tmp);
}
310
/* Opcode bit-field extractors (names give the bit range; 's' suffix =
   sign-extended).  All read ctx->opcode.  */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* General-register selection: R0-R7 come from bank 1 when the CPU is in
   privileged mode (SR.MD) with register bank 1 selected (SR.RB).  */
#define REG(x) ((x) < 8 && (ctx->flags & (1u << SR_MD))\
                && (ctx->flags & (1u << SR_RB))\
                ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* ALTREG selects the inactive bank of R0-R7 (used by ldc/stc Rm_BANK).  */
#define ALTREG(x) ((x) < 8 && (!(ctx->flags & (1u << SR_MD))\
                   || !(ctx->flags & (1u << SR_RB)))\
                ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register selection honouring the FPSCR.FR bank-switch bit.  */
#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
/* XDn encoding: move bit 0 into the bank-select position.  */
#define XHACK(x) ((((x) & 1) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
333
/* Abort translation of an instruction that is illegal in a delay slot:
   raise the slot-illegal exception and stop decoding.  */
#define CHECK_NOT_DELAY_SLOT                                     \
    if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {    \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                       \
        gen_helper_raise_slot_illegal_instruction(cpu_env);      \
        ctx->bstate = BS_BRANCH;                                 \
        return;                                                  \
    }
342
/* Reject a privileged instruction in user mode: raise the appropriate
   illegal-instruction exception (slot variant inside a delay slot) and
   stop decoding.  */
#define CHECK_PRIVILEGED                                              \
    if (IS_USER(ctx)) {                                               \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                            \
        if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {     \
            gen_helper_raise_slot_illegal_instruction(cpu_env);       \
        } else {                                                      \
            gen_helper_raise_illegal_instruction(cpu_env);            \
        }                                                             \
        ctx->bstate = BS_BRANCH;                                      \
        return;                                                       \
    }
354
/* Reject an FPU instruction while SR.FD (FPU disable) is set: raise the
   fpu-disable exception (slot variant inside a delay slot) and stop
   decoding.  */
#define CHECK_FPU_ENABLED                                             \
    if (ctx->flags & (1u << SR_FD)) {                                 \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                            \
        if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {     \
            gen_helper_raise_slot_fpu_disable(cpu_env);               \
        } else {                                                      \
            gen_helper_raise_fpu_disable(cpu_env);                    \
        }                                                             \
        ctx->bstate = BS_BRANCH;                                      \
        return;                                                       \
    }
366
static void _decode_opc(DisasContext * ctx)
368
/* This code tries to make movcal emulation sufficiently
369
accurate for Linux purposes. This instruction writes
370
memory, and prior to that, always allocates a cache line.
371
It is used in two contexts:
372
- in memcpy, where data is copied in blocks, the first write
373
of to a block uses movca.l for performance.
374
- in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
375
to flush the cache. Here, the data written by movcal.l is never
376
written to memory, and the data written is just bogus.
378
To simulate this, we simulate movcal.l, we store the value to memory,
379
but we also remember the previous content. If we see ocbi, we check
380
if movcal.l for that address was done previously. If so, the write should
381
not have hit the memory, so we restore the previous content.
382
When we see an instruction that is neither movca.l
383
nor ocbi, the previous content is discarded.
385
To optimize, we only try to flush stores when we're at the start of
386
TB, or if we already saw movca.l in this TB and did not flush stores
390
int opcode = ctx->opcode & 0xf0ff;
391
if (opcode != 0x0093 /* ocbi */
392
&& opcode != 0x00c3 /* movca.l */)
394
gen_helper_discard_movcal_backup(cpu_env);
400
fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
403
switch (ctx->opcode) {
404
case 0x0019: /* div0u */
405
tcg_gen_movi_i32(cpu_sr_m, 0);
406
tcg_gen_movi_i32(cpu_sr_q, 0);
407
tcg_gen_movi_i32(cpu_sr_t, 0);
409
case 0x000b: /* rts */
411
tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
412
ctx->flags |= DELAY_SLOT;
413
ctx->delayed_pc = (uint32_t) - 1;
415
case 0x0028: /* clrmac */
416
tcg_gen_movi_i32(cpu_mach, 0);
417
tcg_gen_movi_i32(cpu_macl, 0);
419
case 0x0048: /* clrs */
420
tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
422
case 0x0008: /* clrt */
423
tcg_gen_movi_i32(cpu_sr_t, 0);
425
case 0x0038: /* ldtlb */
427
gen_helper_ldtlb(cpu_env);
429
case 0x002b: /* rte */
432
gen_write_sr(cpu_ssr);
433
tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
434
ctx->flags |= DELAY_SLOT;
435
ctx->delayed_pc = (uint32_t) - 1;
437
case 0x0058: /* sets */
438
tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
440
case 0x0018: /* sett */
441
tcg_gen_movi_i32(cpu_sr_t, 1);
443
case 0xfbfd: /* frchg */
444
tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
445
ctx->bstate = BS_STOP;
447
case 0xf3fd: /* fschg */
448
tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
449
ctx->bstate = BS_STOP;
451
case 0x0009: /* nop */
453
case 0x001b: /* sleep */
455
tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
456
gen_helper_sleep(cpu_env);
460
switch (ctx->opcode & 0xf000) {
461
case 0x1000: /* mov.l Rm,@(disp,Rn) */
463
TCGv addr = tcg_temp_new();
464
tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
465
tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
469
case 0x5000: /* mov.l @(disp,Rm),Rn */
471
TCGv addr = tcg_temp_new();
472
tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
473
tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
477
case 0xe000: /* mov #imm,Rn */
478
tcg_gen_movi_i32(REG(B11_8), B7_0s);
480
case 0x9000: /* mov.w @(disp,PC),Rn */
482
TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
483
tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
487
case 0xd000: /* mov.l @(disp,PC),Rn */
489
TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
490
tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
494
case 0x7000: /* add #imm,Rn */
495
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
497
case 0xa000: /* bra disp */
499
ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
500
tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
501
ctx->flags |= DELAY_SLOT;
503
case 0xb000: /* bsr disp */
505
tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
506
ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
507
tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
508
ctx->flags |= DELAY_SLOT;
512
switch (ctx->opcode & 0xf00f) {
513
case 0x6003: /* mov Rm,Rn */
514
tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
516
case 0x2000: /* mov.b Rm,@Rn */
517
tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
519
case 0x2001: /* mov.w Rm,@Rn */
520
tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
522
case 0x2002: /* mov.l Rm,@Rn */
523
tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
525
case 0x6000: /* mov.b @Rm,Rn */
526
tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
528
case 0x6001: /* mov.w @Rm,Rn */
529
tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
531
case 0x6002: /* mov.l @Rm,Rn */
532
tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
534
case 0x2004: /* mov.b Rm,@-Rn */
536
TCGv addr = tcg_temp_new();
537
tcg_gen_subi_i32(addr, REG(B11_8), 1);
538
/* might cause re-execution */
539
tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
540
tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
544
case 0x2005: /* mov.w Rm,@-Rn */
546
TCGv addr = tcg_temp_new();
547
tcg_gen_subi_i32(addr, REG(B11_8), 2);
548
tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
549
tcg_gen_mov_i32(REG(B11_8), addr);
553
case 0x2006: /* mov.l Rm,@-Rn */
555
TCGv addr = tcg_temp_new();
556
tcg_gen_subi_i32(addr, REG(B11_8), 4);
557
tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
558
tcg_gen_mov_i32(REG(B11_8), addr);
561
case 0x6004: /* mov.b @Rm+,Rn */
562
tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
564
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
566
case 0x6005: /* mov.w @Rm+,Rn */
567
tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
569
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
571
case 0x6006: /* mov.l @Rm+,Rn */
572
tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
574
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
576
case 0x0004: /* mov.b Rm,@(R0,Rn) */
578
TCGv addr = tcg_temp_new();
579
tcg_gen_add_i32(addr, REG(B11_8), REG(0));
580
tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
584
case 0x0005: /* mov.w Rm,@(R0,Rn) */
586
TCGv addr = tcg_temp_new();
587
tcg_gen_add_i32(addr, REG(B11_8), REG(0));
588
tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
592
case 0x0006: /* mov.l Rm,@(R0,Rn) */
594
TCGv addr = tcg_temp_new();
595
tcg_gen_add_i32(addr, REG(B11_8), REG(0));
596
tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
600
case 0x000c: /* mov.b @(R0,Rm),Rn */
602
TCGv addr = tcg_temp_new();
603
tcg_gen_add_i32(addr, REG(B7_4), REG(0));
604
tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
608
case 0x000d: /* mov.w @(R0,Rm),Rn */
610
TCGv addr = tcg_temp_new();
611
tcg_gen_add_i32(addr, REG(B7_4), REG(0));
612
tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
616
case 0x000e: /* mov.l @(R0,Rm),Rn */
618
TCGv addr = tcg_temp_new();
619
tcg_gen_add_i32(addr, REG(B7_4), REG(0));
620
tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
624
case 0x6008: /* swap.b Rm,Rn */
626
TCGv low = tcg_temp_new();;
627
tcg_gen_ext16u_i32(low, REG(B7_4));
628
tcg_gen_bswap16_i32(low, low);
629
tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
633
case 0x6009: /* swap.w Rm,Rn */
634
tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
636
case 0x200d: /* xtrct Rm,Rn */
639
high = tcg_temp_new();
640
tcg_gen_shli_i32(high, REG(B7_4), 16);
641
low = tcg_temp_new();
642
tcg_gen_shri_i32(low, REG(B11_8), 16);
643
tcg_gen_or_i32(REG(B11_8), high, low);
648
case 0x300c: /* add Rm,Rn */
649
tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
651
case 0x300e: /* addc Rm,Rn */
654
t0 = tcg_const_tl(0);
656
tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
657
tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
658
REG(B11_8), t0, t1, cpu_sr_t);
663
case 0x300f: /* addv Rm,Rn */
667
tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
669
tcg_gen_xor_i32(t1, t0, REG(B11_8));
671
tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
672
tcg_gen_andc_i32(cpu_sr_t, t1, t2);
674
tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
676
tcg_gen_mov_i32(REG(B7_4), t0);
680
case 0x2009: /* and Rm,Rn */
681
tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
683
case 0x3000: /* cmp/eq Rm,Rn */
684
tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
686
case 0x3003: /* cmp/ge Rm,Rn */
687
tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
689
case 0x3007: /* cmp/gt Rm,Rn */
690
tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
692
case 0x3006: /* cmp/hi Rm,Rn */
693
tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
695
case 0x3002: /* cmp/hs Rm,Rn */
696
tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
698
case 0x200c: /* cmp/str Rm,Rn */
700
TCGv cmp1 = tcg_temp_new();
701
TCGv cmp2 = tcg_temp_new();
702
tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
703
tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
704
tcg_gen_andc_i32(cmp1, cmp1, cmp2);
705
tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
706
tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
711
case 0x2007: /* div0s Rm,Rn */
712
tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31); /* SR_Q */
713
tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31); /* SR_M */
714
tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
716
case 0x3004: /* div1 Rm,Rn */
718
TCGv t0 = tcg_temp_new();
719
TCGv t1 = tcg_temp_new();
720
TCGv t2 = tcg_temp_new();
721
TCGv zero = tcg_const_i32(0);
723
/* shift left arg1, saving the bit being pushed out and inserting
725
tcg_gen_shri_i32(t0, REG(B11_8), 31);
726
tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
727
tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);
729
/* Add or subtract arg0 from arg1 depending if Q == M. To avoid
730
using 64-bit temps, we compute arg0's high part from q ^ m, so
731
that it is 0x00000000 when adding the value or 0xffffffff when
733
tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
734
tcg_gen_subi_i32(t1, t1, 1);
735
tcg_gen_neg_i32(t2, REG(B7_4));
736
tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
737
tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);
739
/* compute T and Q depending on carry */
740
tcg_gen_andi_i32(t1, t1, 1);
741
tcg_gen_xor_i32(t1, t1, t0);
742
tcg_gen_xori_i32(cpu_sr_t, t1, 1);
743
tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
751
case 0x300d: /* dmuls.l Rm,Rn */
752
tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
754
case 0x3005: /* dmulu.l Rm,Rn */
755
tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
757
case 0x600e: /* exts.b Rm,Rn */
758
tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
760
case 0x600f: /* exts.w Rm,Rn */
761
tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
763
case 0x600c: /* extu.b Rm,Rn */
764
tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
766
case 0x600d: /* extu.w Rm,Rn */
767
tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
769
case 0x000f: /* mac.l @Rm+,@Rn+ */
772
arg0 = tcg_temp_new();
773
tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
774
arg1 = tcg_temp_new();
775
tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
776
gen_helper_macl(cpu_env, arg0, arg1);
779
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
780
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
783
case 0x400f: /* mac.w @Rm+,@Rn+ */
786
arg0 = tcg_temp_new();
787
tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
788
arg1 = tcg_temp_new();
789
tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
790
gen_helper_macw(cpu_env, arg0, arg1);
793
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
794
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
797
case 0x0007: /* mul.l Rm,Rn */
798
tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
800
case 0x200f: /* muls.w Rm,Rn */
803
arg0 = tcg_temp_new();
804
tcg_gen_ext16s_i32(arg0, REG(B7_4));
805
arg1 = tcg_temp_new();
806
tcg_gen_ext16s_i32(arg1, REG(B11_8));
807
tcg_gen_mul_i32(cpu_macl, arg0, arg1);
812
case 0x200e: /* mulu.w Rm,Rn */
815
arg0 = tcg_temp_new();
816
tcg_gen_ext16u_i32(arg0, REG(B7_4));
817
arg1 = tcg_temp_new();
818
tcg_gen_ext16u_i32(arg1, REG(B11_8));
819
tcg_gen_mul_i32(cpu_macl, arg0, arg1);
824
case 0x600b: /* neg Rm,Rn */
825
tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
827
case 0x600a: /* negc Rm,Rn */
829
TCGv t0 = tcg_const_i32(0);
830
tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
831
REG(B7_4), t0, cpu_sr_t, t0);
832
tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
833
t0, t0, REG(B11_8), cpu_sr_t);
834
tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
838
case 0x6007: /* not Rm,Rn */
839
tcg_gen_not_i32(REG(B11_8), REG(B7_4));
841
case 0x200b: /* or Rm,Rn */
842
tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
844
case 0x400c: /* shad Rm,Rn */
846
TCGv t0 = tcg_temp_new();
847
TCGv t1 = tcg_temp_new();
848
TCGv t2 = tcg_temp_new();
850
tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
852
/* positive case: shift to the left */
853
tcg_gen_shl_i32(t1, REG(B11_8), t0);
855
/* negative case: shift to the right in two steps to
856
correctly handle the -32 case */
857
tcg_gen_xori_i32(t0, t0, 0x1f);
858
tcg_gen_sar_i32(t2, REG(B11_8), t0);
859
tcg_gen_sari_i32(t2, t2, 1);
861
/* select between the two cases */
862
tcg_gen_movi_i32(t0, 0);
863
tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
870
case 0x400d: /* shld Rm,Rn */
872
TCGv t0 = tcg_temp_new();
873
TCGv t1 = tcg_temp_new();
874
TCGv t2 = tcg_temp_new();
876
tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
878
/* positive case: shift to the left */
879
tcg_gen_shl_i32(t1, REG(B11_8), t0);
881
/* negative case: shift to the right in two steps to
882
correctly handle the -32 case */
883
tcg_gen_xori_i32(t0, t0, 0x1f);
884
tcg_gen_shr_i32(t2, REG(B11_8), t0);
885
tcg_gen_shri_i32(t2, t2, 1);
887
/* select between the two cases */
888
tcg_gen_movi_i32(t0, 0);
889
tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
896
case 0x3008: /* sub Rm,Rn */
897
tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
899
case 0x300a: /* subc Rm,Rn */
902
t0 = tcg_const_tl(0);
904
tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
905
tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
906
REG(B11_8), t0, t1, cpu_sr_t);
907
tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
912
case 0x300b: /* subv Rm,Rn */
916
tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
918
tcg_gen_xor_i32(t1, t0, REG(B7_4));
920
tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
921
tcg_gen_and_i32(t1, t1, t2);
923
tcg_gen_shri_i32(cpu_sr_t, t1, 31);
925
tcg_gen_mov_i32(REG(B11_8), t0);
929
case 0x2008: /* tst Rm,Rn */
931
TCGv val = tcg_temp_new();
932
tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
933
tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
937
case 0x200a: /* xor Rm,Rn */
938
tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
940
case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
942
if (ctx->flags & FPSCR_SZ) {
943
TCGv_i64 fp = tcg_temp_new_i64();
944
gen_load_fpr64(fp, XREG(B7_4));
945
gen_store_fpr64(fp, XREG(B11_8));
946
tcg_temp_free_i64(fp);
948
tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
951
case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
953
if (ctx->flags & FPSCR_SZ) {
954
TCGv addr_hi = tcg_temp_new();
956
tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
957
tcg_gen_qemu_st_i32(cpu_fregs[fr], REG(B11_8),
958
ctx->memidx, MO_TEUL);
959
tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr_hi,
960
ctx->memidx, MO_TEUL);
961
tcg_temp_free(addr_hi);
963
tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], REG(B11_8),
964
ctx->memidx, MO_TEUL);
967
case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
969
if (ctx->flags & FPSCR_SZ) {
970
TCGv addr_hi = tcg_temp_new();
971
int fr = XREG(B11_8);
972
tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
973
tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
974
tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
975
tcg_temp_free(addr_hi);
977
tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
978
ctx->memidx, MO_TEUL);
981
case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
983
if (ctx->flags & FPSCR_SZ) {
984
TCGv addr_hi = tcg_temp_new();
985
int fr = XREG(B11_8);
986
tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
987
tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
988
tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
989
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
990
tcg_temp_free(addr_hi);
992
tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
993
ctx->memidx, MO_TEUL);
994
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
997
case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
999
TCGv addr = tcg_temp_new_i32();
1000
tcg_gen_subi_i32(addr, REG(B11_8), 4);
1001
if (ctx->flags & FPSCR_SZ) {
1002
int fr = XREG(B7_4);
1003
tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr, ctx->memidx, MO_TEUL);
1004
tcg_gen_subi_i32(addr, addr, 4);
1005
tcg_gen_qemu_st_i32(cpu_fregs[fr], addr, ctx->memidx, MO_TEUL);
1007
tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
1008
ctx->memidx, MO_TEUL);
1010
tcg_gen_mov_i32(REG(B11_8), addr);
1011
tcg_temp_free(addr);
1013
case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1016
TCGv addr = tcg_temp_new_i32();
1017
tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1018
if (ctx->flags & FPSCR_SZ) {
1019
int fr = XREG(B11_8);
1020
tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
1021
ctx->memidx, MO_TEUL);
1022
tcg_gen_addi_i32(addr, addr, 4);
1023
tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
1024
ctx->memidx, MO_TEUL);
1026
tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], addr,
1027
ctx->memidx, MO_TEUL);
1029
tcg_temp_free(addr);
1032
case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1035
TCGv addr = tcg_temp_new();
1036
tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1037
if (ctx->flags & FPSCR_SZ) {
1038
int fr = XREG(B7_4);
1039
tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
1040
ctx->memidx, MO_TEUL);
1041
tcg_gen_addi_i32(addr, addr, 4);
1042
tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
1043
ctx->memidx, MO_TEUL);
1045
tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
1046
ctx->memidx, MO_TEUL);
1048
tcg_temp_free(addr);
1051
case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1052
case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1053
case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1054
case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1055
case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1056
case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1059
if (ctx->flags & FPSCR_PR) {
1062
if (ctx->opcode & 0x0110)
1063
break; /* illegal instruction */
1064
fp0 = tcg_temp_new_i64();
1065
fp1 = tcg_temp_new_i64();
1066
gen_load_fpr64(fp0, DREG(B11_8));
1067
gen_load_fpr64(fp1, DREG(B7_4));
1068
switch (ctx->opcode & 0xf00f) {
1069
case 0xf000: /* fadd Rm,Rn */
1070
gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
1072
case 0xf001: /* fsub Rm,Rn */
1073
gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
1075
case 0xf002: /* fmul Rm,Rn */
1076
gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
1078
case 0xf003: /* fdiv Rm,Rn */
1079
gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
1081
case 0xf004: /* fcmp/eq Rm,Rn */
1082
gen_helper_fcmp_eq_DT(cpu_env, fp0, fp1);
1084
case 0xf005: /* fcmp/gt Rm,Rn */
1085
gen_helper_fcmp_gt_DT(cpu_env, fp0, fp1);
1088
gen_store_fpr64(fp0, DREG(B11_8));
1089
tcg_temp_free_i64(fp0);
1090
tcg_temp_free_i64(fp1);
1092
switch (ctx->opcode & 0xf00f) {
1093
case 0xf000: /* fadd Rm,Rn */
1094
gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1095
cpu_fregs[FREG(B11_8)],
1096
cpu_fregs[FREG(B7_4)]);
1098
case 0xf001: /* fsub Rm,Rn */
1099
gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1100
cpu_fregs[FREG(B11_8)],
1101
cpu_fregs[FREG(B7_4)]);
1103
case 0xf002: /* fmul Rm,Rn */
1104
gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1105
cpu_fregs[FREG(B11_8)],
1106
cpu_fregs[FREG(B7_4)]);
1108
case 0xf003: /* fdiv Rm,Rn */
1109
gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1110
cpu_fregs[FREG(B11_8)],
1111
cpu_fregs[FREG(B7_4)]);
1113
case 0xf004: /* fcmp/eq Rm,Rn */
1114
gen_helper_fcmp_eq_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1115
cpu_fregs[FREG(B7_4)]);
1117
case 0xf005: /* fcmp/gt Rm,Rn */
1118
gen_helper_fcmp_gt_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1119
cpu_fregs[FREG(B7_4)]);
1125
case 0xf00e: /* fmac FR0,RM,Rn */
1128
if (ctx->flags & FPSCR_PR) {
1129
break; /* illegal instruction */
1131
gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1132
cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)],
1133
cpu_fregs[FREG(B11_8)]);
1139
switch (ctx->opcode & 0xff00) {
1140
case 0xc900: /* and #imm,R0 */
1141
tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1143
case 0xcd00: /* and.b #imm,@(R0,GBR) */
1146
addr = tcg_temp_new();
1147
tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1148
val = tcg_temp_new();
1149
tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1150
tcg_gen_andi_i32(val, val, B7_0);
1151
tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1153
tcg_temp_free(addr);
1156
case 0x8b00: /* bf label */
1157
CHECK_NOT_DELAY_SLOT
1158
gen_conditional_jump(ctx, ctx->pc + 2,
1159
ctx->pc + 4 + B7_0s * 2);
1160
ctx->bstate = BS_BRANCH;
1162
case 0x8f00: /* bf/s label */
1163
CHECK_NOT_DELAY_SLOT
1164
gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1165
ctx->flags |= DELAY_SLOT_CONDITIONAL;
1167
case 0x8900: /* bt label */
1168
CHECK_NOT_DELAY_SLOT
1169
gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1171
ctx->bstate = BS_BRANCH;
1173
case 0x8d00: /* bt/s label */
1174
CHECK_NOT_DELAY_SLOT
1175
gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1176
ctx->flags |= DELAY_SLOT_CONDITIONAL;
1178
case 0x8800: /* cmp/eq #imm,R0 */
1179
tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
1181
case 0xc400: /* mov.b @(disp,GBR),R0 */
1183
TCGv addr = tcg_temp_new();
1184
tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1185
tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1186
tcg_temp_free(addr);
1189
case 0xc500: /* mov.w @(disp,GBR),R0 */
1191
TCGv addr = tcg_temp_new();
1192
tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1193
tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1194
tcg_temp_free(addr);
1197
case 0xc600: /* mov.l @(disp,GBR),R0 */
1199
TCGv addr = tcg_temp_new();
1200
tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1201
tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
1202
tcg_temp_free(addr);
1205
case 0xc000: /* mov.b R0,@(disp,GBR) */
1207
TCGv addr = tcg_temp_new();
1208
tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1209
tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1210
tcg_temp_free(addr);
1213
case 0xc100: /* mov.w R0,@(disp,GBR) */
1215
TCGv addr = tcg_temp_new();
1216
tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1217
tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1218
tcg_temp_free(addr);
1221
case 0xc200: /* mov.l R0,@(disp,GBR) */
1223
TCGv addr = tcg_temp_new();
1224
tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1225
tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
1226
tcg_temp_free(addr);
1229
case 0x8000: /* mov.b R0,@(disp,Rn) */
1231
TCGv addr = tcg_temp_new();
1232
tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1233
tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1234
tcg_temp_free(addr);
1237
case 0x8100: /* mov.w R0,@(disp,Rn) */
1239
TCGv addr = tcg_temp_new();
1240
tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1241
tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1242
tcg_temp_free(addr);
1245
case 0x8400: /* mov.b @(disp,Rn),R0 */
1247
TCGv addr = tcg_temp_new();
1248
tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1249
tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1250
tcg_temp_free(addr);
1253
case 0x8500: /* mov.w @(disp,Rn),R0 */
1255
TCGv addr = tcg_temp_new();
1256
tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1257
tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1258
tcg_temp_free(addr);
1261
case 0xc700: /* mova @(disp,PC),R0 */
1262
tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1264
case 0xcb00: /* or #imm,R0 */
1265
tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1267
case 0xcf00: /* or.b #imm,@(R0,GBR) */
1270
addr = tcg_temp_new();
1271
tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1272
val = tcg_temp_new();
1273
tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1274
tcg_gen_ori_i32(val, val, B7_0);
1275
tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1277
tcg_temp_free(addr);
1280
case 0xc300: /* trapa #imm */
1283
CHECK_NOT_DELAY_SLOT
1284
tcg_gen_movi_i32(cpu_pc, ctx->pc);
1285
imm = tcg_const_i32(B7_0);
1286
gen_helper_trapa(cpu_env, imm);
1288
ctx->bstate = BS_BRANCH;
1291
case 0xc800: /* tst #imm,R0 */
1293
TCGv val = tcg_temp_new();
1294
tcg_gen_andi_i32(val, REG(0), B7_0);
1295
tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1299
case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1301
TCGv val = tcg_temp_new();
1302
tcg_gen_add_i32(val, REG(0), cpu_gbr);
1303
tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
1304
tcg_gen_andi_i32(val, val, B7_0);
1305
tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1309
case 0xca00: /* xor #imm,R0 */
1310
tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1312
case 0xce00: /* xor.b #imm,@(R0,GBR) */
1315
addr = tcg_temp_new();
1316
tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1317
val = tcg_temp_new();
1318
tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1319
tcg_gen_xori_i32(val, val, B7_0);
1320
tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1322
tcg_temp_free(addr);
1327
switch (ctx->opcode & 0xf08f) {
1328
case 0x408e: /* ldc Rm,Rn_BANK */
1330
tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1332
case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1334
tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
1335
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1337
case 0x0082: /* stc Rm_BANK,Rn */
1339
tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1341
case 0x4083: /* stc.l Rm_BANK,@-Rn */
1344
TCGv addr = tcg_temp_new();
1345
tcg_gen_subi_i32(addr, REG(B11_8), 4);
1346
tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
1347
tcg_gen_mov_i32(REG(B11_8), addr);
1348
tcg_temp_free(addr);
1353
switch (ctx->opcode & 0xf0ff) {
1354
case 0x0023: /* braf Rn */
1355
CHECK_NOT_DELAY_SLOT
1356
tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1357
ctx->flags |= DELAY_SLOT;
1358
ctx->delayed_pc = (uint32_t) - 1;
1360
case 0x0003: /* bsrf Rn */
1361
CHECK_NOT_DELAY_SLOT
1362
tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1363
tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1364
ctx->flags |= DELAY_SLOT;
1365
ctx->delayed_pc = (uint32_t) - 1;
1367
case 0x4015: /* cmp/pl Rn */
1368
tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
1370
case 0x4011: /* cmp/pz Rn */
1371
tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
1373
case 0x4010: /* dt Rn */
1374
tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1375
tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
1377
case 0x402b: /* jmp @Rn */
1378
CHECK_NOT_DELAY_SLOT
1379
tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1380
ctx->flags |= DELAY_SLOT;
1381
ctx->delayed_pc = (uint32_t) - 1;
1383
case 0x400b: /* jsr @Rn */
1384
CHECK_NOT_DELAY_SLOT
1385
tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1386
tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1387
ctx->flags |= DELAY_SLOT;
1388
ctx->delayed_pc = (uint32_t) - 1;
1390
case 0x400e: /* ldc Rm,SR */
1393
TCGv val = tcg_temp_new();
1394
tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
1397
ctx->bstate = BS_STOP;
1400
case 0x4007: /* ldc.l @Rm+,SR */
1403
TCGv val = tcg_temp_new();
1404
tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
1405
tcg_gen_andi_i32(val, val, 0x700083f3);
1408
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1409
ctx->bstate = BS_STOP;
1412
case 0x0002: /* stc SR,Rn */
1414
gen_read_sr(REG(B11_8));
1416
case 0x4003: /* stc SR,@-Rn */
1419
TCGv addr = tcg_temp_new();
1420
TCGv val = tcg_temp_new();
1421
tcg_gen_subi_i32(addr, REG(B11_8), 4);
1423
tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1424
tcg_gen_mov_i32(REG(B11_8), addr);
1426
tcg_temp_free(addr);
1429
#define LD(reg,ldnum,ldpnum,prechk) \
1432
tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1436
tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1437
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1439
#define ST(reg,stnum,stpnum,prechk) \
1442
tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1447
TCGv addr = tcg_temp_new(); \
1448
tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1449
tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1450
tcg_gen_mov_i32(REG(B11_8), addr); \
1451
tcg_temp_free(addr); \
1454
#define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1455
LD(reg,ldnum,ldpnum,prechk) \
1456
ST(reg,stnum,stpnum,prechk)
1457
LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1458
LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1459
LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1460
LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1461
ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1462
LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
1463
LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1464
LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1465
LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1466
LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1467
LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1468
case 0x406a: /* lds Rm,FPSCR */
1470
gen_helper_ld_fpscr(cpu_env, REG(B11_8));
1471
ctx->bstate = BS_STOP;
1473
case 0x4066: /* lds.l @Rm+,FPSCR */
1476
TCGv addr = tcg_temp_new();
1477
tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
1478
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1479
gen_helper_ld_fpscr(cpu_env, addr);
1480
tcg_temp_free(addr);
1481
ctx->bstate = BS_STOP;
1484
case 0x006a: /* sts FPSCR,Rn */
1486
tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1488
case 0x4062: /* sts FPSCR,@-Rn */
1492
val = tcg_temp_new();
1493
tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1494
addr = tcg_temp_new();
1495
tcg_gen_subi_i32(addr, REG(B11_8), 4);
1496
tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1497
tcg_gen_mov_i32(REG(B11_8), addr);
1498
tcg_temp_free(addr);
1502
case 0x00c3: /* movca.l R0,@Rm */
1504
TCGv val = tcg_temp_new();
1505
tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
1506
gen_helper_movcal(cpu_env, REG(B11_8), val);
1507
tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1509
ctx->has_movcal = 1;
1512
/* MOVUA.L @Rm,R0 (Rm) -> R0
1513
Load non-boundary-aligned data */
1514
tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1517
/* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1518
Load non-boundary-aligned data */
1519
tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1520
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1522
case 0x0029: /* movt Rn */
1523
tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
1528
If (T == 1) R0 -> (Rn)
1531
if (ctx->features & SH_FEATURE_SH4A) {
1532
TCGLabel *label = gen_new_label();
1533
tcg_gen_mov_i32(cpu_sr_t, cpu_ldst);
1534
tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1535
tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1536
gen_set_label(label);
1537
tcg_gen_movi_i32(cpu_ldst, 0);
1545
When interrupt/exception
1548
if (ctx->features & SH_FEATURE_SH4A) {
1549
tcg_gen_movi_i32(cpu_ldst, 0);
1550
tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1551
tcg_gen_movi_i32(cpu_ldst, 1);
1555
case 0x0093: /* ocbi @Rn */
1557
gen_helper_ocbi(cpu_env, REG(B11_8));
1560
case 0x00a3: /* ocbp @Rn */
1561
case 0x00b3: /* ocbwb @Rn */
1562
/* These instructions are supposed to do nothing in case of
1563
a cache miss. Given that we only partially emulate caches
1564
it is safe to simply ignore them. */
1566
case 0x0083: /* pref @Rn */
1568
case 0x00d3: /* prefi @Rn */
1569
if (ctx->features & SH_FEATURE_SH4A)
1573
case 0x00e3: /* icbi @Rn */
1574
if (ctx->features & SH_FEATURE_SH4A)
1578
case 0x00ab: /* synco */
1579
if (ctx->features & SH_FEATURE_SH4A)
1583
case 0x4024: /* rotcl Rn */
1585
TCGv tmp = tcg_temp_new();
1586
tcg_gen_mov_i32(tmp, cpu_sr_t);
1587
tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1588
tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1589
tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1593
case 0x4025: /* rotcr Rn */
1595
TCGv tmp = tcg_temp_new();
1596
tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1597
tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1598
tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1599
tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1603
case 0x4004: /* rotl Rn */
1604
tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1605
tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
1607
case 0x4005: /* rotr Rn */
1608
tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
1609
tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1611
case 0x4000: /* shll Rn */
1612
case 0x4020: /* shal Rn */
1613
tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1614
tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1616
case 0x4021: /* shar Rn */
1617
tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1618
tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1620
case 0x4001: /* shlr Rn */
1621
tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1622
tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1624
case 0x4008: /* shll2 Rn */
1625
tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1627
case 0x4018: /* shll8 Rn */
1628
tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1630
case 0x4028: /* shll16 Rn */
1631
tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1633
case 0x4009: /* shlr2 Rn */
1634
tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1636
case 0x4019: /* shlr8 Rn */
1637
tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1639
case 0x4029: /* shlr16 Rn */
1640
tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1642
case 0x401b: /* tas.b @Rn */
1645
addr = tcg_temp_local_new();
1646
tcg_gen_mov_i32(addr, REG(B11_8));
1647
val = tcg_temp_local_new();
1648
tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1649
tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1650
tcg_gen_ori_i32(val, val, 0x80);
1651
tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1653
tcg_temp_free(addr);
1656
case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1658
tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1660
case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1662
tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1664
case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1666
if (ctx->flags & FPSCR_PR) {
1668
if (ctx->opcode & 0x0100)
1669
break; /* illegal instruction */
1670
fp = tcg_temp_new_i64();
1671
gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1672
gen_store_fpr64(fp, DREG(B11_8));
1673
tcg_temp_free_i64(fp);
1676
gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_env, cpu_fpul);
1679
case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1681
if (ctx->flags & FPSCR_PR) {
1683
if (ctx->opcode & 0x0100)
1684
break; /* illegal instruction */
1685
fp = tcg_temp_new_i64();
1686
gen_load_fpr64(fp, DREG(B11_8));
1687
gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
1688
tcg_temp_free_i64(fp);
1691
gen_helper_ftrc_FT(cpu_fpul, cpu_env, cpu_fregs[FREG(B11_8)]);
1694
case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1697
gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1700
case 0xf05d: /* fabs FRn/DRn */
1702
if (ctx->flags & FPSCR_PR) {
1703
if (ctx->opcode & 0x0100)
1704
break; /* illegal instruction */
1705
TCGv_i64 fp = tcg_temp_new_i64();
1706
gen_load_fpr64(fp, DREG(B11_8));
1707
gen_helper_fabs_DT(fp, fp);
1708
gen_store_fpr64(fp, DREG(B11_8));
1709
tcg_temp_free_i64(fp);
1711
gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1714
case 0xf06d: /* fsqrt FRn */
1716
if (ctx->flags & FPSCR_PR) {
1717
if (ctx->opcode & 0x0100)
1718
break; /* illegal instruction */
1719
TCGv_i64 fp = tcg_temp_new_i64();
1720
gen_load_fpr64(fp, DREG(B11_8));
1721
gen_helper_fsqrt_DT(fp, cpu_env, fp);
1722
gen_store_fpr64(fp, DREG(B11_8));
1723
tcg_temp_free_i64(fp);
1725
gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1726
cpu_fregs[FREG(B11_8)]);
1729
case 0xf07d: /* fsrra FRn */
1732
case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1734
if (!(ctx->flags & FPSCR_PR)) {
1735
tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1738
case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1740
if (!(ctx->flags & FPSCR_PR)) {
1741
tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1744
case 0xf0ad: /* fcnvsd FPUL,DRn */
1747
TCGv_i64 fp = tcg_temp_new_i64();
1748
gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1749
gen_store_fpr64(fp, DREG(B11_8));
1750
tcg_temp_free_i64(fp);
1753
case 0xf0bd: /* fcnvds DRn,FPUL */
1756
TCGv_i64 fp = tcg_temp_new_i64();
1757
gen_load_fpr64(fp, DREG(B11_8));
1758
gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
1759
tcg_temp_free_i64(fp);
1762
case 0xf0ed: /* fipr FVm,FVn */
1764
if ((ctx->flags & FPSCR_PR) == 0) {
1766
m = tcg_const_i32((ctx->opcode >> 8) & 3);
1767
n = tcg_const_i32((ctx->opcode >> 10) & 3);
1768
gen_helper_fipr(cpu_env, m, n);
1774
case 0xf0fd: /* ftrv XMTRX,FVn */
1776
if ((ctx->opcode & 0x0300) == 0x0100 &&
1777
(ctx->flags & FPSCR_PR) == 0) {
1779
n = tcg_const_i32((ctx->opcode >> 10) & 3);
1780
gen_helper_ftrv(cpu_env, n);
1787
fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1788
ctx->opcode, ctx->pc);
1791
tcg_gen_movi_i32(cpu_pc, ctx->pc);
1792
if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1793
gen_helper_raise_slot_illegal_instruction(cpu_env);
1795
gen_helper_raise_illegal_instruction(cpu_env);
1797
ctx->bstate = BS_BRANCH;
1800
/* Translate one guest instruction and perform delay-slot bookkeeping
 * around it.
 *
 * NOTE(review): this chunk was extracted with interior source lines
 * missing and stray line-number artifacts (e.g. "1802") interleaved
 * with the code.  The artifact lines and the surviving code are
 * preserved verbatim below; presumably the dropped lines include the
 * function's opening brace, the call into the opcode switch, the
 * else-branch closing braces and the final gen_jump() path — confirm
 * against the full file before relying on this. */
static void decode_opc(DisasContext * ctx)
1802
/* Snapshot the flags as they were BEFORE translating this insn, so we
 * can tell whether this instruction itself sat in a delay slot. */
uint32_t old_flags = ctx->flags;
1806
/* The instruction just translated was in a delay slot: emit the code
 * that leaves the slot and performs the pending (conditional) branch. */
if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1807
if (ctx->flags & DELAY_SLOT_CLEARME) {
1810
/* go out of the delay slot */
1811
uint32_t new_flags = ctx->flags;
1812
new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1813
/* Persist the cleared delay-slot flags into the CPU state. */
gen_store_flags(new_flags);
1816
/* A delay slot always ends the translation block with a branch. */
ctx->bstate = BS_BRANCH;
1817
if (old_flags & DELAY_SLOT_CONDITIONAL) {
1818
/* bf/s / bt/s: branch only if the recorded condition held. */
gen_delayed_conditional_jump(ctx);
1819
} else if (old_flags & DELAY_SLOT) {
1825
/* go into a delay slot */
1826
/* The instruction just translated WAS a delayed branch: record the
 * delay-slot flags so the next insn knows it is in the slot. */
if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
1827
gen_store_flags(ctx->flags);
1830
/* Translate a block of guest SH-4 code into a TCG TranslationBlock.
 *
 * NOTE(review): extracted with interior lines missing and stray
 * line-number artifacts (e.g. "1832") interleaved; artifact lines and
 * surviving code are preserved verbatim.  Presumably the dropped lines
 * include the DisasContext declaration, pc_start initialisation, the
 * gen_tb_start() call, loop increments, the BS_* case labels and the
 * closing braces — confirm against the full file. */
void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
1832
SuperHCPU *cpu = sh_env_get_cpu(env);
1833
CPUState *cs = CPU(cpu);
1835
target_ulong pc_start;
1841
/* Seed the translation-time context from the TB's compile-time flags. */
ctx.flags = (uint32_t)tb->flags;
1842
ctx.bstate = BS_NONE;
1843
/* memidx 1 = user mode when SR.MD is clear, 0 = privileged.  Mirrors
 * the IS_USER() test at the top of the file. */
ctx.memidx = (ctx.flags & (1u << SR_MD)) == 0 ? 1 : 0;
1844
/* We don't know if the delayed pc came from a dynamic or static branch,
1845
so assume it is a dynamic branch. */
1846
ctx.delayed_pc = -1; /* use delayed pc from env pointer */
1848
ctx.singlestep_enabled = cs->singlestep_enabled;
1849
ctx.features = env->features;
1850
ctx.has_movcal = (ctx.flags & TB_FLAG_PENDING_MOVCA);
1853
/* Bound the number of guest insns translated into this TB. */
max_insns = tb->cflags & CF_COUNT_MASK;
1854
if (max_insns == 0) {
1855
max_insns = CF_COUNT_MASK;
1857
if (max_insns > TCG_MAX_INSNS) {
1858
max_insns = TCG_MAX_INSNS;
1862
/* Main translation loop: stop on a branch/exception (bstate change)
 * or when the TCG opcode buffer fills up. */
while (ctx.bstate == BS_NONE && !tcg_op_buf_full()) {
1863
tcg_gen_insn_start(ctx.pc, ctx.flags);
1866
if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
1867
/* We have hit a breakpoint - make sure PC is up-to-date */
1868
tcg_gen_movi_i32(cpu_pc, ctx.pc);
1869
gen_helper_debug(cpu_env);
1870
ctx.bstate = BS_BRANCH;
1871
/* The address covered by the breakpoint must be included in
1872
[tb->pc, tb->pc + tb->size) in order for it to be
1873
properly cleared -- thus we increment the PC here so that
1874
the logic setting tb->size below does the right thing. */
1879
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
1883
/* Fetch the 16-bit opcode; decode_opc() presumably follows in a
 * dropped line — confirm against the full file. */
ctx.opcode = cpu_lduw_code(env, ctx.pc);
1886
/* Stop at guest page boundaries so TB invalidation stays correct. */
if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
1888
if (cs->singlestep_enabled) {
1891
if (num_insns >= max_insns)
1896
if (tb->cflags & CF_LAST_IO)
1898
/* Under single-step, raise a debug exception after every insn. */
if (cs->singlestep_enabled) {
1899
tcg_gen_movi_i32(cpu_pc, ctx.pc);
1900
gen_helper_debug(cpu_env);
1902
/* Emit the TB epilogue appropriate to how translation stopped.
 * NOTE(review): the BS_STOP/BS_NONE/BS_EXCP/BS_BRANCH case labels
 * appear to be among the dropped lines. */
switch (ctx.bstate) {
1904
/* gen_op_interrupt_restart(); */
1908
gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
1910
gen_goto_tb(&ctx, 0, ctx.pc);
1913
/* gen_op_interrupt_restart(); */
1922
gen_tb_end(tb, num_insns);
1924
/* Record the guest byte span and insn count covered by this TB. */
tb->size = ctx.pc - pc_start;
1925
tb->icount = num_insns;
1928
/* Optional disassembly trace of the guest code just translated. */
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
1929
&& qemu_log_in_addr_range(pc_start)) {
1931
qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
1932
log_target_disas(cs, pc_start, ctx.pc - pc_start, 0);
1939
void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
1943
env->flags = data[1];