2
* Xilinx MicroBlaze emulation for qemu: main translation routines.
4
* Copyright (c) 2009 Edgar E. Iglesias.
5
* Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
7
* This library is free software; you can redistribute it and/or
8
* modify it under the terms of the GNU Lesser General Public
9
* License as published by the Free Software Foundation; either
10
* version 2 of the License, or (at your option) any later version.
12
* This library is distributed in the hope that it will be useful,
13
* but WITHOUT ANY WARRANTY; without even the implied warranty of
14
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15
* Lesser General Public License for more details.
17
* You should have received a copy of the GNU Lesser General Public
18
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
21
#include "qemu/osdep.h"
23
#include "disas/disas.h"
24
#include "exec/exec-all.h"
26
#include "exec/helper-proto.h"
27
#include "microblaze-decode.h"
28
#include "exec/cpu_ldst.h"
29
#include "exec/helper-gen.h"
31
#include "trace-tcg.h"
38
#if DISAS_MB && !SIM_COMPAT
39
# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
41
# define LOG_DIS(...) do { } while (0)
46
#define EXTRACT_FIELD(src, start, end) \
47
(((src) >> start) & ((1 << (end - start + 1)) - 1))
49
static TCGv env_debug;
50
static TCGv_env cpu_env;
51
static TCGv cpu_R[32];
52
static TCGv cpu_SR[18];
54
static TCGv env_btaken;
55
static TCGv env_btarget;
56
static TCGv env_iflags;
57
static TCGv env_res_addr;
58
static TCGv env_res_val;
60
#include "exec/gen-icount.h"
62
/* This is the state at translation time. */
63
typedef struct DisasContext {
74
unsigned int cpustate_changed;
75
unsigned int delayed_branch;
76
unsigned int tb_flags, synced_flags; /* tb dependent flags. */
77
unsigned int clear_imm;
82
#define JMP_DIRECT_CC 2
83
#define JMP_INDIRECT 3
87
int abort_at_next_insn;
89
struct TranslationBlock *tb;
90
int singlestep_enabled;
93
/* Printable names for the 32 general purpose registers (for disas/log).  */
static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};
101
/* Printable names for the special registers (pc, msr, sr2..sr18).  */
static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};
108
/* Synch the tb dependent flags between translator and runtime.  */
static inline void t_sync_flags(DisasContext *dc)
{
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}
117
/* Emit code that raises exception 'index' at the current PC and ends
 * the translation block.  */
static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    /* Make sure the runtime sees up-to-date iflags before taking the trap.  */
    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}
128
/* Direct TB chaining is only safe when the destination stays on the same
 * guest page as this TB (softmmu); user mode can always chain.  */
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
137
/* Jump to 'dest', chaining TBs directly when use_goto_tb() allows it,
 * otherwise exiting to the main loop.  'n' selects the TB jump slot.  */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}
149
/* Copy the MSR carry flag (MSR bit 31) into d as 0 or 1.  */
static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}
155
* write_carry sets the carry bits in MSR based on bit 0 of v.
156
* v[31:1] are ignored.
158
static void write_carry(DisasContext *dc, TCGv v)
160
TCGv t0 = tcg_temp_new();
161
tcg_gen_shli_tl(t0, v, 31);
162
tcg_gen_sari_tl(t0, t0, 31);
163
tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
164
tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
166
tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
170
/* Set the MSR carry flag to the constant 'carry'.  */
static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}
178
/* True if ALU operand b is a small immediate that may deserve
180
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
182
/* Immediate insn without the imm prefix ? */
183
return dc->type_b && !(dc->tb_flags & IMM_FLAG);
186
/* Return a pointer to the TCGv holding ALU operand b: env_imm for
 * immediate-type insns (combining a preceding imm prefix if present,
 * otherwise sign-extending the 16-bit immediate), else register rb.  */
static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}
198
/*
 * add/addc/addk/addkc (and immediate forms): rd = ra + op_b [+ carry].
 * opcode bit 2 (k) keeps the MSR carry untouched; bit 1 (c) adds carry in.
 */
static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        /* rd == r0: the sum is discarded but carry must still be updated.  */
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}
252
/*
 * rsub family and cmp/cmpu.  Subtraction is computed as b + ~a + carry
 * (carry defaults to 1), matching the hardware borrow convention.
 */
static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    /* cmp/cmpu are encoded as register-form rsubk with imm bit 0 set.  */
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
            k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        /* rd == r0: discard the result but still update carry.  */
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}
323
static void dec_pattern(DisasContext *dc)
327
if ((dc->tb_flags & MSR_EE_FLAG)
328
&& (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
329
&& !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
330
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
331
t_gen_raise_exception(dc, EXCP_HW_EXCP);
334
mode = dc->opcode & 3;
338
LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
340
gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
343
LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
345
tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
346
cpu_R[dc->ra], cpu_R[dc->rb]);
350
LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
352
tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
353
cpu_R[dc->ra], cpu_R[dc->rb]);
357
cpu_abort(CPU(dc->cpu),
358
"unsupported pattern insn opcode=%x\n", dc->opcode);
363
/* and/andn: rd = ra & op_b (or ra & ~op_b for andn).  Register-form
 * encodings with imm bit 10 set are pattern-compare insns instead.  */
static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
384
/* or: rd = ra | op_b.  Register-form encodings with imm bit 10 set are
 * pattern-compare insns instead.  */
static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
396
/* xor: rd = ra ^ op_b.  Register-form encodings with imm bit 10 set are
 * pattern-compare insns instead.  */
static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
408
/* Copy the current MSR into d.  */
static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}
413
/* Write v into MSR, preserving the read-only PVR bit.  */
static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    /* Bug fix: OR in the masked value 't', not the raw 'v' — otherwise the
       guest could set the read-only PVR bit and the masking above is moot.  */
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}
426
static void dec_msr(DisasContext *dc)
428
CPUState *cs = CPU(dc->cpu);
430
unsigned int sr, to, rn;
431
int mem_index = cpu_mmu_index(&dc->cpu->env, false);
433
sr = dc->imm & ((1 << 14) - 1);
434
to = dc->imm & (1 << 14);
437
dc->cpustate_changed = 1;
439
/* msrclr and msrset. */
440
if (!(dc->imm & (1 << 15))) {
441
unsigned int clr = dc->ir & (1 << 16);
443
LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
446
if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
451
if ((dc->tb_flags & MSR_EE_FLAG)
452
&& mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
453
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
454
t_gen_raise_exception(dc, EXCP_HW_EXCP);
459
msr_read(dc, cpu_R[dc->rd]);
464
tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));
467
tcg_gen_not_tl(t1, t1);
468
tcg_gen_and_tl(t0, t0, t1);
470
tcg_gen_or_tl(t0, t0, t1);
474
tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
475
dc->is_jmp = DISAS_UPDATE;
480
if ((dc->tb_flags & MSR_EE_FLAG)
481
&& mem_index == MMU_USER_IDX) {
482
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
483
t_gen_raise_exception(dc, EXCP_HW_EXCP);
488
#if !defined(CONFIG_USER_ONLY)
489
/* Catch read/writes to the mmu block. */
490
if ((sr & ~0xff) == 0x1000) {
492
LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
494
gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
496
gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
502
LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
507
msr_write(dc, cpu_R[dc->ra]);
510
tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
513
tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
516
tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
519
tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
522
tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
525
cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
529
LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);
533
tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
536
msr_read(dc, cpu_R[dc->rd]);
539
tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
542
tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
545
tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
548
tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
551
tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
554
tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
570
tcg_gen_ld_tl(cpu_R[dc->rd],
571
cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
574
cpu_abort(cs, "unknown mfs reg %x\n", sr);
580
tcg_gen_movi_tl(cpu_R[0], 0);
584
/* Multiplier unit. */
585
/* mul/muli/mulh/mulhsu/mulhu.  Raises illegal-opcode when the core has no
 * hardware multiplier.  The high-word variants write the upper 32 bits of
 * the 64-bit product into rd (the low half lands in a scratch temp).  */
static void dec_mul(DisasContext *dc)
{
    TCGv tmp;
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3
        && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
        /* nop (?) */
        return;
    }

    tmp = tcg_temp_new();
    switch (subcode) {
    case 0:
        LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 1:
        LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_muls2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 2:
        LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_mulsu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 3:
        LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_mulu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    default:
        cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
        break;
    }
    tcg_temp_free(tmp);
}
638
/* idiv/idivu via helpers (divide-by-zero is handled in the helper).
 * Raises illegal-opcode when the core has no hardware divider.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0); /* r0 stays zero.  */
}
661
static void dec_barrel(DisasContext *dc)
666
if ((dc->tb_flags & MSR_EE_FLAG)
667
&& (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
668
&& !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
669
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
670
t_gen_raise_exception(dc, EXCP_HW_EXCP);
674
s = dc->imm & (1 << 10);
675
t = dc->imm & (1 << 9);
677
LOG_DIS("bs%s%s r%d r%d r%d\n",
678
s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
682
tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
683
tcg_gen_andi_tl(t0, t0, 31);
686
tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
689
tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
691
tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
695
static void dec_bit(DisasContext *dc)
697
CPUState *cs = CPU(dc->cpu);
700
int mem_index = cpu_mmu_index(&dc->cpu->env, false);
702
op = dc->ir & ((1 << 9) - 1);
708
LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
709
tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
710
write_carry(dc, cpu_R[dc->ra]);
712
tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
713
tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
721
LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);
723
/* Update carry. Note that write carry only looks at the LSB. */
724
write_carry(dc, cpu_R[dc->ra]);
727
tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
729
tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
733
LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
734
tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
737
LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
738
tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
745
LOG_DIS("wdc r%d\n", dc->ra);
746
if ((dc->tb_flags & MSR_EE_FLAG)
747
&& mem_index == MMU_USER_IDX) {
748
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
749
t_gen_raise_exception(dc, EXCP_HW_EXCP);
755
LOG_DIS("wic r%d\n", dc->ra);
756
if ((dc->tb_flags & MSR_EE_FLAG)
757
&& mem_index == MMU_USER_IDX) {
758
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
759
t_gen_raise_exception(dc, EXCP_HW_EXCP);
764
if ((dc->tb_flags & MSR_EE_FLAG)
765
&& (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
766
&& !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
767
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
768
t_gen_raise_exception(dc, EXCP_HW_EXCP);
770
if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
771
tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
776
LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
777
tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
781
LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
782
tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
785
cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
786
dc->pc, op, dc->rd, dc->ra, dc->rb);
791
/* Materialize a pending direct branch into env_btaken/env_btarget so the
 * runtime state is consistent if we fault inside a delay slot.  */
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}
802
/* imm prefix: latch the upper 16 bits into env_imm for the next insn.  */
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}
810
static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
812
unsigned int extimm = dc->tb_flags & IMM_FLAG;
813
/* Should be set to one if r1 is used by loadstores. */
816
/* All load/stores use ra. */
817
if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
821
/* Treat the common cases first. */
823
/* If any of the regs is r0, return a ptr to the other. */
825
return &cpu_R[dc->rb];
826
} else if (dc->rb == 0) {
827
return &cpu_R[dc->ra];
830
if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
835
tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
838
gen_helper_stackprot(cpu_env, *t);
845
return &cpu_R[dc->ra];
848
tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
849
tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
852
tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
856
gen_helper_stackprot(cpu_env, *t);
861
static void dec_load(DisasContext *dc)
864
unsigned int size, rev = 0, ex = 0;
867
mop = dc->opcode & 3;
870
rev = (dc->ir >> 9) & 1;
871
ex = (dc->ir >> 10) & 1;
878
if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
879
&& (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
880
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
881
t_gen_raise_exception(dc, EXCP_HW_EXCP);
885
LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
889
addr = compute_ldst_addr(dc, &t);
892
* When doing reverse accesses we need to do two things.
894
* 1. Reverse the address wrt endianness.
895
* 2. Byteswap the data lanes on the way back into the CPU core.
897
if (rev && size != 4) {
898
/* Endian reverse the address. t is addr. */
906
TCGv low = tcg_temp_new();
908
/* Force addr into the temp. */
911
tcg_gen_mov_tl(t, *addr);
915
tcg_gen_andi_tl(low, t, 3);
916
tcg_gen_sub_tl(low, tcg_const_tl(3), low);
917
tcg_gen_andi_tl(t, t, ~3);
918
tcg_gen_or_tl(t, t, low);
919
tcg_gen_mov_tl(env_imm, t);
927
/* Force addr into the temp. */
930
tcg_gen_xori_tl(t, *addr, 2);
933
tcg_gen_xori_tl(t, t, 2);
937
cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
942
/* lwx does not throw unaligned access errors, so force alignment */
944
/* Force addr into the temp. */
947
tcg_gen_mov_tl(t, *addr);
950
tcg_gen_andi_tl(t, t, ~3);
953
/* If we get a fault on a dslot, the jmpstate better be in sync. */
956
/* Verify alignment if needed. */
958
* Microblaze gives MMU faults priority over faults due to
959
* unaligned addresses. That's why we speculatively do the load
960
* into v. If the load succeeds, we verify alignment of the
961
* address and if that succeeds we write into the destination reg.
964
tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);
966
if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
967
tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
968
gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
969
tcg_const_tl(0), tcg_const_tl(size - 1));
973
tcg_gen_mov_tl(env_res_addr, *addr);
974
tcg_gen_mov_tl(env_res_val, v);
977
tcg_gen_mov_tl(cpu_R[dc->rd], v);
982
/* no support for AXI exclusive so always clear C */
990
static void dec_store(DisasContext *dc)
992
TCGv t, *addr, swx_addr;
993
TCGLabel *swx_skip = NULL;
994
unsigned int size, rev = 0, ex = 0;
997
mop = dc->opcode & 3;
1000
rev = (dc->ir >> 9) & 1;
1001
ex = (dc->ir >> 10) & 1;
1008
if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
1009
&& (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
1010
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1011
t_gen_raise_exception(dc, EXCP_HW_EXCP);
1015
LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
1018
/* If we get a fault on a dslot, the jmpstate better be in sync. */
1020
addr = compute_ldst_addr(dc, &t);
1022
swx_addr = tcg_temp_local_new();
1026
/* Force addr into the swx_addr. */
1027
tcg_gen_mov_tl(swx_addr, *addr);
1029
/* swx does not throw unaligned access errors, so force alignment */
1030
tcg_gen_andi_tl(swx_addr, swx_addr, ~3);
1032
write_carryi(dc, 1);
1033
swx_skip = gen_new_label();
1034
tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);
1036
/* Compare the value loaded at lwx with current contents of
1037
the reserved location.
1038
FIXME: This only works for system emulation where we can expect
1039
this compare and the following write to be atomic. For user
1040
emulation we need to add atomicity between threads. */
1041
tval = tcg_temp_new();
1042
tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
1044
tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
1045
write_carryi(dc, 0);
1046
tcg_temp_free(tval);
1049
if (rev && size != 4) {
1050
/* Endian reverse the address. t is addr. */
1058
TCGv low = tcg_temp_new();
1060
/* Force addr into the temp. */
1063
tcg_gen_mov_tl(t, *addr);
1067
tcg_gen_andi_tl(low, t, 3);
1068
tcg_gen_sub_tl(low, tcg_const_tl(3), low);
1069
tcg_gen_andi_tl(t, t, ~3);
1070
tcg_gen_or_tl(t, t, low);
1071
tcg_gen_mov_tl(env_imm, t);
1079
/* Force addr into the temp. */
1082
tcg_gen_xori_tl(t, *addr, 2);
1085
tcg_gen_xori_tl(t, t, 2);
1089
cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
1093
tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop);
1095
/* Verify alignment if needed. */
1096
if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
1097
tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1098
/* FIXME: if the alignment is wrong, we should restore the value
1099
* in memory. One possible way to achieve this is to probe
1100
* the MMU prior to the memaccess, thay way we could put
1101
* the alignment checks in between the probe and the mem
1104
gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
1105
tcg_const_tl(1), tcg_const_tl(size - 1));
1109
gen_set_label(swx_skip);
1111
tcg_temp_free(swx_addr);
1117
/* Evaluate branch condition code 'cc' on (a, b) and store 0/1 into d.  */
static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
    case CC_EQ:
        tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
        break;
    case CC_NE:
        tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
        break;
    case CC_LT:
        tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
        break;
    case CC_LE:
        tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
        break;
    case CC_GE:
        tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
        break;
    case CC_GT:
        tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
        break;
    default:
        cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
        break;
    }
}
1145
/* Set PC to pc_true when env_btaken is non-zero, else pc_false.  */
static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}
1155
/* Conditional branches (beq/bne/blt/ble/bge/bgt, with optional delay
 * slot).  The condition compares ra against zero; the target is pc-relative
 * via op_b.  */
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        /* Record whether the delay slot carries an imm prefix, for
           exception restart.  */
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}
1186
static void dec_br(DisasContext *dc)
1188
unsigned int dslot, link, abs, mbar;
1189
int mem_index = cpu_mmu_index(&dc->cpu->env, false);
1191
dslot = dc->ir & (1 << 20);
1192
abs = dc->ir & (1 << 19);
1193
link = dc->ir & (1 << 18);
1195
/* Memory barrier. */
1196
mbar = (dc->ir >> 16) & 31;
1197
if (mbar == 2 && dc->imm == 4) {
1198
/* mbar IMM & 16 decodes to sleep. */
1200
TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
1201
TCGv_i32 tmp_1 = tcg_const_i32(1);
1206
tcg_gen_st_i32(tmp_1, cpu_env,
1207
-offsetof(MicroBlazeCPU, env)
1208
+offsetof(CPUState, halted));
1209
tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
1210
gen_helper_raise_exception(cpu_env, tmp_hlt);
1211
tcg_temp_free_i32(tmp_hlt);
1212
tcg_temp_free_i32(tmp_1);
1215
LOG_DIS("mbar %d\n", dc->rd);
1217
dc->cpustate_changed = 1;
1221
LOG_DIS("br%s%s%s%s imm=%x\n",
1222
abs ? "a" : "", link ? "l" : "",
1223
dc->type_b ? "i" : "", dslot ? "d" : "",
1226
dc->delayed_branch = 1;
1228
dc->delayed_branch = 2;
1229
dc->tb_flags |= D_FLAG;
1230
tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
1231
cpu_env, offsetof(CPUMBState, bimm));
1234
tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
1236
dc->jmp = JMP_INDIRECT;
1238
tcg_gen_movi_tl(env_btaken, 1);
1239
tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
1240
if (link && !dslot) {
1241
if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
1242
t_gen_raise_exception(dc, EXCP_BREAK);
1244
if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
1245
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1246
t_gen_raise_exception(dc, EXCP_HW_EXCP);
1250
t_gen_raise_exception(dc, EXCP_DEBUG);
1254
if (dec_alu_op_b_is_small_imm(dc)) {
1255
dc->jmp = JMP_DIRECT;
1256
dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
1258
tcg_gen_movi_tl(env_btaken, 1);
1259
tcg_gen_movi_tl(env_btarget, dc->pc);
1260
tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
1265
/* Return-from-interrupt MSR update: set IE and restore VM/UM from their
 * saved (shifted) copies.  */
static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    dc->tb_flags &= ~DRTI_FLAG;
}
1282
/* Return-from-break MSR update: clear BIP and restore VM/UM from their
 * saved (shifted) copies.  */
static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    dc->tb_flags &= ~DRTB_FLAG;
}
1299
/* Return-from-exception MSR update: set EE, clear EIP, and restore VM/UM
 * from their saved (shifted) copies.  */
static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    dc->tb_flags &= ~DRTE_FLAG;
}
1318
/* rtsd/rtid/rtbd/rted: returns with a mandatory delay slot.  The i/b/e
 * variants are privileged and additionally schedule the matching MSR
 * restore (DRTI/DRTB/DRTE) for after the delay slot.  */
static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    /* Record whether the delay slot carries an imm prefix, for exception
       restart.  */
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
1364
/* Check that the FPU-v2 feature is configured; raise an FPU exception
 * otherwise (when EE is enabled).  Returns the caller's gating mask.  */
static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
}
1373
static void dec_fpu(DisasContext *dc)
1375
unsigned int fpu_insn;
1377
if ((dc->tb_flags & MSR_EE_FLAG)
1378
&& (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1379
&& (dc->cpu->cfg.use_fpu != 1)) {
1380
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1381
t_gen_raise_exception(dc, EXCP_HW_EXCP);
1385
fpu_insn = (dc->ir >> 7) & 7;
1389
gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1394
gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1399
gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1404
gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1409
switch ((dc->ir >> 4) & 7) {
1411
gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
1412
cpu_R[dc->ra], cpu_R[dc->rb]);
1415
gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
1416
cpu_R[dc->ra], cpu_R[dc->rb]);
1419
gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
1420
cpu_R[dc->ra], cpu_R[dc->rb]);
1423
gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
1424
cpu_R[dc->ra], cpu_R[dc->rb]);
1427
gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
1428
cpu_R[dc->ra], cpu_R[dc->rb]);
1431
gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
1432
cpu_R[dc->ra], cpu_R[dc->rb]);
1435
gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
1436
cpu_R[dc->ra], cpu_R[dc->rb]);
1439
qemu_log_mask(LOG_UNIMP,
1440
"unimplemented fcmp fpu_insn=%x pc=%x"
1442
fpu_insn, dc->pc, dc->opcode);
1443
dc->abort_at_next_insn = 1;
1449
if (!dec_check_fpuv2(dc)) {
1452
gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1456
if (!dec_check_fpuv2(dc)) {
1459
gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1463
if (!dec_check_fpuv2(dc)) {
1466
gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1470
qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
1472
fpu_insn, dc->pc, dc->opcode);
1473
dc->abort_at_next_insn = 1;
1478
/* Fallback decoder for unknown opcodes: raise illegal-opcode if the core
 * is configured to trap, otherwise log and flag the insn for abort.  */
static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}
1490
/* Insns connected to FSL or AXI stream attached devices. */
1491
static void dec_stream(DisasContext *dc)
1493
int mem_index = cpu_mmu_index(&dc->cpu->env, false);
1494
TCGv_i32 t_id, t_ctrl;
1497
LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
1498
dc->type_b ? "" : "d", dc->imm);
1500
if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
1501
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1502
t_gen_raise_exception(dc, EXCP_HW_EXCP);
1506
t_id = tcg_temp_new();
1508
tcg_gen_movi_tl(t_id, dc->imm & 0xf);
1509
ctrl = dc->imm >> 10;
1511
tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
1512
ctrl = dc->imm >> 5;
1515
t_ctrl = tcg_const_tl(ctrl);
1518
gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1520
gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1522
tcg_temp_free(t_id);
1523
tcg_temp_free(t_ctrl);
1526
static struct decoder_info {
1531
void (*dec)(DisasContext *dc);
1539
{DEC_BARREL, dec_barrel},
1541
{DEC_ST, dec_store},
1550
{DEC_STREAM, dec_stream},
1554
static inline void decode(DisasContext *dc, uint32_t ir)
1559
LOG_DIS("%8.8x\t", dc->ir);
1564
if ((dc->tb_flags & MSR_EE_FLAG)
1565
&& (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1566
&& (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
1567
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1568
t_gen_raise_exception(dc, EXCP_HW_EXCP);
1572
LOG_DIS("nr_nops=%d\t", dc->nr_nops);
1574
if (dc->nr_nops > 4) {
1575
cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
1578
/* bit 2 seems to indicate insn type. */
1579
dc->type_b = ir & (1 << 29);
1581
dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1582
dc->rd = EXTRACT_FIELD(ir, 21, 25);
1583
dc->ra = EXTRACT_FIELD(ir, 16, 20);
1584
dc->rb = EXTRACT_FIELD(ir, 11, 15);
1585
dc->imm = EXTRACT_FIELD(ir, 0, 15);
1587
/* Large switch for all insns. */
1588
for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1589
if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1596
/* generate intermediate code for basic block 'tb'. */
1597
void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
1599
MicroBlazeCPU *cpu = mb_env_get_cpu(env);
1600
CPUState *cs = CPU(cpu);
1602
struct DisasContext ctx;
1603
struct DisasContext *dc = &ctx;
1604
uint32_t next_page_start, org_flags;
1612
org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
1614
dc->is_jmp = DISAS_NEXT;
1616
dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
1617
if (dc->delayed_branch) {
1618
dc->jmp = JMP_INDIRECT;
1621
dc->singlestep_enabled = cs->singlestep_enabled;
1622
dc->cpustate_changed = 0;
1623
dc->abort_at_next_insn = 0;
1627
cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
1630
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1632
max_insns = tb->cflags & CF_COUNT_MASK;
1633
if (max_insns == 0) {
1634
max_insns = CF_COUNT_MASK;
1636
if (max_insns > TCG_MAX_INSNS) {
1637
max_insns = TCG_MAX_INSNS;
1643
tcg_gen_insn_start(dc->pc);
1647
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1648
tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1653
if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
1654
t_gen_raise_exception(dc, EXCP_DEBUG);
1655
dc->is_jmp = DISAS_UPDATE;
1656
/* The address covered by the breakpoint must be included in
1657
[tb->pc, tb->pc + tb->size) in order to for it to be
1658
properly cleared -- thus we increment the PC here so that
1659
the logic setting tb->size below does the right thing. */
1665
LOG_DIS("%8.8x:\t", dc->pc);
1667
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
1672
decode(dc, cpu_ldl_code(env, dc->pc));
1674
dc->tb_flags &= ~IMM_FLAG;
1677
if (dc->delayed_branch) {
1678
dc->delayed_branch--;
1679
if (!dc->delayed_branch) {
1680
if (dc->tb_flags & DRTI_FLAG)
1682
if (dc->tb_flags & DRTB_FLAG)
1684
if (dc->tb_flags & DRTE_FLAG)
1686
/* Clear the delay slot flag. */
1687
dc->tb_flags &= ~D_FLAG;
1688
/* If it is a direct jump, try direct chaining. */
1689
if (dc->jmp == JMP_INDIRECT) {
1690
eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
1691
dc->is_jmp = DISAS_JUMP;
1692
} else if (dc->jmp == JMP_DIRECT) {
1694
gen_goto_tb(dc, 0, dc->jmp_pc);
1695
dc->is_jmp = DISAS_TB_JUMP;
1696
} else if (dc->jmp == JMP_DIRECT_CC) {
1697
TCGLabel *l1 = gen_new_label();
1699
/* Conditional jmp. */
1700
tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
1701
gen_goto_tb(dc, 1, dc->pc);
1703
gen_goto_tb(dc, 0, dc->jmp_pc);
1705
dc->is_jmp = DISAS_TB_JUMP;
1710
if (cs->singlestep_enabled) {
1713
} while (!dc->is_jmp && !dc->cpustate_changed
1714
&& !tcg_op_buf_full()
1716
&& (dc->pc < next_page_start)
1717
&& num_insns < max_insns);
1720
if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
1721
if (dc->tb_flags & D_FLAG) {
1722
dc->is_jmp = DISAS_UPDATE;
1723
tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1729
if (tb->cflags & CF_LAST_IO)
1731
/* Force an update if the per-tb cpu state has changed. */
1732
if (dc->is_jmp == DISAS_NEXT
1733
&& (dc->cpustate_changed || org_flags != dc->tb_flags)) {
1734
dc->is_jmp = DISAS_UPDATE;
1735
tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1739
if (unlikely(cs->singlestep_enabled)) {
1740
TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
1742
if (dc->is_jmp != DISAS_JUMP) {
1743
tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1745
gen_helper_raise_exception(cpu_env, tmp);
1746
tcg_temp_free_i32(tmp);
1748
switch(dc->is_jmp) {
1750
gen_goto_tb(dc, 1, npc);
1755
/* indicate that the hash table must be used
1756
to find the next TB */
1760
/* nothing more to generate */
1764
gen_tb_end(tb, num_insns);
1766
tb->size = dc->pc - pc_start;
1767
tb->icount = num_insns;
1771
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
1772
&& qemu_log_in_addr_range(pc_start)) {
1774
qemu_log("--------------\n");
1776
log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
1778
qemu_log("\nisize=%d osize=%d\n",
1779
dc->pc - pc_start, tcg_op_buf_count());
1784
assert(!dc->abort_at_next_insn);
1787
/* Dump CPU state (special registers, mode flags and the 32 GPRs) to f,
   for the 'info registers' monitor command and -d cpu logging.  */
void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f) {
        return;
    }

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
             env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
             env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
             env->btaken, env->btarget,
             (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
             (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
             (env->sregs[SR_MSR] & MSR_EIP),
             (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        /* Four registers per output line.  */
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n\n");
}
MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
1821
cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));
1823
object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
1828
void mb_tcg_init(void)
1832
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
1833
tcg_ctx.tcg_env = cpu_env;
1835
env_debug = tcg_global_mem_new(cpu_env,
1836
offsetof(CPUMBState, debug),
1838
env_iflags = tcg_global_mem_new(cpu_env,
1839
offsetof(CPUMBState, iflags),
1841
env_imm = tcg_global_mem_new(cpu_env,
1842
offsetof(CPUMBState, imm),
1844
env_btarget = tcg_global_mem_new(cpu_env,
1845
offsetof(CPUMBState, btarget),
1847
env_btaken = tcg_global_mem_new(cpu_env,
1848
offsetof(CPUMBState, btaken),
1850
env_res_addr = tcg_global_mem_new(cpu_env,
1851
offsetof(CPUMBState, res_addr),
1853
env_res_val = tcg_global_mem_new(cpu_env,
1854
offsetof(CPUMBState, res_val),
1856
for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
1857
cpu_R[i] = tcg_global_mem_new(cpu_env,
1858
offsetof(CPUMBState, regs[i]),
1861
for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
1862
cpu_SR[i] = tcg_global_mem_new(cpu_env,
1863
offsetof(CPUMBState, sregs[i]),
1864
special_regnames[i]);
1868
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
1871
env->sregs[SR_PC] = data[0];