 * http://www.tensilica.com/products/literature-docs/documentation/xtensa-isa-databook.htm
 *
 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
44
typedef struct DisasContext {
45
const XtensaConfig *config;
55
int singlestep_enabled;
59
bool sar_m32_allocated;
62
uint32_t ccount_delta;
66
static TCGv_ptr cpu_env;
67
static TCGv_i32 cpu_pc;
68
static TCGv_i32 cpu_R[16];
69
static TCGv_i32 cpu_SR[256];
70
static TCGv_i32 cpu_UR[256];
72
#include "gen-icount.h"
74
static const char * const sregnames[256] = {
80
[LITBASE] = "LITBASE",
81
[SCOMPARE1] = "SCOMPARE1",
88
[WINDOW_BASE] = "WINDOW_BASE",
89
[WINDOW_START] = "WINDOW_START",
90
[PTEVADDR] = "PTEVADDR",
92
[ITLBCFG] = "ITLBCFG",
93
[DTLBCFG] = "DTLBCFG",
108
[EXCSAVE1] = "EXCSAVE1",
109
[EXCSAVE1 + 1] = "EXCSAVE2",
110
[EXCSAVE1 + 2] = "EXCSAVE3",
111
[EXCSAVE1 + 3] = "EXCSAVE4",
112
[EXCSAVE1 + 4] = "EXCSAVE5",
113
[EXCSAVE1 + 5] = "EXCSAVE6",
114
[EXCSAVE1 + 6] = "EXCSAVE7",
115
[CPENABLE] = "CPENABLE",
117
[INTCLEAR] = "INTCLEAR",
118
[INTENABLE] = "INTENABLE",
120
[VECBASE] = "VECBASE",
121
[EXCCAUSE] = "EXCCAUSE",
124
[EXCVADDR] = "EXCVADDR",
125
[CCOMPARE] = "CCOMPARE0",
126
[CCOMPARE + 1] = "CCOMPARE1",
127
[CCOMPARE + 2] = "CCOMPARE2",
130
static const char * const uregnames[256] = {
131
[THREADPTR] = "THREADPTR",
136
void xtensa_translate_init(void)
138
static const char * const regnames[] = {
139
"ar0", "ar1", "ar2", "ar3",
140
"ar4", "ar5", "ar6", "ar7",
141
"ar8", "ar9", "ar10", "ar11",
142
"ar12", "ar13", "ar14", "ar15",
146
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
147
cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
148
offsetof(CPUState, pc), "pc");
150
for (i = 0; i < 16; i++) {
151
cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
152
offsetof(CPUState, regs[i]),
156
for (i = 0; i < 256; ++i) {
158
cpu_SR[i] = tcg_global_mem_new_i32(TCG_AREG0,
159
offsetof(CPUState, sregs[i]),
164
for (i = 0; i < 256; ++i) {
166
cpu_UR[i] = tcg_global_mem_new_i32(TCG_AREG0,
167
offsetof(CPUState, uregs[i]),
175
/* True iff all option bits in `opt` are enabled in this core's config. */
static inline bool option_bits_enabled(DisasContext *dc, uint64_t opt)
{
    return xtensa_option_bits_enabled(dc->config, opt);
}
180
/* True iff the single Xtensa option `opt` is enabled in this core's config. */
static inline bool option_enabled(DisasContext *dc, int opt)
{
    return xtensa_option_enabled(dc->config, opt);
}
185
/*
 * If extended L32R is active for this TB, cache the page-aligned
 * literal base (LITBASE & ~0xfff) in a local temp for the whole TB.
 */
static void init_litbase(DisasContext *dc)
{
    if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
        dc->litbase = tcg_temp_local_new_i32();
        tcg_gen_andi_i32(dc->litbase, cpu_SR[LITBASE], 0xfffff000);
    }
}
193
/* Release the per-TB literal base temp allocated by init_litbase(). */
static void reset_litbase(DisasContext *dc)
{
    if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
        tcg_temp_free(dc->litbase);
    }
}
200
/* Reset the per-TB SAR value tracking (no known 5-bit form, no temp). */
static void init_sar_tracker(DisasContext *dc)
{
    dc->sar_5bit = false;
    dc->sar_m32_5bit = false;
    dc->sar_m32_allocated = false;
}
207
/* Free the 32-minus-SAR temp if gen_left_shift_sar() allocated one. */
static void reset_sar_tracker(DisasContext *dc)
{
    if (dc->sar_m32_allocated) {
        tcg_temp_free(dc->sar_m32);
    }
}
214
/*
 * SAR := sa & 0x1f (right-shift form).  Invalidates any cached
 * 32 - SAR value kept for funnel left shifts.
 */
static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa)
{
    tcg_gen_andi_i32(cpu_SR[SAR], sa, 0x1f);
    if (dc->sar_m32_5bit) {
        tcg_gen_discard_i32(dc->sar_m32);
    }
    /* NOTE(review): sar_5bit = true presumed from upstream — confirm */
    dc->sar_5bit = true;
    dc->sar_m32_5bit = false;
}
224
/*
 * SAR := 32 - (sa & 0x1f) (left-shift form).  The masked shift amount
 * is cached in dc->sar_m32 so SLL can use it directly.
 */
static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa)
{
    TCGv_i32 tmp = tcg_const_i32(32);
    if (!dc->sar_m32_allocated) {
        dc->sar_m32 = tcg_temp_local_new_i32();
        dc->sar_m32_allocated = true;
    }
    tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f);
    tcg_gen_sub_i32(cpu_SR[SAR], tmp, dc->sar_m32);
    dc->sar_5bit = false;
    dc->sar_m32_5bit = true;
    tcg_temp_free(tmp);
}
238
/* Flush the pending cycle count delta into CCOUNT via a helper call. */
static void gen_advance_ccount(DisasContext *dc)
{
    if (dc->ccount_delta > 0) {
        TCGv_i32 tmp = tcg_const_i32(dc->ccount_delta);
        dc->ccount_delta = 0;
        gen_helper_advance_ccount(tmp);
        tcg_temp_free(tmp);
    }
}
248
/* Forget the highest AR window quad known valid (forces re-check). */
static void reset_used_window(DisasContext *dc)
{
    dc->used_window = 0;
}
253
/* Raise QEMU-level exception `excp` after syncing CCOUNT. */
static void gen_exception(DisasContext *dc, int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_advance_ccount(dc);
    gen_helper_exception(tmp);
    tcg_temp_free(tmp);
}
261
/*
 * Raise an Xtensa exception with the given EXCCAUSE code at the current
 * pc.  Illegal-instruction and syscall causes end the TB since control
 * will not fall through.
 */
static void gen_exception_cause(DisasContext *dc, uint32_t cause)
{
    TCGv_i32 tpc = tcg_const_i32(dc->pc);
    TCGv_i32 tcause = tcg_const_i32(cause);
    gen_advance_ccount(dc);
    gen_helper_exception_cause(tpc, tcause);
    tcg_temp_free(tpc);
    tcg_temp_free(tcause);
    if (cause == ILLEGAL_INSTRUCTION_CAUSE ||
            cause == SYSCALL_CAUSE) {
        dc->is_jmp = DISAS_UPDATE;
    }
}
275
static void gen_exception_cause_vaddr(DisasContext *dc, uint32_t cause,
278
TCGv_i32 tpc = tcg_const_i32(dc->pc);
279
TCGv_i32 tcause = tcg_const_i32(cause);
280
gen_advance_ccount(dc);
281
gen_helper_exception_cause_vaddr(tpc, tcause, vaddr);
283
tcg_temp_free(tcause);
286
/*
 * Raise PRIVILEGED_CAUSE when executing in a non-zero ring.
 * NOTE(review): the dc->cring guard is presumed from upstream — confirm.
 */
static void gen_check_privilege(DisasContext *dc)
{
    if (dc->cring) {
        gen_exception_cause(dc, PRIVILEGED_CAUSE);
        dc->is_jmp = DISAS_UPDATE;
    }
}
294
/*
 * Jump to `dest`.  slot >= 0 selects a goto_tb chaining slot; slot < 0
 * forces a plain exit.  Single-stepping raises EXCP_DEBUG instead of
 * chaining.  Ends the TB.
 */
static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot)
{
    tcg_gen_mov_i32(cpu_pc, dest);
    if (dc->singlestep_enabled) {
        gen_exception(dc, EXCP_DEBUG);
    } else {
        gen_advance_ccount(dc);
        if (slot >= 0) {
            tcg_gen_goto_tb(slot);
            tcg_gen_exit_tb((tcg_target_long)dc->tb + slot);
        } else {
            tcg_gen_exit_tb(0);
        }
    }
    dc->is_jmp = DISAS_UPDATE;
}
311
/* Jump to a dynamic destination; no TB chaining possible. */
static void gen_jump(DisasContext *dc, TCGv dest)
{
    gen_jump_slot(dc, dest, -1);
}
316
/*
 * Jump to immediate `dest`; TB chaining is only allowed when source and
 * destination share the same guest page.
 */
static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot)
{
    TCGv_i32 tmp = tcg_const_i32(dest);
    if (((dc->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
        slot = -1;
    }
    gen_jump_slot(dc, tmp, slot);
    tcg_temp_free(tmp);
}
326
/*
 * Windowed call: record callinc in PS.CALLINC, store the return address
 * (with the window increment in bits 31:30) into a(callinc*4), then jump.
 */
static void gen_callw_slot(DisasContext *dc, int callinc, TCGv_i32 dest,
        int slot)
{
    TCGv_i32 tcallinc = tcg_const_i32(callinc);

    tcg_gen_deposit_i32(cpu_SR[PS], cpu_SR[PS],
            tcallinc, PS_CALLINC_SHIFT, PS_CALLINC_LEN);
    tcg_temp_free(tcallinc);
    tcg_gen_movi_i32(cpu_R[callinc << 2],
            (callinc << 30) | (dc->next_pc & 0x3fffffff));
    gen_jump_slot(dc, dest, slot);
}
339
/* Windowed call to a dynamic destination; no TB chaining. */
static void gen_callw(DisasContext *dc, int callinc, TCGv_i32 dest)
{
    gen_callw_slot(dc, callinc, dest, -1);
}
344
/*
 * Windowed call to an immediate destination; chaining allowed only
 * within the same guest page (mirrors gen_jumpi).
 */
static void gen_callwi(DisasContext *dc, int callinc, uint32_t dest, int slot)
{
    TCGv_i32 tmp = tcg_const_i32(dest);
    if (((dc->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
        slot = -1;
    }
    gen_callw_slot(dc, callinc, tmp, slot);
    tcg_temp_free(tmp);
}
354
/*
 * Zero-overhead loop support: if the next pc is the loop end (and EXCM
 * is clear), emit the decrement-and-branch-back sequence and return
 * true; otherwise return false so the caller emits a normal jump.
 */
static bool gen_check_loop_end(DisasContext *dc, int slot)
{
    if (option_enabled(dc, XTENSA_OPTION_LOOP) &&
            !(dc->tb->flags & XTENSA_TBFLAG_EXCM) &&
            dc->next_pc == dc->lend) {
        int label = gen_new_label();

        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_SR[LCOUNT], 0, label);
        tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_SR[LCOUNT], 1);
        gen_jumpi(dc, dc->lbeg, slot);
        gen_set_label(label);
        gen_jumpi(dc, dc->next_pc, -1);
        return true;
    }
    return false;
}
371
/* Jump to next_pc, honouring a zero-overhead loop end if one applies. */
static void gen_jumpi_check_loop_end(DisasContext *dc, int slot)
{
    if (!gen_check_loop_end(dc, slot)) {
        gen_jumpi(dc, dc->next_pc, slot);
    }
}
378
/*
 * Conditional branch: taken path goes to pc + offset (chain slot 1),
 * fall-through continues at next_pc (chain slot 0, loop-end aware).
 */
static void gen_brcond(DisasContext *dc, TCGCond cond,
        TCGv_i32 t0, TCGv_i32 t1, uint32_t offset)
{
    int label = gen_new_label();

    tcg_gen_brcond_i32(cond, t0, t1, label);
    gen_jumpi_check_loop_end(dc, 0);
    gen_set_label(label);
    gen_jumpi(dc, dc->pc + offset, 1);
}
389
static void gen_brcondi(DisasContext *dc, TCGCond cond,
390
TCGv_i32 t0, uint32_t t1, uint32_t offset)
392
TCGv_i32 tmp = tcg_const_i32(t1);
393
gen_brcond(dc, cond, t0, tmp, offset);
397
/* RSR CCOUNT: sync the pending cycle delta first, then read the SR. */
static void gen_rsr_ccount(DisasContext *dc, TCGv_i32 d, uint32_t sr)
{
    gen_advance_ccount(dc);
    tcg_gen_mov_i32(d, cpu_SR[sr]);
}
403
/*
 * RSR PTEVADDR: combine the PTEVADDR page-table base with the VPN of
 * the faulting address (EXCVADDR >> 10), word-aligned.
 */
static void gen_rsr_ptevaddr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
{
    tcg_gen_shri_i32(d, cpu_SR[EXCVADDR], 10);
    tcg_gen_or_i32(d, d, cpu_SR[sr]);
    tcg_gen_andi_i32(d, d, 0xfffffffc);
}
410
/*
 * RSR dispatch: special-cased registers go through their handler,
 * other named registers are a plain move, unnamed ones only log.
 * NOTE(review): the sregnames[] guard structure is presumed from
 * upstream — confirm against the original file.
 */
static void gen_rsr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
{
    static void (* const rsr_handler[256])(DisasContext *dc,
            TCGv_i32 d, uint32_t sr) = {
        [CCOUNT] = gen_rsr_ccount,
        [PTEVADDR] = gen_rsr_ptevaddr,
    };

    if (sregnames[sr]) {
        if (rsr_handler[sr]) {
            rsr_handler[sr](dc, d, sr);
        } else {
            tcg_gen_mov_i32(d, cpu_SR[sr]);
        }
    } else {
        qemu_log("RSR %d not implemented, ", sr);
    }
}
429
/*
 * WSR LBEG goes through a helper (it affects loop handling).
 * NOTE(review): trailing loop-end check presumed from upstream — confirm.
 */
static void gen_wsr_lbeg(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    gen_helper_wsr_lbeg(s);
    gen_jumpi_check_loop_end(dc, 0);
}
434
/*
 * WSR LEND goes through a helper (it affects loop handling).
 * NOTE(review): trailing loop-end check presumed from upstream — confirm.
 */
static void gen_wsr_lend(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    gen_helper_wsr_lend(s);
    gen_jumpi_check_loop_end(dc, 0);
}
439
/* WSR SAR: mask to 6 bits and invalidate all cached SAR forms. */
static void gen_wsr_sar(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_andi_i32(cpu_SR[sr], s, 0x3f);
    if (dc->sar_m32_5bit) {
        tcg_gen_discard_i32(dc->sar_m32);
    }
    dc->sar_5bit = false;
    dc->sar_m32_5bit = false;
}
449
/* WSR BR: only the low 16 boolean bits are writable. */
static void gen_wsr_br(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_andi_i32(cpu_SR[sr], s, 0xffff);
}
454
/* WSR LITBASE: keep base page + enable bit only. */
static void gen_wsr_litbase(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_andi_i32(cpu_SR[sr], s, 0xfffff001);
    /* This can change tb->flags, so exit tb */
    gen_jumpi_check_loop_end(dc, -1);
}
461
/* WSR ACCHI: only 8 significant bits, sign-extended. */
static void gen_wsr_acchi(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_ext8s_i32(cpu_SR[sr], s);
}
466
/* WSR WINDOW_BASE rotates the register file via a helper. */
static void gen_wsr_windowbase(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    gen_helper_wsr_windowbase(v);
    reset_used_window(dc);
}
472
/* WSR WINDOW_START: one valid bit per 4-register window quad. */
static void gen_wsr_windowstart(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, (1 << dc->config->nareg / 4) - 1);
    reset_used_window(dc);
}
478
/* WSR PTEVADDR: only the page-table base bits are writable. */
static void gen_wsr_ptevaddr(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, 0xffc00000);
}
483
/* WSR RASID goes through a helper (ASID change invalidates mappings). */
static void gen_wsr_rasid(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    gen_helper_wsr_rasid(v);
    /* This can change tb->flags, so exit tb */
    gen_jumpi_check_loop_end(dc, -1);
}
490
/* WSR ITLBCFG/DTLBCFG: keep only the valid way-size fields. */
static void gen_wsr_tlbcfg(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, 0x01130000);
}
495
/* WSR INTSET: only software interrupts may be raised by software. */
static void gen_wsr_intset(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v,
            dc->config->inttype_mask[INTTYPE_SOFTWARE]);
    gen_helper_check_interrupts(cpu_env);
    gen_jumpi_check_loop_end(dc, 0);
}
503
/* WSR INTCLEAR: clear edge/NMI/software interrupt bits from INTSET. */
static void gen_wsr_intclear(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_andi_i32(tmp, v,
            dc->config->inttype_mask[INTTYPE_EDGE] |
            dc->config->inttype_mask[INTTYPE_NMI] |
            dc->config->inttype_mask[INTTYPE_SOFTWARE]);
    tcg_gen_andc_i32(cpu_SR[INTSET], cpu_SR[INTSET], tmp);
    tcg_temp_free(tmp);
    gen_helper_check_interrupts(cpu_env);
}
516
/* WSR INTENABLE: store, then re-evaluate pending interrupts. */
static void gen_wsr_intenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_mov_i32(cpu_SR[sr], v);
    gen_helper_check_interrupts(cpu_env);
    gen_jumpi_check_loop_end(dc, 0);
}
523
/*
 * WSR PS: mask to the architecturally writable fields; RING bits exist
 * only with the MMU option (NOTE(review): `mask |= PS_RING` presumed
 * from upstream — confirm).  May change mmu index / tb->flags, so the
 * TB is ended.
 */
static void gen_wsr_ps(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    uint32_t mask = PS_WOE | PS_CALLINC | PS_OWB |
        PS_UM | PS_EXCM | PS_INTLEVEL;

    if (option_enabled(dc, XTENSA_OPTION_MMU)) {
        mask |= PS_RING;
    }
    tcg_gen_andi_i32(cpu_SR[sr], v, mask);
    reset_used_window(dc);
    gen_helper_check_interrupts(cpu_env);
    /* This can change mmu index and tb->flags, so exit tb */
    gen_jumpi_check_loop_end(dc, -1);
}
538
/* WSR PRID: the processor ID register is read-only — writes are ignored. */
static void gen_wsr_prid(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
}
542
/*
 * WSR CCOMPAREn: writing a compare register clears its pending timer
 * interrupt and re-evaluates interrupt state.
 */
static void gen_wsr_ccompare(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    uint32_t id = sr - CCOMPARE;
    if (id < dc->config->nccompare) {
        uint32_t int_bit = 1 << dc->config->timerint[id];
        gen_advance_ccount(dc);
        tcg_gen_mov_i32(cpu_SR[sr], v);
        tcg_gen_andi_i32(cpu_SR[INTSET], cpu_SR[INTSET], ~int_bit);
        gen_helper_check_interrupts(cpu_env);
    }
}
554
/*
 * WSR dispatch: special-cased registers go through their handler,
 * other named registers are a plain move, unnamed ones only log.
 * NOTE(review): the [SAR]/[BR]/[PS] table entries and the sregnames[]
 * guard are presumed from upstream — confirm against the original file.
 */
static void gen_wsr(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    static void (* const wsr_handler[256])(DisasContext *dc,
            uint32_t sr, TCGv_i32 v) = {
        [LBEG] = gen_wsr_lbeg,
        [LEND] = gen_wsr_lend,
        [SAR] = gen_wsr_sar,
        [BR] = gen_wsr_br,
        [LITBASE] = gen_wsr_litbase,
        [ACCHI] = gen_wsr_acchi,
        [WINDOW_BASE] = gen_wsr_windowbase,
        [WINDOW_START] = gen_wsr_windowstart,
        [PTEVADDR] = gen_wsr_ptevaddr,
        [RASID] = gen_wsr_rasid,
        [ITLBCFG] = gen_wsr_tlbcfg,
        [DTLBCFG] = gen_wsr_tlbcfg,
        [INTSET] = gen_wsr_intset,
        [INTCLEAR] = gen_wsr_intclear,
        [INTENABLE] = gen_wsr_intenable,
        [PS] = gen_wsr_ps,
        [PRID] = gen_wsr_prid,
        [CCOMPARE] = gen_wsr_ccompare,
        [CCOMPARE + 1] = gen_wsr_ccompare,
        [CCOMPARE + 2] = gen_wsr_ccompare,
    };

    if (sregnames[sr]) {
        if (wsr_handler[sr]) {
            wsr_handler[sr](dc, sr, s);
        } else {
            tcg_gen_mov_i32(cpu_SR[sr], s);
        }
    } else {
        qemu_log("WSR %d not implemented, ", sr);
    }
}
591
static void gen_load_store_alignment(DisasContext *dc, int shift,
592
TCGv_i32 addr, bool no_hw_alignment)
594
if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
595
tcg_gen_andi_i32(addr, addr, ~0 << shift);
596
} else if (option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT) &&
598
int label = gen_new_label();
599
TCGv_i32 tmp = tcg_temp_new_i32();
600
tcg_gen_andi_i32(tmp, addr, ~(~0 << shift));
601
tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
602
gen_exception_cause_vaddr(dc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
603
gen_set_label(label);
608
/* WAITI: sync CCOUNT and let the helper set the interrupt level / halt. */
static void gen_waiti(DisasContext *dc, uint32_t imm4)
{
    TCGv_i32 pc = tcg_const_i32(dc->next_pc);
    TCGv_i32 intlevel = tcg_const_i32(imm4);
    gen_advance_ccount(dc);
    gen_helper_waiti(pc, intlevel);
    tcg_temp_free(pc);
    tcg_temp_free(intlevel);
}
618
/*
 * Ensure register a[r1] belongs to a valid window quad, calling the
 * window-check helper (which may raise a window overflow) when r1 is
 * beyond the highest quad already checked in this TB.  Skipped while
 * EXCM is set.
 */
static void gen_window_check1(DisasContext *dc, unsigned r1)
{
    if (dc->tb->flags & XTENSA_TBFLAG_EXCM) {
        return;
    }
    if (option_enabled(dc, XTENSA_OPTION_WINDOWED_REGISTER) &&
            r1 / 4 > dc->used_window) {
        TCGv_i32 pc = tcg_const_i32(dc->pc);
        TCGv_i32 w = tcg_const_i32(r1 / 4);

        dc->used_window = r1 / 4;
        gen_advance_ccount(dc);
        gen_helper_window_check(pc, w);

        tcg_temp_free(w);
        tcg_temp_free(pc);
    }
}
637
/* Window-check the larger of two register indices. */
static void gen_window_check2(DisasContext *dc, unsigned r1, unsigned r2)
{
    gen_window_check1(dc, r1 > r2 ? r1 : r2);
}
642
/* Window-check the largest of three register indices. */
static void gen_window_check3(DisasContext *dc, unsigned r1, unsigned r2,
        unsigned r3)
{
    gen_window_check2(dc, r1, r2 > r3 ? r2 : r3);
}
648
/*
 * Extract a MAC16 operand half from v into a fresh temp: the high half
 * (shifted down 16) or the low half (16-bit extend), signed or unsigned.
 * Caller owns and must free the returned temp.
 */
static TCGv_i32 gen_mac16_m(TCGv_i32 v, bool hi, bool is_unsigned)
{
    TCGv_i32 m = tcg_temp_new_i32();

    if (hi) {
        (is_unsigned ? tcg_gen_shri_i32 : tcg_gen_sari_i32)(m, v, 16);
    } else {
        (is_unsigned ? tcg_gen_ext16u_i32 : tcg_gen_ext16s_i32)(m, v);
    }
    return m;
}
660
static void disas_xtensa_insn(DisasContext *dc)
662
#define HAS_OPTION_BITS(opt) do { \
663
if (!option_bits_enabled(dc, opt)) { \
664
qemu_log("Option is not enabled %s:%d\n", \
665
__FILE__, __LINE__); \
666
goto invalid_opcode; \
670
#define HAS_OPTION(opt) HAS_OPTION_BITS(XTENSA_OPTION_BIT(opt))
672
#define TBD() qemu_log("TBD(pc = %08x): %s:%d\n", dc->pc, __FILE__, __LINE__)
673
#define RESERVED() do { \
674
qemu_log("RESERVED(pc = %08x, %02x%02x%02x): %s:%d\n", \
675
dc->pc, b0, b1, b2, __FILE__, __LINE__); \
676
goto invalid_opcode; \
680
#ifdef TARGET_WORDS_BIGENDIAN
681
#define OP0 (((b0) & 0xf0) >> 4)
682
#define OP1 (((b2) & 0xf0) >> 4)
683
#define OP2 ((b2) & 0xf)
684
#define RRR_R ((b1) & 0xf)
685
#define RRR_S (((b1) & 0xf0) >> 4)
686
#define RRR_T ((b0) & 0xf)
688
#define OP0 (((b0) & 0xf))
689
#define OP1 (((b2) & 0xf))
690
#define OP2 (((b2) & 0xf0) >> 4)
691
#define RRR_R (((b1) & 0xf0) >> 4)
692
#define RRR_S (((b1) & 0xf))
693
#define RRR_T (((b0) & 0xf0) >> 4)
695
#define RRR_X ((RRR_R & 0x4) >> 2)
696
#define RRR_Y ((RRR_T & 0x4) >> 2)
697
#define RRR_W (RRR_R & 0x3)
706
#define RRI8_IMM8 (b2)
707
#define RRI8_IMM8_SE ((((b2) & 0x80) ? 0xffffff00 : 0) | RRI8_IMM8)
709
#ifdef TARGET_WORDS_BIGENDIAN
710
#define RI16_IMM16 (((b1) << 8) | (b2))
712
#define RI16_IMM16 (((b2) << 8) | (b1))
715
#ifdef TARGET_WORDS_BIGENDIAN
716
#define CALL_N (((b0) & 0xc) >> 2)
717
#define CALL_OFFSET ((((b0) & 0x3) << 16) | ((b1) << 8) | (b2))
719
#define CALL_N (((b0) & 0x30) >> 4)
720
#define CALL_OFFSET ((((b0) & 0xc0) >> 6) | ((b1) << 2) | ((b2) << 10))
722
#define CALL_OFFSET_SE \
723
(((CALL_OFFSET & 0x20000) ? 0xfffc0000 : 0) | CALL_OFFSET)
725
#define CALLX_N CALL_N
726
#ifdef TARGET_WORDS_BIGENDIAN
727
#define CALLX_M ((b0) & 0x3)
729
#define CALLX_M (((b0) & 0xc0) >> 6)
731
#define CALLX_S RRR_S
733
#define BRI12_M CALLX_M
734
#define BRI12_S RRR_S
735
#ifdef TARGET_WORDS_BIGENDIAN
736
#define BRI12_IMM12 ((((b1) & 0xf) << 8) | (b2))
738
#define BRI12_IMM12 ((((b1) & 0xf0) >> 4) | ((b2) << 4))
740
#define BRI12_IMM12_SE (((BRI12_IMM12 & 0x800) ? 0xfffff000 : 0) | BRI12_IMM12)
742
#define BRI8_M BRI12_M
743
#define BRI8_R RRI8_R
744
#define BRI8_S RRI8_S
745
#define BRI8_IMM8 RRI8_IMM8
746
#define BRI8_IMM8_SE RRI8_IMM8_SE
750
uint8_t b0 = ldub_code(dc->pc);
751
uint8_t b1 = ldub_code(dc->pc + 1);
752
uint8_t b2 = ldub_code(dc->pc + 2);
754
static const uint32_t B4CONST[] = {
755
0xffffffff, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
758
static const uint32_t B4CONSTU[] = {
759
32768, 65536, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
763
dc->next_pc = dc->pc + 2;
764
HAS_OPTION(XTENSA_OPTION_CODE_DENSITY);
766
dc->next_pc = dc->pc + 3;
775
if ((RRR_R & 0xc) == 0x8) {
776
HAS_OPTION(XTENSA_OPTION_BOOLEAN);
783
gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
794
gen_window_check1(dc, CALLX_S);
795
gen_jump(dc, cpu_R[CALLX_S]);
799
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
801
TCGv_i32 tmp = tcg_const_i32(dc->pc);
802
gen_advance_ccount(dc);
803
gen_helper_retw(tmp, tmp);
816
gen_window_check2(dc, CALLX_S, CALLX_N << 2);
820
TCGv_i32 tmp = tcg_temp_new_i32();
821
tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]);
822
tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
831
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
833
TCGv_i32 tmp = tcg_temp_new_i32();
835
tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]);
836
gen_callw(dc, CALLX_N, tmp);
846
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
847
gen_window_check2(dc, RRR_T, RRR_S);
849
TCGv_i32 pc = tcg_const_i32(dc->pc);
850
gen_advance_ccount(dc);
851
gen_helper_movsp(pc);
852
tcg_gen_mov_i32(cpu_R[RRR_T], cpu_R[RRR_S]);
872
HAS_OPTION(XTENSA_OPTION_EXCEPTION);
884
default: /*reserved*/
893
HAS_OPTION(XTENSA_OPTION_EXCEPTION);
896
gen_check_privilege(dc);
897
tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
898
gen_helper_check_interrupts(cpu_env);
899
gen_jump(dc, cpu_SR[EPC1]);
907
gen_check_privilege(dc);
909
dc->config->ndepc ? DEPC : EPC1]);
914
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
915
gen_check_privilege(dc);
917
TCGv_i32 tmp = tcg_const_i32(1);
920
cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
921
tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]);
924
tcg_gen_andc_i32(cpu_SR[WINDOW_START],
925
cpu_SR[WINDOW_START], tmp);
927
tcg_gen_or_i32(cpu_SR[WINDOW_START],
928
cpu_SR[WINDOW_START], tmp);
931
gen_helper_restore_owb();
932
gen_helper_check_interrupts(cpu_env);
933
gen_jump(dc, cpu_SR[EPC1]);
939
default: /*reserved*/
946
HAS_OPTION(XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT);
947
if (RRR_S >= 2 && RRR_S <= dc->config->nlevel) {
948
gen_check_privilege(dc);
949
tcg_gen_mov_i32(cpu_SR[PS],
950
cpu_SR[EPS2 + RRR_S - 2]);
951
gen_helper_check_interrupts(cpu_env);
952
gen_jump(dc, cpu_SR[EPC1 + RRR_S - 1]);
954
qemu_log("RFI %d is illegal\n", RRR_S);
955
gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
963
default: /*reserved*/
971
HAS_OPTION(XTENSA_OPTION_EXCEPTION);
976
HAS_OPTION(XTENSA_OPTION_EXCEPTION);
979
gen_exception_cause(dc, SYSCALL_CAUSE);
983
if (semihosting_enabled) {
984
gen_check_privilege(dc);
985
gen_helper_simcall(cpu_env);
987
qemu_log("SIMCALL but semihosting is disabled\n");
988
gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
999
HAS_OPTION(XTENSA_OPTION_INTERRUPT);
1000
gen_check_privilege(dc);
1001
gen_window_check1(dc, RRR_T);
1002
tcg_gen_mov_i32(cpu_R[RRR_T], cpu_SR[PS]);
1003
tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_INTLEVEL);
1004
tcg_gen_ori_i32(cpu_SR[PS], cpu_SR[PS], RRR_S);
1005
gen_helper_check_interrupts(cpu_env);
1006
gen_jumpi_check_loop_end(dc, 0);
1010
HAS_OPTION(XTENSA_OPTION_INTERRUPT);
1011
gen_check_privilege(dc);
1012
gen_waiti(dc, RRR_S);
1019
HAS_OPTION(XTENSA_OPTION_BOOLEAN);
1021
const unsigned shift = (RRR_R & 2) ? 8 : 4;
1022
TCGv_i32 mask = tcg_const_i32(
1023
((1 << shift) - 1) << RRR_S);
1024
TCGv_i32 tmp = tcg_temp_new_i32();
1026
tcg_gen_and_i32(tmp, cpu_SR[BR], mask);
1027
if (RRR_R & 1) { /*ALL*/
1028
tcg_gen_addi_i32(tmp, tmp, 1 << RRR_S);
1030
tcg_gen_add_i32(tmp, tmp, mask);
1032
tcg_gen_shri_i32(tmp, tmp, RRR_S + shift);
1033
tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR],
1035
tcg_temp_free(mask);
1040
default: /*reserved*/
1048
gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1049
tcg_gen_and_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1053
gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1054
tcg_gen_or_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1058
gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1059
tcg_gen_xor_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1065
gen_window_check1(dc, RRR_S);
1066
gen_right_shift_sar(dc, cpu_R[RRR_S]);
1070
gen_window_check1(dc, RRR_S);
1071
gen_left_shift_sar(dc, cpu_R[RRR_S]);
1075
gen_window_check1(dc, RRR_S);
1077
TCGv_i32 tmp = tcg_temp_new_i32();
1078
tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
1079
gen_right_shift_sar(dc, tmp);
1085
gen_window_check1(dc, RRR_S);
1087
TCGv_i32 tmp = tcg_temp_new_i32();
1088
tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
1089
gen_left_shift_sar(dc, tmp);
1096
TCGv_i32 tmp = tcg_const_i32(
1097
RRR_S | ((RRR_T & 1) << 4));
1098
gen_right_shift_sar(dc, tmp);
1112
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1113
gen_check_privilege(dc);
1115
TCGv_i32 tmp = tcg_const_i32(
1116
RRR_T | ((RRR_T & 8) ? 0xfffffff0 : 0));
1117
gen_helper_rotw(tmp);
1119
reset_used_window(dc);
1124
HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA);
1125
gen_window_check2(dc, RRR_S, RRR_T);
1126
gen_helper_nsa(cpu_R[RRR_T], cpu_R[RRR_S]);
1130
HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA);
1131
gen_window_check2(dc, RRR_S, RRR_T);
1132
gen_helper_nsau(cpu_R[RRR_T], cpu_R[RRR_S]);
1135
default: /*reserved*/
1143
XTENSA_OPTION_BIT(XTENSA_OPTION_MMU) |
1144
XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
1145
XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION));
1146
gen_check_privilege(dc);
1147
gen_window_check2(dc, RRR_S, RRR_T);
1149
TCGv_i32 dtlb = tcg_const_i32((RRR_R & 8) != 0);
1151
switch (RRR_R & 7) {
1152
case 3: /*RITLB0*/ /*RDTLB0*/
1153
gen_helper_rtlb0(cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
1156
case 4: /*IITLB*/ /*IDTLB*/
1157
gen_helper_itlb(cpu_R[RRR_S], dtlb);
1158
/* This could change memory mapping, so exit tb */
1159
gen_jumpi_check_loop_end(dc, -1);
1162
case 5: /*PITLB*/ /*PDTLB*/
1163
tcg_gen_movi_i32(cpu_pc, dc->pc);
1164
gen_helper_ptlb(cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
1167
case 6: /*WITLB*/ /*WDTLB*/
1168
gen_helper_wtlb(cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
1169
/* This could change memory mapping, so exit tb */
1170
gen_jumpi_check_loop_end(dc, -1);
1173
case 7: /*RITLB1*/ /*RDTLB1*/
1174
gen_helper_rtlb1(cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
1178
tcg_temp_free(dtlb);
1182
tcg_temp_free(dtlb);
1187
gen_window_check2(dc, RRR_R, RRR_T);
1190
tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
1195
int label = gen_new_label();
1196
tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
1197
tcg_gen_brcondi_i32(
1198
TCG_COND_GE, cpu_R[RRR_R], 0, label);
1199
tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
1200
gen_set_label(label);
1204
default: /*reserved*/
1210
case 7: /*reserved*/
1215
gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1216
tcg_gen_add_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1222
gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1224
TCGv_i32 tmp = tcg_temp_new_i32();
1225
tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 8);
1226
tcg_gen_add_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]);
1232
gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1233
tcg_gen_sub_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1239
gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1241
TCGv_i32 tmp = tcg_temp_new_i32();
1242
tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 12);
1243
tcg_gen_sub_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]);
1254
gen_window_check2(dc, RRR_R, RRR_S);
1255
tcg_gen_shli_i32(cpu_R[RRR_R], cpu_R[RRR_S],
1256
32 - (RRR_T | ((OP2 & 1) << 4)));
1261
gen_window_check2(dc, RRR_R, RRR_T);
1262
tcg_gen_sari_i32(cpu_R[RRR_R], cpu_R[RRR_T],
1263
RRR_S | ((OP2 & 1) << 4));
1267
gen_window_check2(dc, RRR_R, RRR_T);
1268
tcg_gen_shri_i32(cpu_R[RRR_R], cpu_R[RRR_T], RRR_S);
1273
TCGv_i32 tmp = tcg_temp_new_i32();
1275
gen_check_privilege(dc);
1277
gen_window_check1(dc, RRR_T);
1278
tcg_gen_mov_i32(tmp, cpu_R[RRR_T]);
1279
gen_rsr(dc, cpu_R[RRR_T], RSR_SR);
1280
gen_wsr(dc, RSR_SR, tmp);
1282
if (!sregnames[RSR_SR]) {
1289
* Note: 64 bit ops are used here solely because SAR values
1292
#define gen_shift_reg(cmd, reg) do { \
1293
TCGv_i64 tmp = tcg_temp_new_i64(); \
1294
tcg_gen_extu_i32_i64(tmp, reg); \
1295
tcg_gen_##cmd##_i64(v, v, tmp); \
1296
tcg_gen_trunc_i64_i32(cpu_R[RRR_R], v); \
1297
tcg_temp_free_i64(v); \
1298
tcg_temp_free_i64(tmp); \
1301
#define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR])
1304
gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1306
TCGv_i64 v = tcg_temp_new_i64();
1307
tcg_gen_concat_i32_i64(v, cpu_R[RRR_T], cpu_R[RRR_S]);
1313
gen_window_check2(dc, RRR_R, RRR_T);
1315
tcg_gen_shr_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]);
1317
TCGv_i64 v = tcg_temp_new_i64();
1318
tcg_gen_extu_i32_i64(v, cpu_R[RRR_T]);
1324
gen_window_check2(dc, RRR_R, RRR_S);
1325
if (dc->sar_m32_5bit) {
1326
tcg_gen_shl_i32(cpu_R[RRR_R], cpu_R[RRR_S], dc->sar_m32);
1328
TCGv_i64 v = tcg_temp_new_i64();
1329
TCGv_i32 s = tcg_const_i32(32);
1330
tcg_gen_sub_i32(s, s, cpu_SR[SAR]);
1331
tcg_gen_andi_i32(s, s, 0x3f);
1332
tcg_gen_extu_i32_i64(v, cpu_R[RRR_S]);
1333
gen_shift_reg(shl, s);
1339
gen_window_check2(dc, RRR_R, RRR_T);
1341
tcg_gen_sar_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]);
1343
TCGv_i64 v = tcg_temp_new_i64();
1344
tcg_gen_ext_i32_i64(v, cpu_R[RRR_T]);
1349
#undef gen_shift_reg
1352
HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL);
1353
gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1355
TCGv_i32 v1 = tcg_temp_new_i32();
1356
TCGv_i32 v2 = tcg_temp_new_i32();
1357
tcg_gen_ext16u_i32(v1, cpu_R[RRR_S]);
1358
tcg_gen_ext16u_i32(v2, cpu_R[RRR_T]);
1359
tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2);
1366
HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL);
1367
gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1369
TCGv_i32 v1 = tcg_temp_new_i32();
1370
TCGv_i32 v2 = tcg_temp_new_i32();
1371
tcg_gen_ext16s_i32(v1, cpu_R[RRR_S]);
1372
tcg_gen_ext16s_i32(v2, cpu_R[RRR_T]);
1373
tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2);
1379
default: /*reserved*/
1387
gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1391
HAS_OPTION(XTENSA_OPTION_32_BIT_IDIV);
1392
int label = gen_new_label();
1393
tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0, label);
1394
gen_exception_cause(dc, INTEGER_DIVIDE_BY_ZERO_CAUSE);
1395
gen_set_label(label);
1399
#define BOOLEAN_LOGIC(fn, r, s, t) \
1401
HAS_OPTION(XTENSA_OPTION_BOOLEAN); \
1402
TCGv_i32 tmp1 = tcg_temp_new_i32(); \
1403
TCGv_i32 tmp2 = tcg_temp_new_i32(); \
1405
tcg_gen_shri_i32(tmp1, cpu_SR[BR], s); \
1406
tcg_gen_shri_i32(tmp2, cpu_SR[BR], t); \
1407
tcg_gen_##fn##_i32(tmp1, tmp1, tmp2); \
1408
tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR], tmp1, r, 1); \
1409
tcg_temp_free(tmp1); \
1410
tcg_temp_free(tmp2); \
1414
BOOLEAN_LOGIC(and, RRR_R, RRR_S, RRR_T);
1418
BOOLEAN_LOGIC(andc, RRR_R, RRR_S, RRR_T);
1422
BOOLEAN_LOGIC(or, RRR_R, RRR_S, RRR_T);
1426
BOOLEAN_LOGIC(orc, RRR_R, RRR_S, RRR_T);
1430
BOOLEAN_LOGIC(xor, RRR_R, RRR_S, RRR_T);
1433
#undef BOOLEAN_LOGIC
1436
HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL);
1437
tcg_gen_mul_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1442
HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL_HIGH);
1444
TCGv_i64 r = tcg_temp_new_i64();
1445
TCGv_i64 s = tcg_temp_new_i64();
1446
TCGv_i64 t = tcg_temp_new_i64();
1449
tcg_gen_extu_i32_i64(s, cpu_R[RRR_S]);
1450
tcg_gen_extu_i32_i64(t, cpu_R[RRR_T]);
1452
tcg_gen_ext_i32_i64(s, cpu_R[RRR_S]);
1453
tcg_gen_ext_i32_i64(t, cpu_R[RRR_T]);
1455
tcg_gen_mul_i64(r, s, t);
1456
tcg_gen_shri_i64(r, r, 32);
1457
tcg_gen_trunc_i64_i32(cpu_R[RRR_R], r);
1459
tcg_temp_free_i64(r);
1460
tcg_temp_free_i64(s);
1461
tcg_temp_free_i64(t);
1466
tcg_gen_divu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1472
int label1 = gen_new_label();
1473
int label2 = gen_new_label();
1475
tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_S], 0x80000000,
1477
tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0xffffffff,
1479
tcg_gen_movi_i32(cpu_R[RRR_R],
1480
OP2 == 13 ? 0x80000000 : 0);
1482
gen_set_label(label1);
1484
tcg_gen_div_i32(cpu_R[RRR_R],
1485
cpu_R[RRR_S], cpu_R[RRR_T]);
1487
tcg_gen_rem_i32(cpu_R[RRR_R],
1488
cpu_R[RRR_S], cpu_R[RRR_T]);
1490
gen_set_label(label2);
1495
tcg_gen_remu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1498
default: /*reserved*/
1508
gen_check_privilege(dc);
1510
gen_window_check1(dc, RRR_T);
1511
gen_rsr(dc, cpu_R[RRR_T], RSR_SR);
1512
if (!sregnames[RSR_SR]) {
1519
gen_check_privilege(dc);
1521
gen_window_check1(dc, RRR_T);
1522
gen_wsr(dc, RSR_SR, cpu_R[RRR_T]);
1523
if (!sregnames[RSR_SR]) {
1529
HAS_OPTION(XTENSA_OPTION_MISC_OP_SEXT);
1530
gen_window_check2(dc, RRR_R, RRR_S);
1532
int shift = 24 - RRR_T;
1535
tcg_gen_ext8s_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1536
} else if (shift == 16) {
1537
tcg_gen_ext16s_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1539
TCGv_i32 tmp = tcg_temp_new_i32();
1540
tcg_gen_shli_i32(tmp, cpu_R[RRR_S], shift);
1541
tcg_gen_sari_i32(cpu_R[RRR_R], tmp, shift);
1548
HAS_OPTION(XTENSA_OPTION_MISC_OP_CLAMPS);
1549
gen_window_check2(dc, RRR_R, RRR_S);
1551
TCGv_i32 tmp1 = tcg_temp_new_i32();
1552
TCGv_i32 tmp2 = tcg_temp_new_i32();
1553
int label = gen_new_label();
1555
tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 24 - RRR_T);
1556
tcg_gen_xor_i32(tmp2, tmp1, cpu_R[RRR_S]);
1557
tcg_gen_andi_i32(tmp2, tmp2, 0xffffffff << (RRR_T + 7));
1558
tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1559
tcg_gen_brcondi_i32(TCG_COND_EQ, tmp2, 0, label);
1561
tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 31);
1562
tcg_gen_xori_i32(cpu_R[RRR_R], tmp1,
1563
0xffffffff >> (25 - RRR_T));
1565
gen_set_label(label);
1567
tcg_temp_free(tmp1);
1568
tcg_temp_free(tmp2);
1576
HAS_OPTION(XTENSA_OPTION_MISC_OP_MINMAX);
1577
gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1579
static const TCGCond cond[] = {
1585
int label = gen_new_label();
1587
if (RRR_R != RRR_T) {
1588
tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1589
tcg_gen_brcond_i32(cond[OP2 - 4],
1590
cpu_R[RRR_S], cpu_R[RRR_T], label);
1591
tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
1593
tcg_gen_brcond_i32(cond[OP2 - 4],
1594
cpu_R[RRR_T], cpu_R[RRR_S], label);
1595
tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1597
gen_set_label(label);
1605
gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1607
static const TCGCond cond[] = {
1613
int label = gen_new_label();
1614
tcg_gen_brcondi_i32(cond[OP2 - 8], cpu_R[RRR_T], 0, label);
1615
tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1616
gen_set_label(label);
1622
HAS_OPTION(XTENSA_OPTION_BOOLEAN);
1623
gen_window_check2(dc, RRR_R, RRR_S);
1625
int label = gen_new_label();
1626
TCGv_i32 tmp = tcg_temp_new_i32();
1628
tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T);
1629
tcg_gen_brcondi_i32(
1630
OP2 & 1 ? TCG_COND_EQ : TCG_COND_NE,
1632
tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1633
gen_set_label(label);
1639
gen_window_check1(dc, RRR_R);
1641
int st = (RRR_S << 4) + RRR_T;
1642
if (uregnames[st]) {
1643
tcg_gen_mov_i32(cpu_R[RRR_R], cpu_UR[st]);
1645
qemu_log("RUR %d not implemented, ", st);
1652
gen_window_check1(dc, RRR_T);
1654
if (uregnames[RSR_SR]) {
1655
tcg_gen_mov_i32(cpu_UR[RSR_SR], cpu_R[RRR_T]);
1657
qemu_log("WUR %d not implemented, ", RSR_SR);
1668
gen_window_check2(dc, RRR_R, RRR_T);
1670
int shiftimm = RRR_S | (OP1 << 4);
1671
int maskimm = (1 << (OP2 + 1)) - 1;
1673
TCGv_i32 tmp = tcg_temp_new_i32();
1674
tcg_gen_shri_i32(tmp, cpu_R[RRR_T], shiftimm);
1675
tcg_gen_andi_i32(cpu_R[RRR_R], tmp, maskimm);
1689
HAS_OPTION(XTENSA_OPTION_COPROCESSOR);
1694
gen_window_check2(dc, RRR_S, RRR_T);
1697
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1698
gen_check_privilege(dc);
1700
TCGv_i32 addr = tcg_temp_new_i32();
1701
tcg_gen_addi_i32(addr, cpu_R[RRR_S],
1702
(0xffffffc0 | (RRR_R << 2)));
1703
tcg_gen_qemu_ld32u(cpu_R[RRR_T], addr, dc->ring);
1704
tcg_temp_free(addr);
1709
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1710
gen_check_privilege(dc);
1712
TCGv_i32 addr = tcg_temp_new_i32();
1713
tcg_gen_addi_i32(addr, cpu_R[RRR_S],
1714
(0xffffffc0 | (RRR_R << 2)));
1715
tcg_gen_qemu_st32(cpu_R[RRR_T], addr, dc->ring);
1716
tcg_temp_free(addr);
1727
HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
1732
HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
1736
default: /*reserved*/
1743
gen_window_check1(dc, RRR_T);
1745
TCGv_i32 tmp = tcg_const_i32(
1746
((dc->tb->flags & XTENSA_TBFLAG_LITBASE) ?
1747
0 : ((dc->pc + 3) & ~3)) +
1748
(0xfffc0000 | (RI16_IMM16 << 2)));
1750
if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
1751
tcg_gen_add_i32(tmp, tmp, dc->litbase);
1753
tcg_gen_qemu_ld32u(cpu_R[RRR_T], tmp, dc->cring);
1759
#define gen_load_store(type, shift) do { \
1760
TCGv_i32 addr = tcg_temp_new_i32(); \
1761
gen_window_check2(dc, RRI8_S, RRI8_T); \
1762
tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << shift); \
1764
gen_load_store_alignment(dc, shift, addr, false); \
1766
tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \
1767
tcg_temp_free(addr); \
1772
gen_load_store(ld8u, 0);
1776
gen_load_store(ld16u, 1);
1780
gen_load_store(ld32u, 2);
1784
gen_load_store(st8, 0);
1788
gen_load_store(st16, 1);
1792
gen_load_store(st32, 2);
1797
HAS_OPTION(XTENSA_OPTION_DCACHE);
1828
HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
1832
HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
1836
HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
1840
HAS_OPTION(XTENSA_OPTION_DCACHE);
1844
HAS_OPTION(XTENSA_OPTION_DCACHE);
1847
default: /*reserved*/
1855
HAS_OPTION(XTENSA_OPTION_ICACHE);
1861
HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
1865
HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
1869
HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
1872
default: /*reserved*/
1879
HAS_OPTION(XTENSA_OPTION_ICACHE);
1883
HAS_OPTION(XTENSA_OPTION_ICACHE);
1886
default: /*reserved*/
1893
gen_load_store(ld16s, 1);
1895
#undef gen_load_store
1898
gen_window_check1(dc, RRI8_T);
1899
tcg_gen_movi_i32(cpu_R[RRI8_T],
1900
RRI8_IMM8 | (RRI8_S << 8) |
1901
((RRI8_S & 0x8) ? 0xfffff000 : 0));
1904
#define gen_load_store_no_hw_align(type) do { \
1905
TCGv_i32 addr = tcg_temp_local_new_i32(); \
1906
gen_window_check2(dc, RRI8_S, RRI8_T); \
1907
tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2); \
1908
gen_load_store_alignment(dc, 2, addr, true); \
1909
tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \
1910
tcg_temp_free(addr); \
1914
HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
1915
gen_load_store_no_hw_align(ld32u); /*TODO acquire?*/
1919
gen_window_check2(dc, RRI8_S, RRI8_T);
1920
tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE);
1924
gen_window_check2(dc, RRI8_S, RRI8_T);
1925
tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE << 8);
1928
case 14: /*S32C1Iy*/
1929
HAS_OPTION(XTENSA_OPTION_CONDITIONAL_STORE);
1930
gen_window_check2(dc, RRI8_S, RRI8_T);
1932
int label = gen_new_label();
1933
TCGv_i32 tmp = tcg_temp_local_new_i32();
1934
TCGv_i32 addr = tcg_temp_local_new_i32();
1936
tcg_gen_mov_i32(tmp, cpu_R[RRI8_T]);
1937
tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2);
1938
gen_load_store_alignment(dc, 2, addr, true);
1939
tcg_gen_qemu_ld32u(cpu_R[RRI8_T], addr, dc->cring);
1940
tcg_gen_brcond_i32(TCG_COND_NE, cpu_R[RRI8_T],
1941
cpu_SR[SCOMPARE1], label);
1943
tcg_gen_qemu_st32(tmp, addr, dc->cring);
1945
gen_set_label(label);
1946
tcg_temp_free(addr);
1952
HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
1953
gen_load_store_no_hw_align(st32); /*TODO release?*/
1955
#undef gen_load_store_no_hw_align
1957
default: /*reserved*/
1964
HAS_OPTION(XTENSA_OPTION_COPROCESSOR);
1969
HAS_OPTION(XTENSA_OPTION_MAC16);
1978
bool is_m1_sr = (OP2 & 0x3) == 2;
1979
bool is_m2_sr = (OP2 & 0xc) == 0;
1980
uint32_t ld_offset = 0;
1987
case 0: /*MACI?/MACC?*/
1989
ld_offset = (OP2 & 1) ? -4 : 4;
1991
if (OP2 >= 8) { /*MACI/MACC*/
1992
if (OP1 == 0) { /*LDINC/LDDEC*/
1997
} else if (op != MAC16_MULA) { /*MULA.*.*.LDINC/LDDEC*/
2002
case 2: /*MACD?/MACA?*/
2003
if (op == MAC16_UMUL && OP2 != 7) { /*UMUL only in MACAA*/
2009
if (op != MAC16_NONE) {
2011
gen_window_check1(dc, RRR_S);
2014
gen_window_check1(dc, RRR_T);
2019
TCGv_i32 vaddr = tcg_temp_new_i32();
2020
TCGv_i32 mem32 = tcg_temp_new_i32();
2023
gen_window_check1(dc, RRR_S);
2024
tcg_gen_addi_i32(vaddr, cpu_R[RRR_S], ld_offset);
2025
gen_load_store_alignment(dc, 2, vaddr, false);
2026
tcg_gen_qemu_ld32u(mem32, vaddr, dc->cring);
2028
if (op != MAC16_NONE) {
2029
TCGv_i32 m1 = gen_mac16_m(
2030
is_m1_sr ? cpu_SR[MR + RRR_X] : cpu_R[RRR_S],
2031
OP1 & 1, op == MAC16_UMUL);
2032
TCGv_i32 m2 = gen_mac16_m(
2033
is_m2_sr ? cpu_SR[MR + 2 + RRR_Y] : cpu_R[RRR_T],
2034
OP1 & 2, op == MAC16_UMUL);
2036
if (op == MAC16_MUL || op == MAC16_UMUL) {
2037
tcg_gen_mul_i32(cpu_SR[ACCLO], m1, m2);
2038
if (op == MAC16_UMUL) {
2039
tcg_gen_movi_i32(cpu_SR[ACCHI], 0);
2041
tcg_gen_sari_i32(cpu_SR[ACCHI], cpu_SR[ACCLO], 31);
2044
TCGv_i32 res = tcg_temp_new_i32();
2045
TCGv_i64 res64 = tcg_temp_new_i64();
2046
TCGv_i64 tmp = tcg_temp_new_i64();
2048
tcg_gen_mul_i32(res, m1, m2);
2049
tcg_gen_ext_i32_i64(res64, res);
2050
tcg_gen_concat_i32_i64(tmp,
2051
cpu_SR[ACCLO], cpu_SR[ACCHI]);
2052
if (op == MAC16_MULA) {
2053
tcg_gen_add_i64(tmp, tmp, res64);
2055
tcg_gen_sub_i64(tmp, tmp, res64);
2057
tcg_gen_trunc_i64_i32(cpu_SR[ACCLO], tmp);
2058
tcg_gen_shri_i64(tmp, tmp, 32);
2059
tcg_gen_trunc_i64_i32(cpu_SR[ACCHI], tmp);
2060
tcg_gen_ext8s_i32(cpu_SR[ACCHI], cpu_SR[ACCHI]);
2063
tcg_temp_free_i64(res64);
2064
tcg_temp_free_i64(tmp);
2070
tcg_gen_mov_i32(cpu_R[RRR_S], vaddr);
2071
tcg_gen_mov_i32(cpu_SR[MR + RRR_W], mem32);
2073
tcg_temp_free(vaddr);
2074
tcg_temp_free(mem32);
2082
tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
2083
gen_jumpi(dc, (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0);
2089
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
2090
gen_window_check1(dc, CALL_N << 2);
2091
gen_callwi(dc, CALL_N,
2092
(dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0);
2100
gen_jumpi(dc, dc->pc + 4 + CALL_OFFSET_SE, 0);
2104
gen_window_check1(dc, BRI12_S);
2106
static const TCGCond cond[] = {
2107
TCG_COND_EQ, /*BEQZ*/
2108
TCG_COND_NE, /*BNEZ*/
2109
TCG_COND_LT, /*BLTZ*/
2110
TCG_COND_GE, /*BGEZ*/
2113
gen_brcondi(dc, cond[BRI12_M & 3], cpu_R[BRI12_S], 0,
2114
4 + BRI12_IMM12_SE);
2119
gen_window_check1(dc, BRI8_S);
2121
static const TCGCond cond[] = {
2122
TCG_COND_EQ, /*BEQI*/
2123
TCG_COND_NE, /*BNEI*/
2124
TCG_COND_LT, /*BLTI*/
2125
TCG_COND_GE, /*BGEI*/
2128
gen_brcondi(dc, cond[BRI8_M & 3],
2129
cpu_R[BRI8_S], B4CONST[BRI8_R], 4 + BRI8_IMM8_SE);
2136
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
2138
TCGv_i32 pc = tcg_const_i32(dc->pc);
2139
TCGv_i32 s = tcg_const_i32(BRI12_S);
2140
TCGv_i32 imm = tcg_const_i32(BRI12_IMM12);
2141
gen_advance_ccount(dc);
2142
gen_helper_entry(pc, s, imm);
2146
reset_used_window(dc);
2154
HAS_OPTION(XTENSA_OPTION_BOOLEAN);
2156
TCGv_i32 tmp = tcg_temp_new_i32();
2157
tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRI8_S);
2159
BRI8_R == 1 ? TCG_COND_NE : TCG_COND_EQ,
2160
tmp, 0, 4 + RRI8_IMM8_SE);
2167
case 10: /*LOOPGTZ*/
2168
HAS_OPTION(XTENSA_OPTION_LOOP);
2169
gen_window_check1(dc, RRI8_S);
2171
uint32_t lend = dc->pc + RRI8_IMM8 + 4;
2172
TCGv_i32 tmp = tcg_const_i32(lend);
2174
tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_R[RRI8_S], 1);
2175
tcg_gen_movi_i32(cpu_SR[LBEG], dc->next_pc);
2176
gen_wsr_lend(dc, LEND, tmp);
2180
int label = gen_new_label();
2181
tcg_gen_brcondi_i32(
2182
BRI8_R == 9 ? TCG_COND_NE : TCG_COND_GT,
2183
cpu_R[RRI8_S], 0, label);
2184
gen_jumpi(dc, lend, 1);
2185
gen_set_label(label);
2188
gen_jumpi(dc, dc->next_pc, 0);
2192
default: /*reserved*/
2201
gen_window_check1(dc, BRI8_S);
2202
gen_brcondi(dc, BRI8_M == 2 ? TCG_COND_LTU : TCG_COND_GEU,
2203
cpu_R[BRI8_S], B4CONSTU[BRI8_R], 4 + BRI8_IMM8_SE);
2213
TCGCond eq_ne = (RRI8_R & 8) ? TCG_COND_NE : TCG_COND_EQ;
2215
switch (RRI8_R & 7) {
2216
case 0: /*BNONE*/ /*BANY*/
2217
gen_window_check2(dc, RRI8_S, RRI8_T);
2219
TCGv_i32 tmp = tcg_temp_new_i32();
2220
tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]);
2221
gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
2226
case 1: /*BEQ*/ /*BNE*/
2227
case 2: /*BLT*/ /*BGE*/
2228
case 3: /*BLTU*/ /*BGEU*/
2229
gen_window_check2(dc, RRI8_S, RRI8_T);
2231
static const TCGCond cond[] = {
2237
[11] = TCG_COND_GEU,
2239
gen_brcond(dc, cond[RRI8_R], cpu_R[RRI8_S], cpu_R[RRI8_T],
2244
case 4: /*BALL*/ /*BNALL*/
2245
gen_window_check2(dc, RRI8_S, RRI8_T);
2247
TCGv_i32 tmp = tcg_temp_new_i32();
2248
tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]);
2249
gen_brcond(dc, eq_ne, tmp, cpu_R[RRI8_T],
2255
case 5: /*BBC*/ /*BBS*/
2256
gen_window_check2(dc, RRI8_S, RRI8_T);
2258
TCGv_i32 bit = tcg_const_i32(1);
2259
TCGv_i32 tmp = tcg_temp_new_i32();
2260
tcg_gen_andi_i32(tmp, cpu_R[RRI8_T], 0x1f);
2261
tcg_gen_shl_i32(bit, bit, tmp);
2262
tcg_gen_and_i32(tmp, cpu_R[RRI8_S], bit);
2263
gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
2269
case 6: /*BBCI*/ /*BBSI*/
2271
gen_window_check1(dc, RRI8_S);
2273
TCGv_i32 tmp = tcg_temp_new_i32();
2274
tcg_gen_andi_i32(tmp, cpu_R[RRI8_S],
2275
1 << (((RRI8_R & 1) << 4) | RRI8_T));
2276
gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
2285
#define gen_narrow_load_store(type) do { \
2286
TCGv_i32 addr = tcg_temp_new_i32(); \
2287
gen_window_check2(dc, RRRN_S, RRRN_T); \
2288
tcg_gen_addi_i32(addr, cpu_R[RRRN_S], RRRN_R << 2); \
2289
gen_load_store_alignment(dc, 2, addr, false); \
2290
tcg_gen_qemu_##type(cpu_R[RRRN_T], addr, dc->cring); \
2291
tcg_temp_free(addr); \
2295
gen_narrow_load_store(ld32u);
2299
gen_narrow_load_store(st32);
2301
#undef gen_narrow_load_store
2304
gen_window_check3(dc, RRRN_R, RRRN_S, RRRN_T);
2305
tcg_gen_add_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], cpu_R[RRRN_T]);
2308
case 11: /*ADDI.Nn*/
2309
gen_window_check2(dc, RRRN_R, RRRN_S);
2310
tcg_gen_addi_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], RRRN_T ? RRRN_T : -1);
2314
gen_window_check1(dc, RRRN_S);
2315
if (RRRN_T < 8) { /*MOVI.Nn*/
2316
tcg_gen_movi_i32(cpu_R[RRRN_S],
2317
RRRN_R | (RRRN_T << 4) |
2318
((RRRN_T & 6) == 6 ? 0xffffff80 : 0));
2319
} else { /*BEQZ.Nn*/ /*BNEZ.Nn*/
2320
TCGCond eq_ne = (RRRN_T & 4) ? TCG_COND_NE : TCG_COND_EQ;
2322
gen_brcondi(dc, eq_ne, cpu_R[RRRN_S], 0,
2323
4 + (RRRN_R | ((RRRN_T & 3) << 4)));
2330
gen_window_check2(dc, RRRN_S, RRRN_T);
2331
tcg_gen_mov_i32(cpu_R[RRRN_T], cpu_R[RRRN_S]);
2337
gen_jump(dc, cpu_R[0]);
2341
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
2343
TCGv_i32 tmp = tcg_const_i32(dc->pc);
2344
gen_advance_ccount(dc);
2345
gen_helper_retw(tmp, tmp);
2351
case 2: /*BREAK.Nn*/
2359
gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
2362
default: /*reserved*/
2368
default: /*reserved*/
2374
default: /*reserved*/
2379
gen_check_loop_end(dc, 0);
2380
dc->pc = dc->next_pc;
2385
qemu_log("INVALID(pc = %08x)\n", dc->pc);
2386
gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
2390
static void check_breakpoint(CPUState *env, DisasContext *dc)
2394
if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
2395
QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
2396
if (bp->pc == dc->pc) {
2397
tcg_gen_movi_i32(cpu_pc, dc->pc);
2398
gen_exception(dc, EXCP_DEBUG);
2399
dc->is_jmp = DISAS_UPDATE;
2405
static void gen_intermediate_code_internal(
2406
CPUState *env, TranslationBlock *tb, int search_pc)
2411
uint16_t *gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2412
int max_insns = tb->cflags & CF_COUNT_MASK;
2413
uint32_t pc_start = tb->pc;
2414
uint32_t next_page_start =
2415
(pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2417
if (max_insns == 0) {
2418
max_insns = CF_COUNT_MASK;
2421
dc.config = env->config;
2422
dc.singlestep_enabled = env->singlestep_enabled;
2425
dc.ring = tb->flags & XTENSA_TBFLAG_RING_MASK;
2426
dc.cring = (tb->flags & XTENSA_TBFLAG_EXCM) ? 0 : dc.ring;
2427
dc.lbeg = env->sregs[LBEG];
2428
dc.lend = env->sregs[LEND];
2429
dc.is_jmp = DISAS_NEXT;
2430
dc.ccount_delta = 0;
2433
init_sar_tracker(&dc);
2434
reset_used_window(&dc);
2438
if (env->singlestep_enabled && env->exception_taken) {
2439
env->exception_taken = 0;
2440
tcg_gen_movi_i32(cpu_pc, dc.pc);
2441
gen_exception(&dc, EXCP_DEBUG);
2445
check_breakpoint(env, &dc);
2448
j = gen_opc_ptr - gen_opc_buf;
2452
gen_opc_instr_start[lj++] = 0;
2455
gen_opc_pc[lj] = dc.pc;
2456
gen_opc_instr_start[lj] = 1;
2457
gen_opc_icount[lj] = insn_count;
2460
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
2461
tcg_gen_debug_insn_start(dc.pc);
2466
if (insn_count + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
2470
disas_xtensa_insn(&dc);
2472
if (env->singlestep_enabled) {
2473
tcg_gen_movi_i32(cpu_pc, dc.pc);
2474
gen_exception(&dc, EXCP_DEBUG);
2477
} while (dc.is_jmp == DISAS_NEXT &&
2478
insn_count < max_insns &&
2479
dc.pc < next_page_start &&
2480
gen_opc_ptr < gen_opc_end);
2483
reset_sar_tracker(&dc);
2485
if (tb->cflags & CF_LAST_IO) {
2489
if (dc.is_jmp == DISAS_NEXT) {
2490
gen_jumpi(&dc, dc.pc, 0);
2492
gen_icount_end(tb, insn_count);
2493
*gen_opc_ptr = INDEX_op_end;
2496
tb->size = dc.pc - pc_start;
2497
tb->icount = insn_count;
2501
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2503
gen_intermediate_code_internal(env, tb, 0);
2506
void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2508
gen_intermediate_code_internal(env, tb, 1);
2511
void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
2516
cpu_fprintf(f, "PC=%08x\n\n", env->pc);
2518
for (i = j = 0; i < 256; ++i) {
2520
cpu_fprintf(f, "%s=%08x%c", sregnames[i], env->sregs[i],
2521
(j++ % 4) == 3 ? '\n' : ' ');
2525
cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
2527
for (i = j = 0; i < 256; ++i) {
2529
cpu_fprintf(f, "%s=%08x%c", uregnames[i], env->uregs[i],
2530
(j++ % 4) == 3 ? '\n' : ' ');
2534
cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
2536
for (i = 0; i < 16; ++i) {
2537
cpu_fprintf(f, "A%02d=%08x%c", i, env->regs[i],
2538
(i % 4) == 3 ? '\n' : ' ');
2541
cpu_fprintf(f, "\n");
2543
for (i = 0; i < env->config->nareg; ++i) {
2544
cpu_fprintf(f, "AR%02d=%08x%c", i, env->phys_regs[i],
2545
(i % 4) == 3 ? '\n' : ' ');
2549
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
2551
env->pc = gen_opc_pc[pc_pos];