2
* CRIS emulation for qemu: main translation routines.
4
* Copyright (c) 2008 AXIS Communications AB
5
* Written by Edgar E. Iglesias.
7
* This library is free software; you can redistribute it and/or
8
* modify it under the terms of the GNU Lesser General Public
9
* License as published by the Free Software Foundation; either
10
* version 2 of the License, or (at your option) any later version.
12
* This library is distributed in the hope that it will be useful,
13
* but WITHOUT ANY WARRANTY; without even the implied warranty of
14
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15
* Lesser General Public License for more details.
17
* You should have received a copy of the GNU Lesser General Public
18
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
23
* The condition code translation is in need of attention.
37
#include "crisv32-decode.h"
38
#include "qemu-common.h"
45
# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
47
# define LOG_DIS(...) do { } while (0)
51
#define BUG() (gen_BUG(dc, __FILE__, __LINE__))
52
#define BUG_ON(x) ({if (x) BUG();})
56
/* Used by the decoder. */
57
#define EXTRACT_FIELD(src, start, end) \
58
(((src) >> start) & ((1 << (end - start + 1)) - 1))
60
#define CC_MASK_NZ 0xc
61
#define CC_MASK_NZV 0xe
62
#define CC_MASK_NZVC 0xf
63
#define CC_MASK_RNZV 0x10e
65
static TCGv_ptr cpu_env;
66
static TCGv cpu_R[16];
67
static TCGv cpu_PR[16];
71
static TCGv cc_result;
76
static TCGv env_btaken;
77
static TCGv env_btarget;
80
#include "gen-icount.h"
82
/* This is the state at translation time. */
83
typedef struct DisasContext {
88
unsigned int (*decoder)(struct DisasContext *dc);
93
unsigned int zsize, zzsize;
107
int cc_size_uptodate; /* -1 invalid or last written value. */
109
int cc_x_uptodate; /* 1 - ccs, 2 - known | X_FLAG. 0 not uptodate. */
110
int flags_uptodate; /* Wether or not $ccs is uptodate. */
111
int flagx_known; /* Wether or not flags_x has the x flag known at
115
int clear_x; /* Clear x after this insn? */
116
int clear_prefix; /* Clear prefix after this insn? */
117
int clear_locked_irq; /* Clear the irq lockout. */
118
int cpustate_changed;
119
unsigned int tb_flags; /* tb dependent flags. */
124
#define JMP_DIRECT_CC 2
125
#define JMP_INDIRECT 3
126
int jmp; /* 0=nojmp, 1=direct, 2=indirect. */
131
struct TranslationBlock *tb;
132
int singlestep_enabled;
135
static void gen_BUG(DisasContext *dc, const char *file, int line)
137
printf ("BUG: pc=%x %s %d\n", dc->pc, file, line);
138
qemu_log("BUG: pc=%x %s %d\n", dc->pc, file, line);
139
cpu_abort(dc->env, "%s:%d\n", file, line);
142
static const char *regnames[] =
144
"$r0", "$r1", "$r2", "$r3",
145
"$r4", "$r5", "$r6", "$r7",
146
"$r8", "$r9", "$r10", "$r11",
147
"$r12", "$r13", "$sp", "$acr",
149
static const char *pregnames[] =
151
"$bz", "$vr", "$pid", "$srs",
152
"$wz", "$exs", "$eda", "$mof",
153
"$dz", "$ebp", "$erp", "$srp",
154
"$nrp", "$ccs", "$usp", "$spc",
157
/* We need this table to handle preg-moves with implicit width. */
158
static int preg_sizes[] = {
169
#define t_gen_mov_TN_env(tn, member) \
170
_t_gen_mov_TN_env((tn), offsetof(CPUState, member))
171
#define t_gen_mov_env_TN(member, tn) \
172
_t_gen_mov_env_TN(offsetof(CPUState, member), (tn))
174
static inline void t_gen_mov_TN_reg(TCGv tn, int r)
177
fprintf(stderr, "wrong register read $r%d\n", r);
178
tcg_gen_mov_tl(tn, cpu_R[r]);
180
static inline void t_gen_mov_reg_TN(int r, TCGv tn)
183
fprintf(stderr, "wrong register write $r%d\n", r);
184
tcg_gen_mov_tl(cpu_R[r], tn);
187
static inline void _t_gen_mov_TN_env(TCGv tn, int offset)
189
if (offset > sizeof (CPUState))
190
fprintf(stderr, "wrong load from env from off=%d\n", offset);
191
tcg_gen_ld_tl(tn, cpu_env, offset);
193
static inline void _t_gen_mov_env_TN(int offset, TCGv tn)
195
if (offset > sizeof (CPUState))
196
fprintf(stderr, "wrong store to env at off=%d\n", offset);
197
tcg_gen_st_tl(tn, cpu_env, offset);
200
static inline void t_gen_mov_TN_preg(TCGv tn, int r)
203
fprintf(stderr, "wrong register read $p%d\n", r);
204
if (r == PR_BZ || r == PR_WZ || r == PR_DZ)
205
tcg_gen_mov_tl(tn, tcg_const_tl(0));
207
tcg_gen_mov_tl(tn, tcg_const_tl(32));
209
tcg_gen_mov_tl(tn, cpu_PR[r]);
211
static inline void t_gen_mov_preg_TN(DisasContext *dc, int r, TCGv tn)
214
fprintf(stderr, "wrong register write $p%d\n", r);
215
if (r == PR_BZ || r == PR_WZ || r == PR_DZ)
217
else if (r == PR_SRS)
218
tcg_gen_andi_tl(cpu_PR[r], tn, 3);
221
gen_helper_tlb_flush_pid(tn);
222
if (dc->tb_flags & S_FLAG && r == PR_SPC)
223
gen_helper_spc_write(tn);
224
else if (r == PR_CCS)
225
dc->cpustate_changed = 1;
226
tcg_gen_mov_tl(cpu_PR[r], tn);
230
/* Sign extend at translation time. */
231
static int sign_extend(unsigned int val, unsigned int width)
243
static int cris_fetch(DisasContext *dc, uint32_t addr,
244
unsigned int size, unsigned int sign)
273
cpu_abort(dc->env, "Invalid fetch size %d\n", size);
279
static void cris_lock_irq(DisasContext *dc)
281
dc->clear_locked_irq = 0;
282
t_gen_mov_env_TN(locked_irq, tcg_const_tl(1));
285
static inline void t_gen_raise_exception(uint32_t index)
287
TCGv_i32 tmp = tcg_const_i32(index);
288
gen_helper_raise_exception(tmp);
289
tcg_temp_free_i32(tmp);
292
static void t_gen_lsl(TCGv d, TCGv a, TCGv b)
297
t_31 = tcg_const_tl(31);
298
tcg_gen_shl_tl(d, a, b);
300
tcg_gen_sub_tl(t0, t_31, b);
301
tcg_gen_sar_tl(t0, t0, t_31);
302
tcg_gen_and_tl(t0, t0, d);
303
tcg_gen_xor_tl(d, d, t0);
308
static void t_gen_lsr(TCGv d, TCGv a, TCGv b)
313
t_31 = tcg_temp_new();
314
tcg_gen_shr_tl(d, a, b);
316
tcg_gen_movi_tl(t_31, 31);
317
tcg_gen_sub_tl(t0, t_31, b);
318
tcg_gen_sar_tl(t0, t0, t_31);
319
tcg_gen_and_tl(t0, t0, d);
320
tcg_gen_xor_tl(d, d, t0);
325
static void t_gen_asr(TCGv d, TCGv a, TCGv b)
330
t_31 = tcg_temp_new();
331
tcg_gen_sar_tl(d, a, b);
333
tcg_gen_movi_tl(t_31, 31);
334
tcg_gen_sub_tl(t0, t_31, b);
335
tcg_gen_sar_tl(t0, t0, t_31);
336
tcg_gen_or_tl(d, d, t0);
341
/* 64-bit signed mul, lower result in d and upper in d2. */
342
static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
346
t0 = tcg_temp_new_i64();
347
t1 = tcg_temp_new_i64();
349
tcg_gen_ext_i32_i64(t0, a);
350
tcg_gen_ext_i32_i64(t1, b);
351
tcg_gen_mul_i64(t0, t0, t1);
353
tcg_gen_trunc_i64_i32(d, t0);
354
tcg_gen_shri_i64(t0, t0, 32);
355
tcg_gen_trunc_i64_i32(d2, t0);
357
tcg_temp_free_i64(t0);
358
tcg_temp_free_i64(t1);
361
/* 64-bit unsigned muls, lower result in d and upper in d2. */
362
static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
366
t0 = tcg_temp_new_i64();
367
t1 = tcg_temp_new_i64();
369
tcg_gen_extu_i32_i64(t0, a);
370
tcg_gen_extu_i32_i64(t1, b);
371
tcg_gen_mul_i64(t0, t0, t1);
373
tcg_gen_trunc_i64_i32(d, t0);
374
tcg_gen_shri_i64(t0, t0, 32);
375
tcg_gen_trunc_i64_i32(d2, t0);
377
tcg_temp_free_i64(t0);
378
tcg_temp_free_i64(t1);
381
static void t_gen_cris_dstep(TCGv d, TCGv a, TCGv b)
385
l1 = gen_new_label();
392
tcg_gen_shli_tl(d, a, 1);
393
tcg_gen_brcond_tl(TCG_COND_LTU, d, b, l1);
394
tcg_gen_sub_tl(d, d, b);
398
static void t_gen_cris_mstep(TCGv d, TCGv a, TCGv b, TCGv ccs)
408
tcg_gen_shli_tl(d, a, 1);
409
tcg_gen_shli_tl(t, ccs, 31 - 3);
410
tcg_gen_sari_tl(t, t, 31);
411
tcg_gen_and_tl(t, t, b);
412
tcg_gen_add_tl(d, d, t);
416
/* Extended arithmetics on CRIS. */
417
static inline void t_gen_add_flag(TCGv d, int flag)
422
t_gen_mov_TN_preg(c, PR_CCS);
423
/* Propagate carry into d. */
424
tcg_gen_andi_tl(c, c, 1 << flag);
426
tcg_gen_shri_tl(c, c, flag);
427
tcg_gen_add_tl(d, d, c);
431
static inline void t_gen_addx_carry(DisasContext *dc, TCGv d)
433
if (dc->flagx_known) {
438
t_gen_mov_TN_preg(c, PR_CCS);
439
/* C flag is already at bit 0. */
440
tcg_gen_andi_tl(c, c, C_FLAG);
441
tcg_gen_add_tl(d, d, c);
449
t_gen_mov_TN_preg(x, PR_CCS);
450
tcg_gen_mov_tl(c, x);
452
/* Propagate carry into d if X is set. Branch free. */
453
tcg_gen_andi_tl(c, c, C_FLAG);
454
tcg_gen_andi_tl(x, x, X_FLAG);
455
tcg_gen_shri_tl(x, x, 4);
457
tcg_gen_and_tl(x, x, c);
458
tcg_gen_add_tl(d, d, x);
464
static inline void t_gen_subx_carry(DisasContext *dc, TCGv d)
466
if (dc->flagx_known) {
471
t_gen_mov_TN_preg(c, PR_CCS);
472
/* C flag is already at bit 0. */
473
tcg_gen_andi_tl(c, c, C_FLAG);
474
tcg_gen_sub_tl(d, d, c);
482
t_gen_mov_TN_preg(x, PR_CCS);
483
tcg_gen_mov_tl(c, x);
485
/* Propagate carry into d if X is set. Branch free. */
486
tcg_gen_andi_tl(c, c, C_FLAG);
487
tcg_gen_andi_tl(x, x, X_FLAG);
488
tcg_gen_shri_tl(x, x, 4);
490
tcg_gen_and_tl(x, x, c);
491
tcg_gen_sub_tl(d, d, x);
497
/* Swap the two bytes within each half word of the s operand.
498
T0 = ((T0 << 8) & 0xff00ff00) | ((T0 >> 8) & 0x00ff00ff) */
499
static inline void t_gen_swapb(TCGv d, TCGv s)
504
org_s = tcg_temp_new();
506
/* d and s may refer to the same object. */
507
tcg_gen_mov_tl(org_s, s);
508
tcg_gen_shli_tl(t, org_s, 8);
509
tcg_gen_andi_tl(d, t, 0xff00ff00);
510
tcg_gen_shri_tl(t, org_s, 8);
511
tcg_gen_andi_tl(t, t, 0x00ff00ff);
512
tcg_gen_or_tl(d, d, t);
514
tcg_temp_free(org_s);
517
/* Swap the halfwords of the s operand. */
518
static inline void t_gen_swapw(TCGv d, TCGv s)
521
/* d and s refer the same object. */
523
tcg_gen_mov_tl(t, s);
524
tcg_gen_shli_tl(d, t, 16);
525
tcg_gen_shri_tl(t, t, 16);
526
tcg_gen_or_tl(d, d, t);
530
/* Reverse the within each byte.
531
T0 = (((T0 << 7) & 0x80808080) |
532
((T0 << 5) & 0x40404040) |
533
((T0 << 3) & 0x20202020) |
534
((T0 << 1) & 0x10101010) |
535
((T0 >> 1) & 0x08080808) |
536
((T0 >> 3) & 0x04040404) |
537
((T0 >> 5) & 0x02020202) |
538
((T0 >> 7) & 0x01010101));
540
static inline void t_gen_swapr(TCGv d, TCGv s)
543
int shift; /* LSL when positive, LSR when negative. */
558
/* d and s refer the same object. */
560
org_s = tcg_temp_new();
561
tcg_gen_mov_tl(org_s, s);
563
tcg_gen_shli_tl(t, org_s, bitrev[0].shift);
564
tcg_gen_andi_tl(d, t, bitrev[0].mask);
565
for (i = 1; i < ARRAY_SIZE(bitrev); i++) {
566
if (bitrev[i].shift >= 0) {
567
tcg_gen_shli_tl(t, org_s, bitrev[i].shift);
569
tcg_gen_shri_tl(t, org_s, -bitrev[i].shift);
571
tcg_gen_andi_tl(t, t, bitrev[i].mask);
572
tcg_gen_or_tl(d, d, t);
575
tcg_temp_free(org_s);
578
static void t_gen_cc_jmp(TCGv pc_true, TCGv pc_false)
582
l1 = gen_new_label();
584
/* Conditional jmp. */
585
tcg_gen_mov_tl(env_pc, pc_false);
586
tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
587
tcg_gen_mov_tl(env_pc, pc_true);
591
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
593
TranslationBlock *tb;
595
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
597
tcg_gen_movi_tl(env_pc, dest);
598
tcg_gen_exit_tb((tcg_target_long)tb + n);
600
tcg_gen_movi_tl(env_pc, dest);
605
static inline void cris_clear_x_flag(DisasContext *dc)
607
if (dc->flagx_known && dc->flags_x)
608
dc->flags_uptodate = 0;
614
static void cris_flush_cc_state(DisasContext *dc)
616
if (dc->cc_size_uptodate != dc->cc_size) {
617
tcg_gen_movi_tl(cc_size, dc->cc_size);
618
dc->cc_size_uptodate = dc->cc_size;
620
tcg_gen_movi_tl(cc_op, dc->cc_op);
621
tcg_gen_movi_tl(cc_mask, dc->cc_mask);
624
static void cris_evaluate_flags(DisasContext *dc)
626
if (dc->flags_uptodate)
629
cris_flush_cc_state(dc);
634
gen_helper_evaluate_flags_mcp(cpu_PR[PR_CCS],
635
cpu_PR[PR_CCS], cc_src,
639
gen_helper_evaluate_flags_muls(cpu_PR[PR_CCS],
640
cpu_PR[PR_CCS], cc_result,
644
gen_helper_evaluate_flags_mulu(cpu_PR[PR_CCS],
645
cpu_PR[PR_CCS], cc_result,
658
gen_helper_evaluate_flags_move_4(cpu_PR[PR_CCS],
659
cpu_PR[PR_CCS], cc_result);
662
gen_helper_evaluate_flags_move_2(cpu_PR[PR_CCS],
663
cpu_PR[PR_CCS], cc_result);
666
gen_helper_evaluate_flags();
675
if (dc->cc_size == 4)
676
gen_helper_evaluate_flags_sub_4(cpu_PR[PR_CCS],
677
cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
679
gen_helper_evaluate_flags();
686
gen_helper_evaluate_flags_alu_4(cpu_PR[PR_CCS],
687
cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
690
gen_helper_evaluate_flags();
696
if (dc->flagx_known) {
698
tcg_gen_ori_tl(cpu_PR[PR_CCS],
699
cpu_PR[PR_CCS], X_FLAG);
700
else if (dc->cc_op == CC_OP_FLAGS)
701
tcg_gen_andi_tl(cpu_PR[PR_CCS],
702
cpu_PR[PR_CCS], ~X_FLAG);
704
dc->flags_uptodate = 1;
707
static void cris_cc_mask(DisasContext *dc, unsigned int mask)
716
/* Check if we need to evaluate the condition codes due to
718
ovl = (dc->cc_mask ^ mask) & ~mask;
720
/* TODO: optimize this case. It trigs all the time. */
721
cris_evaluate_flags (dc);
727
static void cris_update_cc_op(DisasContext *dc, int op, int size)
731
dc->flags_uptodate = 0;
734
static inline void cris_update_cc_x(DisasContext *dc)
736
/* Save the x flag state at the time of the cc snapshot. */
737
if (dc->flagx_known) {
738
if (dc->cc_x_uptodate == (2 | dc->flags_x))
740
tcg_gen_movi_tl(cc_x, dc->flags_x);
741
dc->cc_x_uptodate = 2 | dc->flags_x;
744
tcg_gen_andi_tl(cc_x, cpu_PR[PR_CCS], X_FLAG);
745
dc->cc_x_uptodate = 1;
749
/* Update cc prior to executing ALU op. Needs source operands untouched. */
750
static void cris_pre_alu_update_cc(DisasContext *dc, int op,
751
TCGv dst, TCGv src, int size)
754
cris_update_cc_op(dc, op, size);
755
tcg_gen_mov_tl(cc_src, src);
764
tcg_gen_mov_tl(cc_dest, dst);
766
cris_update_cc_x(dc);
770
/* Update cc after executing ALU op. needs the result. */
771
static inline void cris_update_result(DisasContext *dc, TCGv res)
774
tcg_gen_mov_tl(cc_result, res);
777
/* Returns one if the write back stage should execute. */
778
static void cris_alu_op_exec(DisasContext *dc, int op,
779
TCGv dst, TCGv a, TCGv b, int size)
781
/* Emit the ALU insns. */
785
tcg_gen_add_tl(dst, a, b);
786
/* Extended arithmetics. */
787
t_gen_addx_carry(dc, dst);
790
tcg_gen_add_tl(dst, a, b);
791
t_gen_add_flag(dst, 0); /* C_FLAG. */
794
tcg_gen_add_tl(dst, a, b);
795
t_gen_add_flag(dst, 8); /* R_FLAG. */
798
tcg_gen_sub_tl(dst, a, b);
799
/* Extended arithmetics. */
800
t_gen_subx_carry(dc, dst);
803
tcg_gen_mov_tl(dst, b);
806
tcg_gen_or_tl(dst, a, b);
809
tcg_gen_and_tl(dst, a, b);
812
tcg_gen_xor_tl(dst, a, b);
815
t_gen_lsl(dst, a, b);
818
t_gen_lsr(dst, a, b);
821
t_gen_asr(dst, a, b);
824
tcg_gen_neg_tl(dst, b);
825
/* Extended arithmetics. */
826
t_gen_subx_carry(dc, dst);
829
gen_helper_lz(dst, b);
832
t_gen_muls(dst, cpu_PR[PR_MOF], a, b);
835
t_gen_mulu(dst, cpu_PR[PR_MOF], a, b);
838
t_gen_cris_dstep(dst, a, b);
841
t_gen_cris_mstep(dst, a, b, cpu_PR[PR_CCS]);
846
l1 = gen_new_label();
847
tcg_gen_mov_tl(dst, a);
848
tcg_gen_brcond_tl(TCG_COND_LEU, a, b, l1);
849
tcg_gen_mov_tl(dst, b);
854
tcg_gen_sub_tl(dst, a, b);
855
/* Extended arithmetics. */
856
t_gen_subx_carry(dc, dst);
859
qemu_log("illegal ALU op.\n");
865
tcg_gen_andi_tl(dst, dst, 0xff);
867
tcg_gen_andi_tl(dst, dst, 0xffff);
870
static void cris_alu(DisasContext *dc, int op,
871
TCGv d, TCGv op_a, TCGv op_b, int size)
878
if (op == CC_OP_CMP) {
879
tmp = tcg_temp_new();
881
} else if (size == 4) {
885
tmp = tcg_temp_new();
888
cris_pre_alu_update_cc(dc, op, op_a, op_b, size);
889
cris_alu_op_exec(dc, op, tmp, op_a, op_b, size);
890
cris_update_result(dc, tmp);
895
tcg_gen_andi_tl(d, d, ~0xff);
897
tcg_gen_andi_tl(d, d, ~0xffff);
898
tcg_gen_or_tl(d, d, tmp);
900
if (!TCGV_EQUAL(tmp, d))
904
static int arith_cc(DisasContext *dc)
908
case CC_OP_ADDC: return 1;
909
case CC_OP_ADD: return 1;
910
case CC_OP_SUB: return 1;
911
case CC_OP_DSTEP: return 1;
912
case CC_OP_LSL: return 1;
913
case CC_OP_LSR: return 1;
914
case CC_OP_ASR: return 1;
915
case CC_OP_CMP: return 1;
916
case CC_OP_NEG: return 1;
917
case CC_OP_OR: return 1;
918
case CC_OP_AND: return 1;
919
case CC_OP_XOR: return 1;
920
case CC_OP_MULU: return 1;
921
case CC_OP_MULS: return 1;
929
static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond)
931
int arith_opt, move_opt;
933
/* TODO: optimize more condition codes. */
936
* If the flags are live, we've gotta look into the bits of CCS.
937
* Otherwise, if we just did an arithmetic operation we try to
938
* evaluate the condition code faster.
940
* When this function is done, T0 should be non-zero if the condition
943
arith_opt = arith_cc(dc) && !dc->flags_uptodate;
944
move_opt = (dc->cc_op == CC_OP_MOVE);
947
if ((arith_opt || move_opt)
948
&& dc->cc_x_uptodate != (2 | X_FLAG)) {
949
tcg_gen_setcond_tl(TCG_COND_EQ, cc,
950
cc_result, tcg_const_tl(0));
953
cris_evaluate_flags(dc);
955
cpu_PR[PR_CCS], Z_FLAG);
959
if ((arith_opt || move_opt)
960
&& dc->cc_x_uptodate != (2 | X_FLAG)) {
961
tcg_gen_mov_tl(cc, cc_result);
963
cris_evaluate_flags(dc);
964
tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
966
tcg_gen_andi_tl(cc, cc, Z_FLAG);
970
cris_evaluate_flags(dc);
971
tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], C_FLAG);
974
cris_evaluate_flags(dc);
975
tcg_gen_xori_tl(cc, cpu_PR[PR_CCS], C_FLAG);
976
tcg_gen_andi_tl(cc, cc, C_FLAG);
979
cris_evaluate_flags(dc);
980
tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], V_FLAG);
983
cris_evaluate_flags(dc);
984
tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
986
tcg_gen_andi_tl(cc, cc, V_FLAG);
989
if (arith_opt || move_opt) {
992
if (dc->cc_size == 1)
994
else if (dc->cc_size == 2)
997
tcg_gen_shri_tl(cc, cc_result, bits);
998
tcg_gen_xori_tl(cc, cc, 1);
1000
cris_evaluate_flags(dc);
1001
tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
1003
tcg_gen_andi_tl(cc, cc, N_FLAG);
1007
if (arith_opt || move_opt) {
1010
if (dc->cc_size == 1)
1012
else if (dc->cc_size == 2)
1015
tcg_gen_shri_tl(cc, cc_result, bits);
1016
tcg_gen_andi_tl(cc, cc, 1);
1019
cris_evaluate_flags(dc);
1020
tcg_gen_andi_tl(cc, cpu_PR[PR_CCS],
1025
cris_evaluate_flags(dc);
1026
tcg_gen_andi_tl(cc, cpu_PR[PR_CCS],
1030
cris_evaluate_flags(dc);
1034
tmp = tcg_temp_new();
1035
tcg_gen_xori_tl(tmp, cpu_PR[PR_CCS],
1037
/* Overlay the C flag on top of the Z. */
1038
tcg_gen_shli_tl(cc, tmp, 2);
1039
tcg_gen_and_tl(cc, tmp, cc);
1040
tcg_gen_andi_tl(cc, cc, Z_FLAG);
1046
cris_evaluate_flags(dc);
1047
/* Overlay the V flag on top of the N. */
1048
tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2);
1050
cpu_PR[PR_CCS], cc);
1051
tcg_gen_andi_tl(cc, cc, N_FLAG);
1052
tcg_gen_xori_tl(cc, cc, N_FLAG);
1055
cris_evaluate_flags(dc);
1056
/* Overlay the V flag on top of the N. */
1057
tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2);
1059
cpu_PR[PR_CCS], cc);
1060
tcg_gen_andi_tl(cc, cc, N_FLAG);
1063
cris_evaluate_flags(dc);
1070
/* To avoid a shift we overlay everything on
1072
tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2);
1073
tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1);
1075
tcg_gen_xori_tl(z, z, 2);
1077
tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]);
1078
tcg_gen_xori_tl(n, n, 2);
1079
tcg_gen_and_tl(cc, z, n);
1080
tcg_gen_andi_tl(cc, cc, 2);
1087
cris_evaluate_flags(dc);
1094
/* To avoid a shift we overlay everything on
1096
tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2);
1097
tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1);
1099
tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]);
1100
tcg_gen_or_tl(cc, z, n);
1101
tcg_gen_andi_tl(cc, cc, 2);
1108
cris_evaluate_flags(dc);
1109
tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], P_FLAG);
1112
tcg_gen_movi_tl(cc, 1);
1120
static void cris_store_direct_jmp(DisasContext *dc)
1122
/* Store the direct jmp state into the cpu-state. */
1123
if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
1124
if (dc->jmp == JMP_DIRECT) {
1125
tcg_gen_movi_tl(env_btaken, 1);
1127
tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
1128
dc->jmp = JMP_INDIRECT;
1132
static void cris_prepare_cc_branch (DisasContext *dc,
1133
int offset, int cond)
1135
/* This helps us re-schedule the micro-code to insns in delay-slots
1136
before the actual jump. */
1137
dc->delayed_branch = 2;
1138
dc->jmp = JMP_DIRECT_CC;
1139
dc->jmp_pc = dc->pc + offset;
1141
gen_tst_cc (dc, env_btaken, cond);
1142
tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
1146
/* jumps, when the dest is in a live reg for example. Direct should be set
1147
when the dest addr is constant to allow tb chaining. */
1148
static inline void cris_prepare_jmp (DisasContext *dc, unsigned int type)
1150
/* This helps us re-schedule the micro-code to insns in delay-slots
1151
before the actual jump. */
1152
dc->delayed_branch = 2;
1154
if (type == JMP_INDIRECT) {
1155
tcg_gen_movi_tl(env_btaken, 1);
1159
static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
1161
int mem_index = cpu_mmu_index(dc->env);
1163
/* If we get a fault on a delayslot we must keep the jmp state in
1164
the cpu-state to be able to re-execute the jmp. */
1165
if (dc->delayed_branch == 1)
1166
cris_store_direct_jmp(dc);
1168
tcg_gen_qemu_ld64(dst, addr, mem_index);
1171
static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
1172
unsigned int size, int sign)
1174
int mem_index = cpu_mmu_index(dc->env);
1176
/* If we get a fault on a delayslot we must keep the jmp state in
1177
the cpu-state to be able to re-execute the jmp. */
1178
if (dc->delayed_branch == 1)
1179
cris_store_direct_jmp(dc);
1183
tcg_gen_qemu_ld8s(dst, addr, mem_index);
1185
tcg_gen_qemu_ld8u(dst, addr, mem_index);
1187
else if (size == 2) {
1189
tcg_gen_qemu_ld16s(dst, addr, mem_index);
1191
tcg_gen_qemu_ld16u(dst, addr, mem_index);
1193
else if (size == 4) {
1194
tcg_gen_qemu_ld32u(dst, addr, mem_index);
1201
static void gen_store (DisasContext *dc, TCGv addr, TCGv val,
1204
int mem_index = cpu_mmu_index(dc->env);
1206
/* If we get a fault on a delayslot we must keep the jmp state in
1207
the cpu-state to be able to re-execute the jmp. */
1208
if (dc->delayed_branch == 1)
1209
cris_store_direct_jmp(dc);
1212
/* Conditional writes. We only support the kind were X and P are known
1213
at translation time. */
1214
if (dc->flagx_known && dc->flags_x && (dc->tb_flags & P_FLAG)) {
1216
cris_evaluate_flags(dc);
1217
tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], C_FLAG);
1222
tcg_gen_qemu_st8(val, addr, mem_index);
1224
tcg_gen_qemu_st16(val, addr, mem_index);
1226
tcg_gen_qemu_st32(val, addr, mem_index);
1228
if (dc->flagx_known && dc->flags_x) {
1229
cris_evaluate_flags(dc);
1230
tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~C_FLAG);
1234
static inline void t_gen_sext(TCGv d, TCGv s, int size)
1237
tcg_gen_ext8s_i32(d, s);
1239
tcg_gen_ext16s_i32(d, s);
1240
else if(!TCGV_EQUAL(d, s))
1241
tcg_gen_mov_tl(d, s);
1244
static inline void t_gen_zext(TCGv d, TCGv s, int size)
1247
tcg_gen_ext8u_i32(d, s);
1249
tcg_gen_ext16u_i32(d, s);
1250
else if (!TCGV_EQUAL(d, s))
1251
tcg_gen_mov_tl(d, s);
1255
static char memsize_char(int size)
1259
case 1: return 'b'; break;
1260
case 2: return 'w'; break;
1261
case 4: return 'd'; break;
1269
static inline unsigned int memsize_z(DisasContext *dc)
1271
return dc->zsize + 1;
1274
static inline unsigned int memsize_zz(DisasContext *dc)
1285
static inline void do_postinc (DisasContext *dc, int size)
1288
tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], size);
1291
static inline void dec_prep_move_r(DisasContext *dc, int rs, int rd,
1292
int size, int s_ext, TCGv dst)
1295
t_gen_sext(dst, cpu_R[rs], size);
1297
t_gen_zext(dst, cpu_R[rs], size);
1300
/* Prepare T0 and T1 for a register alu operation.
1301
s_ext decides if the operand1 should be sign-extended or zero-extended when
1303
static void dec_prep_alu_r(DisasContext *dc, int rs, int rd,
1304
int size, int s_ext, TCGv dst, TCGv src)
1306
dec_prep_move_r(dc, rs, rd, size, s_ext, src);
1309
t_gen_sext(dst, cpu_R[rd], size);
1311
t_gen_zext(dst, cpu_R[rd], size);
1314
static int dec_prep_move_m(DisasContext *dc, int s_ext, int memsize,
1323
is_imm = rs == 15 && dc->postinc;
1325
/* Load [$rs] onto T1. */
1327
insn_len = 2 + memsize;
1331
imm = cris_fetch(dc, dc->pc + 2, memsize, s_ext);
1332
tcg_gen_movi_tl(dst, imm);
1335
cris_flush_cc_state(dc);
1336
gen_load(dc, dst, cpu_R[rs], memsize, 0);
1338
t_gen_sext(dst, dst, memsize);
1340
t_gen_zext(dst, dst, memsize);
1345
/* Prepare T0 and T1 for a memory + alu operation.
1346
s_ext decides if the operand1 should be sign-extended or zero-extended when
1348
static int dec_prep_alu_m(DisasContext *dc, int s_ext, int memsize,
1353
insn_len = dec_prep_move_m(dc, s_ext, memsize, src);
1354
tcg_gen_mov_tl(dst, cpu_R[dc->op2]);
1359
static const char *cc_name(int cc)
1361
static const char *cc_names[16] = {
1362
"cc", "cs", "ne", "eq", "vc", "vs", "pl", "mi",
1363
"ls", "hi", "ge", "lt", "gt", "le", "a", "p"
1366
return cc_names[cc];
1370
/* Start of insn decoders. */
1372
static int dec_bccq(DisasContext *dc)
1376
uint32_t cond = dc->op2;
1378
offset = EXTRACT_FIELD (dc->ir, 1, 7);
1379
sign = EXTRACT_FIELD(dc->ir, 0, 0);
1382
offset |= sign << 8;
1383
offset = sign_extend(offset, 8);
1385
LOG_DIS("b%s %x\n", cc_name(cond), dc->pc + offset);
1387
/* op2 holds the condition-code. */
1388
cris_cc_mask(dc, 0);
1389
cris_prepare_cc_branch (dc, offset, cond);
1392
static int dec_addoq(DisasContext *dc)
1396
dc->op1 = EXTRACT_FIELD(dc->ir, 0, 7);
1397
imm = sign_extend(dc->op1, 7);
1399
LOG_DIS("addoq %d, $r%u\n", imm, dc->op2);
1400
cris_cc_mask(dc, 0);
1401
/* Fetch register operand, */
1402
tcg_gen_addi_tl(cpu_R[R_ACR], cpu_R[dc->op2], imm);
1406
static int dec_addq(DisasContext *dc)
1408
LOG_DIS("addq %u, $r%u\n", dc->op1, dc->op2);
1410
dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1412
cris_cc_mask(dc, CC_MASK_NZVC);
1414
cris_alu(dc, CC_OP_ADD,
1415
cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
1418
static int dec_moveq(DisasContext *dc)
1422
dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1423
imm = sign_extend(dc->op1, 5);
1424
LOG_DIS("moveq %d, $r%u\n", imm, dc->op2);
1426
tcg_gen_movi_tl(cpu_R[dc->op2], imm);
1429
static int dec_subq(DisasContext *dc)
1431
dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1433
LOG_DIS("subq %u, $r%u\n", dc->op1, dc->op2);
1435
cris_cc_mask(dc, CC_MASK_NZVC);
1436
cris_alu(dc, CC_OP_SUB,
1437
cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
1440
static int dec_cmpq(DisasContext *dc)
1443
dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1444
imm = sign_extend(dc->op1, 5);
1446
LOG_DIS("cmpq %d, $r%d\n", imm, dc->op2);
1447
cris_cc_mask(dc, CC_MASK_NZVC);
1449
cris_alu(dc, CC_OP_CMP,
1450
cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
1453
static int dec_andq(DisasContext *dc)
1456
dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1457
imm = sign_extend(dc->op1, 5);
1459
LOG_DIS("andq %d, $r%d\n", imm, dc->op2);
1460
cris_cc_mask(dc, CC_MASK_NZ);
1462
cris_alu(dc, CC_OP_AND,
1463
cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
1466
static int dec_orq(DisasContext *dc)
1469
dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1470
imm = sign_extend(dc->op1, 5);
1471
LOG_DIS("orq %d, $r%d\n", imm, dc->op2);
1472
cris_cc_mask(dc, CC_MASK_NZ);
1474
cris_alu(dc, CC_OP_OR,
1475
cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
1478
static int dec_btstq(DisasContext *dc)
1480
dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1481
LOG_DIS("btstq %u, $r%d\n", dc->op1, dc->op2);
1483
cris_cc_mask(dc, CC_MASK_NZ);
1484
cris_evaluate_flags(dc);
1485
gen_helper_btst(cpu_PR[PR_CCS], cpu_R[dc->op2],
1486
tcg_const_tl(dc->op1), cpu_PR[PR_CCS]);
1487
cris_alu(dc, CC_OP_MOVE,
1488
cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
1489
cris_update_cc_op(dc, CC_OP_FLAGS, 4);
1490
dc->flags_uptodate = 1;
1493
static int dec_asrq(DisasContext *dc)
1495
dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1496
LOG_DIS("asrq %u, $r%d\n", dc->op1, dc->op2);
1497
cris_cc_mask(dc, CC_MASK_NZ);
1499
tcg_gen_sari_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
1500
cris_alu(dc, CC_OP_MOVE,
1502
cpu_R[dc->op2], cpu_R[dc->op2], 4);
1505
static int dec_lslq(DisasContext *dc)
1507
dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1508
LOG_DIS("lslq %u, $r%d\n", dc->op1, dc->op2);
1510
cris_cc_mask(dc, CC_MASK_NZ);
1512
tcg_gen_shli_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
1514
cris_alu(dc, CC_OP_MOVE,
1516
cpu_R[dc->op2], cpu_R[dc->op2], 4);
1519
static int dec_lsrq(DisasContext *dc)
1521
dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1522
LOG_DIS("lsrq %u, $r%d\n", dc->op1, dc->op2);
1524
cris_cc_mask(dc, CC_MASK_NZ);
1526
tcg_gen_shri_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
1527
cris_alu(dc, CC_OP_MOVE,
1529
cpu_R[dc->op2], cpu_R[dc->op2], 4);
1533
static int dec_move_r(DisasContext *dc)
1535
int size = memsize_zz(dc);
1537
LOG_DIS("move.%c $r%u, $r%u\n",
1538
memsize_char(size), dc->op1, dc->op2);
1540
cris_cc_mask(dc, CC_MASK_NZ);
1542
dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, cpu_R[dc->op2]);
1543
cris_cc_mask(dc, CC_MASK_NZ);
1544
cris_update_cc_op(dc, CC_OP_MOVE, 4);
1545
cris_update_cc_x(dc);
1546
cris_update_result(dc, cpu_R[dc->op2]);
1551
t0 = tcg_temp_new();
1552
dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0);
1553
cris_alu(dc, CC_OP_MOVE,
1555
cpu_R[dc->op2], t0, size);
1561
static int dec_scc_r(DisasContext *dc)
1565
LOG_DIS("s%s $r%u\n",
1566
cc_name(cond), dc->op1);
1572
gen_tst_cc (dc, cpu_R[dc->op1], cond);
1573
l1 = gen_new_label();
1574
tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_R[dc->op1], 0, l1);
1575
tcg_gen_movi_tl(cpu_R[dc->op1], 1);
1579
tcg_gen_movi_tl(cpu_R[dc->op1], 1);
1581
cris_cc_mask(dc, 0);
1585
static inline void cris_alu_alloc_temps(DisasContext *dc, int size, TCGv *t)
1588
t[0] = cpu_R[dc->op2];
1589
t[1] = cpu_R[dc->op1];
1591
t[0] = tcg_temp_new();
1592
t[1] = tcg_temp_new();
1596
static inline void cris_alu_free_temps(DisasContext *dc, int size, TCGv *t)
1599
tcg_temp_free(t[0]);
1600
tcg_temp_free(t[1]);
1604
static int dec_and_r(DisasContext *dc)
1607
int size = memsize_zz(dc);
1609
LOG_DIS("and.%c $r%u, $r%u\n",
1610
memsize_char(size), dc->op1, dc->op2);
1612
cris_cc_mask(dc, CC_MASK_NZ);
1614
cris_alu_alloc_temps(dc, size, t);
1615
dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1616
cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], size);
1617
cris_alu_free_temps(dc, size, t);
1621
static int dec_lz_r(DisasContext *dc)
1624
LOG_DIS("lz $r%u, $r%u\n",
1626
cris_cc_mask(dc, CC_MASK_NZ);
1627
t0 = tcg_temp_new();
1628
dec_prep_alu_r(dc, dc->op1, dc->op2, 4, 0, cpu_R[dc->op2], t0);
1629
cris_alu(dc, CC_OP_LZ, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
1634
static int dec_lsl_r(DisasContext *dc)
1637
int size = memsize_zz(dc);
1639
LOG_DIS("lsl.%c $r%u, $r%u\n",
1640
memsize_char(size), dc->op1, dc->op2);
1642
cris_cc_mask(dc, CC_MASK_NZ);
1643
cris_alu_alloc_temps(dc, size, t);
1644
dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1645
tcg_gen_andi_tl(t[1], t[1], 63);
1646
cris_alu(dc, CC_OP_LSL, cpu_R[dc->op2], t[0], t[1], size);
1647
cris_alu_alloc_temps(dc, size, t);
1651
static int dec_lsr_r(DisasContext *dc)
1654
int size = memsize_zz(dc);
1656
LOG_DIS("lsr.%c $r%u, $r%u\n",
1657
memsize_char(size), dc->op1, dc->op2);
1659
cris_cc_mask(dc, CC_MASK_NZ);
1660
cris_alu_alloc_temps(dc, size, t);
1661
dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1662
tcg_gen_andi_tl(t[1], t[1], 63);
1663
cris_alu(dc, CC_OP_LSR, cpu_R[dc->op2], t[0], t[1], size);
1664
cris_alu_free_temps(dc, size, t);
1668
static int dec_asr_r(DisasContext *dc)
1671
int size = memsize_zz(dc);
1673
LOG_DIS("asr.%c $r%u, $r%u\n",
1674
memsize_char(size), dc->op1, dc->op2);
1676
cris_cc_mask(dc, CC_MASK_NZ);
1677
cris_alu_alloc_temps(dc, size, t);
1678
dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]);
1679
tcg_gen_andi_tl(t[1], t[1], 63);
1680
cris_alu(dc, CC_OP_ASR, cpu_R[dc->op2], t[0], t[1], size);
1681
cris_alu_free_temps(dc, size, t);
1685
static int dec_muls_r(DisasContext *dc)
1688
int size = memsize_zz(dc);
1690
LOG_DIS("muls.%c $r%u, $r%u\n",
1691
memsize_char(size), dc->op1, dc->op2);
1692
cris_cc_mask(dc, CC_MASK_NZV);
1693
cris_alu_alloc_temps(dc, size, t);
1694
dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]);
1696
cris_alu(dc, CC_OP_MULS, cpu_R[dc->op2], t[0], t[1], 4);
1697
cris_alu_free_temps(dc, size, t);
1701
static int dec_mulu_r(DisasContext *dc)
1704
int size = memsize_zz(dc);
1706
LOG_DIS("mulu.%c $r%u, $r%u\n",
1707
memsize_char(size), dc->op1, dc->op2);
1708
cris_cc_mask(dc, CC_MASK_NZV);
1709
cris_alu_alloc_temps(dc, size, t);
1710
dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1712
cris_alu(dc, CC_OP_MULU, cpu_R[dc->op2], t[0], t[1], 4);
1713
cris_alu_alloc_temps(dc, size, t);
1718
static int dec_dstep_r(DisasContext *dc)
1720
LOG_DIS("dstep $r%u, $r%u\n", dc->op1, dc->op2);
1721
cris_cc_mask(dc, CC_MASK_NZ);
1722
cris_alu(dc, CC_OP_DSTEP,
1723
cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4);
1727
/* Decode "xor $Rs, $Rd".  The ISA only encodes a dword xor, which the
 * BUG_ON below asserts.  Returns the instruction length in bytes.
 */
static int dec_xor_r(DisasContext *dc)
{
    TCGv t[2];
    int size = memsize_zz(dc);

    LOG_DIS("xor.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);
    BUG_ON(size != 4); /* xor is dword. */
    cris_cc_mask(dc, CC_MASK_NZ);
    cris_alu_alloc_temps(dc, size, t);
    dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);

    cris_alu(dc, CC_OP_XOR, cpu_R[dc->op2], t[0], t[1], 4);
    cris_alu_free_temps(dc, size, t);
    return 2;
}
1743
/* Decode "bound.m $Rs, $Rd": clamp Rd against the (zero-extended) bound.
 * NOTE(review): a *local* temp is used here, presumably because the
 * CC_OP_BOUND lowering emits a branch and a plain temp would not survive
 * it — confirm against cris_alu()/t_gen_cc_bound.
 * Returns the instruction length in bytes.
 */
static int dec_bound_r(DisasContext *dc)
{
    TCGv l0;
    int size = memsize_zz(dc);

    LOG_DIS("bound.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);
    cris_cc_mask(dc, CC_MASK_NZ);
    l0 = tcg_temp_local_new();
    dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, l0);
    cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], cpu_R[dc->op2], l0, 4);
    tcg_temp_free(l0);
    return 2;
}
1757
/* Decode "cmp.m $Rs, $Rd": compare, sets NZVC.  The destination register
 * is passed as the ALU result register but CC_OP_CMP does not write it.
 * Returns the instruction length in bytes.
 */
static int dec_cmp_r(DisasContext *dc)
{
    TCGv t[2];
    int size = memsize_zz(dc);

    LOG_DIS("cmp.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);
    cris_cc_mask(dc, CC_MASK_NZVC);
    cris_alu_alloc_temps(dc, size, t);
    dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);

    cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], t[0], t[1], size);
    cris_alu_free_temps(dc, size, t);
    return 2;
}
1772
static int dec_abs_r(DisasContext *dc)
1776
LOG_DIS("abs $r%u, $r%u\n",
1778
cris_cc_mask(dc, CC_MASK_NZ);
1780
t0 = tcg_temp_new();
1781
tcg_gen_sari_tl(t0, cpu_R[dc->op1], 31);
1782
tcg_gen_xor_tl(cpu_R[dc->op2], cpu_R[dc->op1], t0);
1783
tcg_gen_sub_tl(cpu_R[dc->op2], cpu_R[dc->op2], t0);
1786
cris_alu(dc, CC_OP_MOVE,
1787
cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
1791
/* Decode "add.m $Rs, $Rd": sized addition into Rd.
 * Returns the instruction length in bytes.
 */
static int dec_add_r(DisasContext *dc)
{
    TCGv t[2];
    int size = memsize_zz(dc);

    LOG_DIS("add.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);
    cris_cc_mask(dc, CC_MASK_NZVC);
    cris_alu_alloc_temps(dc, size, t);
    dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);

    cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], t[0], t[1], size);
    cris_alu_free_temps(dc, size, t);
    return 2;
}
1806
/* Decode "addc $Rs, $Rd": add with carry.  The live CCS value is needed
 * for the carry input, so flags are evaluated first, and the X flag is
 * forced on for this insn so the ALU helper consumes the carry.
 * Returns the instruction length in bytes.
 */
static int dec_addc_r(DisasContext *dc)
{
    LOG_DIS("addc $r%u, $r%u\n",
            dc->op1, dc->op2);
    cris_evaluate_flags(dc);

    /* Set for this insn. */
    dc->flagx_known = 1;
    dc->flags_x = X_FLAG;

    cris_cc_mask(dc, CC_MASK_NZVC);
    cris_alu(dc, CC_OP_ADDC,
             cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4);
    return 2;
}
1821
static int dec_mcp_r(DisasContext *dc)
1823
LOG_DIS("mcp $p%u, $r%u\n",
1825
cris_evaluate_flags(dc);
1826
cris_cc_mask(dc, CC_MASK_RNZV);
1827
cris_alu(dc, CC_OP_MCP,
1828
cpu_R[dc->op1], cpu_R[dc->op1], cpu_PR[dc->op2], 4);
1833
static char * swapmode_name(int mode, char *modename) {
1836
modename[i++] = 'n';
1838
modename[i++] = 'w';
1840
modename[i++] = 'b';
1842
modename[i++] = 'r';
1848
static int dec_swap_r(DisasContext *dc)
1854
LOG_DIS("swap%s $r%u\n",
1855
swapmode_name(dc->op2, modename), dc->op1);
1857
cris_cc_mask(dc, CC_MASK_NZ);
1858
t0 = tcg_temp_new();
1859
t_gen_mov_TN_reg(t0, dc->op1);
1861
tcg_gen_not_tl(t0, t0);
1863
t_gen_swapw(t0, t0);
1865
t_gen_swapb(t0, t0);
1867
t_gen_swapr(t0, t0);
1868
cris_alu(dc, CC_OP_MOVE,
1869
cpu_R[dc->op1], cpu_R[dc->op1], t0, 4);
1874
/* Decode "or.m $Rs, $Rd": sized bitwise OR into Rd.
 * Returns the instruction length in bytes.
 */
static int dec_or_r(DisasContext *dc)
{
    TCGv t[2];
    int size = memsize_zz(dc);

    LOG_DIS("or.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);
    cris_cc_mask(dc, CC_MASK_NZ);
    cris_alu_alloc_temps(dc, size, t);
    dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
    cris_alu(dc, CC_OP_OR, cpu_R[dc->op2], t[0], t[1], size);
    cris_alu_free_temps(dc, size, t);
    return 2;
}
1888
/* Decode "addi.m $Rs, $Rd": Rd += Rs << zzsize (scaled index add).
 * Does not affect the condition codes.  Returns the insn length in bytes.
 *
 * Fix: the shift count dc->zzsize is a translate-time constant, so use
 * tcg_gen_shli_tl instead of tcg_gen_shl_tl with a tcg_const_tl temp
 * that was never freed; also free t0.
 */
static int dec_addi_r(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("addi.%c $r%u, $r%u\n",
            memsize_char(memsize_zz(dc)), dc->op2, dc->op1);
    cris_cc_mask(dc, 0);
    t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, cpu_R[dc->op2], dc->zzsize);
    tcg_gen_add_tl(cpu_R[dc->op1], cpu_R[dc->op1], t0);
    tcg_temp_free(t0);
    return 2;
}
1901
/* Decode "addi.m $Rs, $Rd, $acr": ACR = Rd + (Rs << zzsize).
 * Does not affect the condition codes.  Returns the insn length in bytes.
 *
 * Fix: use tcg_gen_shli_tl for the translate-time constant shift instead
 * of tcg_gen_shl_tl with a leaked tcg_const_tl temp; also free t0.
 */
static int dec_addi_acr(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("addi.%c $r%u, $r%u, $acr\n",
            memsize_char(memsize_zz(dc)), dc->op2, dc->op1);
    cris_cc_mask(dc, 0);
    t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, cpu_R[dc->op2], dc->zzsize);
    tcg_gen_add_tl(cpu_R[R_ACR], cpu_R[dc->op1], t0);
    tcg_temp_free(t0);
    return 2;
}
1914
/* Decode "neg.m $Rs, $Rd": sized two's-complement negate into Rd.
 * Returns the instruction length in bytes.
 */
static int dec_neg_r(DisasContext *dc)
{
    TCGv t[2];
    int size = memsize_zz(dc);

    LOG_DIS("neg.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);
    cris_cc_mask(dc, CC_MASK_NZVC);
    cris_alu_alloc_temps(dc, size, t);
    dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);

    cris_alu(dc, CC_OP_NEG, cpu_R[dc->op2], t[0], t[1], size);
    cris_alu_free_temps(dc, size, t);
    return 2;
}
1929
static int dec_btst_r(DisasContext *dc)
1931
LOG_DIS("btst $r%u, $r%u\n",
1933
cris_cc_mask(dc, CC_MASK_NZ);
1934
cris_evaluate_flags(dc);
1935
gen_helper_btst(cpu_PR[PR_CCS], cpu_R[dc->op2],
1936
cpu_R[dc->op1], cpu_PR[PR_CCS]);
1937
cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2],
1938
cpu_R[dc->op2], cpu_R[dc->op2], 4);
1939
cris_update_cc_op(dc, CC_OP_FLAGS, 4);
1940
dc->flags_uptodate = 1;
1944
/* Decode "sub.m $Rs, $Rd": sized subtraction into Rd.
 * Returns the instruction length in bytes.
 */
static int dec_sub_r(DisasContext *dc)
{
    TCGv t[2];
    int size = memsize_zz(dc);

    LOG_DIS("sub.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);
    cris_cc_mask(dc, CC_MASK_NZVC);
    cris_alu_alloc_temps(dc, size, t);
    dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
    cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], size);
    cris_alu_free_temps(dc, size, t);
    return 2;
}
1958
/* Zero extension. From size to dword. */
1959
static int dec_movu_r(DisasContext *dc)
1962
int size = memsize_z(dc);
1963
LOG_DIS("movu.%c $r%u, $r%u\n",
1967
cris_cc_mask(dc, CC_MASK_NZ);
1968
t0 = tcg_temp_new();
1969
dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0);
1970
cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
1975
/* Sign extension. From size to dword. */
1976
static int dec_movs_r(DisasContext *dc)
1979
int size = memsize_z(dc);
1980
LOG_DIS("movs.%c $r%u, $r%u\n",
1984
cris_cc_mask(dc, CC_MASK_NZ);
1985
t0 = tcg_temp_new();
1986
/* Size can only be qi or hi. */
1987
t_gen_sext(t0, cpu_R[dc->op1], size);
1988
cris_alu(dc, CC_OP_MOVE,
1989
cpu_R[dc->op2], cpu_R[dc->op1], t0, 4);
1994
/* zero extension. From size to dword. */
1995
static int dec_addu_r(DisasContext *dc)
1998
int size = memsize_z(dc);
1999
LOG_DIS("addu.%c $r%u, $r%u\n",
2003
cris_cc_mask(dc, CC_MASK_NZVC);
2004
t0 = tcg_temp_new();
2005
/* Size can only be qi or hi. */
2006
t_gen_zext(t0, cpu_R[dc->op1], size);
2007
cris_alu(dc, CC_OP_ADD,
2008
cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2013
/* Sign extension. From size to dword. */
2014
static int dec_adds_r(DisasContext *dc)
2017
int size = memsize_z(dc);
2018
LOG_DIS("adds.%c $r%u, $r%u\n",
2022
cris_cc_mask(dc, CC_MASK_NZVC);
2023
t0 = tcg_temp_new();
2024
/* Size can only be qi or hi. */
2025
t_gen_sext(t0, cpu_R[dc->op1], size);
2026
cris_alu(dc, CC_OP_ADD,
2027
cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2032
/* Zero extension. From size to dword. */
2033
static int dec_subu_r(DisasContext *dc)
2036
int size = memsize_z(dc);
2037
LOG_DIS("subu.%c $r%u, $r%u\n",
2041
cris_cc_mask(dc, CC_MASK_NZVC);
2042
t0 = tcg_temp_new();
2043
/* Size can only be qi or hi. */
2044
t_gen_zext(t0, cpu_R[dc->op1], size);
2045
cris_alu(dc, CC_OP_SUB,
2046
cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2051
/* Sign extension. From size to dword. */
2052
static int dec_subs_r(DisasContext *dc)
2055
int size = memsize_z(dc);
2056
LOG_DIS("subs.%c $r%u, $r%u\n",
2060
cris_cc_mask(dc, CC_MASK_NZVC);
2061
t0 = tcg_temp_new();
2062
/* Size can only be qi or hi. */
2063
t_gen_sext(t0, cpu_R[dc->op1], size);
2064
cris_alu(dc, CC_OP_SUB,
2065
cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2070
static int dec_setclrf(DisasContext *dc)
2073
int set = (~dc->opcode >> 2) & 1;
2076
flags = (EXTRACT_FIELD(dc->ir, 12, 15) << 4)
2077
| EXTRACT_FIELD(dc->ir, 0, 3);
2078
if (set && flags == 0) {
2081
} else if (!set && (flags & 0x20)) {
2086
set ? "set" : "clr",
2090
/* User space is not allowed to touch these. Silently ignore. */
2091
if (dc->tb_flags & U_FLAG) {
2092
flags &= ~(S_FLAG | I_FLAG | U_FLAG);
2095
if (flags & X_FLAG) {
2096
dc->flagx_known = 1;
2098
dc->flags_x = X_FLAG;
2103
/* Break the TB if any of the SPI flag changes. */
2104
if (flags & (P_FLAG | S_FLAG)) {
2105
tcg_gen_movi_tl(env_pc, dc->pc + 2);
2106
dc->is_jmp = DISAS_UPDATE;
2107
dc->cpustate_changed = 1;
2110
/* For the I flag, only act on posedge. */
2111
if ((flags & I_FLAG)) {
2112
tcg_gen_movi_tl(env_pc, dc->pc + 2);
2113
dc->is_jmp = DISAS_UPDATE;
2114
dc->cpustate_changed = 1;
2118
/* Simply decode the flags. */
2119
cris_evaluate_flags (dc);
2120
cris_update_cc_op(dc, CC_OP_FLAGS, 4);
2121
cris_update_cc_x(dc);
2122
tcg_gen_movi_tl(cc_op, dc->cc_op);
2125
if (!(dc->tb_flags & U_FLAG) && (flags & U_FLAG)) {
2126
/* Enter user mode. */
2127
t_gen_mov_env_TN(ksp, cpu_R[R_SP]);
2128
tcg_gen_mov_tl(cpu_R[R_SP], cpu_PR[PR_USP]);
2129
dc->cpustate_changed = 1;
2131
tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], flags);
2134
tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~flags);
2136
dc->flags_uptodate = 1;
2141
static int dec_move_rs(DisasContext *dc)
2143
LOG_DIS("move $r%u, $s%u\n", dc->op1, dc->op2);
2144
cris_cc_mask(dc, 0);
2145
gen_helper_movl_sreg_reg(tcg_const_tl(dc->op2), tcg_const_tl(dc->op1));
2148
static int dec_move_sr(DisasContext *dc)
2150
LOG_DIS("move $s%u, $r%u\n", dc->op2, dc->op1);
2151
cris_cc_mask(dc, 0);
2152
gen_helper_movl_reg_sreg(tcg_const_tl(dc->op1), tcg_const_tl(dc->op2));
2156
static int dec_move_rp(DisasContext *dc)
2159
LOG_DIS("move $r%u, $p%u\n", dc->op1, dc->op2);
2160
cris_cc_mask(dc, 0);
2162
t[0] = tcg_temp_new();
2163
if (dc->op2 == PR_CCS) {
2164
cris_evaluate_flags(dc);
2165
t_gen_mov_TN_reg(t[0], dc->op1);
2166
if (dc->tb_flags & U_FLAG) {
2167
t[1] = tcg_temp_new();
2168
/* User space is not allowed to touch all flags. */
2169
tcg_gen_andi_tl(t[0], t[0], 0x39f);
2170
tcg_gen_andi_tl(t[1], cpu_PR[PR_CCS], ~0x39f);
2171
tcg_gen_or_tl(t[0], t[1], t[0]);
2172
tcg_temp_free(t[1]);
2176
t_gen_mov_TN_reg(t[0], dc->op1);
2178
t_gen_mov_preg_TN(dc, dc->op2, t[0]);
2179
if (dc->op2 == PR_CCS) {
2180
cris_update_cc_op(dc, CC_OP_FLAGS, 4);
2181
dc->flags_uptodate = 1;
2183
tcg_temp_free(t[0]);
2186
static int dec_move_pr(DisasContext *dc)
2189
LOG_DIS("move $p%u, $r%u\n", dc->op2, dc->op1);
2190
cris_cc_mask(dc, 0);
2192
if (dc->op2 == PR_CCS)
2193
cris_evaluate_flags(dc);
2195
if (dc->op2 == PR_DZ) {
2196
tcg_gen_movi_tl(cpu_R[dc->op1], 0);
2198
t0 = tcg_temp_new();
2199
t_gen_mov_TN_preg(t0, dc->op2);
2200
cris_alu(dc, CC_OP_MOVE,
2201
cpu_R[dc->op1], cpu_R[dc->op1], t0,
2202
preg_sizes[dc->op2]);
2208
static int dec_move_mr(DisasContext *dc)
2210
int memsize = memsize_zz(dc);
2212
LOG_DIS("move.%c [$r%u%s, $r%u\n",
2213
memsize_char(memsize),
2214
dc->op1, dc->postinc ? "+]" : "]",
2218
insn_len = dec_prep_move_m(dc, 0, 4, cpu_R[dc->op2]);
2219
cris_cc_mask(dc, CC_MASK_NZ);
2220
cris_update_cc_op(dc, CC_OP_MOVE, 4);
2221
cris_update_cc_x(dc);
2222
cris_update_result(dc, cpu_R[dc->op2]);
2227
t0 = tcg_temp_new();
2228
insn_len = dec_prep_move_m(dc, 0, memsize, t0);
2229
cris_cc_mask(dc, CC_MASK_NZ);
2230
cris_alu(dc, CC_OP_MOVE,
2231
cpu_R[dc->op2], cpu_R[dc->op2], t0, memsize);
2234
do_postinc(dc, memsize);
2238
/* Allocate the two scratch temporaries (address/value) used by the
 * memory-operand ALU decoders.  Pair with cris_alu_m_free_temps().
 */
static inline void cris_alu_m_alloc_temps(TCGv *t)
{
    t[0] = tcg_temp_new();
    t[1] = tcg_temp_new();
}
2244
/* Release the temporaries allocated by cris_alu_m_alloc_temps(). */
static inline void cris_alu_m_free_temps(TCGv *t)
{
    tcg_temp_free(t[0]);
    tcg_temp_free(t[1]);
}
2250
static int dec_movs_m(DisasContext *dc)
2253
int memsize = memsize_z(dc);
2255
LOG_DIS("movs.%c [$r%u%s, $r%u\n",
2256
memsize_char(memsize),
2257
dc->op1, dc->postinc ? "+]" : "]",
2260
cris_alu_m_alloc_temps(t);
2262
insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2263
cris_cc_mask(dc, CC_MASK_NZ);
2264
cris_alu(dc, CC_OP_MOVE,
2265
cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2266
do_postinc(dc, memsize);
2267
cris_alu_m_free_temps(t);
2271
static int dec_addu_m(DisasContext *dc)
2274
int memsize = memsize_z(dc);
2276
LOG_DIS("addu.%c [$r%u%s, $r%u\n",
2277
memsize_char(memsize),
2278
dc->op1, dc->postinc ? "+]" : "]",
2281
cris_alu_m_alloc_temps(t);
2283
insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2284
cris_cc_mask(dc, CC_MASK_NZVC);
2285
cris_alu(dc, CC_OP_ADD,
2286
cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2287
do_postinc(dc, memsize);
2288
cris_alu_m_free_temps(t);
2292
static int dec_adds_m(DisasContext *dc)
2295
int memsize = memsize_z(dc);
2297
LOG_DIS("adds.%c [$r%u%s, $r%u\n",
2298
memsize_char(memsize),
2299
dc->op1, dc->postinc ? "+]" : "]",
2302
cris_alu_m_alloc_temps(t);
2304
insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2305
cris_cc_mask(dc, CC_MASK_NZVC);
2306
cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2307
do_postinc(dc, memsize);
2308
cris_alu_m_free_temps(t);
2312
static int dec_subu_m(DisasContext *dc)
2315
int memsize = memsize_z(dc);
2317
LOG_DIS("subu.%c [$r%u%s, $r%u\n",
2318
memsize_char(memsize),
2319
dc->op1, dc->postinc ? "+]" : "]",
2322
cris_alu_m_alloc_temps(t);
2324
insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2325
cris_cc_mask(dc, CC_MASK_NZVC);
2326
cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2327
do_postinc(dc, memsize);
2328
cris_alu_m_free_temps(t);
2332
static int dec_subs_m(DisasContext *dc)
2335
int memsize = memsize_z(dc);
2337
LOG_DIS("subs.%c [$r%u%s, $r%u\n",
2338
memsize_char(memsize),
2339
dc->op1, dc->postinc ? "+]" : "]",
2342
cris_alu_m_alloc_temps(t);
2344
insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2345
cris_cc_mask(dc, CC_MASK_NZVC);
2346
cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2347
do_postinc(dc, memsize);
2348
cris_alu_m_free_temps(t);
2352
static int dec_movu_m(DisasContext *dc)
2355
int memsize = memsize_z(dc);
2358
LOG_DIS("movu.%c [$r%u%s, $r%u\n",
2359
memsize_char(memsize),
2360
dc->op1, dc->postinc ? "+]" : "]",
2363
cris_alu_m_alloc_temps(t);
2364
insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2365
cris_cc_mask(dc, CC_MASK_NZ);
2366
cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2367
do_postinc(dc, memsize);
2368
cris_alu_m_free_temps(t);
2372
static int dec_cmpu_m(DisasContext *dc)
2375
int memsize = memsize_z(dc);
2377
LOG_DIS("cmpu.%c [$r%u%s, $r%u\n",
2378
memsize_char(memsize),
2379
dc->op1, dc->postinc ? "+]" : "]",
2382
cris_alu_m_alloc_temps(t);
2383
insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2384
cris_cc_mask(dc, CC_MASK_NZVC);
2385
cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2386
do_postinc(dc, memsize);
2387
cris_alu_m_free_temps(t);
2391
static int dec_cmps_m(DisasContext *dc)
2394
int memsize = memsize_z(dc);
2396
LOG_DIS("cmps.%c [$r%u%s, $r%u\n",
2397
memsize_char(memsize),
2398
dc->op1, dc->postinc ? "+]" : "]",
2401
cris_alu_m_alloc_temps(t);
2402
insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2403
cris_cc_mask(dc, CC_MASK_NZVC);
2404
cris_alu(dc, CC_OP_CMP,
2405
cpu_R[dc->op2], cpu_R[dc->op2], t[1],
2407
do_postinc(dc, memsize);
2408
cris_alu_m_free_temps(t);
2412
static int dec_cmp_m(DisasContext *dc)
2415
int memsize = memsize_zz(dc);
2417
LOG_DIS("cmp.%c [$r%u%s, $r%u\n",
2418
memsize_char(memsize),
2419
dc->op1, dc->postinc ? "+]" : "]",
2422
cris_alu_m_alloc_temps(t);
2423
insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2424
cris_cc_mask(dc, CC_MASK_NZVC);
2425
cris_alu(dc, CC_OP_CMP,
2426
cpu_R[dc->op2], cpu_R[dc->op2], t[1],
2428
do_postinc(dc, memsize);
2429
cris_alu_m_free_temps(t);
2433
static int dec_test_m(DisasContext *dc)
2436
int memsize = memsize_zz(dc);
2438
LOG_DIS("test.%c [$r%u%s] op2=%x\n",
2439
memsize_char(memsize),
2440
dc->op1, dc->postinc ? "+]" : "]",
2443
cris_evaluate_flags(dc);
2445
cris_alu_m_alloc_temps(t);
2446
insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2447
cris_cc_mask(dc, CC_MASK_NZ);
2448
tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3);
2450
cris_alu(dc, CC_OP_CMP,
2451
cpu_R[dc->op2], t[1], tcg_const_tl(0), memsize_zz(dc));
2452
do_postinc(dc, memsize);
2453
cris_alu_m_free_temps(t);
2457
static int dec_and_m(DisasContext *dc)
2460
int memsize = memsize_zz(dc);
2462
LOG_DIS("and.%c [$r%u%s, $r%u\n",
2463
memsize_char(memsize),
2464
dc->op1, dc->postinc ? "+]" : "]",
2467
cris_alu_m_alloc_temps(t);
2468
insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2469
cris_cc_mask(dc, CC_MASK_NZ);
2470
cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
2471
do_postinc(dc, memsize);
2472
cris_alu_m_free_temps(t);
2476
static int dec_add_m(DisasContext *dc)
2479
int memsize = memsize_zz(dc);
2481
LOG_DIS("add.%c [$r%u%s, $r%u\n",
2482
memsize_char(memsize),
2483
dc->op1, dc->postinc ? "+]" : "]",
2486
cris_alu_m_alloc_temps(t);
2487
insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2488
cris_cc_mask(dc, CC_MASK_NZVC);
2489
cris_alu(dc, CC_OP_ADD,
2490
cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
2491
do_postinc(dc, memsize);
2492
cris_alu_m_free_temps(t);
2496
static int dec_addo_m(DisasContext *dc)
2499
int memsize = memsize_zz(dc);
2501
LOG_DIS("add.%c [$r%u%s, $r%u\n",
2502
memsize_char(memsize),
2503
dc->op1, dc->postinc ? "+]" : "]",
2506
cris_alu_m_alloc_temps(t);
2507
insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2508
cris_cc_mask(dc, 0);
2509
cris_alu(dc, CC_OP_ADD, cpu_R[R_ACR], t[0], t[1], 4);
2510
do_postinc(dc, memsize);
2511
cris_alu_m_free_temps(t);
2515
static int dec_bound_m(DisasContext *dc)
2518
int memsize = memsize_zz(dc);
2520
LOG_DIS("bound.%c [$r%u%s, $r%u\n",
2521
memsize_char(memsize),
2522
dc->op1, dc->postinc ? "+]" : "]",
2525
l[0] = tcg_temp_local_new();
2526
l[1] = tcg_temp_local_new();
2527
insn_len = dec_prep_alu_m(dc, 0, memsize, l[0], l[1]);
2528
cris_cc_mask(dc, CC_MASK_NZ);
2529
cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], l[0], l[1], 4);
2530
do_postinc(dc, memsize);
2531
tcg_temp_free(l[0]);
2532
tcg_temp_free(l[1]);
2536
static int dec_addc_mr(DisasContext *dc)
2540
LOG_DIS("addc [$r%u%s, $r%u\n",
2541
dc->op1, dc->postinc ? "+]" : "]",
2544
cris_evaluate_flags(dc);
2546
/* Set for this insn. */
2547
dc->flagx_known = 1;
2548
dc->flags_x = X_FLAG;
2550
cris_alu_m_alloc_temps(t);
2551
insn_len = dec_prep_alu_m(dc, 0, 4, t[0], t[1]);
2552
cris_cc_mask(dc, CC_MASK_NZVC);
2553
cris_alu(dc, CC_OP_ADDC, cpu_R[dc->op2], t[0], t[1], 4);
2555
cris_alu_m_free_temps(t);
2559
static int dec_sub_m(DisasContext *dc)
2562
int memsize = memsize_zz(dc);
2564
LOG_DIS("sub.%c [$r%u%s, $r%u ir=%x zz=%x\n",
2565
memsize_char(memsize),
2566
dc->op1, dc->postinc ? "+]" : "]",
2567
dc->op2, dc->ir, dc->zzsize);
2569
cris_alu_m_alloc_temps(t);
2570
insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2571
cris_cc_mask(dc, CC_MASK_NZVC);
2572
cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], memsize);
2573
do_postinc(dc, memsize);
2574
cris_alu_m_free_temps(t);
2578
static int dec_or_m(DisasContext *dc)
2581
int memsize = memsize_zz(dc);
2583
LOG_DIS("or.%c [$r%u%s, $r%u pc=%x\n",
2584
memsize_char(memsize),
2585
dc->op1, dc->postinc ? "+]" : "]",
2588
cris_alu_m_alloc_temps(t);
2589
insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2590
cris_cc_mask(dc, CC_MASK_NZ);
2591
cris_alu(dc, CC_OP_OR,
2592
cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
2593
do_postinc(dc, memsize);
2594
cris_alu_m_free_temps(t);
2598
static int dec_move_mp(DisasContext *dc)
2601
int memsize = memsize_zz(dc);
2604
LOG_DIS("move.%c [$r%u%s, $p%u\n",
2605
memsize_char(memsize),
2607
dc->postinc ? "+]" : "]",
2610
cris_alu_m_alloc_temps(t);
2611
insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2612
cris_cc_mask(dc, 0);
2613
if (dc->op2 == PR_CCS) {
2614
cris_evaluate_flags(dc);
2615
if (dc->tb_flags & U_FLAG) {
2616
/* User space is not allowed to touch all flags. */
2617
tcg_gen_andi_tl(t[1], t[1], 0x39f);
2618
tcg_gen_andi_tl(t[0], cpu_PR[PR_CCS], ~0x39f);
2619
tcg_gen_or_tl(t[1], t[0], t[1]);
2623
t_gen_mov_preg_TN(dc, dc->op2, t[1]);
2625
do_postinc(dc, memsize);
2626
cris_alu_m_free_temps(t);
2630
static int dec_move_pm(DisasContext *dc)
2635
memsize = preg_sizes[dc->op2];
2637
LOG_DIS("move.%c $p%u, [$r%u%s\n",
2638
memsize_char(memsize),
2639
dc->op2, dc->op1, dc->postinc ? "+]" : "]");
2641
/* prepare store. Address in T0, value in T1. */
2642
if (dc->op2 == PR_CCS)
2643
cris_evaluate_flags(dc);
2644
t0 = tcg_temp_new();
2645
t_gen_mov_TN_preg(t0, dc->op2);
2646
cris_flush_cc_state(dc);
2647
gen_store(dc, cpu_R[dc->op1], t0, memsize);
2650
cris_cc_mask(dc, 0);
2652
tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], memsize);
2656
static int dec_movem_mr(DisasContext *dc)
2662
int nr = dc->op2 + 1;
2664
LOG_DIS("movem [$r%u%s, $r%u\n", dc->op1,
2665
dc->postinc ? "+]" : "]", dc->op2);
2667
addr = tcg_temp_new();
2668
/* There are probably better ways of doing this. */
2669
cris_flush_cc_state(dc);
2670
for (i = 0; i < (nr >> 1); i++) {
2671
tmp[i] = tcg_temp_new_i64();
2672
tcg_gen_addi_tl(addr, cpu_R[dc->op1], i * 8);
2673
gen_load64(dc, tmp[i], addr);
2676
tmp32 = tcg_temp_new_i32();
2677
tcg_gen_addi_tl(addr, cpu_R[dc->op1], i * 8);
2678
gen_load(dc, tmp32, addr, 4, 0);
2681
tcg_temp_free(addr);
2683
for (i = 0; i < (nr >> 1); i++) {
2684
tcg_gen_trunc_i64_i32(cpu_R[i * 2], tmp[i]);
2685
tcg_gen_shri_i64(tmp[i], tmp[i], 32);
2686
tcg_gen_trunc_i64_i32(cpu_R[i * 2 + 1], tmp[i]);
2687
tcg_temp_free_i64(tmp[i]);
2690
tcg_gen_mov_tl(cpu_R[dc->op2], tmp32);
2691
tcg_temp_free(tmp32);
2694
/* writeback the updated pointer value. */
2696
tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], nr * 4);
2698
/* gen_load might want to evaluate the previous insns flags. */
2699
cris_cc_mask(dc, 0);
2703
static int dec_movem_rm(DisasContext *dc)
2709
LOG_DIS("movem $r%u, [$r%u%s\n", dc->op2, dc->op1,
2710
dc->postinc ? "+]" : "]");
2712
cris_flush_cc_state(dc);
2714
tmp = tcg_temp_new();
2715
addr = tcg_temp_new();
2716
tcg_gen_movi_tl(tmp, 4);
2717
tcg_gen_mov_tl(addr, cpu_R[dc->op1]);
2718
for (i = 0; i <= dc->op2; i++) {
2719
/* Displace addr. */
2720
/* Perform the store. */
2721
gen_store(dc, addr, cpu_R[i], 4);
2722
tcg_gen_add_tl(addr, addr, tmp);
2725
tcg_gen_mov_tl(cpu_R[dc->op1], addr);
2726
cris_cc_mask(dc, 0);
2728
tcg_temp_free(addr);
2732
static int dec_move_rm(DisasContext *dc)
2736
memsize = memsize_zz(dc);
2738
LOG_DIS("move.%c $r%u, [$r%u]\n",
2739
memsize_char(memsize), dc->op2, dc->op1);
2741
/* prepare store. */
2742
cris_flush_cc_state(dc);
2743
gen_store(dc, cpu_R[dc->op1], cpu_R[dc->op2], memsize);
2746
tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], memsize);
2747
cris_cc_mask(dc, 0);
2751
/* Decode "lapcq qo, $Rd": load PC-relative address (quick form),
 * Rd = pc + op1 * 2.  Does not affect the condition codes.
 * Returns the instruction length in bytes.
 */
static int dec_lapcq(DisasContext *dc)
{
    LOG_DIS("lapcq %x, $r%u\n",
            dc->pc + dc->op1*2, dc->op2);
    cris_cc_mask(dc, 0);
    tcg_gen_movi_tl(cpu_R[dc->op2], dc->pc + dc->op1 * 2);
    return 2;
}
2760
static int dec_lapc_im(DisasContext *dc)
2768
cris_cc_mask(dc, 0);
2769
imm = cris_fetch(dc, dc->pc + 2, 4, 0);
2770
LOG_DIS("lapc 0x%x, $r%u\n", imm + dc->pc, dc->op2);
2774
tcg_gen_movi_tl(cpu_R[rd], pc);
2778
/* Jump to special reg. */
2779
static int dec_jump_p(DisasContext *dc)
2781
LOG_DIS("jump $p%u\n", dc->op2);
2783
if (dc->op2 == PR_CCS)
2784
cris_evaluate_flags(dc);
2785
t_gen_mov_TN_preg(env_btarget, dc->op2);
2786
/* rete will often have low bit set to indicate delayslot. */
2787
tcg_gen_andi_tl(env_btarget, env_btarget, ~1);
2788
cris_cc_mask(dc, 0);
2789
cris_prepare_jmp(dc, JMP_INDIRECT);
2793
/* Jump and save. */
2794
static int dec_jas_r(DisasContext *dc)
2796
LOG_DIS("jas $r%u, $p%u\n", dc->op1, dc->op2);
2797
cris_cc_mask(dc, 0);
2798
/* Store the return address in Pd. */
2799
tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
2802
t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 4));
2804
cris_prepare_jmp(dc, JMP_INDIRECT);
2808
static int dec_jas_im(DisasContext *dc)
2812
imm = cris_fetch(dc, dc->pc + 2, 4, 0);
2814
LOG_DIS("jas 0x%x\n", imm);
2815
cris_cc_mask(dc, 0);
2816
/* Store the return address in Pd. */
2817
t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8));
2820
cris_prepare_jmp(dc, JMP_DIRECT);
2824
static int dec_jasc_im(DisasContext *dc)
2828
imm = cris_fetch(dc, dc->pc + 2, 4, 0);
2830
LOG_DIS("jasc 0x%x\n", imm);
2831
cris_cc_mask(dc, 0);
2832
/* Store the return address in Pd. */
2833
t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8 + 4));
2836
cris_prepare_jmp(dc, JMP_DIRECT);
2840
static int dec_jasc_r(DisasContext *dc)
2842
LOG_DIS("jasc_r $r%u, $p%u\n", dc->op1, dc->op2);
2843
cris_cc_mask(dc, 0);
2844
/* Store the return address in Pd. */
2845
tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
2846
t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 4 + 4));
2847
cris_prepare_jmp(dc, JMP_INDIRECT);
2851
static int dec_bcc_im(DisasContext *dc)
2854
uint32_t cond = dc->op2;
2856
offset = cris_fetch(dc, dc->pc + 2, 2, 1);
2858
LOG_DIS("b%s %d pc=%x dst=%x\n",
2859
cc_name(cond), offset,
2860
dc->pc, dc->pc + offset);
2862
cris_cc_mask(dc, 0);
2863
/* op2 holds the condition-code. */
2864
cris_prepare_cc_branch (dc, offset, cond);
2868
static int dec_bas_im(DisasContext *dc)
2873
simm = cris_fetch(dc, dc->pc + 2, 4, 0);
2875
LOG_DIS("bas 0x%x, $p%u\n", dc->pc + simm, dc->op2);
2876
cris_cc_mask(dc, 0);
2877
/* Store the return address in Pd. */
2878
t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8));
2880
dc->jmp_pc = dc->pc + simm;
2881
cris_prepare_jmp(dc, JMP_DIRECT);
2885
static int dec_basc_im(DisasContext *dc)
2888
simm = cris_fetch(dc, dc->pc + 2, 4, 0);
2890
LOG_DIS("basc 0x%x, $p%u\n", dc->pc + simm, dc->op2);
2891
cris_cc_mask(dc, 0);
2892
/* Store the return address in Pd. */
2893
t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 12));
2895
dc->jmp_pc = dc->pc + simm;
2896
cris_prepare_jmp(dc, JMP_DIRECT);
2900
static int dec_rfe_etc(DisasContext *dc)
2902
cris_cc_mask(dc, 0);
2904
if (dc->op2 == 15) {
2905
t_gen_mov_env_TN(halted, tcg_const_tl(1));
2906
tcg_gen_movi_tl(env_pc, dc->pc + 2);
2907
t_gen_raise_exception(EXCP_HLT);
2911
switch (dc->op2 & 7) {
2915
cris_evaluate_flags(dc);
2917
dc->is_jmp = DISAS_UPDATE;
2922
cris_evaluate_flags(dc);
2924
dc->is_jmp = DISAS_UPDATE;
2927
LOG_DIS("break %d\n", dc->op1);
2928
cris_evaluate_flags (dc);
2930
tcg_gen_movi_tl(env_pc, dc->pc + 2);
2932
/* Breaks start at 16 in the exception vector. */
2933
t_gen_mov_env_TN(trap_vector,
2934
tcg_const_tl(dc->op1 + 16));
2935
t_gen_raise_exception(EXCP_BREAK);
2936
dc->is_jmp = DISAS_UPDATE;
2939
printf ("op2=%x\n", dc->op2);
2947
static int dec_ftag_fidx_d_m(DisasContext *dc)
2952
static int dec_ftag_fidx_i_m(DisasContext *dc)
2957
static int dec_null(DisasContext *dc)
2959
printf ("unknown insn pc=%x opc=%x op1=%x op2=%x\n",
2960
dc->pc, dc->opcode, dc->op1, dc->op2);
2966
static struct decoder_info {
2971
int (*dec)(DisasContext *dc);
2973
/* Order matters here. */
2974
{DEC_MOVEQ, dec_moveq},
2975
{DEC_BTSTQ, dec_btstq},
2976
{DEC_CMPQ, dec_cmpq},
2977
{DEC_ADDOQ, dec_addoq},
2978
{DEC_ADDQ, dec_addq},
2979
{DEC_SUBQ, dec_subq},
2980
{DEC_ANDQ, dec_andq},
2982
{DEC_ASRQ, dec_asrq},
2983
{DEC_LSLQ, dec_lslq},
2984
{DEC_LSRQ, dec_lsrq},
2985
{DEC_BCCQ, dec_bccq},
2987
{DEC_BCC_IM, dec_bcc_im},
2988
{DEC_JAS_IM, dec_jas_im},
2989
{DEC_JAS_R, dec_jas_r},
2990
{DEC_JASC_IM, dec_jasc_im},
2991
{DEC_JASC_R, dec_jasc_r},
2992
{DEC_BAS_IM, dec_bas_im},
2993
{DEC_BASC_IM, dec_basc_im},
2994
{DEC_JUMP_P, dec_jump_p},
2995
{DEC_LAPC_IM, dec_lapc_im},
2996
{DEC_LAPCQ, dec_lapcq},
2998
{DEC_RFE_ETC, dec_rfe_etc},
2999
{DEC_ADDC_MR, dec_addc_mr},
3001
{DEC_MOVE_MP, dec_move_mp},
3002
{DEC_MOVE_PM, dec_move_pm},
3003
{DEC_MOVEM_MR, dec_movem_mr},
3004
{DEC_MOVEM_RM, dec_movem_rm},
3005
{DEC_MOVE_PR, dec_move_pr},
3006
{DEC_SCC_R, dec_scc_r},
3007
{DEC_SETF, dec_setclrf},
3008
{DEC_CLEARF, dec_setclrf},
3010
{DEC_MOVE_SR, dec_move_sr},
3011
{DEC_MOVE_RP, dec_move_rp},
3012
{DEC_SWAP_R, dec_swap_r},
3013
{DEC_ABS_R, dec_abs_r},
3014
{DEC_LZ_R, dec_lz_r},
3015
{DEC_MOVE_RS, dec_move_rs},
3016
{DEC_BTST_R, dec_btst_r},
3017
{DEC_ADDC_R, dec_addc_r},
3019
{DEC_DSTEP_R, dec_dstep_r},
3020
{DEC_XOR_R, dec_xor_r},
3021
{DEC_MCP_R, dec_mcp_r},
3022
{DEC_CMP_R, dec_cmp_r},
3024
{DEC_ADDI_R, dec_addi_r},
3025
{DEC_ADDI_ACR, dec_addi_acr},
3027
{DEC_ADD_R, dec_add_r},
3028
{DEC_SUB_R, dec_sub_r},
3030
{DEC_ADDU_R, dec_addu_r},
3031
{DEC_ADDS_R, dec_adds_r},
3032
{DEC_SUBU_R, dec_subu_r},
3033
{DEC_SUBS_R, dec_subs_r},
3034
{DEC_LSL_R, dec_lsl_r},
3036
{DEC_AND_R, dec_and_r},
3037
{DEC_OR_R, dec_or_r},
3038
{DEC_BOUND_R, dec_bound_r},
3039
{DEC_ASR_R, dec_asr_r},
3040
{DEC_LSR_R, dec_lsr_r},
3042
{DEC_MOVU_R, dec_movu_r},
3043
{DEC_MOVS_R, dec_movs_r},
3044
{DEC_NEG_R, dec_neg_r},
3045
{DEC_MOVE_R, dec_move_r},
3047
{DEC_FTAG_FIDX_I_M, dec_ftag_fidx_i_m},
3048
{DEC_FTAG_FIDX_D_M, dec_ftag_fidx_d_m},
3050
{DEC_MULS_R, dec_muls_r},
3051
{DEC_MULU_R, dec_mulu_r},
3053
{DEC_ADDU_M, dec_addu_m},
3054
{DEC_ADDS_M, dec_adds_m},
3055
{DEC_SUBU_M, dec_subu_m},
3056
{DEC_SUBS_M, dec_subs_m},
3058
{DEC_CMPU_M, dec_cmpu_m},
3059
{DEC_CMPS_M, dec_cmps_m},
3060
{DEC_MOVU_M, dec_movu_m},
3061
{DEC_MOVS_M, dec_movs_m},
3063
{DEC_CMP_M, dec_cmp_m},
3064
{DEC_ADDO_M, dec_addo_m},
3065
{DEC_BOUND_M, dec_bound_m},
3066
{DEC_ADD_M, dec_add_m},
3067
{DEC_SUB_M, dec_sub_m},
3068
{DEC_AND_M, dec_and_m},
3069
{DEC_OR_M, dec_or_m},
3070
{DEC_MOVE_RM, dec_move_rm},
3071
{DEC_TEST_M, dec_test_m},
3072
{DEC_MOVE_MR, dec_move_mr},
3077
static unsigned int crisv32_decoder(DisasContext *dc)
3082
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
3083
tcg_gen_debug_insn_start(dc->pc);
3085
/* Load a halfword onto the instruction register. */
3086
dc->ir = cris_fetch(dc, dc->pc, 2, 0);
3088
/* Now decode it. */
3089
dc->opcode = EXTRACT_FIELD(dc->ir, 4, 11);
3090
dc->op1 = EXTRACT_FIELD(dc->ir, 0, 3);
3091
dc->op2 = EXTRACT_FIELD(dc->ir, 12, 15);
3092
dc->zsize = EXTRACT_FIELD(dc->ir, 4, 4);
3093
dc->zzsize = EXTRACT_FIELD(dc->ir, 4, 5);
3094
dc->postinc = EXTRACT_FIELD(dc->ir, 10, 10);
3096
/* Large switch for all insns. */
3097
for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
3098
if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits)
3100
insn_len = decinfo[i].dec(dc);
3105
#if !defined(CONFIG_USER_ONLY)
3106
/* Single-stepping ? */
3107
if (dc->tb_flags & S_FLAG) {
3110
l1 = gen_new_label();
3111
tcg_gen_brcondi_tl(TCG_COND_NE, cpu_PR[PR_SPC], dc->pc, l1);
3112
/* We treat SPC as a break with an odd trap vector. */
3113
cris_evaluate_flags (dc);
3114
t_gen_mov_env_TN(trap_vector, tcg_const_tl(3));
3115
tcg_gen_movi_tl(env_pc, dc->pc + insn_len);
3116
tcg_gen_movi_tl(cpu_PR[PR_SPC], dc->pc + insn_len);
3117
t_gen_raise_exception(EXCP_BREAK);
3124
static void check_breakpoint(CPUState *env, DisasContext *dc)
3128
if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3129
QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
3130
if (bp->pc == dc->pc) {
3131
cris_evaluate_flags (dc);
3132
tcg_gen_movi_tl(env_pc, dc->pc);
3133
t_gen_raise_exception(EXCP_DEBUG);
3134
dc->is_jmp = DISAS_UPDATE;
3140
#include "translate_v10.c"
3143
* Delay slots on QEMU/CRIS.
3145
* If an exception hits on a delayslot, the core will let ERP (the Exception
3146
* Return Pointer) point to the branch (the previous) insn and set the lsb to
3147
* to give SW a hint that the exception actually hit on the dslot.
3149
* CRIS expects all PC addresses to be 16-bit aligned. The lsb is ignored by
3150
* the core and any jmp to an odd addresses will mask off that lsb. It is
3151
* simply there to let sw know there was an exception on a dslot.
3153
* When the software returns from an exception, the branch will re-execute.
3154
* On QEMU care needs to be taken when a branch+delayslot sequence is broken
3155
* and the branch and delayslot dont share pages.
3157
* The TB contaning the branch insn will set up env->btarget and evaluate
3158
* env->btaken. When the translation loop exits we will note that the branch
3159
* sequence is broken and let env->dslot be the size of the branch insn (those
3162
* The TB contaning the delayslot will have the PC of its real insn (i.e no lsb
3163
* set). It will also expect to have env->dslot setup with the size of the
3164
* delay slot so that env->pc - env->dslot point to the branch insn. This TB
3165
* will execute the dslot and take the branch, either to btarget or just one
3168
* When exceptions occur, we check for env->dslot in do_interrupt to detect
3169
* broken branch sequences and setup $erp accordingly (i.e let it point to the
3170
* branch and set lsb). Then env->dslot gets cleared so that the exception
3171
* handler can enter. When returning from exceptions (jump $erp) the lsb gets
3172
* masked off and we will reexecute the branch insn.
3176
/* generate intermediate code for basic block 'tb'. */
3178
gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
3181
uint16_t *gen_opc_end;
3183
unsigned int insn_len;
3185
struct DisasContext ctx;
3186
struct DisasContext *dc = &ctx;
3187
uint32_t next_page_start;
3192
qemu_log_try_set_file(stderr);
3194
if (env->pregs[PR_VR] == 32) {
3195
dc->decoder = crisv32_decoder;
3196
dc->clear_locked_irq = 0;
3198
dc->decoder = crisv10_decoder;
3199
dc->clear_locked_irq = 1;
3202
/* Odd PC indicates that branch is rexecuting due to exception in the
3203
* delayslot, like in real hw.
3205
pc_start = tb->pc & ~1;
3209
gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
3211
dc->is_jmp = DISAS_NEXT;
3214
dc->singlestep_enabled = env->singlestep_enabled;
3215
dc->flags_uptodate = 1;
3216
dc->flagx_known = 1;
3217
dc->flags_x = tb->flags & X_FLAG;
3218
dc->cc_x_uptodate = 0;
3221
dc->clear_prefix = 0;
3223
cris_update_cc_op(dc, CC_OP_FLAGS, 4);
3224
dc->cc_size_uptodate = -1;
3226
/* Decode TB flags. */
3227
dc->tb_flags = tb->flags & (S_FLAG | P_FLAG | U_FLAG \
3228
| X_FLAG | PFIX_FLAG);
3229
dc->delayed_branch = !!(tb->flags & 7);
3230
if (dc->delayed_branch)
3231
dc->jmp = JMP_INDIRECT;
3233
dc->jmp = JMP_NOJMP;
3235
dc->cpustate_changed = 0;
3237
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3239
"srch=%d pc=%x %x flg=%" PRIx64 " bt=%x ds=%u ccs=%x\n"
3245
search_pc, dc->pc, dc->ppc,
3246
(uint64_t)tb->flags,
3247
env->btarget, (unsigned)tb->flags & 7,
3249
env->pregs[PR_PID], env->pregs[PR_USP],
3250
env->regs[0], env->regs[1], env->regs[2], env->regs[3],
3251
env->regs[4], env->regs[5], env->regs[6], env->regs[7],
3252
env->regs[8], env->regs[9],
3253
env->regs[10], env->regs[11],
3254
env->regs[12], env->regs[13],
3255
env->regs[14], env->regs[15]);
3256
qemu_log("--------------\n");
3257
qemu_log("IN: %s\n", lookup_symbol(pc_start));
3260
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
3263
max_insns = tb->cflags & CF_COUNT_MASK;
3265
max_insns = CF_COUNT_MASK;
3270
check_breakpoint(env, dc);
3273
j = gen_opc_ptr - gen_opc_buf;
3277
gen_opc_instr_start[lj++] = 0;
3279
if (dc->delayed_branch == 1)
3280
gen_opc_pc[lj] = dc->ppc | 1;
3282
gen_opc_pc[lj] = dc->pc;
3283
gen_opc_instr_start[lj] = 1;
3284
gen_opc_icount[lj] = num_insns;
3288
LOG_DIS("%8.8x:\t", dc->pc);
3290
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3294
insn_len = dc->decoder(dc);
3298
cris_clear_x_flag(dc);
3301
/* Check for delayed branches here. If we do it before
3302
actually generating any host code, the simulator will just
3303
loop doing nothing for on this program location. */
3304
if (dc->delayed_branch) {
3305
dc->delayed_branch--;
3306
if (dc->delayed_branch == 0)
3309
t_gen_mov_env_TN(dslot,
3311
if (dc->cpustate_changed || !dc->flagx_known
3312
|| (dc->flags_x != (tb->flags & X_FLAG))) {
3313
cris_store_direct_jmp(dc);
3316
if (dc->clear_locked_irq) {
3317
dc->clear_locked_irq = 0;
3318
t_gen_mov_env_TN(locked_irq,
3322
if (dc->jmp == JMP_DIRECT_CC) {
3325
l1 = gen_new_label();
3326
cris_evaluate_flags(dc);
3328
/* Conditional jmp. */
3329
tcg_gen_brcondi_tl(TCG_COND_EQ,
3331
gen_goto_tb(dc, 1, dc->jmp_pc);
3333
gen_goto_tb(dc, 0, dc->pc);
3334
dc->is_jmp = DISAS_TB_JUMP;
3335
dc->jmp = JMP_NOJMP;
3336
} else if (dc->jmp == JMP_DIRECT) {
3337
cris_evaluate_flags(dc);
3338
gen_goto_tb(dc, 0, dc->jmp_pc);
3339
dc->is_jmp = DISAS_TB_JUMP;
3340
dc->jmp = JMP_NOJMP;
3342
t_gen_cc_jmp(env_btarget,
3343
tcg_const_tl(dc->pc));
3344
dc->is_jmp = DISAS_JUMP;
3350
/* If we are rexecuting a branch due to exceptions on
3351
delay slots dont break. */
3352
if (!(tb->pc & 1) && env->singlestep_enabled)
3354
} while (!dc->is_jmp && !dc->cpustate_changed
3355
&& gen_opc_ptr < gen_opc_end
3357
&& (dc->pc < next_page_start)
3358
&& num_insns < max_insns);
3360
if (dc->clear_locked_irq)
3361
t_gen_mov_env_TN(locked_irq, tcg_const_tl(0));
3365
if (tb->cflags & CF_LAST_IO)
3367
/* Force an update if the per-tb cpu state has changed. */
3368
if (dc->is_jmp == DISAS_NEXT
3369
&& (dc->cpustate_changed || !dc->flagx_known
3370
|| (dc->flags_x != (tb->flags & X_FLAG)))) {
3371
dc->is_jmp = DISAS_UPDATE;
3372
tcg_gen_movi_tl(env_pc, npc);
3374
/* Broken branch+delayslot sequence. */
3375
if (dc->delayed_branch == 1) {
3376
/* Set env->dslot to the size of the branch insn. */
3377
t_gen_mov_env_TN(dslot, tcg_const_tl(dc->pc - dc->ppc));
3378
cris_store_direct_jmp(dc);
3381
cris_evaluate_flags (dc);
3383
if (unlikely(env->singlestep_enabled)) {
3384
if (dc->is_jmp == DISAS_NEXT)
3385
tcg_gen_movi_tl(env_pc, npc);
3386
t_gen_raise_exception(EXCP_DEBUG);
3388
switch(dc->is_jmp) {
3390
gen_goto_tb(dc, 1, npc);
3395
/* indicate that the hash table must be used
3396
to find the next TB */
3401
/* nothing more to generate */
3405
gen_icount_end(tb, num_insns);
3406
*gen_opc_ptr = INDEX_op_end;
3408
j = gen_opc_ptr - gen_opc_buf;
3411
gen_opc_instr_start[lj++] = 0;
3413
tb->size = dc->pc - pc_start;
3414
tb->icount = num_insns;
3419
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3420
log_target_disas(pc_start, dc->pc - pc_start,
3421
dc->env->pregs[PR_VR]);
3422
qemu_log("\nisize=%d osize=%td\n",
3423
dc->pc - pc_start, gen_opc_ptr - gen_opc_buf);
3429
/* Entry point for normal translation (no PC search).  */
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
/* Entry point for translation with guest-PC recording (search_pc mode),
   used when restoring CPU state after an exception mid-TB.  */
void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
/* Dump CPU state (PC, flags, general/special/support registers) to F
   using CPU_FPRINTF.  NOTE(review): reconstructed after extraction
   corruption — verify against upstream QEMU target-cris/translate.c.  */
void cpu_dump_state (CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                     int flags)
{
    int i;
    uint32_t srs;

    if (!env) {
        return;
    }

    cpu_fprintf(f, "PC=%x CCS=%x btaken=%d btarget=%x\n"
                "cc_op=%d cc_src=%d cc_dest=%d cc_result=%x cc_mask=%x\n",
                env->pc, env->pregs[PR_CCS], env->btaken, env->btarget,
                env->cc_op,
                env->cc_src, env->cc_dest, env->cc_result, env->cc_mask);

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "%s=%8.8x ",regnames[i], env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\nspecial regs:\n");
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "%s=%8.8x ", pregnames[i], env->pregs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    srs = env->pregs[PR_SRS];
    cpu_fprintf(f, "\nsupport function regs bank %x:\n", srs);
    /* Only dump the bank if SRS selects a valid one.  */
    if (srs < ARRAY_SIZE(env->sregs)) {
        for (i = 0; i < 16; i++) {
            cpu_fprintf(f, "s%2.2d=%8.8x ",
                        i, env->sregs[srs][i]);
            if ((i + 1) % 4 == 0)
                cpu_fprintf(f, "\n");
        }
    }
    cpu_fprintf(f, "\n\n");
}
/* Print the names of all supported CRIS core models to F.  */
void cris_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    unsigned int i;

    (*cpu_fprintf)(f, "Available CPUs:\n");
    for (i = 0; i < ARRAY_SIZE(cris_cores); i++) {
        (*cpu_fprintf)(f, "  %s\n", cris_cores[i].name);
    }
}
static uint32_t vr_by_name(const char *name)
3505
for (i = 0; i < ARRAY_SIZE(cris_cores); i++) {
3506
if (strcmp(name, cris_cores[i].name) == 0) {
3507
return cris_cores[i].vr;
3513
CPUCRISState *cpu_cris_init (const char *cpu_model)
3516
static int tcg_initialized = 0;
3519
env = g_malloc0(sizeof(CPUCRISState));
3521
env->pregs[PR_VR] = vr_by_name(cpu_model);
3524
qemu_init_vcpu(env);
3526
if (tcg_initialized)
3529
tcg_initialized = 1;
3531
#define GEN_HELPER 2
3534
if (env->pregs[PR_VR] < 32) {
3535
cpu_crisv10_init(env);
3540
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
3541
cc_x = tcg_global_mem_new(TCG_AREG0,
3542
offsetof(CPUState, cc_x), "cc_x");
3543
cc_src = tcg_global_mem_new(TCG_AREG0,
3544
offsetof(CPUState, cc_src), "cc_src");
3545
cc_dest = tcg_global_mem_new(TCG_AREG0,
3546
offsetof(CPUState, cc_dest),
3548
cc_result = tcg_global_mem_new(TCG_AREG0,
3549
offsetof(CPUState, cc_result),
3551
cc_op = tcg_global_mem_new(TCG_AREG0,
3552
offsetof(CPUState, cc_op), "cc_op");
3553
cc_size = tcg_global_mem_new(TCG_AREG0,
3554
offsetof(CPUState, cc_size),
3556
cc_mask = tcg_global_mem_new(TCG_AREG0,
3557
offsetof(CPUState, cc_mask),
3560
env_pc = tcg_global_mem_new(TCG_AREG0,
3561
offsetof(CPUState, pc),
3563
env_btarget = tcg_global_mem_new(TCG_AREG0,
3564
offsetof(CPUState, btarget),
3566
env_btaken = tcg_global_mem_new(TCG_AREG0,
3567
offsetof(CPUState, btaken),
3569
for (i = 0; i < 16; i++) {
3570
cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
3571
offsetof(CPUState, regs[i]),
3574
for (i = 0; i < 16; i++) {
3575
cpu_PR[i] = tcg_global_mem_new(TCG_AREG0,
3576
offsetof(CPUState, pregs[i]),
3583
/* Reset the CPU state, preserving only the version register (VR).
   NOTE(review): reconstructed after extraction corruption — the
   non-user-mode branch (mmu init, CCS=0) follows upstream; confirm.  */
void cpu_reset (CPUCRISState *env)
{
    uint32_t vr;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, 0);
    }

    /* VR identifies the core model and must survive the reset.  */
    vr = env->pregs[PR_VR];
    memset(env, 0, offsetof(CPUCRISState, breakpoints));
    env->pregs[PR_VR] = vr;
    tlb_flush(env, 1);

#if defined(CONFIG_USER_ONLY)
    /* start in user mode with interrupts enabled.  */
    env->pregs[PR_CCS] |= U_FLAG | I_FLAG | P_FLAG;
#else
    cris_mmu_init(env);
    env->pregs[PR_CCS] = 0;
#endif
}
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
3608
env->pc = gen_opc_pc[pc_pos];