   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
21
#include "qemu/osdep.h"
24
#include "disas/disas.h"
25
#include "exec/helper-proto.h"
26
#include "exec/exec-all.h"
28
#include "exec/cpu_ldst.h"
30
#include "exec/helper-gen.h"
32
#include "trace-tcg.h"
39
/* Sentinel values stored in DisasContext.pc/npc instead of a real address.  */
#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */
43
/* global register indexes */
44
static TCGv_env cpu_env;
45
static TCGv_ptr cpu_regwptr;
46
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
47
static TCGv_i32 cpu_cc_op;
48
static TCGv_i32 cpu_psr;
49
static TCGv cpu_fsr, cpu_pc, cpu_npc;
50
static TCGv cpu_regs[32];
52
#ifndef CONFIG_USER_ONLY
57
static TCGv_i32 cpu_xcc, cpu_fprs;
59
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
60
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
64
/* Floating point registers */
65
static TCGv_i64 cpu_fpr[TARGET_DPREGS];
67
#include "exec/gen-icount.h"
69
typedef struct DisasContext {
70
target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
71
target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
72
target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
76
bool address_mask_32bit;
78
#ifndef CONFIG_USER_ONLY
85
uint32_t cc_op; /* current CC operation */
86
struct TranslationBlock *tb;
105
// This function uses non-native bit order
106
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))

#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f
127
/* Sign-extend the low LEN bits of X to a full int (arithmetic shift trick). */
static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}
133
#define IS_IMM (insn & (1<<13))
135
/* Allocate a per-insn i32 temp, tracked in dc->t32 for bulk freeing. */
static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}
143
/* Allocate a per-insn target-long temp, tracked in dc->ttl for bulk freeing. */
static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}
151
/* Mark the FPRS dirty bit (DL for regs <32, DU otherwise) for register RD.
   No-op on non-SPARC64 targets, which have no FPRS register.  */
static inline void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;

    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
164
/* floating point registers moves */
165
/* Read single-precision FP register SRC as an i32.  cpu_fpr[] holds pairs of
   singles packed into i64 halves; odd regs are the low half, even the high.  */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_extrl_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}
189
/* Write i32 V into single-precision FP register DST and mark FPRS dirty. */
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dc, dst);
}
205
/* Scratch i32 destination for a single-precision result. */
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}
210
/* Read double-precision FP register SRC (after DFPREG renumbering). */
static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}
216
/* Write i64 V into double-precision FP register DST and mark FPRS dirty. */
static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}
223
/* Destination i64 for a double-precision result: the register itself. */
static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
228
static void gen_op_load_fpr_QT0(unsigned int src)
230
tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
231
offsetof(CPU_QuadU, ll.upper));
232
tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
233
offsetof(CPU_QuadU, ll.lower));
236
static void gen_op_load_fpr_QT1(unsigned int src)
238
tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
239
offsetof(CPU_QuadU, ll.upper));
240
tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
241
offsetof(CPU_QuadU, ll.lower));
244
static void gen_op_store_QT0_fpr(unsigned int dst)
246
tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
247
offsetof(CPU_QuadU, ll.upper));
248
tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
249
offsetof(CPU_QuadU, ll.lower));
252
/* Write the i64 pair V1 (upper) / V2 (lower) to quad FP register DST. */
static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
                            TCGv_i64 v1, TCGv_i64 v2)
{
    dst = QFPREG(dst);

    tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
    tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
    gen_update_fprs_dirty(dc, dst);
}
262
#ifdef TARGET_SPARC64
263
/* Upper i64 half of quad FP register SRC. */
static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2];
}
269
/* Lower i64 half of quad FP register SRC. */
static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2 + 1];
}
275
/* Copy quad FP register RS to RD and mark FPRS dirty. */
static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
287
/* Privilege-level predicates: user-mode emulation is never privileged;
   on SPARC64 hypervisor mode implies supervisor.  */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#endif
#endif

/* AM_CHECK: whether addresses must be masked to 32 bits (PSTATE.AM).  */
#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif
309
/* Truncate ADDR to 32 bits when the 32-bit address mask is in effect. */
static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
#endif
}
317
/* Source value of GPR REG; %g0 reads as a fresh zero temp. */
static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}
329
/* Store V into GPR REG; writes to %g0 are discarded. */
static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}
337
/* Destination for GPR REG; %g0 gets a throwaway temp. */
static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return get_temp_tl(dc);
    }
}
347
/* Direct TB chaining is allowed only when not single-stepping and (for
   system emulation) both targets stay on the current TB's page.  */
static inline bool use_goto_tb(DisasContext *s, target_ulong pc,
                               target_ulong npc)
{
    if (unlikely(s->singlestep)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) &&
           (npc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
362
/* Emit a jump to (PC, NPC), chaining TBs directly when permitted. */
static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc))  {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
380
/* Extract PSR.N (negative) from SRC into REG as 0/1. */
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
387
/* Extract PSR.Z (zero) from SRC into REG as 0/1. */
static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
394
/* Extract PSR.V (overflow) from SRC into REG as 0/1. */
static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
401
/* Extract PSR.C (carry) from SRC into REG as 0/1. */
static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
408
/* dst = src1 + src2, latching operands and result into cpu_cc_* so the
   condition codes can be computed lazily later.  */
static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
416
static TCGv_i32 gen_add32_carry32(void)
418
TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
420
/* Carry is computed from a previous add: (dst < src) */
421
#if TARGET_LONG_BITS == 64
422
cc_src1_32 = tcg_temp_new_i32();
423
cc_src2_32 = tcg_temp_new_i32();
424
tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
425
tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
427
cc_src1_32 = cpu_cc_dst;
428
cc_src2_32 = cpu_cc_src;
431
carry_32 = tcg_temp_new_i32();
432
tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
434
#if TARGET_LONG_BITS == 64
435
tcg_temp_free_i32(cc_src1_32);
436
tcg_temp_free_i32(cc_src2_32);
442
static TCGv_i32 gen_sub32_carry32(void)
444
TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
446
/* Carry is computed from a previous borrow: (src1 < src2) */
447
#if TARGET_LONG_BITS == 64
448
cc_src1_32 = tcg_temp_new_i32();
449
cc_src2_32 = tcg_temp_new_i32();
450
tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
451
tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
453
cc_src1_32 = cpu_cc_src;
454
cc_src2_32 = cpu_cc_src2;
457
carry_32 = tcg_temp_new_i32();
458
tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
460
#if TARGET_LONG_BITS == 64
461
tcg_temp_free_i32(cc_src1_32);
462
tcg_temp_free_i32(cc_src2_32);
468
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
469
TCGv src2, int update_cc)
477
/* Carry is known to be zero. Fall back to plain ADD. */
479
gen_op_add_cc(dst, src1, src2);
481
tcg_gen_add_tl(dst, src1, src2);
488
if (TARGET_LONG_BITS == 32) {
489
/* We can re-use the host's hardware carry generation by using
490
an ADD2 opcode. We discard the low part of the output.
491
Ideally we'd combine this operation with the add that
492
generated the carry in the first place. */
493
carry = tcg_temp_new();
494
tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
495
tcg_temp_free(carry);
498
carry_32 = gen_add32_carry32();
504
carry_32 = gen_sub32_carry32();
508
/* We need external help to produce the carry. */
509
carry_32 = tcg_temp_new_i32();
510
gen_helper_compute_C_icc(carry_32, cpu_env);
514
#if TARGET_LONG_BITS == 64
515
carry = tcg_temp_new();
516
tcg_gen_extu_i32_i64(carry, carry_32);
521
tcg_gen_add_tl(dst, src1, src2);
522
tcg_gen_add_tl(dst, dst, carry);
524
tcg_temp_free_i32(carry_32);
525
#if TARGET_LONG_BITS == 64
526
tcg_temp_free(carry);
531
tcg_gen_mov_tl(cpu_cc_src, src1);
532
tcg_gen_mov_tl(cpu_cc_src2, src2);
533
tcg_gen_mov_tl(cpu_cc_dst, dst);
534
tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
535
dc->cc_op = CC_OP_ADDX;
539
/* dst = src1 - src2, latching operands and result into cpu_cc_* so the
   condition codes can be computed lazily later.  */
static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
547
static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
548
TCGv src2, int update_cc)
556
/* Carry is known to be zero. Fall back to plain SUB. */
558
gen_op_sub_cc(dst, src1, src2);
560
tcg_gen_sub_tl(dst, src1, src2);
567
carry_32 = gen_add32_carry32();
573
if (TARGET_LONG_BITS == 32) {
574
/* We can re-use the host's hardware carry generation by using
575
a SUB2 opcode. We discard the low part of the output.
576
Ideally we'd combine this operation with the add that
577
generated the carry in the first place. */
578
carry = tcg_temp_new();
579
tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
580
tcg_temp_free(carry);
583
carry_32 = gen_sub32_carry32();
587
/* We need external help to produce the carry. */
588
carry_32 = tcg_temp_new_i32();
589
gen_helper_compute_C_icc(carry_32, cpu_env);
593
#if TARGET_LONG_BITS == 64
594
carry = tcg_temp_new();
595
tcg_gen_extu_i32_i64(carry, carry_32);
600
tcg_gen_sub_tl(dst, src1, src2);
601
tcg_gen_sub_tl(dst, dst, carry);
603
tcg_temp_free_i32(carry_32);
604
#if TARGET_LONG_BITS == 64
605
tcg_temp_free(carry);
610
tcg_gen_mov_tl(cpu_cc_src, src1);
611
tcg_gen_mov_tl(cpu_cc_src2, src2);
612
tcg_gen_mov_tl(cpu_cc_dst, dst);
613
tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
614
dc->cc_op = CC_OP_SUBX;
618
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
620
TCGv r_temp, zero, t0;
622
r_temp = tcg_temp_new();
629
zero = tcg_const_tl(0);
630
tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
631
tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
632
tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
633
tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
638
// env->y = (b2 << 31) | (env->y >> 1);
639
tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
640
tcg_gen_shli_tl(r_temp, r_temp, 31);
641
tcg_gen_shri_tl(t0, cpu_y, 1);
642
tcg_gen_andi_tl(t0, t0, 0x7fffffff);
643
tcg_gen_or_tl(t0, t0, r_temp);
644
tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);
647
gen_mov_reg_N(t0, cpu_psr);
648
gen_mov_reg_V(r_temp, cpu_psr);
649
tcg_gen_xor_tl(t0, t0, r_temp);
650
tcg_temp_free(r_temp);
652
// T0 = (b1 << 31) | (T0 >> 1);
654
tcg_gen_shli_tl(t0, t0, 31);
655
tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
656
tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
659
tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
661
tcg_gen_mov_tl(dst, cpu_cc_dst);
664
/* 32x32 -> 64 multiply of the low halves of src1/src2 (signed when
   sign_ext != 0); low 32 bits of the product go to DST's low word on
   32-bit targets, and the high 32 bits land in the Y register.  */
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
692
static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}
698
static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
705
static inline void gen_op_eval_ba(TCGv dst)
707
tcg_gen_movi_tl(dst, 1);
711
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
713
gen_mov_reg_Z(dst, src);
717
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
719
TCGv t0 = tcg_temp_new();
720
gen_mov_reg_N(t0, src);
721
gen_mov_reg_V(dst, src);
722
tcg_gen_xor_tl(dst, dst, t0);
723
gen_mov_reg_Z(t0, src);
724
tcg_gen_or_tl(dst, dst, t0);
729
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
731
TCGv t0 = tcg_temp_new();
732
gen_mov_reg_V(t0, src);
733
gen_mov_reg_N(dst, src);
734
tcg_gen_xor_tl(dst, dst, t0);
739
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
741
TCGv t0 = tcg_temp_new();
742
gen_mov_reg_Z(t0, src);
743
gen_mov_reg_C(dst, src);
744
tcg_gen_or_tl(dst, dst, t0);
749
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
751
gen_mov_reg_C(dst, src);
755
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
757
gen_mov_reg_V(dst, src);
761
static inline void gen_op_eval_bn(TCGv dst)
763
tcg_gen_movi_tl(dst, 0);
767
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
769
gen_mov_reg_N(dst, src);
773
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
775
gen_mov_reg_Z(dst, src);
776
tcg_gen_xori_tl(dst, dst, 0x1);
780
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
782
gen_op_eval_ble(dst, src);
783
tcg_gen_xori_tl(dst, dst, 0x1);
787
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
789
gen_op_eval_bl(dst, src);
790
tcg_gen_xori_tl(dst, dst, 0x1);
794
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
796
gen_op_eval_bleu(dst, src);
797
tcg_gen_xori_tl(dst, dst, 0x1);
801
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
803
gen_mov_reg_C(dst, src);
804
tcg_gen_xori_tl(dst, dst, 0x1);
808
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
810
gen_mov_reg_N(dst, src);
811
tcg_gen_xori_tl(dst, dst, 0x1);
815
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
817
gen_mov_reg_V(dst, src);
818
tcg_gen_xori_tl(dst, dst, 0x1);
822
FPSR bit field FCC1 | FCC0:
828
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
829
unsigned int fcc_offset)
831
tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
832
tcg_gen_andi_tl(reg, reg, 0x1);
835
static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
836
unsigned int fcc_offset)
838
tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
839
tcg_gen_andi_tl(reg, reg, 0x1);
843
/* FP condition evaluators: each leaves a 0/1 boolean in DST computed from
   the FCC field of SRC (the FSR) selected by fcc_offset.  */

// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
987
/* Conditional two-way TB exit: fall to PC2 when R_COND is zero, else PC1. */
static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}
1000
/* Annulled branch: taken -> (npc, pc1); not taken -> skip the delay slot. */
static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->is_br = 1;
}
1015
/* Non-annulled conditional branch: record the two possible next PCs
   (JUMP_PC) when npc is static, otherwise select npc with a movcond.  */
static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (likely(npc != DYNAMIC_PC)) {
        dc->pc = npc;
        dc->jump_pc[0] = pc1;
        dc->jump_pc[1] = npc + 4;
        dc->npc = JUMP_PC;
    } else {
        TCGv t, z;

        tcg_gen_mov_tl(cpu_pc, cpu_npc);

        tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
        t = tcg_const_tl(pc1);
        z = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
        tcg_temp_free(t);
        tcg_temp_free(z);

        dc->pc = DYNAMIC_PC;
    }
}
1040
/* Resolve a pending JUMP_PC: pick jump_pc[0] or jump_pc[1] by cpu_cond. */
static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}
1053
/* call this function before using the condition register as it may
1054
have been set for a jump */
1055
static inline void flush_cond(DisasContext *dc)
1057
if (dc->npc == JUMP_PC) {
1058
gen_generic_branch(dc);
1059
dc->npc = DYNAMIC_PC;
1063
/* Materialize dc->npc into cpu_npc, resolving any pending JUMP_PC first. */
static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
1073
/* Force lazily-evaluated condition codes into the architectural PSR. */
static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}
1081
/* Synchronize cpu_pc/cpu_npc with the translator's static view. */
static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
1087
/* Raise guest exception WHICH after syncing pc/npc; ends the TB. */
static void gen_exception(DisasContext *dc, int which)
{
    TCGv_i32 t;

    save_state(dc);
    t = tcg_const_i32(which);
    gen_helper_raise_exception(cpu_env, t);
    tcg_temp_free_i32(t);
    dc->is_br = 1;
}
1098
/* Emit a runtime alignment check of ADDR against MASK (traps via helper). */
static void gen_check_align(TCGv addr, int mask)
{
    TCGv_i32 r_mask = tcg_const_i32(mask);
    gen_helper_check_align(cpu_env, addr, r_mask);
    tcg_temp_free_i32(r_mask);
}
1105
/* Advance pc to npc, handling the JUMP_PC and DYNAMIC_PC sentinels. */
static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}
1119
static inline void gen_op_next_insn(void)
1121
tcg_gen_mov_tl(cpu_pc, cpu_npc);
1122
tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
1125
/* Release comparison temps; g1/g2 flag operands that are globals and
   must not be freed.  */
static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}
1135
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
1138
static int subcc_cond[16] = {
1154
-1, /* no overflow */
1157
static int logic_cond[16] = {
1159
TCG_COND_EQ, /* eq: Z */
1160
TCG_COND_LE, /* le: Z | (N ^ V) -> Z | N */
1161
TCG_COND_LT, /* lt: N ^ V -> N */
1162
TCG_COND_EQ, /* leu: C | Z -> Z */
1163
TCG_COND_NEVER, /* ltu: C -> 0 */
1164
TCG_COND_LT, /* neg: N */
1165
TCG_COND_NEVER, /* vs: V -> 0 */
1167
TCG_COND_NE, /* ne: !Z */
1168
TCG_COND_GT, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
1169
TCG_COND_GE, /* ge: !(N ^ V) -> !N */
1170
TCG_COND_NE, /* gtu: !(C | Z) -> !Z */
1171
TCG_COND_ALWAYS, /* geu: !C -> 1 */
1172
TCG_COND_GE, /* pos: !N */
1173
TCG_COND_ALWAYS, /* vc: !V -> 1 */
1179
#ifdef TARGET_SPARC64
1189
switch (dc->cc_op) {
1191
cmp->cond = logic_cond[cond];
1193
cmp->is_bool = false;
1195
cmp->c2 = tcg_const_tl(0);
1196
#ifdef TARGET_SPARC64
1199
cmp->c1 = tcg_temp_new();
1200
tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
1205
cmp->c1 = cpu_cc_dst;
1212
cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
1213
goto do_compare_dst_0;
1215
case 7: /* overflow */
1216
case 15: /* !overflow */
1220
cmp->cond = subcc_cond[cond];
1221
cmp->is_bool = false;
1222
#ifdef TARGET_SPARC64
1224
/* Note that sign-extension works for unsigned compares as
1225
long as both operands are sign-extended. */
1226
cmp->g1 = cmp->g2 = false;
1227
cmp->c1 = tcg_temp_new();
1228
cmp->c2 = tcg_temp_new();
1229
tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
1230
tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
1234
cmp->g1 = cmp->g2 = true;
1235
cmp->c1 = cpu_cc_src;
1236
cmp->c2 = cpu_cc_src2;
1243
gen_helper_compute_psr(cpu_env);
1244
dc->cc_op = CC_OP_FLAGS;
1248
/* We're going to generate a boolean result. */
1249
cmp->cond = TCG_COND_NE;
1250
cmp->is_bool = true;
1251
cmp->g1 = cmp->g2 = false;
1252
cmp->c1 = r_dst = tcg_temp_new();
1253
cmp->c2 = tcg_const_tl(0);
1257
gen_op_eval_bn(r_dst);
1260
gen_op_eval_be(r_dst, r_src);
1263
gen_op_eval_ble(r_dst, r_src);
1266
gen_op_eval_bl(r_dst, r_src);
1269
gen_op_eval_bleu(r_dst, r_src);
1272
gen_op_eval_bcs(r_dst, r_src);
1275
gen_op_eval_bneg(r_dst, r_src);
1278
gen_op_eval_bvs(r_dst, r_src);
1281
gen_op_eval_ba(r_dst);
1284
gen_op_eval_bne(r_dst, r_src);
1287
gen_op_eval_bg(r_dst, r_src);
1290
gen_op_eval_bge(r_dst, r_src);
1293
gen_op_eval_bgu(r_dst, r_src);
1296
gen_op_eval_bcc(r_dst, r_src);
1299
gen_op_eval_bpos(r_dst, r_src);
1302
gen_op_eval_bvc(r_dst, r_src);
1309
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
1311
unsigned int offset;
1314
/* For now we still generate a straight boolean result. */
1315
cmp->cond = TCG_COND_NE;
1316
cmp->is_bool = true;
1317
cmp->g1 = cmp->g2 = false;
1318
cmp->c1 = r_dst = tcg_temp_new();
1319
cmp->c2 = tcg_const_tl(0);
1339
gen_op_eval_bn(r_dst);
1342
gen_op_eval_fbne(r_dst, cpu_fsr, offset);
1345
gen_op_eval_fblg(r_dst, cpu_fsr, offset);
1348
gen_op_eval_fbul(r_dst, cpu_fsr, offset);
1351
gen_op_eval_fbl(r_dst, cpu_fsr, offset);
1354
gen_op_eval_fbug(r_dst, cpu_fsr, offset);
1357
gen_op_eval_fbg(r_dst, cpu_fsr, offset);
1360
gen_op_eval_fbu(r_dst, cpu_fsr, offset);
1363
gen_op_eval_ba(r_dst);
1366
gen_op_eval_fbe(r_dst, cpu_fsr, offset);
1369
gen_op_eval_fbue(r_dst, cpu_fsr, offset);
1372
gen_op_eval_fbge(r_dst, cpu_fsr, offset);
1375
gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
1378
gen_op_eval_fble(r_dst, cpu_fsr, offset);
1381
gen_op_eval_fbule(r_dst, cpu_fsr, offset);
1384
gen_op_eval_fbo(r_dst, cpu_fsr, offset);
1389
static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
1393
gen_compare(&cmp, cc, cond, dc);
1395
/* The interface is to return a boolean in r_dst. */
1397
tcg_gen_mov_tl(r_dst, cmp.c1);
1399
tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1405
static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
1408
gen_fcompare(&cmp, cc, cond);
1410
/* The interface is to return a boolean in r_dst. */
1412
tcg_gen_mov_tl(r_dst, cmp.c1);
1414
tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1420
#ifdef TARGET_SPARC64
1422
static const int gen_tcg_cond_reg[8] = {
1433
static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1435
cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
1436
cmp->is_bool = false;
1440
cmp->c2 = tcg_const_tl(0);
1443
static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
1446
gen_compare_reg(&cmp, cond, r_src);
1448
/* The interface is to return a boolean in r_dst. */
1449
tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1455
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
1457
unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
1458
target_ulong target = dc->pc + offset;
1460
#ifdef TARGET_SPARC64
1461
if (unlikely(AM_CHECK(dc))) {
1462
target &= 0xffffffffULL;
1466
/* unconditional not taken */
1468
dc->pc = dc->npc + 4;
1469
dc->npc = dc->pc + 4;
1472
dc->npc = dc->pc + 4;
1474
} else if (cond == 0x8) {
1475
/* unconditional taken */
1478
dc->npc = dc->pc + 4;
1482
tcg_gen_mov_tl(cpu_pc, cpu_npc);
1486
gen_cond(cpu_cond, cc, cond, dc);
1488
gen_branch_a(dc, target);
1490
gen_branch_n(dc, target);
1495
static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
1497
unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
1498
target_ulong target = dc->pc + offset;
1500
#ifdef TARGET_SPARC64
1501
if (unlikely(AM_CHECK(dc))) {
1502
target &= 0xffffffffULL;
1506
/* unconditional not taken */
1508
dc->pc = dc->npc + 4;
1509
dc->npc = dc->pc + 4;
1512
dc->npc = dc->pc + 4;
1514
} else if (cond == 0x8) {
1515
/* unconditional taken */
1518
dc->npc = dc->pc + 4;
1522
tcg_gen_mov_tl(cpu_pc, cpu_npc);
1526
gen_fcond(cpu_cond, cc, cond);
1528
gen_branch_a(dc, target);
1530
gen_branch_n(dc, target);
1535
#ifdef TARGET_SPARC64
1536
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
1539
unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
1540
target_ulong target = dc->pc + offset;
1542
if (unlikely(AM_CHECK(dc))) {
1543
target &= 0xffffffffULL;
1546
gen_cond_reg(cpu_cond, cond, r_reg);
1548
gen_branch_a(dc, target);
1550
gen_branch_n(dc, target);
1554
static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1558
gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
1561
gen_helper_fcmps_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
1564
gen_helper_fcmps_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
1567
gen_helper_fcmps_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
1572
static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1576
gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
1579
gen_helper_fcmpd_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
1582
gen_helper_fcmpd_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
1585
gen_helper_fcmpd_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
1590
static inline void gen_op_fcmpq(int fccno)
1594
gen_helper_fcmpq(cpu_fsr, cpu_env);
1597
gen_helper_fcmpq_fcc1(cpu_fsr, cpu_env);
1600
gen_helper_fcmpq_fcc2(cpu_fsr, cpu_env);
1603
gen_helper_fcmpq_fcc3(cpu_fsr, cpu_env);
1608
static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1612
gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
1615
gen_helper_fcmpes_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
1618
gen_helper_fcmpes_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
1621
gen_helper_fcmpes_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
1626
static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1630
gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
1633
gen_helper_fcmped_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
1636
gen_helper_fcmped_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
1639
gen_helper_fcmped_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
1644
static inline void gen_op_fcmpeq(int fccno)
1648
gen_helper_fcmpeq(cpu_fsr, cpu_env);
1651
gen_helper_fcmpeq_fcc1(cpu_fsr, cpu_env);
1654
gen_helper_fcmpeq_fcc2(cpu_fsr, cpu_env);
1657
gen_helper_fcmpeq_fcc3(cpu_fsr, cpu_env);
1664
static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
1666
gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
1669
static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1671
gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
1674
static inline void gen_op_fcmpq(int fccno)
1676
gen_helper_fcmpq(cpu_fsr, cpu_env);
1679
static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
1681
gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
1684
static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1686
gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
1689
static inline void gen_op_fcmpeq(int fccno)
1691
gen_helper_fcmpeq(cpu_fsr, cpu_env);
1695
static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
1697
tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
1698
tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
1699
gen_exception(dc, TT_FP_EXCP);
1702
static int gen_trap_ifnofpu(DisasContext *dc)
1704
#if !defined(CONFIG_USER_ONLY)
1705
if (!dc->fpu_enabled) {
1706
gen_exception(dc, TT_NFPU_INSN);
1713
static inline void gen_op_clear_ieee_excp_and_FTT(void)
1715
tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
1718
static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
1719
void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
1723
src = gen_load_fpr_F(dc, rs);
1724
dst = gen_dest_fpr_F(dc);
1726
gen(dst, cpu_env, src);
1727
gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1729
gen_store_fpr_F(dc, rd, dst);
1732
static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
1733
void (*gen)(TCGv_i32, TCGv_i32))
1737
src = gen_load_fpr_F(dc, rs);
1738
dst = gen_dest_fpr_F(dc);
1742
gen_store_fpr_F(dc, rd, dst);
1745
static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1746
void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
1748
TCGv_i32 dst, src1, src2;
1750
src1 = gen_load_fpr_F(dc, rs1);
1751
src2 = gen_load_fpr_F(dc, rs2);
1752
dst = gen_dest_fpr_F(dc);
1754
gen(dst, cpu_env, src1, src2);
1755
gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1757
gen_store_fpr_F(dc, rd, dst);
1760
#ifdef TARGET_SPARC64
1761
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1762
void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
1764
TCGv_i32 dst, src1, src2;
1766
src1 = gen_load_fpr_F(dc, rs1);
1767
src2 = gen_load_fpr_F(dc, rs2);
1768
dst = gen_dest_fpr_F(dc);
1770
gen(dst, src1, src2);
1772
gen_store_fpr_F(dc, rd, dst);
1776
static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
1777
void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
1781
src = gen_load_fpr_D(dc, rs);
1782
dst = gen_dest_fpr_D(dc, rd);
1784
gen(dst, cpu_env, src);
1785
gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1787
gen_store_fpr_D(dc, rd, dst);
1790
#ifdef TARGET_SPARC64
1791
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
1792
void (*gen)(TCGv_i64, TCGv_i64))
1796
src = gen_load_fpr_D(dc, rs);
1797
dst = gen_dest_fpr_D(dc, rd);
1801
gen_store_fpr_D(dc, rd, dst);
1805
static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1806
void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
1808
TCGv_i64 dst, src1, src2;
1810
src1 = gen_load_fpr_D(dc, rs1);
1811
src2 = gen_load_fpr_D(dc, rs2);
1812
dst = gen_dest_fpr_D(dc, rd);
1814
gen(dst, cpu_env, src1, src2);
1815
gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1817
gen_store_fpr_D(dc, rd, dst);
1820
#ifdef TARGET_SPARC64
1821
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1822
void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
1824
TCGv_i64 dst, src1, src2;
1826
src1 = gen_load_fpr_D(dc, rs1);
1827
src2 = gen_load_fpr_D(dc, rs2);
1828
dst = gen_dest_fpr_D(dc, rd);
1830
gen(dst, src1, src2);
1832
gen_store_fpr_D(dc, rd, dst);
1835
static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1836
void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1838
TCGv_i64 dst, src1, src2;
1840
src1 = gen_load_fpr_D(dc, rs1);
1841
src2 = gen_load_fpr_D(dc, rs2);
1842
dst = gen_dest_fpr_D(dc, rd);
1844
gen(dst, cpu_gsr, src1, src2);
1846
gen_store_fpr_D(dc, rd, dst);
1849
static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
1850
void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1852
TCGv_i64 dst, src0, src1, src2;
1854
src1 = gen_load_fpr_D(dc, rs1);
1855
src2 = gen_load_fpr_D(dc, rs2);
1856
src0 = gen_load_fpr_D(dc, rd);
1857
dst = gen_dest_fpr_D(dc, rd);
1859
gen(dst, src0, src1, src2);
1861
gen_store_fpr_D(dc, rd, dst);
1865
static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
1866
void (*gen)(TCGv_ptr))
1868
gen_op_load_fpr_QT1(QFPREG(rs));
1871
gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1873
gen_op_store_QT0_fpr(QFPREG(rd));
1874
gen_update_fprs_dirty(dc, QFPREG(rd));
1877
#ifdef TARGET_SPARC64
1878
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
1879
void (*gen)(TCGv_ptr))
1881
gen_op_load_fpr_QT1(QFPREG(rs));
1885
gen_op_store_QT0_fpr(QFPREG(rd));
1886
gen_update_fprs_dirty(dc, QFPREG(rd));
1890
static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
1891
void (*gen)(TCGv_ptr))
1893
gen_op_load_fpr_QT0(QFPREG(rs1));
1894
gen_op_load_fpr_QT1(QFPREG(rs2));
1897
gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1899
gen_op_store_QT0_fpr(QFPREG(rd));
1900
gen_update_fprs_dirty(dc, QFPREG(rd));
1903
static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
1904
void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
1907
TCGv_i32 src1, src2;
1909
src1 = gen_load_fpr_F(dc, rs1);
1910
src2 = gen_load_fpr_F(dc, rs2);
1911
dst = gen_dest_fpr_D(dc, rd);
1913
gen(dst, cpu_env, src1, src2);
1914
gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1916
gen_store_fpr_D(dc, rd, dst);
1919
static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
1920
void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
1922
TCGv_i64 src1, src2;
1924
src1 = gen_load_fpr_D(dc, rs1);
1925
src2 = gen_load_fpr_D(dc, rs2);
1927
gen(cpu_env, src1, src2);
1928
gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1930
gen_op_store_QT0_fpr(QFPREG(rd));
1931
gen_update_fprs_dirty(dc, QFPREG(rd));
1934
#ifdef TARGET_SPARC64
1935
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
1936
void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1941
src = gen_load_fpr_F(dc, rs);
1942
dst = gen_dest_fpr_D(dc, rd);
1944
gen(dst, cpu_env, src);
1945
gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1947
gen_store_fpr_D(dc, rd, dst);
1951
static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
1952
void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1957
src = gen_load_fpr_F(dc, rs);
1958
dst = gen_dest_fpr_D(dc, rd);
1960
gen(dst, cpu_env, src);
1962
gen_store_fpr_D(dc, rd, dst);
1965
static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
1966
void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
1971
src = gen_load_fpr_D(dc, rs);
1972
dst = gen_dest_fpr_F(dc);
1974
gen(dst, cpu_env, src);
1975
gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1977
gen_store_fpr_F(dc, rd, dst);
1980
static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
1981
void (*gen)(TCGv_i32, TCGv_ptr))
1985
gen_op_load_fpr_QT1(QFPREG(rs));
1986
dst = gen_dest_fpr_F(dc);
1989
gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1991
gen_store_fpr_F(dc, rd, dst);
1994
static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
1995
void (*gen)(TCGv_i64, TCGv_ptr))
1999
gen_op_load_fpr_QT1(QFPREG(rs));
2000
dst = gen_dest_fpr_D(dc, rd);
2003
gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
2005
gen_store_fpr_D(dc, rd, dst);
2008
static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
2009
void (*gen)(TCGv_ptr, TCGv_i32))
2013
src = gen_load_fpr_F(dc, rs);
2017
gen_op_store_QT0_fpr(QFPREG(rd));
2018
gen_update_fprs_dirty(dc, QFPREG(rd));
2021
static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
2022
void (*gen)(TCGv_ptr, TCGv_i64))
2026
src = gen_load_fpr_D(dc, rs);
2030
gen_op_store_QT0_fpr(QFPREG(rd));
2031
gen_update_fprs_dirty(dc, QFPREG(rd));
2034
static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
2035
TCGv addr, int mmu_idx, TCGMemOp memop)
2037
gen_address_mask(dc, addr);
2038
tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop);
2041
static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
2043
TCGv m1 = tcg_const_tl(0xff);
2044
gen_address_mask(dc, addr);
2045
tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB);
2050
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
2069
static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop)
2071
int asi = GET_FIELD(insn, 19, 26);
2072
ASIType type = GET_ASI_HELPER;
2073
int mem_idx = dc->mem_idx;
2075
#ifndef TARGET_SPARC64
2076
/* Before v9, all asis are immediate and privileged. */
2078
gen_exception(dc, TT_ILL_INSN);
2079
type = GET_ASI_EXCP;
2080
} else if (supervisor(dc)
2081
/* Note that LEON accepts ASI_USERDATA in user mode, for
2082
use with CASA. Also note that previous versions of
2083
QEMU allowed (and old versions of gcc emitted) ASI_P
2084
for LEON, which is incorrect. */
2085
|| (asi == ASI_USERDATA
2086
&& (dc->def->features & CPU_FEATURE_CASA))) {
2088
case ASI_USERDATA: /* User data access */
2089
mem_idx = MMU_USER_IDX;
2090
type = GET_ASI_DIRECT;
2092
case ASI_KERNELDATA: /* Supervisor data access */
2093
mem_idx = MMU_KERNEL_IDX;
2094
type = GET_ASI_DIRECT;
2096
case ASI_M_BYPASS: /* MMU passthrough */
2097
case ASI_LEON_BYPASS: /* LEON MMU passthrough */
2098
mem_idx = MMU_PHYS_IDX;
2099
type = GET_ASI_DIRECT;
2101
case ASI_M_BCOPY: /* Block copy, sta access */
2102
mem_idx = MMU_KERNEL_IDX;
2103
type = GET_ASI_BCOPY;
2105
case ASI_M_BFILL: /* Block fill, stda access */
2106
mem_idx = MMU_KERNEL_IDX;
2107
type = GET_ASI_BFILL;
2111
gen_exception(dc, TT_PRIV_INSN);
2112
type = GET_ASI_EXCP;
2118
/* With v9, all asis below 0x80 are privileged. */
2119
/* ??? We ought to check cpu_has_hypervisor, but we didn't copy
2120
down that bit into DisasContext. For the moment that's ok,
2121
since the direct implementations below doesn't have any ASIs
2122
in the restricted [0x30, 0x7f] range, and the check will be
2123
done properly in the helper. */
2124
if (!supervisor(dc) && asi < 0x80) {
2125
gen_exception(dc, TT_PRIV_ACT);
2126
type = GET_ASI_EXCP;
2129
case ASI_REAL: /* Bypass */
2130
case ASI_REAL_IO: /* Bypass, non-cacheable */
2131
case ASI_REAL_L: /* Bypass LE */
2132
case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
2133
case ASI_TWINX_REAL: /* Real address, twinx */
2134
case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
2135
case ASI_QUAD_LDD_PHYS:
2136
case ASI_QUAD_LDD_PHYS_L:
2137
mem_idx = MMU_PHYS_IDX;
2139
case ASI_N: /* Nucleus */
2140
case ASI_NL: /* Nucleus LE */
2143
case ASI_NUCLEUS_QUAD_LDD:
2144
case ASI_NUCLEUS_QUAD_LDD_L:
2145
if (hypervisor(dc)) {
2146
mem_idx = MMU_PHYS_IDX;
2148
mem_idx = MMU_NUCLEUS_IDX;
2151
case ASI_AIUP: /* As if user primary */
2152
case ASI_AIUPL: /* As if user primary LE */
2153
case ASI_TWINX_AIUP:
2154
case ASI_TWINX_AIUP_L:
2155
case ASI_BLK_AIUP_4V:
2156
case ASI_BLK_AIUP_L_4V:
2159
mem_idx = MMU_USER_IDX;
2161
case ASI_AIUS: /* As if user secondary */
2162
case ASI_AIUSL: /* As if user secondary LE */
2163
case ASI_TWINX_AIUS:
2164
case ASI_TWINX_AIUS_L:
2165
case ASI_BLK_AIUS_4V:
2166
case ASI_BLK_AIUS_L_4V:
2169
mem_idx = MMU_USER_SECONDARY_IDX;
2171
case ASI_S: /* Secondary */
2172
case ASI_SL: /* Secondary LE */
2175
case ASI_BLK_COMMIT_S:
2182
if (mem_idx == MMU_USER_IDX) {
2183
mem_idx = MMU_USER_SECONDARY_IDX;
2184
} else if (mem_idx == MMU_KERNEL_IDX) {
2185
mem_idx = MMU_KERNEL_SECONDARY_IDX;
2188
case ASI_P: /* Primary */
2189
case ASI_PL: /* Primary LE */
2192
case ASI_BLK_COMMIT_P:
2216
type = GET_ASI_DIRECT;
2218
case ASI_TWINX_REAL:
2219
case ASI_TWINX_REAL_L:
2222
case ASI_TWINX_AIUP:
2223
case ASI_TWINX_AIUP_L:
2224
case ASI_TWINX_AIUS:
2225
case ASI_TWINX_AIUS_L:
2230
case ASI_QUAD_LDD_PHYS:
2231
case ASI_QUAD_LDD_PHYS_L:
2232
case ASI_NUCLEUS_QUAD_LDD:
2233
case ASI_NUCLEUS_QUAD_LDD_L:
2234
type = GET_ASI_DTWINX;
2236
case ASI_BLK_COMMIT_P:
2237
case ASI_BLK_COMMIT_S:
2238
case ASI_BLK_AIUP_4V:
2239
case ASI_BLK_AIUP_L_4V:
2242
case ASI_BLK_AIUS_4V:
2243
case ASI_BLK_AIUS_L_4V:
2250
type = GET_ASI_BLOCK;
2257
type = GET_ASI_SHORT;
2264
type = GET_ASI_SHORT;
2267
/* The little-endian asis all have bit 3 set. */
2274
return (DisasASI){ type, asi, mem_idx, memop };
2277
static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
2278
int insn, TCGMemOp memop)
2280
DisasASI da = get_asi(dc, insn, memop);
2285
case GET_ASI_DTWINX: /* Reserved for ldda. */
2286
gen_exception(dc, TT_ILL_INSN);
2288
case GET_ASI_DIRECT:
2289
gen_address_mask(dc, addr);
2290
tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop);
2294
TCGv_i32 r_asi = tcg_const_i32(da.asi);
2295
TCGv_i32 r_mop = tcg_const_i32(memop);
2298
#ifdef TARGET_SPARC64
2299
gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_mop);
2302
TCGv_i64 t64 = tcg_temp_new_i64();
2303
gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2304
tcg_gen_trunc_i64_tl(dst, t64);
2305
tcg_temp_free_i64(t64);
2308
tcg_temp_free_i32(r_mop);
2309
tcg_temp_free_i32(r_asi);
2315
static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
2316
int insn, TCGMemOp memop)
2318
DisasASI da = get_asi(dc, insn, memop);
2323
case GET_ASI_DTWINX: /* Reserved for stda. */
2324
#ifndef TARGET_SPARC64
2325
gen_exception(dc, TT_ILL_INSN);
2328
if (!(dc->def->features & CPU_FEATURE_HYPV)) {
2329
/* Pre OpenSPARC CPUs don't have these */
2330
gen_exception(dc, TT_ILL_INSN);
2333
/* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions
2334
* are ST_BLKINIT_ ASIs */
2337
case GET_ASI_DIRECT:
2338
gen_address_mask(dc, addr);
2339
tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop);
2341
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
2343
/* Copy 32 bytes from the address in SRC to ADDR. */
2344
/* ??? The original qemu code suggests 4-byte alignment, dropping
2345
the low bits, but the only place I can see this used is in the
2346
Linux kernel with 32 byte alignment, which would make more sense
2347
as a cacheline-style operation. */
2349
TCGv saddr = tcg_temp_new();
2350
TCGv daddr = tcg_temp_new();
2351
TCGv four = tcg_const_tl(4);
2352
TCGv_i32 tmp = tcg_temp_new_i32();
2355
tcg_gen_andi_tl(saddr, src, -4);
2356
tcg_gen_andi_tl(daddr, addr, -4);
2357
for (i = 0; i < 32; i += 4) {
2358
/* Since the loads and stores are paired, allow the
2359
copy to happen in the host endianness. */
2360
tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL);
2361
tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL);
2362
tcg_gen_add_tl(saddr, saddr, four);
2363
tcg_gen_add_tl(daddr, daddr, four);
2366
tcg_temp_free(saddr);
2367
tcg_temp_free(daddr);
2368
tcg_temp_free(four);
2369
tcg_temp_free_i32(tmp);
2375
TCGv_i32 r_asi = tcg_const_i32(da.asi);
2376
TCGv_i32 r_mop = tcg_const_i32(memop & MO_SIZE);
2379
#ifdef TARGET_SPARC64
2380
gen_helper_st_asi(cpu_env, addr, src, r_asi, r_mop);
2383
TCGv_i64 t64 = tcg_temp_new_i64();
2384
tcg_gen_extu_tl_i64(t64, src);
2385
gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2386
tcg_temp_free_i64(t64);
2389
tcg_temp_free_i32(r_mop);
2390
tcg_temp_free_i32(r_asi);
2392
/* A write to a TLB register may alter page maps. End the TB. */
2393
dc->npc = DYNAMIC_PC;
2399
static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
2400
TCGv addr, int insn)
2402
DisasASI da = get_asi(dc, insn, MO_TEUL);
2407
case GET_ASI_DIRECT:
2408
gen_swap(dc, dst, src, addr, da.mem_idx, da.memop);
2411
/* ??? Should be DAE_invalid_asi. */
2412
gen_exception(dc, TT_DATA_ACCESS);
2417
static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
2420
DisasASI da = get_asi(dc, insn, MO_TEUL);
2426
case GET_ASI_DIRECT:
2427
oldv = tcg_temp_new();
2428
tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
2429
da.mem_idx, da.memop);
2430
gen_store_gpr(dc, rd, oldv);
2431
tcg_temp_free(oldv);
2434
/* ??? Should be DAE_invalid_asi. */
2435
gen_exception(dc, TT_DATA_ACCESS);
2440
static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
2442
DisasASI da = get_asi(dc, insn, MO_UB);
2447
case GET_ASI_DIRECT:
2448
gen_ldstub(dc, dst, addr, da.mem_idx);
2451
/* ??? In theory, this should be raise DAE_invalid_asi.
2452
But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
2453
if (parallel_cpus) {
2454
gen_helper_exit_atomic(cpu_env);
2456
TCGv_i32 r_asi = tcg_const_i32(da.asi);
2457
TCGv_i32 r_mop = tcg_const_i32(MO_UB);
2461
t64 = tcg_temp_new_i64();
2462
gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2464
s64 = tcg_const_i64(0xff);
2465
gen_helper_st_asi(cpu_env, addr, s64, r_asi, r_mop);
2466
tcg_temp_free_i64(s64);
2467
tcg_temp_free_i32(r_mop);
2468
tcg_temp_free_i32(r_asi);
2470
tcg_gen_trunc_i64_tl(dst, t64);
2471
tcg_temp_free_i64(t64);
2474
dc->npc = DYNAMIC_PC;
2481
#ifdef TARGET_SPARC64
2482
static void gen_ldf_asi(DisasContext *dc, TCGv addr,
2483
int insn, int size, int rd)
2485
DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
2493
case GET_ASI_DIRECT:
2494
gen_address_mask(dc, addr);
2497
d32 = gen_dest_fpr_F(dc);
2498
tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop);
2499
gen_store_fpr_F(dc, rd, d32);
2502
tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2503
da.memop | MO_ALIGN_4);
2506
d64 = tcg_temp_new_i64();
2507
tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
2508
tcg_gen_addi_tl(addr, addr, 8);
2509
tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx,
2510
da.memop | MO_ALIGN_4);
2511
tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2512
tcg_temp_free_i64(d64);
2515
g_assert_not_reached();
2520
/* Valid for lddfa on aligned registers only. */
2521
if (size == 8 && (rd & 7) == 0) {
2526
gen_address_mask(dc, addr);
2528
/* The first operation checks required alignment. */
2529
memop = da.memop | MO_ALIGN_64;
2530
eight = tcg_const_tl(8);
2531
for (i = 0; ; ++i) {
2532
tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
2537
tcg_gen_add_tl(addr, addr, eight);
2540
tcg_temp_free(eight);
2542
gen_exception(dc, TT_ILL_INSN);
2547
/* Valid for lddfa only. */
2549
gen_address_mask(dc, addr);
2550
tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2552
gen_exception(dc, TT_ILL_INSN);
2558
TCGv_i32 r_asi = tcg_const_i32(da.asi);
2559
TCGv_i32 r_mop = tcg_const_i32(da.memop);
2562
/* According to the table in the UA2011 manual, the only
2563
other asis that are valid for ldfa/lddfa/ldqfa are
2564
the NO_FAULT asis. We still need a helper for these,
2565
but we can just use the integer asi helper for them. */
2568
d64 = tcg_temp_new_i64();
2569
gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
2570
d32 = gen_dest_fpr_F(dc);
2571
tcg_gen_extrl_i64_i32(d32, d64);
2572
tcg_temp_free_i64(d64);
2573
gen_store_fpr_F(dc, rd, d32);
2576
gen_helper_ld_asi(cpu_fpr[rd / 2], cpu_env, addr, r_asi, r_mop);
2579
d64 = tcg_temp_new_i64();
2580
gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
2581
tcg_gen_addi_tl(addr, addr, 8);
2582
gen_helper_ld_asi(cpu_fpr[rd/2+1], cpu_env, addr, r_asi, r_mop);
2583
tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2584
tcg_temp_free_i64(d64);
2587
g_assert_not_reached();
2589
tcg_temp_free_i32(r_mop);
2590
tcg_temp_free_i32(r_asi);
2596
static void gen_stf_asi(DisasContext *dc, TCGv addr,
2597
int insn, int size, int rd)
2599
DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
2606
case GET_ASI_DIRECT:
2607
gen_address_mask(dc, addr);
2610
d32 = gen_load_fpr_F(dc, rd);
2611
tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop);
2614
tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2615
da.memop | MO_ALIGN_4);
2618
/* Only 4-byte alignment required. However, it is legal for the
2619
cpu to signal the alignment fault, and the OS trap handler is
2620
required to fix it up. Requiring 16-byte alignment here avoids
2621
having to probe the second page before performing the first
2623
tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2624
da.memop | MO_ALIGN_16);
2625
tcg_gen_addi_tl(addr, addr, 8);
2626
tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
2629
g_assert_not_reached();
2634
/* Valid for stdfa on aligned registers only. */
2635
if (size == 8 && (rd & 7) == 0) {
2640
gen_address_mask(dc, addr);
2642
/* The first operation checks required alignment. */
2643
memop = da.memop | MO_ALIGN_64;
2644
eight = tcg_const_tl(8);
2645
for (i = 0; ; ++i) {
2646
tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
2651
tcg_gen_add_tl(addr, addr, eight);
2654
tcg_temp_free(eight);
2656
gen_exception(dc, TT_ILL_INSN);
2661
/* Valid for stdfa only. */
2663
gen_address_mask(dc, addr);
2664
tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2666
gen_exception(dc, TT_ILL_INSN);
2671
/* According to the table in the UA2011 manual, the only
2672
other asis that are valid for ldfa/lddfa/ldqfa are
2673
the PST* asis, which aren't currently handled. */
2674
gen_exception(dc, TT_ILL_INSN);
2679
static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2681
DisasASI da = get_asi(dc, insn, MO_TEQ);
2682
TCGv_i64 hi = gen_dest_gpr(dc, rd);
2683
TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);
2689
case GET_ASI_DTWINX:
2690
gen_address_mask(dc, addr);
2691
tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2692
tcg_gen_addi_tl(addr, addr, 8);
2693
tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
2696
case GET_ASI_DIRECT:
2698
TCGv_i64 tmp = tcg_temp_new_i64();
2700
gen_address_mask(dc, addr);
2701
tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop);
2703
/* Note that LE ldda acts as if each 32-bit register
2704
result is byte swapped. Having just performed one
2705
64-bit bswap, we need now to swap the writebacks. */
2706
if ((da.memop & MO_BSWAP) == MO_TE) {
2707
tcg_gen_extr32_i64(lo, hi, tmp);
2709
tcg_gen_extr32_i64(hi, lo, tmp);
2711
tcg_temp_free_i64(tmp);
2716
/* ??? In theory we've handled all of the ASIs that are valid
2717
for ldda, and this should raise DAE_invalid_asi. However,
2718
real hardware allows others. This can be seen with e.g.
2719
FreeBSD 10.3 wrt ASI_IC_TAG. */
2721
TCGv_i32 r_asi = tcg_const_i32(da.asi);
2722
TCGv_i32 r_mop = tcg_const_i32(da.memop);
2723
TCGv_i64 tmp = tcg_temp_new_i64();
2726
gen_helper_ld_asi(tmp, cpu_env, addr, r_asi, r_mop);
2727
tcg_temp_free_i32(r_asi);
2728
tcg_temp_free_i32(r_mop);
2731
if ((da.memop & MO_BSWAP) == MO_TE) {
2732
tcg_gen_extr32_i64(lo, hi, tmp);
2734
tcg_gen_extr32_i64(hi, lo, tmp);
2736
tcg_temp_free_i64(tmp);
2741
gen_store_gpr(dc, rd, hi);
2742
gen_store_gpr(dc, rd + 1, lo);
2745
static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2748
DisasASI da = get_asi(dc, insn, MO_TEQ);
2749
TCGv lo = gen_load_gpr(dc, rd + 1);
2755
case GET_ASI_DTWINX:
2756
gen_address_mask(dc, addr);
2757
tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2758
tcg_gen_addi_tl(addr, addr, 8);
2759
tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
2762
case GET_ASI_DIRECT:
2764
TCGv_i64 t64 = tcg_temp_new_i64();
2766
/* Note that LE stda acts as if each 32-bit register result is
2767
byte swapped. We will perform one 64-bit LE store, so now
2768
we must swap the order of the construction. */
2769
if ((da.memop & MO_BSWAP) == MO_TE) {
2770
tcg_gen_concat32_i64(t64, lo, hi);
2772
tcg_gen_concat32_i64(t64, hi, lo);
2774
gen_address_mask(dc, addr);
2775
tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
2776
tcg_temp_free_i64(t64);
2781
/* ??? In theory we've handled all of the ASIs that are valid
2782
for stda, and this should raise DAE_invalid_asi. */
2784
TCGv_i32 r_asi = tcg_const_i32(da.asi);
2785
TCGv_i32 r_mop = tcg_const_i32(da.memop);
2786
TCGv_i64 t64 = tcg_temp_new_i64();
2789
if ((da.memop & MO_BSWAP) == MO_TE) {
2790
tcg_gen_concat32_i64(t64, lo, hi);
2792
tcg_gen_concat32_i64(t64, hi, lo);
2796
gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2797
tcg_temp_free_i32(r_mop);
2798
tcg_temp_free_i32(r_asi);
2799
tcg_temp_free_i64(t64);
2805
static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
2808
DisasASI da = get_asi(dc, insn, MO_TEQ);
2814
case GET_ASI_DIRECT:
2815
oldv = tcg_temp_new();
2816
tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
2817
da.mem_idx, da.memop);
2818
gen_store_gpr(dc, rd, oldv);
2819
tcg_temp_free(oldv);
2822
/* ??? Should be DAE_invalid_asi. */
2823
gen_exception(dc, TT_DATA_ACCESS);
2828
#elif !defined(CONFIG_USER_ONLY)
2829
static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2831
/* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
2832
whereby "rd + 1" elicits "error: array subscript is above array".
2833
Since we have already asserted that rd is even, the semantics
2835
TCGv lo = gen_dest_gpr(dc, rd | 1);
2836
TCGv hi = gen_dest_gpr(dc, rd);
2837
TCGv_i64 t64 = tcg_temp_new_i64();
2838
DisasASI da = get_asi(dc, insn, MO_TEQ);
2842
tcg_temp_free_i64(t64);
2844
case GET_ASI_DIRECT:
2845
gen_address_mask(dc, addr);
2846
tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop);
2850
TCGv_i32 r_asi = tcg_const_i32(da.asi);
2851
TCGv_i32 r_mop = tcg_const_i32(MO_Q);
2854
gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2855
tcg_temp_free_i32(r_mop);
2856
tcg_temp_free_i32(r_asi);
2861
tcg_gen_extr_i64_i32(lo, hi, t64);
2862
tcg_temp_free_i64(t64);
2863
gen_store_gpr(dc, rd | 1, lo);
2864
gen_store_gpr(dc, rd, hi);
2867
static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2870
DisasASI da = get_asi(dc, insn, MO_TEQ);
2871
TCGv lo = gen_load_gpr(dc, rd + 1);
2872
TCGv_i64 t64 = tcg_temp_new_i64();
2874
tcg_gen_concat_tl_i64(t64, lo, hi);
2879
case GET_ASI_DIRECT:
2880
gen_address_mask(dc, addr);
2881
tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
2884
/* Store 32 bytes of T64 to ADDR. */
2885
/* ??? The original qemu code suggests 8-byte alignment, dropping
2886
the low bits, but the only place I can see this used is in the
2887
Linux kernel with 32 byte alignment, which would make more sense
2888
as a cacheline-style operation. */
2890
TCGv d_addr = tcg_temp_new();
2891
TCGv eight = tcg_const_tl(8);
2894
tcg_gen_andi_tl(d_addr, addr, -8);
2895
for (i = 0; i < 32; i += 8) {
2896
tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
2897
tcg_gen_add_tl(d_addr, d_addr, eight);
2900
tcg_temp_free(d_addr);
2901
tcg_temp_free(eight);
2906
TCGv_i32 r_asi = tcg_const_i32(da.asi);
2907
TCGv_i32 r_mop = tcg_const_i32(MO_Q);
2910
gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2911
tcg_temp_free_i32(r_mop);
2912
tcg_temp_free_i32(r_asi);
2917
tcg_temp_free_i64(t64);
2921
static TCGv get_src1(DisasContext *dc, unsigned int insn)
2923
unsigned int rs1 = GET_FIELD(insn, 13, 17);
2924
return gen_load_gpr(dc, rs1);
2927
static TCGv get_src2(DisasContext *dc, unsigned int insn)
2929
if (IS_IMM) { /* immediate */
2930
target_long simm = GET_FIELDs(insn, 19, 31);
2931
TCGv t = get_temp_tl(dc);
2932
tcg_gen_movi_tl(t, simm);
2934
} else { /* register */
2935
unsigned int rs2 = GET_FIELD(insn, 27, 31);
2936
return gen_load_gpr(dc, rs2);
2940
#ifdef TARGET_SPARC64
2941
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2943
TCGv_i32 c32, zero, dst, s1, s2;
2945
/* We have two choices here: extend the 32 bit data and use movcond_i64,
2946
or fold the comparison down to 32 bits and use movcond_i32. Choose
2948
c32 = tcg_temp_new_i32();
2950
tcg_gen_extrl_i64_i32(c32, cmp->c1);
2952
TCGv_i64 c64 = tcg_temp_new_i64();
2953
tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2954
tcg_gen_extrl_i64_i32(c32, c64);
2955
tcg_temp_free_i64(c64);
2958
s1 = gen_load_fpr_F(dc, rs);
2959
s2 = gen_load_fpr_F(dc, rd);
2960
dst = gen_dest_fpr_F(dc);
2961
zero = tcg_const_i32(0);
2963
tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2965
tcg_temp_free_i32(c32);
2966
tcg_temp_free_i32(zero);
2967
gen_store_fpr_F(dc, rd, dst);
2970
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2972
TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2973
tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2974
gen_load_fpr_D(dc, rs),
2975
gen_load_fpr_D(dc, rd));
2976
gen_store_fpr_D(dc, rd, dst);
2979
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2981
int qd = QFPREG(rd);
2982
int qs = QFPREG(rs);
2984
tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2985
cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2986
tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2987
cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2989
gen_update_fprs_dirty(dc, qd);
2992
#ifndef CONFIG_USER_ONLY
2993
static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
2995
TCGv_i32 r_tl = tcg_temp_new_i32();
2997
/* load env->tl into r_tl */
2998
tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
3000
/* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
3001
tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
3003
/* calculate offset to current trap state from env->ts, reuse r_tl */
3004
tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
3005
tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
3007
/* tsptr = env->ts[env->tl & MAXTL_MASK] */
3009
TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
3010
tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
3011
tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
3012
tcg_temp_free_ptr(r_tl_tmp);
3015
tcg_temp_free_i32(r_tl);
3019
static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
3020
int width, bool cc, bool left)
3022
TCGv lo1, lo2, t1, t2;
3023
uint64_t amask, tabl, tabr;
3024
int shift, imask, omask;
3027
tcg_gen_mov_tl(cpu_cc_src, s1);
3028
tcg_gen_mov_tl(cpu_cc_src2, s2);
3029
tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
3030
tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3031
dc->cc_op = CC_OP_SUB;
3034
/* Theory of operation: there are two tables, left and right (not to
3035
be confused with the left and right versions of the opcode). These
3036
are indexed by the low 3 bits of the inputs. To make things "easy",
3037
these tables are loaded into two constants, TABL and TABR below.
3038
The operation index = (input & imask) << shift calculates the index
3039
into the constant, while val = (table >> index) & omask calculates
3040
the value we're looking for. */
3047
tabl = 0x80c0e0f0f8fcfeffULL;
3048
tabr = 0xff7f3f1f0f070301ULL;
3050
tabl = 0x0103070f1f3f7fffULL;
3051
tabr = 0xfffefcf8f0e0c080ULL;
3071
tabl = (2 << 2) | 3;
3072
tabr = (3 << 2) | 1;
3074
tabl = (1 << 2) | 3;
3075
tabr = (3 << 2) | 2;
3082
lo1 = tcg_temp_new();
3083
lo2 = tcg_temp_new();
3084
tcg_gen_andi_tl(lo1, s1, imask);
3085
tcg_gen_andi_tl(lo2, s2, imask);
3086
tcg_gen_shli_tl(lo1, lo1, shift);
3087
tcg_gen_shli_tl(lo2, lo2, shift);
3089
t1 = tcg_const_tl(tabl);
3090
t2 = tcg_const_tl(tabr);
3091
tcg_gen_shr_tl(lo1, t1, lo1);
3092
tcg_gen_shr_tl(lo2, t2, lo2);
3093
tcg_gen_andi_tl(dst, lo1, omask);
3094
tcg_gen_andi_tl(lo2, lo2, omask);
3098
amask &= 0xffffffffULL;
3100
tcg_gen_andi_tl(s1, s1, amask);
3101
tcg_gen_andi_tl(s2, s2, amask);
3103
/* We want to compute
3104
dst = (s1 == s2 ? lo1 : lo1 & lo2).
3105
We've already done dst = lo1, so this reduces to
3106
dst &= (s1 == s2 ? -1 : lo2)
3111
tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
3112
tcg_gen_neg_tl(t1, t1);
3113
tcg_gen_or_tl(lo2, lo2, t1);
3114
tcg_gen_and_tl(dst, dst, lo2);
3122
static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
3124
TCGv tmp = tcg_temp_new();
3126
tcg_gen_add_tl(tmp, s1, s2);
3127
tcg_gen_andi_tl(dst, tmp, -8);
3129
tcg_gen_neg_tl(tmp, tmp);
3131
tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3136
static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
3140
t1 = tcg_temp_new();
3141
t2 = tcg_temp_new();
3142
shift = tcg_temp_new();
3144
tcg_gen_andi_tl(shift, gsr, 7);
3145
tcg_gen_shli_tl(shift, shift, 3);
3146
tcg_gen_shl_tl(t1, s1, shift);
3148
/* A shift of 64 does not produce 0 in TCG. Divide this into a
3149
shift of (up to 63) followed by a constant shift of 1. */
3150
tcg_gen_xori_tl(shift, shift, 63);
3151
tcg_gen_shr_tl(t2, s2, shift);
3152
tcg_gen_shri_tl(t2, t2, 1);
3154
tcg_gen_or_tl(dst, t1, t2);
3158
tcg_temp_free(shift);
3162
#define CHECK_IU_FEATURE(dc, FEATURE) \
3163
if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
3165
#define CHECK_FPU_FEATURE(dc, FEATURE) \
3166
if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
3169
/* before an instruction, dc->pc must be static */
3170
static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
3172
unsigned int opc, rs1, rs2, rd;
3173
TCGv cpu_src1, cpu_src2;
3174
TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
3175
TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
3178
opc = GET_FIELD(insn, 0, 1);
3179
rd = GET_FIELD(insn, 2, 6);
3182
case 0: /* branches/sethi */
3184
unsigned int xop = GET_FIELD(insn, 7, 9);
3187
#ifdef TARGET_SPARC64
3188
case 0x1: /* V9 BPcc */
3192
target = GET_FIELD_SP(insn, 0, 18);
3193
target = sign_extend(target, 19);
3195
cc = GET_FIELD_SP(insn, 20, 21);
3197
do_branch(dc, target, insn, 0);
3199
do_branch(dc, target, insn, 1);
3204
case 0x3: /* V9 BPr */
3206
target = GET_FIELD_SP(insn, 0, 13) |
3207
(GET_FIELD_SP(insn, 20, 21) << 14);
3208
target = sign_extend(target, 16);
3210
cpu_src1 = get_src1(dc, insn);
3211
do_branch_reg(dc, target, insn, cpu_src1);
3214
case 0x5: /* V9 FBPcc */
3216
int cc = GET_FIELD_SP(insn, 20, 21);
3217
if (gen_trap_ifnofpu(dc)) {
3220
target = GET_FIELD_SP(insn, 0, 18);
3221
target = sign_extend(target, 19);
3223
do_fbranch(dc, target, insn, cc);
3227
case 0x7: /* CBN+x */
3232
case 0x2: /* BN+x */
3234
target = GET_FIELD(insn, 10, 31);
3235
target = sign_extend(target, 22);
3237
do_branch(dc, target, insn, 0);
3240
case 0x6: /* FBN+x */
3242
if (gen_trap_ifnofpu(dc)) {
3245
target = GET_FIELD(insn, 10, 31);
3246
target = sign_extend(target, 22);
3248
do_fbranch(dc, target, insn, 0);
3251
case 0x4: /* SETHI */
3252
/* Special-case %g0 because that's the canonical nop. */
3254
uint32_t value = GET_FIELD(insn, 10, 31);
3255
TCGv t = gen_dest_gpr(dc, rd);
3256
tcg_gen_movi_tl(t, value << 10);
3257
gen_store_gpr(dc, rd, t);
3260
case 0x0: /* UNIMPL */
3269
target_long target = GET_FIELDs(insn, 2, 31) << 2;
3270
TCGv o7 = gen_dest_gpr(dc, 15);
3272
tcg_gen_movi_tl(o7, dc->pc);
3273
gen_store_gpr(dc, 15, o7);
3276
#ifdef TARGET_SPARC64
3277
if (unlikely(AM_CHECK(dc))) {
3278
target &= 0xffffffffULL;
3284
case 2: /* FPU & Logical Operations */
3286
unsigned int xop = GET_FIELD(insn, 7, 12);
3287
TCGv cpu_dst = get_temp_tl(dc);
3290
if (xop == 0x3a) { /* generate trap */
3291
int cond = GET_FIELD(insn, 3, 6);
3293
TCGLabel *l1 = NULL;
3304
/* Conditional trap. */
3306
#ifdef TARGET_SPARC64
3308
int cc = GET_FIELD_SP(insn, 11, 12);
3310
gen_compare(&cmp, 0, cond, dc);
3311
} else if (cc == 2) {
3312
gen_compare(&cmp, 1, cond, dc);
3317
gen_compare(&cmp, 0, cond, dc);
3319
l1 = gen_new_label();
3320
tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
3321
cmp.c1, cmp.c2, l1);
3325
mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
3326
? UA2005_HTRAP_MASK : V8_TRAP_MASK);
3328
/* Don't use the normal temporaries, as they may well have
3329
gone out of scope with the branch above. While we're
3330
doing that we might as well pre-truncate to 32-bit. */
3331
trap = tcg_temp_new_i32();
3333
rs1 = GET_FIELD_SP(insn, 14, 18);
3335
rs2 = GET_FIELD_SP(insn, 0, 7);
3337
tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
3338
/* Signal that the trap value is fully constant. */
3341
TCGv t1 = gen_load_gpr(dc, rs1);
3342
tcg_gen_trunc_tl_i32(trap, t1);
3343
tcg_gen_addi_i32(trap, trap, rs2);
3347
rs2 = GET_FIELD_SP(insn, 0, 4);
3348
t1 = gen_load_gpr(dc, rs1);
3349
t2 = gen_load_gpr(dc, rs2);
3350
tcg_gen_add_tl(t1, t1, t2);
3351
tcg_gen_trunc_tl_i32(trap, t1);
3354
tcg_gen_andi_i32(trap, trap, mask);
3355
tcg_gen_addi_i32(trap, trap, TT_TRAP);
3358
gen_helper_raise_exception(cpu_env, trap);
3359
tcg_temp_free_i32(trap);
3362
/* An unconditional trap ends the TB. */
3366
/* A conditional trap falls through to the next insn. */
3370
} else if (xop == 0x28) {
3371
rs1 = GET_FIELD(insn, 13, 17);
3374
#ifndef TARGET_SPARC64
3375
case 0x01 ... 0x0e: /* undefined in the SPARCv8
3376
manual, rdy on the microSPARC
3378
case 0x0f: /* stbar in the SPARCv8 manual,
3379
rdy on the microSPARC II */
3380
case 0x10 ... 0x1f: /* implementation-dependent in the
3381
SPARCv8 manual, rdy on the
3384
if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
3385
TCGv t = gen_dest_gpr(dc, rd);
3386
/* Read Asr17 for a Leon3 monoprocessor */
3387
tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
3388
gen_store_gpr(dc, rd, t);
3392
gen_store_gpr(dc, rd, cpu_y);
3394
#ifdef TARGET_SPARC64
3395
case 0x2: /* V9 rdccr */
3397
gen_helper_rdccr(cpu_dst, cpu_env);
3398
gen_store_gpr(dc, rd, cpu_dst);
3400
case 0x3: /* V9 rdasi */
3401
tcg_gen_movi_tl(cpu_dst, dc->asi);
3402
gen_store_gpr(dc, rd, cpu_dst);
3404
case 0x4: /* V9 rdtick */
3409
r_tickptr = tcg_temp_new_ptr();
3410
r_const = tcg_const_i32(dc->mem_idx);
3411
tcg_gen_ld_ptr(r_tickptr, cpu_env,
3412
offsetof(CPUSPARCState, tick));
3413
gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3415
tcg_temp_free_ptr(r_tickptr);
3416
tcg_temp_free_i32(r_const);
3417
gen_store_gpr(dc, rd, cpu_dst);
3420
case 0x5: /* V9 rdpc */
3422
TCGv t = gen_dest_gpr(dc, rd);
3423
if (unlikely(AM_CHECK(dc))) {
3424
tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
3426
tcg_gen_movi_tl(t, dc->pc);
3428
gen_store_gpr(dc, rd, t);
3431
case 0x6: /* V9 rdfprs */
3432
tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
3433
gen_store_gpr(dc, rd, cpu_dst);
3435
case 0xf: /* V9 membar */
3436
break; /* no effect */
3437
case 0x13: /* Graphics Status */
3438
if (gen_trap_ifnofpu(dc)) {
3441
gen_store_gpr(dc, rd, cpu_gsr);
3443
case 0x16: /* Softint */
3444
tcg_gen_ld32s_tl(cpu_dst, cpu_env,
3445
offsetof(CPUSPARCState, softint));
3446
gen_store_gpr(dc, rd, cpu_dst);
3448
case 0x17: /* Tick compare */
3449
gen_store_gpr(dc, rd, cpu_tick_cmpr);
3451
case 0x18: /* System tick */
3456
r_tickptr = tcg_temp_new_ptr();
3457
r_const = tcg_const_i32(dc->mem_idx);
3458
tcg_gen_ld_ptr(r_tickptr, cpu_env,
3459
offsetof(CPUSPARCState, stick));
3460
gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3462
tcg_temp_free_ptr(r_tickptr);
3463
tcg_temp_free_i32(r_const);
3464
gen_store_gpr(dc, rd, cpu_dst);
3467
case 0x19: /* System tick compare */
3468
gen_store_gpr(dc, rd, cpu_stick_cmpr);
3470
case 0x1a: /* UltraSPARC-T1 Strand status */
3471
/* XXX HYPV check maybe not enough, UA2005 & UA2007 describe
3472
* this ASR as impl. dep
3474
CHECK_IU_FEATURE(dc, HYPV);
3476
TCGv t = gen_dest_gpr(dc, rd);
3477
tcg_gen_movi_tl(t, 1UL);
3478
gen_store_gpr(dc, rd, t);
3481
case 0x10: /* Performance Control */
3482
case 0x11: /* Performance Instrumentation Counter */
3483
case 0x12: /* Dispatch Control */
3484
case 0x14: /* Softint set, WO */
3485
case 0x15: /* Softint clear, WO */
3490
#if !defined(CONFIG_USER_ONLY)
3491
} else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
3492
#ifndef TARGET_SPARC64
3493
if (!supervisor(dc)) {
3497
gen_helper_rdpsr(cpu_dst, cpu_env);
3499
CHECK_IU_FEATURE(dc, HYPV);
3500
if (!hypervisor(dc))
3502
rs1 = GET_FIELD(insn, 13, 17);
3505
tcg_gen_ld_i64(cpu_dst, cpu_env,
3506
offsetof(CPUSPARCState, hpstate));
3509
// gen_op_rdhtstate();
3512
tcg_gen_mov_tl(cpu_dst, cpu_hintp);
3515
tcg_gen_mov_tl(cpu_dst, cpu_htba);
3518
tcg_gen_mov_tl(cpu_dst, cpu_hver);
3520
case 31: // hstick_cmpr
3521
tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
3527
gen_store_gpr(dc, rd, cpu_dst);
3529
} else if (xop == 0x2a) { /* rdwim / V9 rdpr */
3530
if (!supervisor(dc)) {
3533
cpu_tmp0 = get_temp_tl(dc);
3534
#ifdef TARGET_SPARC64
3535
rs1 = GET_FIELD(insn, 13, 17);
3541
r_tsptr = tcg_temp_new_ptr();
3542
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3543
tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3544
offsetof(trap_state, tpc));
3545
tcg_temp_free_ptr(r_tsptr);
3552
r_tsptr = tcg_temp_new_ptr();
3553
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3554
tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3555
offsetof(trap_state, tnpc));
3556
tcg_temp_free_ptr(r_tsptr);
3563
r_tsptr = tcg_temp_new_ptr();
3564
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3565
tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3566
offsetof(trap_state, tstate));
3567
tcg_temp_free_ptr(r_tsptr);
3572
TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3574
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3575
tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
3576
offsetof(trap_state, tt));
3577
tcg_temp_free_ptr(r_tsptr);
3585
r_tickptr = tcg_temp_new_ptr();
3586
r_const = tcg_const_i32(dc->mem_idx);
3587
tcg_gen_ld_ptr(r_tickptr, cpu_env,
3588
offsetof(CPUSPARCState, tick));
3589
gen_helper_tick_get_count(cpu_tmp0, cpu_env,
3590
r_tickptr, r_const);
3591
tcg_temp_free_ptr(r_tickptr);
3592
tcg_temp_free_i32(r_const);
3596
tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
3599
tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3600
offsetof(CPUSPARCState, pstate));
3603
tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3604
offsetof(CPUSPARCState, tl));
3607
tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3608
offsetof(CPUSPARCState, psrpil));
3611
gen_helper_rdcwp(cpu_tmp0, cpu_env);
3614
tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3615
offsetof(CPUSPARCState, cansave));
3617
case 11: // canrestore
3618
tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3619
offsetof(CPUSPARCState, canrestore));
3621
case 12: // cleanwin
3622
tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3623
offsetof(CPUSPARCState, cleanwin));
3625
case 13: // otherwin
3626
tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3627
offsetof(CPUSPARCState, otherwin));
3630
tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3631
offsetof(CPUSPARCState, wstate));
3633
case 16: // UA2005 gl
3634
CHECK_IU_FEATURE(dc, GL);
3635
tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3636
offsetof(CPUSPARCState, gl));
3638
case 26: // UA2005 strand status
3639
CHECK_IU_FEATURE(dc, HYPV);
3640
if (!hypervisor(dc))
3642
tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
3645
tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
3652
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
3654
gen_store_gpr(dc, rd, cpu_tmp0);
3656
} else if (xop == 0x2b) { /* rdtbr / V9 flushw */
3657
#ifdef TARGET_SPARC64
3658
gen_helper_flushw(cpu_env);
3660
if (!supervisor(dc))
3662
gen_store_gpr(dc, rd, cpu_tbr);
3666
} else if (xop == 0x34) { /* FPU Operations */
3667
if (gen_trap_ifnofpu(dc)) {
3670
gen_op_clear_ieee_excp_and_FTT();
3671
rs1 = GET_FIELD(insn, 13, 17);
3672
rs2 = GET_FIELD(insn, 27, 31);
3673
xop = GET_FIELD(insn, 18, 26);
3676
case 0x1: /* fmovs */
3677
cpu_src1_32 = gen_load_fpr_F(dc, rs2);
3678
gen_store_fpr_F(dc, rd, cpu_src1_32);
3680
case 0x5: /* fnegs */
3681
gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
3683
case 0x9: /* fabss */
3684
gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
3686
case 0x29: /* fsqrts */
3687
CHECK_FPU_FEATURE(dc, FSQRT);
3688
gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
3690
case 0x2a: /* fsqrtd */
3691
CHECK_FPU_FEATURE(dc, FSQRT);
3692
gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
3694
case 0x2b: /* fsqrtq */
3695
CHECK_FPU_FEATURE(dc, FLOAT128);
3696
gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
3698
case 0x41: /* fadds */
3699
gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
3701
case 0x42: /* faddd */
3702
gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
3704
case 0x43: /* faddq */
3705
CHECK_FPU_FEATURE(dc, FLOAT128);
3706
gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3708
case 0x45: /* fsubs */
3709
gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3711
case 0x46: /* fsubd */
3712
gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3714
case 0x47: /* fsubq */
3715
CHECK_FPU_FEATURE(dc, FLOAT128);
3716
gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3718
case 0x49: /* fmuls */
3719
CHECK_FPU_FEATURE(dc, FMUL);
3720
gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3722
case 0x4a: /* fmuld */
3723
CHECK_FPU_FEATURE(dc, FMUL);
3724
gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3726
case 0x4b: /* fmulq */
3727
CHECK_FPU_FEATURE(dc, FLOAT128);
3728
CHECK_FPU_FEATURE(dc, FMUL);
3729
gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3731
case 0x4d: /* fdivs */
3732
gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3734
case 0x4e: /* fdivd */
3735
gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3737
case 0x4f: /* fdivq */
3738
CHECK_FPU_FEATURE(dc, FLOAT128);
3739
gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3741
case 0x69: /* fsmuld */
3742
CHECK_FPU_FEATURE(dc, FSMULD);
3743
gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3745
case 0x6e: /* fdmulq */
3746
CHECK_FPU_FEATURE(dc, FLOAT128);
3747
gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3749
case 0xc4: /* fitos */
3750
gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3752
case 0xc6: /* fdtos */
3753
gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3755
case 0xc7: /* fqtos */
3756
CHECK_FPU_FEATURE(dc, FLOAT128);
3757
gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3759
case 0xc8: /* fitod */
3760
gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3762
case 0xc9: /* fstod */
3763
gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3765
case 0xcb: /* fqtod */
3766
CHECK_FPU_FEATURE(dc, FLOAT128);
3767
gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3769
case 0xcc: /* fitoq */
3770
CHECK_FPU_FEATURE(dc, FLOAT128);
3771
gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3773
case 0xcd: /* fstoq */
3774
CHECK_FPU_FEATURE(dc, FLOAT128);
3775
gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3777
case 0xce: /* fdtoq */
3778
CHECK_FPU_FEATURE(dc, FLOAT128);
3779
gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3781
case 0xd1: /* fstoi */
3782
gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3784
case 0xd2: /* fdtoi */
3785
gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3787
case 0xd3: /* fqtoi */
3788
CHECK_FPU_FEATURE(dc, FLOAT128);
3789
gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3791
#ifdef TARGET_SPARC64
3792
case 0x2: /* V9 fmovd */
3793
cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3794
gen_store_fpr_D(dc, rd, cpu_src1_64);
3796
case 0x3: /* V9 fmovq */
3797
CHECK_FPU_FEATURE(dc, FLOAT128);
3798
gen_move_Q(dc, rd, rs2);
3800
case 0x6: /* V9 fnegd */
3801
gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3803
case 0x7: /* V9 fnegq */
3804
CHECK_FPU_FEATURE(dc, FLOAT128);
3805
gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3807
case 0xa: /* V9 fabsd */
3808
gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3810
case 0xb: /* V9 fabsq */
3811
CHECK_FPU_FEATURE(dc, FLOAT128);
3812
gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3814
case 0x81: /* V9 fstox */
3815
gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3817
case 0x82: /* V9 fdtox */
3818
gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3820
case 0x83: /* V9 fqtox */
3821
CHECK_FPU_FEATURE(dc, FLOAT128);
3822
gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3824
case 0x84: /* V9 fxtos */
3825
gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3827
case 0x88: /* V9 fxtod */
3828
gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3830
case 0x8c: /* V9 fxtoq */
3831
CHECK_FPU_FEATURE(dc, FLOAT128);
3832
gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3838
} else if (xop == 0x35) { /* FPU Operations */
3839
#ifdef TARGET_SPARC64
3842
if (gen_trap_ifnofpu(dc)) {
3845
gen_op_clear_ieee_excp_and_FTT();
3846
rs1 = GET_FIELD(insn, 13, 17);
3847
rs2 = GET_FIELD(insn, 27, 31);
3848
xop = GET_FIELD(insn, 18, 26);
3850
#ifdef TARGET_SPARC64
3854
cond = GET_FIELD_SP(insn, 10, 12); \
3855
cpu_src1 = get_src1(dc, insn); \
3856
gen_compare_reg(&cmp, cond, cpu_src1); \
3857
gen_fmov##sz(dc, &cmp, rd, rs2); \
3858
free_compare(&cmp); \
3861
if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3864
} else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3867
} else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3868
CHECK_FPU_FEATURE(dc, FLOAT128);
3875
#ifdef TARGET_SPARC64
3876
#define FMOVCC(fcc, sz) \
3879
cond = GET_FIELD_SP(insn, 14, 17); \
3880
gen_fcompare(&cmp, fcc, cond); \
3881
gen_fmov##sz(dc, &cmp, rd, rs2); \
3882
free_compare(&cmp); \
3885
case 0x001: /* V9 fmovscc %fcc0 */
3888
case 0x002: /* V9 fmovdcc %fcc0 */
3891
case 0x003: /* V9 fmovqcc %fcc0 */
3892
CHECK_FPU_FEATURE(dc, FLOAT128);
3895
case 0x041: /* V9 fmovscc %fcc1 */
3898
case 0x042: /* V9 fmovdcc %fcc1 */
3901
case 0x043: /* V9 fmovqcc %fcc1 */
3902
CHECK_FPU_FEATURE(dc, FLOAT128);
3905
case 0x081: /* V9 fmovscc %fcc2 */
3908
case 0x082: /* V9 fmovdcc %fcc2 */
3911
case 0x083: /* V9 fmovqcc %fcc2 */
3912
CHECK_FPU_FEATURE(dc, FLOAT128);
3915
case 0x0c1: /* V9 fmovscc %fcc3 */
3918
case 0x0c2: /* V9 fmovdcc %fcc3 */
3921
case 0x0c3: /* V9 fmovqcc %fcc3 */
3922
CHECK_FPU_FEATURE(dc, FLOAT128);
3926
#define FMOVCC(xcc, sz) \
3929
cond = GET_FIELD_SP(insn, 14, 17); \
3930
gen_compare(&cmp, xcc, cond, dc); \
3931
gen_fmov##sz(dc, &cmp, rd, rs2); \
3932
free_compare(&cmp); \
3935
case 0x101: /* V9 fmovscc %icc */
3938
case 0x102: /* V9 fmovdcc %icc */
3941
case 0x103: /* V9 fmovqcc %icc */
3942
CHECK_FPU_FEATURE(dc, FLOAT128);
3945
case 0x181: /* V9 fmovscc %xcc */
3948
case 0x182: /* V9 fmovdcc %xcc */
3951
case 0x183: /* V9 fmovqcc %xcc */
3952
CHECK_FPU_FEATURE(dc, FLOAT128);
3957
case 0x51: /* fcmps, V9 %fcc */
3958
cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3959
cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3960
gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3962
case 0x52: /* fcmpd, V9 %fcc */
3963
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3964
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3965
gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3967
case 0x53: /* fcmpq, V9 %fcc */
3968
CHECK_FPU_FEATURE(dc, FLOAT128);
3969
gen_op_load_fpr_QT0(QFPREG(rs1));
3970
gen_op_load_fpr_QT1(QFPREG(rs2));
3971
gen_op_fcmpq(rd & 3);
3973
case 0x55: /* fcmpes, V9 %fcc */
3974
cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3975
cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3976
gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3978
case 0x56: /* fcmped, V9 %fcc */
3979
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3980
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3981
gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3983
case 0x57: /* fcmpeq, V9 %fcc */
3984
CHECK_FPU_FEATURE(dc, FLOAT128);
3985
gen_op_load_fpr_QT0(QFPREG(rs1));
3986
gen_op_load_fpr_QT1(QFPREG(rs2));
3987
gen_op_fcmpeq(rd & 3);
3992
} else if (xop == 0x2) {
3993
TCGv dst = gen_dest_gpr(dc, rd);
3994
rs1 = GET_FIELD(insn, 13, 17);
3996
/* clr/mov shortcut : or %g0, x, y -> mov x, y */
3997
if (IS_IMM) { /* immediate */
3998
simm = GET_FIELDs(insn, 19, 31);
3999
tcg_gen_movi_tl(dst, simm);
4000
gen_store_gpr(dc, rd, dst);
4001
} else { /* register */
4002
rs2 = GET_FIELD(insn, 27, 31);
4004
tcg_gen_movi_tl(dst, 0);
4005
gen_store_gpr(dc, rd, dst);
4007
cpu_src2 = gen_load_gpr(dc, rs2);
4008
gen_store_gpr(dc, rd, cpu_src2);
4012
cpu_src1 = get_src1(dc, insn);
4013
if (IS_IMM) { /* immediate */
4014
simm = GET_FIELDs(insn, 19, 31);
4015
tcg_gen_ori_tl(dst, cpu_src1, simm);
4016
gen_store_gpr(dc, rd, dst);
4017
} else { /* register */
4018
rs2 = GET_FIELD(insn, 27, 31);
4020
/* mov shortcut: or x, %g0, y -> mov x, y */
4021
gen_store_gpr(dc, rd, cpu_src1);
4023
cpu_src2 = gen_load_gpr(dc, rs2);
4024
tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
4025
gen_store_gpr(dc, rd, dst);
4029
#ifdef TARGET_SPARC64
4030
} else if (xop == 0x25) { /* sll, V9 sllx */
4031
cpu_src1 = get_src1(dc, insn);
4032
if (IS_IMM) { /* immediate */
4033
simm = GET_FIELDs(insn, 20, 31);
4034
if (insn & (1 << 12)) {
4035
tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
4037
tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
4039
} else { /* register */
4040
rs2 = GET_FIELD(insn, 27, 31);
4041
cpu_src2 = gen_load_gpr(dc, rs2);
4042
cpu_tmp0 = get_temp_tl(dc);
4043
if (insn & (1 << 12)) {
4044
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4046
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4048
tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
4050
gen_store_gpr(dc, rd, cpu_dst);
4051
} else if (xop == 0x26) { /* srl, V9 srlx */
4052
cpu_src1 = get_src1(dc, insn);
4053
if (IS_IMM) { /* immediate */
4054
simm = GET_FIELDs(insn, 20, 31);
4055
if (insn & (1 << 12)) {
4056
tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
4058
tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
4059
tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
4061
} else { /* register */
4062
rs2 = GET_FIELD(insn, 27, 31);
4063
cpu_src2 = gen_load_gpr(dc, rs2);
4064
cpu_tmp0 = get_temp_tl(dc);
4065
if (insn & (1 << 12)) {
4066
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4067
tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
4069
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4070
tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
4071
tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
4074
gen_store_gpr(dc, rd, cpu_dst);
4075
} else if (xop == 0x27) { /* sra, V9 srax */
4076
cpu_src1 = get_src1(dc, insn);
4077
if (IS_IMM) { /* immediate */
4078
simm = GET_FIELDs(insn, 20, 31);
4079
if (insn & (1 << 12)) {
4080
tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
4082
tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
4083
tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
4085
} else { /* register */
4086
rs2 = GET_FIELD(insn, 27, 31);
4087
cpu_src2 = gen_load_gpr(dc, rs2);
4088
cpu_tmp0 = get_temp_tl(dc);
4089
if (insn & (1 << 12)) {
4090
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4091
tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
4093
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4094
tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
4095
tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
4098
gen_store_gpr(dc, rd, cpu_dst);
4100
} else if (xop < 0x36) {
4102
cpu_src1 = get_src1(dc, insn);
4103
cpu_src2 = get_src2(dc, insn);
4104
switch (xop & ~0x10) {
4107
gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4108
tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4109
dc->cc_op = CC_OP_ADD;
4111
tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4115
tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
4117
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4118
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4119
dc->cc_op = CC_OP_LOGIC;
4123
tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
4125
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4126
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4127
dc->cc_op = CC_OP_LOGIC;
4131
tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
4133
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4134
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4135
dc->cc_op = CC_OP_LOGIC;
4140
gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4141
tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
4142
dc->cc_op = CC_OP_SUB;
4144
tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
4147
case 0x5: /* andn */
4148
tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
4150
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4151
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4152
dc->cc_op = CC_OP_LOGIC;
4156
tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
4158
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4159
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4160
dc->cc_op = CC_OP_LOGIC;
4163
case 0x7: /* xorn */
4164
tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
4166
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4167
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4168
dc->cc_op = CC_OP_LOGIC;
4171
case 0x8: /* addx, V9 addc */
4172
gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4175
#ifdef TARGET_SPARC64
4176
case 0x9: /* V9 mulx */
4177
tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
4180
case 0xa: /* umul */
4181
CHECK_IU_FEATURE(dc, MUL);
4182
gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
4184
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4185
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4186
dc->cc_op = CC_OP_LOGIC;
4189
case 0xb: /* smul */
4190
CHECK_IU_FEATURE(dc, MUL);
4191
gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
4193
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4194
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4195
dc->cc_op = CC_OP_LOGIC;
4198
case 0xc: /* subx, V9 subc */
4199
gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4202
#ifdef TARGET_SPARC64
4203
case 0xd: /* V9 udivx */
4204
gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4207
case 0xe: /* udiv */
4208
CHECK_IU_FEATURE(dc, DIV);
4210
gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
4212
dc->cc_op = CC_OP_DIV;
4214
gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
4218
case 0xf: /* sdiv */
4219
CHECK_IU_FEATURE(dc, DIV);
4221
gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
4223
dc->cc_op = CC_OP_DIV;
4225
gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
4232
gen_store_gpr(dc, rd, cpu_dst);
4234
cpu_src1 = get_src1(dc, insn);
4235
cpu_src2 = get_src2(dc, insn);
4237
case 0x20: /* taddcc */
4238
gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4239
gen_store_gpr(dc, rd, cpu_dst);
4240
tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
4241
dc->cc_op = CC_OP_TADD;
4243
case 0x21: /* tsubcc */
4244
gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4245
gen_store_gpr(dc, rd, cpu_dst);
4246
tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
4247
dc->cc_op = CC_OP_TSUB;
4249
case 0x22: /* taddcctv */
4250
gen_helper_taddcctv(cpu_dst, cpu_env,
4251
cpu_src1, cpu_src2);
4252
gen_store_gpr(dc, rd, cpu_dst);
4253
dc->cc_op = CC_OP_TADDTV;
4255
case 0x23: /* tsubcctv */
4256
gen_helper_tsubcctv(cpu_dst, cpu_env,
4257
cpu_src1, cpu_src2);
4258
gen_store_gpr(dc, rd, cpu_dst);
4259
dc->cc_op = CC_OP_TSUBTV;
4261
case 0x24: /* mulscc */
4263
gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
4264
gen_store_gpr(dc, rd, cpu_dst);
4265
tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4266
dc->cc_op = CC_OP_ADD;
4268
#ifndef TARGET_SPARC64
4269
case 0x25: /* sll */
4270
if (IS_IMM) { /* immediate */
4271
simm = GET_FIELDs(insn, 20, 31);
4272
tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
4273
} else { /* register */
4274
cpu_tmp0 = get_temp_tl(dc);
4275
tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4276
tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
4278
gen_store_gpr(dc, rd, cpu_dst);
4280
case 0x26: /* srl */
4281
if (IS_IMM) { /* immediate */
4282
simm = GET_FIELDs(insn, 20, 31);
4283
tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
4284
} else { /* register */
4285
cpu_tmp0 = get_temp_tl(dc);
4286
tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4287
tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
4289
gen_store_gpr(dc, rd, cpu_dst);
4291
case 0x27: /* sra */
4292
if (IS_IMM) { /* immediate */
4293
simm = GET_FIELDs(insn, 20, 31);
4294
tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
4295
} else { /* register */
4296
cpu_tmp0 = get_temp_tl(dc);
4297
tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4298
tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
4300
gen_store_gpr(dc, rd, cpu_dst);
4305
cpu_tmp0 = get_temp_tl(dc);
4308
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4309
tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
4311
#ifndef TARGET_SPARC64
4312
case 0x01 ... 0x0f: /* undefined in the
4316
case 0x10 ... 0x1f: /* implementation-dependent
4320
if ((rd == 0x13) && (dc->def->features &
4321
CPU_FEATURE_POWERDOWN)) {
4322
/* LEON3 power-down */
4324
gen_helper_power_down(cpu_env);
4328
case 0x2: /* V9 wrccr */
4329
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4330
gen_helper_wrccr(cpu_env, cpu_tmp0);
4331
tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4332
dc->cc_op = CC_OP_FLAGS;
4334
case 0x3: /* V9 wrasi */
4335
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4336
tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
4337
tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4338
offsetof(CPUSPARCState, asi));
4339
/* End TB to notice changed ASI. */
4345
case 0x6: /* V9 wrfprs */
4346
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4347
tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
4354
case 0xf: /* V9 sir, nop if user */
4355
#if !defined(CONFIG_USER_ONLY)
4356
if (supervisor(dc)) {
4361
case 0x13: /* Graphics Status */
4362
if (gen_trap_ifnofpu(dc)) {
4365
tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
4367
case 0x14: /* Softint set */
4368
if (!supervisor(dc))
4370
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4371
gen_helper_set_softint(cpu_env, cpu_tmp0);
4373
case 0x15: /* Softint clear */
4374
if (!supervisor(dc))
4376
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4377
gen_helper_clear_softint(cpu_env, cpu_tmp0);
4379
case 0x16: /* Softint write */
4380
if (!supervisor(dc))
4382
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4383
gen_helper_write_softint(cpu_env, cpu_tmp0);
4385
case 0x17: /* Tick compare */
4386
#if !defined(CONFIG_USER_ONLY)
4387
if (!supervisor(dc))
4393
tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
4395
r_tickptr = tcg_temp_new_ptr();
4396
tcg_gen_ld_ptr(r_tickptr, cpu_env,
4397
offsetof(CPUSPARCState, tick));
4398
gen_helper_tick_set_limit(r_tickptr,
4400
tcg_temp_free_ptr(r_tickptr);
4403
case 0x18: /* System tick */
4404
#if !defined(CONFIG_USER_ONLY)
4405
if (!supervisor(dc))
4411
tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
4413
r_tickptr = tcg_temp_new_ptr();
4414
tcg_gen_ld_ptr(r_tickptr, cpu_env,
4415
offsetof(CPUSPARCState, stick));
4416
gen_helper_tick_set_count(r_tickptr,
4418
tcg_temp_free_ptr(r_tickptr);
4421
case 0x19: /* System tick compare */
4422
#if !defined(CONFIG_USER_ONLY)
4423
if (!supervisor(dc))
4429
tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
4431
r_tickptr = tcg_temp_new_ptr();
4432
tcg_gen_ld_ptr(r_tickptr, cpu_env,
4433
offsetof(CPUSPARCState, stick));
4434
gen_helper_tick_set_limit(r_tickptr,
4436
tcg_temp_free_ptr(r_tickptr);
4440
case 0x10: /* Performance Control */
4441
case 0x11: /* Performance Instrumentation
4443
case 0x12: /* Dispatch Control */
4450
#if !defined(CONFIG_USER_ONLY)
4451
case 0x31: /* wrpsr, V9 saved, restored */
4453
if (!supervisor(dc))
4455
#ifdef TARGET_SPARC64
4458
gen_helper_saved(cpu_env);
4461
gen_helper_restored(cpu_env);
4463
case 2: /* UA2005 allclean */
4464
case 3: /* UA2005 otherw */
4465
case 4: /* UA2005 normalw */
4466
case 5: /* UA2005 invalw */
4472
cpu_tmp0 = get_temp_tl(dc);
4473
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4474
gen_helper_wrpsr(cpu_env, cpu_tmp0);
4475
tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4476
dc->cc_op = CC_OP_FLAGS;
4484
case 0x32: /* wrwim, V9 wrpr */
4486
if (!supervisor(dc))
4488
cpu_tmp0 = get_temp_tl(dc);
4489
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4490
#ifdef TARGET_SPARC64
4496
r_tsptr = tcg_temp_new_ptr();
4497
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4498
tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4499
offsetof(trap_state, tpc));
4500
tcg_temp_free_ptr(r_tsptr);
4507
r_tsptr = tcg_temp_new_ptr();
4508
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4509
tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4510
offsetof(trap_state, tnpc));
4511
tcg_temp_free_ptr(r_tsptr);
4518
r_tsptr = tcg_temp_new_ptr();
4519
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4520
tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4521
offsetof(trap_state,
4523
tcg_temp_free_ptr(r_tsptr);
4530
r_tsptr = tcg_temp_new_ptr();
4531
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4532
tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
4533
offsetof(trap_state, tt));
4534
tcg_temp_free_ptr(r_tsptr);
4541
r_tickptr = tcg_temp_new_ptr();
4542
tcg_gen_ld_ptr(r_tickptr, cpu_env,
4543
offsetof(CPUSPARCState, tick));
4544
gen_helper_tick_set_count(r_tickptr,
4546
tcg_temp_free_ptr(r_tickptr);
4550
tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
4554
gen_helper_wrpstate(cpu_env, cpu_tmp0);
4555
dc->npc = DYNAMIC_PC;
4559
tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4560
offsetof(CPUSPARCState, tl));
4561
dc->npc = DYNAMIC_PC;
4564
gen_helper_wrpil(cpu_env, cpu_tmp0);
4567
gen_helper_wrcwp(cpu_env, cpu_tmp0);
4570
tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4571
offsetof(CPUSPARCState,
4574
case 11: // canrestore
4575
tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4576
offsetof(CPUSPARCState,
4579
case 12: // cleanwin
4580
tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4581
offsetof(CPUSPARCState,
4584
case 13: // otherwin
4585
tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4586
offsetof(CPUSPARCState,
4590
tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4591
offsetof(CPUSPARCState,
4594
case 16: // UA2005 gl
4595
CHECK_IU_FEATURE(dc, GL);
4596
gen_helper_wrgl(cpu_env, cpu_tmp0);
4598
case 26: // UA2005 strand status
4599
CHECK_IU_FEATURE(dc, HYPV);
4600
if (!hypervisor(dc))
4602
tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
4608
tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
4609
if (dc->def->nwindows != 32) {
4610
tcg_gen_andi_tl(cpu_wim, cpu_wim,
4611
(1 << dc->def->nwindows) - 1);
4616
case 0x33: /* wrtbr, UA2005 wrhpr */
4618
#ifndef TARGET_SPARC64
4619
if (!supervisor(dc))
4621
tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
4623
CHECK_IU_FEATURE(dc, HYPV);
4624
if (!hypervisor(dc))
4626
cpu_tmp0 = get_temp_tl(dc);
4627
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4630
tcg_gen_st_i64(cpu_tmp0, cpu_env,
4631
offsetof(CPUSPARCState,
4639
// XXX gen_op_wrhtstate();
4642
tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
4645
tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
4647
case 31: // hstick_cmpr
4651
tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
4652
r_tickptr = tcg_temp_new_ptr();
4653
tcg_gen_ld_ptr(r_tickptr, cpu_env,
4654
offsetof(CPUSPARCState, hstick));
4655
gen_helper_tick_set_limit(r_tickptr,
4657
tcg_temp_free_ptr(r_tickptr);
4660
case 6: // hver readonly
4668
#ifdef TARGET_SPARC64
4669
case 0x2c: /* V9 movcc */
4671
int cc = GET_FIELD_SP(insn, 11, 12);
4672
int cond = GET_FIELD_SP(insn, 14, 17);
4676
if (insn & (1 << 18)) {
4678
gen_compare(&cmp, 0, cond, dc);
4679
} else if (cc == 2) {
4680
gen_compare(&cmp, 1, cond, dc);
4685
gen_fcompare(&cmp, cc, cond);
4688
/* The get_src2 above loaded the normal 13-bit
4689
immediate field, not the 11-bit field we have
4690
in movcc. But it did handle the reg case. */
4692
simm = GET_FIELD_SPs(insn, 0, 10);
4693
tcg_gen_movi_tl(cpu_src2, simm);
4696
dst = gen_load_gpr(dc, rd);
4697
tcg_gen_movcond_tl(cmp.cond, dst,
4701
gen_store_gpr(dc, rd, dst);
4704
case 0x2d: /* V9 sdivx */
4705
gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4706
gen_store_gpr(dc, rd, cpu_dst);
4708
case 0x2e: /* V9 popc */
4709
tcg_gen_ctpop_tl(cpu_dst, cpu_src2);
4710
gen_store_gpr(dc, rd, cpu_dst);
4712
case 0x2f: /* V9 movr */
4714
int cond = GET_FIELD_SP(insn, 10, 12);
4718
gen_compare_reg(&cmp, cond, cpu_src1);
4720
/* The get_src2 above loaded the normal 13-bit
4721
immediate field, not the 10-bit field we have
4722
in movr. But it did handle the reg case. */
4724
simm = GET_FIELD_SPs(insn, 0, 9);
4725
tcg_gen_movi_tl(cpu_src2, simm);
4728
dst = gen_load_gpr(dc, rd);
4729
tcg_gen_movcond_tl(cmp.cond, dst,
4733
gen_store_gpr(dc, rd, dst);
4741
} else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4742
#ifdef TARGET_SPARC64
4743
int opf = GET_FIELD_SP(insn, 5, 13);
4744
rs1 = GET_FIELD(insn, 13, 17);
4745
rs2 = GET_FIELD(insn, 27, 31);
4746
if (gen_trap_ifnofpu(dc)) {
4751
case 0x000: /* VIS I edge8cc */
4752
CHECK_FPU_FEATURE(dc, VIS1);
4753
cpu_src1 = gen_load_gpr(dc, rs1);
4754
cpu_src2 = gen_load_gpr(dc, rs2);
4755
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4756
gen_store_gpr(dc, rd, cpu_dst);
4758
case 0x001: /* VIS II edge8n */
4759
CHECK_FPU_FEATURE(dc, VIS2);
4760
cpu_src1 = gen_load_gpr(dc, rs1);
4761
cpu_src2 = gen_load_gpr(dc, rs2);
4762
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4763
gen_store_gpr(dc, rd, cpu_dst);
4765
case 0x002: /* VIS I edge8lcc */
4766
CHECK_FPU_FEATURE(dc, VIS1);
4767
cpu_src1 = gen_load_gpr(dc, rs1);
4768
cpu_src2 = gen_load_gpr(dc, rs2);
4769
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4770
gen_store_gpr(dc, rd, cpu_dst);
4772
case 0x003: /* VIS II edge8ln */
4773
CHECK_FPU_FEATURE(dc, VIS2);
4774
cpu_src1 = gen_load_gpr(dc, rs1);
4775
cpu_src2 = gen_load_gpr(dc, rs2);
4776
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4777
gen_store_gpr(dc, rd, cpu_dst);
4779
case 0x004: /* VIS I edge16cc */
4780
CHECK_FPU_FEATURE(dc, VIS1);
4781
cpu_src1 = gen_load_gpr(dc, rs1);
4782
cpu_src2 = gen_load_gpr(dc, rs2);
4783
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4784
gen_store_gpr(dc, rd, cpu_dst);
4786
case 0x005: /* VIS II edge16n */
4787
CHECK_FPU_FEATURE(dc, VIS2);
4788
cpu_src1 = gen_load_gpr(dc, rs1);
4789
cpu_src2 = gen_load_gpr(dc, rs2);
4790
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4791
gen_store_gpr(dc, rd, cpu_dst);
4793
case 0x006: /* VIS I edge16lcc */
4794
CHECK_FPU_FEATURE(dc, VIS1);
4795
cpu_src1 = gen_load_gpr(dc, rs1);
4796
cpu_src2 = gen_load_gpr(dc, rs2);
4797
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4798
gen_store_gpr(dc, rd, cpu_dst);
4800
case 0x007: /* VIS II edge16ln */
4801
CHECK_FPU_FEATURE(dc, VIS2);
4802
cpu_src1 = gen_load_gpr(dc, rs1);
4803
cpu_src2 = gen_load_gpr(dc, rs2);
4804
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4805
gen_store_gpr(dc, rd, cpu_dst);
4807
case 0x008: /* VIS I edge32cc */
4808
CHECK_FPU_FEATURE(dc, VIS1);
4809
cpu_src1 = gen_load_gpr(dc, rs1);
4810
cpu_src2 = gen_load_gpr(dc, rs2);
4811
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4812
gen_store_gpr(dc, rd, cpu_dst);
4814
case 0x009: /* VIS II edge32n */
4815
CHECK_FPU_FEATURE(dc, VIS2);
4816
cpu_src1 = gen_load_gpr(dc, rs1);
4817
cpu_src2 = gen_load_gpr(dc, rs2);
4818
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4819
gen_store_gpr(dc, rd, cpu_dst);
4821
case 0x00a: /* VIS I edge32lcc */
4822
CHECK_FPU_FEATURE(dc, VIS1);
4823
cpu_src1 = gen_load_gpr(dc, rs1);
4824
cpu_src2 = gen_load_gpr(dc, rs2);
4825
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4826
gen_store_gpr(dc, rd, cpu_dst);
4828
case 0x00b: /* VIS II edge32ln */
4829
CHECK_FPU_FEATURE(dc, VIS2);
4830
cpu_src1 = gen_load_gpr(dc, rs1);
4831
cpu_src2 = gen_load_gpr(dc, rs2);
4832
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4833
gen_store_gpr(dc, rd, cpu_dst);
4835
case 0x010: /* VIS I array8 */
4836
CHECK_FPU_FEATURE(dc, VIS1);
4837
cpu_src1 = gen_load_gpr(dc, rs1);
4838
cpu_src2 = gen_load_gpr(dc, rs2);
4839
gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4840
gen_store_gpr(dc, rd, cpu_dst);
4842
case 0x012: /* VIS I array16 */
4843
CHECK_FPU_FEATURE(dc, VIS1);
4844
cpu_src1 = gen_load_gpr(dc, rs1);
4845
cpu_src2 = gen_load_gpr(dc, rs2);
4846
gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4847
tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4848
gen_store_gpr(dc, rd, cpu_dst);
4850
case 0x014: /* VIS I array32 */
4851
CHECK_FPU_FEATURE(dc, VIS1);
4852
cpu_src1 = gen_load_gpr(dc, rs1);
4853
cpu_src2 = gen_load_gpr(dc, rs2);
4854
gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4855
tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4856
gen_store_gpr(dc, rd, cpu_dst);
4858
case 0x018: /* VIS I alignaddr */
4859
CHECK_FPU_FEATURE(dc, VIS1);
4860
cpu_src1 = gen_load_gpr(dc, rs1);
4861
cpu_src2 = gen_load_gpr(dc, rs2);
4862
gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4863
gen_store_gpr(dc, rd, cpu_dst);
4865
case 0x01a: /* VIS I alignaddrl */
4866
CHECK_FPU_FEATURE(dc, VIS1);
4867
cpu_src1 = gen_load_gpr(dc, rs1);
4868
cpu_src2 = gen_load_gpr(dc, rs2);
4869
gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4870
gen_store_gpr(dc, rd, cpu_dst);
4872
case 0x019: /* VIS II bmask */
4873
CHECK_FPU_FEATURE(dc, VIS2);
4874
cpu_src1 = gen_load_gpr(dc, rs1);
4875
cpu_src2 = gen_load_gpr(dc, rs2);
4876
tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4877
tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4878
gen_store_gpr(dc, rd, cpu_dst);
4880
case 0x020: /* VIS I fcmple16 */
4881
CHECK_FPU_FEATURE(dc, VIS1);
4882
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4883
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4884
gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4885
gen_store_gpr(dc, rd, cpu_dst);
4887
case 0x022: /* VIS I fcmpne16 */
4888
CHECK_FPU_FEATURE(dc, VIS1);
4889
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4890
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4891
gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4892
gen_store_gpr(dc, rd, cpu_dst);
4894
case 0x024: /* VIS I fcmple32 */
4895
CHECK_FPU_FEATURE(dc, VIS1);
4896
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4897
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4898
gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4899
gen_store_gpr(dc, rd, cpu_dst);
4901
case 0x026: /* VIS I fcmpne32 */
4902
CHECK_FPU_FEATURE(dc, VIS1);
4903
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4904
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4905
gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4906
gen_store_gpr(dc, rd, cpu_dst);
4908
case 0x028: /* VIS I fcmpgt16 */
4909
CHECK_FPU_FEATURE(dc, VIS1);
4910
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4911
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4912
gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4913
gen_store_gpr(dc, rd, cpu_dst);
4915
case 0x02a: /* VIS I fcmpeq16 */
4916
CHECK_FPU_FEATURE(dc, VIS1);
4917
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4918
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4919
gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4920
gen_store_gpr(dc, rd, cpu_dst);
4922
case 0x02c: /* VIS I fcmpgt32 */
4923
CHECK_FPU_FEATURE(dc, VIS1);
4924
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4925
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4926
gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4927
gen_store_gpr(dc, rd, cpu_dst);
4929
case 0x02e: /* VIS I fcmpeq32 */
4930
CHECK_FPU_FEATURE(dc, VIS1);
4931
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4932
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4933
gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4934
gen_store_gpr(dc, rd, cpu_dst);
4936
case 0x031: /* VIS I fmul8x16 */
4937
CHECK_FPU_FEATURE(dc, VIS1);
4938
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4940
case 0x033: /* VIS I fmul8x16au */
4941
CHECK_FPU_FEATURE(dc, VIS1);
4942
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4944
case 0x035: /* VIS I fmul8x16al */
4945
CHECK_FPU_FEATURE(dc, VIS1);
4946
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4948
case 0x036: /* VIS I fmul8sux16 */
4949
CHECK_FPU_FEATURE(dc, VIS1);
4950
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4952
case 0x037: /* VIS I fmul8ulx16 */
4953
CHECK_FPU_FEATURE(dc, VIS1);
4954
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4956
case 0x038: /* VIS I fmuld8sux16 */
4957
CHECK_FPU_FEATURE(dc, VIS1);
4958
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4960
case 0x039: /* VIS I fmuld8ulx16 */
4961
CHECK_FPU_FEATURE(dc, VIS1);
4962
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4964
case 0x03a: /* VIS I fpack32 */
4965
CHECK_FPU_FEATURE(dc, VIS1);
4966
gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4968
case 0x03b: /* VIS I fpack16 */
4969
CHECK_FPU_FEATURE(dc, VIS1);
4970
cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4971
cpu_dst_32 = gen_dest_fpr_F(dc);
4972
gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4973
gen_store_fpr_F(dc, rd, cpu_dst_32);
4975
case 0x03d: /* VIS I fpackfix */
4976
CHECK_FPU_FEATURE(dc, VIS1);
4977
cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4978
cpu_dst_32 = gen_dest_fpr_F(dc);
4979
gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4980
gen_store_fpr_F(dc, rd, cpu_dst_32);
4982
case 0x03e: /* VIS I pdist */
4983
CHECK_FPU_FEATURE(dc, VIS1);
4984
gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4986
case 0x048: /* VIS I faligndata */
4987
CHECK_FPU_FEATURE(dc, VIS1);
4988
gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4990
case 0x04b: /* VIS I fpmerge */
4991
CHECK_FPU_FEATURE(dc, VIS1);
4992
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4994
case 0x04c: /* VIS II bshuffle */
4995
CHECK_FPU_FEATURE(dc, VIS2);
4996
gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4998
case 0x04d: /* VIS I fexpand */
4999
CHECK_FPU_FEATURE(dc, VIS1);
5000
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
5002
case 0x050: /* VIS I fpadd16 */
5003
CHECK_FPU_FEATURE(dc, VIS1);
5004
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
5006
case 0x051: /* VIS I fpadd16s */
5007
CHECK_FPU_FEATURE(dc, VIS1);
5008
gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
5010
case 0x052: /* VIS I fpadd32 */
5011
CHECK_FPU_FEATURE(dc, VIS1);
5012
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
5014
case 0x053: /* VIS I fpadd32s */
5015
CHECK_FPU_FEATURE(dc, VIS1);
5016
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
5018
case 0x054: /* VIS I fpsub16 */
5019
CHECK_FPU_FEATURE(dc, VIS1);
5020
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
5022
case 0x055: /* VIS I fpsub16s */
5023
CHECK_FPU_FEATURE(dc, VIS1);
5024
gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
5026
case 0x056: /* VIS I fpsub32 */
5027
CHECK_FPU_FEATURE(dc, VIS1);
5028
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
5030
case 0x057: /* VIS I fpsub32s */
5031
CHECK_FPU_FEATURE(dc, VIS1);
5032
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
5034
case 0x060: /* VIS I fzero */
5035
CHECK_FPU_FEATURE(dc, VIS1);
5036
cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5037
tcg_gen_movi_i64(cpu_dst_64, 0);
5038
gen_store_fpr_D(dc, rd, cpu_dst_64);
5040
case 0x061: /* VIS I fzeros */
5041
CHECK_FPU_FEATURE(dc, VIS1);
5042
cpu_dst_32 = gen_dest_fpr_F(dc);
5043
tcg_gen_movi_i32(cpu_dst_32, 0);
5044
gen_store_fpr_F(dc, rd, cpu_dst_32);
5046
case 0x062: /* VIS I fnor */
5047
CHECK_FPU_FEATURE(dc, VIS1);
5048
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
5050
case 0x063: /* VIS I fnors */
5051
CHECK_FPU_FEATURE(dc, VIS1);
5052
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
5054
case 0x064: /* VIS I fandnot2 */
5055
CHECK_FPU_FEATURE(dc, VIS1);
5056
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
5058
case 0x065: /* VIS I fandnot2s */
5059
CHECK_FPU_FEATURE(dc, VIS1);
5060
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
5062
case 0x066: /* VIS I fnot2 */
5063
CHECK_FPU_FEATURE(dc, VIS1);
5064
gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
5066
case 0x067: /* VIS I fnot2s */
5067
CHECK_FPU_FEATURE(dc, VIS1);
5068
gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
5070
case 0x068: /* VIS I fandnot1 */
5071
CHECK_FPU_FEATURE(dc, VIS1);
5072
gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
5074
case 0x069: /* VIS I fandnot1s */
5075
CHECK_FPU_FEATURE(dc, VIS1);
5076
gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
5078
case 0x06a: /* VIS I fnot1 */
5079
CHECK_FPU_FEATURE(dc, VIS1);
5080
gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
5082
case 0x06b: /* VIS I fnot1s */
5083
CHECK_FPU_FEATURE(dc, VIS1);
5084
gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
5086
case 0x06c: /* VIS I fxor */
5087
CHECK_FPU_FEATURE(dc, VIS1);
5088
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
5090
case 0x06d: /* VIS I fxors */
5091
CHECK_FPU_FEATURE(dc, VIS1);
5092
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
5094
case 0x06e: /* VIS I fnand */
5095
CHECK_FPU_FEATURE(dc, VIS1);
5096
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
5098
case 0x06f: /* VIS I fnands */
5099
CHECK_FPU_FEATURE(dc, VIS1);
5100
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
5102
case 0x070: /* VIS I fand */
5103
CHECK_FPU_FEATURE(dc, VIS1);
5104
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
5106
case 0x071: /* VIS I fands */
5107
CHECK_FPU_FEATURE(dc, VIS1);
5108
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
5110
case 0x072: /* VIS I fxnor */
5111
CHECK_FPU_FEATURE(dc, VIS1);
5112
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
5114
case 0x073: /* VIS I fxnors */
5115
CHECK_FPU_FEATURE(dc, VIS1);
5116
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
5118
case 0x074: /* VIS I fsrc1 */
5119
CHECK_FPU_FEATURE(dc, VIS1);
5120
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5121
gen_store_fpr_D(dc, rd, cpu_src1_64);
5123
case 0x075: /* VIS I fsrc1s */
5124
CHECK_FPU_FEATURE(dc, VIS1);
5125
cpu_src1_32 = gen_load_fpr_F(dc, rs1);
5126
gen_store_fpr_F(dc, rd, cpu_src1_32);
5128
case 0x076: /* VIS I fornot2 */
5129
CHECK_FPU_FEATURE(dc, VIS1);
5130
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
5132
case 0x077: /* VIS I fornot2s */
5133
CHECK_FPU_FEATURE(dc, VIS1);
5134
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
5136
case 0x078: /* VIS I fsrc2 */
5137
CHECK_FPU_FEATURE(dc, VIS1);
5138
cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5139
gen_store_fpr_D(dc, rd, cpu_src1_64);
5141
case 0x079: /* VIS I fsrc2s */
5142
CHECK_FPU_FEATURE(dc, VIS1);
5143
cpu_src1_32 = gen_load_fpr_F(dc, rs2);
5144
gen_store_fpr_F(dc, rd, cpu_src1_32);
5146
case 0x07a: /* VIS I fornot1 */
5147
CHECK_FPU_FEATURE(dc, VIS1);
5148
gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
5150
case 0x07b: /* VIS I fornot1s */
5151
CHECK_FPU_FEATURE(dc, VIS1);
5152
gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
5154
case 0x07c: /* VIS I for */
5155
CHECK_FPU_FEATURE(dc, VIS1);
5156
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
5158
case 0x07d: /* VIS I fors */
5159
CHECK_FPU_FEATURE(dc, VIS1);
5160
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
5162
case 0x07e: /* VIS I fone */
5163
CHECK_FPU_FEATURE(dc, VIS1);
5164
cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5165
tcg_gen_movi_i64(cpu_dst_64, -1);
5166
gen_store_fpr_D(dc, rd, cpu_dst_64);
5168
case 0x07f: /* VIS I fones */
5169
CHECK_FPU_FEATURE(dc, VIS1);
5170
cpu_dst_32 = gen_dest_fpr_F(dc);
5171
tcg_gen_movi_i32(cpu_dst_32, -1);
5172
gen_store_fpr_F(dc, rd, cpu_dst_32);
5174
case 0x080: /* VIS I shutdown */
5175
case 0x081: /* VIS II siam */
5184
} else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
5185
#ifdef TARGET_SPARC64
5190
#ifdef TARGET_SPARC64
5191
} else if (xop == 0x39) { /* V9 return */
5193
cpu_src1 = get_src1(dc, insn);
5194
cpu_tmp0 = get_temp_tl(dc);
5195
if (IS_IMM) { /* immediate */
5196
simm = GET_FIELDs(insn, 19, 31);
5197
tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5198
} else { /* register */
5199
rs2 = GET_FIELD(insn, 27, 31);
5201
cpu_src2 = gen_load_gpr(dc, rs2);
5202
tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5204
tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5207
gen_helper_restore(cpu_env);
5209
gen_check_align(cpu_tmp0, 3);
5210
tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5211
dc->npc = DYNAMIC_PC;
5215
cpu_src1 = get_src1(dc, insn);
5216
cpu_tmp0 = get_temp_tl(dc);
5217
if (IS_IMM) { /* immediate */
5218
simm = GET_FIELDs(insn, 19, 31);
5219
tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5220
} else { /* register */
5221
rs2 = GET_FIELD(insn, 27, 31);
5223
cpu_src2 = gen_load_gpr(dc, rs2);
5224
tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5226
tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5230
case 0x38: /* jmpl */
5232
TCGv t = gen_dest_gpr(dc, rd);
5233
tcg_gen_movi_tl(t, dc->pc);
5234
gen_store_gpr(dc, rd, t);
5237
gen_check_align(cpu_tmp0, 3);
5238
gen_address_mask(dc, cpu_tmp0);
5239
tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5240
dc->npc = DYNAMIC_PC;
5243
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5244
case 0x39: /* rett, V9 return */
5246
if (!supervisor(dc))
5249
gen_check_align(cpu_tmp0, 3);
5250
tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5251
dc->npc = DYNAMIC_PC;
5252
gen_helper_rett(cpu_env);
5256
case 0x3b: /* flush */
5257
if (!((dc)->def->features & CPU_FEATURE_FLUSH))
5261
case 0x3c: /* save */
5262
gen_helper_save(cpu_env);
5263
gen_store_gpr(dc, rd, cpu_tmp0);
5265
case 0x3d: /* restore */
5266
gen_helper_restore(cpu_env);
5267
gen_store_gpr(dc, rd, cpu_tmp0);
5269
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
5270
case 0x3e: /* V9 done/retry */
5274
if (!supervisor(dc))
5276
dc->npc = DYNAMIC_PC;
5277
dc->pc = DYNAMIC_PC;
5278
gen_helper_done(cpu_env);
5281
if (!supervisor(dc))
5283
dc->npc = DYNAMIC_PC;
5284
dc->pc = DYNAMIC_PC;
5285
gen_helper_retry(cpu_env);
5300
case 3: /* load/store instructions */
5302
unsigned int xop = GET_FIELD(insn, 7, 12);
5303
/* ??? gen_address_mask prevents us from using a source
5304
register directly. Always generate a temporary. */
5305
TCGv cpu_addr = get_temp_tl(dc);
5307
tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
5308
if (xop == 0x3c || xop == 0x3e) {
5309
/* V9 casa/casxa : no offset */
5310
} else if (IS_IMM) { /* immediate */
5311
simm = GET_FIELDs(insn, 19, 31);
5313
tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
5315
} else { /* register */
5316
rs2 = GET_FIELD(insn, 27, 31);
5318
tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
5321
if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
5322
(xop > 0x17 && xop <= 0x1d ) ||
5323
(xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
5324
TCGv cpu_val = gen_dest_gpr(dc, rd);
5327
case 0x0: /* ld, V9 lduw, load unsigned word */
5328
gen_address_mask(dc, cpu_addr);
5329
tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
5331
case 0x1: /* ldub, load unsigned byte */
5332
gen_address_mask(dc, cpu_addr);
5333
tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
5335
case 0x2: /* lduh, load unsigned halfword */
5336
gen_address_mask(dc, cpu_addr);
5337
tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
5339
case 0x3: /* ldd, load double word */
5345
gen_address_mask(dc, cpu_addr);
5346
t64 = tcg_temp_new_i64();
5347
tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
5348
tcg_gen_trunc_i64_tl(cpu_val, t64);
5349
tcg_gen_ext32u_tl(cpu_val, cpu_val);
5350
gen_store_gpr(dc, rd + 1, cpu_val);
5351
tcg_gen_shri_i64(t64, t64, 32);
5352
tcg_gen_trunc_i64_tl(cpu_val, t64);
5353
tcg_temp_free_i64(t64);
5354
tcg_gen_ext32u_tl(cpu_val, cpu_val);
5357
case 0x9: /* ldsb, load signed byte */
5358
gen_address_mask(dc, cpu_addr);
5359
tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
5361
case 0xa: /* ldsh, load signed halfword */
5362
gen_address_mask(dc, cpu_addr);
5363
tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
5365
case 0xd: /* ldstub */
5366
gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
5369
/* swap, swap register with memory. Also atomically */
5370
CHECK_IU_FEATURE(dc, SWAP);
5371
cpu_src1 = gen_load_gpr(dc, rd);
5372
gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
5373
dc->mem_idx, MO_TEUL);
5375
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5376
case 0x10: /* lda, V9 lduwa, load word alternate */
5377
gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5379
case 0x11: /* lduba, load unsigned byte alternate */
5380
gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5382
case 0x12: /* lduha, load unsigned halfword alternate */
5383
gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5385
case 0x13: /* ldda, load double word alternate */
5389
gen_ldda_asi(dc, cpu_addr, insn, rd);
5391
case 0x19: /* ldsba, load signed byte alternate */
5392
gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
5394
case 0x1a: /* ldsha, load signed halfword alternate */
5395
gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
5397
case 0x1d: /* ldstuba -- XXX: should be atomically */
5398
gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
5400
case 0x1f: /* swapa, swap reg with alt. memory. Also
5402
CHECK_IU_FEATURE(dc, SWAP);
5403
cpu_src1 = gen_load_gpr(dc, rd);
5404
gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
5407
#ifndef TARGET_SPARC64
5408
case 0x30: /* ldc */
5409
case 0x31: /* ldcsr */
5410
case 0x33: /* lddc */
5414
#ifdef TARGET_SPARC64
5415
case 0x08: /* V9 ldsw */
5416
gen_address_mask(dc, cpu_addr);
5417
tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
5419
case 0x0b: /* V9 ldx */
5420
gen_address_mask(dc, cpu_addr);
5421
tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
5423
case 0x18: /* V9 ldswa */
5424
gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
5426
case 0x1b: /* V9 ldxa */
5427
gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
5429
case 0x2d: /* V9 prefetch, no effect */
5431
case 0x30: /* V9 ldfa */
5432
if (gen_trap_ifnofpu(dc)) {
5435
gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
5436
gen_update_fprs_dirty(dc, rd);
5438
case 0x33: /* V9 lddfa */
5439
if (gen_trap_ifnofpu(dc)) {
5442
gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5443
gen_update_fprs_dirty(dc, DFPREG(rd));
5445
case 0x3d: /* V9 prefetcha, no effect */
5447
case 0x32: /* V9 ldqfa */
5448
CHECK_FPU_FEATURE(dc, FLOAT128);
5449
if (gen_trap_ifnofpu(dc)) {
5452
gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5453
gen_update_fprs_dirty(dc, QFPREG(rd));
5459
gen_store_gpr(dc, rd, cpu_val);
5460
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5463
} else if (xop >= 0x20 && xop < 0x24) {
5464
if (gen_trap_ifnofpu(dc)) {
5468
case 0x20: /* ldf, load fpreg */
5469
gen_address_mask(dc, cpu_addr);
5470
cpu_dst_32 = gen_dest_fpr_F(dc);
5471
tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5472
dc->mem_idx, MO_TEUL);
5473
gen_store_fpr_F(dc, rd, cpu_dst_32);
5475
case 0x21: /* ldfsr, V9 ldxfsr */
5476
#ifdef TARGET_SPARC64
5477
gen_address_mask(dc, cpu_addr);
5479
TCGv_i64 t64 = tcg_temp_new_i64();
5480
tcg_gen_qemu_ld_i64(t64, cpu_addr,
5481
dc->mem_idx, MO_TEQ);
5482
gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
5483
tcg_temp_free_i64(t64);
5487
cpu_dst_32 = get_temp_i32(dc);
5488
tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5489
dc->mem_idx, MO_TEUL);
5490
gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
5492
case 0x22: /* ldqf, load quad fpreg */
5493
CHECK_FPU_FEATURE(dc, FLOAT128);
5494
gen_address_mask(dc, cpu_addr);
5495
cpu_src1_64 = tcg_temp_new_i64();
5496
tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5497
MO_TEQ | MO_ALIGN_4);
5498
tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5499
cpu_src2_64 = tcg_temp_new_i64();
5500
tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
5501
MO_TEQ | MO_ALIGN_4);
5502
gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
5503
tcg_temp_free_i64(cpu_src1_64);
5504
tcg_temp_free_i64(cpu_src2_64);
5506
case 0x23: /* lddf, load double fpreg */
5507
gen_address_mask(dc, cpu_addr);
5508
cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5509
tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
5510
MO_TEQ | MO_ALIGN_4);
5511
gen_store_fpr_D(dc, rd, cpu_dst_64);
5516
} else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
5517
xop == 0xe || xop == 0x1e) {
5518
TCGv cpu_val = gen_load_gpr(dc, rd);
5521
case 0x4: /* st, store word */
5522
gen_address_mask(dc, cpu_addr);
5523
tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
5525
case 0x5: /* stb, store byte */
5526
gen_address_mask(dc, cpu_addr);
5527
tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
5529
case 0x6: /* sth, store halfword */
5530
gen_address_mask(dc, cpu_addr);
5531
tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
5533
case 0x7: /* std, store double word */
5540
gen_address_mask(dc, cpu_addr);
5541
lo = gen_load_gpr(dc, rd + 1);
5542
t64 = tcg_temp_new_i64();
5543
tcg_gen_concat_tl_i64(t64, lo, cpu_val);
5544
tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
5545
tcg_temp_free_i64(t64);
5548
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5549
case 0x14: /* sta, V9 stwa, store word alternate */
5550
gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5552
case 0x15: /* stba, store byte alternate */
5553
gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5555
case 0x16: /* stha, store halfword alternate */
5556
gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5558
case 0x17: /* stda, store double word alternate */
5562
gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
5565
#ifdef TARGET_SPARC64
5566
case 0x0e: /* V9 stx */
5567
gen_address_mask(dc, cpu_addr);
5568
tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
5570
case 0x1e: /* V9 stxa */
5571
gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
5577
} else if (xop > 0x23 && xop < 0x28) {
5578
if (gen_trap_ifnofpu(dc)) {
5582
case 0x24: /* stf, store fpreg */
5583
gen_address_mask(dc, cpu_addr);
5584
cpu_src1_32 = gen_load_fpr_F(dc, rd);
5585
tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
5586
dc->mem_idx, MO_TEUL);
5588
case 0x25: /* stfsr, V9 stxfsr */
5590
#ifdef TARGET_SPARC64
5591
gen_address_mask(dc, cpu_addr);
5593
tcg_gen_qemu_st64(cpu_fsr, cpu_addr, dc->mem_idx);
5597
tcg_gen_qemu_st32(cpu_fsr, cpu_addr, dc->mem_idx);
5601
#ifdef TARGET_SPARC64
5602
/* V9 stqf, store quad fpreg */
5603
CHECK_FPU_FEATURE(dc, FLOAT128);
5604
gen_address_mask(dc, cpu_addr);
5605
/* ??? While stqf only requires 4-byte alignment, it is
5606
legal for the cpu to signal the unaligned exception.
5607
The OS trap handler is then required to fix it up.
5608
For qemu, this avoids having to probe the second page
5609
before performing the first write. */
5610
cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
5611
tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
5612
dc->mem_idx, MO_TEQ | MO_ALIGN_16);
5613
tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5614
cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
5615
tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
5616
dc->mem_idx, MO_TEQ);
5618
#else /* !TARGET_SPARC64 */
5619
/* stdfq, store floating point queue */
5620
#if defined(CONFIG_USER_ONLY)
5623
if (!supervisor(dc))
5625
if (gen_trap_ifnofpu(dc)) {
5631
case 0x27: /* stdf, store double fpreg */
5632
gen_address_mask(dc, cpu_addr);
5633
cpu_src1_64 = gen_load_fpr_D(dc, rd);
5634
tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5635
MO_TEQ | MO_ALIGN_4);
5640
} else if (xop > 0x33 && xop < 0x3f) {
5642
#ifdef TARGET_SPARC64
5643
case 0x34: /* V9 stfa */
5644
if (gen_trap_ifnofpu(dc)) {
5647
gen_stf_asi(dc, cpu_addr, insn, 4, rd);
5649
case 0x36: /* V9 stqfa */
5651
CHECK_FPU_FEATURE(dc, FLOAT128);
5652
if (gen_trap_ifnofpu(dc)) {
5655
gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5658
case 0x37: /* V9 stdfa */
5659
if (gen_trap_ifnofpu(dc)) {
5662
gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5664
case 0x3e: /* V9 casxa */
5665
rs2 = GET_FIELD(insn, 27, 31);
5666
cpu_src2 = gen_load_gpr(dc, rs2);
5667
gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5670
case 0x34: /* stc */
5671
case 0x35: /* stcsr */
5672
case 0x36: /* stdcq */
5673
case 0x37: /* stdc */
5676
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5677
case 0x3c: /* V9 or LEON3 casa */
5678
#ifndef TARGET_SPARC64
5679
CHECK_IU_FEATURE(dc, CASA);
5681
rs2 = GET_FIELD(insn, 27, 31);
5682
cpu_src2 = gen_load_gpr(dc, rs2);
5683
gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5695
/* default case for non jump instructions */
5696
if (dc->npc == DYNAMIC_PC) {
5697
dc->pc = DYNAMIC_PC;
5699
} else if (dc->npc == JUMP_PC) {
5700
/* we can do a static jump */
5701
gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5705
dc->npc = dc->npc + 4;
5710
gen_exception(dc, TT_ILL_INSN);
5713
gen_exception(dc, TT_UNIMP_FLUSH);
5715
#if !defined(CONFIG_USER_ONLY)
5717
gen_exception(dc, TT_PRIV_INSN);
5721
gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
5723
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5725
gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
5728
#ifndef TARGET_SPARC64
5730
gen_exception(dc, TT_NCP_INSN);
5734
if (dc->n_t32 != 0) {
5736
for (i = dc->n_t32 - 1; i >= 0; --i) {
5737
tcg_temp_free_i32(dc->t32[i]);
5741
if (dc->n_ttl != 0) {
5743
for (i = dc->n_ttl - 1; i >= 0; --i) {
5744
tcg_temp_free(dc->ttl[i]);
5750
void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
5752
SPARCCPU *cpu = sparc_env_get_cpu(env);
5753
CPUState *cs = CPU(cpu);
5754
target_ulong pc_start, last_pc;
5755
DisasContext dc1, *dc = &dc1;
5760
memset(dc, 0, sizeof(DisasContext));
5765
dc->npc = (target_ulong) tb->cs_base;
5766
dc->cc_op = CC_OP_DYNAMIC;
5767
dc->mem_idx = tb->flags & TB_FLAG_MMU_MASK;
5769
dc->fpu_enabled = tb_fpu_enabled(tb->flags);
5770
dc->address_mask_32bit = tb_am_enabled(tb->flags);
5771
dc->singlestep = (cs->singlestep_enabled || singlestep);
5772
#ifndef CONFIG_USER_ONLY
5773
dc->supervisor = (tb->flags & TB_FLAG_SUPER) != 0;
5775
#ifdef TARGET_SPARC64
5777
dc->asi = (tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5778
#ifndef CONFIG_USER_ONLY
5779
dc->hypervisor = (tb->flags & TB_FLAG_HYPER) != 0;
5784
max_insns = tb->cflags & CF_COUNT_MASK;
5785
if (max_insns == 0) {
5786
max_insns = CF_COUNT_MASK;
5788
if (max_insns > TCG_MAX_INSNS) {
5789
max_insns = TCG_MAX_INSNS;
5794
if (dc->npc & JUMP_PC) {
5795
assert(dc->jump_pc[1] == dc->pc + 4);
5796
tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
5798
tcg_gen_insn_start(dc->pc, dc->npc);
5803
if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
5804
if (dc->pc != pc_start) {
5807
gen_helper_debug(cpu_env);
5813
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5817
insn = cpu_ldl_code(env, dc->pc);
5819
disas_sparc_insn(dc, insn);
5823
/* if the next PC is different, we abort now */
5824
if (dc->pc != (last_pc + 4))
5826
/* if we reach a page boundary, we stop generation so that the
5827
PC of a TT_TFAULT exception is always in the right page */
5828
if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
5830
/* if single step mode, we generate only one instruction and
5831
generate an exception */
5832
if (dc->singlestep) {
5835
} while (!tcg_op_buf_full() &&
5836
(dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
5837
num_insns < max_insns);
5840
if (tb->cflags & CF_LAST_IO) {
5844
if (dc->pc != DYNAMIC_PC &&
5845
(dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
5846
/* static PC and NPC: we can use direct chaining */
5847
gen_goto_tb(dc, 0, dc->pc, dc->npc);
5849
if (dc->pc != DYNAMIC_PC) {
5850
tcg_gen_movi_tl(cpu_pc, dc->pc);
5856
gen_tb_end(tb, num_insns);
5858
tb->size = last_pc + 4 - pc_start;
5859
tb->icount = num_insns;
5862
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
5863
&& qemu_log_in_addr_range(pc_start)) {
5865
qemu_log("--------------\n");
5866
qemu_log("IN: %s\n", lookup_symbol(pc_start));
5867
log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
5874
void gen_intermediate_code_init(CPUSPARCState *env)
5877
static const char gregnames[32][4] = {
5878
"g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5879
"o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5880
"l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5881
"i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5883
static const char fregnames[32][4] = {
5884
"f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5885
"f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5886
"f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5887
"f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5890
static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5891
#ifdef TARGET_SPARC64
5892
{ &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
5893
{ &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5895
{ &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
5897
{ &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
5898
{ &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
5901
static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5902
#ifdef TARGET_SPARC64
5903
{ &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5904
{ &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
5905
{ &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
5906
{ &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
5908
{ &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
5909
{ &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
5910
{ &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
5911
{ &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
5912
{ &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
5914
{ &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5915
{ &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
5916
{ &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
5917
{ &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
5918
{ &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
5919
{ &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5920
{ &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5921
{ &cpu_y, offsetof(CPUSPARCState, y), "y" },
5922
#ifndef CONFIG_USER_ONLY
5923
{ &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5929
/* init various static tables */
5935
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
5936
tcg_ctx.tcg_env = cpu_env;
5938
cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
5939
offsetof(CPUSPARCState, regwptr),
5942
for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5943
*r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
5946
for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5947
*rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
5950
TCGV_UNUSED(cpu_regs[0]);
5951
for (i = 1; i < 8; ++i) {
5952
cpu_regs[i] = tcg_global_mem_new(cpu_env,
5953
offsetof(CPUSPARCState, gregs[i]),
5957
for (i = 8; i < 32; ++i) {
5958
cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5959
(i - 8) * sizeof(target_ulong),
5963
for (i = 0; i < TARGET_DPREGS; i++) {
5964
cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
5965
offsetof(CPUSPARCState, fpr[i]),
5970
void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
5973
target_ulong pc = data[0];
5974
target_ulong npc = data[1];
5977
if (npc == DYNAMIC_PC) {
5978
/* dynamic NPC: already stored */
5979
} else if (npc & JUMP_PC) {
5980
/* jump PC: use 'cond' and the jump targets of the translation */
5982
env->npc = npc & ~3;