@@ -1464 +1469 @@
     tcg_temp_free_i32(tmp);
    ...
+static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
+{
+    /* We're emulating OSF/1 PALcode.  Many of these are trivial accesses
+       to internal cpu registers.  */
    ...
+    /* Unprivileged PAL call */
+    if (palcode >= 0x80 && palcode < 0xC0) {
    ...
+            /* No-op inside QEMU.  */
    ...
+            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
    ...
+            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
    ...
+            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
    ...
+#ifndef CONFIG_USER_ONLY
+    /* Privileged PAL code */
+    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
    ...
+            /* No-op inside QEMU.  */
    ...
+            /* No-op inside QEMU.  */
    ...
+            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUState, vptptr));
    ...
+            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
    ...
+            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
    ...
+            /* Note that we already know we're in kernel mode, so we know
+               that PS only contains the 3 IPL bits.  */
+            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));
    ...
+            /* But make sure to store only the 3 IPL bits from the user.  */
+            tmp = tcg_temp_new();
+            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
+            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUState, ps));
    ...
+            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));
    ...
+            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
    ...
+            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
    ...
+            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
+                              offsetof(CPUState, cpu_index));
    ...
+            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
    ...
+    return gen_invalid(ctx);
+}
    ...
+#ifndef CONFIG_USER_ONLY
    ...
+#define PR_BYTE 0x100000
+#define PR_LONG 0x200000
    ...
+static int cpu_pr_data(int pr)
+{
+    switch (pr) {
+    case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
+    case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
+    case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
+    case 3: return offsetof(CPUAlphaState, trap_arg0);
+    case 4: return offsetof(CPUAlphaState, trap_arg1);
+    case 5: return offsetof(CPUAlphaState, trap_arg2);
+    case 6: return offsetof(CPUAlphaState, exc_addr);
+    case 7: return offsetof(CPUAlphaState, palbr);
+    case 8: return offsetof(CPUAlphaState, ptbr);
+    case 9: return offsetof(CPUAlphaState, vptptr);
+    case 10: return offsetof(CPUAlphaState, unique);
+    case 11: return offsetof(CPUAlphaState, sysval);
+    case 12: return offsetof(CPUAlphaState, usp);
    ...
+        return offsetof(CPUAlphaState, shadow[pr - 32]);
    ...
+        return offsetof(CPUAlphaState, scratch[pr - 40]);
    ...
+static void gen_mfpr(int ra, int regno)
+{
+    int data = cpu_pr_data(regno);
+
+    /* In our emulated PALcode, these processor registers have no
+       side effects from reading.  */
    ...
+    /* The basic registers are data only, and unknown registers
+       are read-zero, write-ignore.  */
    ...
+        tcg_gen_movi_i64(cpu_ir[ra], 0);
+    } else if (data & PR_BYTE) {
+        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
+    } else if (data & PR_LONG) {
+        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
    ...
+        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
    ...
+static void gen_mtpr(int rb, int regno)
+{
    ...
+        tmp = tcg_const_i64(0);
    ...
+    /* These two register numbers perform a TLB cache flush.  Thankfully we
+       can only do this inside PALmode, which means that the current basic
+       block cannot be affected by the change in mappings.  */
    ...
+    } else if (regno == 254) {
    ...
+        gen_helper_tbis(tmp);
    ...
+        /* The basic registers are data only, and unknown registers
+           are read-zero, write-ignore.  */
+        int data = cpu_pr_data(regno);
    ...
+        if (data & PR_BYTE) {
+            tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
+        } else if (data & PR_LONG) {
+            tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
    ...
+            tcg_gen_st_i64(tmp, cpu_env, data);
    ...
+#endif /* !USER_ONLY */
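The PR_BYTE and PR_LONG definitions let cpu_pr_data return two facts in a single int: the offsetof() of the field within CPUAlphaState in the low bits (every offset is far below 0x100000), and the access width as tag bits up high, which gen_mfpr and gen_mtpr strip with data & ~PR_BYTE / data & ~PR_LONG before using the offset. A minimal standalone sketch of the same tag-in-offset encoding; the struct and names here are illustrative, not QEMU's:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define TAG_BYTE 0x100000  /* width tags, well above any field offset */
    #define TAG_LONG 0x200000

    struct state {
        uint8_t ps;
        uint32_t pcc_ofs;
        uint64_t usp;
    };

    /* Field offset with the access width tagged into the high bits. */
    static int pr_data(int pr)
    {
        switch (pr) {
        case 0:  return offsetof(struct state, ps) | TAG_BYTE;
        case 2:  return offsetof(struct state, pcc_ofs) | TAG_LONG;
        case 12: return offsetof(struct state, usp);
        }
        return 0;  /* unknown register: read-zero, write-ignore */
    }

    int main(void)
    {
        struct state s = { .ps = 7, .pcc_ofs = 42, .usp = 0xdead };
        int data = pr_data(0);
        uint8_t v;

        assert(data & TAG_BYTE);
        /* Mask the tag back off to recover the plain offset, then do a
           1-byte load, as gen_mfpr does with tcg_gen_ld8u_i64. */
        memcpy(&v, (char *)&s + (data & ~TAG_BYTE), sizeof(v));
        assert(v == 7);
        return 0;
    }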
@@ -1467 +1660 @@
 static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
    ...
     uint32_t palcode;
-    int32_t disp21, disp16, disp12;
+    int32_t disp21, disp16;
+#ifndef CONFIG_USER_ONLY
    ...
-    uint8_t opc, ra, rb, rc, fpfn, fn7, fn2, islit, real_islit;
+    uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
    ...
     ExitStatus ret;
@@ -1569 +1742 @@
-        if (!(ctx->amask & AMASK_BWX))
    ...
-        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
+        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
+            gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
    ...
         gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
    ...
-        if (!(ctx->amask & AMASK_BWX))
    ...
-        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
+        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
+            gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
    ...
         gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
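The pattern in this and the following hunks is the same throughout: the old code tested the CPU's amask feature bits at translation time via ctx->amask, while the new code tests a copy of those bits that was folded into tb->flags when the translation block was keyed. Because tb->flags takes part in translation-block lookup, a block translated with BWX available can never be reused by a CPU configuration without it. A sketch of the folding, with illustrative bit values; the real definitions live in target-alpha/cpu.h and may differ:

    #include <stdint.h>

    /* Illustrative values only -- not QEMU's actual layout. */
    #define AMASK_BWX            (1u << 0)
    #define TB_FLAGS_USER_MODE   (1u << 0)
    #define TB_FLAGS_PAL_MODE    (1u << 1)
    #define TB_FLAGS_AMASK_SHIFT 4
    #define TB_FLAGS_AMASK_BWX   (AMASK_BWX << TB_FLAGS_AMASK_SHIFT)

    /* Everything the translator wants to specialize on is packed into one
       word; the same guest PC translated under different features lands
       in different translation blocks. */
    static uint32_t get_tb_flags(uint32_t amask, int user_mode, int pal_mode)
    {
        uint32_t flags = amask << TB_FLAGS_AMASK_SHIFT;
        if (user_mode) {
            flags |= TB_FLAGS_USER_MODE;
        }
        if (pal_mode) {
            flags |= TB_FLAGS_PAL_MODE;
        }
        return flags;
    }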
@@ -2612 +2783 @@
             switch ((insn >> 12) & 0xF) {
    ...
                 /* Longword physical access (hw_ldl/p) */
-                gen_helper_ldl_raw(cpu_ir[ra], addr);
+                gen_helper_ldl_phys(cpu_ir[ra], addr);
    ...
                 /* Quadword physical access (hw_ldq/p) */
-                gen_helper_ldq_raw(cpu_ir[ra], addr);
+                gen_helper_ldq_phys(cpu_ir[ra], addr);
    ...
                 /* Longword physical access with lock (hw_ldl_l/p) */
-                gen_helper_ldl_l_raw(cpu_ir[ra], addr);
+                gen_helper_ldl_l_phys(cpu_ir[ra], addr);
    ...
                 /* Quadword physical access with lock (hw_ldq_l/p) */
-                gen_helper_ldq_l_raw(cpu_ir[ra], addr);
+                gen_helper_ldq_l_phys(cpu_ir[ra], addr);
    ...
                 /* Longword virtual PTE fetch (hw_ldl/v) */
-                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
    ...
                 /* Quadword virtual PTE fetch (hw_ldq/v) */
-                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
    ...
                 /* Invalid */
    ...
                 goto invalid_opc;
    ...
                 /* Longword virtual access (hw_ldl) */
-                gen_helper_st_virt_to_phys(addr, addr);
-                gen_helper_ldl_raw(cpu_ir[ra], addr);
    ...
                 /* Quadword virtual access (hw_ldq) */
-                gen_helper_st_virt_to_phys(addr, addr);
-                gen_helper_ldq_raw(cpu_ir[ra], addr);
    ...
                 /* Longword virtual access with protection check (hw_ldl/w) */
-                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
+                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
    ...
                 /* Quadword virtual access with protection check (hw_ldq/w) */
-                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
+                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
    ...
                 /* Longword virtual access with alt access mode (hw_ldl/a) */
-                gen_helper_set_alt_mode();
-                gen_helper_st_virt_to_phys(addr, addr);
-                gen_helper_ldl_raw(cpu_ir[ra], addr);
-                gen_helper_restore_mode();
    ...
                 /* Quadword virtual access with alt access mode (hw_ldq/a) */
-                gen_helper_set_alt_mode();
-                gen_helper_st_virt_to_phys(addr, addr);
-                gen_helper_ldq_raw(cpu_ir[ra], addr);
-                gen_helper_restore_mode();
    ...
                 /* Longword virtual access with alternate access mode and
-                 * protection checks (hw_ldl/wa)
    ...
-                gen_helper_set_alt_mode();
-                gen_helper_ldl_data(cpu_ir[ra], addr);
-                gen_helper_restore_mode();
+                   protection checks (hw_ldl/wa) */
+                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
    ...
                 /* Quadword virtual access with alternate access mode and
-                 * protection checks (hw_ldq/wa)
    ...
-                gen_helper_set_alt_mode();
-                gen_helper_ldq_data(cpu_ir[ra], addr);
-                gen_helper_restore_mode();
+                   protection checks (hw_ldq/wa) */
+                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
    ...
             tcg_temp_free(addr);
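Both versions of the HW_LD case dispatch on the same fields: bits 15:12 of the instruction hold the access-type nibble (the value switched on as (insn >> 12) & 0xF) and bits 11:0 hold the signed 12-bit displacement, the disp12 that moved under #ifndef CONFIG_USER_ONLY in the declarations hunk above. A small decoder for just these fields; the struct and its names are ours, the bit positions are the standard Alpha layout:

    #include <stdint.h>
    #include <stdio.h>

    struct hw_ld_fields {
        int ra, rb;      /* register numbers, bits 25:21 and 20:16 */
        int type;        /* access-type nibble, bits 15:12 */
        int32_t disp12;  /* signed displacement, bits 11:0 */
    };

    static struct hw_ld_fields decode_hw_ld(uint32_t insn)
    {
        struct hw_ld_fields d;
        d.ra   = (insn >> 21) & 0x1F;
        d.rb   = (insn >> 16) & 0x1F;
        d.type = (insn >> 12) & 0xF;
        /* Sign-extend the low 12 bits with the same shift-pair idiom the
           translator uses: push bit 11 into the sign position, then
           arithmetic-shift back down. */
        d.disp12 = (int32_t)(insn << 20) >> 20;
        return d;
    }

    int main(void)
    {
        uint32_t insn = (0x1Bu << 26) | (3u << 21) | (5u << 16)
                      | (0xAu << 12) | 0xFF8;
        struct hw_ld_fields d = decode_hw_ld(insn);
        printf("ra=%d rb=%d type=%#x disp=%d\n", d.ra, d.rb, d.type,
               (int)d.disp12);
        return 0;  /* prints: ra=3 rb=5 type=0xa disp=-8 */
    }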
@@ -2700 +2853 @@
-            if (!(ctx->amask & AMASK_BWX))
+            if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
                 goto invalid_opc;
    ...
             if (likely(rc != 31)) {
    ...
                 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
@@ -2711 +2865 @@
-            if (!(ctx->amask & AMASK_BWX))
    ...
-            if (likely(rc != 31)) {
    ...
-                tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
    ...
-                tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
+            if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
+                if (likely(rc != 31)) {
    ...
+                        tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
    ...
+                        tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
@@ -2722 +2878 @@
-            if (!(ctx->amask & AMASK_CIX))
    ...
-            if (likely(rc != 31)) {
    ...
-                tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
    ...
-                gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
+            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
+                if (likely(rc != 31)) {
    ...
+                        tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
    ...
+                        gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
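One idiom worth noting survives the rewrite unchanged in the CTPOP, CTLZ, and CTTZ cases: when the operand is a literal (islit), the result is computed on the host at translate time and emitted as a single tcg_gen_movi_i64, and the helper call is generated only for a register operand. ctpop64 comes from QEMU's host-utils; a portable equivalent of what gets folded is:

    #include <assert.h>
    #include <stdint.h>

    /* Classic SWAR population count; equivalent in effect to the ctpop64
       that the translator evaluates for literal operands. */
    static int my_ctpop64(uint64_t x)
    {
        x = x - ((x >> 1) & 0x5555555555555555ULL);
        x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL);
        x = (x + (x >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
        return (x * 0x0101010101010101ULL) >> 56;
    }

    int main(void)
    {
        assert(my_ctpop64(0) == 0);
        assert(my_ctpop64(0xFF) == 8);
        assert(my_ctpop64(~0ULL) == 64);
        return 0;
    }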
@@ -2733 +2891 @@
-            if (!(ctx->amask & AMASK_MVI))
    ...
-            gen_perr(ra, rb, rc, islit, lit);
+            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+                gen_perr(ra, rb, rc, islit, lit);
@@ -2739 +2898 @@
-            if (!(ctx->amask & AMASK_CIX))
    ...
-            if (likely(rc != 31)) {
    ...
-                tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
    ...
-                gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
+            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
+                if (likely(rc != 31)) {
    ...
+                        tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
    ...
+                        gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
@@ -2750 +2911 @@
-            if (!(ctx->amask & AMASK_CIX))
    ...
-            if (likely(rc != 31)) {
    ...
-                tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
    ...
-                gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
+            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
+                if (likely(rc != 31)) {
    ...
+                        tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
    ...
+                        gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
@@ -2761 +2924 @@
-            if (!(ctx->amask & AMASK_MVI))
    ...
-            if (real_islit || ra != 31)
    ...
-            gen_unpkbw (rb, rc);
+            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+                if (real_islit || ra != 31) {
    ...
@@ -2769 +2934 @@
-            if (!(ctx->amask & AMASK_MVI))
    ...
-            if (real_islit || ra != 31)
    ...
-            gen_unpkbl (rb, rc);
+            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+                if (real_islit || ra != 31) {
    ...
@@ -2777 +2944 @@
-            if (!(ctx->amask & AMASK_MVI))
    ...
-            if (real_islit || ra != 31)
    ...
+            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+                if (real_islit || ra != 31) {
    ...
@@ -2785 +2954 @@
-            if (!(ctx->amask & AMASK_MVI))
    ...
-            if (real_islit || ra != 31)
    ...
+            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+                if (real_islit || ra != 31) {
    ...
@@ -2793 +2964 @@
-            if (!(ctx->amask & AMASK_MVI))
    ...
-            gen_minsb8 (ra, rb, rc, islit, lit);
+            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+                gen_minsb8(ra, rb, rc, islit, lit);
@@ -2799 +2971 @@
-            if (!(ctx->amask & AMASK_MVI))
    ...
-            gen_minsw4 (ra, rb, rc, islit, lit);
+            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+                gen_minsw4(ra, rb, rc, islit, lit);
@@ -2805 +2978 @@
-            if (!(ctx->amask & AMASK_MVI))
    ...
-            gen_minub8 (ra, rb, rc, islit, lit);
+            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+                gen_minub8(ra, rb, rc, islit, lit);
@@ -2811 +2985 @@
-            if (!(ctx->amask & AMASK_MVI))
    ...
-            gen_minuw4 (ra, rb, rc, islit, lit);
+            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+                gen_minuw4(ra, rb, rc, islit, lit);
@@ -2817 +2992 @@
-            if (!(ctx->amask & AMASK_MVI))
    ...
-            gen_maxub8 (ra, rb, rc, islit, lit);
+            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+                gen_maxub8(ra, rb, rc, islit, lit);
@@ -2823 +2999 @@
-            if (!(ctx->amask & AMASK_MVI))
    ...
-            gen_maxuw4 (ra, rb, rc, islit, lit);
+            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+                gen_maxuw4(ra, rb, rc, islit, lit);
@@ -2829 +3006 @@
-            if (!(ctx->amask & AMASK_MVI))
    ...
-            gen_maxsb8 (ra, rb, rc, islit, lit);
+            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+                gen_maxsb8(ra, rb, rc, islit, lit);
@@ -2835 +3013 @@
-            if (!(ctx->amask & AMASK_MVI))
    ...
-            gen_maxsw4 (ra, rb, rc, islit, lit);
+            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+                gen_maxsw4(ra, rb, rc, islit, lit);
@@ -2841 +3020 @@
-            if (!(ctx->amask & AMASK_FIX))
+            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                 goto invalid_opc;
    ...
             if (likely(rc != 31)) {
    ...
                 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
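FTOIT ("floating to integer move") transfers the 64 register bits unchanged, which is why the FIX-extension check above guards nothing more than a plain tcg_gen_mov_i64 from cpu_fir[ra] to cpu_ir[rc]. In C terms the operation is a bit copy, not a numeric cast; a small illustration:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* FTOIT semantics: move the raw bits, not the numeric value. */
    static uint64_t ftoit_bits(double f)
    {
        uint64_t bits;
        memcpy(&bits, &f, sizeof(bits));
        return bits;
    }

    int main(void)
    {
        assert(ftoit_bits(1.0) == 0x3FF0000000000000ULL);
        /* A value-converting cast would give 1 instead. */
        assert((uint64_t)1.0 == 1);
        return 0;
    }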
@@ -2872 +3053 @@
         /* HW_MTPR (PALcode) */
-#if defined (CONFIG_USER_ONLY)
    ...
-            TCGv tmp1 = tcg_const_i32(insn & 0xFF);
    ...
-                gen_helper_mtpr(tmp1, cpu_ir[ra]);
    ...
-                TCGv tmp2 = tcg_const_i64(0);
-                gen_helper_mtpr(tmp1, tmp2);
-                tcg_temp_free(tmp2);
    ...
-            tcg_temp_free(tmp1);
-            ret = EXIT_PC_STALE;
+#ifndef CONFIG_USER_ONLY
+        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
+            gen_mtpr(rb, insn & 0xffff);
    ...
@@ -2893 +3062 @@
-        /* HW_REI (PALcode) */
-#if defined (CONFIG_USER_ONLY)
    ...
-        gen_helper_hw_rei();
    ...
-            tmp = tcg_temp_new();
-            tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
    ...
-            tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
-            gen_helper_hw_ret(tmp);
    ...
-        ret = EXIT_PC_UPDATED;
+        /* HW_RET (PALcode) */
+#ifndef CONFIG_USER_ONLY
+        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
    ...
+            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
+               address from EXC_ADDR.  This turns out to be useful for our
+               emulation PALcode, so continue to accept it.  */
+            TCGv tmp = tcg_temp_new();
+            tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUState, exc_addr));
+            gen_helper_hw_ret(tmp);
    ...
+            gen_helper_hw_ret(cpu_ir[rb]);
    ...
+            ret = EXIT_PC_UPDATED;
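The deleted HW_REI arm computed its displacement with a shift-pair sign extension: (((int64_t)insn << 51) >> 51) pushes bit 12 of the instruction word into the sign position and the arithmetic right shift smears it back down, producing the signed value of the low 13 bits. A worked version:

    #include <assert.h>
    #include <stdint.h>

    /* Sign-extend the low 13 bits of insn, exactly as the removed code
       did; anything above bit 12 is discarded. */
    static int64_t sext13(uint32_t insn)
    {
        return ((int64_t)insn << 51) >> 51;
    }

    int main(void)
    {
        assert(sext13(0x0008) == 8);     /* bit 12 clear: positive */
        assert(sext13(0x1FF8) == -8);    /* bit 12 set: negative */
        assert(sext13(0xFFFF2008) == 8); /* high bits ignored */
        return 0;
    }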
@@ -2917 +3082 @@
         /* HW_ST (PALcode) */
-#if defined (CONFIG_USER_ONLY)
    ...
+#ifndef CONFIG_USER_ONLY
+        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
    ...
             TCGv addr, val;
             addr = tcg_temp_new();
    ...
             switch ((insn >> 12) & 0xF) {
    ...
                 /* Longword physical access */
-                gen_helper_stl_raw(val, addr);
+                gen_helper_stl_phys(addr, val);
    ...
                 /* Quadword physical access */
-                gen_helper_stq_raw(val, addr);
+                gen_helper_stq_phys(addr, val);
    ...
                 /* Longword physical access with lock */
-                gen_helper_stl_c_raw(val, val, addr);
+                gen_helper_stl_c_phys(val, addr, val);
    ...
                 /* Quadword physical access with lock */
-                gen_helper_stq_c_raw(val, val, addr);
+                gen_helper_stq_c_phys(val, addr, val);
    ...
                 /* Longword virtual access */
-                gen_helper_st_virt_to_phys(addr, addr);
-                gen_helper_stl_raw(val, addr);
    ...
                 /* Quadword virtual access */
-                gen_helper_st_virt_to_phys(addr, addr);
-                gen_helper_stq_raw(val, addr);
    ...
                 goto invalid_opc;
@@ -3325 +3470 @@
     env->implver = implver;
     env->amask = amask;
    ...
 #if defined (CONFIG_USER_ONLY)
+    env->ps = PS_USER_MODE;
     cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
                                | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
    ...
     env->lock_addr = -1;
    ...
-    /* Initialize IPR */
-#if defined (CONFIG_USER_ONLY)
-    env->ipr[IPR_EXC_ADDR] = 0;
-    env->ipr[IPR_EXC_SUM] = 0;
-    env->ipr[IPR_EXC_MASK] = 0;
    ...
-    // hwpcb = env->ipr[IPR_PCBB];
-    env->ipr[IPR_ASN] = 0;
-    env->ipr[IPR_ASTEN] = 0;
-    env->ipr[IPR_ASTSR] = 0;
-    env->ipr[IPR_DATFX] = 0;
    ...
-    // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
-    // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
-    // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
-    // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
-    env->ipr[IPR_FEN] = 0;
-    env->ipr[IPR_IPL] = 31;
-    env->ipr[IPR_MCES] = 0;
-    env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
-    // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
-    env->ipr[IPR_SISR] = 0;
-    env->ipr[IPR_VIRBND] = -1ULL;
    ...
     qemu_init_vcpu(env);
@@ -3370 +3485 @@
-void gen_pc_load(CPUState *env, TranslationBlock *tb,
-                 unsigned long searched_pc, int pc_pos, void *puc)
+void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
    ...
     env->pc = gen_opc_pc[pc_pos];
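This last hunk tracks a QEMU-wide interface change: gen_pc_load, which was handed the raw host searched_pc plus an opaque puc pointer, becomes restore_state_to_opc, which receives the already-resolved opcode index pc_pos. The Alpha side then only replays the gen_opc_pc entry recorded during translation. A sketch of that record/replay pairing, with simplified declarations of the side table:

    #include <stdint.h>

    /* Sketch only: during translation each guest instruction records its
       PC at the current opcode index; when an exception needs a precise
       guest PC, the core resolves the faulting host PC to an index
       (pc_pos) and the target copies the recorded value back out. */
    enum { MAX_OPS = 1024 };
    static uint64_t gen_opc_pc[MAX_OPS];

    static void record_insn_start(int opc_idx, uint64_t guest_pc)
    {
        gen_opc_pc[opc_idx] = guest_pc;  /* at translate time */
    }

    static void restore_pc(uint64_t *env_pc, int pc_pos)
    {
        *env_pc = gen_opc_pc[pc_pos];    /* at exception time */
    }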