~ubuntu-branches/ubuntu/vivid/qemu/vivid

Viewing changes to target-arm/translate.c

  • Committer: Package Import Robot
  • Author(s): dann frazier
  • Date: 2014-02-11 15:41:53 UTC
  • Revision ID: package-import@ubuntu.com-20140211154153-2d001tf0ium08u81
Tags: 1.7.0+dfsg-3ubuntu2
* Backport changes to enable qemu-user-static support for aarch64
* debian/control: add ppc64el to Architectures
* debian/rules: only install qemu-system-aarch64 on arm64.
  Fixes a FTBFS when built twice in a row on non-arm64 due to a stale
  debian/qemu-system-aarch64 directory.

--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -67,11 +67,10 @@
 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
 static TCGv_i32 cpu_R[16];
 static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
-static TCGv_i32 cpu_exclusive_addr;
-static TCGv_i32 cpu_exclusive_val;
-static TCGv_i32 cpu_exclusive_high;
+static TCGv_i64 cpu_exclusive_addr;
+static TCGv_i64 cpu_exclusive_val;
 #ifdef CONFIG_USER_ONLY
-static TCGv_i32 cpu_exclusive_test;
+static TCGv_i64 cpu_exclusive_test;
 static TCGv_i32 cpu_exclusive_info;
 #endif

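Why the widening: the old code tracked a doubleword exclusive access as two 32-bit halves (cpu_exclusive_val plus cpu_exclusive_high); holding the whole doubleword in one 64-bit global lets LDREXD/STREXD be checked against a single value, which is what the later load/store-exclusive hunks rely on. A minimal standalone sketch of the hi:lo packing that tcg_gen_concat_i32_i64 performs on the guest values at runtime (plain C, not QEMU API):

    #include <stdint.h>

    /* hi:lo packing, as done by tcg_gen_concat_i32_i64 on the two words */
    static uint64_t pack_doubleword(uint32_t lo, uint32_t hi)
    {
        return ((uint64_t)hi << 32) | lo;
    }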
@@ -102,14 +101,12 @@
     cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
     cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");

-    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
+    cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
         offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
-    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
+    cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
         offsetof(CPUARMState, exclusive_val), "exclusive_val");
-    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
-        offsetof(CPUARMState, exclusive_high), "exclusive_high");
 #ifdef CONFIG_USER_ONLY
-    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
+    cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
         offsetof(CPUARMState, exclusive_test), "exclusive_test");
     cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
         offsetof(CPUARMState, exclusive_info), "exclusive_info");
@@ -1113,27 +1110,29 @@
 VFP_GEN_FTOI(tosiz)
 #undef VFP_GEN_FTOI

-#define VFP_GEN_FIX(name) \
+#define VFP_GEN_FIX(name, round) \
 static inline void gen_vfp_##name(int dp, int shift, int neon) \
 { \
     TCGv_i32 tmp_shift = tcg_const_i32(shift); \
     TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
     if (dp) { \
-        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
+        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
+                                        statusptr); \
     } else { \
-        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
+        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
+                                        statusptr); \
     } \
     tcg_temp_free_i32(tmp_shift); \
     tcg_temp_free_ptr(statusptr); \
 }
-VFP_GEN_FIX(tosh)
-VFP_GEN_FIX(tosl)
-VFP_GEN_FIX(touh)
-VFP_GEN_FIX(toul)
-VFP_GEN_FIX(shto)
-VFP_GEN_FIX(slto)
-VFP_GEN_FIX(uhto)
-VFP_GEN_FIX(ulto)
+VFP_GEN_FIX(tosh, _round_to_zero)
+VFP_GEN_FIX(tosl, _round_to_zero)
+VFP_GEN_FIX(touh, _round_to_zero)
+VFP_GEN_FIX(toul, _round_to_zero)
+VFP_GEN_FIX(shto, )
+VFP_GEN_FIX(slto, )
+VFP_GEN_FIX(uhto, )
+VFP_GEN_FIX(ulto, )
 #undef VFP_GEN_FIX

 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
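The extra macro parameter works by token pasting: ## glues the round argument onto the helper name, and an empty argument leaves the name unchanged. A minimal sketch of the same trick outside QEMU (the do_* functions here are made up for illustration):

    #include <stdio.h>

    #define CALL(name, suffix) do_##name##suffix()

    static void do_tosh_round_to_zero(void) { puts("tosh, round to zero"); }
    static void do_shto(void) { puts("shto, default rounding"); }

    int main(void)
    {
        CALL(tosh, _round_to_zero); /* expands to do_tosh_round_to_zero() */
        CALL(shto, );               /* empty suffix: expands to do_shto() */
        return 0;
    }

So VFP_GEN_FIX(tosh, _round_to_zero) ends up calling gen_helper_vfp_toshd_round_to_zero / gen_helper_vfp_toshs_round_to_zero, while VFP_GEN_FIX(shto, ) keeps the old unsuffixed helper names.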
@@ -2740,9 +2739,9 @@
         tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
         tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
         if (vmin) {
-            gen_helper_vfp_minnmd(dest, frn, frm, fpst);
+            gen_helper_vfp_minnumd(dest, frn, frm, fpst);
         } else {
-            gen_helper_vfp_maxnmd(dest, frn, frm, fpst);
+            gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
         }
         tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
         tcg_temp_free_i64(frn);
@@ -2758,9 +2757,9 @@
         tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
         tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
         if (vmin) {
-            gen_helper_vfp_minnms(dest, frn, frm, fpst);
+            gen_helper_vfp_minnums(dest, frn, frm, fpst);
         } else {
-            gen_helper_vfp_maxnms(dest, frn, frm, fpst);
+            gen_helper_vfp_maxnums(dest, frn, frm, fpst);
         }
         tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
         tcg_temp_free_i32(frn);
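The rename from minnm/maxnm to minnum/maxnum tracks the softfloat names for the IEEE 754-2008 minNum/maxNum operations, whose distinguishing feature is that a single quiet NaN operand is ignored rather than propagated. A rough standalone sketch of that quiet-NaN rule (the real helpers also deal with signaling NaNs and the -0/+0 ordering):

    #include <math.h>

    /* minNum-style minimum: prefer the numeric operand over a quiet NaN */
    static float min_num(float a, float b)
    {
        if (isnan(a)) return b;
        if (isnan(b)) return a;
        return a < b ? a : b;
    }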
@@ -2772,6 +2771,113 @@
     return 0;
 }

+static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
+                        int rounding)
+{
+    TCGv_ptr fpst = get_fpstatus_ptr(0);
+    TCGv_i32 tcg_rmode;
+
+    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
+    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+
+    if (dp) {
+        TCGv_i64 tcg_op;
+        TCGv_i64 tcg_res;
+        tcg_op = tcg_temp_new_i64();
+        tcg_res = tcg_temp_new_i64();
+        tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
+        gen_helper_rintd(tcg_res, tcg_op, fpst);
+        tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
+        tcg_temp_free_i64(tcg_op);
+        tcg_temp_free_i64(tcg_res);
+    } else {
+        TCGv_i32 tcg_op;
+        TCGv_i32 tcg_res;
+        tcg_op = tcg_temp_new_i32();
+        tcg_res = tcg_temp_new_i32();
+        tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
+        gen_helper_rints(tcg_res, tcg_op, fpst);
+        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
+        tcg_temp_free_i32(tcg_op);
+        tcg_temp_free_i32(tcg_res);
+    }
+
+    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+    tcg_temp_free_i32(tcg_rmode);
+
+    tcg_temp_free_ptr(fpst);
+    return 0;
+}
+
+static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
+                       int rounding)
+{
+    bool is_signed = extract32(insn, 7, 1);
+    TCGv_ptr fpst = get_fpstatus_ptr(0);
+    TCGv_i32 tcg_rmode, tcg_shift;
+
+    tcg_shift = tcg_const_i32(0);
+
+    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
+    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+
+    if (dp) {
+        TCGv_i64 tcg_double, tcg_res;
+        TCGv_i32 tcg_tmp;
+        /* Rd is encoded as a single precision register even when the source
+         * is double precision.
+         */
+        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
+        tcg_double = tcg_temp_new_i64();
+        tcg_res = tcg_temp_new_i64();
+        tcg_tmp = tcg_temp_new_i32();
+        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
+        if (is_signed) {
+            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
+        } else {
+            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
+        }
+        tcg_gen_trunc_i64_i32(tcg_tmp, tcg_res);
+        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
+        tcg_temp_free_i32(tcg_tmp);
+        tcg_temp_free_i64(tcg_res);
+        tcg_temp_free_i64(tcg_double);
+    } else {
+        TCGv_i32 tcg_single, tcg_res;
+        tcg_single = tcg_temp_new_i32();
+        tcg_res = tcg_temp_new_i32();
+        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
+        if (is_signed) {
+            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
+        } else {
+            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
+        }
+        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
+        tcg_temp_free_i32(tcg_res);
+        tcg_temp_free_i32(tcg_single);
+    }
+
+    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+    tcg_temp_free_i32(tcg_rmode);
+
+    tcg_temp_free_i32(tcg_shift);
+
+    tcg_temp_free_ptr(fpst);
+
+    return 0;
+}
+
+/* Table for converting the most common AArch32 encoding of
+ * rounding mode to arm_fprounding order (which matches the
+ * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
+ */
+static const uint8_t fp_decode_rm[] = {
+    FPROUNDING_TIEAWAY,
+    FPROUNDING_TIEEVEN,
+    FPROUNDING_POSINF,
+    FPROUNDING_NEGINF,
+};
+
 static int disas_vfp_v8_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
 {
     uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
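Both new helpers rely on the same idiom: gen_helper_set_rmode installs the requested rounding mode in the FP status word and hands the previous mode back through the same TCG temporary, so invoking it a second time with that temporary restores the original mode around the operation. A standalone analogue of the swap (fp_rmode and set_rmode here are stand-ins, not QEMU API):

    #include <stdio.h>

    static int fp_rmode; /* stand-in for the softfloat rounding-mode field */

    /* install a new mode, return the old one -- mirrors set_rmode's contract */
    static int set_rmode(int new_mode)
    {
        int old = fp_rmode;
        fp_rmode = new_mode;
        return old;
    }

    int main(void)
    {
        int saved = set_rmode(3);      /* swap in, e.g., round-to-zero */
        /* ... perform the rounded operation ... */
        set_rmode(saved);              /* second call restores the old mode */
        printf("mode restored to %d\n", fp_rmode);
        return 0;
    }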
@@ -2794,6 +2900,14 @@
         return handle_vsel(insn, rd, rn, rm, dp);
     } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
         return handle_vminmaxnm(insn, rd, rn, rm, dp);
+    } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
+        /* VRINTA, VRINTN, VRINTP, VRINTM */
+        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
+        return handle_vrint(insn, rd, rm, dp, rounding);
+    } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
+        /* VCVTA, VCVTN, VCVTP, VCVTM */
+        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
+        return handle_vcvt(insn, rd, rm, dp, rounding);
     }
     return 1;
 }
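Each dispatch line is a mask/value test: the instruction matches when the bits selected by the mask equal the pattern, and the zero bits of the mask act as don't-cares (condition field, register numbers, and the rounding-mode bits that feed fp_decode_rm). A one-function sketch of the test, with values taken from the VRINT arm above:

    #include <stdbool.h>
    #include <stdint.h>

    /* true when the non-don't-care bits of insn equal the pattern */
    static bool matches(uint32_t insn, uint32_t mask, uint32_t pattern)
    {
        return (insn & mask) == pattern;
    }

    /* e.g. matches(insn, 0x0fbc0ed0, 0x0eb80a40) selects VRINTA/N/P/M */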
@@ -3040,16 +3154,19 @@
                     VFP_DREG_N(rn, insn);
                 }

-                if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
-                    /* Integer or single precision destination.  */
+                if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
+                                 ((rn & 0x1e) == 0x6))) {
+                    /* Integer or single/half precision destination.  */
                     rd = VFP_SREG_D(insn);
                 } else {
                     VFP_DREG_D(rd, insn);
                 }
                 if (op == 15 &&
-                    (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
-                    /* VCVT from int is always from S reg regardless of dp bit.
-                     * VCVT with immediate frac_bits has same format as SREG_M
+                    (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
+                     ((rn & 0x1e) == 0x4))) {
+                    /* VCVT from int or half precision is always from S reg
+                     * regardless of dp bit. VCVT with immediate frac_bits
+                     * has same format as SREG_M.
                      */
                     rm = VFP_SREG_M(insn);
                 } else {
@@ -3139,12 +3256,19 @@
                 case 5:
                 case 6:
                 case 7:
-                    /* VCVTB, VCVTT: only present with the halfprec extension,
-                     * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
+                    /* VCVTB, VCVTT: only present with the halfprec extension
+                     * UNPREDICTABLE if bit 8 is set prior to ARMv8
+                     * (we choose to UNDEF)
                      */
-                    if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
+                    if ((dp && !arm_feature(env, ARM_FEATURE_V8)) ||
+                        !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
                         return 1;
                     }
+                    if (!extract32(rn, 1, 1)) {
+                        /* Half precision source.  */
+                        gen_mov_F0_vreg(0, rm);
+                        break;
+                    }
                     /* Otherwise fall through */
                 default:
                     /* One source operand.  */
@@ -3292,31 +3416,55 @@
                     case 3: /* sqrt */
                         gen_vfp_sqrt(dp);
                         break;
-                    case 4: /* vcvtb.f32.f16 */
+                    case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
                         tmp = gen_vfp_mrs();
                         tcg_gen_ext16u_i32(tmp, tmp);
-                        gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
+                        if (dp) {
+                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
+                                                           cpu_env);
+                        } else {
+                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
+                                                           cpu_env);
+                        }
                         tcg_temp_free_i32(tmp);
                         break;
-                    case 5: /* vcvtt.f32.f16 */
+                    case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
                         tmp = gen_vfp_mrs();
                         tcg_gen_shri_i32(tmp, tmp, 16);
-                        gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
+                        if (dp) {
+                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
+                                                           cpu_env);
+                        } else {
+                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
+                                                           cpu_env);
+                        }
                         tcg_temp_free_i32(tmp);
                         break;
-                    case 6: /* vcvtb.f16.f32 */
+                    case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
                         tmp = tcg_temp_new_i32();
-                        gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
+                        if (dp) {
+                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
+                                                           cpu_env);
+                        } else {
+                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
+                                                           cpu_env);
+                        }
                         gen_mov_F0_vreg(0, rd);
                         tmp2 = gen_vfp_mrs();
                         tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                         tcg_gen_or_i32(tmp, tmp, tmp2);
                         tcg_temp_free_i32(tmp2);
                         gen_vfp_msr(tmp);
                         break;
-                    case 7: /* vcvtt.f16.f32 */
+                    case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
                         tmp = tcg_temp_new_i32();
-                        gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
+                        if (dp) {
+                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
+                                                           cpu_env);
+                        } else {
+                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
+                                                           cpu_env);
+                        }
                         tcg_gen_shli_i32(tmp, tmp, 16);
                         gen_mov_F0_vreg(0, rd);
                         tmp2 = gen_vfp_mrs();
@@ -3338,6 +3486,44 @@
                         gen_vfp_F1_ld0(dp);
                         gen_vfp_cmpe(dp);
                         break;
+                    case 12: /* vrintr */
+                    {
+                        TCGv_ptr fpst = get_fpstatus_ptr(0);
+                        if (dp) {
+                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
+                        } else {
+                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
+                        }
+                        tcg_temp_free_ptr(fpst);
+                        break;
+                    }
+                    case 13: /* vrintz */
+                    {
+                        TCGv_ptr fpst = get_fpstatus_ptr(0);
+                        TCGv_i32 tcg_rmode;
+                        tcg_rmode = tcg_const_i32(float_round_to_zero);
+                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+                        if (dp) {
+                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
+                        } else {
+                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
+                        }
+                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+                        tcg_temp_free_i32(tcg_rmode);
+                        tcg_temp_free_ptr(fpst);
+                        break;
+                    }
+                    case 14: /* vrintx */
+                    {
+                        TCGv_ptr fpst = get_fpstatus_ptr(0);
+                        if (dp) {
+                            gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
+                        } else {
+                            gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
+                        }
+                        tcg_temp_free_ptr(fpst);
+                        break;
+                    }
                     case 15: /* single<->double conversion */
                         if (dp)
                             gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
@@ -3411,16 +3597,21 @@
                 }

                 /* Write back the result.  */
-                if (op == 15 && (rn >= 8 && rn <= 11))
-                    ; /* Comparison, do nothing.  */
-                else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
-                    /* VCVT double to int: always integer result. */
+                if (op == 15 && (rn >= 8 && rn <= 11)) {
+                    /* Comparison, do nothing.  */
+                } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
+                                              (rn & 0x1e) == 0x6)) {
+                    /* VCVT double to int: always integer result.
+                     * VCVT double to half precision is always a single
+                     * precision result.
+                     */
                     gen_mov_vreg_F0(0, rd);
-                else if (op == 15 && rn == 15)
+                } else if (op == 15 && rn == 15) {
                     /* conversion */
                     gen_mov_vreg_F0(!dp, rd);
-                else
+                } else {
                     gen_mov_vreg_F0(dp, rd);
+                }

                 /* break out of the loop if we have finished  */
                 if (veclen == 0)
@@ -4628,8 +4819,22 @@
 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
 #define NEON_2RM_VSHLL 38
+#define NEON_2RM_VRINTN 40
+#define NEON_2RM_VRINTX 41
+#define NEON_2RM_VRINTA 42
+#define NEON_2RM_VRINTZ 43
 #define NEON_2RM_VCVT_F16_F32 44
+#define NEON_2RM_VRINTM 45
 #define NEON_2RM_VCVT_F32_F16 46
+#define NEON_2RM_VRINTP 47
+#define NEON_2RM_VCVTAU 48
+#define NEON_2RM_VCVTAS 49
+#define NEON_2RM_VCVTNU 50
+#define NEON_2RM_VCVTNS 51
+#define NEON_2RM_VCVTPU 52
+#define NEON_2RM_VCVTPS 53
+#define NEON_2RM_VCVTMU 54
+#define NEON_2RM_VCVTMS 55
 #define NEON_2RM_VRECPE 56
 #define NEON_2RM_VRSQRTE 57
 #define NEON_2RM_VRECPE_F 58
@@ -4643,6 +4848,9 @@
 {
     /* Return true if this neon 2reg-misc op is float-to-float */
     return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
+            (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
+            op == NEON_2RM_VRINTM ||
+            (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
             op >= NEON_2RM_VRECPE_F);
 }

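The three new range tests look redundant but follow directly from the opcode numbering above: VRINTN..VRINTZ (40-43) is contiguous, VRINTM (45) sits alone between the two f16 conversion opcodes (44 and 46), and VRINTP..VCVTMS (47-55) is contiguous again. A tiny sanity check of that layout, with the numbers copied from the #defines:

    /* 40..43 | 45 | 47..55 are float-to-float; 44 and 46 are f16<->f32 */
    static int is_new_float_op(int op)
    {
        return (op >= 40 && op <= 43) || op == 45 || (op >= 47 && op <= 55);
    }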
@@ -4685,8 +4893,22 @@
     [NEON_2RM_VMOVN] = 0x7,
     [NEON_2RM_VQMOVN] = 0x7,
     [NEON_2RM_VSHLL] = 0x7,
+    [NEON_2RM_VRINTN] = 0x4,
+    [NEON_2RM_VRINTX] = 0x4,
+    [NEON_2RM_VRINTA] = 0x4,
+    [NEON_2RM_VRINTZ] = 0x4,
     [NEON_2RM_VCVT_F16_F32] = 0x2,
+    [NEON_2RM_VRINTM] = 0x4,
     [NEON_2RM_VCVT_F32_F16] = 0x2,
+    [NEON_2RM_VRINTP] = 0x4,
+    [NEON_2RM_VCVTAU] = 0x4,
+    [NEON_2RM_VCVTAS] = 0x4,
+    [NEON_2RM_VCVTNU] = 0x4,
+    [NEON_2RM_VCVTNS] = 0x4,
+    [NEON_2RM_VCVTPU] = 0x4,
+    [NEON_2RM_VCVTPS] = 0x4,
+    [NEON_2RM_VCVTMU] = 0x4,
+    [NEON_2RM_VCVTMS] = 0x4,
     [NEON_2RM_VRECPE] = 0x4,
     [NEON_2RM_VRSQRTE] = 0x4,
     [NEON_2RM_VRECPE_F] = 0x4,
@@ -5132,9 +5354,9 @@
         {
             TCGv_ptr fpstatus = get_fpstatus_ptr(1);
             if (size == 0) {
-                gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
+                gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
             } else {
-                gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
+                gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
             }
             tcg_temp_free_ptr(fpstatus);
             break;
@@ -5144,9 +5366,9 @@
                 /* VMAXNM/VMINNM */
                 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                 if (size == 0) {
-                    gen_helper_vfp_maxnms(tmp, tmp, tmp2, fpstatus);
+                    gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
                 } else {
-                    gen_helper_vfp_minnms(tmp, tmp, tmp2, fpstatus);
+                    gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
                 }
                 tcg_temp_free_ptr(fpstatus);
             } else {
@@ -6375,6 +6597,73 @@
                             }
                             neon_store_reg(rm, pass, tmp2);
                             break;
+                        case NEON_2RM_VRINTN:
+                        case NEON_2RM_VRINTA:
+                        case NEON_2RM_VRINTM:
+                        case NEON_2RM_VRINTP:
+                        case NEON_2RM_VRINTZ:
+                        {
+                            TCGv_i32 tcg_rmode;
+                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+                            int rmode;
+
+                            if (op == NEON_2RM_VRINTZ) {
+                                rmode = FPROUNDING_ZERO;
+                            } else {
+                                rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
+                            }
+
+                            tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
+                            gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
+                                                      cpu_env);
+                            gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
+                            gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
+                                                      cpu_env);
+                            tcg_temp_free_ptr(fpstatus);
+                            tcg_temp_free_i32(tcg_rmode);
+                            break;
+                        }
+                        case NEON_2RM_VRINTX:
+                        {
+                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+                            gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
+                            tcg_temp_free_ptr(fpstatus);
+                            break;
+                        }
+                        case NEON_2RM_VCVTAU:
+                        case NEON_2RM_VCVTAS:
+                        case NEON_2RM_VCVTNU:
+                        case NEON_2RM_VCVTNS:
+                        case NEON_2RM_VCVTPU:
+                        case NEON_2RM_VCVTPS:
+                        case NEON_2RM_VCVTMU:
+                        case NEON_2RM_VCVTMS:
+                        {
+                            bool is_signed = !extract32(insn, 7, 1);
+                            TCGv_ptr fpst = get_fpstatus_ptr(1);
+                            TCGv_i32 tcg_rmode, tcg_shift;
+                            int rmode = fp_decode_rm[extract32(insn, 8, 2)];
+
+                            tcg_shift = tcg_const_i32(0);
+                            tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
+                            gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
+                                                      cpu_env);
+
+                            if (is_signed) {
+                                gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
+                                                     tcg_shift, fpst);
+                            } else {
+                                gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
+                                                     tcg_shift, fpst);
+                            }
+
+                            gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
+                                                      cpu_env);
+                            tcg_temp_free_i32(tcg_rmode);
+                            tcg_temp_free_i32(tcg_shift);
+                            tcg_temp_free_ptr(fpst);
+                            break;
+                        }
                         case NEON_2RM_VRECPE:
                             gen_helper_recpe_u32(tmp, tmp, cpu_env);
                             break;
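The Neon variants pull their parameters straight from the encoding with extract32: bit 7 distinguishes unsigned from signed (inverted relative to the VFP handle_vcvt above), and bits [9:8] index fp_decode_rm. For reference, extract32 behaves like this minimal reimplementation (a sketch; the real function lives in QEMU's bitops header and asserts its arguments):

    #include <stdint.h>

    /* return `length` bits of `value` starting at bit `start` */
    static uint32_t extract32_sketch(uint32_t value, int start, int length)
    {
        return (value >> start) & (~0U >> (32 - length));
    }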
@@ -6484,7 +6773,6 @@
 {
     int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
     const ARMCPRegInfo *ri;
-    ARMCPU *cpu = arm_env_get_cpu(env);

     cpnum = (insn >> 8) & 0xf;
     if (arm_feature(env, ARM_FEATURE_XSCALE)
@@ -6527,11 +6815,11 @@
     isread = (insn >> 20) & 1;
     rt = (insn >> 12) & 0xf;

-    ri = get_arm_cp_reginfo(cpu,
+    ri = get_arm_cp_reginfo(s->cp_regs,
                             ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
     if (ri) {
         /* Check access permissions */
-        if (!cp_access_ok(env, ri, isread)) {
+        if (!cp_access_ok(s->current_pl, ri, isread)) {
             return 1;
         }

@@ -6745,30 +7033,34 @@
     default:
         abort();
     }
-    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
+
+    if (size == 3) {
+        TCGv_i32 tmp2 = tcg_temp_new_i32();
+        TCGv_i32 tmp3 = tcg_temp_new_i32();
+
+        tcg_gen_addi_i32(tmp2, addr, 4);
+        gen_aa32_ld32u(tmp3, tmp2, IS_USER(s));
+        tcg_temp_free_i32(tmp2);
+        tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
+        store_reg(s, rt2, tmp3);
+    } else {
+        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
+    }
+
     store_reg(s, rt, tmp);
-    if (size == 3) {
-        TCGv_i32 tmp2 = tcg_temp_new_i32();
-        tcg_gen_addi_i32(tmp2, addr, 4);
-        tmp = tcg_temp_new_i32();
-        gen_aa32_ld32u(tmp, tmp2, IS_USER(s));
-        tcg_temp_free_i32(tmp2);
-        tcg_gen_mov_i32(cpu_exclusive_high, tmp);
-        store_reg(s, rt2, tmp);
-    }
-    tcg_gen_mov_i32(cpu_exclusive_addr, addr);
+    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
 }

 static void gen_clrex(DisasContext *s)
 {
-    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
+    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
 }

 #ifdef CONFIG_USER_ONLY
 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                 TCGv_i32 addr, int size)
 {
-    tcg_gen_mov_i32(cpu_exclusive_test, addr);
+    tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
     tcg_gen_movi_i32(cpu_exclusive_info,
                      size | (rd << 4) | (rt << 8) | (rt2 << 12));
     gen_exception_insn(s, 4, EXCP_STREX);
@@ -6778,6 +7070,7 @@
                                 TCGv_i32 addr, int size)
 {
     TCGv_i32 tmp;
+    TCGv_i64 val64, extaddr;
     int done_label;
     int fail_label;

@@ -6789,7 +7082,11 @@
        } */
     fail_label = gen_new_label();
     done_label = gen_new_label();
-    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
+    extaddr = tcg_temp_new_i64();
+    tcg_gen_extu_i32_i64(extaddr, addr);
+    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
+    tcg_temp_free_i64(extaddr);
+
     tmp = tcg_temp_new_i32();
     switch (size) {
     case 0:
@@ -6805,17 +7102,24 @@
     default:
         abort();
     }
-    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
-    tcg_temp_free_i32(tmp);
+
+    val64 = tcg_temp_new_i64();
     if (size == 3) {
         TCGv_i32 tmp2 = tcg_temp_new_i32();
+        TCGv_i32 tmp3 = tcg_temp_new_i32();
         tcg_gen_addi_i32(tmp2, addr, 4);
-        tmp = tcg_temp_new_i32();
-        gen_aa32_ld32u(tmp, tmp2, IS_USER(s));
+        gen_aa32_ld32u(tmp3, tmp2, IS_USER(s));
         tcg_temp_free_i32(tmp2);
-        tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
-        tcg_temp_free_i32(tmp);
+        tcg_gen_concat_i32_i64(val64, tmp, tmp3);
+        tcg_temp_free_i32(tmp3);
+    } else {
+        tcg_gen_extu_i32_i64(val64, tmp);
     }
+    tcg_temp_free_i32(tmp);
+
+    tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
+    tcg_temp_free_i64(val64);
+
     tmp = load_reg(s, rt);
     switch (size) {
     case 0:
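Net effect of the hunk above: the old code needed two 32-bit comparisons, each with its own branch to fail_label (the low word against cpu_exclusive_val, the high word against the now-deleted cpu_exclusive_high); packing both words into val64 reduces this to one tcg_gen_brcond_i64. The packed comparison is roughly equivalent to this plain-C check (a sketch, not TCG):

    #include <stdbool.h>
    #include <stdint.h>

    /* one 64-bit compare replaces the old pair of 32-bit compares */
    static bool exclusive_match(uint32_t lo, uint32_t hi, uint64_t exclusive_val)
    {
        return (((uint64_t)hi << 32) | lo) == exclusive_val;
    }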
@@ -6843,7 +7147,7 @@
     gen_set_label(fail_label);
     tcg_gen_movi_i32(cpu_R[rd], 1);
     gen_set_label(done_label);
-    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
+    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
 }
 #endif

@@ -10266,6 +10570,8 @@
     dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
     dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
     dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
+    dc->cp_regs = cpu->cp_regs;
+    dc->current_pl = arm_current_pl(env);

     cpu_F0s = tcg_temp_new_i32();
     cpu_F1s = tcg_temp_new_i32();
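These two assignments are what let the earlier coprocessor hunk drop its arm_env_get_cpu(env) call: the coprocessor register table and the current privilege level are sampled once per translation block into DisasContext, so disas_coproc_insn can consult s->cp_regs and s->current_pl without reaching back into CPU state mid-translation. Conceptually, DisasContext grows two fields along these lines (a sketch only; the real struct is defined in this file and carries many more members):

    /* sketch of the DisasContext additions used by disas_coproc_insn */
    struct DisasContextSketch {
        const void *cp_regs;  /* coprocessor register table, from ARMCPU */
        int current_pl;       /* privilege level at translation time */
    };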