~ubuntu-branches/ubuntu/trusty/qemu/trusty

Viewing changes to target-arm/translate.c

  • Committer: Package Import Robot
  • Author(s): Serge Hallyn
  • Date: 2013-10-22 22:47:07 UTC
  • mfrom: (1.8.3) (10.1.42 sid)
  • Revision ID: package-import@ubuntu.com-20131022224707-1lya34fw3k3f24tv
Tags: 1.6.0+dfsg-2ubuntu1
* Merge 1.6.0~rc0+dfsg-2exp from debian experimental.  Remaining changes:
  - debian/control
    * update maintainer
    * remove libiscsi, usb-redir, vde, vnc-jpeg, and libssh2-1-dev
      from build-deps
    * enable rbd
    * add qemu-system and qemu-common B/R to qemu-keymaps
    * add D:udev, R:qemu, R:qemu-common and B:qemu-common to
      qemu-system-common
    * qemu-system-arm, qemu-system-ppc, qemu-system-sparc:
      - add qemu-kvm to Provides
      - add qemu-common, qemu-kvm, kvm to B/R
      - remove openbios-sparc from qemu-system-sparc D
      - drop openbios-ppc and openhackware Depends to Suggests (for now)
    * qemu-system-x86:
      - add qemu-common to Breaks/Replaces.
      - add cpu-checker to Recommends.
    * qemu-user: add B/R:qemu-kvm
    * qemu-kvm:
      - add armhf armel powerpc sparc to Architecture
      - C/R/P: qemu-kvm-spice
    * add qemu-common package
    * drop qemu-slof which is not packaged in ubuntu
  - add qemu-system-common.links for tap ifup/down scripts and OVMF link.
  - qemu-system-x86.links:
    * remove pxe rom links which are in kvm-ipxe
    * add symlink for kvm.1 manpage
  - debian/rules
    * add kvm-spice symlink to qemu-kvm
    * call dh_installmodules for qemu-system-x86
    * update dh_installinit to install upstart script
    * run dh_installman (Closes: #709241) (cherrypicked from 1.5.0+dfsg-2)
  - Add qemu-utils.links for kvm-* symlinks.
  - Add qemu-system-x86.qemu-kvm.upstart and .default
  - Add qemu-system-x86.modprobe to set nesting=1 (a sketch follows this list)
  - Add qemu-system-common.preinst to add kvm group
  - qemu-system-common.postinst: remove bad group acl if there, then have
    udev relabel /dev/kvm.
  - New linaro patches from qemu-linaro rebasing branch
  - Dropped patches:
    * xen-simplify-xen_enabled.patch
    * sparc-linux-user-fix-missing-symbols-in-.rel-.rela.plt-sections.patch
    * main_loop-do-not-set-nonblocking-if-xen_enabled.patch
    * xen_machine_pv-do-not-create-a-dummy-CPU-in-machine-.patch
    * virtio-rng-fix-crash
  - Kept patches:
    * expose_vms_qemu64cpu.patch - updated
    * linaro arm patches from qemu-linaro rebasing branch
  - New patches:
    * fix-pci-add: change CONFIG variable in ifdef to make sure that
      pci_add is defined.
* Add linaro patches
* Add experimental mach-virt patches for arm virtualization.
* qemu-system-common.install: add debian/tmp/usr/lib to install the
  qemu-bridge-helper
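
The qemu-system-x86.modprobe item above ships a modprobe.d fragment whose contents are not part of this changelog. A minimal sketch, assuming the Intel module and the usual option spelling (the packaged file may differ):

    # /etc/modprobe.d/qemu-system-x86.conf (hypothetical path)
    options kvm_intel nested=1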

@@ -42 +42 @@
 #define ENABLE_ARCH_6K   arm_feature(env, ARM_FEATURE_V6K)
 #define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
 #define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)
+#define ENABLE_ARCH_8     arm_feature(env, ARM_FEATURE_V8)
 
 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
 
@@ -96 +97 @@
 #endif
 
 /* FIXME:  These should be removed.  */
-static TCGv cpu_F0s, cpu_F1s;
+static TCGv_i32 cpu_F0s, cpu_F1s;
 static TCGv_i64 cpu_F0d, cpu_F1d;
 
 #include "exec/gen-icount.h"
@@ -139 +140 @@
 #include "helper.h"
 }
 
-static inline TCGv load_cpu_offset(int offset)
+static inline TCGv_i32 load_cpu_offset(int offset)
 {
-    TCGv tmp = tcg_temp_new_i32();
+    TCGv_i32 tmp = tcg_temp_new_i32();
     tcg_gen_ld_i32(tmp, cpu_env, offset);
     return tmp;
 }
 
 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
 
-static inline void store_cpu_offset(TCGv var, int offset)
+static inline void store_cpu_offset(TCGv_i32 var, int offset)
 {
     tcg_gen_st_i32(var, cpu_env, offset);
     tcg_temp_free_i32(var);
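
The ARCH() macro above is how the decoder gates instructions on a feature bit; a minimal usage sketch (illustrative, not a line from this patch):

    /* inside a decode function that declares an illegal_op label */
    ARCH(8);  /* if (!arm_feature(env, ARM_FEATURE_V8)) goto illegal_op; */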
@@ -158 +159 @@
     store_cpu_offset(var, offsetof(CPUARMState, name))
 
 /* Set a variable to the value of a CPU register.  */
-static void load_reg_var(DisasContext *s, TCGv var, int reg)
+static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
 {
     if (reg == 15) {
         uint32_t addr;
@@ -174 +175 @@
 }
 
 /* Create a new temporary and set it to the value of a CPU register.  */
-static inline TCGv load_reg(DisasContext *s, int reg)
+static inline TCGv_i32 load_reg(DisasContext *s, int reg)
 {
-    TCGv tmp = tcg_temp_new_i32();
+    TCGv_i32 tmp = tcg_temp_new_i32();
     load_reg_var(s, tmp, reg);
     return tmp;
 }
 
 /* Set a CPU register.  The source must be a temporary and will be
    marked as dead.  */
-static void store_reg(DisasContext *s, int reg, TCGv var)
+static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
 {
     if (reg == 15) {
         tcg_gen_andi_i32(var, var, ~1);
@@ -203 +204 @@
 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
 
 
-static inline void gen_set_cpsr(TCGv var, uint32_t mask)
+static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
 {
-    TCGv tmp_mask = tcg_const_i32(mask);
+    TCGv_i32 tmp_mask = tcg_const_i32(mask);
     gen_helper_cpsr_write(cpu_env, var, tmp_mask);
     tcg_temp_free_i32(tmp_mask);
 }
@@ -214 +215 @@
 
 static void gen_exception(int excp)
 {
-    TCGv tmp = tcg_temp_new_i32();
+    TCGv_i32 tmp = tcg_temp_new_i32();
     tcg_gen_movi_i32(tmp, excp);
     gen_helper_exception(cpu_env, tmp);
     tcg_temp_free_i32(tmp);
 }
 
-static void gen_smul_dual(TCGv a, TCGv b)
+static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
 {
-    TCGv tmp1 = tcg_temp_new_i32();
-    TCGv tmp2 = tcg_temp_new_i32();
+    TCGv_i32 tmp1 = tcg_temp_new_i32();
+    TCGv_i32 tmp2 = tcg_temp_new_i32();
     tcg_gen_ext16s_i32(tmp1, a);
     tcg_gen_ext16s_i32(tmp2, b);
     tcg_gen_mul_i32(tmp1, tmp1, tmp2);
@@ -236 +237 @@
 }
 
 /* Byteswap each halfword.  */
-static void gen_rev16(TCGv var)
+static void gen_rev16(TCGv_i32 var)
 {
-    TCGv tmp = tcg_temp_new_i32();
+    TCGv_i32 tmp = tcg_temp_new_i32();
     tcg_gen_shri_i32(tmp, var, 8);
     tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
     tcg_gen_shli_i32(var, var, 8);
@@ -248 +249 @@
 }
 
 /* Byteswap low halfword and sign extend.  */
-static void gen_revsh(TCGv var)
+static void gen_revsh(TCGv_i32 var)
 {
     tcg_gen_ext16u_i32(var, var);
     tcg_gen_bswap16_i32(var, var);
@@ -256 +257 @@
 }
 
 /* Unsigned bitfield extract.  */
-static void gen_ubfx(TCGv var, int shift, uint32_t mask)
+static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
 {
     if (shift)
         tcg_gen_shri_i32(var, var, shift);
@@ -264 +265 @@
 }
 
 /* Signed bitfield extract.  */
-static void gen_sbfx(TCGv var, int shift, int width)
+static void gen_sbfx(TCGv_i32 var, int shift, int width)
 {
     uint32_t signbit;
 
@@ -279 +280 @@
 }
 
 /* Return (b << 32) + a. Mark inputs as dead */
-static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
+static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
 {
     TCGv_i64 tmp64 = tcg_temp_new_i64();
 
@@ -293 +294 @@
 }
 
 /* Return (b << 32) - a. Mark inputs as dead. */
-static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
+static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
 {
     TCGv_i64 tmp64 = tcg_temp_new_i64();
 
@@ -307 +308 @@
 }
 
 /* 32x32->64 multiply.  Marks inputs as dead.  */
-static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
+static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
 {
-    TCGv lo = tcg_temp_new_i32();
-    TCGv hi = tcg_temp_new_i32();
+    TCGv_i32 lo = tcg_temp_new_i32();
+    TCGv_i32 hi = tcg_temp_new_i32();
     TCGv_i64 ret;
 
     tcg_gen_mulu2_i32(lo, hi, a, b);
@@ -319 +320 @@
 
     ret = tcg_temp_new_i64();
     tcg_gen_concat_i32_i64(ret, lo, hi);
-    tcg_temp_free(lo);
-    tcg_temp_free(hi);
+    tcg_temp_free_i32(lo);
+    tcg_temp_free_i32(hi);
 
     return ret;
 }
 
-static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
+static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
 {
-    TCGv lo = tcg_temp_new_i32();
-    TCGv hi = tcg_temp_new_i32();
+    TCGv_i32 lo = tcg_temp_new_i32();
+    TCGv_i32 hi = tcg_temp_new_i32();
     TCGv_i64 ret;
 
     tcg_gen_muls2_i32(lo, hi, a, b);
@@ -337 +338 @@
 
     ret = tcg_temp_new_i64();
     tcg_gen_concat_i32_i64(ret, lo, hi);
-    tcg_temp_free(lo);
-    tcg_temp_free(hi);
+    tcg_temp_free_i32(lo);
+    tcg_temp_free_i32(hi);
 
     return ret;
 }
 
 /* Swap low and high halfwords.  */
-static void gen_swap_half(TCGv var)
+static void gen_swap_half(TCGv_i32 var)
 {
-    TCGv tmp = tcg_temp_new_i32();
+    TCGv_i32 tmp = tcg_temp_new_i32();
     tcg_gen_shri_i32(tmp, var, 16);
     tcg_gen_shli_i32(var, var, 16);
     tcg_gen_or_i32(var, var, tmp);
@@ -360 +361 @@
     t0 = (t0 + t1) ^ tmp;
  */
 
-static void gen_add16(TCGv t0, TCGv t1)
+static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
 {
-    TCGv tmp = tcg_temp_new_i32();
+    TCGv_i32 tmp = tcg_temp_new_i32();
     tcg_gen_xor_i32(tmp, t0, t1);
     tcg_gen_andi_i32(tmp, tmp, 0x8000);
     tcg_gen_andi_i32(t0, t0, ~0x8000);
@@ -374 +375 @@
 }
 
 /* Set CF to the top bit of var.  */
-static void gen_set_CF_bit31(TCGv var)
+static void gen_set_CF_bit31(TCGv_i32 var)
 {
     tcg_gen_shri_i32(cpu_CF, var, 31);
 }
 
 /* Set N and Z flags from var.  */
-static inline void gen_logic_CC(TCGv var)
+static inline void gen_logic_CC(TCGv_i32 var)
 {
     tcg_gen_mov_i32(cpu_NF, var);
     tcg_gen_mov_i32(cpu_ZF, var);
 }
 
 /* T0 += T1 + CF.  */
-static void gen_adc(TCGv t0, TCGv t1)
+static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
 {
     tcg_gen_add_i32(t0, t0, t1);
     tcg_gen_add_i32(t0, t0, cpu_CF);
 }
 
 /* dest = T0 + T1 + CF. */
-static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
+static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
 {
     tcg_gen_add_i32(dest, t0, t1);
     tcg_gen_add_i32(dest, dest, cpu_CF);
 }
 
 /* dest = T0 - T1 + CF - 1.  */
-static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
+static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
 {
     tcg_gen_sub_i32(dest, t0, t1);
     tcg_gen_add_i32(dest, dest, cpu_CF);
@@ -409 +410 @@
 }
 
 /* dest = T0 + T1. Compute C, N, V and Z flags */
-static void gen_add_CC(TCGv dest, TCGv t0, TCGv t1)
+static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
 {
-    TCGv tmp = tcg_temp_new_i32();
+    TCGv_i32 tmp = tcg_temp_new_i32();
     tcg_gen_movi_i32(tmp, 0);
     tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
     tcg_gen_mov_i32(cpu_ZF, cpu_NF);
@@ -423 +424 @@
 }
 
 /* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
-static void gen_adc_CC(TCGv dest, TCGv t0, TCGv t1)
+static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
 {
-    TCGv tmp = tcg_temp_new_i32();
+    TCGv_i32 tmp = tcg_temp_new_i32();
     if (TCG_TARGET_HAS_add2_i32) {
         tcg_gen_movi_i32(tmp, 0);
         tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
@@ -451 +452 @@
 }
 
 /* dest = T0 - T1. Compute C, N, V and Z flags */
-static void gen_sub_CC(TCGv dest, TCGv t0, TCGv t1)
+static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
 {
-    TCGv tmp;
+    TCGv_i32 tmp;
     tcg_gen_sub_i32(cpu_NF, t0, t1);
     tcg_gen_mov_i32(cpu_ZF, cpu_NF);
     tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
@@ -466 +467 @@
 }
 
 /* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
-static void gen_sbc_CC(TCGv dest, TCGv t0, TCGv t1)
+static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
 {
-    TCGv tmp = tcg_temp_new_i32();
+    TCGv_i32 tmp = tcg_temp_new_i32();
     tcg_gen_not_i32(tmp, t1);
     gen_adc_CC(dest, t0, tmp);
-    tcg_temp_free(tmp);
+    tcg_temp_free_i32(tmp);
 }
 
 #define GEN_SHIFT(name)                                               \
-static void gen_##name(TCGv dest, TCGv t0, TCGv t1)                   \
+static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)       \
 {                                                                     \
-    TCGv tmp1, tmp2, tmp3;                                            \
+    TCGv_i32 tmp1, tmp2, tmp3;                                        \
     tmp1 = tcg_temp_new_i32();                                        \
     tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
     tmp2 = tcg_const_i32(0);                                          \
@@ -493 +494 @@
 GEN_SHIFT(shr)
 #undef GEN_SHIFT
 
-static void gen_sar(TCGv dest, TCGv t0, TCGv t1)
+static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
 {
-    TCGv tmp1, tmp2;
+    TCGv_i32 tmp1, tmp2;
     tmp1 = tcg_temp_new_i32();
     tcg_gen_andi_i32(tmp1, t1, 0xff);
     tmp2 = tcg_const_i32(0x1f);
@@ -505 +506 @@
     tcg_temp_free_i32(tmp1);
 }
 
-static void tcg_gen_abs_i32(TCGv dest, TCGv src)
+static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
 {
-    TCGv c0 = tcg_const_i32(0);
-    TCGv tmp = tcg_temp_new_i32();
+    TCGv_i32 c0 = tcg_const_i32(0);
+    TCGv_i32 tmp = tcg_temp_new_i32();
     tcg_gen_neg_i32(tmp, src);
     tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
     tcg_temp_free_i32(c0);
     tcg_temp_free_i32(tmp);
 }
 
-static void shifter_out_im(TCGv var, int shift)
+static void shifter_out_im(TCGv_i32 var, int shift)
 {
     if (shift == 0) {
         tcg_gen_andi_i32(cpu_CF, var, 1);
@@ -528 +529 @@
 }
 
 /* Shift by immediate.  Includes special handling for shift == 0.  */
-static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
+static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
+                                    int shift, int flags)
 {
     switch (shiftop) {
     case 0: /* LSL */
@@ -565 +567 @@
                 shifter_out_im(var, shift - 1);
             tcg_gen_rotri_i32(var, var, shift); break;
         } else {
-            TCGv tmp = tcg_temp_new_i32();
+            TCGv_i32 tmp = tcg_temp_new_i32();
             tcg_gen_shli_i32(tmp, cpu_CF, 31);
             if (flags)
                 shifter_out_im(var, 0);
@@ -576 +578 @@
     }
 };
 
-static inline void gen_arm_shift_reg(TCGv var, int shiftop,
-                                     TCGv shift, int flags)
+static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
+                                     TCGv_i32 shift, int flags)
 {
     if (flags) {
         switch (shiftop) {
@@ -613 +615 @@
     case 4: gen_pas_helper(glue(pfx,add8)); break; \
     case 7: gen_pas_helper(glue(pfx,sub8)); break; \
     }
-static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
+static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
 {
     TCGv_ptr tmp;
 
@@ -660 +662 @@
     case 5: gen_pas_helper(glue(pfx,sub16)); break; \
     case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
     }
-static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
+static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
 {
     TCGv_ptr tmp;
 
@@ -699 +701 @@
 
 static void gen_test_cc(int cc, int label)
 {
-    TCGv tmp;
+    TCGv_i32 tmp;
     int inv;
 
     switch (cc) {
@@ -793 +795 @@
 /* Set PC and Thumb state from an immediate address.  */
 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
 {
-    TCGv tmp;
+    TCGv_i32 tmp;
 
     s->is_jmp = DISAS_UPDATE;
     if (s->thumb != (addr & 1)) {
@@ -806 +808 @@
 }
 
 /* Set PC and Thumb state from var.  var is marked as dead.  */
-static inline void gen_bx(DisasContext *s, TCGv var)
+static inline void gen_bx(DisasContext *s, TCGv_i32 var)
 {
     s->is_jmp = DISAS_UPDATE;
     tcg_gen_andi_i32(cpu_R[15], var, ~1);
@@ -818 +820 @@
    to r15 in ARM architecture v7 and above. The source must be a temporary
    and will be marked as dead. */
 static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
-                                int reg, TCGv var)
+                                int reg, TCGv_i32 var)
 {
     if (reg == 15 && ENABLE_ARCH_7) {
         gen_bx(s, var);
@@ -832 +834 @@
  * the results of a LDR/LDM/POP into r15, and corresponds to the cases
  * in the ARM ARM which use the LoadWritePC() pseudocode function. */
 static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
-                                int reg, TCGv var)
+                                       int reg, TCGv_i32 var)
 {
     if (reg == 15 && ENABLE_ARCH_5) {
         gen_bx(s, var);
@@ -847 +849 @@
     s->is_jmp = DISAS_SMC;
 }
 
-static inline TCGv gen_ld8s(TCGv addr, int index)
-{
-    TCGv tmp = tcg_temp_new_i32();
-    tcg_gen_qemu_ld8s(tmp, addr, index);
-    return tmp;
-}
-static inline TCGv gen_ld8u(TCGv addr, int index)
-{
-    TCGv tmp = tcg_temp_new_i32();
-    tcg_gen_qemu_ld8u(tmp, addr, index);
-    return tmp;
-}
-static inline TCGv gen_ld16s(TCGv addr, int index)
-{
-    TCGv tmp = tcg_temp_new_i32();
-    tcg_gen_qemu_ld16s(tmp, addr, index);
-    return tmp;
-}
-static inline TCGv gen_ld16u(TCGv addr, int index)
-{
-    TCGv tmp = tcg_temp_new_i32();
-    tcg_gen_qemu_ld16u(tmp, addr, index);
-    return tmp;
-}
-static inline TCGv gen_ld32(TCGv addr, int index)
-{
-    TCGv tmp = tcg_temp_new_i32();
-    tcg_gen_qemu_ld32u(tmp, addr, index);
-    return tmp;
-}
-static inline TCGv_i64 gen_ld64(TCGv addr, int index)
-{
-    TCGv_i64 tmp = tcg_temp_new_i64();
-    tcg_gen_qemu_ld64(tmp, addr, index);
-    return tmp;
-}
-static inline void gen_st8(TCGv val, TCGv addr, int index)
-{
-    tcg_gen_qemu_st8(val, addr, index);
-    tcg_temp_free_i32(val);
-}
-static inline void gen_st16(TCGv val, TCGv addr, int index)
-{
-    tcg_gen_qemu_st16(val, addr, index);
-    tcg_temp_free_i32(val);
-}
-static inline void gen_st32(TCGv val, TCGv addr, int index)
-{
-    tcg_gen_qemu_st32(val, addr, index);
-    tcg_temp_free_i32(val);
-}
-static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
-{
-    tcg_gen_qemu_st64(val, addr, index);
-    tcg_temp_free_i64(val);
-}
-
 static inline void gen_set_pc_im(uint32_t val)
 {
     tcg_gen_movi_i32(cpu_R[15], val);
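
The remainder of the diff rewrites every caller of the deleted gen_ld*/gen_st* wrappers in the same mechanical way; a minimal before/after sketch of the pattern (an illustrative fragment, not a line from the patch):

    /* before: the wrapper allocated the temporary and freed the stored value */
    tmp = gen_ld32(addr, IS_USER(s));
    gen_st32(tmp, addr, IS_USER(s));

    /* after: the caller owns the TCGv_i32 temporary explicitly */
    tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
    tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
    tcg_temp_free_i32(tmp);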
@@ -917 +862 @@
 }
 
 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
-                                       TCGv var)
+                                       TCGv_i32 var)
 {
     int val, rm, shift, shiftop;
-    TCGv offset;
+    TCGv_i32 offset;
 
     if (!(insn & (1 << 25))) {
         /* immediate */
@@ -945 +890 @@
 }
 
 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
-                                        int extra, TCGv var)
+                                        int extra, TCGv_i32 var)
 {
     int val, rm;
-    TCGv offset;
+    TCGv_i32 offset;
 
     if (insn & (1 << 22)) {
         /* immediate */
@@ -1111 +1056 @@
 #define VFP_GEN_FIX(name) \
 static inline void gen_vfp_##name(int dp, int shift, int neon) \
 { \
-    TCGv tmp_shift = tcg_const_i32(shift); \
+    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
     TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
     if (dp) { \
         gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
@@ -1131 +1076 @@
 VFP_GEN_FIX(ulto)
 #undef VFP_GEN_FIX
 
-static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
+static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
 {
     if (dp)
         tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
@@ -1139 +1084 @@
         tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
 }
 
-static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
+static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
 {
     if (dp)
         tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
@@ -1171 +1116 @@
     return vfp_reg_offset(0, sreg);
 }
 
-static TCGv neon_load_reg(int reg, int pass)
+static TCGv_i32 neon_load_reg(int reg, int pass)
 {
-    TCGv tmp = tcg_temp_new_i32();
+    TCGv_i32 tmp = tcg_temp_new_i32();
     tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
     return tmp;
 }
 
-static void neon_store_reg(int reg, int pass, TCGv var)
+static void neon_store_reg(int reg, int pass, TCGv_i32 var)
 {
     tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
     tcg_temp_free_i32(var);
@@ -1235 +1180 @@
     tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
 }
 
-static inline TCGv iwmmxt_load_creg(int reg)
+static inline TCGv_i32 iwmmxt_load_creg(int reg)
 {
-    TCGv var = tcg_temp_new_i32();
+    TCGv_i32 var = tcg_temp_new_i32();
     tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
     return var;
 }
 
-static inline void iwmmxt_store_creg(int reg, TCGv var)
+static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
 {
     tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
     tcg_temp_free_i32(var);
@@ -1360 +1305 @@
 
 static void gen_op_iwmmxt_set_mup(void)
 {
-    TCGv tmp;
+    TCGv_i32 tmp;
     tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
     tcg_gen_ori_i32(tmp, tmp, 2);
     store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
@@ -1368 +1313 @@
 
 static void gen_op_iwmmxt_set_cup(void)
 {
-    TCGv tmp;
+    TCGv_i32 tmp;
     tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
     tcg_gen_ori_i32(tmp, tmp, 1);
     store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
@@ -1376 +1321 @@
 
 static void gen_op_iwmmxt_setpsr_nz(void)
 {
-    TCGv tmp = tcg_temp_new_i32();
+    TCGv_i32 tmp = tcg_temp_new_i32();
     gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
     store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
 }
@@ -1388 +1333 @@
     tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
 }
 
-static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
+static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
+                                     TCGv_i32 dest)
 {
     int rd;
     uint32_t offset;
-    TCGv tmp;
+    TCGv_i32 tmp;
 
     rd = (insn >> 16) & 0xf;
     tmp = load_reg(s, rd);
@@ -1422 +1368 @@
     return 0;
 }
 
-static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
+static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
 {
     int rd = (insn >> 0) & 0xf;
-    TCGv tmp;
+    TCGv_i32 tmp;
 
     if (insn & (1 << 8)) {
         if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
@@ -1450 +1396 @@
 {
     int rd, wrd;
     int rdhi, rdlo, rd0, rd1, i;
-    TCGv addr;
-    TCGv tmp, tmp2, tmp3;
+    TCGv_i32 addr;
+    TCGv_i32 tmp, tmp2, tmp3;
 
     if ((insn & 0x0e000e00) == 0x0c000000) {
         if ((insn & 0x0fe00ff0) == 0x0c400000) {
@@ -1489 +1435 @@
                         tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                         i = 0;
                     } else {                            /* WLDRW wRd */
-                        tmp = gen_ld32(addr, IS_USER(s));
+                        tmp = tcg_temp_new_i32();
+                        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                     }
                 } else {
+                    tmp = tcg_temp_new_i32();
                     if (insn & (1 << 22)) {             /* WLDRH */
-                        tmp = gen_ld16u(addr, IS_USER(s));
+                        tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
                     } else {                            /* WLDRB */
-                        tmp = gen_ld8u(addr, IS_USER(s));
+                        tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
                     }
                 }
                 if (i) {
@@ -1507 +1455 @@
         } else {
             if ((insn >> 28) == 0xf) {                  /* WSTRW wCx */
                 tmp = iwmmxt_load_creg(wrd);
-                gen_st32(tmp, addr, IS_USER(s));
+                tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
             } else {
                 gen_op_iwmmxt_movq_M0_wRn(wrd);
                 tmp = tcg_temp_new_i32();
                 if (insn & (1 << 8)) {
                     if (insn & (1 << 22)) {             /* WSTRD */
-                        tcg_temp_free_i32(tmp);
                         tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                     } else {                            /* WSTRW wRd */
                         tcg_gen_trunc_i64_i32(tmp, cpu_M0);
-                        gen_st32(tmp, addr, IS_USER(s));
+                        tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
                     }
                 } else {
                     if (insn & (1 << 22)) {             /* WSTRH */
                         tcg_gen_trunc_i64_i32(tmp, cpu_M0);
-                        gen_st16(tmp, addr, IS_USER(s));
+                        tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
                     } else {                            /* WSTRB */
                         tcg_gen_trunc_i64_i32(tmp, cpu_M0);
-                        gen_st8(tmp, addr, IS_USER(s));
+                        tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
                     }
                 }
             }
+            tcg_temp_free_i32(tmp);
         }
         tcg_temp_free_i32(addr);
         return 0;
@@ -1803 +1751 @@
             tmp3 = tcg_const_i32((insn & 1) << 5);
             break;
         default:
-            TCGV_UNUSED(tmp2);
-            TCGV_UNUSED(tmp3);
+            TCGV_UNUSED_I32(tmp2);
+            TCGV_UNUSED_I32(tmp3);
         }
         gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
-        tcg_temp_free(tmp3);
-        tcg_temp_free(tmp2);
+        tcg_temp_free_i32(tmp3);
+        tcg_temp_free_i32(tmp2);
         tcg_temp_free_i32(tmp);
         gen_op_iwmmxt_movq_wRn_M0(wrd);
         gen_op_iwmmxt_set_mup();
@@ -2267 +2215 @@
         tmp = tcg_const_i32((insn >> 20) & 3);
         iwmmxt_load_reg(cpu_V1, rd1);
         gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
-        tcg_temp_free(tmp);
+        tcg_temp_free_i32(tmp);
         gen_op_iwmmxt_movq_wRn_M0(wrd);
         gen_op_iwmmxt_set_mup();
         break;
@@ -2323 +2271 @@
         gen_op_iwmmxt_movq_M0_wRn(rd0);
         tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
         gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
-        tcg_temp_free(tmp);
+        tcg_temp_free_i32(tmp);
         gen_op_iwmmxt_movq_wRn_M0(wrd);
         gen_op_iwmmxt_set_mup();
         gen_op_iwmmxt_set_cup();
@@ -2453 +2401 @@
 static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
 {
     int acc, rd0, rd1, rdhi, rdlo;
-    TCGv tmp, tmp2;
+    TCGv_i32 tmp, tmp2;
 
     if ((insn & 0x0ff00f10) == 0x0e200010) {
         /* Multiply with Internal Accumulate Format */
@@ -2539 +2487 @@
 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
 
 /* Move between integer and VFP cores.  */
-static TCGv gen_vfp_mrs(void)
+static TCGv_i32 gen_vfp_mrs(void)
 {
-    TCGv tmp = tcg_temp_new_i32();
+    TCGv_i32 tmp = tcg_temp_new_i32();
     tcg_gen_mov_i32(tmp, cpu_F0s);
     return tmp;
 }
 
-static void gen_vfp_msr(TCGv tmp)
+static void gen_vfp_msr(TCGv_i32 tmp)
 {
     tcg_gen_mov_i32(cpu_F0s, tmp);
     tcg_temp_free_i32(tmp);
 }
 
-static void gen_neon_dup_u8(TCGv var, int shift)
+static void gen_neon_dup_u8(TCGv_i32 var, int shift)
 {
-    TCGv tmp = tcg_temp_new_i32();
+    TCGv_i32 tmp = tcg_temp_new_i32();
     if (shift)
         tcg_gen_shri_i32(var, var, shift);
     tcg_gen_ext8u_i32(var, var);
@@ -2565 +2513 @@
     tcg_temp_free_i32(tmp);
 }
 
-static void gen_neon_dup_low16(TCGv var)
+static void gen_neon_dup_low16(TCGv_i32 var)
 {
-    TCGv tmp = tcg_temp_new_i32();
+    TCGv_i32 tmp = tcg_temp_new_i32();
     tcg_gen_ext16u_i32(var, var);
     tcg_gen_shli_i32(tmp, var, 16);
     tcg_gen_or_i32(var, var, tmp);
     tcg_temp_free_i32(tmp);
 }
 
-static void gen_neon_dup_high16(TCGv var)
+static void gen_neon_dup_high16(TCGv_i32 var)
 {
-    TCGv tmp = tcg_temp_new_i32();
+    TCGv_i32 tmp = tcg_temp_new_i32();
     tcg_gen_andi_i32(var, var, 0xffff0000);
     tcg_gen_shri_i32(tmp, var, 16);
     tcg_gen_or_i32(var, var, tmp);
     tcg_temp_free_i32(tmp);
 }
 
-static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
+static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
 {
     /* Load a single Neon element and replicate into a 32 bit TCG reg */
-    TCGv tmp;
+    TCGv_i32 tmp = tcg_temp_new_i32();
     switch (size) {
     case 0:
-        tmp = gen_ld8u(addr, IS_USER(s));
+        tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
        gen_neon_dup_u8(tmp, 0);
         break;
     case 1:
-        tmp = gen_ld16u(addr, IS_USER(s));
+        tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
         gen_neon_dup_low16(tmp);
         break;
     case 2:
-        tmp = gen_ld32(addr, IS_USER(s));
+        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
         break;
     default: /* Avoid compiler warnings.  */
         abort();
@@ -2611 +2559 @@
 {
     uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
     int dp, veclen;
-    TCGv addr;
-    TCGv tmp;
-    TCGv tmp2;
+    TCGv_i32 addr;
+    TCGv_i32 tmp;
+    TCGv_i32 tmp2;
 
     if (!arm_feature(env, ARM_FEATURE_VFP))
         return 1;
@@ -3435 +3383 @@
     }
 }
 
-static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
+static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
 {
     if (x)
         tcg_gen_sari_i32(t0, t0, 16);
@@ -3482 +3430 @@
 }
 
 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
-static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
+static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
 {
-    TCGv tmp;
+    TCGv_i32 tmp;
     if (spsr) {
         /* ??? This is also undefined in system mode.  */
         if (IS_USER(s))
@@ -3506 +3454 @@
 /* Returns nonzero if access to the PSR is not permitted.  */
 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
 {
-    TCGv tmp;
+    TCGv_i32 tmp;
     tmp = tcg_temp_new_i32();
     tcg_gen_movi_i32(tmp, val);
     return gen_set_psr(s, mask, spsr, tmp);
 }
 
 /* Generate an old-style exception return. Marks pc as dead. */
-static void gen_exception_return(DisasContext *s, TCGv pc)
+static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
 {
-    TCGv tmp;
+    TCGv_i32 tmp;
     store_reg(s, 15, pc);
     tmp = load_cpu_field(spsr);
     gen_set_cpsr(tmp, 0xffffffff);
@@ -3524 +3472 @@
 }
 
 /* Generate a v6 exception return.  Marks both values as dead.  */
-static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
+static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
 {
     gen_set_cpsr(cpsr, 0xffffffff);
     tcg_temp_free_i32(cpsr);
@@ -3537 +3485 @@
 {
     if (s->condexec_mask) {
         uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
-        TCGv tmp = tcg_temp_new_i32();
+        TCGv_i32 tmp = tcg_temp_new_i32();
         tcg_gen_movi_i32(tmp, val);
         store_cpu_field(tmp, condexec_bits);
     }
@@ -3560 +3508 @@
         break;
     case 2: /* wfe */
     case 4: /* sev */
-        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
+    case 5: /* sevl */
+        /* TODO: Implement SEV, SEVL and WFE.  May help SMP performance.  */
    default: /* nop */
         break;
     }
@@ -3568 +3517 @@
 
 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
 
-static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
+static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
 {
     switch (size) {
     case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
@@ -3578 +3527 @@
     }
 }
 
-static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
+static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
 {
     switch (size) {
     case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
@@ -3640 +3589 @@
     default: return 1; \
     }} while (0)
 
-static TCGv neon_load_scratch(int scratch)
+static TCGv_i32 neon_load_scratch(int scratch)
 {
-    TCGv tmp = tcg_temp_new_i32();
+    TCGv_i32 tmp = tcg_temp_new_i32();
     tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
     return tmp;
 }
 
-static void neon_store_scratch(int scratch, TCGv var)
+static void neon_store_scratch(int scratch, TCGv_i32 var)
 {
     tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
     tcg_temp_free_i32(var);
 }
 
-static inline TCGv neon_get_scalar(int size, int reg)
+static inline TCGv_i32 neon_get_scalar(int size, int reg)
 {
-    TCGv tmp;
+    TCGv_i32 tmp;
     if (size == 1) {
         tmp = neon_load_reg(reg & 7, reg >> 4);
         if (reg & 8) {
@@ -3671 +3620 @@
 
 static int gen_neon_unzip(int rd, int rm, int size, int q)
 {
-    TCGv tmp, tmp2;
+    TCGv_i32 tmp, tmp2;
     if (!q && size == 2) {
         return 1;
     }
@@ -3710 +3659 @@
 
 static int gen_neon_zip(int rd, int rm, int size, int q)
 {
-    TCGv tmp, tmp2;
+    TCGv_i32 tmp, tmp2;
     if (!q && size == 2) {
         return 1;
     }
@@ -3747 +3696 @@
     return 0;
 }
 
-static void gen_neon_trn_u8(TCGv t0, TCGv t1)
+static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
 {
-    TCGv rd, tmp;
+    TCGv_i32 rd, tmp;
 
     rd = tcg_temp_new_i32();
     tmp = tcg_temp_new_i32();
@@ -3769 +3718 @@
     tcg_temp_free_i32(rd);
 }
 
-static void gen_neon_trn_u16(TCGv t0, TCGv t1)
+static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
 {
-    TCGv rd, tmp;
+    TCGv_i32 rd, tmp;
 
     rd = tcg_temp_new_i32();
     tmp = tcg_temp_new_i32();
@@ -3823 +3772 @@
     int load;
     int shift;
     int n;
-    TCGv addr;
-    TCGv tmp;
-    TCGv tmp2;
+    TCGv_i32 addr;
+    TCGv_i32 tmp;
+    TCGv_i32 tmp2;
     TCGv_i64 tmp64;
 
     if (!s->vfp_enabled)
@@ -3872 +3821 @@
                 tcg_gen_addi_i32(addr, addr, 1 << size);
             }
             if (size == 3) {
+                tmp64 = tcg_temp_new_i64();
                 if (load) {
-                    tmp64 = gen_ld64(addr, IS_USER(s));
+                    tcg_gen_qemu_ld64(tmp64, addr, IS_USER(s));
                     neon_store_reg64(tmp64, rd);
-                    tcg_temp_free_i64(tmp64);
                 } else {
-                    tmp64 = tcg_temp_new_i64();
                     neon_load_reg64(tmp64, rd);
-                    gen_st64(tmp64, addr, IS_USER(s));
+                    tcg_gen_qemu_st64(tmp64, addr, IS_USER(s));
                 }
+                tcg_temp_free_i64(tmp64);
                 tcg_gen_addi_i32(addr, addr, stride);
             } else {
                 for (pass = 0; pass < 2; pass++) {
                     if (size == 2) {
                         if (load) {
-                            tmp = gen_ld32(addr, IS_USER(s));
+                            tmp = tcg_temp_new_i32();
+                            tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                             neon_store_reg(rd, pass, tmp);
                         } else {
                             tmp = neon_load_reg(rd, pass);
-                            gen_st32(tmp, addr, IS_USER(s));
+                            tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
+                            tcg_temp_free_i32(tmp);
                         }
                         tcg_gen_addi_i32(addr, addr, stride);
                     } else if (size == 1) {
                         if (load) {
-                            tmp = gen_ld16u(addr, IS_USER(s));
+                            tmp = tcg_temp_new_i32();
+                            tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
                             tcg_gen_addi_i32(addr, addr, stride);
-                            tmp2 = gen_ld16u(addr, IS_USER(s));
+                            tmp2 = tcg_temp_new_i32();
+                            tcg_gen_qemu_ld16u(tmp2, addr, IS_USER(s));
                             tcg_gen_addi_i32(addr, addr, stride);
                             tcg_gen_shli_i32(tmp2, tmp2, 16);
                             tcg_gen_or_i32(tmp, tmp, tmp2);
@@ -3907 +3860 @@
                             tmp = neon_load_reg(rd, pass);
                             tmp2 = tcg_temp_new_i32();
                             tcg_gen_shri_i32(tmp2, tmp, 16);
-                            gen_st16(tmp, addr, IS_USER(s));
+                            tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
+                            tcg_temp_free_i32(tmp);
                             tcg_gen_addi_i32(addr, addr, stride);
-                            gen_st16(tmp2, addr, IS_USER(s));
+                            tcg_gen_qemu_st16(tmp2, addr, IS_USER(s));
+                            tcg_temp_free_i32(tmp2);
                             tcg_gen_addi_i32(addr, addr, stride);
                         }
                     } else /* size == 0 */ {
                         if (load) {
-                            TCGV_UNUSED(tmp2);
+                            TCGV_UNUSED_I32(tmp2);
                             for (n = 0; n < 4; n++) {
-                                tmp = gen_ld8u(addr, IS_USER(s));
+                                tmp = tcg_temp_new_i32();
+                                tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
                                 tcg_gen_addi_i32(addr, addr, stride);
                                 if (n == 0) {
                                     tmp2 = tmp;
@@ -3936 +3892 @@
                                 } else {
                                     tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                 }
-                                gen_st8(tmp, addr, IS_USER(s));
+                                tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
+                                tcg_temp_free_i32(tmp);
                                 tcg_gen_addi_i32(addr, addr, stride);
                             }
                             tcg_temp_free_i32(tmp2);
@@ -4056 +4013 @@
             load_reg_var(s, addr, rn);
             for (reg = 0; reg < nregs; reg++) {
                 if (load) {
+                    tmp = tcg_temp_new_i32();
                     switch (size) {
                     case 0:
-                        tmp = gen_ld8u(addr, IS_USER(s));
+                        tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
                         break;
                     case 1:
-                        tmp = gen_ld16u(addr, IS_USER(s));
+                        tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
                         break;
                     case 2:
-                        tmp = gen_ld32(addr, IS_USER(s));
+                        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                         break;
                     default: /* Avoid compiler warnings.  */
                         abort();
@@ -4082 +4040 @@
                         tcg_gen_shri_i32(tmp, tmp, shift);
                     switch (size) {
                     case 0:
-                        gen_st8(tmp, addr, IS_USER(s));
+                        tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
                         break;
                     case 1:
-                        gen_st16(tmp, addr, IS_USER(s));
+                        tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
                         break;
                     case 2:
-                        gen_st32(tmp, addr, IS_USER(s));
+                        tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
                         break;
                     }
+                    tcg_temp_free_i32(tmp);
                 }
                 rd += stride;
                 tcg_gen_addi_i32(addr, addr, 1 << size);
@@ -4100 +4059 @@
         }
     }
     if (rm != 15) {
-        TCGv base;
+        TCGv_i32 base;
 
         base = load_reg(s, rn);
         if (rm == 13) {
             tcg_gen_addi_i32(base, base, stride);
         } else {
-            TCGv index;
+            TCGv_i32 index;
             index = load_reg(s, rm);
             tcg_gen_add_i32(base, base, index);
             tcg_temp_free_i32(index);
@@ -4117 +4076 @@
 }
 
 /* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
-static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
+static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
 {
     tcg_gen_and_i32(t, t, c);
     tcg_gen_andc_i32(f, f, c);
     tcg_gen_or_i32(dest, t, f);
 }
 
-static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
+static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
 {
     switch (size) {
     case 0: gen_helper_neon_narrow_u8(dest, src); break;
@@ -4134 +4093 @@
     }
 }
 
-static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
+static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
 {
     switch (size) {
     case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
@@ -4144 +4103 @@
     }
 }
 
-static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
+static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
 {
     switch (size) {
     case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
@@ -4154 +4113 @@
     }
 }
 
-static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
+static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
 {
     switch (size) {
     case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
@@ -4164 +4123 @@
     }
 }
 
-static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
+static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                          int q, int u)
 {
     if (q) {
@@ -4198 +4157 @@
     }
 }
 
-static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
+static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
 {
     if (u) {
         switch (size) {
@@ -4259 +4218 @@
     }
 }
 
-static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
+static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
+                                 int size, int u)
 {
     TCGv_i64 tmp;
 
@@ -4289 +4249 @@
     }
 }
 
-static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
+static void gen_neon_narrow_op(int op, int u, int size,
+                               TCGv_i32 dest, TCGv_i64 src)
 {
     if (op) {
         if (u) {
@@ -4500 +4461 @@
     int pairwise;
     int u;
     uint32_t imm, mask;
-    TCGv tmp, tmp2, tmp3, tmp4, tmp5;
+    TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
     TCGv_i64 tmp64;
 
     if (!s->vfp_enabled)
@@ -5457 +5418 @@
                     tmp = neon_load_reg(rn, 1);
                     neon_store_scratch(2, tmp);
                 }
-                TCGV_UNUSED(tmp3);
+                TCGV_UNUSED_I32(tmp3);
                 for (pass = 0; pass < 2; pass++) {
                     if (src1_wide) {
                         neon_load_reg64(cpu_V0, rn + pass);
-                        TCGV_UNUSED(tmp);
+                        TCGV_UNUSED_I32(tmp);
                     } else {
                         if (pass == 1 && rd == rn) {
                             tmp = neon_load_scratch(2);
@@ -5474 +5435 @@
                     }
                     if (src2_wide) {
                         neon_load_reg64(cpu_V1, rm + pass);
-                        TCGV_UNUSED(tmp2);
+                        TCGV_UNUSED_I32(tmp2);
                     } else {
                         if (pass == 1 && rd == rm) {
                             tmp2 = neon_load_scratch(2);
@@ -5888 +5849 @@
                     if (rm & 1) {
                         return 1;
                     }
-                    TCGV_UNUSED(tmp2);
+                    TCGV_UNUSED_I32(tmp2);
                     for (pass = 0; pass < 2; pass++) {
                         neon_load_reg64(cpu_V0, rm + pass);
                         tmp = tcg_temp_new_i32();
@@ -5970 +5931 @@
                         if (neon_2rm_is_float_op(op)) {
                             tcg_gen_ld_f32(cpu_F0s, cpu_env,
                                            neon_reg_offset(rm, pass));
-                            TCGV_UNUSED(tmp);
+                            TCGV_UNUSED_I32(tmp);
                         } else {
                             tmp = neon_load_reg(rm, pass);
                         }
@@ -6043 +6004 @@
                             case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
                             default: abort();
                             }
-                            tcg_temp_free(tmp2);
+                            tcg_temp_free_i32(tmp2);
                             if (op == NEON_2RM_VCLE0) {
                                 tcg_gen_not_i32(tmp, tmp);
                             }
@@ -6056 +6017 @@
                             case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
                             default: abort();
                             }
-                            tcg_temp_free(tmp2);
+                            tcg_temp_free_i32(tmp2);
                             if (op == NEON_2RM_VCLT0) {
                                 tcg_gen_not_i32(tmp, tmp);
                             }
@@ -6069 +6030 @@
                             case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
                             default: abort();
                             }
-                            tcg_temp_free(tmp2);
+                            tcg_temp_free_i32(tmp2);
                             break;
                         case NEON_2RM_VABS:
                             switch(size) {
@@ -6082 +6043 @@
                         case NEON_2RM_VNEG:
                             tmp2 = tcg_const_i32(0);
                             gen_neon_rsb(size, tmp, tmp2);
-                            tcg_temp_free(tmp2);
+                            tcg_temp_free_i32(tmp2);
                             break;
                         case NEON_2RM_VCGT0_F:
                         {
                             TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                             tmp2 = tcg_const_i32(0);
                             gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
-                            tcg_temp_free(tmp2);
+                            tcg_temp_free_i32(tmp2);
                             tcg_temp_free_ptr(fpstatus);
                             break;
                         }
@@ -6098 +6059 @@
                             TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                             tmp2 = tcg_const_i32(0);
                             gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
-                            tcg_temp_free(tmp2);
+                            tcg_temp_free_i32(tmp2);
                             tcg_temp_free_ptr(fpstatus);
                             break;
                         }
@@ -6107 +6068 @@
                             TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                             tmp2 = tcg_const_i32(0);
                             gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
-                            tcg_temp_free(tmp2);
+                            tcg_temp_free_i32(tmp2);
                             tcg_temp_free_ptr(fpstatus);
                             break;
                         }
@@ -6116 +6077 @@
                             TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                             tmp2 = tcg_const_i32(0);
                             gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
-                            tcg_temp_free(tmp2);
+                            tcg_temp_free_i32(tmp2);
                             tcg_temp_free_ptr(fpstatus);
                             break;
                         }
@@ -6125 +6086 @@
                             TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                             tmp2 = tcg_const_i32(0);
                             gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
-                            tcg_temp_free(tmp2);
+                            tcg_temp_free_i32(tmp2);
                             tcg_temp_free_ptr(fpstatus);
                             break;
                         }
6326	6287	            break;
6327	6288	        }
6328	6289	 
 	6290	        if (use_icount && (ri->type & ARM_CP_IO)) {
 	6291	            gen_io_start();
 	6292	        }
 	6293	 
6329	6294	        if (isread) {
6330	6295	            /* Read */
6331	6296	            if (is64) {
6353	6318	                tcg_temp_free_i64(tmp64);
6354	6319	                store_reg(s, rt2, tmp);
6355	6320	            } else {
6356	 	                TCGv tmp;
 	6321	                TCGv_i32 tmp;
6357	6322	                if (ri->type & ARM_CP_CONST) {
6358	6323	                    tmp = tcg_const_i32(ri->resetvalue);
6359	6324	                } else if (ri->readfn) {
6384	6349	            }
6385	6350	 
6386	6351	            if (is64) {
6387	 	                TCGv tmplo, tmphi;
 	6352	                TCGv_i32 tmplo, tmphi;
6388	6353	                TCGv_i64 tmp64 = tcg_temp_new_i64();
6389	6354	                tmplo = load_reg(s, rt);
6390	6355	                tmphi = load_reg(s, rt2);
6402	6367	                tcg_temp_free_i64(tmp64);
6403	6368	            } else {
6404	6369	                if (ri->writefn) {
6405	 	                    TCGv tmp;
 	6370	                    TCGv_i32 tmp;
6406	6371	                    TCGv_ptr tmpptr;
6407	6372	                    gen_set_pc_im(s->pc);
6408	6373	                    tmp = load_reg(s, rt);
6411	6376	                    tcg_temp_free_ptr(tmpptr);
6412	6377	                    tcg_temp_free_i32(tmp);
6413	6378	                } else {
6414	 	                    TCGv tmp = load_reg(s, rt);
 	6379	                    TCGv_i32 tmp = load_reg(s, rt);
6415	6380	                    store_cpu_offset(tmp, ri->fieldoffset);
6416	6381	                }
6417	6382	            }
 	6383	        }
 	6384	 
 	6385	        if (use_icount && (ri->type & ARM_CP_IO)) {
 	6386	            /* I/O operations must end the TB here (whether read or write) */
 	6387	            gen_io_end();
 	6388	            gen_lookup_tb(s);
 	6389	        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
6418	6390	            /* We default to ending the TB on a coprocessor register write,
6419	6391	             * but allow this to be suppressed by the register definition
6420	6392	             * (usually only necessary to work around guest bugs).
6421	6393	             */
6422	 	            if (!(ri->type & ARM_CP_SUPPRESS_TB_END)) {
6423	 	                gen_lookup_tb(s);
6424	 	            }
 	6394	            gen_lookup_tb(s);
6425	6395	        }
 	6396	 
6426	6397	        return 0;
6427	6398	    }
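The substantive change in this hunk is the new icount handling: accesses to coprocessor registers marked ARM_CP_IO are bracketed with gen_io_start()/gen_io_end() and force the translation block to end, so the instruction counter stays exact across timing-sensitive device registers. The shape of the emitted control flow, schematically (decode details elided):

    if (use_icount && (ri->type & ARM_CP_IO)) {
        gen_io_start();                 /* entering a timed I/O access */
    }
    /* ... emit the register read or write ... */
    if (use_icount && (ri->type & ARM_CP_IO)) {
        gen_io_end();                   /* I/O ops must end the TB */
        gen_lookup_tb(s);
    } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
        gen_lookup_tb(s);               /* default: end the TB on a cp write */
    }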
6428	6399	 
6433	6404	/* Store a 64-bit value to a register pair.  Clobbers val.  */
6434	6405	static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
6435	6406	{
6436	 	    TCGv tmp;
 	6407	    TCGv_i32 tmp;
6437	6408	    tmp = tcg_temp_new_i32();
6438	6409	    tcg_gen_trunc_i64_i32(tmp, val);
6439	6410	    store_reg(s, rlow, tmp);
6447	6418	static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
6448	6419	{
6449	6420	    TCGv_i64 tmp;
6450	 	    TCGv tmp2;
 	6421	    TCGv_i32 tmp2;
6451	6422	 
6452	6423	    /* Load value and extend to 64 bits.  */
6453	6424	    tmp = tcg_temp_new_i64();
6462	6433	static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
6463	6434	{
6464	6435	    TCGv_i64 tmp;
6465	 	    TCGv tmpl;
6466	 	    TCGv tmph;
 	6436	    TCGv_i32 tmpl;
 	6437	    TCGv_i32 tmph;
6467	6438	 
6468	6439	    /* Load 64-bit value rd:rn.  */
6469	6440	    tmpl = load_reg(s, rlow);
6477	6448	}
6478	6449	 
6479	6450	/* Set N and Z flags from hi|lo.  */
6480	 	static void gen_logicq_cc(TCGv lo, TCGv hi)
 	6451	static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
6481	6452	{
6482	6453	    tcg_gen_mov_i32(cpu_NF, hi);
6483	6454	    tcg_gen_or_i32(cpu_ZF, lo, hi);
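These 64-bit helpers route every value through pairs of 32-bit temporaries, so only the declarations change from TCGv to TCGv_i32. For reference, the low/high split follows the usual truncate-and-shift idiom that gen_storeq_reg opens with above (a sketch consistent with the visible lines, not a verbatim copy of the elided remainder):

    /* Write a 64-bit val into guest registers rlow/rhigh. */
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(tmp, val);    /* low 32 bits */
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_shri_i64(val, val, 32);     /* clobbers val, as the comment warns */
    tcg_gen_trunc_i64_i32(tmp, val);    /* high 32 bits */
    store_reg(s, rhigh, tmp);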
6493	6464	   this sequence is effectively atomic.  In user emulation mode we
6494	6465	   throw an exception and handle the atomic operation elsewhere.  */
6495	6466	static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6496	 	                               TCGv addr, int size)
 	6467	                               TCGv_i32 addr, int size)
6497	6468	{
6498	 	    TCGv tmp;
 	6469	    TCGv_i32 tmp = tcg_temp_new_i32();
6499	6470	 
6500	6471	    switch (size) {
6501	6472	    case 0:
6502	 	        tmp = gen_ld8u(addr, IS_USER(s));
 	6473	        tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
6503	6474	        break;
6504	6475	    case 1:
6505	 	        tmp = gen_ld16u(addr, IS_USER(s));
 	6476	        tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
6506	6477	        break;
6507	6478	    case 2:
6508	6479	    case 3:
6509	 	        tmp = gen_ld32(addr, IS_USER(s));
 	6480	        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
6510	6481	        break;
6511	6482	    default:
6512	6483	        abort();
6514	6485	    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6515	6486	    store_reg(s, rt, tmp);
6516	6487	    if (size == 3) {
6517	 	        TCGv tmp2 = tcg_temp_new_i32();
 	6488	        TCGv_i32 tmp2 = tcg_temp_new_i32();
6518	6489	        tcg_gen_addi_i32(tmp2, addr, 4);
6519	 	        tmp = gen_ld32(tmp2, IS_USER(s));
 	6490	        tmp = tcg_temp_new_i32();
 	6491	        tcg_gen_qemu_ld32u(tmp, tmp2, IS_USER(s));
6520	6492	        tcg_temp_free_i32(tmp2);
6521	6493	        tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6522	6494	        store_reg(s, rt2, tmp);
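This is the mechanical pattern repeated through the rest of the patch: the old gen_ld*/gen_st* wrappers allocated or consumed the temporary internally, while the raw tcg_gen_qemu_* ops operate on a temp the caller owns. Old versus new, illustratively:

    /* old: wrapper returns a freshly allocated temp */
    tmp = gen_ld32(addr, IS_USER(s));

    /* new: caller allocates, then emits the load into it */
    tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));

For stores the direction flips: the caller emits tcg_gen_qemu_st32(tmp, addr, ...) and must then free tmp itself, which is why a tcg_temp_free_i32() appears after nearly every converted store below.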
6531	6503	 
6532	6504	#ifdef CONFIG_USER_ONLY
6533	6505	static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6534	 	                                TCGv addr, int size)
 	6506	                                TCGv_i32 addr, int size)
6535	6507	{
6536	6508	    tcg_gen_mov_i32(cpu_exclusive_test, addr);
6537	6509	    tcg_gen_movi_i32(cpu_exclusive_info,
6540	6512	}
6541	6513	#else
6542	6514	static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6543	 	                                TCGv addr, int size)
 	6515	                                TCGv_i32 addr, int size)
6544	6516	{
6545	 	    TCGv tmp;
 	6517	    TCGv_i32 tmp;
6546	6518	    int done_label;
6547	6519	    int fail_label;
6548	6520	 
6555	6527	    fail_label = gen_new_label();
6556	6528	    done_label = gen_new_label();
6557	6529	    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
 	6530	    tmp = tcg_temp_new_i32();
6558	6531	    switch (size) {
6559	6532	    case 0:
6560	 	        tmp = gen_ld8u(addr, IS_USER(s));
 	6533	        tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
6561	6534	        break;
6562	6535	    case 1:
6563	 	        tmp = gen_ld16u(addr, IS_USER(s));
 	6536	        tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
6564	6537	        break;
6565	6538	    case 2:
6566	6539	    case 3:
6567	 	        tmp = gen_ld32(addr, IS_USER(s));
 	6540	        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
6568	6541	        break;
6569	6542	    default:
6570	6543	        abort();
6572	6545	    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6573	6546	    tcg_temp_free_i32(tmp);
6574	6547	    if (size == 3) {
6575	 	        TCGv tmp2 = tcg_temp_new_i32();
 	6548	        TCGv_i32 tmp2 = tcg_temp_new_i32();
6576	6549	        tcg_gen_addi_i32(tmp2, addr, 4);
6577	 	        tmp = gen_ld32(tmp2, IS_USER(s));
 	6550	        tmp = tcg_temp_new_i32();
 	6551	        tcg_gen_qemu_ld32u(tmp, tmp2, IS_USER(s));
6578	6552	        tcg_temp_free_i32(tmp2);
6579	6553	        tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6580	6554	        tcg_temp_free_i32(tmp);
6582	6556	    tmp = load_reg(s, rt);
6583	6557	    switch (size) {
6584	6558	    case 0:
6585	 	        gen_st8(tmp, addr, IS_USER(s));
 	6559	        tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
6586	6560	        break;
6587	6561	    case 1:
6588	 	        gen_st16(tmp, addr, IS_USER(s));
 	6562	        tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
6589	6563	        break;
6590	6564	    case 2:
6591	6565	    case 3:
6592	 	        gen_st32(tmp, addr, IS_USER(s));
 	6566	        tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
6593	6567	        break;
6594	6568	    default:
6595	6569	        abort();
6596	6570	    }
 	6571	    tcg_temp_free_i32(tmp);
6597	6572	    if (size == 3) {
6598	6573	        tcg_gen_addi_i32(addr, addr, 4);
6599	6574	        tmp = load_reg(s, rt2);
6600	 	        gen_st32(tmp, addr, IS_USER(s));
 	6575	        tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
 	6576	        tcg_temp_free_i32(tmp);
6601	6577	    }
6602	6578	    tcg_gen_movi_i32(cpu_R[rd], 0);
6603	6579	    tcg_gen_br(done_label);
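The system-mode store-exclusive path is label-driven: any mismatch against the exclusive-monitor state branches to fail_label, while the success path writes 0 to rd and jumps past it. A sketch of the overall shape (the lines after tcg_gen_br fall outside this hunk, so the fail/done tail shown here is an assumption based on the architectural strex semantics):

    int fail_label = gen_new_label();
    int done_label = gen_new_label();
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    /* ... reload memory, brcond against cpu_exclusive_val ... */
    /* ... perform the store ... */
    tcg_gen_movi_i32(cpu_R[rd], 0);   /* success: rd = 0 */
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);   /* failure: rd = 1 */
    gen_set_label(done_label);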
6643	6619	    }
6644	6620	    tcg_gen_addi_i32(addr, addr, offset);
6645	6621	    tmp = load_reg(s, 14);
6646	 	    gen_st32(tmp, addr, 0);
 	6622	    tcg_gen_qemu_st32(tmp, addr, 0);
 	6623	    tcg_temp_free_i32(tmp);
6647	6624	    tmp = load_cpu_field(spsr);
6648	6625	    tcg_gen_addi_i32(addr, addr, 4);
6649	 	    gen_st32(tmp, addr, 0);
 	6626	    tcg_gen_qemu_st32(tmp, addr, 0);
 	6627	    tcg_temp_free_i32(tmp);
6650	6628	    if (writeback) {
6651	6629	        switch (amode) {
6652	6630	        case 0:
6675	6653	static void disas_arm_insn(CPUARMState * env, DisasContext *s)
6676	6654	{
6677	6655	    unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6678	 	    TCGv tmp;
6679	 	    TCGv tmp2;
6680	 	    TCGv tmp3;
6681	 	    TCGv addr;
 	6656	    TCGv_i32 tmp;
 	6657	    TCGv_i32 tmp2;
 	6658	    TCGv_i32 tmp3;
 	6659	    TCGv_i32 addr;
6682	6660	    TCGv_i64 tmp64;
6683	6661	 
6684	6662	    insn = arm_ldl_code(env, s->pc, s->bswap_code);
6789	6767	            if (offset)
6790	6768	                tcg_gen_addi_i32(addr, addr, offset);
6791	6769	            /* Load PC into tmp and CPSR into tmp2.  */
6792	 	            tmp = gen_ld32(addr, 0);
 	6770	            tmp = tcg_temp_new_i32();
 	6771	            tcg_gen_qemu_ld32u(tmp, addr, 0);
6793	6772	            tcg_gen_addi_i32(addr, addr, 4);
6794	 	            tmp2 = gen_ld32(addr, 0);
 	6773	            tmp2 = tcg_temp_new_i32();
 	6774	            tcg_gen_qemu_ld32u(tmp2, addr, 0);
6795	6775	            if (insn & (1 << 21)) {
6796	6776	                /* Base writeback.  */
6797	6777	                switch (i) {
7100	7080	            rn = (insn >> 16) & 0xf;
7101	7081	            tmp = load_reg(s, rn);
7102	7082	        } else {
7103	 	            TCGV_UNUSED(tmp);
 	7083	            TCGV_UNUSED_I32(tmp);
7104	7084	        }
7105	7085	        rd = (insn >> 12) & 0xf;
7106	7086	        switch(op1) {
7301	7281	                            tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
7302	7282	                        }
7303	7283	                        if (insn & (1 << 21)) { /* mult accumulate */
7304	 	                            TCGv al = load_reg(s, rn);
7305	 	                            TCGv ah = load_reg(s, rd);
 	7284	                            TCGv_i32 al = load_reg(s, rn);
 	7285	                            TCGv_i32 ah = load_reg(s, rd);
7306	7286	                            tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
7307	 	                            tcg_temp_free(al);
7308	 	                            tcg_temp_free(ah);
 	7287	                            tcg_temp_free_i32(al);
 	7288	                            tcg_temp_free_i32(ah);
7309	7289	                        }
7310	7290	                        if (insn & (1 << 20)) {
7311	7291	                            gen_logicq_cc(tmp, tmp2);
7321	7301	                    rd = (insn >> 12) & 0xf;
7322	7302	                    if (insn & (1 << 23)) {
7323	7303	                        /* load/store exclusive */
 	7304	                        int op2 = (insn >> 8) & 3;
7324	7305	                        op1 = (insn >> 21) & 0x3;
7325	 	                        if (op1)
7326	 	                            ARCH(6K);
7327	 	                        else
7328	 	                            ARCH(6);
 	7306	 
 	7307	                        switch (op2) {
 	7308	                        case 0: /* lda/stl */
 	7309	                            if (op1 == 1) {
 	7310	                                goto illegal_op;
 	7311	                            }
 	7312	                            ARCH(8);
 	7313	                            break;
 	7314	                        case 1: /* reserved */
 	7315	                            goto illegal_op;
 	7316	                        case 2: /* ldaex/stlex */
 	7317	                            ARCH(8);
 	7318	                            break;
 	7319	                        case 3: /* ldrex/strex */
 	7320	                            if (op1) {
 	7321	                                ARCH(6K);
 	7322	                            } else {
 	7323	                                ARCH(6);
 	7324	                            }
 	7325	                            break;
 	7326	                        }
 	7327	 
7329	7328	                        addr = tcg_temp_local_new_i32();
7330	7329	                        load_reg_var(s, addr, rn);
7331	 	                        if (insn & (1 << 20)) {
 	7330	 
 	7331	                        /* Since the emulation does not have barriers,
 	7332	                           the acquire/release semantics need no special
 	7333	                           handling */
 	7334	                        if (op2 == 0) {
 	7335	                            if (insn & (1 << 20)) {
 	7336	                                tmp = tcg_temp_new_i32();
 	7337	                                switch (op1) {
 	7338	                                case 0: /* lda */
 	7339	                                    tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
 	7340	                                    break;
 	7341	                                case 2: /* ldab */
 	7342	                                    tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
 	7343	                                    break;
 	7344	                                case 3: /* ldah */
 	7345	                                    tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
 	7346	                                    break;
 	7347	                                default:
 	7348	                                    abort();
 	7349	                                }
 	7350	                                store_reg(s, rd, tmp);
 	7351	                            } else {
 	7352	                                rm = insn & 0xf;
 	7353	                                tmp = load_reg(s, rm);
 	7354	                                switch (op1) {
 	7355	                                case 0: /* stl */
 	7356	                                    tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
 	7357	                                    break;
 	7358	                                case 2: /* stlb */
 	7359	                                    tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
 	7360	                                    break;
 	7361	                                case 3: /* stlh */
 	7362	                                    tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
 	7363	                                    break;
 	7364	                                default:
 	7365	                                    abort();
 	7366	                                }
 	7367	                                tcg_temp_free_i32(tmp);
 	7368	                            }
 	7369	                        } else if (insn & (1 << 20)) {
7332	7370	                            switch (op1) {
7333	7371	                            case 0: /* ldrex */
7334	7372	                                gen_load_exclusive(s, rd, 15, addr, 2);
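The new ARM decode distinguishes the ARMv8 load-acquire/store-release family from the classic exclusives by two instruction fields, checked before any code is emitted: op2 = (insn >> 8) & 3 selects the family and op1 = (insn >> 21) & 0x3 the access size. As a summary of the switch above:

    /* op2 == 0 : lda/stl       (ARCH(8); op1 == 1 is reserved)
     * op2 == 1 : reserved      -> illegal_op
     * op2 == 2 : ldaex/stlex   (ARCH(8))
     * op2 == 3 : ldrex/strex   (ARCH(6K) when sized, else ARCH(6))
     */

Since the emulation has no reordering to suppress, lda/stl need nothing beyond a plain load or store, which is why the op2 == 0 arm simply dispatches on op1 to the matching tcg_gen_qemu_* op.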
7364	7402	                                abort();
7365	7403	                            }
7366	7404	                        }
7367	 	                        tcg_temp_free(addr);
 	7405	                        tcg_temp_free_i32(addr);
7368	7406	                    } else {
7369	7407	                        /* SWP instruction */
7370	7408	                        rm = (insn) & 0xf;
7374	7412	                           so it is good enough.  */
7375	7413	                        addr = load_reg(s, rn);
7376	7414	                        tmp = load_reg(s, rm);
 	7415	                        tmp2 = tcg_temp_new_i32();
7377	7416	                        if (insn & (1 << 22)) {
7378	 	                            tmp2 = gen_ld8u(addr, IS_USER(s));
7379	 	                            gen_st8(tmp, addr, IS_USER(s));
 	7417	                            tcg_gen_qemu_ld8u(tmp2, addr, IS_USER(s));
 	7418	                            tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
7380	7419	                        } else {
7381	 	                            tmp2 = gen_ld32(addr, IS_USER(s));
7382	 	                            gen_st32(tmp, addr, IS_USER(s));
 	7420	                            tcg_gen_qemu_ld32u(tmp2, addr, IS_USER(s));
 	7421	                            tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
7383	7422	                        }
 	7423	                        tcg_temp_free_i32(tmp);
7384	7424	                        tcg_temp_free_i32(addr);
7385	7425	                        store_reg(s, rd, tmp2);
7386	7426	                    }
7397	7437	                address_offset = 0;
7398	7438	                if (insn & (1 << 20)) {
7399	7439	                    /* load */
 	7440	                    tmp = tcg_temp_new_i32();
7400	7441	                    switch(sh) {
7401	7442	                    case 1:
7402	 	                        tmp = gen_ld16u(addr, IS_USER(s));
 	7443	                        tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
7403	7444	                        break;
7404	7445	                    case 2:
7405	 	                        tmp = gen_ld8s(addr, IS_USER(s));
 	7446	                        tcg_gen_qemu_ld8s(tmp, addr, IS_USER(s));
7406	7447	                        break;
7407	7448	                    default:
7408	7449	                    case 3:
7409	 	                        tmp = gen_ld16s(addr, IS_USER(s));
 	7450	                        tcg_gen_qemu_ld16s(tmp, addr, IS_USER(s));
7410	7451	                        break;
7411	7452	                    }
7412	7453	                    load = 1;
7416	7457	                    if (sh & 1) {
7417	7458	                        /* store */
7418	7459	                        tmp = load_reg(s, rd);
7419	 	                        gen_st32(tmp, addr, IS_USER(s));
 	7460	                        tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
 	7461	                        tcg_temp_free_i32(tmp);
7420	7462	                        tcg_gen_addi_i32(addr, addr, 4);
7421	7463	                        tmp = load_reg(s, rd + 1);
7422	 	                        gen_st32(tmp, addr, IS_USER(s));
 	7464	                        tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
 	7465	                        tcg_temp_free_i32(tmp);
7423	7466	                        load = 0;
7424	7467	                    } else {
7425	7468	                        /* load */
7426	 	                        tmp = gen_ld32(addr, IS_USER(s));
 	7469	                        tmp = tcg_temp_new_i32();
 	7470	                        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
7427	7471	                        store_reg(s, rd, tmp);
7428	7472	                        tcg_gen_addi_i32(addr, addr, 4);
7429	 	                        tmp = gen_ld32(addr, IS_USER(s));
 	7473	                        tmp = tcg_temp_new_i32();
 	7474	                        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
7430	7475	                        rd++;
7431	7476	                        load = 1;
7432	7477	                    }
7434	7479	                } else {
7435	7480	                    /* store */
7436	7481	                    tmp = load_reg(s, rd);
7437	 	                    gen_st16(tmp, addr, IS_USER(s));
 	7482	                    tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
 	7483	                    tcg_temp_free_i32(tmp);
7438	7484	                    load = 0;
7439	7485	                }
7440	7486	                /* Perform base writeback before the loaded value to
7764	7810	                gen_add_data_offset(s, insn, tmp2);
7765	7811	            if (insn & (1 << 20)) {
7766	7812	                /* load */
 	7813	                tmp = tcg_temp_new_i32();
7767	7814	                if (insn & (1 << 22)) {
7768	 	                    tmp = gen_ld8u(tmp2, i);
 	7815	                    tcg_gen_qemu_ld8u(tmp, tmp2, i);
7769	7816	                } else {
7770	 	                    tmp = gen_ld32(tmp2, i);
 	7817	                    tcg_gen_qemu_ld32u(tmp, tmp2, i);
7771	7818	                }
7772	7819	            } else {
7773	7820	                /* store */
7774	7821	                tmp = load_reg(s, rd);
7775	 	                if (insn & (1 << 22))
7776	 	                    gen_st8(tmp, tmp2, i);
7777	 	                else
7778	 	                    gen_st32(tmp, tmp2, i);
 	7822	                if (insn & (1 << 22)) {
 	7823	                    tcg_gen_qemu_st8(tmp, tmp2, i);
 	7824	                } else {
 	7825	                    tcg_gen_qemu_st32(tmp, tmp2, i);
 	7826	                }
 	7827	                tcg_temp_free_i32(tmp);
7779	7828	            }
7780	7829	            if (!(insn & (1 << 24))) {
7781	7830	                gen_add_data_offset(s, insn, tmp2);
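Note that the conversion also tightens temporary lifetimes on the store side: load_reg() hands back a fresh 32-bit temp, the old gen_st* wrappers released it implicitly, and the new code frees it explicitly once the store op has been emitted. The braced byte/word store above is the canonical result:

    tmp = load_reg(s, rd);              /* fresh temp holding the register */
    if (insn & (1 << 22)) {
        tcg_gen_qemu_st8(tmp, tmp2, i);
    } else {
        tcg_gen_qemu_st32(tmp, tmp2, i);
    }
    tcg_temp_free_i32(tmp);             /* previously freed inside gen_st* */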
7794	7843	        case 0x09:
7795	7844	            {
7796	7845	                int j, n, user, loaded_base;
7797	 	                TCGv loaded_var;
 	7846	                TCGv_i32 loaded_var;
7798	7847	                /* load/store multiple words */
7799	7848	                /* XXX: store correct base if write back */
7800	7849	                user = 0;
7810	7859	 
7811	7860	                /* compute total size */
7812	7861	                loaded_base = 0;
7813	 	                TCGV_UNUSED(loaded_var);
 	7862	                TCGV_UNUSED_I32(loaded_var);
7814	7863	                n = 0;
7815	7864	                for(i=0;i<16;i++) {
7816	7865	                    if (insn & (1 << i))
7839	7888	                    if (insn & (1 << i)) {
7840	7889	                        if (insn & (1 << 20)) {
7841	7890	                            /* load */
7842	 	                            tmp = gen_ld32(addr, IS_USER(s));
 	7891	                            tmp = tcg_temp_new_i32();
 	7892	                            tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
7843	7893	                            if (user) {
7844	7894	                                tmp2 = tcg_const_i32(i);
7845	7895	                                gen_helper_set_user_reg(cpu_env, tmp2, tmp);
7866	7916	                            } else {
7867	7917	                                tmp = load_reg(s, i);
7868	7918	                            }
7869	 	                            gen_st32(tmp, addr, IS_USER(s));
 	7919	                            tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
 	7920	                            tcg_temp_free_i32(tmp);
7870	7921	                        }
7871	7922	                        j++;
7872	7923	                        /* no need to add after the last transfer */
7960	8011	   Returns zero if the opcode is valid.  */
7961	8012	 
7962	8013	static int
7963	 	gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
 	8014	gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
 	8015	                   TCGv_i32 t0, TCGv_i32 t1)
7964	8016	{
7965	8017	    int logic_cc;
7966	8018	 
8034	8086	{
8035	8087	    uint32_t insn, imm, shift, offset;
8036	8088	    uint32_t rd, rn, rm, rs;
8037	 	    TCGv tmp;
8038	 	    TCGv tmp2;
8039	 	    TCGv tmp3;
8040	 	    TCGv addr;
 	8089	    TCGv_i32 tmp;
 	8090	    TCGv_i32 tmp2;
 	8091	    TCGv_i32 tmp3;
 	8092	    TCGv_i32 addr;
8041	8093	    TCGv_i64 tmp64;
8042	8094	    int op;
8043	8095	    int shiftop;
8122	8174	                }
8123	8175	                if (insn & (1 << 20)) {
8124	8176	                    /* ldrd */
8125	 	                    tmp = gen_ld32(addr, IS_USER(s));
 	8177	                    tmp = tcg_temp_new_i32();
 	8178	                    tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
8126	8179	                    store_reg(s, rs, tmp);
8127	8180	                    tcg_gen_addi_i32(addr, addr, 4);
8128	 	                    tmp = gen_ld32(addr, IS_USER(s));
 	8181	                    tmp = tcg_temp_new_i32();
 	8182	                    tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
8129	8183	                    store_reg(s, rd, tmp);
8130	8184	                } else {
8131	8185	                    /* strd */
8132	8186	                    tmp = load_reg(s, rs);
8133	 	                    gen_st32(tmp, addr, IS_USER(s));
 	8187	                    tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
 	8188	                    tcg_temp_free_i32(tmp);
8134	8189	                    tcg_gen_addi_i32(addr, addr, 4);
8135	8190	                    tmp = load_reg(s, rd);
8136	 	                    gen_st32(tmp, addr, IS_USER(s));
 	8191	                    tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
 	8192	                    tcg_temp_free_i32(tmp);
8137	8193	                }
8138	8194	                if (insn & (1 << 21)) {
8139	8195	                    /* Base writeback.  */
8146	8202	                }
8147	8203	            } else if ((insn & (1 << 23)) == 0) {
8148	8204	                /* Load/store exclusive word.  */
8149	 	                addr = tcg_temp_local_new();
 	8205	                addr = tcg_temp_local_new_i32();
8150	8206	                load_reg_var(s, addr, rn);
8151	8207	                tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
8152	8208	                if (insn & (1 << 20)) {
8154	8210	                } else {
8155	8211	                    gen_store_exclusive(s, rd, rs, 15, addr, 2);
8156	8212	                }
8157	 	                tcg_temp_free(addr);
8158	 	            } else if ((insn & (1 << 6)) == 0) {
 	8213	                tcg_temp_free_i32(addr);
 	8214	            } else if ((insn & (7 << 5)) == 0) {
8159	8215	                /* Table Branch.  */
8160	8216	                if (rn == 15) {
8161	8217	                    addr = tcg_temp_new_i32();
8169	8225	                    /* tbh */
8170	8226	                    tcg_gen_add_i32(addr, addr, tmp);
8171	8227	                    tcg_temp_free_i32(tmp);
8172	 	                    tmp = gen_ld16u(addr, IS_USER(s));
 	8228	                    tmp = tcg_temp_new_i32();
 	8229	                    tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
8173	8230	                } else { /* tbb */
8174	8231	                    tcg_temp_free_i32(tmp);
8175	 	                    tmp = gen_ld8u(addr, IS_USER(s));
 	8232	                    tmp = tcg_temp_new_i32();
 	8233	                    tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
8176	8234	                }
8177	8235	                tcg_temp_free_i32(addr);
8178	8236	                tcg_gen_shli_i32(tmp, tmp, 1);
8179	8237	                tcg_gen_addi_i32(tmp, tmp, s->pc);
8180	8238	                store_reg(s, 15, tmp);
8181	8239	            } else {
8182	 	                /* Load/store exclusive byte/halfword/doubleword.  */
8183	 	                ARCH(7);
 	8240	                int op2 = (insn >> 6) & 0x3;
8184	8241	                op = (insn >> 4) & 0x3;
8185	 	                if (op == 2) {
 	8242	                switch (op2) {
 	8243	                case 0:
8186	8244	                    goto illegal_op;
 	8245	                case 1:
 	8246	                    /* Load/store exclusive byte/halfword/doubleword */
 	8247	                    if (op == 2) {
 	8248	                        goto illegal_op;
 	8249	                    }
 	8250	                    ARCH(7);
 	8251	                    break;
 	8252	                case 2:
 	8253	                    /* Load-acquire/store-release */
 	8254	                    if (op == 3) {
 	8255	                        goto illegal_op;
 	8256	                    }
 	8257	                    /* Fall through */
 	8258	                case 3:
 	8259	                    /* Load-acquire/store-release exclusive */
 	8260	                    ARCH(8);
 	8261	                    break;
8187	8262	                }
8188	 	                addr = tcg_temp_local_new();
 	8263	                addr = tcg_temp_local_new_i32();
8189	8264	                load_reg_var(s, addr, rn);
8190	 	                if (insn & (1 << 20)) {
 	8265	                if (!(op2 & 1)) {
 	8266	                    if (insn & (1 << 20)) {
 	8267	                        tmp = tcg_temp_new_i32();
 	8268	                        switch (op) {
 	8269	                        case 0: /* ldab */
 	8270	                            tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
 	8271	                            break;
 	8272	                        case 1: /* ldah */
 	8273	                            tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
 	8274	                            break;
 	8275	                        case 2: /* lda */
 	8276	                            tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
 	8277	                            break;
 	8278	                        default:
 	8279	                            abort();
 	8280	                        }
 	8281	                        store_reg(s, rs, tmp);
 	8282	                    } else {
 	8283	                        tmp = load_reg(s, rs);
 	8284	                        switch (op) {
 	8285	                        case 0: /* stlb */
 	8286	                            tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
 	8287	                            break;
 	8288	                        case 1: /* stlh */
 	8289	                            tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
 	8290	                            break;
 	8291	                        case 2: /* stl */
 	8292	                            tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
 	8293	                            break;
 	8294	                        default:
 	8295	                            abort();
 	8296	                        }
 	8297	                        tcg_temp_free_i32(tmp);
 	8298	                    }
 	8299	                } else if (insn & (1 << 20)) {
8191	8300	                    gen_load_exclusive(s, rs, rd, addr, op);
8192	8301	                } else {
8193	8302	                    gen_store_exclusive(s, rm, rs, rd, addr, op);
8194	8303	                }
8195	 	                tcg_temp_free(addr);
 	8304	                tcg_temp_free_i32(addr);
8196	8305	            }
8197	8306	        } else {
8198	8307	            /* Load/store multiple, RFE, SRS.  */
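The Thumb-2 side mirrors the ARM decode, but carries the family selector in bits [7:6] (op2) with the size in bits [5:4] (op); the widened Table Branch test, (7 << 5) in place of (1 << 6), is what frees up those encodings. Schematically (a summary of the switch above, not new logic):

    int op2 = (insn >> 6) & 0x3;   /* 0: illegal
                                      1: exclusive byte/half/double (ARCH(7))
                                      2: load-acquire/store-release
                                      3: ...-release exclusive (both ARCH(8)) */
    op = (insn >> 4) & 0x3;        /* access size; doubleword only for exclusives */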
8207	8316	                    if ((insn & (1 << 24)) == 0)
8208	8317	                        tcg_gen_addi_i32(addr, addr, -8);
8209	8318	                    /* Load PC into tmp and CPSR into tmp2.  */
8210	 	                    tmp = gen_ld32(addr, 0);
 	8319	                    tmp = tcg_temp_new_i32();
 	8320	                    tcg_gen_qemu_ld32u(tmp, addr, 0);
8211	8321	                    tcg_gen_addi_i32(addr, addr, 4);
8212	 	                    tmp2 = gen_ld32(addr, 0);
 	8322	                    tmp2 = tcg_temp_new_i32();
 	8323	                    tcg_gen_qemu_ld32u(tmp2, addr, 0);
8213	8324	                    if (insn & (1 << 21)) {
8214	8325	                        /* Base writeback.  */
8215	8326	                        if (insn & (1 << 24)) {
8229	8340	                }
8230	8341	            } else {
8231	8342	                int i, loaded_base = 0;
8232	 	                TCGv loaded_var;
 	8343	                TCGv_i32 loaded_var;
8233	8344	                /* Load/store multiple.  */
8234	8345	                addr = load_reg(s, rn);
8235	8346	                offset = 0;
8241	8352	                    tcg_gen_addi_i32(addr, addr, -offset);
8242	8353	                }
8243	8354	 
8244	 	                TCGV_UNUSED(loaded_var);
 	8355	                TCGV_UNUSED_I32(loaded_var);
8245	8356	                for (i = 0; i < 16; i++) {
8246	8357	                    if ((insn & (1 << i)) == 0)
8247	8358	                        continue;
8248	8359	                    if (insn & (1 << 20)) {
8249	8360	                        /* Load.  */
8250	 	                        tmp = gen_ld32(addr, IS_USER(s));
 	8361	                        tmp = tcg_temp_new_i32();
 	8362	                        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
8251	8363	                        if (i == 15) {
8252	8364	                            gen_bx(s, tmp);
8253	8365	                        } else if (i == rn) {
8259	8371	                    } else {
8260	8372	                        /* Store.  */
8261	8373	                        tmp = load_reg(s, i);
8262	 	                        gen_st32(tmp, addr, IS_USER(s));
 	8374	                        tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
 	8375	                        tcg_temp_free_i32(tmp);
8263	8376	                    }
8264	8377	                    tcg_gen_addi_i32(addr, addr, 4);
8265	8378	                }
9035	9148	        }
9036	9149	        if (insn & (1 << 20)) {
9037	9150	            /* Load.  */
 	9151	            tmp = tcg_temp_new_i32();
9038	9152	            switch (op) {
9039	 	            case 0: tmp = gen_ld8u(addr, user); break;
9040	 	            case 4: tmp = gen_ld8s(addr, user); break;
9041	 	            case 1: tmp = gen_ld16u(addr, user); break;
9042	 	            case 5: tmp = gen_ld16s(addr, user); break;
9043	 	            case 2: tmp = gen_ld32(addr, user); break;
 	9153	            case 0:
 	9154	                tcg_gen_qemu_ld8u(tmp, addr, user);
 	9155	                break;
 	9156	            case 4:
 	9157	                tcg_gen_qemu_ld8s(tmp, addr, user);
 	9158	                break;
 	9159	            case 1:
 	9160	                tcg_gen_qemu_ld16u(tmp, addr, user);
 	9161	                break;
 	9162	            case 5:
 	9163	                tcg_gen_qemu_ld16s(tmp, addr, user);
 	9164	                break;
 	9165	            case 2:
 	9166	                tcg_gen_qemu_ld32u(tmp, addr, user);
 	9167	                break;
9044	9168	            default:
 	9169	                tcg_temp_free_i32(tmp);
9045	9170	                tcg_temp_free_i32(addr);
9046	9171	                goto illegal_op;
9047	9172	            }
9054	9179	            /* Store.  */
9055	9180	            tmp = load_reg(s, rs);
9056	9181	            switch (op) {
9057	 	            case 0: gen_st8(tmp, addr, user); break;
9058	 	            case 1: gen_st16(tmp, addr, user); break;
9059	 	            case 2: gen_st32(tmp, addr, user); break;
 	9182	            case 0:
 	9183	                tcg_gen_qemu_st8(tmp, addr, user);
 	9184	                break;
 	9185	            case 1:
 	9186	                tcg_gen_qemu_st16(tmp, addr, user);
 	9187	                break;
 	9188	            case 2:
 	9189	                tcg_gen_qemu_st32(tmp, addr, user);
 	9190	                break;
9060	9191	            default:
 	9192	                tcg_temp_free_i32(tmp);
9061	9193	                tcg_temp_free_i32(addr);
9062	9194	                goto illegal_op;
9063	9195	            }
 	9196	            tcg_temp_free_i32(tmp);
9064	9197	        }
9065	9198	        if (postinc)
9066	9199	            tcg_gen_addi_i32(addr, addr, imm);
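Because the temp is now allocated before the switch, the default (illegal opcode) arms gain an explicit tcg_temp_free_i32(tmp) so nothing leaks on the bail-out path, and the store side frees tmp once after the switch rather than inside each wrapper. In outline:

    tmp = tcg_temp_new_i32();          /* allocated up front */
    switch (op) {
    /* ... one tcg_gen_qemu_ld* per access size ... */
    default:
        tcg_temp_free_i32(tmp);        /* release before goto illegal_op */
        tcg_temp_free_i32(addr);
        goto illegal_op;
    }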
9084	9217	    uint32_t val, insn, op, rm, rn, rd, shift, cond;
9085	9218	    int32_t offset;
9086	9219	    int i;
9087	 	    TCGv tmp;
9088	 	    TCGv tmp2;
9089	 	    TCGv addr;
 	9220	    TCGv_i32 tmp;
 	9221	    TCGv_i32 tmp2;
 	9222	    TCGv_i32 addr;
9090	9223	 
9091	9224	    if (s->condexec_mask) {
9092	9225	        cond = s->condexec_cond;
9189	9322	            val &= ~(uint32_t)2;
9190	9323	            addr = tcg_temp_new_i32();
9191	9324	            tcg_gen_movi_i32(addr, val);
9192	 	            tmp = gen_ld32(addr, IS_USER(s));
 	9325	            tmp = tcg_temp_new_i32();
 	9326	            tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
9193	9327	            tcg_temp_free_i32(addr);
9194	9328	            store_reg(s, rd, tmp);
9195	9329	            break;
9254	9388	        } else if (op != 0xf) { /* mvn doesn't read its first operand */
9255	9389	            tmp = load_reg(s, rd);
9256	9390	        } else {
9257	 	            TCGV_UNUSED(tmp);
 	9391	            TCGV_UNUSED_I32(tmp);
9258	9392	        }
9259	9393	 
9260	9394	        tmp2 = load_reg(s, rm);
9384	9518	        tcg_gen_add_i32(addr, addr, tmp);
9385	9519	        tcg_temp_free_i32(tmp);
9386	9520	 
9387	 	        if (op < 3) /* store */
 	9521	        if (op < 3) { /* store */
9388	9522	            tmp = load_reg(s, rd);
 	9523	        } else {
 	9524	            tmp = tcg_temp_new_i32();
 	9525	        }
9389	9526	 
9390	9527	        switch (op) {
9391	9528	        case 0: /* str */
9392	 	            gen_st32(tmp, addr, IS_USER(s));
 	9529	            tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9393	9530	            break;
9394	9531	        case 1: /* strh */
9395	 	            gen_st16(tmp, addr, IS_USER(s));
 	9532	            tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
9396	9533	            break;
9397	9534	        case 2: /* strb */
9398	 	            gen_st8(tmp, addr, IS_USER(s));
 	9535	            tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
9399	9536	            break;
9400	9537	        case 3: /* ldrsb */
9401	 	            tmp = gen_ld8s(addr, IS_USER(s));
 	9538	            tcg_gen_qemu_ld8s(tmp, addr, IS_USER(s));
9402	9539	            break;
9403	9540	        case 4: /* ldr */
9404	 	            tmp = gen_ld32(addr, IS_USER(s));
 	9541	            tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
9405	9542	            break;
9406	9543	        case 5: /* ldrh */
9407	 	            tmp = gen_ld16u(addr, IS_USER(s));
 	9544	            tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
9408	9545	            break;
9409	9546	        case 6: /* ldrb */
9410	 	            tmp = gen_ld8u(addr, IS_USER(s));
 	9547	            tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
9411	9548	            break;
9412	9549	        case 7: /* ldrsh */
9413	 	            tmp = gen_ld16s(addr, IS_USER(s));
 	9550	            tcg_gen_qemu_ld16s(tmp, addr, IS_USER(s));
9414	9551	            break;
9415	9552	        }
9416	 	        if (op >= 3) /* load */
 	9553	        if (op >= 3) { /* load */
9417	9554	            store_reg(s, rd, tmp);
 	9555	        } else {
 	9556	            tcg_temp_free_i32(tmp);
 	9557	        }
9418	9558	        tcg_temp_free_i32(addr);
9419	9559	        break;
9420	9560	 
9428	9568	 
9429	9569	        if (insn & (1 << 11)) {
9430	9570	            /* load */
9431	 	            tmp = gen_ld32(addr, IS_USER(s));
 	9571	            tmp = tcg_temp_new_i32();
 	9572	            tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
9432	9573	            store_reg(s, rd, tmp);
9433	9574	        } else {
9434	9575	            /* store */
9435	9576	            tmp = load_reg(s, rd);
9436	 	            gen_st32(tmp, addr, IS_USER(s));
 	9577	            tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
 	9578	            tcg_temp_free_i32(tmp);
9437	9579	        }
9438	9580	        tcg_temp_free_i32(addr);
9439	9581	        break;
9448	9590	 
9449	9591	        if (insn & (1 << 11)) {
9450	9592	            /* load */
9451	 	            tmp = gen_ld8u(addr, IS_USER(s));
 	9593	            tmp = tcg_temp_new_i32();
 	9594	            tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
9452	9595	            store_reg(s, rd, tmp);
9453	9596	        } else {
9454	9597	            /* store */
9455	9598	            tmp = load_reg(s, rd);
9456	 	            gen_st8(tmp, addr, IS_USER(s));
 	9599	            tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
 	9600	            tcg_temp_free_i32(tmp);
9457	9601	        }
9458	9602	        tcg_temp_free_i32(addr);
9459	9603	        break;
9468	9612	 
9469	9613	        if (insn & (1 << 11)) {
9470	9614	            /* load */
9471	 	            tmp = gen_ld16u(addr, IS_USER(s));
 	9615	            tmp = tcg_temp_new_i32();
 	9616	            tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
9472	9617	            store_reg(s, rd, tmp);
9473	9618	        } else {
9474	9619	            /* store */
9475	9620	            tmp = load_reg(s, rd);
9476	 	            gen_st16(tmp, addr, IS_USER(s));
 	9621	            tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
 	9622	            tcg_temp_free_i32(tmp);
9477	9623	        }
9478	9624	        tcg_temp_free_i32(addr);
9479	9625	        break;
9487	9633	 
9488	9634	        if (insn & (1 << 11)) {
9489	9635	            /* load */
9490	 	            tmp = gen_ld32(addr, IS_USER(s));
 	9636	            tmp = tcg_temp_new_i32();
 	9637	            tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
9491	9638	            store_reg(s, rd, tmp);
9492	9639	        } else {
9493	9640	            /* store */
9494	9641	            tmp = load_reg(s, rd);
9495	 	            gen_st32(tmp, addr, IS_USER(s));
 	9642	            tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
 	9643	            tcg_temp_free_i32(tmp);
9496	9644	        }
9497	9645	        tcg_temp_free_i32(addr);
9498	9646	        break;
9558	9706	                if (insn & (1 << i)) {
9559	9707	                    if (insn & (1 << 11)) {
9560	9708	                        /* pop */
9561	 	                        tmp = gen_ld32(addr, IS_USER(s));
 	9709	                        tmp = tcg_temp_new_i32();
 	9710	                        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
9562	9711	                        store_reg(s, i, tmp);
9563	9712	                    } else {
9564	9713	                        /* push */
9565	9714	                        tmp = load_reg(s, i);
9566	 	                        gen_st32(tmp, addr, IS_USER(s));
 	9715	                        tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
 	9716	                        tcg_temp_free_i32(tmp);
9567	9717	                    }
9568	9718	                    /* advance to the next address.  */
9569	9719	                    tcg_gen_addi_i32(addr, addr, 4);
9570	9720	                }
9571	9721	            }
9572	 	            TCGV_UNUSED(tmp);
 	9722	            TCGV_UNUSED_I32(tmp);
9573	9723	            if (insn & (1 << 8)) {
9574	9724	                if (insn & (1 << 11)) {
9575	9725	                    /* pop pc */
9576	 	                    tmp = gen_ld32(addr, IS_USER(s));
 	9726	                    tmp = tcg_temp_new_i32();
 	9727	                    tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
9577	9728	                    /* don't set the pc until the rest of the instruction
9578	9729	                       has completed */
9579	9730	                } else {
9580	9731	                    /* push lr */
9581	9732	                    tmp = load_reg(s, 14);
9582	 	                    gen_st32(tmp, addr, IS_USER(s));
 	9733	                    tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
 	9734	                    tcg_temp_free_i32(tmp);
9583	9735	                }
9584	9736	                tcg_gen_addi_i32(addr, addr, 4);
9585	9737	            }
9694	9846	    case 12:
9695	9847	    {
9696	9848	        /* load/store multiple */
9697	 	        TCGv loaded_var;
9698	 	        TCGV_UNUSED(loaded_var);
 	9849	        TCGv_i32 loaded_var;
 	9850	        TCGV_UNUSED_I32(loaded_var);
9699	9851	        rn = (insn >> 8) & 0x7;
9700	9852	        addr = load_reg(s, rn);
9701	9853	        for (i = 0; i < 8; i++) {
9702	9854	            if (insn & (1 << i)) {
9703	9855	                if (insn & (1 << 11)) {
9704	9856	                    /* load */
9705	 	                    tmp = gen_ld32(addr, IS_USER(s));
 	9857	                    tmp = tcg_temp_new_i32();
 	9858	                    tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
9706	9859	                    if (i == rn) {
9707	9860	                        loaded_var = tmp;
9708	9861	                    } else {
9711	9864	                } else {
9712	9865	                    /* store */
9713	9866	                    tmp = load_reg(s, i);
9714	 	                    gen_st32(tmp, addr, IS_USER(s));
 	9867	                    tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
 	9868	                    tcg_temp_free_i32(tmp);
9715	9869	                }
9716	9870	                /* advance to the next address */
9717	9871	                tcg_gen_addi_i32(addr, addr, 4);
9783	9937	/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9784	9938	   basic block 'tb'. If search_pc is TRUE, also generate PC
9785	9939	   information for each intermediate instruction. */
9786	 	static inline void gen_intermediate_code_internal(CPUARMState *env,
 	9940	static inline void gen_intermediate_code_internal(ARMCPU *cpu,
9787	9941	                                                  TranslationBlock *tb,
9788	 	                                                  int search_pc)
 	9942	                                                  bool search_pc)
9789	9943	{
 	9944	    CPUState *cs = CPU(cpu);
 	9945	    CPUARMState *env = &cpu->env;
9790	9946	    DisasContext dc1, *dc = &dc1;
9791	9947	    CPUBreakpoint *bp;
9792	9948	    uint16_t *gen_opc_end;
9805	9961	 
9806	9962	    dc->is_jmp = DISAS_NEXT;
9807	9963	    dc->pc = pc_start;
9808	 	    dc->singlestep_enabled = env->singlestep_enabled;
 	9964	    dc->singlestep_enabled = cs->singlestep_enabled;
9809	9965	    dc->condjmp = 0;
9810	9966	    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9811	9967	    dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
9871	10027	       complications trying to do it at the end of the block.  */
9872	10028	    if (dc->condexec_mask || dc->condexec_cond)
9873	10029	      {
9874	 	        TCGv tmp = tcg_temp_new_i32();
 	10030	        TCGv_i32 tmp = tcg_temp_new_i32();
9875	10031	        tcg_gen_movi_i32(tmp, 0);
9876	10032	        store_cpu_field(tmp, condexec_bits);
9877	10033	      }
9903	10059	                       invalidate this TB.  */
9904	10060	                    dc->pc += 2;
9905	10061	                    goto done_generating;
9906	 	                    break;
9907	10062	                }
9908	10063	            }
9909	10064	        }
9956	10111	         * ensures prefetch aborts occur at the right place.  */
9957	10112	        num_insns ++;
9958	10113	    } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
9959	 	             !env->singlestep_enabled &&
 	10114	             !cs->singlestep_enabled &&
9960	10115	             !singlestep &&
9961	10116	             dc->pc < next_page_start &&
9962	10117	             num_insns < max_insns);
9973	10128	    /* At this stage dc->condjmp will only be set when the skipped
9974	10129	       instruction was a conditional branch or trap, and the PC has
9975	10130	       already been written.  */
9976	 	    if (unlikely(env->singlestep_enabled)) {
 	10131	    if (unlikely(cs->singlestep_enabled)) {
9977	10132	        /* Make sure the pc is updated, and raise a debug exception.  */
9978	10133	        if (dc->condjmp) {
9979	10134	            gen_set_condexec(dc);
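The translator entry point now takes the QOM ARMCPU object rather than the raw register file, deriving both views at the top of the function, and single-stepping state is read from the common CPUState. A sketch of the pattern, matching the added lines above:

    CPUState *cs = CPU(cpu);          /* generic CPU: singlestep flag, etc. */
    CPUARMState *env = &cpu->env;     /* ARM register file for the decoder  */
    dc->singlestep_enabled = cs->singlestep_enabled;

The legacy CPUARMState-based entry points survive as thin wrappers that convert with arm_env_get_cpu(), as the following hunk shows.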
10067	10222	 
10068	10223	void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
10069	10224	{
10070	 	    gen_intermediate_code_internal(env, tb, 0);
 	10225	    gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
10071	10226	}
10072	10227	 
10073	10228	void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
10074	10229	{
10075	 	    gen_intermediate_code_internal(env, tb, 1);
 	10230	    gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
10076	10231	}
10077	10232	 
10078	10233	static const char *cpu_mode_names[16] = {
10080	10235	  "???", "???", "???", "und", "???", "???", "???", "sys"
10081	10236	};
10082	10237	 
10083	 	void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
10084	 	                    int flags)
 	10238	void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
 	10239	                        int flags)
10085	10240	{
 	10241	    ARMCPU *cpu = ARM_CPU(cs);
 	10242	    CPUARMState *env = &cpu->env;
10086	10243	    int i;
10087	10244	    uint32_t psr;
10088	10245	