/*
 * PowerPC emulation helpers for qemu.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21
#include "dyngen-exec.h"
22
#include "host-utils.h"
25
#include "helper_regs.h"
27
#if !defined(CONFIG_USER_ONLY)
28
#include "softmmu_exec.h"
29
#endif /* !defined(CONFIG_USER_ONLY) */
32
//#define DEBUG_EXCEPTIONS
33
//#define DEBUG_SOFTWARE_TLB
35
#ifdef DEBUG_SOFTWARE_TLB
36
# define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
38
# define LOG_SWTLB(...) do { } while (0)
42
/*****************************************************************************/
43
/* Exceptions processing helpers */
45
void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
48
printf("Raise exception %3x code : %d\n", exception, error_code);
50
env->exception_index = exception;
51
env->error_code = error_code;
55
void helper_raise_exception (uint32_t exception)
57
helper_raise_exception_err(exception, 0);
60
/*****************************************************************************/
62
void helper_load_dump_spr (uint32_t sprn)
64
qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
68
void helper_store_dump_spr (uint32_t sprn)
70
qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
74
target_ulong helper_load_tbl (void)
76
return (target_ulong)cpu_ppc_load_tbl(env);
79
target_ulong helper_load_tbu (void)
81
return cpu_ppc_load_tbu(env);
84
target_ulong helper_load_atbl (void)
86
return (target_ulong)cpu_ppc_load_atbl(env);
89
target_ulong helper_load_atbu (void)
91
return cpu_ppc_load_atbu(env);
94
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
95
target_ulong helper_load_purr (void)
97
return (target_ulong)cpu_ppc_load_purr(env);
101
target_ulong helper_load_601_rtcl (void)
103
return cpu_ppc601_load_rtcl(env);
106
target_ulong helper_load_601_rtcu (void)
108
return cpu_ppc601_load_rtcu(env);
111
#if !defined(CONFIG_USER_ONLY)
112
#if defined (TARGET_PPC64)
113
/* Write the Address Space Register (PPC64, privileged). */
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
119
/* Write SDR1 (hashed page table base/size) through the MMU model. */
void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}
124
/* Write the lower half of the time base. */
void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}
129
/* Write the upper half of the time base. */
void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}
134
/* Write the lower half of the alternate time base. */
void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}
139
/* Write the upper half of the alternate time base. */
void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}
144
/* Write the PowerPC 601 RTC lower register. */
void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}
149
/* Write the PowerPC 601 RTC upper register. */
void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}
154
target_ulong helper_load_decr (void)
156
return cpu_ppc_load_decr(env);
159
/* Write the decrementer through the clock model. */
void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}
164
/* Write HID0 on the PowerPC 601.  Bit 0x00000008 selects the current
 * endianness; when it flips, mirror the new little-endian state into
 * hflags/hflags_nmsr so translated code observes the change. */
void helper_store_hid0_601 (target_ulong val)
{
    /* NOTE(review): local declaration reconstructed — the original
     * declaration line was lost in extraction; type inferred from use. */
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__,
                 val & 0x8 ? 'l' : 'b', env->hflags);
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}
181
void helper_store_403_pbr (uint32_t num, target_ulong value)
183
if (likely(env->pb[num] != value)) {
184
env->pb[num] = value;
185
/* Should be optimized */
190
target_ulong helper_load_40x_pit (void)
192
return load_40x_pit(env);
195
/* Write the 40x Programmable Interval Timer. */
void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}
200
/* Write the 40x Debug Control Register 0. */
void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}
205
/* Write the 40x Storage Little-Endian Register. */
void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}
210
/* Write the BookE Timer Control Register. */
void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}
215
/* Write the BookE Timer Status Register. */
void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}
220
/* Write instruction BAT register nr, upper word. */
void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}
225
/* Write instruction BAT register nr, lower word. */
void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}
230
/* Write data BAT register nr, upper word. */
void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}
235
/* Write data BAT register nr, lower word. */
void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}
240
/* Write 601-style BAT register nr, lower word (601 has unified BATs). */
void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}
245
/* Write 601-style BAT register nr, upper word (601 has unified BATs). */
void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
251
/*****************************************************************************/
252
/* Memory load and stores */
254
static inline target_ulong addr_add(target_ulong addr, target_long arg)
256
#if defined(TARGET_PPC64)
258
return (uint32_t)(addr + arg);
264
void helper_lmw (target_ulong addr, uint32_t reg)
266
for (; reg < 32; reg++) {
268
env->gpr[reg] = bswap32(ldl(addr));
270
env->gpr[reg] = ldl(addr);
271
addr = addr_add(addr, 4);
275
void helper_stmw (target_ulong addr, uint32_t reg)
277
for (; reg < 32; reg++) {
279
stl(addr, bswap32((uint32_t)env->gpr[reg]));
281
stl(addr, (uint32_t)env->gpr[reg]);
282
addr = addr_add(addr, 4);
286
void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
289
for (; nb > 3; nb -= 4) {
290
env->gpr[reg] = ldl(addr);
291
reg = (reg + 1) % 32;
292
addr = addr_add(addr, 4);
294
if (unlikely(nb > 0)) {
296
for (sh = 24; nb > 0; nb--, sh -= 8) {
297
env->gpr[reg] |= ldub(addr) << sh;
298
addr = addr_add(addr, 1);
302
/* PPC32 specification says we must generate an exception if
303
* rA is in the range of registers to be loaded.
304
* In an other hand, IBM says this is valid, but rA won't be loaded.
305
* For now, I'll follow the spec...
307
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
309
if (likely(xer_bc != 0)) {
310
if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
311
(reg < rb && (reg + xer_bc) > rb))) {
312
helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
314
POWERPC_EXCP_INVAL_LSWX);
316
helper_lsw(addr, xer_bc, reg);
321
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
324
for (; nb > 3; nb -= 4) {
325
stl(addr, env->gpr[reg]);
326
reg = (reg + 1) % 32;
327
addr = addr_add(addr, 4);
329
if (unlikely(nb > 0)) {
330
for (sh = 24; nb > 0; nb--, sh -= 8) {
331
stb(addr, (env->gpr[reg] >> sh) & 0xFF);
332
addr = addr_add(addr, 1);
337
static void do_dcbz(target_ulong addr, int dcache_line_size)
339
addr &= ~(dcache_line_size - 1);
341
for (i = 0 ; i < dcache_line_size ; i += 4) {
344
if (env->reserve_addr == addr)
345
env->reserve_addr = (target_ulong)-1ULL;
348
/* dcbz: zero one data cache line of the CPU's configured line size. */
void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}
353
void helper_dcbz_970(target_ulong addr)
355
if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
358
do_dcbz(addr, env->dcache_line_size);
361
void helper_icbi(target_ulong addr)
363
addr &= ~(env->dcache_line_size - 1);
364
/* Invalidate one cache line :
365
* PowerPC specification says this is to be treated like a load
366
* (not a fetch) by the MMU. To be sure it will be so,
367
* do the load "by hand".
373
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
377
for (i = 0; i < xer_bc; i++) {
379
addr = addr_add(addr, 1);
380
/* ra (if not 0) and rb are never modified */
381
if (likely(reg != rb && (ra == 0 || reg != ra))) {
382
env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
384
if (unlikely(c == xer_cmp))
386
if (likely(d != 0)) {
397
/*****************************************************************************/
398
/* Fixed point operations helpers */
399
#if defined(TARGET_PPC64)
401
/* multiply high word */
402
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
406
muls64(&tl, &th, arg1, arg2);
410
/* multiply high word unsigned */
411
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
415
mulu64(&tl, &th, arg1, arg2);
419
uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
424
muls64(&tl, (uint64_t *)&th, arg1, arg2);
425
/* If th != 0 && th != -1, then we had an overflow */
426
if (likely((uint64_t)(th + 1) <= 1)) {
427
env->xer &= ~(1 << XER_OV);
429
env->xer |= (1 << XER_OV) | (1 << XER_SO);
435
target_ulong helper_cntlzw (target_ulong t)
440
#if defined(TARGET_PPC64)
441
target_ulong helper_cntlzd (target_ulong t)
447
/* shift right arithmetic helper */
448
target_ulong helper_sraw (target_ulong value, target_ulong shift)
452
if (likely(!(shift & 0x20))) {
453
if (likely((uint32_t)shift != 0)) {
455
ret = (int32_t)value >> shift;
456
if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
457
env->xer &= ~(1 << XER_CA);
459
env->xer |= (1 << XER_CA);
462
ret = (int32_t)value;
463
env->xer &= ~(1 << XER_CA);
466
ret = (int32_t)value >> 31;
468
env->xer |= (1 << XER_CA);
470
env->xer &= ~(1 << XER_CA);
473
return (target_long)ret;
476
#if defined(TARGET_PPC64)
477
target_ulong helper_srad (target_ulong value, target_ulong shift)
481
if (likely(!(shift & 0x40))) {
482
if (likely((uint64_t)shift != 0)) {
484
ret = (int64_t)value >> shift;
485
if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
486
env->xer &= ~(1 << XER_CA);
488
env->xer |= (1 << XER_CA);
491
ret = (int64_t)value;
492
env->xer &= ~(1 << XER_CA);
495
ret = (int64_t)value >> 63;
497
env->xer |= (1 << XER_CA);
499
env->xer &= ~(1 << XER_CA);
506
#if defined(TARGET_PPC64)
507
target_ulong helper_popcntb (target_ulong val)
509
val = (val & 0x5555555555555555ULL) + ((val >> 1) &
510
0x5555555555555555ULL);
511
val = (val & 0x3333333333333333ULL) + ((val >> 2) &
512
0x3333333333333333ULL);
513
val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) &
514
0x0f0f0f0f0f0f0f0fULL);
518
target_ulong helper_popcntw (target_ulong val)
520
val = (val & 0x5555555555555555ULL) + ((val >> 1) &
521
0x5555555555555555ULL);
522
val = (val & 0x3333333333333333ULL) + ((val >> 2) &
523
0x3333333333333333ULL);
524
val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) &
525
0x0f0f0f0f0f0f0f0fULL);
526
val = (val & 0x00ff00ff00ff00ffULL) + ((val >> 8) &
527
0x00ff00ff00ff00ffULL);
528
val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) &
529
0x0000ffff0000ffffULL);
533
target_ulong helper_popcntd (target_ulong val)
538
target_ulong helper_popcntb (target_ulong val)
540
val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
541
val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
542
val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
546
target_ulong helper_popcntw (target_ulong val)
548
val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
549
val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
550
val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
551
val = (val & 0x00ff00ff) + ((val >> 8) & 0x00ff00ff);
552
val = (val & 0x0000ffff) + ((val >> 16) & 0x0000ffff);
557
/*****************************************************************************/
558
/* Floating point operations helpers */
559
uint64_t helper_float32_to_float64(uint32_t arg)
564
d.d = float32_to_float64(f.f, &env->fp_status);
568
uint32_t helper_float64_to_float32(uint64_t arg)
573
f.f = float64_to_float32(d.d, &env->fp_status);
577
static inline int isden(float64 d)
583
return ((u.ll >> 52) & 0x7FF) == 0;
586
uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
592
isneg = float64_is_neg(farg.d);
593
if (unlikely(float64_is_any_nan(farg.d))) {
594
if (float64_is_signaling_nan(farg.d)) {
595
/* Signaling NaN: flags are undefined */
601
} else if (unlikely(float64_is_infinity(farg.d))) {
608
if (float64_is_zero(farg.d)) {
616
/* Denormalized numbers */
619
/* Normalized numbers */
630
/* We update FPSCR_FPRF */
631
env->fpscr &= ~(0x1F << FPSCR_FPRF);
632
env->fpscr |= ret << FPSCR_FPRF;
634
/* We just need fpcc to update Rc1 */
638
/* Floating-point invalid operations exception */
639
static inline uint64_t fload_invalid_op_excp(int op)
646
case POWERPC_EXCP_FP_VXSNAN:
647
env->fpscr |= 1 << FPSCR_VXSNAN;
649
case POWERPC_EXCP_FP_VXSOFT:
650
env->fpscr |= 1 << FPSCR_VXSOFT;
652
case POWERPC_EXCP_FP_VXISI:
653
/* Magnitude subtraction of infinities */
654
env->fpscr |= 1 << FPSCR_VXISI;
656
case POWERPC_EXCP_FP_VXIDI:
657
/* Division of infinity by infinity */
658
env->fpscr |= 1 << FPSCR_VXIDI;
660
case POWERPC_EXCP_FP_VXZDZ:
661
/* Division of zero by zero */
662
env->fpscr |= 1 << FPSCR_VXZDZ;
664
case POWERPC_EXCP_FP_VXIMZ:
665
/* Multiplication of zero by infinity */
666
env->fpscr |= 1 << FPSCR_VXIMZ;
668
case POWERPC_EXCP_FP_VXVC:
669
/* Ordered comparison of NaN */
670
env->fpscr |= 1 << FPSCR_VXVC;
671
env->fpscr &= ~(0xF << FPSCR_FPCC);
672
env->fpscr |= 0x11 << FPSCR_FPCC;
673
/* We must update the target FPR before raising the exception */
675
env->exception_index = POWERPC_EXCP_PROGRAM;
676
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
677
/* Update the floating-point enabled exception summary */
678
env->fpscr |= 1 << FPSCR_FEX;
679
/* Exception is differed */
683
case POWERPC_EXCP_FP_VXSQRT:
684
/* Square root of a negative number */
685
env->fpscr |= 1 << FPSCR_VXSQRT;
687
env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
689
/* Set the result to quiet NaN */
690
ret = 0x7FF8000000000000ULL;
691
env->fpscr &= ~(0xF << FPSCR_FPCC);
692
env->fpscr |= 0x11 << FPSCR_FPCC;
695
case POWERPC_EXCP_FP_VXCVI:
696
/* Invalid conversion */
697
env->fpscr |= 1 << FPSCR_VXCVI;
698
env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
700
/* Set the result to quiet NaN */
701
ret = 0x7FF8000000000000ULL;
702
env->fpscr &= ~(0xF << FPSCR_FPCC);
703
env->fpscr |= 0x11 << FPSCR_FPCC;
707
/* Update the floating-point invalid operation summary */
708
env->fpscr |= 1 << FPSCR_VX;
709
/* Update the floating-point exception summary */
710
env->fpscr |= 1 << FPSCR_FX;
712
/* Update the floating-point enabled exception summary */
713
env->fpscr |= 1 << FPSCR_FEX;
714
if (msr_fe0 != 0 || msr_fe1 != 0)
715
helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
720
static inline void float_zero_divide_excp(void)
722
env->fpscr |= 1 << FPSCR_ZX;
723
env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
724
/* Update the floating-point exception summary */
725
env->fpscr |= 1 << FPSCR_FX;
727
/* Update the floating-point enabled exception summary */
728
env->fpscr |= 1 << FPSCR_FEX;
729
if (msr_fe0 != 0 || msr_fe1 != 0) {
730
helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
731
POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
736
static inline void float_overflow_excp(void)
738
env->fpscr |= 1 << FPSCR_OX;
739
/* Update the floating-point exception summary */
740
env->fpscr |= 1 << FPSCR_FX;
742
/* XXX: should adjust the result */
743
/* Update the floating-point enabled exception summary */
744
env->fpscr |= 1 << FPSCR_FEX;
745
/* We must update the target FPR before raising the exception */
746
env->exception_index = POWERPC_EXCP_PROGRAM;
747
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
749
env->fpscr |= 1 << FPSCR_XX;
750
env->fpscr |= 1 << FPSCR_FI;
754
static inline void float_underflow_excp(void)
756
env->fpscr |= 1 << FPSCR_UX;
757
/* Update the floating-point exception summary */
758
env->fpscr |= 1 << FPSCR_FX;
760
/* XXX: should adjust the result */
761
/* Update the floating-point enabled exception summary */
762
env->fpscr |= 1 << FPSCR_FEX;
763
/* We must update the target FPR before raising the exception */
764
env->exception_index = POWERPC_EXCP_PROGRAM;
765
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
769
static inline void float_inexact_excp(void)
771
env->fpscr |= 1 << FPSCR_XX;
772
/* Update the floating-point exception summary */
773
env->fpscr |= 1 << FPSCR_FX;
775
/* Update the floating-point enabled exception summary */
776
env->fpscr |= 1 << FPSCR_FEX;
777
/* We must update the target FPR before raising the exception */
778
env->exception_index = POWERPC_EXCP_PROGRAM;
779
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
783
static inline void fpscr_set_rounding_mode(void)
787
/* Set rounding mode */
790
/* Best approximation (round to nearest) */
791
rnd_type = float_round_nearest_even;
794
/* Smaller magnitude (round toward zero) */
795
rnd_type = float_round_to_zero;
798
/* Round toward +infinite */
799
rnd_type = float_round_up;
803
/* Round toward -infinite */
804
rnd_type = float_round_down;
807
set_float_rounding_mode(rnd_type, &env->fp_status);
810
void helper_fpscr_clrbit (uint32_t bit)
814
prev = (env->fpscr >> bit) & 1;
815
env->fpscr &= ~(1 << bit);
820
fpscr_set_rounding_mode();
828
void helper_fpscr_setbit (uint32_t bit)
832
prev = (env->fpscr >> bit) & 1;
833
env->fpscr |= 1 << bit;
837
env->fpscr |= 1 << FPSCR_FX;
841
env->fpscr |= 1 << FPSCR_FX;
846
env->fpscr |= 1 << FPSCR_FX;
851
env->fpscr |= 1 << FPSCR_FX;
856
env->fpscr |= 1 << FPSCR_FX;
869
env->fpscr |= 1 << FPSCR_VX;
870
env->fpscr |= 1 << FPSCR_FX;
877
env->error_code = POWERPC_EXCP_FP;
879
env->error_code |= POWERPC_EXCP_FP_VXSNAN;
881
env->error_code |= POWERPC_EXCP_FP_VXISI;
883
env->error_code |= POWERPC_EXCP_FP_VXIDI;
885
env->error_code |= POWERPC_EXCP_FP_VXZDZ;
887
env->error_code |= POWERPC_EXCP_FP_VXIMZ;
889
env->error_code |= POWERPC_EXCP_FP_VXVC;
891
env->error_code |= POWERPC_EXCP_FP_VXSOFT;
893
env->error_code |= POWERPC_EXCP_FP_VXSQRT;
895
env->error_code |= POWERPC_EXCP_FP_VXCVI;
902
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
909
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
916
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
923
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
929
fpscr_set_rounding_mode();
934
/* Update the floating-point enabled exception summary */
935
env->fpscr |= 1 << FPSCR_FEX;
936
/* We have to update Rc1 before raising the exception */
937
env->exception_index = POWERPC_EXCP_PROGRAM;
943
void helper_store_fpscr (uint64_t arg, uint32_t mask)
946
* We use only the 32 LSB of the incoming fpr
954
new |= prev & 0x60000000;
955
for (i = 0; i < 8; i++) {
956
if (mask & (1 << i)) {
957
env->fpscr &= ~(0xF << (4 * i));
958
env->fpscr |= new & (0xF << (4 * i));
961
/* Update VX and FEX */
963
env->fpscr |= 1 << FPSCR_VX;
965
env->fpscr &= ~(1 << FPSCR_VX);
966
if ((fpscr_ex & fpscr_eex) != 0) {
967
env->fpscr |= 1 << FPSCR_FEX;
968
env->exception_index = POWERPC_EXCP_PROGRAM;
969
/* XXX: we should compute it properly */
970
env->error_code = POWERPC_EXCP_FP;
973
env->fpscr &= ~(1 << FPSCR_FEX);
974
fpscr_set_rounding_mode();
977
void helper_float_check_status (void)
979
if (env->exception_index == POWERPC_EXCP_PROGRAM &&
980
(env->error_code & POWERPC_EXCP_FP)) {
981
/* Differred floating-point exception after target FPR update */
982
if (msr_fe0 != 0 || msr_fe1 != 0)
983
helper_raise_exception_err(env->exception_index, env->error_code);
985
int status = get_float_exception_flags(&env->fp_status);
986
if (status & float_flag_divbyzero) {
987
float_zero_divide_excp();
988
} else if (status & float_flag_overflow) {
989
float_overflow_excp();
990
} else if (status & float_flag_underflow) {
991
float_underflow_excp();
992
} else if (status & float_flag_inexact) {
993
float_inexact_excp();
998
void helper_reset_fpstatus (void)
1000
set_float_exception_flags(0, &env->fp_status);
1004
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
1006
CPU_DoubleU farg1, farg2;
1011
if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
1012
float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
1013
/* Magnitude subtraction of infinities */
1014
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1016
if (unlikely(float64_is_signaling_nan(farg1.d) ||
1017
float64_is_signaling_nan(farg2.d))) {
1019
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1021
farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
1028
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
1030
CPU_DoubleU farg1, farg2;
1035
if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
1036
float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
1037
/* Magnitude subtraction of infinities */
1038
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1040
if (unlikely(float64_is_signaling_nan(farg1.d) ||
1041
float64_is_signaling_nan(farg2.d))) {
1042
/* sNaN subtraction */
1043
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1045
farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
1052
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
1054
CPU_DoubleU farg1, farg2;
1059
if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1060
(float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1061
/* Multiplication of zero by infinity */
1062
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1064
if (unlikely(float64_is_signaling_nan(farg1.d) ||
1065
float64_is_signaling_nan(farg2.d))) {
1066
/* sNaN multiplication */
1067
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1069
farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1076
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
1078
CPU_DoubleU farg1, farg2;
1083
if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
1084
/* Division of infinity by infinity */
1085
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
1086
} else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
1087
/* Division of zero by zero */
1088
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
1090
if (unlikely(float64_is_signaling_nan(farg1.d) ||
1091
float64_is_signaling_nan(farg2.d))) {
1093
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1095
farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
1102
uint64_t helper_fabs (uint64_t arg)
1107
farg.d = float64_abs(farg.d);
1112
uint64_t helper_fnabs (uint64_t arg)
1117
farg.d = float64_abs(farg.d);
1118
farg.d = float64_chs(farg.d);
1123
uint64_t helper_fneg (uint64_t arg)
1128
farg.d = float64_chs(farg.d);
1132
/* fctiw - fctiw. */
1133
uint64_t helper_fctiw (uint64_t arg)
1138
if (unlikely(float64_is_signaling_nan(farg.d))) {
1139
/* sNaN conversion */
1140
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1141
} else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
1142
/* qNan / infinity conversion */
1143
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1145
farg.ll = float64_to_int32(farg.d, &env->fp_status);
1146
/* XXX: higher bits are not supposed to be significant.
1147
* to make tests easier, return the same as a real PowerPC 750
1149
farg.ll |= 0xFFF80000ULL << 32;
1154
/* fctiwz - fctiwz. */
1155
uint64_t helper_fctiwz (uint64_t arg)
1160
if (unlikely(float64_is_signaling_nan(farg.d))) {
1161
/* sNaN conversion */
1162
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1163
} else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
1164
/* qNan / infinity conversion */
1165
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1167
farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
1168
/* XXX: higher bits are not supposed to be significant.
1169
* to make tests easier, return the same as a real PowerPC 750
1171
farg.ll |= 0xFFF80000ULL << 32;
1176
#if defined(TARGET_PPC64)
1177
/* fcfid - fcfid. */
1178
uint64_t helper_fcfid (uint64_t arg)
1181
farg.d = int64_to_float64(arg, &env->fp_status);
1185
/* fctid - fctid. */
1186
uint64_t helper_fctid (uint64_t arg)
1191
if (unlikely(float64_is_signaling_nan(farg.d))) {
1192
/* sNaN conversion */
1193
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1194
} else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
1195
/* qNan / infinity conversion */
1196
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1198
farg.ll = float64_to_int64(farg.d, &env->fp_status);
1203
/* fctidz - fctidz. */
1204
uint64_t helper_fctidz (uint64_t arg)
1209
if (unlikely(float64_is_signaling_nan(farg.d))) {
1210
/* sNaN conversion */
1211
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1212
} else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
1213
/* qNan / infinity conversion */
1214
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1216
farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
1223
static inline uint64_t do_fri(uint64_t arg, int rounding_mode)
1228
if (unlikely(float64_is_signaling_nan(farg.d))) {
1230
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1231
} else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
1232
/* qNan / infinity round */
1233
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1235
set_float_rounding_mode(rounding_mode, &env->fp_status);
1236
farg.ll = float64_round_to_int(farg.d, &env->fp_status);
1237
/* Restore rounding mode from FPSCR */
1238
fpscr_set_rounding_mode();
1243
/* frin - frin.: round to integral value, ties to even. */
uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}
1248
/* friz - friz.: round to integral value, toward zero. */
uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}
1253
/* frip - frip.: round to integral value, toward +infinity. */
uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}
1258
/* frim - frim.: round to integral value, toward -infinity. */
uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}
1263
/* fmadd - fmadd. */
1264
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1266
CPU_DoubleU farg1, farg2, farg3;
1272
if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1273
(float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1274
/* Multiplication of zero by infinity */
1275
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1277
if (unlikely(float64_is_signaling_nan(farg1.d) ||
1278
float64_is_signaling_nan(farg2.d) ||
1279
float64_is_signaling_nan(farg3.d))) {
1280
/* sNaN operation */
1281
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1283
/* This is the way the PowerPC specification defines it */
1284
float128 ft0_128, ft1_128;
1286
ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1287
ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1288
ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1289
if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1290
float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
1291
/* Magnitude subtraction of infinities */
1292
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1294
ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1295
ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1296
farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1303
/* fmsub - fmsub. */
1304
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1306
CPU_DoubleU farg1, farg2, farg3;
1312
if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1313
(float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1314
/* Multiplication of zero by infinity */
1315
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1317
if (unlikely(float64_is_signaling_nan(farg1.d) ||
1318
float64_is_signaling_nan(farg2.d) ||
1319
float64_is_signaling_nan(farg3.d))) {
1320
/* sNaN operation */
1321
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1323
/* This is the way the PowerPC specification defines it */
1324
float128 ft0_128, ft1_128;
1326
ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1327
ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1328
ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1329
if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1330
float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
1331
/* Magnitude subtraction of infinities */
1332
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1334
ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1335
ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1336
farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1342
/* fnmadd - fnmadd. */
1343
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1345
CPU_DoubleU farg1, farg2, farg3;
1351
if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1352
(float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1353
/* Multiplication of zero by infinity */
1354
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1356
if (unlikely(float64_is_signaling_nan(farg1.d) ||
1357
float64_is_signaling_nan(farg2.d) ||
1358
float64_is_signaling_nan(farg3.d))) {
1359
/* sNaN operation */
1360
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1362
/* This is the way the PowerPC specification defines it */
1363
float128 ft0_128, ft1_128;
1365
ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1366
ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1367
ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1368
if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1369
float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
1370
/* Magnitude subtraction of infinities */
1371
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1373
ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1374
ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1375
farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1377
if (likely(!float64_is_any_nan(farg1.d))) {
1378
farg1.d = float64_chs(farg1.d);
1384
/* fnmsub - fnmsub. */
1385
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1387
CPU_DoubleU farg1, farg2, farg3;
1393
if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1394
(float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1395
/* Multiplication of zero by infinity */
1396
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1398
if (unlikely(float64_is_signaling_nan(farg1.d) ||
1399
float64_is_signaling_nan(farg2.d) ||
1400
float64_is_signaling_nan(farg3.d))) {
1401
/* sNaN operation */
1402
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1404
/* This is the way the PowerPC specification defines it */
1405
float128 ft0_128, ft1_128;
1407
ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1408
ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1409
ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1410
if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1411
float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
1412
/* Magnitude subtraction of infinities */
1413
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1415
ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1416
ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1417
farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1419
if (likely(!float64_is_any_nan(farg1.d))) {
1420
farg1.d = float64_chs(farg1.d);
1427
uint64_t helper_frsp (uint64_t arg)
1433
if (unlikely(float64_is_signaling_nan(farg.d))) {
1434
/* sNaN square root */
1435
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1437
f32 = float64_to_float32(farg.d, &env->fp_status);
1438
farg.d = float32_to_float64(f32, &env->fp_status);
1443
/* fsqrt - fsqrt. */
1444
uint64_t helper_fsqrt (uint64_t arg)
1449
if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
1450
/* Square root of a negative nonzero number */
1451
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1453
if (unlikely(float64_is_signaling_nan(farg.d))) {
1454
/* sNaN square root */
1455
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1457
farg.d = float64_sqrt(farg.d, &env->fp_status);
1463
uint64_t helper_fre (uint64_t arg)
1468
if (unlikely(float64_is_signaling_nan(farg.d))) {
1469
/* sNaN reciprocal */
1470
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1472
farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1477
uint64_t helper_fres (uint64_t arg)
1483
if (unlikely(float64_is_signaling_nan(farg.d))) {
1484
/* sNaN reciprocal */
1485
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1487
farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1488
f32 = float64_to_float32(farg.d, &env->fp_status);
1489
farg.d = float32_to_float64(f32, &env->fp_status);
1494
/* frsqrte - frsqrte. */
1495
uint64_t helper_frsqrte (uint64_t arg)
1501
if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
1502
/* Reciprocal square root of a negative nonzero number */
1503
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1505
if (unlikely(float64_is_signaling_nan(farg.d))) {
1506
/* sNaN reciprocal square root */
1507
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1509
farg.d = float64_sqrt(farg.d, &env->fp_status);
1510
farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1511
f32 = float64_to_float32(farg.d, &env->fp_status);
1512
farg.d = float32_to_float64(f32, &env->fp_status);
1518
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1524
if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_any_nan(farg1.d)) {
1531
void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
1533
CPU_DoubleU farg1, farg2;
1538
if (unlikely(float64_is_any_nan(farg1.d) ||
1539
float64_is_any_nan(farg2.d))) {
1541
} else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1543
} else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1549
env->fpscr &= ~(0x0F << FPSCR_FPRF);
1550
env->fpscr |= ret << FPSCR_FPRF;
1551
env->crf[crfD] = ret;
1552
if (unlikely(ret == 0x01UL
1553
&& (float64_is_signaling_nan(farg1.d) ||
1554
float64_is_signaling_nan(farg2.d)))) {
1555
/* sNaN comparison */
1556
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1560
void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
1562
CPU_DoubleU farg1, farg2;
1567
if (unlikely(float64_is_any_nan(farg1.d) ||
1568
float64_is_any_nan(farg2.d))) {
1570
} else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1572
} else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1578
env->fpscr &= ~(0x0F << FPSCR_FPRF);
1579
env->fpscr |= ret << FPSCR_FPRF;
1580
env->crf[crfD] = ret;
1581
if (unlikely (ret == 0x01UL)) {
1582
if (float64_is_signaling_nan(farg1.d) ||
1583
float64_is_signaling_nan(farg2.d)) {
1584
/* sNaN comparison */
1585
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
1586
POWERPC_EXCP_FP_VXVC);
1588
/* qNaN comparison */
1589
fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
1594
#if !defined (CONFIG_USER_ONLY)
1595
void helper_store_msr (target_ulong val)
1597
val = hreg_store_msr(env, val, 0);
1599
env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1600
helper_raise_exception(val);
1604
static inline void do_rfi(target_ulong nip, target_ulong msr,
1605
target_ulong msrm, int keep_msrh)
1607
#if defined(TARGET_PPC64)
1608
if (msr & (1ULL << MSR_SF)) {
1609
nip = (uint64_t)nip;
1610
msr &= (uint64_t)msrm;
1612
nip = (uint32_t)nip;
1613
msr = (uint32_t)(msr & msrm);
1615
msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
1618
nip = (uint32_t)nip;
1619
msr &= (uint32_t)msrm;
1621
/* XXX: beware: this is false if VLE is supported */
1622
env->nip = nip & ~((target_ulong)0x00000003);
1623
hreg_store_msr(env, msr, 1);
1624
#if defined (DEBUG_OP)
1625
cpu_dump_rfi(env->nip, env->msr);
1627
/* No need to raise an exception here,
1628
* as rfi is always the last insn of a TB
1630
env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1633
void helper_rfi (void)
1635
do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1636
~((target_ulong)0x783F0000), 1);
1639
#if defined(TARGET_PPC64)
1640
void helper_rfid (void)
1642
do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1643
~((target_ulong)0x783F0000), 0);
1646
void helper_hrfid (void)
1648
do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
1649
~((target_ulong)0x783F0000), 0);
1654
void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
1656
if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1657
((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1658
((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1659
((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1660
((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
1661
helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1665
#if defined(TARGET_PPC64)
1666
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
1668
if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1669
((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1670
((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1671
((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1672
((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
1673
helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1677
/*****************************************************************************/
1678
/* PowerPC 601 specific instructions (POWER bridge) */
1680
target_ulong helper_clcs (uint32_t arg)
1684
/* Instruction cache line size */
1685
return env->icache_line_size;
1688
/* Data cache line size */
1689
return env->dcache_line_size;
1692
/* Minimum cache line size */
1693
return (env->icache_line_size < env->dcache_line_size) ?
1694
env->icache_line_size : env->dcache_line_size;
1697
/* Maximum cache line size */
1698
return (env->icache_line_size > env->dcache_line_size) ?
1699
env->icache_line_size : env->dcache_line_size;
1708
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
1710
uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
1712
if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1713
(int32_t)arg2 == 0) {
1714
env->spr[SPR_MQ] = 0;
1717
env->spr[SPR_MQ] = tmp % arg2;
1718
return tmp / (int32_t)arg2;
1722
target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
1724
uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
1726
if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1727
(int32_t)arg2 == 0) {
1728
env->xer |= (1 << XER_OV) | (1 << XER_SO);
1729
env->spr[SPR_MQ] = 0;
1732
env->spr[SPR_MQ] = tmp % arg2;
1733
tmp /= (int32_t)arg2;
1734
if ((int32_t)tmp != tmp) {
1735
env->xer |= (1 << XER_OV) | (1 << XER_SO);
1737
env->xer &= ~(1 << XER_OV);
1743
target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
1745
if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1746
(int32_t)arg2 == 0) {
1747
env->spr[SPR_MQ] = 0;
1750
env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1751
return (int32_t)arg1 / (int32_t)arg2;
1755
target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
1757
if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1758
(int32_t)arg2 == 0) {
1759
env->xer |= (1 << XER_OV) | (1 << XER_SO);
1760
env->spr[SPR_MQ] = 0;
1763
env->xer &= ~(1 << XER_OV);
1764
env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1765
return (int32_t)arg1 / (int32_t)arg2;
1769
#if !defined (CONFIG_USER_ONLY)
1770
target_ulong helper_rac (target_ulong addr)
1774
target_ulong ret = 0;
1776
/* We don't have to generate many instances of this instruction,
1777
* as rac is supervisor only.
1779
/* XXX: FIX THIS: Pretend we have no BAT */
1780
nb_BATs = env->nb_BATs;
1782
if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
1784
env->nb_BATs = nb_BATs;
1788
void helper_rfsvc (void)
1790
do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
1794
/*****************************************************************************/
1795
/* 602 specific instructions */
1796
/* mfrom is the most crazy instruction ever seen, imho ! */
1797
/* Real implementation uses a ROM table. Do the same */
1798
/* Extremely decomposed:
1800
* return 256 * log10(10 + 1.0) + 0.5
1802
#if !defined (CONFIG_USER_ONLY)
1803
target_ulong helper_602_mfrom (target_ulong arg)
1805
if (likely(arg < 602)) {
1806
#include "mfrom_table.c"
1807
return mfrom_ROM_table[arg];
1814
/*****************************************************************************/
1815
/* Embedded PowerPC specific helpers */
1817
/* XXX: to be improved to check access rights when in user-mode */
1818
target_ulong helper_load_dcr (target_ulong dcrn)
1822
if (unlikely(env->dcr_env == NULL)) {
1823
qemu_log("No DCR environment\n");
1824
helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1825
POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1826
} else if (unlikely(ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val) != 0)) {
1827
qemu_log("DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
1828
helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1829
POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1834
void helper_store_dcr (target_ulong dcrn, target_ulong val)
1836
if (unlikely(env->dcr_env == NULL)) {
1837
qemu_log("No DCR environment\n");
1838
helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1839
POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1840
} else if (unlikely(ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val) != 0)) {
1841
qemu_log("DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
1842
helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1843
POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1847
#if !defined(CONFIG_USER_ONLY)
1848
void helper_40x_rfci (void)
1850
do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1851
~((target_ulong)0xFFFF0000), 0);
1854
void helper_rfci (void)
1856
do_rfi(env->spr[SPR_BOOKE_CSRR0], SPR_BOOKE_CSRR1,
1857
~((target_ulong)0x3FFF0000), 0);
1860
void helper_rfdi (void)
1862
do_rfi(env->spr[SPR_BOOKE_DSRR0], SPR_BOOKE_DSRR1,
1863
~((target_ulong)0x3FFF0000), 0);
1866
void helper_rfmci (void)
1868
do_rfi(env->spr[SPR_BOOKE_MCSRR0], SPR_BOOKE_MCSRR1,
1869
~((target_ulong)0x3FFF0000), 0);
1874
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
1880
for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1881
if ((high & mask) == 0) {
1889
for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1890
if ((low & mask) == 0) {
1902
env->xer = (env->xer & ~0x7F) | i;
1904
env->crf[0] |= xer_so;
1909
/*****************************************************************************/
1910
/* Altivec extension helpers */
1911
#if defined(HOST_WORDS_BIGENDIAN)
1919
#if defined(HOST_WORDS_BIGENDIAN)
1920
#define VECTOR_FOR_INORDER_I(index, element) \
1921
for (index = 0; index < ARRAY_SIZE(r->element); index++)
1923
#define VECTOR_FOR_INORDER_I(index, element) \
1924
for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
1927
/* If X is a NaN, store the corresponding QNaN into RESULT. Otherwise,
1928
* execute the following block. */
1929
#define DO_HANDLE_NAN(result, x) \
1930
if (float32_is_any_nan(x)) { \
1933
__f.l = __f.l | (1 << 22); /* Set QNaN bit. */ \
1937
#define HANDLE_NAN1(result, x) \
1938
DO_HANDLE_NAN(result, x)
1939
#define HANDLE_NAN2(result, x, y) \
1940
DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
1941
#define HANDLE_NAN3(result, x, y, z) \
1942
DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
1944
/* Saturating arithmetic helpers. */
1945
#define SATCVT(from, to, from_type, to_type, min, max) \
1946
static inline to_type cvt##from##to(from_type x, int *sat) \
1949
if (x < (from_type)min) { \
1952
} else if (x > (from_type)max) { \
1960
#define SATCVTU(from, to, from_type, to_type, min, max) \
1961
static inline to_type cvt##from##to(from_type x, int *sat) \
1964
if (x > (from_type)max) { \
1972
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
1973
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
1974
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)
1976
SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
1977
SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
1978
SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
1979
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
1980
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
1981
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
1985
#define LVE(name, access, swap, element) \
1986
void helper_##name (ppc_avr_t *r, target_ulong addr) \
1988
size_t n_elems = ARRAY_SIZE(r->element); \
1989
int adjust = HI_IDX*(n_elems-1); \
1990
int sh = sizeof(r->element[0]) >> 1; \
1991
int index = (addr & 0xf) >> sh; \
1993
r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
1995
r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
1999
LVE(lvebx, ldub, I, u8)
2000
LVE(lvehx, lduw, bswap16, u16)
2001
LVE(lvewx, ldl, bswap32, u32)
2005
void helper_lvsl (ppc_avr_t *r, target_ulong sh)
2007
int i, j = (sh & 0xf);
2009
VECTOR_FOR_INORDER_I (i, u8) {
2014
void helper_lvsr (ppc_avr_t *r, target_ulong sh)
2016
int i, j = 0x10 - (sh & 0xf);
2018
VECTOR_FOR_INORDER_I (i, u8) {
2023
#define STVE(name, access, swap, element) \
2024
void helper_##name (ppc_avr_t *r, target_ulong addr) \
2026
size_t n_elems = ARRAY_SIZE(r->element); \
2027
int adjust = HI_IDX*(n_elems-1); \
2028
int sh = sizeof(r->element[0]) >> 1; \
2029
int index = (addr & 0xf) >> sh; \
2031
access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
2033
access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
2037
STVE(stvebx, stb, I, u8)
2038
STVE(stvehx, stw, bswap16, u16)
2039
STVE(stvewx, stl, bswap32, u32)
2043
void helper_mtvscr (ppc_avr_t *r)
2045
#if defined(HOST_WORDS_BIGENDIAN)
2046
env->vscr = r->u32[3];
2048
env->vscr = r->u32[0];
2050
set_flush_to_zero(vscr_nj, &env->vec_status);
2053
void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2056
for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2057
r->u32[i] = ~a->u32[i] < b->u32[i];
2061
#define VARITH_DO(name, op, element) \
2062
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2065
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2066
r->element[i] = a->element[i] op b->element[i]; \
2069
#define VARITH(suffix, element) \
2070
VARITH_DO(add##suffix, +, element) \
2071
VARITH_DO(sub##suffix, -, element)
2078
#define VARITHFP(suffix, func) \
2079
void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2082
for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2083
HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \
2084
r->f[i] = func(a->f[i], b->f[i], &env->vec_status); \
2088
VARITHFP(addfp, float32_add)
2089
VARITHFP(subfp, float32_sub)
2092
#define VARITHSAT_CASE(type, op, cvt, element) \
2094
type result = (type)a->element[i] op (type)b->element[i]; \
2095
r->element[i] = cvt(result, &sat); \
2098
#define VARITHSAT_DO(name, op, optype, cvt, element) \
2099
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2103
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2104
switch (sizeof(r->element[0])) { \
2105
case 1: VARITHSAT_CASE(optype, op, cvt, element); break; \
2106
case 2: VARITHSAT_CASE(optype, op, cvt, element); break; \
2107
case 4: VARITHSAT_CASE(optype, op, cvt, element); break; \
2111
env->vscr |= (1 << VSCR_SAT); \
2114
#define VARITHSAT_SIGNED(suffix, element, optype, cvt) \
2115
VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element) \
2116
VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
2117
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt) \
2118
VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element) \
2119
VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
2120
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
2121
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
2122
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
2123
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
2124
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
2125
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
2126
#undef VARITHSAT_CASE
2128
#undef VARITHSAT_SIGNED
2129
#undef VARITHSAT_UNSIGNED
2131
#define VAVG_DO(name, element, etype) \
2132
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2135
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2136
etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \
2137
r->element[i] = x >> 1; \
2141
#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
2142
VAVG_DO(avgs##type, signed_element, signed_type) \
2143
VAVG_DO(avgu##type, unsigned_element, unsigned_type)
2144
VAVG(b, s8, int16_t, u8, uint16_t)
2145
VAVG(h, s16, int32_t, u16, uint32_t)
2146
VAVG(w, s32, int64_t, u32, uint64_t)
2150
#define VCF(suffix, cvt, element) \
2151
void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
2154
for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2155
float32 t = cvt(b->element[i], &env->vec_status); \
2156
r->f[i] = float32_scalbn (t, -uim, &env->vec_status); \
2159
VCF(ux, uint32_to_float32, u32)
2160
VCF(sx, int32_to_float32, s32)
2163
#define VCMP_DO(suffix, compare, element, record) \
2164
void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2166
uint32_t ones = (uint32_t)-1; \
2167
uint32_t all = ones; \
2168
uint32_t none = 0; \
2170
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2171
uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
2172
switch (sizeof (a->element[0])) { \
2173
case 4: r->u32[i] = result; break; \
2174
case 2: r->u16[i] = result; break; \
2175
case 1: r->u8[i] = result; break; \
2181
env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2184
#define VCMP(suffix, compare, element) \
2185
VCMP_DO(suffix, compare, element, 0) \
2186
VCMP_DO(suffix##_dot, compare, element, 1)
2199
#define VCMPFP_DO(suffix, compare, order, record) \
2200
void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2202
uint32_t ones = (uint32_t)-1; \
2203
uint32_t all = ones; \
2204
uint32_t none = 0; \
2206
for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2208
int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
2209
if (rel == float_relation_unordered) { \
2211
} else if (rel compare order) { \
2216
r->u32[i] = result; \
2221
env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2224
#define VCMPFP(suffix, compare, order) \
2225
VCMPFP_DO(suffix, compare, order, 0) \
2226
VCMPFP_DO(suffix##_dot, compare, order, 1)
2227
VCMPFP(eqfp, ==, float_relation_equal)
2228
VCMPFP(gefp, !=, float_relation_less)
2229
VCMPFP(gtfp, ==, float_relation_greater)
2233
static inline void vcmpbfp_internal(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
2238
for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2239
int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
2240
if (le_rel == float_relation_unordered) {
2241
r->u32[i] = 0xc0000000;
2242
/* ALL_IN does not need to be updated here. */
2244
float32 bneg = float32_chs(b->f[i]);
2245
int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
2246
int le = le_rel != float_relation_greater;
2247
int ge = ge_rel != float_relation_less;
2248
r->u32[i] = ((!le) << 31) | ((!ge) << 30);
2249
all_in |= (!le | !ge);
2253
env->crf[6] = (all_in == 0) << 1;
2257
void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2259
vcmpbfp_internal(r, a, b, 0);
2262
void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2264
vcmpbfp_internal(r, a, b, 1);
2267
#define VCT(suffix, satcvt, element) \
2268
void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
2272
float_status s = env->vec_status; \
2273
set_float_rounding_mode(float_round_to_zero, &s); \
2274
for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2275
if (float32_is_any_nan(b->f[i])) { \
2276
r->element[i] = 0; \
2278
float64 t = float32_to_float64(b->f[i], &s); \
2280
t = float64_scalbn(t, uim, &s); \
2281
j = float64_to_int64(t, &s); \
2282
r->element[i] = satcvt(j, &sat); \
2286
env->vscr |= (1 << VSCR_SAT); \
2289
VCT(uxs, cvtsduw, u32)
2290
VCT(sxs, cvtsdsw, s32)
2293
void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2296
for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2297
HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
2298
/* Need to do the computation in higher precision and round
2299
* once at the end. */
2300
float64 af, bf, cf, t;
2301
af = float32_to_float64(a->f[i], &env->vec_status);
2302
bf = float32_to_float64(b->f[i], &env->vec_status);
2303
cf = float32_to_float64(c->f[i], &env->vec_status);
2304
t = float64_mul(af, cf, &env->vec_status);
2305
t = float64_add(t, bf, &env->vec_status);
2306
r->f[i] = float64_to_float32(t, &env->vec_status);
2311
void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2316
for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2317
int32_t prod = a->s16[i] * b->s16[i];
2318
int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2319
r->s16[i] = cvtswsh (t, &sat);
2323
env->vscr |= (1 << VSCR_SAT);
2327
void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2332
for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2333
int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
2334
int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2335
r->s16[i] = cvtswsh (t, &sat);
2339
env->vscr |= (1 << VSCR_SAT);
2343
#define VMINMAX_DO(name, compare, element) \
2344
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2347
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2348
if (a->element[i] compare b->element[i]) { \
2349
r->element[i] = b->element[i]; \
2351
r->element[i] = a->element[i]; \
2355
#define VMINMAX(suffix, element) \
2356
VMINMAX_DO(min##suffix, >, element) \
2357
VMINMAX_DO(max##suffix, <, element)
2367
#define VMINMAXFP(suffix, rT, rF) \
2368
void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2371
for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2372
HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \
2373
if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
2374
r->f[i] = rT->f[i]; \
2376
r->f[i] = rF->f[i]; \
2381
VMINMAXFP(minfp, a, b)
2382
VMINMAXFP(maxfp, b, a)
2385
void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2388
for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2389
int32_t prod = a->s16[i] * b->s16[i];
2390
r->s16[i] = (int16_t) (prod + c->s16[i]);
2394
#define VMRG_DO(name, element, highp) \
2395
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2399
size_t n_elems = ARRAY_SIZE(r->element); \
2400
for (i = 0; i < n_elems/2; i++) { \
2402
result.element[i*2+HI_IDX] = a->element[i]; \
2403
result.element[i*2+LO_IDX] = b->element[i]; \
2405
result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
2406
result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
2411
#if defined(HOST_WORDS_BIGENDIAN)
2418
#define VMRG(suffix, element) \
2419
VMRG_DO(mrgl##suffix, element, MRGHI) \
2420
VMRG_DO(mrgh##suffix, element, MRGLO)
2429
void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2434
for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
2435
prod[i] = (int32_t)a->s8[i] * b->u8[i];
2438
VECTOR_FOR_INORDER_I(i, s32) {
2439
r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2443
void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2448
for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2449
prod[i] = a->s16[i] * b->s16[i];
2452
VECTOR_FOR_INORDER_I(i, s32) {
2453
r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
2457
void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2463
for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2464
prod[i] = (int32_t)a->s16[i] * b->s16[i];
2467
VECTOR_FOR_INORDER_I (i, s32) {
2468
int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
2469
r->u32[i] = cvtsdsw(t, &sat);
2473
env->vscr |= (1 << VSCR_SAT);
2477
void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2482
for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2483
prod[i] = a->u8[i] * b->u8[i];
2486
VECTOR_FOR_INORDER_I(i, u32) {
2487
r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2491
void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2496
for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2497
prod[i] = a->u16[i] * b->u16[i];
2500
VECTOR_FOR_INORDER_I(i, u32) {
2501
r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
2505
void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2511
for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2512
prod[i] = a->u16[i] * b->u16[i];
2515
VECTOR_FOR_INORDER_I (i, s32) {
2516
uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
2517
r->u32[i] = cvtuduw(t, &sat);
2521
env->vscr |= (1 << VSCR_SAT);
2525
#define VMUL_DO(name, mul_element, prod_element, evenp) \
2526
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2529
VECTOR_FOR_INORDER_I(i, prod_element) { \
2531
r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
2533
r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
2537
#define VMUL(suffix, mul_element, prod_element) \
2538
VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
2539
VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
2547
void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2550
for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2551
HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
2552
/* Need to do the computation is higher precision and round
2553
* once at the end. */
2554
float64 af, bf, cf, t;
2555
af = float32_to_float64(a->f[i], &env->vec_status);
2556
bf = float32_to_float64(b->f[i], &env->vec_status);
2557
cf = float32_to_float64(c->f[i], &env->vec_status);
2558
t = float64_mul(af, cf, &env->vec_status);
2559
t = float64_sub(t, bf, &env->vec_status);
2561
r->f[i] = float64_to_float32(t, &env->vec_status);
2566
void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2570
VECTOR_FOR_INORDER_I (i, u8) {
2571
int s = c->u8[i] & 0x1f;
2572
#if defined(HOST_WORDS_BIGENDIAN)
2573
int index = s & 0xf;
2575
int index = 15 - (s & 0xf);
2578
result.u8[i] = b->u8[index];
2580
result.u8[i] = a->u8[index];
2586
#if defined(HOST_WORDS_BIGENDIAN)
2591
void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2595
#if defined(HOST_WORDS_BIGENDIAN)
2596
const ppc_avr_t *x[2] = { a, b };
2598
const ppc_avr_t *x[2] = { b, a };
2601
VECTOR_FOR_INORDER_I (i, u64) {
2602
VECTOR_FOR_INORDER_I (j, u32){
2603
uint32_t e = x[i]->u32[j];
2604
result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
2605
((e >> 6) & 0x3e0) |
2612
#define VPK(suffix, from, to, cvt, dosat) \
2613
void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2618
ppc_avr_t *a0 = PKBIG ? a : b; \
2619
ppc_avr_t *a1 = PKBIG ? b : a; \
2620
VECTOR_FOR_INORDER_I (i, from) { \
2621
result.to[i] = cvt(a0->from[i], &sat); \
2622
result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat); \
2625
if (dosat && sat) { \
2626
env->vscr |= (1 << VSCR_SAT); \
2630
VPK(shss, s16, s8, cvtshsb, 1)
2631
VPK(shus, s16, u8, cvtshub, 1)
2632
VPK(swss, s32, s16, cvtswsh, 1)
2633
VPK(swus, s32, u16, cvtswuh, 1)
2634
VPK(uhus, u16, u8, cvtuhub, 1)
2635
VPK(uwus, u32, u16, cvtuwuh, 1)
2636
VPK(uhum, u16, u8, I, 0)
2637
VPK(uwum, u32, u16, I, 0)
2642
void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b)
2645
for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2646
HANDLE_NAN1(r->f[i], b->f[i]) {
2647
r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
2652
#define VRFI(suffix, rounding) \
2653
void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b) \
2656
float_status s = env->vec_status; \
2657
set_float_rounding_mode(rounding, &s); \
2658
for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2659
HANDLE_NAN1(r->f[i], b->f[i]) { \
2660
r->f[i] = float32_round_to_int (b->f[i], &s); \
2664
VRFI(n, float_round_nearest_even)
2665
VRFI(m, float_round_down)
2666
VRFI(p, float_round_up)
2667
VRFI(z, float_round_to_zero)
2670
#define VROTATE(suffix, element) \
2671
void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2674
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2675
unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2676
unsigned int shift = b->element[i] & mask; \
2677
r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2685
void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b)
2688
for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2689
HANDLE_NAN1(r->f[i], b->f[i]) {
2690
float32 t = float32_sqrt(b->f[i], &env->vec_status);
2691
r->f[i] = float32_div(float32_one, t, &env->vec_status);
2696
void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2698
r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
2699
r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
2702
void helper_vexptefp (ppc_avr_t *r, ppc_avr_t *b)
2705
for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2706
HANDLE_NAN1(r->f[i], b->f[i]) {
2707
r->f[i] = float32_exp2(b->f[i], &env->vec_status);
2712
void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
2715
for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2716
HANDLE_NAN1(r->f[i], b->f[i]) {
2717
r->f[i] = float32_log2(b->f[i], &env->vec_status);
2722
#if defined(HOST_WORDS_BIGENDIAN)
2729
/* The specification says that the results are undefined if all of the
2730
* shift counts are not identical. We check to make sure that they are
2731
* to conform to what real hardware appears to do. */
2732
#define VSHIFT(suffix, leftp) \
2733
void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2735
int shift = b->u8[LO_IDX*15] & 0x7; \
2738
for (i = 0; i < ARRAY_SIZE(r->u8); i++) { \
2739
doit = doit && ((b->u8[i] & 0x7) == shift); \
2744
} else if (leftp) { \
2745
uint64_t carry = a->u64[LO_IDX] >> (64 - shift); \
2746
r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry; \
2747
r->u64[LO_IDX] = a->u64[LO_IDX] << shift; \
2749
uint64_t carry = a->u64[HI_IDX] << (64 - shift); \
2750
r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry; \
2751
r->u64[HI_IDX] = a->u64[HI_IDX] >> shift; \
2761
#define VSL(suffix, element) \
2762
void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2765
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2766
unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2767
unsigned int shift = b->element[i] & mask; \
2768
r->element[i] = a->element[i] << shift; \
2776
void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
2778
int sh = shift & 0xf;
2782
#if defined(HOST_WORDS_BIGENDIAN)
2783
for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2786
result.u8[i] = b->u8[index-0x10];
2788
result.u8[i] = a->u8[index];
2792
for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2793
int index = (16 - sh) + i;
2795
result.u8[i] = a->u8[index-0x10];
2797
result.u8[i] = b->u8[index];
2804
void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2806
int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2808
#if defined (HOST_WORDS_BIGENDIAN)
2809
memmove (&r->u8[0], &a->u8[sh], 16-sh);
2810
memset (&r->u8[16-sh], 0, sh);
2812
memmove (&r->u8[sh], &a->u8[0], 16-sh);
2813
memset (&r->u8[0], 0, sh);
2817
/* Experimental testing shows that hardware masks the immediate. */
2818
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
2819
#if defined(HOST_WORDS_BIGENDIAN)
2820
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
2822
#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
2824
#define VSPLT(suffix, element) \
2825
void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
2827
uint32_t s = b->element[SPLAT_ELEMENT(element)]; \
2829
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2830
r->element[i] = s; \
2837
#undef SPLAT_ELEMENT
2838
#undef _SPLAT_MASKED
2840
#define VSPLTI(suffix, element, splat_type) \
2841
void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat) \
2843
splat_type x = (int8_t)(splat << 3) >> 3; \
2845
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2846
r->element[i] = x; \
2849
VSPLTI(b, s8, int8_t)
2850
VSPLTI(h, s16, int16_t)
2851
VSPLTI(w, s32, int32_t)
2854
#define VSR(suffix, element) \
2855
void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2858
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2859
unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2860
unsigned int shift = b->element[i] & mask; \
2861
r->element[i] = a->element[i] >> shift; \
2872
void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2874
int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2876
#if defined (HOST_WORDS_BIGENDIAN)
2877
memmove (&r->u8[sh], &a->u8[0], 16-sh);
2878
memset (&r->u8[0], 0, sh);
2880
memmove (&r->u8[0], &a->u8[sh], 16-sh);
2881
memset (&r->u8[16-sh], 0, sh);
2885
void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2888
for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2889
r->u32[i] = a->u32[i] >= b->u32[i];
2893
void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2900
#if defined(HOST_WORDS_BIGENDIAN)
2901
upper = ARRAY_SIZE(r->s32)-1;
2905
t = (int64_t)b->s32[upper];
2906
for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2910
result.s32[upper] = cvtsdsw(t, &sat);
2914
env->vscr |= (1 << VSCR_SAT);
2918
void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2924
#if defined(HOST_WORDS_BIGENDIAN)
2929
for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
2930
int64_t t = (int64_t)b->s32[upper+i*2];
2932
for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
2935
result.s32[upper+i*2] = cvtsdsw(t, &sat);
2940
env->vscr |= (1 << VSCR_SAT);
2944
void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2949
for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2950
int64_t t = (int64_t)b->s32[i];
2951
for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
2954
r->s32[i] = cvtsdsw(t, &sat);
2958
env->vscr |= (1 << VSCR_SAT);
2962
void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2967
for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2968
int64_t t = (int64_t)b->s32[i];
2969
t += a->s16[2*i] + a->s16[2*i+1];
2970
r->s32[i] = cvtsdsw(t, &sat);
2974
env->vscr |= (1 << VSCR_SAT);
2978
void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2983
for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2984
uint64_t t = (uint64_t)b->u32[i];
2985
for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
2988
r->u32[i] = cvtuduw(t, &sat);
2992
env->vscr |= (1 << VSCR_SAT);
2996
#if defined(HOST_WORDS_BIGENDIAN)
3003
#define VUPKPX(suffix, hi) \
3004
void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
3008
for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \
3009
uint16_t e = b->u16[hi ? i : i+4]; \
3010
uint8_t a = (e >> 15) ? 0xff : 0; \
3011
uint8_t r = (e >> 10) & 0x1f; \
3012
uint8_t g = (e >> 5) & 0x1f; \
3013
uint8_t b = e & 0x1f; \
3014
result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \
3022
#define VUPK(suffix, unpacked, packee, hi) \
3023
void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
3028
for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) { \
3029
result.unpacked[i] = b->packee[i]; \
3032
for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
3033
result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
3038
VUPK(hsb, s16, s8, UPKHI)
3039
VUPK(hsh, s32, s16, UPKHI)
3040
VUPK(lsb, s16, s8, UPKLO)
3041
VUPK(lsh, s32, s16, UPKLO)
3046
#undef DO_HANDLE_NAN
3050
#undef VECTOR_FOR_INORDER_I
3054
/*****************************************************************************/
3055
/* SPE extension helpers */
3056
/* Use a table to make this quicker */
3057
/* Nibble bit-reversal lookup table: hbrev[i] is the 4-bit bit reversal of i
 * (e.g. 0x1 -> 0x8, 0x2 -> 0x4, 0x3 -> 0xC). */
static uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

/* Reverse the bit order of a byte: reverse each nibble via the table and
 * swap the two nibbles. */
static inline uint8_t byte_reverse(uint8_t val)
{
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
}

/* Reverse the bit order of a 32-bit word.  The explicit uint32_t casts keep
 * the shifts unsigned: byte_reverse() returns uint8_t, which promotes to
 * (signed) int, and shifting a value into the sign bit with "<< 24" is
 * undefined behavior in C. */
static inline uint32_t word_reverse(uint32_t val)
{
    return (uint32_t)byte_reverse(val >> 24) |
           ((uint32_t)byte_reverse(val >> 16) << 8) |
           ((uint32_t)byte_reverse(val >> 8) << 16) |
           ((uint32_t)byte_reverse(val) << 24);
}
3073
#define MASKBITS 16 // Random value - to be fixed (implementation dependent)
3074
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
3076
uint32_t a, b, d, mask;
3078
mask = UINT32_MAX >> (32 - MASKBITS);
3081
d = word_reverse(1 + word_reverse(a | ~b));
3082
return (arg1 & ~mask) | (d & b);
3085
uint32_t helper_cntlsw32 (uint32_t val)
3087
if (val & 0x80000000)
3093
uint32_t helper_cntlzw32 (uint32_t val)
3098
/* Single-precision floating-point conversions */
3099
static inline uint32_t efscfsi(uint32_t val)
3103
u.f = int32_to_float32(val, &env->vec_status);
3108
static inline uint32_t efscfui(uint32_t val)
3112
u.f = uint32_to_float32(val, &env->vec_status);
3117
static inline int32_t efsctsi(uint32_t val)
3122
/* NaN are not treated the same way IEEE 754 does */
3123
if (unlikely(float32_is_quiet_nan(u.f)))
3126
return float32_to_int32(u.f, &env->vec_status);
3129
static inline uint32_t efsctui(uint32_t val)
3134
/* NaN are not treated the same way IEEE 754 does */
3135
if (unlikely(float32_is_quiet_nan(u.f)))
3138
return float32_to_uint32(u.f, &env->vec_status);
3141
static inline uint32_t efsctsiz(uint32_t val)
3146
/* NaN are not treated the same way IEEE 754 does */
3147
if (unlikely(float32_is_quiet_nan(u.f)))
3150
return float32_to_int32_round_to_zero(u.f, &env->vec_status);
3153
static inline uint32_t efsctuiz(uint32_t val)
3158
/* NaN are not treated the same way IEEE 754 does */
3159
if (unlikely(float32_is_quiet_nan(u.f)))
3162
return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
3165
static inline uint32_t efscfsf(uint32_t val)
3170
u.f = int32_to_float32(val, &env->vec_status);
3171
tmp = int64_to_float32(1ULL << 32, &env->vec_status);
3172
u.f = float32_div(u.f, tmp, &env->vec_status);
3177
static inline uint32_t efscfuf(uint32_t val)
3182
u.f = uint32_to_float32(val, &env->vec_status);
3183
tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3184
u.f = float32_div(u.f, tmp, &env->vec_status);
3189
static inline uint32_t efsctsf(uint32_t val)
3195
/* NaN are not treated the same way IEEE 754 does */
3196
if (unlikely(float32_is_quiet_nan(u.f)))
3198
tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3199
u.f = float32_mul(u.f, tmp, &env->vec_status);
3201
return float32_to_int32(u.f, &env->vec_status);
3204
static inline uint32_t efsctuf(uint32_t val)
3210
/* NaN are not treated the same way IEEE 754 does */
3211
if (unlikely(float32_is_quiet_nan(u.f)))
3213
tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3214
u.f = float32_mul(u.f, tmp, &env->vec_status);
3216
return float32_to_uint32(u.f, &env->vec_status);
3219
/* Wrap a scalar SPE single-precision conversion routine e##name() as a
 * helper callable from generated code. */
#define HELPER_SPE_SINGLE_CONV(name)                                          \
uint32_t helper_e##name (uint32_t val)                                        \
{                                                                             \
    return e##name(val);                                                      \
}
/* Integer to floating-point conversion */
HELPER_SPE_SINGLE_CONV(fscfsi);
HELPER_SPE_SINGLE_CONV(fscfui);
HELPER_SPE_SINGLE_CONV(fscfuf);
HELPER_SPE_SINGLE_CONV(fscfsf);
/* Floating-point to integer conversion */
HELPER_SPE_SINGLE_CONV(fsctsi);
HELPER_SPE_SINGLE_CONV(fsctui);
HELPER_SPE_SINGLE_CONV(fsctsiz);
HELPER_SPE_SINGLE_CONV(fsctuiz);
HELPER_SPE_SINGLE_CONV(fsctsf);
HELPER_SPE_SINGLE_CONV(fsctuf);
3245
/* Apply a scalar SPE conversion e##name() to both 32-bit halves of a
 * 64-bit SPE vector operand. */
#define HELPER_SPE_VECTOR_CONV(name)                                          \
uint64_t helper_ev##name (uint64_t val)                                       \
{                                                                             \
    return ((uint64_t)e##name(val >> 32) << 32) |                             \
            (uint64_t)e##name(val);                                           \
}
/* Integer to floating-point conversion */
HELPER_SPE_VECTOR_CONV(fscfsi);
HELPER_SPE_VECTOR_CONV(fscfui);
HELPER_SPE_VECTOR_CONV(fscfuf);
HELPER_SPE_VECTOR_CONV(fscfsf);
/* Floating-point to integer conversion */
HELPER_SPE_VECTOR_CONV(fsctsi);
HELPER_SPE_VECTOR_CONV(fsctui);
HELPER_SPE_VECTOR_CONV(fsctsiz);
HELPER_SPE_VECTOR_CONV(fsctuiz);
HELPER_SPE_VECTOR_CONV(fsctsf);
HELPER_SPE_VECTOR_CONV(fsctuf);
3272
/* Single-precision floating-point arithmetic */
3273
static inline uint32_t efsadd(uint32_t op1, uint32_t op2)
3278
u1.f = float32_add(u1.f, u2.f, &env->vec_status);
3282
static inline uint32_t efssub(uint32_t op1, uint32_t op2)
3287
u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
3291
static inline uint32_t efsmul(uint32_t op1, uint32_t op2)
3296
u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
3300
static inline uint32_t efsdiv(uint32_t op1, uint32_t op2)
3305
u1.f = float32_div(u1.f, u2.f, &env->vec_status);
3309
/* Wrap a scalar SPE single-precision arithmetic routine e##name() as a
 * two-operand helper. */
#define HELPER_SPE_SINGLE_ARITH(name)                                         \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2);                                                 \
}
HELPER_SPE_SINGLE_ARITH(fsadd);
HELPER_SPE_SINGLE_ARITH(fssub);
HELPER_SPE_SINGLE_ARITH(fsmul);
HELPER_SPE_SINGLE_ARITH(fsdiv);
3323
/* Apply a scalar SPE arithmetic routine e##name() independently to both
 * 32-bit halves of two 64-bit SPE vector operands. */
#define HELPER_SPE_VECTOR_ARITH(name)                                         \
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |                  \
            (uint64_t)e##name(op1, op2);                                      \
}
HELPER_SPE_VECTOR_ARITH(fsadd);
HELPER_SPE_VECTOR_ARITH(fssub);
HELPER_SPE_VECTOR_ARITH(fsmul);
HELPER_SPE_VECTOR_ARITH(fsdiv);
3338
/* Single-precision floating-point comparisons */
3339
static inline uint32_t efscmplt(uint32_t op1, uint32_t op2)
3344
return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3347
static inline uint32_t efscmpgt(uint32_t op1, uint32_t op2)
3352
return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
3355
static inline uint32_t efscmpeq(uint32_t op1, uint32_t op2)
3360
return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3363
/* efststlt: "test" (non-trapping) variant of the less-than compare; the ISA
 * variant is supposed to ignore special values, but this implementation
 * simply reuses the full compare. */
static inline uint32_t efststlt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmplt(op1, op2);
}
3369
/* efststgt: "test" (non-trapping) variant of the greater-than compare; the
 * ISA variant is supposed to ignore special values, but this implementation
 * simply reuses the full compare. */
static inline uint32_t efststgt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpgt(op1, op2);
}
3375
/* efststeq: "test" (non-trapping) variant of the equality compare; the ISA
 * variant is supposed to ignore special values, but this implementation
 * simply reuses the full compare. */
static inline uint32_t efststeq(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpeq(op1, op2);
}
3381
/* Wrap a scalar SPE compare routine e##name() as a helper; the result is
 * shifted left by 2 to position it within the CR field. */
#define HELPER_SINGLE_SPE_CMP(name)                                           \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2) << 2;                                            \
}
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);
3399
/* Merge the two per-half SPE compare results (each 0 or 1) into one 4-bit
 * value: bit 3 = high-half result, bit 2 = low-half result, bit 1 = OR of
 * both, bit 0 = AND of both. */
static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
3404
/* Apply a scalar SPE compare e##name() to both halves of the 64-bit vector
 * operands and merge the two results with evcmp_merge(). */
#define HELPER_VECTOR_SPE_CMP(name)                                          \
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                        \
{                                                                            \
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2));    \
}
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
3422
/* Double-precision floating-point conversion */
3423
uint64_t helper_efdcfsi (uint32_t val)
3427
u.d = int32_to_float64(val, &env->vec_status);
3432
uint64_t helper_efdcfsid (uint64_t val)
3436
u.d = int64_to_float64(val, &env->vec_status);
3441
uint64_t helper_efdcfui (uint32_t val)
3445
u.d = uint32_to_float64(val, &env->vec_status);
3450
uint64_t helper_efdcfuid (uint64_t val)
3454
u.d = uint64_to_float64(val, &env->vec_status);
3459
uint32_t helper_efdctsi (uint64_t val)
3464
/* NaN are not treated the same way IEEE 754 does */
3465
if (unlikely(float64_is_any_nan(u.d))) {
3469
return float64_to_int32(u.d, &env->vec_status);
3472
uint32_t helper_efdctui (uint64_t val)
3477
/* NaN are not treated the same way IEEE 754 does */
3478
if (unlikely(float64_is_any_nan(u.d))) {
3482
return float64_to_uint32(u.d, &env->vec_status);
3485
uint32_t helper_efdctsiz (uint64_t val)
3490
/* NaN are not treated the same way IEEE 754 does */
3491
if (unlikely(float64_is_any_nan(u.d))) {
3495
return float64_to_int32_round_to_zero(u.d, &env->vec_status);
3498
uint64_t helper_efdctsidz (uint64_t val)
3503
/* NaN are not treated the same way IEEE 754 does */
3504
if (unlikely(float64_is_any_nan(u.d))) {
3508
return float64_to_int64_round_to_zero(u.d, &env->vec_status);
3511
uint32_t helper_efdctuiz (uint64_t val)
3516
/* NaN are not treated the same way IEEE 754 does */
3517
if (unlikely(float64_is_any_nan(u.d))) {
3521
return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
3524
uint64_t helper_efdctuidz (uint64_t val)
3529
/* NaN are not treated the same way IEEE 754 does */
3530
if (unlikely(float64_is_any_nan(u.d))) {
3534
return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
3537
uint64_t helper_efdcfsf (uint32_t val)
3542
u.d = int32_to_float64(val, &env->vec_status);
3543
tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3544
u.d = float64_div(u.d, tmp, &env->vec_status);
3549
uint64_t helper_efdcfuf (uint32_t val)
3554
u.d = uint32_to_float64(val, &env->vec_status);
3555
tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3556
u.d = float64_div(u.d, tmp, &env->vec_status);
3561
uint32_t helper_efdctsf (uint64_t val)
3567
/* NaN are not treated the same way IEEE 754 does */
3568
if (unlikely(float64_is_any_nan(u.d))) {
3571
tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3572
u.d = float64_mul(u.d, tmp, &env->vec_status);
3574
return float64_to_int32(u.d, &env->vec_status);
3577
uint32_t helper_efdctuf (uint64_t val)
3583
/* NaN are not treated the same way IEEE 754 does */
3584
if (unlikely(float64_is_any_nan(u.d))) {
3587
tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3588
u.d = float64_mul(u.d, tmp, &env->vec_status);
3590
return float64_to_uint32(u.d, &env->vec_status);
3593
uint32_t helper_efscfd (uint64_t val)
3599
u2.f = float64_to_float32(u1.d, &env->vec_status);
3604
uint64_t helper_efdcfs (uint32_t val)
3610
u2.d = float32_to_float64(u1.f, &env->vec_status);
3615
/* Double precision fixed-point arithmetic */
3616
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
3621
u1.d = float64_add(u1.d, u2.d, &env->vec_status);
3625
uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
3630
u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
3634
uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
3639
u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
3643
uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
3648
u1.d = float64_div(u1.d, u2.d, &env->vec_status);
3652
/* Double precision floating point helpers */
3653
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
3658
return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3661
uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
3666
return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
3669
uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
3674
return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3677
/* efdcmplt: double-precision less-than compare; currently identical to the
 * "test" variant (special values not yet handled separately). */
uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstlt(op1, op2);
}
3683
/* efdcmpgt: double-precision greater-than compare; currently identical to
 * the "test" variant (special values not yet handled separately). */
uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstgt(op1, op2);
}
3689
/* efdcmpeq: double-precision equality compare; currently identical to the
 * "test" variant (special values not yet handled separately). */
uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtsteq(op1, op2);
}
3695
/*****************************************************************************/
3696
/* Softmmu support */
3697
#if !defined (CONFIG_USER_ONLY)
3699
#define MMUSUFFIX _mmu
3702
#include "softmmu_template.h"
3705
#include "softmmu_template.h"
3708
#include "softmmu_template.h"
3711
#include "softmmu_template.h"
3713
/* try to fill the TLB and return an exception if error. If retaddr is
3714
NULL, it means that the function was called in C code (i.e. not
3715
from generated code or from helper.c) */
3716
/* XXX: fix it to restore all registers */
3717
void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx,
3720
TranslationBlock *tb;
3721
CPUState *saved_env;
3727
ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx);
3728
if (unlikely(ret != 0)) {
3729
if (likely(retaddr)) {
3730
/* now we have a real cpu fault */
3731
pc = (unsigned long)retaddr;
3732
tb = tb_find_pc(pc);
3734
/* the PC is inside the translated code. It means that we have
3735
a virtual CPU fault */
3736
cpu_restore_state(tb, env, pc);
3739
helper_raise_exception_err(env->exception_index, env->error_code);
3744
/* Segment registers load and store */
3745
target_ulong helper_load_sr (target_ulong sr_num)
3747
#if defined(TARGET_PPC64)
3748
if (env->mmu_model & POWERPC_MMU_64)
3749
return ppc_load_sr(env, sr_num);
3751
return env->sr[sr_num];
3754
/* Store a segment register: thin wrapper around ppc_store_sr() using the
 * global CPU state (env). */
void helper_store_sr (target_ulong sr_num, target_ulong val)
{
    ppc_store_sr(env, sr_num, val);
}
3759
/* SLB management */
3760
#if defined(TARGET_PPC64)
3761
/* Write an SLB entry from the RB/RS operands; a negative return from
 * ppc_store_slb() indicates an invalid entry and raises a program
 * exception. */
void helper_store_slb (target_ulong rb, target_ulong rs)
{
    if (ppc_store_slb(env, rb, rs) < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
}
3768
target_ulong helper_load_slb_esid (target_ulong rb)
3772
if (ppc_load_slb_esid(env, rb, &rt) < 0) {
3773
helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
3778
target_ulong helper_load_slb_vsid (target_ulong rb)
3782
if (ppc_load_slb_vsid(env, rb, &rt) < 0) {
3783
helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
3788
void helper_slbia (void)
3790
ppc_slb_invalidate_all(env);
3793
/* slbie: invalidate the SLB entry matching the given effective address. */
void helper_slbie (target_ulong addr)
{
    ppc_slb_invalidate_one(env, addr);
}
3798
#endif /* defined(TARGET_PPC64) */
3800
/* TLB management */
3801
void helper_tlbia (void)
3803
ppc_tlb_invalidate_all(env);
3806
/* tlbie: invalidate the TLB entry for the given effective address. */
void helper_tlbie (target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}
3811
/* Software driven TLBs management */
3812
/* PowerPC 602/603 software TLB load instructions helpers */
3813
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
3815
target_ulong RPN, CMP, EPN;
3818
RPN = env->spr[SPR_RPA];
3820
CMP = env->spr[SPR_ICMP];
3821
EPN = env->spr[SPR_IMISS];
3823
CMP = env->spr[SPR_DCMP];
3824
EPN = env->spr[SPR_DMISS];
3826
way = (env->spr[SPR_SRR1] >> 17) & 1;
3827
(void)EPN; /* avoid a compiler warning */
3828
LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
3829
" PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
3831
/* Store this TLB */
3832
ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3833
way, is_code, CMP, RPN);
3836
void helper_6xx_tlbd (target_ulong EPN)
3841
void helper_6xx_tlbi (target_ulong EPN)
3846
/* PowerPC 74xx software TLB load instructions helpers */
3847
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
3849
target_ulong RPN, CMP, EPN;
3852
RPN = env->spr[SPR_PTELO];
3853
CMP = env->spr[SPR_PTEHI];
3854
EPN = env->spr[SPR_TLBMISS] & ~0x3;
3855
way = env->spr[SPR_TLBMISS] & 0x3;
3856
(void)EPN; /* avoid a compiler warning */
3857
LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
3858
" PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
3860
/* Store this TLB */
3861
ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3862
way, is_code, CMP, RPN);
3865
/* 74xx software TLB load, data side (is_code = 0). */
void helper_74xx_tlbd (target_ulong EPN)
{
    do_74xx_tlb(EPN, 0);
}
3870
/* 74xx software TLB load, instruction side (is_code = 1). */
void helper_74xx_tlbi (target_ulong EPN)
{
    do_74xx_tlb(EPN, 1);
}
3875
static inline target_ulong booke_tlb_to_page_size(int size)
3877
return 1024 << (2 * size);
3880
static inline int booke_page_size_to_tlb(target_ulong page_size)
3884
switch (page_size) {
3918
#if defined (TARGET_PPC64)
3919
case 0x000100000000ULL:
3922
case 0x000400000000ULL:
3925
case 0x001000000000ULL:
3928
case 0x004000000000ULL:
3931
case 0x010000000000ULL:
3943
/* Helpers for 4xx TLB management */
3944
#define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */
3946
#define PPC4XX_TLBHI_V 0x00000040
3947
#define PPC4XX_TLBHI_E 0x00000020
3948
#define PPC4XX_TLBHI_SIZE_MIN 0
3949
#define PPC4XX_TLBHI_SIZE_MAX 7
3950
#define PPC4XX_TLBHI_SIZE_DEFAULT 1
3951
#define PPC4XX_TLBHI_SIZE_SHIFT 7
3952
#define PPC4XX_TLBHI_SIZE_MASK 0x00000007
3954
#define PPC4XX_TLBLO_EX 0x00000200
3955
#define PPC4XX_TLBLO_WR 0x00000100
3956
#define PPC4XX_TLBLO_ATTR_MASK 0x000000FF
3957
#define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00
3959
target_ulong helper_4xx_tlbre_hi (target_ulong entry)
3965
entry &= PPC4XX_TLB_ENTRY_MASK;
3966
tlb = &env->tlb.tlbe[entry];
3968
if (tlb->prot & PAGE_VALID) {
3969
ret |= PPC4XX_TLBHI_V;
3971
size = booke_page_size_to_tlb(tlb->size);
3972
if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
3973
size = PPC4XX_TLBHI_SIZE_DEFAULT;
3975
ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
3976
env->spr[SPR_40x_PID] = tlb->PID;
3980
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
3985
entry &= PPC4XX_TLB_ENTRY_MASK;
3986
tlb = &env->tlb.tlbe[entry];
3988
if (tlb->prot & PAGE_EXEC) {
3989
ret |= PPC4XX_TLBLO_EX;
3991
if (tlb->prot & PAGE_WRITE) {
3992
ret |= PPC4XX_TLBLO_WR;
3997
void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
4000
target_ulong page, end;
4002
LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
4004
entry &= PPC4XX_TLB_ENTRY_MASK;
4005
tlb = &env->tlb.tlbe[entry];
4006
/* Invalidate previous TLB (if it's valid) */
4007
if (tlb->prot & PAGE_VALID) {
4008
end = tlb->EPN + tlb->size;
4009
LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
4010
TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
4011
for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
4012
tlb_flush_page(env, page);
4015
tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
4016
& PPC4XX_TLBHI_SIZE_MASK);
4017
/* We cannot handle TLB size < TARGET_PAGE_SIZE.
4018
* If this ever occurs, one should use the ppcemb target instead
4019
* of the ppc or ppc64 one
4021
if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
4022
cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
4023
"are not supported (%d)\n",
4024
tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
4026
tlb->EPN = val & ~(tlb->size - 1);
4027
if (val & PPC4XX_TLBHI_V) {
4028
tlb->prot |= PAGE_VALID;
4029
if (val & PPC4XX_TLBHI_E) {
4030
/* XXX: TO BE FIXED */
4032
"Little-endian TLB entries are not supported by now\n");
4035
tlb->prot &= ~PAGE_VALID;
4037
tlb->PID = env->spr[SPR_40x_PID]; /* PID */
4038
LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
4039
" size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
4040
(int)entry, tlb->RPN, tlb->EPN, tlb->size,
4041
tlb->prot & PAGE_READ ? 'r' : '-',
4042
tlb->prot & PAGE_WRITE ? 'w' : '-',
4043
tlb->prot & PAGE_EXEC ? 'x' : '-',
4044
tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
4045
/* Invalidate new TLB (if valid) */
4046
if (tlb->prot & PAGE_VALID) {
4047
end = tlb->EPN + tlb->size;
4048
LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
4049
TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
4050
for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
4051
tlb_flush_page(env, page);
4056
void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
4060
LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
4062
entry &= PPC4XX_TLB_ENTRY_MASK;
4063
tlb = &env->tlb.tlbe[entry];
4064
tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
4065
tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
4066
tlb->prot = PAGE_READ;
4067
if (val & PPC4XX_TLBLO_EX) {
4068
tlb->prot |= PAGE_EXEC;
4070
if (val & PPC4XX_TLBLO_WR) {
4071
tlb->prot |= PAGE_WRITE;
4073
LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
4074
" size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
4075
(int)entry, tlb->RPN, tlb->EPN, tlb->size,
4076
tlb->prot & PAGE_READ ? 'r' : '-',
4077
tlb->prot & PAGE_WRITE ? 'w' : '-',
4078
tlb->prot & PAGE_EXEC ? 'x' : '-',
4079
tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
4082
/* 4xx tlbsx: search the embedded TLB for the entry matching the address and
 * the current 40x PID. */
target_ulong helper_4xx_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}
4087
/* PowerPC 440 TLB management */
4088
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
4091
target_ulong EPN, RPN, size;
4094
LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
4095
__func__, word, (int)entry, value);
4098
tlb = &env->tlb.tlbe[entry];
4101
/* Just here to please gcc */
4103
EPN = value & 0xFFFFFC00;
4104
if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
4107
size = booke_tlb_to_page_size((value >> 4) & 0xF);
4108
if ((tlb->prot & PAGE_VALID) && tlb->size < size)
4112
tlb->attr |= (value >> 8) & 1;
4113
if (value & 0x200) {
4114
tlb->prot |= PAGE_VALID;
4116
if (tlb->prot & PAGE_VALID) {
4117
tlb->prot &= ~PAGE_VALID;
4121
tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
4126
RPN = value & 0xFFFFFC0F;
4127
if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
4132
tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
4133
tlb->prot = tlb->prot & PAGE_VALID;
4135
tlb->prot |= PAGE_READ << 4;
4137
tlb->prot |= PAGE_WRITE << 4;
4139
tlb->prot |= PAGE_EXEC << 4;
4141
tlb->prot |= PAGE_READ;
4143
tlb->prot |= PAGE_WRITE;
4145
tlb->prot |= PAGE_EXEC;
4150
target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
4157
tlb = &env->tlb.tlbe[entry];
4160
/* Just here to please gcc */
4163
size = booke_page_size_to_tlb(tlb->size);
4164
if (size < 0 || size > 0xF)
4167
if (tlb->attr & 0x1)
4169
if (tlb->prot & PAGE_VALID)
4171
env->spr[SPR_440_MMUCR] &= ~0x000000FF;
4172
env->spr[SPR_440_MMUCR] |= tlb->PID;
4178
ret = tlb->attr & ~0x1;
4179
if (tlb->prot & (PAGE_READ << 4))
4181
if (tlb->prot & (PAGE_WRITE << 4))
4183
if (tlb->prot & (PAGE_EXEC << 4))
4185
if (tlb->prot & PAGE_READ)
4187
if (tlb->prot & PAGE_WRITE)
4189
if (tlb->prot & PAGE_EXEC)
4196
/* 440 tlbsx: search the embedded TLB for the entry matching the address and
 * the PID held in the low byte of MMUCR. */
target_ulong helper_440_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}
4201
/* PowerPC BookE 2.06 TLB management */
4203
static ppcmas_tlb_t *booke206_cur_tlb(CPUState *env)
4205
uint32_t tlbncfg = 0;
4206
int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
4207
int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
4210
tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
4211
tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];
4213
if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
4214
cpu_abort(env, "we don't support HES yet\n");
4217
return booke206_get_tlbm(env, tlb, ea, esel);
4220
void helper_booke_setpid(uint32_t pidn, target_ulong pid)
4222
env->spr[pidn] = pid;
4223
/* changing PIDs mean we're in a different address space now */
4227
void helper_booke206_tlbwe(void)
4229
uint32_t tlbncfg, tlbn;
4231
uint32_t size_tlb, size_ps;
4233
switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
4234
case MAS0_WQ_ALWAYS:
4235
/* good to go, write that entry */
4238
/* XXX check if reserved */
4243
case MAS0_WQ_CLR_RSRV:
4244
/* XXX clear entry */
4247
/* no idea what to do */
4251
if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
4253
/* XXX we don't support direct LRAT setting yet */
4254
fprintf(stderr, "cpu: don't support LRAT setting yet\n");
4258
tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
4259
tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];
4261
tlb = booke206_cur_tlb(env);
4264
helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
4265
POWERPC_EXCP_INVAL |
4266
POWERPC_EXCP_INVAL_INVAL);
4269
/* check that we support the targeted size */
4270
size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
4271
size_ps = booke206_tlbnps(env, tlbn);
4272
if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
4273
!(size_ps & (1 << size_tlb))) {
4274
helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
4275
POWERPC_EXCP_INVAL |
4276
POWERPC_EXCP_INVAL_INVAL);
4280
cpu_abort(env, "missing HV implementation\n");
4282
tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
4283
env->spr[SPR_BOOKE_MAS3];
4284
tlb->mas1 = env->spr[SPR_BOOKE_MAS1];
4287
if (!(tlbncfg & TLBnCFG_AVAIL)) {
4288
/* force !AVAIL TLB entries to correct page size */
4289
tlb->mas1 &= ~MAS1_TSIZE_MASK;
4290
/* XXX can be configured in MMUCSR0 */
4291
tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12;
4294
/* XXX needs to change when supporting 64-bit e500 */
4295
tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & 0xffffffff;
4297
if (!(tlbncfg & TLBnCFG_IPROT)) {
4298
/* no IPROT supported by TLB */
4299
tlb->mas1 &= ~MAS1_IPROT;
4302
if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
4303
tlb_flush_page(env, tlb->mas2 & MAS2_EPN_MASK);
4309
static inline void booke206_tlb_to_mas(CPUState *env, ppcmas_tlb_t *tlb)
4311
int tlbn = booke206_tlbm_to_tlbn(env, tlb);
4312
int way = booke206_tlbm_to_way(env, tlb);
4314
env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT;
4315
env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT;
4316
env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
4318
env->spr[SPR_BOOKE_MAS1] = tlb->mas1;
4319
env->spr[SPR_BOOKE_MAS2] = tlb->mas2;
4320
env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3;
4321
env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32;
4324
void helper_booke206_tlbre(void)
4326
ppcmas_tlb_t *tlb = NULL;
4328
tlb = booke206_cur_tlb(env);
4330
env->spr[SPR_BOOKE_MAS1] = 0;
4332
booke206_tlb_to_mas(env, tlb);
4336
void helper_booke206_tlbsx(target_ulong address)
4338
ppcmas_tlb_t *tlb = NULL;
4340
target_phys_addr_t raddr;
4343
spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
4344
sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;
4346
for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
4347
int ways = booke206_tlb_ways(env, i);
4349
for (j = 0; j < ways; j++) {
4350
tlb = booke206_get_tlbm(env, i, address, j);
4356
if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
4360
if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
4364
booke206_tlb_to_mas(env, tlb);
4369
/* no entry found, fill with defaults */
4370
env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
4371
env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
4372
env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
4373
env->spr[SPR_BOOKE_MAS3] = 0;
4374
env->spr[SPR_BOOKE_MAS7] = 0;
4376
if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
4377
env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
4380
env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
4383
/* next victim logic */
4384
env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
4386
env->last_way &= booke206_tlb_ways(env, 0) - 1;
4387
env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
4390
static inline void booke206_invalidate_ea_tlb(CPUState *env, int tlbn,
4394
int ways = booke206_tlb_ways(env, tlbn);
4397
for (i = 0; i < ways; i++) {
4398
ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
4402
mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
4403
if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
4404
!(tlb->mas1 & MAS1_IPROT)) {
4405
tlb->mas1 &= ~MAS1_VALID;
4410
void helper_booke206_tlbivax(target_ulong address)
4412
if (address & 0x4) {
4413
/* flush all entries */
4414
if (address & 0x8) {
4415
/* flush all of TLB1 */
4416
booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
4418
/* flush all of TLB0 */
4419
booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
4424
if (address & 0x8) {
4425
/* flush TLB1 entries */
4426
booke206_invalidate_ea_tlb(env, 1, address);
4429
/* flush TLB0 entries */
4430
booke206_invalidate_ea_tlb(env, 0, address);
4431
tlb_flush_page(env, address & MAS2_EPN_MASK);
4435
/* BookE 2.06 tlbilx T=0: flush all TLBs (the address operand is unused). */
void helper_booke206_tlbilx0(target_ulong address)
{
    /* XXX missing LPID handling */
    booke206_flush_tlb(env, -1, 1);
}
4441
void helper_booke206_tlbilx1(target_ulong address)
4444
int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
4445
ppcmas_tlb_t *tlb = env->tlb.tlbm;
4448
/* XXX missing LPID handling */
4449
for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
4450
tlb_size = booke206_tlb_size(env, i);
4451
for (j = 0; j < tlb_size; j++) {
4452
if (!(tlb[j].mas1 & MAS1_IPROT) &&
4453
((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
4454
tlb[j].mas1 &= ~MAS1_VALID;
4457
tlb += booke206_tlb_size(env, i);
4462
void helper_booke206_tlbilx3(target_ulong address)
4466
int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
4467
int pid = tid >> MAS6_SPID_SHIFT;
4468
int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
4469
int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
4470
/* XXX check for unsupported isize and raise an invalid opcode then */
4471
int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
4472
/* XXX implement MAV2 handling */
4475
/* XXX missing LPID handling */
4476
/* flush by pid and ea */
4477
for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
4478
int ways = booke206_tlb_ways(env, i);
4480
for (j = 0; j < ways; j++) {
4481
tlb = booke206_get_tlbm(env, i, address, j);
4485
if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
4486
(tlb->mas1 & MAS1_IPROT) ||
4487
((tlb->mas1 & MAS1_IND) != ind) ||
4488
((tlb->mas8 & MAS8_TGS) != sgs)) {
4491
if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
4492
/* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
4495
/* XXX e500mc doesn't match SAS, but other cores might */
4496
tlb->mas1 &= ~MAS1_VALID;
4502
void helper_booke206_tlbflush(uint32_t type)
4507
flags |= BOOKE206_FLUSH_TLB1;
4511
flags |= BOOKE206_FLUSH_TLB0;
4514
booke206_flush_tlb(env, flags, 1);
4517
/* Embedded.Processor Control */
4518
static int dbell2irq(target_ulong rb)
4520
int msg = rb & DBELL_TYPE_MASK;
4524
case DBELL_TYPE_DBELL:
4525
irq = PPC_INTERRUPT_DOORBELL;
4527
case DBELL_TYPE_DBELL_CRIT:
4528
irq = PPC_INTERRUPT_CDOORBELL;
4530
case DBELL_TYPE_G_DBELL:
4531
case DBELL_TYPE_G_DBELL_CRIT:
4532
case DBELL_TYPE_G_DBELL_MC:
4541
void helper_msgclr(target_ulong rb)
4543
int irq = dbell2irq(rb);
4549
env->pending_interrupts &= ~(1 << irq);
4552
void helper_msgsnd(target_ulong rb)
4554
int irq = dbell2irq(rb);
4555
int pir = rb & DBELL_PIRTAG_MASK;
4562
for (cenv = first_cpu; cenv != NULL; cenv = cenv->next_cpu) {
4563
if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
4564
cenv->pending_interrupts |= 1 << irq;
4565
cpu_interrupt(cenv, CPU_INTERRUPT_HARD);
4570
#endif /* !CONFIG_USER_ONLY */