2
* PowerPC emulation helpers for qemu.
4
* Copyright (c) 2003-2007 Jocelyn Mayer
6
* This library is free software; you can redistribute it and/or
7
* modify it under the terms of the GNU Lesser General Public
8
* License as published by the Free Software Foundation; either
9
* version 2 of the License, or (at your option) any later version.
11
* This library is distributed in the hope that it will be useful,
12
* but WITHOUT ANY WARRANTY; without even the implied warranty of
13
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14
* Lesser General Public License for more details.
16
* You should have received a copy of the GNU Lesser General Public
17
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
21
#include "host-utils.h"
24
#include "helper_regs.h"
27
//#define DEBUG_EXCEPTIONS
28
//#define DEBUG_SOFTWARE_TLB
30
#ifdef DEBUG_SOFTWARE_TLB
31
# define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
33
# define LOG_SWTLB(...) do { } while (0)
37
/*****************************************************************************/
38
/* Exceptions processing helpers */
40
void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
43
printf("Raise exception %3x code : %d\n", exception, error_code);
45
env->exception_index = exception;
46
env->error_code = error_code;
50
void helper_raise_exception (uint32_t exception)
52
helper_raise_exception_err(exception, 0);
55
/*****************************************************************************/
57
void helper_load_dump_spr (uint32_t sprn)
59
qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
63
void helper_store_dump_spr (uint32_t sprn)
65
qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
69
target_ulong helper_load_tbl (void)
71
return (target_ulong)cpu_ppc_load_tbl(env);
74
target_ulong helper_load_tbu (void)
76
return cpu_ppc_load_tbu(env);
79
target_ulong helper_load_atbl (void)
81
return (target_ulong)cpu_ppc_load_atbl(env);
84
target_ulong helper_load_atbu (void)
86
return cpu_ppc_load_atbu(env);
89
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
90
target_ulong helper_load_purr (void)
92
return (target_ulong)cpu_ppc_load_purr(env);
96
target_ulong helper_load_601_rtcl (void)
98
return cpu_ppc601_load_rtcl(env);
101
target_ulong helper_load_601_rtcu (void)
103
return cpu_ppc601_load_rtcu(env);
106
#if !defined(CONFIG_USER_ONLY)
107
#if defined (TARGET_PPC64)
108
void helper_store_asr (target_ulong val)
110
ppc_store_asr(env, val);
114
void helper_store_sdr1 (target_ulong val)
116
ppc_store_sdr1(env, val);
119
void helper_store_tbl (target_ulong val)
121
cpu_ppc_store_tbl(env, val);
124
void helper_store_tbu (target_ulong val)
126
cpu_ppc_store_tbu(env, val);
129
void helper_store_atbl (target_ulong val)
131
cpu_ppc_store_atbl(env, val);
134
void helper_store_atbu (target_ulong val)
136
cpu_ppc_store_atbu(env, val);
139
void helper_store_601_rtcl (target_ulong val)
141
cpu_ppc601_store_rtcl(env, val);
144
void helper_store_601_rtcu (target_ulong val)
146
cpu_ppc601_store_rtcu(env, val);
149
target_ulong helper_load_decr (void)
151
return cpu_ppc_load_decr(env);
154
void helper_store_decr (target_ulong val)
156
cpu_ppc_store_decr(env, val);
159
void helper_store_hid0_601 (target_ulong val)
163
hid0 = env->spr[SPR_HID0];
164
if ((val ^ hid0) & 0x00000008) {
165
/* Change current endianness */
166
env->hflags &= ~(1 << MSR_LE);
167
env->hflags_nmsr &= ~(1 << MSR_LE);
168
env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
169
env->hflags |= env->hflags_nmsr;
170
qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__,
171
val & 0x8 ? 'l' : 'b', env->hflags);
173
env->spr[SPR_HID0] = (uint32_t)val;
176
void helper_store_403_pbr (uint32_t num, target_ulong value)
178
if (likely(env->pb[num] != value)) {
179
env->pb[num] = value;
180
/* Should be optimized */
185
target_ulong helper_load_40x_pit (void)
187
return load_40x_pit(env);
190
void helper_store_40x_pit (target_ulong val)
192
store_40x_pit(env, val);
195
void helper_store_40x_dbcr0 (target_ulong val)
197
store_40x_dbcr0(env, val);
200
void helper_store_40x_sler (target_ulong val)
202
store_40x_sler(env, val);
205
void helper_store_booke_tcr (target_ulong val)
207
store_booke_tcr(env, val);
210
void helper_store_booke_tsr (target_ulong val)
212
store_booke_tsr(env, val);
215
void helper_store_ibatu (uint32_t nr, target_ulong val)
217
ppc_store_ibatu(env, nr, val);
220
void helper_store_ibatl (uint32_t nr, target_ulong val)
222
ppc_store_ibatl(env, nr, val);
225
void helper_store_dbatu (uint32_t nr, target_ulong val)
227
ppc_store_dbatu(env, nr, val);
230
void helper_store_dbatl (uint32_t nr, target_ulong val)
232
ppc_store_dbatl(env, nr, val);
235
void helper_store_601_batl (uint32_t nr, target_ulong val)
237
ppc_store_ibatl_601(env, nr, val);
240
void helper_store_601_batu (uint32_t nr, target_ulong val)
242
ppc_store_ibatu_601(env, nr, val);
246
/*****************************************************************************/
247
/* Memory load and stores */
249
static inline target_ulong addr_add(target_ulong addr, target_long arg)
251
#if defined(TARGET_PPC64)
253
return (uint32_t)(addr + arg);
259
void helper_lmw (target_ulong addr, uint32_t reg)
261
for (; reg < 32; reg++) {
263
env->gpr[reg] = bswap32(ldl(addr));
265
env->gpr[reg] = ldl(addr);
266
addr = addr_add(addr, 4);
270
void helper_stmw (target_ulong addr, uint32_t reg)
272
for (; reg < 32; reg++) {
274
stl(addr, bswap32((uint32_t)env->gpr[reg]));
276
stl(addr, (uint32_t)env->gpr[reg]);
277
addr = addr_add(addr, 4);
281
void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
284
for (; nb > 3; nb -= 4) {
285
env->gpr[reg] = ldl(addr);
286
reg = (reg + 1) % 32;
287
addr = addr_add(addr, 4);
289
if (unlikely(nb > 0)) {
291
for (sh = 24; nb > 0; nb--, sh -= 8) {
292
env->gpr[reg] |= ldub(addr) << sh;
293
addr = addr_add(addr, 1);
297
/* PPC32 specification says we must generate an exception if
298
* rA is in the range of registers to be loaded.
299
* In an other hand, IBM says this is valid, but rA won't be loaded.
300
* For now, I'll follow the spec...
302
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
304
if (likely(xer_bc != 0)) {
305
if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
306
(reg < rb && (reg + xer_bc) > rb))) {
307
helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
309
POWERPC_EXCP_INVAL_LSWX);
311
helper_lsw(addr, xer_bc, reg);
316
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
319
for (; nb > 3; nb -= 4) {
320
stl(addr, env->gpr[reg]);
321
reg = (reg + 1) % 32;
322
addr = addr_add(addr, 4);
324
if (unlikely(nb > 0)) {
325
for (sh = 24; nb > 0; nb--, sh -= 8) {
326
stb(addr, (env->gpr[reg] >> sh) & 0xFF);
327
addr = addr_add(addr, 1);
332
static void do_dcbz(target_ulong addr, int dcache_line_size)
334
addr &= ~(dcache_line_size - 1);
336
for (i = 0 ; i < dcache_line_size ; i += 4) {
339
if (env->reserve_addr == addr)
340
env->reserve_addr = (target_ulong)-1ULL;
343
void helper_dcbz(target_ulong addr)
345
do_dcbz(addr, env->dcache_line_size);
348
void helper_dcbz_970(target_ulong addr)
350
if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
353
do_dcbz(addr, env->dcache_line_size);
356
void helper_icbi(target_ulong addr)
358
addr &= ~(env->dcache_line_size - 1);
359
/* Invalidate one cache line :
360
* PowerPC specification says this is to be treated like a load
361
* (not a fetch) by the MMU. To be sure it will be so,
362
* do the load "by hand".
368
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
372
for (i = 0; i < xer_bc; i++) {
374
addr = addr_add(addr, 1);
375
/* ra (if not 0) and rb are never modified */
376
if (likely(reg != rb && (ra == 0 || reg != ra))) {
377
env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
379
if (unlikely(c == xer_cmp))
381
if (likely(d != 0)) {
392
/*****************************************************************************/
393
/* Fixed point operations helpers */
394
#if defined(TARGET_PPC64)
396
/* multiply high word */
397
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
401
muls64(&tl, &th, arg1, arg2);
405
/* multiply high word unsigned */
406
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
410
mulu64(&tl, &th, arg1, arg2);
414
uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
419
muls64(&tl, (uint64_t *)&th, arg1, arg2);
420
/* If th != 0 && th != -1, then we had an overflow */
421
if (likely((uint64_t)(th + 1) <= 1)) {
422
env->xer &= ~(1 << XER_OV);
424
env->xer |= (1 << XER_OV) | (1 << XER_SO);
430
target_ulong helper_cntlzw (target_ulong t)
435
#if defined(TARGET_PPC64)
436
target_ulong helper_cntlzd (target_ulong t)
442
/* shift right arithmetic helper */
443
target_ulong helper_sraw (target_ulong value, target_ulong shift)
447
if (likely(!(shift & 0x20))) {
448
if (likely((uint32_t)shift != 0)) {
450
ret = (int32_t)value >> shift;
451
if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
452
env->xer &= ~(1 << XER_CA);
454
env->xer |= (1 << XER_CA);
457
ret = (int32_t)value;
458
env->xer &= ~(1 << XER_CA);
461
ret = (int32_t)value >> 31;
463
env->xer |= (1 << XER_CA);
465
env->xer &= ~(1 << XER_CA);
468
return (target_long)ret;
471
#if defined(TARGET_PPC64)
472
target_ulong helper_srad (target_ulong value, target_ulong shift)
476
if (likely(!(shift & 0x40))) {
477
if (likely((uint64_t)shift != 0)) {
479
ret = (int64_t)value >> shift;
480
if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
481
env->xer &= ~(1 << XER_CA);
483
env->xer |= (1 << XER_CA);
486
ret = (int64_t)value;
487
env->xer &= ~(1 << XER_CA);
490
ret = (int64_t)value >> 63;
492
env->xer |= (1 << XER_CA);
494
env->xer &= ~(1 << XER_CA);
501
#if defined(TARGET_PPC64)
502
target_ulong helper_popcntb (target_ulong val)
504
val = (val & 0x5555555555555555ULL) + ((val >> 1) &
505
0x5555555555555555ULL);
506
val = (val & 0x3333333333333333ULL) + ((val >> 2) &
507
0x3333333333333333ULL);
508
val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) &
509
0x0f0f0f0f0f0f0f0fULL);
513
target_ulong helper_popcntw (target_ulong val)
515
val = (val & 0x5555555555555555ULL) + ((val >> 1) &
516
0x5555555555555555ULL);
517
val = (val & 0x3333333333333333ULL) + ((val >> 2) &
518
0x3333333333333333ULL);
519
val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) &
520
0x0f0f0f0f0f0f0f0fULL);
521
val = (val & 0x00ff00ff00ff00ffULL) + ((val >> 8) &
522
0x00ff00ff00ff00ffULL);
523
val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) &
524
0x0000ffff0000ffffULL);
528
target_ulong helper_popcntd (target_ulong val)
533
target_ulong helper_popcntb (target_ulong val)
535
val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
536
val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
537
val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
541
target_ulong helper_popcntw (target_ulong val)
543
val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
544
val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
545
val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
546
val = (val & 0x00ff00ff) + ((val >> 8) & 0x00ff00ff);
547
val = (val & 0x0000ffff) + ((val >> 16) & 0x0000ffff);
552
/*****************************************************************************/
553
/* Floating point operations helpers */
554
uint64_t helper_float32_to_float64(uint32_t arg)
559
d.d = float32_to_float64(f.f, &env->fp_status);
563
uint32_t helper_float64_to_float32(uint64_t arg)
568
f.f = float64_to_float32(d.d, &env->fp_status);
572
static inline int isden(float64 d)
578
return ((u.ll >> 52) & 0x7FF) == 0;
581
uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
587
isneg = float64_is_neg(farg.d);
588
if (unlikely(float64_is_any_nan(farg.d))) {
589
if (float64_is_signaling_nan(farg.d)) {
590
/* Signaling NaN: flags are undefined */
596
} else if (unlikely(float64_is_infinity(farg.d))) {
603
if (float64_is_zero(farg.d)) {
611
/* Denormalized numbers */
614
/* Normalized numbers */
625
/* We update FPSCR_FPRF */
626
env->fpscr &= ~(0x1F << FPSCR_FPRF);
627
env->fpscr |= ret << FPSCR_FPRF;
629
/* We just need fpcc to update Rc1 */
633
/* Floating-point invalid operations exception */
634
static inline uint64_t fload_invalid_op_excp(int op)
641
case POWERPC_EXCP_FP_VXSNAN:
642
env->fpscr |= 1 << FPSCR_VXSNAN;
644
case POWERPC_EXCP_FP_VXSOFT:
645
env->fpscr |= 1 << FPSCR_VXSOFT;
647
case POWERPC_EXCP_FP_VXISI:
648
/* Magnitude subtraction of infinities */
649
env->fpscr |= 1 << FPSCR_VXISI;
651
case POWERPC_EXCP_FP_VXIDI:
652
/* Division of infinity by infinity */
653
env->fpscr |= 1 << FPSCR_VXIDI;
655
case POWERPC_EXCP_FP_VXZDZ:
656
/* Division of zero by zero */
657
env->fpscr |= 1 << FPSCR_VXZDZ;
659
case POWERPC_EXCP_FP_VXIMZ:
660
/* Multiplication of zero by infinity */
661
env->fpscr |= 1 << FPSCR_VXIMZ;
663
case POWERPC_EXCP_FP_VXVC:
664
/* Ordered comparison of NaN */
665
env->fpscr |= 1 << FPSCR_VXVC;
666
env->fpscr &= ~(0xF << FPSCR_FPCC);
667
env->fpscr |= 0x11 << FPSCR_FPCC;
668
/* We must update the target FPR before raising the exception */
670
env->exception_index = POWERPC_EXCP_PROGRAM;
671
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
672
/* Update the floating-point enabled exception summary */
673
env->fpscr |= 1 << FPSCR_FEX;
674
/* Exception is differed */
678
case POWERPC_EXCP_FP_VXSQRT:
679
/* Square root of a negative number */
680
env->fpscr |= 1 << FPSCR_VXSQRT;
682
env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
684
/* Set the result to quiet NaN */
685
ret = 0x7FF8000000000000ULL;
686
env->fpscr &= ~(0xF << FPSCR_FPCC);
687
env->fpscr |= 0x11 << FPSCR_FPCC;
690
case POWERPC_EXCP_FP_VXCVI:
691
/* Invalid conversion */
692
env->fpscr |= 1 << FPSCR_VXCVI;
693
env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
695
/* Set the result to quiet NaN */
696
ret = 0x7FF8000000000000ULL;
697
env->fpscr &= ~(0xF << FPSCR_FPCC);
698
env->fpscr |= 0x11 << FPSCR_FPCC;
702
/* Update the floating-point invalid operation summary */
703
env->fpscr |= 1 << FPSCR_VX;
704
/* Update the floating-point exception summary */
705
env->fpscr |= 1 << FPSCR_FX;
707
/* Update the floating-point enabled exception summary */
708
env->fpscr |= 1 << FPSCR_FEX;
709
if (msr_fe0 != 0 || msr_fe1 != 0)
710
helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
715
static inline void float_zero_divide_excp(void)
717
env->fpscr |= 1 << FPSCR_ZX;
718
env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
719
/* Update the floating-point exception summary */
720
env->fpscr |= 1 << FPSCR_FX;
722
/* Update the floating-point enabled exception summary */
723
env->fpscr |= 1 << FPSCR_FEX;
724
if (msr_fe0 != 0 || msr_fe1 != 0) {
725
helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
726
POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
731
static inline void float_overflow_excp(void)
733
env->fpscr |= 1 << FPSCR_OX;
734
/* Update the floating-point exception summary */
735
env->fpscr |= 1 << FPSCR_FX;
737
/* XXX: should adjust the result */
738
/* Update the floating-point enabled exception summary */
739
env->fpscr |= 1 << FPSCR_FEX;
740
/* We must update the target FPR before raising the exception */
741
env->exception_index = POWERPC_EXCP_PROGRAM;
742
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
744
env->fpscr |= 1 << FPSCR_XX;
745
env->fpscr |= 1 << FPSCR_FI;
749
static inline void float_underflow_excp(void)
751
env->fpscr |= 1 << FPSCR_UX;
752
/* Update the floating-point exception summary */
753
env->fpscr |= 1 << FPSCR_FX;
755
/* XXX: should adjust the result */
756
/* Update the floating-point enabled exception summary */
757
env->fpscr |= 1 << FPSCR_FEX;
758
/* We must update the target FPR before raising the exception */
759
env->exception_index = POWERPC_EXCP_PROGRAM;
760
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
764
static inline void float_inexact_excp(void)
766
env->fpscr |= 1 << FPSCR_XX;
767
/* Update the floating-point exception summary */
768
env->fpscr |= 1 << FPSCR_FX;
770
/* Update the floating-point enabled exception summary */
771
env->fpscr |= 1 << FPSCR_FEX;
772
/* We must update the target FPR before raising the exception */
773
env->exception_index = POWERPC_EXCP_PROGRAM;
774
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
778
static inline void fpscr_set_rounding_mode(void)
782
/* Set rounding mode */
785
/* Best approximation (round to nearest) */
786
rnd_type = float_round_nearest_even;
789
/* Smaller magnitude (round toward zero) */
790
rnd_type = float_round_to_zero;
793
/* Round toward +infinite */
794
rnd_type = float_round_up;
798
/* Round toward -infinite */
799
rnd_type = float_round_down;
802
set_float_rounding_mode(rnd_type, &env->fp_status);
805
void helper_fpscr_clrbit (uint32_t bit)
809
prev = (env->fpscr >> bit) & 1;
810
env->fpscr &= ~(1 << bit);
815
fpscr_set_rounding_mode();
823
void helper_fpscr_setbit (uint32_t bit)
827
prev = (env->fpscr >> bit) & 1;
828
env->fpscr |= 1 << bit;
832
env->fpscr |= 1 << FPSCR_FX;
836
env->fpscr |= 1 << FPSCR_FX;
841
env->fpscr |= 1 << FPSCR_FX;
846
env->fpscr |= 1 << FPSCR_FX;
851
env->fpscr |= 1 << FPSCR_FX;
864
env->fpscr |= 1 << FPSCR_VX;
865
env->fpscr |= 1 << FPSCR_FX;
872
env->error_code = POWERPC_EXCP_FP;
874
env->error_code |= POWERPC_EXCP_FP_VXSNAN;
876
env->error_code |= POWERPC_EXCP_FP_VXISI;
878
env->error_code |= POWERPC_EXCP_FP_VXIDI;
880
env->error_code |= POWERPC_EXCP_FP_VXZDZ;
882
env->error_code |= POWERPC_EXCP_FP_VXIMZ;
884
env->error_code |= POWERPC_EXCP_FP_VXVC;
886
env->error_code |= POWERPC_EXCP_FP_VXSOFT;
888
env->error_code |= POWERPC_EXCP_FP_VXSQRT;
890
env->error_code |= POWERPC_EXCP_FP_VXCVI;
897
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
904
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
911
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
918
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
924
fpscr_set_rounding_mode();
929
/* Update the floating-point enabled exception summary */
930
env->fpscr |= 1 << FPSCR_FEX;
931
/* We have to update Rc1 before raising the exception */
932
env->exception_index = POWERPC_EXCP_PROGRAM;
938
void helper_store_fpscr (uint64_t arg, uint32_t mask)
941
* We use only the 32 LSB of the incoming fpr
949
new |= prev & 0x60000000;
950
for (i = 0; i < 8; i++) {
951
if (mask & (1 << i)) {
952
env->fpscr &= ~(0xF << (4 * i));
953
env->fpscr |= new & (0xF << (4 * i));
956
/* Update VX and FEX */
958
env->fpscr |= 1 << FPSCR_VX;
960
env->fpscr &= ~(1 << FPSCR_VX);
961
if ((fpscr_ex & fpscr_eex) != 0) {
962
env->fpscr |= 1 << FPSCR_FEX;
963
env->exception_index = POWERPC_EXCP_PROGRAM;
964
/* XXX: we should compute it properly */
965
env->error_code = POWERPC_EXCP_FP;
968
env->fpscr &= ~(1 << FPSCR_FEX);
969
fpscr_set_rounding_mode();
972
void helper_float_check_status (void)
974
if (env->exception_index == POWERPC_EXCP_PROGRAM &&
975
(env->error_code & POWERPC_EXCP_FP)) {
976
/* Differred floating-point exception after target FPR update */
977
if (msr_fe0 != 0 || msr_fe1 != 0)
978
helper_raise_exception_err(env->exception_index, env->error_code);
980
int status = get_float_exception_flags(&env->fp_status);
981
if (status & float_flag_divbyzero) {
982
float_zero_divide_excp();
983
} else if (status & float_flag_overflow) {
984
float_overflow_excp();
985
} else if (status & float_flag_underflow) {
986
float_underflow_excp();
987
} else if (status & float_flag_inexact) {
988
float_inexact_excp();
993
void helper_reset_fpstatus (void)
995
set_float_exception_flags(0, &env->fp_status);
999
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
1001
CPU_DoubleU farg1, farg2;
1006
if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
1007
float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
1008
/* Magnitude subtraction of infinities */
1009
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1011
if (unlikely(float64_is_signaling_nan(farg1.d) ||
1012
float64_is_signaling_nan(farg2.d))) {
1014
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1016
farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
1023
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
1025
CPU_DoubleU farg1, farg2;
1030
if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
1031
float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
1032
/* Magnitude subtraction of infinities */
1033
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1035
if (unlikely(float64_is_signaling_nan(farg1.d) ||
1036
float64_is_signaling_nan(farg2.d))) {
1037
/* sNaN subtraction */
1038
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1040
farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
1047
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
1049
CPU_DoubleU farg1, farg2;
1054
if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1055
(float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1056
/* Multiplication of zero by infinity */
1057
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1059
if (unlikely(float64_is_signaling_nan(farg1.d) ||
1060
float64_is_signaling_nan(farg2.d))) {
1061
/* sNaN multiplication */
1062
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1064
farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1071
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
1073
CPU_DoubleU farg1, farg2;
1078
if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
1079
/* Division of infinity by infinity */
1080
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
1081
} else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
1082
/* Division of zero by zero */
1083
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
1085
if (unlikely(float64_is_signaling_nan(farg1.d) ||
1086
float64_is_signaling_nan(farg2.d))) {
1088
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1090
farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
1097
uint64_t helper_fabs (uint64_t arg)
1102
farg.d = float64_abs(farg.d);
1107
uint64_t helper_fnabs (uint64_t arg)
1112
farg.d = float64_abs(farg.d);
1113
farg.d = float64_chs(farg.d);
1118
uint64_t helper_fneg (uint64_t arg)
1123
farg.d = float64_chs(farg.d);
1127
/* fctiw - fctiw. */
1128
uint64_t helper_fctiw (uint64_t arg)
1133
if (unlikely(float64_is_signaling_nan(farg.d))) {
1134
/* sNaN conversion */
1135
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1136
} else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
1137
/* qNan / infinity conversion */
1138
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1140
farg.ll = float64_to_int32(farg.d, &env->fp_status);
1141
/* XXX: higher bits are not supposed to be significant.
1142
* to make tests easier, return the same as a real PowerPC 750
1144
farg.ll |= 0xFFF80000ULL << 32;
1149
/* fctiwz - fctiwz. */
1150
uint64_t helper_fctiwz (uint64_t arg)
1155
if (unlikely(float64_is_signaling_nan(farg.d))) {
1156
/* sNaN conversion */
1157
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1158
} else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
1159
/* qNan / infinity conversion */
1160
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1162
farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
1163
/* XXX: higher bits are not supposed to be significant.
1164
* to make tests easier, return the same as a real PowerPC 750
1166
farg.ll |= 0xFFF80000ULL << 32;
1171
#if defined(TARGET_PPC64)
1172
/* fcfid - fcfid. */
1173
uint64_t helper_fcfid (uint64_t arg)
1176
farg.d = int64_to_float64(arg, &env->fp_status);
1180
/* fctid - fctid. */
1181
uint64_t helper_fctid (uint64_t arg)
1186
if (unlikely(float64_is_signaling_nan(farg.d))) {
1187
/* sNaN conversion */
1188
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1189
} else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
1190
/* qNan / infinity conversion */
1191
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1193
farg.ll = float64_to_int64(farg.d, &env->fp_status);
1198
/* fctidz - fctidz. */
1199
uint64_t helper_fctidz (uint64_t arg)
1204
if (unlikely(float64_is_signaling_nan(farg.d))) {
1205
/* sNaN conversion */
1206
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1207
} else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
1208
/* qNan / infinity conversion */
1209
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1211
farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
1218
static inline uint64_t do_fri(uint64_t arg, int rounding_mode)
1223
if (unlikely(float64_is_signaling_nan(farg.d))) {
1225
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1226
} else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
1227
/* qNan / infinity round */
1228
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1230
set_float_rounding_mode(rounding_mode, &env->fp_status);
1231
farg.ll = float64_round_to_int(farg.d, &env->fp_status);
1232
/* Restore rounding mode from FPSCR */
1233
fpscr_set_rounding_mode();
1238
uint64_t helper_frin (uint64_t arg)
1240
return do_fri(arg, float_round_nearest_even);
1243
uint64_t helper_friz (uint64_t arg)
1245
return do_fri(arg, float_round_to_zero);
1248
uint64_t helper_frip (uint64_t arg)
1250
return do_fri(arg, float_round_up);
1253
uint64_t helper_frim (uint64_t arg)
1255
return do_fri(arg, float_round_down);
1258
/* fmadd - fmadd. */
1259
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1261
CPU_DoubleU farg1, farg2, farg3;
1267
if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1268
(float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1269
/* Multiplication of zero by infinity */
1270
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1272
if (unlikely(float64_is_signaling_nan(farg1.d) ||
1273
float64_is_signaling_nan(farg2.d) ||
1274
float64_is_signaling_nan(farg3.d))) {
1275
/* sNaN operation */
1276
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1278
/* This is the way the PowerPC specification defines it */
1279
float128 ft0_128, ft1_128;
1281
ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1282
ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1283
ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1284
if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1285
float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
1286
/* Magnitude subtraction of infinities */
1287
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1289
ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1290
ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1291
farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1298
/* fmsub - fmsub. */
1299
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1301
CPU_DoubleU farg1, farg2, farg3;
1307
if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1308
(float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1309
/* Multiplication of zero by infinity */
1310
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1312
if (unlikely(float64_is_signaling_nan(farg1.d) ||
1313
float64_is_signaling_nan(farg2.d) ||
1314
float64_is_signaling_nan(farg3.d))) {
1315
/* sNaN operation */
1316
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1318
/* This is the way the PowerPC specification defines it */
1319
float128 ft0_128, ft1_128;
1321
ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1322
ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1323
ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1324
if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1325
float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
1326
/* Magnitude subtraction of infinities */
1327
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1329
ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1330
ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1331
farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1337
/* fnmadd - fnmadd. */
1338
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1340
CPU_DoubleU farg1, farg2, farg3;
1346
if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1347
(float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1348
/* Multiplication of zero by infinity */
1349
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1351
if (unlikely(float64_is_signaling_nan(farg1.d) ||
1352
float64_is_signaling_nan(farg2.d) ||
1353
float64_is_signaling_nan(farg3.d))) {
1354
/* sNaN operation */
1355
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1357
/* This is the way the PowerPC specification defines it */
1358
float128 ft0_128, ft1_128;
1360
ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1361
ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1362
ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1363
if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1364
float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
1365
/* Magnitude subtraction of infinities */
1366
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1368
ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1369
ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1370
farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1372
if (likely(!float64_is_any_nan(farg1.d))) {
1373
farg1.d = float64_chs(farg1.d);
1379
/* fnmsub - fnmsub. */
1380
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1382
CPU_DoubleU farg1, farg2, farg3;
1388
if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1389
(float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1390
/* Multiplication of zero by infinity */
1391
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1393
if (unlikely(float64_is_signaling_nan(farg1.d) ||
1394
float64_is_signaling_nan(farg2.d) ||
1395
float64_is_signaling_nan(farg3.d))) {
1396
/* sNaN operation */
1397
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1399
/* This is the way the PowerPC specification defines it */
1400
float128 ft0_128, ft1_128;
1402
ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1403
ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1404
ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1405
if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1406
float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
1407
/* Magnitude subtraction of infinities */
1408
farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1410
ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1411
ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1412
farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1414
if (likely(!float64_is_any_nan(farg1.d))) {
1415
farg1.d = float64_chs(farg1.d);
1422
uint64_t helper_frsp (uint64_t arg)
1428
if (unlikely(float64_is_signaling_nan(farg.d))) {
1429
/* sNaN square root */
1430
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1432
f32 = float64_to_float32(farg.d, &env->fp_status);
1433
farg.d = float32_to_float64(f32, &env->fp_status);
1438
/* fsqrt - fsqrt. */
1439
uint64_t helper_fsqrt (uint64_t arg)
1444
if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
1445
/* Square root of a negative nonzero number */
1446
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1448
if (unlikely(float64_is_signaling_nan(farg.d))) {
1449
/* sNaN square root */
1450
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1452
farg.d = float64_sqrt(farg.d, &env->fp_status);
1458
uint64_t helper_fre (uint64_t arg)
1463
if (unlikely(float64_is_signaling_nan(farg.d))) {
1464
/* sNaN reciprocal */
1465
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1467
farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1472
uint64_t helper_fres (uint64_t arg)
1478
if (unlikely(float64_is_signaling_nan(farg.d))) {
1479
/* sNaN reciprocal */
1480
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1482
farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1483
f32 = float64_to_float32(farg.d, &env->fp_status);
1484
farg.d = float32_to_float64(f32, &env->fp_status);
1489
/* frsqrte - frsqrte. */
1490
uint64_t helper_frsqrte (uint64_t arg)
1496
if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
1497
/* Reciprocal square root of a negative nonzero number */
1498
farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1500
if (unlikely(float64_is_signaling_nan(farg.d))) {
1501
/* sNaN reciprocal square root */
1502
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1504
farg.d = float64_sqrt(farg.d, &env->fp_status);
1505
farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1506
f32 = float64_to_float32(farg.d, &env->fp_status);
1507
farg.d = float32_to_float64(f32, &env->fp_status);
1513
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1519
if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_any_nan(farg1.d)) {
1526
void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
1528
CPU_DoubleU farg1, farg2;
1533
if (unlikely(float64_is_any_nan(farg1.d) ||
1534
float64_is_any_nan(farg2.d))) {
1536
} else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1538
} else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1544
env->fpscr &= ~(0x0F << FPSCR_FPRF);
1545
env->fpscr |= ret << FPSCR_FPRF;
1546
env->crf[crfD] = ret;
1547
if (unlikely(ret == 0x01UL
1548
&& (float64_is_signaling_nan(farg1.d) ||
1549
float64_is_signaling_nan(farg2.d)))) {
1550
/* sNaN comparison */
1551
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1555
void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
1557
CPU_DoubleU farg1, farg2;
1562
if (unlikely(float64_is_any_nan(farg1.d) ||
1563
float64_is_any_nan(farg2.d))) {
1565
} else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1567
} else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1573
env->fpscr &= ~(0x0F << FPSCR_FPRF);
1574
env->fpscr |= ret << FPSCR_FPRF;
1575
env->crf[crfD] = ret;
1576
if (unlikely (ret == 0x01UL)) {
1577
if (float64_is_signaling_nan(farg1.d) ||
1578
float64_is_signaling_nan(farg2.d)) {
1579
/* sNaN comparison */
1580
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
1581
POWERPC_EXCP_FP_VXVC);
1583
/* qNaN comparison */
1584
fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
1589
#if !defined (CONFIG_USER_ONLY)
1590
void helper_store_msr (target_ulong val)
1592
val = hreg_store_msr(env, val, 0);
1594
env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1595
helper_raise_exception(val);
1599
static inline void do_rfi(target_ulong nip, target_ulong msr,
1600
target_ulong msrm, int keep_msrh)
1602
#if defined(TARGET_PPC64)
1603
if (msr & (1ULL << MSR_SF)) {
1604
nip = (uint64_t)nip;
1605
msr &= (uint64_t)msrm;
1607
nip = (uint32_t)nip;
1608
msr = (uint32_t)(msr & msrm);
1610
msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
1613
nip = (uint32_t)nip;
1614
msr &= (uint32_t)msrm;
1616
/* XXX: beware: this is false if VLE is supported */
1617
env->nip = nip & ~((target_ulong)0x00000003);
1618
hreg_store_msr(env, msr, 1);
1619
#if defined (DEBUG_OP)
1620
cpu_dump_rfi(env->nip, env->msr);
1622
/* No need to raise an exception here,
1623
* as rfi is always the last insn of a TB
1625
env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1628
void helper_rfi (void)
1630
do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1631
~((target_ulong)0x783F0000), 1);
1634
#if defined(TARGET_PPC64)
1635
void helper_rfid (void)
1637
do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1638
~((target_ulong)0x783F0000), 0);
1641
void helper_hrfid (void)
1643
do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
1644
~((target_ulong)0x783F0000), 0);
1649
void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
1651
if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1652
((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1653
((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1654
((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1655
((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
1656
helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1660
#if defined(TARGET_PPC64)
1661
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
1663
if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1664
((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1665
((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1666
((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1667
((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
1668
helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1672
/*****************************************************************************/
1673
/* PowerPC 601 specific instructions (POWER bridge) */
1675
target_ulong helper_clcs (uint32_t arg)
1679
/* Instruction cache line size */
1680
return env->icache_line_size;
1683
/* Data cache line size */
1684
return env->dcache_line_size;
1687
/* Minimum cache line size */
1688
return (env->icache_line_size < env->dcache_line_size) ?
1689
env->icache_line_size : env->dcache_line_size;
1692
/* Maximum cache line size */
1693
return (env->icache_line_size > env->dcache_line_size) ?
1694
env->icache_line_size : env->dcache_line_size;
1703
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
1705
uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
1707
if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1708
(int32_t)arg2 == 0) {
1709
env->spr[SPR_MQ] = 0;
1712
env->spr[SPR_MQ] = tmp % arg2;
1713
return tmp / (int32_t)arg2;
1717
target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
1719
uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
1721
if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1722
(int32_t)arg2 == 0) {
1723
env->xer |= (1 << XER_OV) | (1 << XER_SO);
1724
env->spr[SPR_MQ] = 0;
1727
env->spr[SPR_MQ] = tmp % arg2;
1728
tmp /= (int32_t)arg2;
1729
if ((int32_t)tmp != tmp) {
1730
env->xer |= (1 << XER_OV) | (1 << XER_SO);
1732
env->xer &= ~(1 << XER_OV);
1738
target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
1740
if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1741
(int32_t)arg2 == 0) {
1742
env->spr[SPR_MQ] = 0;
1745
env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1746
return (int32_t)arg1 / (int32_t)arg2;
1750
target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
1752
if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1753
(int32_t)arg2 == 0) {
1754
env->xer |= (1 << XER_OV) | (1 << XER_SO);
1755
env->spr[SPR_MQ] = 0;
1758
env->xer &= ~(1 << XER_OV);
1759
env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1760
return (int32_t)arg1 / (int32_t)arg2;
1764
#if !defined (CONFIG_USER_ONLY)
1765
target_ulong helper_rac (target_ulong addr)
1769
target_ulong ret = 0;
1771
/* We don't have to generate many instances of this instruction,
1772
* as rac is supervisor only.
1774
/* XXX: FIX THIS: Pretend we have no BAT */
1775
nb_BATs = env->nb_BATs;
1777
if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
1779
env->nb_BATs = nb_BATs;
1783
void helper_rfsvc (void)
1785
do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
1789
/*****************************************************************************/
1790
/* 602 specific instructions */
1791
/* mfrom is the most crazy instruction ever seen, imho ! */
1792
/* Real implementation uses a ROM table. Do the same */
1793
/* Extremly decomposed:
1795
* return 256 * log10(10 + 1.0) + 0.5
1797
#if !defined (CONFIG_USER_ONLY)
1798
target_ulong helper_602_mfrom (target_ulong arg)
1800
if (likely(arg < 602)) {
1801
#include "mfrom_table.c"
1802
return mfrom_ROM_table[arg];
1809
/*****************************************************************************/
1810
/* Embedded PowerPC specific helpers */
1812
/* XXX: to be improved to check access rights when in user-mode */
1813
target_ulong helper_load_dcr (target_ulong dcrn)
1817
if (unlikely(env->dcr_env == NULL)) {
1818
qemu_log("No DCR environment\n");
1819
helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1820
POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1821
} else if (unlikely(ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val) != 0)) {
1822
qemu_log("DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
1823
helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1824
POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1829
void helper_store_dcr (target_ulong dcrn, target_ulong val)
1831
if (unlikely(env->dcr_env == NULL)) {
1832
qemu_log("No DCR environment\n");
1833
helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1834
POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1835
} else if (unlikely(ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val) != 0)) {
1836
qemu_log("DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
1837
helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1838
POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1842
#if !defined(CONFIG_USER_ONLY)
1843
void helper_40x_rfci (void)
1845
do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1846
~((target_ulong)0xFFFF0000), 0);
1849
void helper_rfci (void)
1851
do_rfi(env->spr[SPR_BOOKE_CSRR0], SPR_BOOKE_CSRR1,
1852
~((target_ulong)0x3FFF0000), 0);
1855
void helper_rfdi (void)
1857
do_rfi(env->spr[SPR_BOOKE_DSRR0], SPR_BOOKE_DSRR1,
1858
~((target_ulong)0x3FFF0000), 0);
1861
void helper_rfmci (void)
1863
do_rfi(env->spr[SPR_BOOKE_MCSRR0], SPR_BOOKE_MCSRR1,
1864
~((target_ulong)0x3FFF0000), 0);
1869
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
1875
for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1876
if ((high & mask) == 0) {
1884
for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1885
if ((low & mask) == 0) {
1897
env->xer = (env->xer & ~0x7F) | i;
1899
env->crf[0] |= xer_so;
1904
/*****************************************************************************/
1905
/* Altivec extension helpers */
1906
#if defined(HOST_WORDS_BIGENDIAN)
1914
#if defined(HOST_WORDS_BIGENDIAN)
1915
#define VECTOR_FOR_INORDER_I(index, element) \
1916
for (index = 0; index < ARRAY_SIZE(r->element); index++)
1918
#define VECTOR_FOR_INORDER_I(index, element) \
1919
for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
1922
/* If X is a NaN, store the corresponding QNaN into RESULT. Otherwise,
1923
* execute the following block. */
1924
#define DO_HANDLE_NAN(result, x) \
1925
if (float32_is_any_nan(x)) { \
1928
__f.l = __f.l | (1 << 22); /* Set QNaN bit. */ \
1932
#define HANDLE_NAN1(result, x) \
1933
DO_HANDLE_NAN(result, x)
1934
#define HANDLE_NAN2(result, x, y) \
1935
DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
1936
#define HANDLE_NAN3(result, x, y, z) \
1937
DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
1939
/* Saturating arithmetic helpers. */
1940
#define SATCVT(from, to, from_type, to_type, min, max) \
1941
static inline to_type cvt##from##to(from_type x, int *sat) \
1944
if (x < (from_type)min) { \
1947
} else if (x > (from_type)max) { \
1955
#define SATCVTU(from, to, from_type, to_type, min, max) \
1956
static inline to_type cvt##from##to(from_type x, int *sat) \
1959
if (x > (from_type)max) { \
1967
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
1968
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
1969
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)
1971
SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
1972
SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
1973
SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
1974
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
1975
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
1976
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
1980
#define LVE(name, access, swap, element) \
1981
void helper_##name (ppc_avr_t *r, target_ulong addr) \
1983
size_t n_elems = ARRAY_SIZE(r->element); \
1984
int adjust = HI_IDX*(n_elems-1); \
1985
int sh = sizeof(r->element[0]) >> 1; \
1986
int index = (addr & 0xf) >> sh; \
1988
r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
1990
r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
1994
LVE(lvebx, ldub, I, u8)
1995
LVE(lvehx, lduw, bswap16, u16)
1996
LVE(lvewx, ldl, bswap32, u32)
2000
void helper_lvsl (ppc_avr_t *r, target_ulong sh)
2002
int i, j = (sh & 0xf);
2004
VECTOR_FOR_INORDER_I (i, u8) {
2009
void helper_lvsr (ppc_avr_t *r, target_ulong sh)
2011
int i, j = 0x10 - (sh & 0xf);
2013
VECTOR_FOR_INORDER_I (i, u8) {
2018
#define STVE(name, access, swap, element) \
2019
void helper_##name (ppc_avr_t *r, target_ulong addr) \
2021
size_t n_elems = ARRAY_SIZE(r->element); \
2022
int adjust = HI_IDX*(n_elems-1); \
2023
int sh = sizeof(r->element[0]) >> 1; \
2024
int index = (addr & 0xf) >> sh; \
2026
access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
2028
access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
2032
STVE(stvebx, stb, I, u8)
2033
STVE(stvehx, stw, bswap16, u16)
2034
STVE(stvewx, stl, bswap32, u32)
2038
void helper_mtvscr (ppc_avr_t *r)
2040
#if defined(HOST_WORDS_BIGENDIAN)
2041
env->vscr = r->u32[3];
2043
env->vscr = r->u32[0];
2045
set_flush_to_zero(vscr_nj, &env->vec_status);
2048
void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2051
for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2052
r->u32[i] = ~a->u32[i] < b->u32[i];
2056
#define VARITH_DO(name, op, element) \
2057
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2060
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2061
r->element[i] = a->element[i] op b->element[i]; \
2064
#define VARITH(suffix, element) \
2065
VARITH_DO(add##suffix, +, element) \
2066
VARITH_DO(sub##suffix, -, element)
2073
#define VARITHFP(suffix, func) \
2074
void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2077
for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2078
HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \
2079
r->f[i] = func(a->f[i], b->f[i], &env->vec_status); \
2083
VARITHFP(addfp, float32_add)
2084
VARITHFP(subfp, float32_sub)
2087
#define VARITHSAT_CASE(type, op, cvt, element) \
2089
type result = (type)a->element[i] op (type)b->element[i]; \
2090
r->element[i] = cvt(result, &sat); \
2093
#define VARITHSAT_DO(name, op, optype, cvt, element) \
2094
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2098
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2099
switch (sizeof(r->element[0])) { \
2100
case 1: VARITHSAT_CASE(optype, op, cvt, element); break; \
2101
case 2: VARITHSAT_CASE(optype, op, cvt, element); break; \
2102
case 4: VARITHSAT_CASE(optype, op, cvt, element); break; \
2106
env->vscr |= (1 << VSCR_SAT); \
2109
#define VARITHSAT_SIGNED(suffix, element, optype, cvt) \
2110
VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element) \
2111
VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
2112
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt) \
2113
VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element) \
2114
VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
2115
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
2116
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
2117
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
2118
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
2119
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
2120
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
2121
#undef VARITHSAT_CASE
2123
#undef VARITHSAT_SIGNED
2124
#undef VARITHSAT_UNSIGNED
2126
#define VAVG_DO(name, element, etype) \
2127
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2130
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2131
etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \
2132
r->element[i] = x >> 1; \
2136
#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
2137
VAVG_DO(avgs##type, signed_element, signed_type) \
2138
VAVG_DO(avgu##type, unsigned_element, unsigned_type)
2139
VAVG(b, s8, int16_t, u8, uint16_t)
2140
VAVG(h, s16, int32_t, u16, uint32_t)
2141
VAVG(w, s32, int64_t, u32, uint64_t)
2145
#define VCF(suffix, cvt, element) \
2146
void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
2149
for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2150
float32 t = cvt(b->element[i], &env->vec_status); \
2151
r->f[i] = float32_scalbn (t, -uim, &env->vec_status); \
2154
VCF(ux, uint32_to_float32, u32)
2155
VCF(sx, int32_to_float32, s32)
2158
#define VCMP_DO(suffix, compare, element, record) \
2159
void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2161
uint32_t ones = (uint32_t)-1; \
2162
uint32_t all = ones; \
2163
uint32_t none = 0; \
2165
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2166
uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
2167
switch (sizeof (a->element[0])) { \
2168
case 4: r->u32[i] = result; break; \
2169
case 2: r->u16[i] = result; break; \
2170
case 1: r->u8[i] = result; break; \
2176
env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2179
#define VCMP(suffix, compare, element) \
2180
VCMP_DO(suffix, compare, element, 0) \
2181
VCMP_DO(suffix##_dot, compare, element, 1)
2194
#define VCMPFP_DO(suffix, compare, order, record) \
2195
void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2197
uint32_t ones = (uint32_t)-1; \
2198
uint32_t all = ones; \
2199
uint32_t none = 0; \
2201
for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2203
int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
2204
if (rel == float_relation_unordered) { \
2206
} else if (rel compare order) { \
2211
r->u32[i] = result; \
2216
env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2219
#define VCMPFP(suffix, compare, order) \
2220
VCMPFP_DO(suffix, compare, order, 0) \
2221
VCMPFP_DO(suffix##_dot, compare, order, 1)
2222
VCMPFP(eqfp, ==, float_relation_equal)
2223
VCMPFP(gefp, !=, float_relation_less)
2224
VCMPFP(gtfp, ==, float_relation_greater)
2228
static inline void vcmpbfp_internal(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
2233
for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2234
int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
2235
if (le_rel == float_relation_unordered) {
2236
r->u32[i] = 0xc0000000;
2237
/* ALL_IN does not need to be updated here. */
2239
float32 bneg = float32_chs(b->f[i]);
2240
int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
2241
int le = le_rel != float_relation_greater;
2242
int ge = ge_rel != float_relation_less;
2243
r->u32[i] = ((!le) << 31) | ((!ge) << 30);
2244
all_in |= (!le | !ge);
2248
env->crf[6] = (all_in == 0) << 1;
2252
void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2254
vcmpbfp_internal(r, a, b, 0);
2257
void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2259
vcmpbfp_internal(r, a, b, 1);
2262
#define VCT(suffix, satcvt, element) \
2263
void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
2267
float_status s = env->vec_status; \
2268
set_float_rounding_mode(float_round_to_zero, &s); \
2269
for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2270
if (float32_is_any_nan(b->f[i])) { \
2271
r->element[i] = 0; \
2273
float64 t = float32_to_float64(b->f[i], &s); \
2275
t = float64_scalbn(t, uim, &s); \
2276
j = float64_to_int64(t, &s); \
2277
r->element[i] = satcvt(j, &sat); \
2281
env->vscr |= (1 << VSCR_SAT); \
2284
VCT(uxs, cvtsduw, u32)
2285
VCT(sxs, cvtsdsw, s32)
2288
void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2291
for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2292
HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
2293
/* Need to do the computation in higher precision and round
2294
* once at the end. */
2295
float64 af, bf, cf, t;
2296
af = float32_to_float64(a->f[i], &env->vec_status);
2297
bf = float32_to_float64(b->f[i], &env->vec_status);
2298
cf = float32_to_float64(c->f[i], &env->vec_status);
2299
t = float64_mul(af, cf, &env->vec_status);
2300
t = float64_add(t, bf, &env->vec_status);
2301
r->f[i] = float64_to_float32(t, &env->vec_status);
2306
void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2311
for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2312
int32_t prod = a->s16[i] * b->s16[i];
2313
int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2314
r->s16[i] = cvtswsh (t, &sat);
2318
env->vscr |= (1 << VSCR_SAT);
2322
void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2327
for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2328
int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
2329
int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2330
r->s16[i] = cvtswsh (t, &sat);
2334
env->vscr |= (1 << VSCR_SAT);
2338
#define VMINMAX_DO(name, compare, element) \
2339
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2342
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2343
if (a->element[i] compare b->element[i]) { \
2344
r->element[i] = b->element[i]; \
2346
r->element[i] = a->element[i]; \
2350
#define VMINMAX(suffix, element) \
2351
VMINMAX_DO(min##suffix, >, element) \
2352
VMINMAX_DO(max##suffix, <, element)
2362
#define VMINMAXFP(suffix, rT, rF) \
2363
void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2366
for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2367
HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \
2368
if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
2369
r->f[i] = rT->f[i]; \
2371
r->f[i] = rF->f[i]; \
2376
VMINMAXFP(minfp, a, b)
2377
VMINMAXFP(maxfp, b, a)
2380
void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2383
for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2384
int32_t prod = a->s16[i] * b->s16[i];
2385
r->s16[i] = (int16_t) (prod + c->s16[i]);
2389
#define VMRG_DO(name, element, highp) \
2390
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2394
size_t n_elems = ARRAY_SIZE(r->element); \
2395
for (i = 0; i < n_elems/2; i++) { \
2397
result.element[i*2+HI_IDX] = a->element[i]; \
2398
result.element[i*2+LO_IDX] = b->element[i]; \
2400
result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
2401
result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
2406
#if defined(HOST_WORDS_BIGENDIAN)
2413
#define VMRG(suffix, element) \
2414
VMRG_DO(mrgl##suffix, element, MRGHI) \
2415
VMRG_DO(mrgh##suffix, element, MRGLO)
2424
void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2429
for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
2430
prod[i] = (int32_t)a->s8[i] * b->u8[i];
2433
VECTOR_FOR_INORDER_I(i, s32) {
2434
r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2438
void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2443
for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2444
prod[i] = a->s16[i] * b->s16[i];
2447
VECTOR_FOR_INORDER_I(i, s32) {
2448
r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
2452
void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2458
for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2459
prod[i] = (int32_t)a->s16[i] * b->s16[i];
2462
VECTOR_FOR_INORDER_I (i, s32) {
2463
int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
2464
r->u32[i] = cvtsdsw(t, &sat);
2468
env->vscr |= (1 << VSCR_SAT);
2472
void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2477
for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2478
prod[i] = a->u8[i] * b->u8[i];
2481
VECTOR_FOR_INORDER_I(i, u32) {
2482
r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2486
void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2491
for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2492
prod[i] = a->u16[i] * b->u16[i];
2495
VECTOR_FOR_INORDER_I(i, u32) {
2496
r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
2500
void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2506
for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2507
prod[i] = a->u16[i] * b->u16[i];
2510
VECTOR_FOR_INORDER_I (i, s32) {
2511
uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
2512
r->u32[i] = cvtuduw(t, &sat);
2516
env->vscr |= (1 << VSCR_SAT);
2520
#define VMUL_DO(name, mul_element, prod_element, evenp) \
2521
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2524
VECTOR_FOR_INORDER_I(i, prod_element) { \
2526
r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
2528
r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
2532
#define VMUL(suffix, mul_element, prod_element) \
2533
VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
2534
VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
2542
void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2545
for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2546
HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
2547
/* Need to do the computation is higher precision and round
2548
* once at the end. */
2549
float64 af, bf, cf, t;
2550
af = float32_to_float64(a->f[i], &env->vec_status);
2551
bf = float32_to_float64(b->f[i], &env->vec_status);
2552
cf = float32_to_float64(c->f[i], &env->vec_status);
2553
t = float64_mul(af, cf, &env->vec_status);
2554
t = float64_sub(t, bf, &env->vec_status);
2556
r->f[i] = float64_to_float32(t, &env->vec_status);
2561
void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2565
VECTOR_FOR_INORDER_I (i, u8) {
2566
int s = c->u8[i] & 0x1f;
2567
#if defined(HOST_WORDS_BIGENDIAN)
2568
int index = s & 0xf;
2570
int index = 15 - (s & 0xf);
2573
result.u8[i] = b->u8[index];
2575
result.u8[i] = a->u8[index];
2581
#if defined(HOST_WORDS_BIGENDIAN)
2586
void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2590
#if defined(HOST_WORDS_BIGENDIAN)
2591
const ppc_avr_t *x[2] = { a, b };
2593
const ppc_avr_t *x[2] = { b, a };
2596
VECTOR_FOR_INORDER_I (i, u64) {
2597
VECTOR_FOR_INORDER_I (j, u32){
2598
uint32_t e = x[i]->u32[j];
2599
result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
2600
((e >> 6) & 0x3e0) |
2607
#define VPK(suffix, from, to, cvt, dosat) \
2608
void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2613
ppc_avr_t *a0 = PKBIG ? a : b; \
2614
ppc_avr_t *a1 = PKBIG ? b : a; \
2615
VECTOR_FOR_INORDER_I (i, from) { \
2616
result.to[i] = cvt(a0->from[i], &sat); \
2617
result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat); \
2620
if (dosat && sat) { \
2621
env->vscr |= (1 << VSCR_SAT); \
2625
VPK(shss, s16, s8, cvtshsb, 1)
2626
VPK(shus, s16, u8, cvtshub, 1)
2627
VPK(swss, s32, s16, cvtswsh, 1)
2628
VPK(swus, s32, u16, cvtswuh, 1)
2629
VPK(uhus, u16, u8, cvtuhub, 1)
2630
VPK(uwus, u32, u16, cvtuwuh, 1)
2631
VPK(uhum, u16, u8, I, 0)
2632
VPK(uwum, u32, u16, I, 0)
2637
void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b)
2640
for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2641
HANDLE_NAN1(r->f[i], b->f[i]) {
2642
r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
2647
#define VRFI(suffix, rounding) \
2648
void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b) \
2651
float_status s = env->vec_status; \
2652
set_float_rounding_mode(rounding, &s); \
2653
for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2654
HANDLE_NAN1(r->f[i], b->f[i]) { \
2655
r->f[i] = float32_round_to_int (b->f[i], &s); \
2659
VRFI(n, float_round_nearest_even)
2660
VRFI(m, float_round_down)
2661
VRFI(p, float_round_up)
2662
VRFI(z, float_round_to_zero)
2665
#define VROTATE(suffix, element) \
2666
void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2669
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2670
unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2671
unsigned int shift = b->element[i] & mask; \
2672
r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2680
void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b)
2683
for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2684
HANDLE_NAN1(r->f[i], b->f[i]) {
2685
float32 t = float32_sqrt(b->f[i], &env->vec_status);
2686
r->f[i] = float32_div(float32_one, t, &env->vec_status);
2691
void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2693
r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
2694
r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
2697
void helper_vexptefp (ppc_avr_t *r, ppc_avr_t *b)
2700
for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2701
HANDLE_NAN1(r->f[i], b->f[i]) {
2702
r->f[i] = float32_exp2(b->f[i], &env->vec_status);
2707
void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
2710
for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2711
HANDLE_NAN1(r->f[i], b->f[i]) {
2712
r->f[i] = float32_log2(b->f[i], &env->vec_status);
2717
#if defined(HOST_WORDS_BIGENDIAN)
2724
/* The specification says that the results are undefined if all of the
2725
* shift counts are not identical. We check to make sure that they are
2726
* to conform to what real hardware appears to do. */
2727
#define VSHIFT(suffix, leftp) \
2728
void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2730
int shift = b->u8[LO_IDX*15] & 0x7; \
2733
for (i = 0; i < ARRAY_SIZE(r->u8); i++) { \
2734
doit = doit && ((b->u8[i] & 0x7) == shift); \
2739
} else if (leftp) { \
2740
uint64_t carry = a->u64[LO_IDX] >> (64 - shift); \
2741
r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry; \
2742
r->u64[LO_IDX] = a->u64[LO_IDX] << shift; \
2744
uint64_t carry = a->u64[HI_IDX] << (64 - shift); \
2745
r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry; \
2746
r->u64[HI_IDX] = a->u64[HI_IDX] >> shift; \
2756
#define VSL(suffix, element) \
2757
void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2760
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2761
unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2762
unsigned int shift = b->element[i] & mask; \
2763
r->element[i] = a->element[i] << shift; \
2771
void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
2773
int sh = shift & 0xf;
2777
#if defined(HOST_WORDS_BIGENDIAN)
2778
for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2781
result.u8[i] = b->u8[index-0x10];
2783
result.u8[i] = a->u8[index];
2787
for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2788
int index = (16 - sh) + i;
2790
result.u8[i] = a->u8[index-0x10];
2792
result.u8[i] = b->u8[index];
2799
void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2801
int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2803
#if defined (HOST_WORDS_BIGENDIAN)
2804
memmove (&r->u8[0], &a->u8[sh], 16-sh);
2805
memset (&r->u8[16-sh], 0, sh);
2807
memmove (&r->u8[sh], &a->u8[0], 16-sh);
2808
memset (&r->u8[0], 0, sh);
2812
/* Experimental testing shows that hardware masks the immediate. */
2813
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
2814
#if defined(HOST_WORDS_BIGENDIAN)
2815
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
2817
#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
2819
#define VSPLT(suffix, element) \
2820
void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
2822
uint32_t s = b->element[SPLAT_ELEMENT(element)]; \
2824
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2825
r->element[i] = s; \
2832
#undef SPLAT_ELEMENT
2833
#undef _SPLAT_MASKED
2835
#define VSPLTI(suffix, element, splat_type) \
2836
void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat) \
2838
splat_type x = (int8_t)(splat << 3) >> 3; \
2840
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2841
r->element[i] = x; \
2844
VSPLTI(b, s8, int8_t)
2845
VSPLTI(h, s16, int16_t)
2846
VSPLTI(w, s32, int32_t)
2849
#define VSR(suffix, element) \
2850
void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2853
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2854
unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2855
unsigned int shift = b->element[i] & mask; \
2856
r->element[i] = a->element[i] >> shift; \
2867
void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2869
int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2871
#if defined (HOST_WORDS_BIGENDIAN)
2872
memmove (&r->u8[sh], &a->u8[0], 16-sh);
2873
memset (&r->u8[0], 0, sh);
2875
memmove (&r->u8[0], &a->u8[sh], 16-sh);
2876
memset (&r->u8[16-sh], 0, sh);
2880
void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2883
for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2884
r->u32[i] = a->u32[i] >= b->u32[i];
2888
void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2895
#if defined(HOST_WORDS_BIGENDIAN)
2896
upper = ARRAY_SIZE(r->s32)-1;
2900
t = (int64_t)b->s32[upper];
2901
for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2905
result.s32[upper] = cvtsdsw(t, &sat);
2909
env->vscr |= (1 << VSCR_SAT);
2913
void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2919
#if defined(HOST_WORDS_BIGENDIAN)
2924
for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
2925
int64_t t = (int64_t)b->s32[upper+i*2];
2927
for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
2930
result.s32[upper+i*2] = cvtsdsw(t, &sat);
2935
env->vscr |= (1 << VSCR_SAT);
2939
void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2944
for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2945
int64_t t = (int64_t)b->s32[i];
2946
for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
2949
r->s32[i] = cvtsdsw(t, &sat);
2953
env->vscr |= (1 << VSCR_SAT);
2957
/* Per word: sum the two signed halfwords of a plus the signed word of b,
 * saturating to a signed word. Sets VSCR[SAT] on saturation. */
void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        t += a->s16[2*i] + a->s16[2*i+1];
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
/* Per word: sum the four unsigned bytes of a plus the unsigned word of b,
 * saturating to an unsigned word. Sets VSCR[SAT] on saturation. */
void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint64_t t = (uint64_t)b->u32[i];
        for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
            t += a->u8[4*i+j];
        }
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
#if defined(HOST_WORDS_BIGENDIAN)
2998
#define VUPKPX(suffix, hi) \
2999
void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
3003
for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \
3004
uint16_t e = b->u16[hi ? i : i+4]; \
3005
uint8_t a = (e >> 15) ? 0xff : 0; \
3006
uint8_t r = (e >> 10) & 0x1f; \
3007
uint8_t g = (e >> 5) & 0x1f; \
3008
uint8_t b = e & 0x1f; \
3009
result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \
3017
#define VUPK(suffix, unpacked, packee, hi) \
3018
void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
3023
for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) { \
3024
result.unpacked[i] = b->packee[i]; \
3027
for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
3028
result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
3033
VUPK(hsb, s16, s8, UPKHI)
3034
VUPK(hsh, s32, s16, UPKHI)
3035
VUPK(lsb, s16, s8, UPKLO)
3036
VUPK(lsh, s32, s16, UPKLO)
3041
#undef DO_HANDLE_NAN
3045
#undef VECTOR_FOR_INORDER_I
3049
/*****************************************************************************/
/* SPE extension helpers */
/* Use a table to make this quicker */
/* Bit-reversal of a nibble: hbrev[n] is n with its 4 bits mirrored. */
static const uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

/* Mirror the 8 bits of val (bit 0 <-> bit 7, etc.). */
static inline uint8_t byte_reverse(uint8_t val)
{
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
}

/* Mirror the 32 bits of val (bit 0 <-> bit 31, etc.). */
static inline uint32_t word_reverse(uint32_t val)
{
    return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
        (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
}
#define MASKBITS 16 // Random value - to be fixed (implementation dependant)
3069
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
3071
uint32_t a, b, d, mask;
3073
mask = UINT32_MAX >> (32 - MASKBITS);
3076
d = word_reverse(1 + word_reverse(a | ~b));
3077
return (arg1 & ~mask) | (d & b);
3080
/* Count leading sign bits of a 32-bit value: leading ones when negative,
 * leading zeros otherwise. */
uint32_t helper_cntlsw32 (uint32_t val)
{
    if (val & 0x80000000)
        return clz32(~val);
    else
        return clz32(val);
}
/* Count leading zeros of a 32-bit value. */
uint32_t helper_cntlzw32 (uint32_t val)
{
    return clz32(val);
}
/* Single-precision floating-point conversions */
3094
static inline uint32_t efscfsi(uint32_t val)
3098
u.f = int32_to_float32(val, &env->vec_status);
3103
static inline uint32_t efscfui(uint32_t val)
3107
u.f = uint32_to_float32(val, &env->vec_status);
3112
static inline int32_t efsctsi(uint32_t val)
3117
/* NaN are not treated the same way IEEE 754 does */
3118
if (unlikely(float32_is_quiet_nan(u.f)))
3121
return float32_to_int32(u.f, &env->vec_status);
3124
static inline uint32_t efsctui(uint32_t val)
3129
/* NaN are not treated the same way IEEE 754 does */
3130
if (unlikely(float32_is_quiet_nan(u.f)))
3133
return float32_to_uint32(u.f, &env->vec_status);
3136
static inline uint32_t efsctsiz(uint32_t val)
3141
/* NaN are not treated the same way IEEE 754 does */
3142
if (unlikely(float32_is_quiet_nan(u.f)))
3145
return float32_to_int32_round_to_zero(u.f, &env->vec_status);
3148
static inline uint32_t efsctuiz(uint32_t val)
3153
/* NaN are not treated the same way IEEE 754 does */
3154
if (unlikely(float32_is_quiet_nan(u.f)))
3157
return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
3160
static inline uint32_t efscfsf(uint32_t val)
3165
u.f = int32_to_float32(val, &env->vec_status);
3166
tmp = int64_to_float32(1ULL << 32, &env->vec_status);
3167
u.f = float32_div(u.f, tmp, &env->vec_status);
3172
static inline uint32_t efscfuf(uint32_t val)
3177
u.f = uint32_to_float32(val, &env->vec_status);
3178
tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3179
u.f = float32_div(u.f, tmp, &env->vec_status);
3184
static inline uint32_t efsctsf(uint32_t val)
3190
/* NaN are not treated the same way IEEE 754 does */
3191
if (unlikely(float32_is_quiet_nan(u.f)))
3193
tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3194
u.f = float32_mul(u.f, tmp, &env->vec_status);
3196
return float32_to_int32(u.f, &env->vec_status);
3199
static inline uint32_t efsctuf(uint32_t val)
3205
/* NaN are not treated the same way IEEE 754 does */
3206
if (unlikely(float32_is_quiet_nan(u.f)))
3208
tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3209
u.f = float32_mul(u.f, tmp, &env->vec_status);
3211
return float32_to_uint32(u.f, &env->vec_status);
3214
/* Scalar wrappers around the single-precision conversion primitives. */
#define HELPER_SPE_SINGLE_CONV(name)                            \
    uint32_t helper_e##name (uint32_t val)                      \
    {                                                           \
        return e##name(val);                                    \
    }
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);
/* Vector wrappers: apply a conversion primitive independently to the high
 * and low 32-bit halves of a 64-bit GPR pair. */
#define HELPER_SPE_VECTOR_CONV(name)                            \
    uint64_t helper_ev##name (uint64_t val)                     \
    {                                                           \
        return ((uint64_t)e##name(val >> 32) << 32) |           \
            (uint64_t)e##name(val);                             \
    }
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
/* Single-precision floating-point arithmetic */
3268
static inline uint32_t efsadd(uint32_t op1, uint32_t op2)
3273
u1.f = float32_add(u1.f, u2.f, &env->vec_status);
3277
static inline uint32_t efssub(uint32_t op1, uint32_t op2)
3282
u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
3286
static inline uint32_t efsmul(uint32_t op1, uint32_t op2)
3291
u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
3295
static inline uint32_t efsdiv(uint32_t op1, uint32_t op2)
3300
u1.f = float32_div(u1.f, u2.f, &env->vec_status);
3304
/* Scalar wrappers around the single-precision arithmetic primitives. */
#define HELPER_SPE_SINGLE_ARITH(name)                           \
    uint32_t helper_e##name (uint32_t op1, uint32_t op2)        \
    {                                                           \
        return e##name(op1, op2);                               \
    }
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);
/* Vector wrappers: apply an arithmetic primitive independently to the
 * high and low 32-bit halves of each 64-bit operand. */
#define HELPER_SPE_VECTOR_ARITH(name)                           \
    uint64_t helper_ev##name (uint64_t op1, uint64_t op2)       \
    {                                                           \
        return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) | \
            (uint64_t)e##name(op1, op2);                        \
    }
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
/* Single-precision floating-point comparisons */
3334
static inline uint32_t efscmplt(uint32_t op1, uint32_t op2)
3339
return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3342
static inline uint32_t efscmpgt(uint32_t op1, uint32_t op2)
3347
return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
3350
static inline uint32_t efscmpeq(uint32_t op1, uint32_t op2)
3355
return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3358
static inline uint32_t efststlt(uint32_t op1, uint32_t op2)
3360
/* XXX: TODO: ignore special values (NaN, infinites, ...) */
3361
return efscmplt(op1, op2);
3364
static inline uint32_t efststgt(uint32_t op1, uint32_t op2)
3366
/* XXX: TODO: ignore special values (NaN, infinites, ...) */
3367
return efscmpgt(op1, op2);
3370
static inline uint32_t efststeq(uint32_t op1, uint32_t op2)
3372
/* XXX: TODO: ignore special values (NaN, infinites, ...) */
3373
return efscmpeq(op1, op2);
3376
/* Scalar wrappers around the single-precision comparison primitives;
 * the primitive's result is shifted into CR-field position. */
#define HELPER_SINGLE_SPE_CMP(name)                             \
    uint32_t helper_e##name (uint32_t op1, uint32_t op2)        \
    {                                                           \
        return e##name(op1, op2) << 2;                          \
    }
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);
/* Merge two per-half comparison results (t0 = high half, t1 = low half)
 * into a 4-bit CR field: hi | lo | (hi OR lo) | (hi AND lo). */
static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
#define HELPER_VECTOR_SPE_CMP(name) \
3400
uint32_t helper_ev##name (uint64_t op1, uint64_t op2) \
3402
return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
3405
HELPER_VECTOR_SPE_CMP(fststlt);
3407
HELPER_VECTOR_SPE_CMP(fststgt);
3409
HELPER_VECTOR_SPE_CMP(fststeq);
3411
HELPER_VECTOR_SPE_CMP(fscmplt);
3413
HELPER_VECTOR_SPE_CMP(fscmpgt);
3415
HELPER_VECTOR_SPE_CMP(fscmpeq);
3417
/* Double-precision floating-point conversion */
3418
uint64_t helper_efdcfsi (uint32_t val)
3422
u.d = int32_to_float64(val, &env->vec_status);
3427
uint64_t helper_efdcfsid (uint64_t val)
3431
u.d = int64_to_float64(val, &env->vec_status);
3436
uint64_t helper_efdcfui (uint32_t val)
3440
u.d = uint32_to_float64(val, &env->vec_status);
3445
uint64_t helper_efdcfuid (uint64_t val)
3449
u.d = uint64_to_float64(val, &env->vec_status);
3454
uint32_t helper_efdctsi (uint64_t val)
3459
/* NaN are not treated the same way IEEE 754 does */
3460
if (unlikely(float64_is_any_nan(u.d))) {
3464
return float64_to_int32(u.d, &env->vec_status);
3467
uint32_t helper_efdctui (uint64_t val)
3472
/* NaN are not treated the same way IEEE 754 does */
3473
if (unlikely(float64_is_any_nan(u.d))) {
3477
return float64_to_uint32(u.d, &env->vec_status);
3480
uint32_t helper_efdctsiz (uint64_t val)
3485
/* NaN are not treated the same way IEEE 754 does */
3486
if (unlikely(float64_is_any_nan(u.d))) {
3490
return float64_to_int32_round_to_zero(u.d, &env->vec_status);
3493
uint64_t helper_efdctsidz (uint64_t val)
3498
/* NaN are not treated the same way IEEE 754 does */
3499
if (unlikely(float64_is_any_nan(u.d))) {
3503
return float64_to_int64_round_to_zero(u.d, &env->vec_status);
3506
uint32_t helper_efdctuiz (uint64_t val)
3511
/* NaN are not treated the same way IEEE 754 does */
3512
if (unlikely(float64_is_any_nan(u.d))) {
3516
return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
3519
uint64_t helper_efdctuidz (uint64_t val)
3524
/* NaN are not treated the same way IEEE 754 does */
3525
if (unlikely(float64_is_any_nan(u.d))) {
3529
return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
3532
uint64_t helper_efdcfsf (uint32_t val)
3537
u.d = int32_to_float64(val, &env->vec_status);
3538
tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3539
u.d = float64_div(u.d, tmp, &env->vec_status);
3544
uint64_t helper_efdcfuf (uint32_t val)
3549
u.d = uint32_to_float64(val, &env->vec_status);
3550
tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3551
u.d = float64_div(u.d, tmp, &env->vec_status);
3556
uint32_t helper_efdctsf (uint64_t val)
3562
/* NaN are not treated the same way IEEE 754 does */
3563
if (unlikely(float64_is_any_nan(u.d))) {
3566
tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3567
u.d = float64_mul(u.d, tmp, &env->vec_status);
3569
return float64_to_int32(u.d, &env->vec_status);
3572
uint32_t helper_efdctuf (uint64_t val)
3578
/* NaN are not treated the same way IEEE 754 does */
3579
if (unlikely(float64_is_any_nan(u.d))) {
3582
tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3583
u.d = float64_mul(u.d, tmp, &env->vec_status);
3585
return float64_to_uint32(u.d, &env->vec_status);
3588
uint32_t helper_efscfd (uint64_t val)
3594
u2.f = float64_to_float32(u1.d, &env->vec_status);
3599
uint64_t helper_efdcfs (uint32_t val)
3605
u2.d = float32_to_float64(u1.f, &env->vec_status);
3610
/* Double precision fixed-point arithmetic */
3611
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
3616
u1.d = float64_add(u1.d, u2.d, &env->vec_status);
3620
uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
3625
u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
3629
uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
3634
u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
3638
uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
3643
u1.d = float64_div(u1.d, u2.d, &env->vec_status);
3647
/* Double precision floating point helpers */
3648
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
3653
return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3656
uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
3661
return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
3664
uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
3669
return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3672
uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
3674
/* XXX: TODO: test special values (NaN, infinites, ...) */
3675
return helper_efdtstlt(op1, op2);
3678
uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
3680
/* XXX: TODO: test special values (NaN, infinites, ...) */
3681
return helper_efdtstgt(op1, op2);
3684
uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
3686
/* XXX: TODO: test special values (NaN, infinites, ...) */
3687
return helper_efdtsteq(op1, op2);
3690
/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
/* Segment registers load and store */
3741
target_ulong helper_load_sr (target_ulong sr_num)
3743
#if defined(TARGET_PPC64)
3744
if (env->mmu_model & POWERPC_MMU_64)
3745
return ppc_load_sr(env, sr_num);
3747
return env->sr[sr_num];
3750
void helper_store_sr (target_ulong sr_num, target_ulong val)
3752
ppc_store_sr(env, sr_num, val);
3755
/* SLB management */
#if defined(TARGET_PPC64)
void helper_store_slb (target_ulong rb, target_ulong rs)
{
    if (ppc_store_slb(env, rb, rs) < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
}

target_ulong helper_load_slb_esid (target_ulong rb)
{
    target_ulong rt;

    if (ppc_load_slb_esid(env, rb, &rt) < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
    return rt;
}

target_ulong helper_load_slb_vsid (target_ulong rb)
{
    target_ulong rt;

    if (ppc_load_slb_vsid(env, rb, &rt) < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
    return rt;
}

void helper_slbia (void)
{
    ppc_slb_invalidate_all(env);
}

void helper_slbie (target_ulong addr)
{
    ppc_slb_invalidate_one(env, addr);
}
#endif /* defined(TARGET_PPC64) */
/* TLB management */
3797
void helper_tlbia (void)
3799
ppc_tlb_invalidate_all(env);
3802
void helper_tlbie (target_ulong addr)
3804
ppc_tlb_invalidate_one(env, addr);
3807
/* Software driven TLBs management */
3808
/* PowerPC 602/603 software TLB load instructions helpers */
3809
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
3811
target_ulong RPN, CMP, EPN;
3814
RPN = env->spr[SPR_RPA];
3816
CMP = env->spr[SPR_ICMP];
3817
EPN = env->spr[SPR_IMISS];
3819
CMP = env->spr[SPR_DCMP];
3820
EPN = env->spr[SPR_DMISS];
3822
way = (env->spr[SPR_SRR1] >> 17) & 1;
3823
(void)EPN; /* avoid a compiler warning */
3824
LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
3825
" PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
3827
/* Store this TLB */
3828
ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3829
way, is_code, CMP, RPN);
3832
void helper_6xx_tlbd (target_ulong EPN)
3837
void helper_6xx_tlbi (target_ulong EPN)
3842
/* PowerPC 74xx software TLB load instructions helpers */
3843
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
3845
target_ulong RPN, CMP, EPN;
3848
RPN = env->spr[SPR_PTELO];
3849
CMP = env->spr[SPR_PTEHI];
3850
EPN = env->spr[SPR_TLBMISS] & ~0x3;
3851
way = env->spr[SPR_TLBMISS] & 0x3;
3852
(void)EPN; /* avoid a compiler warning */
3853
LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
3854
" PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
3856
/* Store this TLB */
3857
ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3858
way, is_code, CMP, RPN);
3861
void helper_74xx_tlbd (target_ulong EPN)
3863
do_74xx_tlb(EPN, 0);
3866
void helper_74xx_tlbi (target_ulong EPN)
3868
do_74xx_tlb(EPN, 1);
3871
/* BookE TSIZE field -> page size in bytes: 1KB << (2 * size). */
static inline target_ulong booke_tlb_to_page_size(int size)
{
    return 1024 << (2 * size);
}
/* Page size in bytes -> BookE TSIZE encoding; -1 if not representable. */
static inline int booke_page_size_to_tlb(target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined (TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}
/* Helpers for 4xx TLB management */
3940
#define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */
3942
#define PPC4XX_TLBHI_V 0x00000040
3943
#define PPC4XX_TLBHI_E 0x00000020
3944
#define PPC4XX_TLBHI_SIZE_MIN 0
3945
#define PPC4XX_TLBHI_SIZE_MAX 7
3946
#define PPC4XX_TLBHI_SIZE_DEFAULT 1
3947
#define PPC4XX_TLBHI_SIZE_SHIFT 7
3948
#define PPC4XX_TLBHI_SIZE_MASK 0x00000007
3950
#define PPC4XX_TLBLO_EX 0x00000200
3951
#define PPC4XX_TLBLO_WR 0x00000100
3952
#define PPC4XX_TLBLO_ATTR_MASK 0x000000FF
3953
#define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00
3955
target_ulong helper_4xx_tlbre_hi (target_ulong entry)
3961
entry &= PPC4XX_TLB_ENTRY_MASK;
3962
tlb = &env->tlb.tlbe[entry];
3964
if (tlb->prot & PAGE_VALID) {
3965
ret |= PPC4XX_TLBHI_V;
3967
size = booke_page_size_to_tlb(tlb->size);
3968
if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
3969
size = PPC4XX_TLBHI_SIZE_DEFAULT;
3971
ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
3972
env->spr[SPR_40x_PID] = tlb->PID;
3976
/* Read back the TLBLO word of a 4xx TLB entry (RPN | EX | WR). */
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC) {
        ret |= PPC4XX_TLBLO_EX;
    }
    if (tlb->prot & PAGE_WRITE) {
        ret |= PPC4XX_TLBLO_WR;
    }
    return ret;
}
/* Write the TLBHI word of a 4xx TLB entry: set EPN/size/valid and flush
 * the QEMU TLB pages covered by both the old and the new mapping. */
void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "are not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(env,
                      "Little-endian TLB entries are not supported by now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
}
/* Write the TLBLO word of a 4xx TLB entry: RPN, attributes and the
 * read/write/execute protection bits. */
void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;

    LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
    tlb->prot = PAGE_READ;
    if (val & PPC4XX_TLBLO_EX) {
        tlb->prot |= PAGE_EXEC;
    }
    if (val & PPC4XX_TLBLO_WR) {
        tlb->prot |= PAGE_WRITE;
    }
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}
/* TLB search by effective address under the current 40x PID. */
target_ulong helper_4xx_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}
/* PowerPC 440 TLB management */
4084
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
4087
target_ulong EPN, RPN, size;
4090
LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
4091
__func__, word, (int)entry, value);
4094
tlb = &env->tlb.tlbe[entry];
4097
/* Just here to please gcc */
4099
EPN = value & 0xFFFFFC00;
4100
if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
4103
size = booke_tlb_to_page_size((value >> 4) & 0xF);
4104
if ((tlb->prot & PAGE_VALID) && tlb->size < size)
4108
tlb->attr |= (value >> 8) & 1;
4109
if (value & 0x200) {
4110
tlb->prot |= PAGE_VALID;
4112
if (tlb->prot & PAGE_VALID) {
4113
tlb->prot &= ~PAGE_VALID;
4117
tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
4122
RPN = value & 0xFFFFFC0F;
4123
if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
4128
tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
4129
tlb->prot = tlb->prot & PAGE_VALID;
4131
tlb->prot |= PAGE_READ << 4;
4133
tlb->prot |= PAGE_WRITE << 4;
4135
tlb->prot |= PAGE_EXEC << 4;
4137
tlb->prot |= PAGE_READ;
4139
tlb->prot |= PAGE_WRITE;
4141
tlb->prot |= PAGE_EXEC;
4146
/* Read back one word of a 440 TLB entry; reading word 0 also loads the
 * entry's PID into MMUCR as a side effect. */
target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF)
            size = 1;
        ret |= size << 4;
        if (tlb->attr & 0x1)
            ret |= 0x100;
        if (tlb->prot & PAGE_VALID)
            ret |= 0x200;
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4))
            ret |= 0x1;
        if (tlb->prot & (PAGE_WRITE << 4))
            ret |= 0x2;
        if (tlb->prot & (PAGE_EXEC << 4))
            ret |= 0x4;
        if (tlb->prot & PAGE_READ)
            ret |= 0x8;
        if (tlb->prot & PAGE_WRITE)
            ret |= 0x10;
        if (tlb->prot & PAGE_EXEC)
            ret |= 0x20;
        break;
    }
    return ret;
}
/* TLB search by effective address under the PID held in MMUCR. */
target_ulong helper_440_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}
/* PowerPC BookE 2.06 TLB management */
4199
static ppcmas_tlb_t *booke206_cur_tlb(CPUState *env)
4201
uint32_t tlbncfg = 0;
4202
int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
4203
int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
4206
tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
4207
tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];
4209
if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
4210
cpu_abort(env, "we don't support HES yet\n");
4213
return booke206_get_tlbm(env, tlb, ea, esel);
4216
/* Store a new PID value and flush the QEMU TLB. */
void helper_booke_setpid(uint32_t pidn, target_ulong pid)
{
    env->spr[pidn] = pid;
    /* changing PIDs mean we're in a different address space now */
    tlb_flush(env, 1);
}
void helper_booke206_tlbwe(void)
4225
uint32_t tlbncfg, tlbn;
4228
switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
4229
case MAS0_WQ_ALWAYS:
4230
/* good to go, write that entry */
4233
/* XXX check if reserved */
4238
case MAS0_WQ_CLR_RSRV:
4239
/* XXX clear entry */
4242
/* no idea what to do */
4246
if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
4248
/* XXX we don't support direct LRAT setting yet */
4249
fprintf(stderr, "cpu: don't support LRAT setting yet\n");
4253
tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
4254
tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];
4256
tlb = booke206_cur_tlb(env);
4259
cpu_abort(env, "missing HV implementation\n");
4261
tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
4262
env->spr[SPR_BOOKE_MAS3];
4263
tlb->mas1 = env->spr[SPR_BOOKE_MAS1];
4264
/* XXX needs to change when supporting 64-bit e500 */
4265
tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & 0xffffffff;
4267
if (!(tlbncfg & TLBnCFG_IPROT)) {
4268
/* no IPROT supported by TLB */
4269
tlb->mas1 &= ~MAS1_IPROT;
4272
if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
4273
tlb_flush_page(env, tlb->mas2 & MAS2_EPN_MASK);
4279
/* Load the MAS registers from a TLB entry (inverse of tlbwe). */
static inline void booke206_tlb_to_mas(CPUState *env, ppcmas_tlb_t *tlb)
{
    int tlbn = booke206_tlbm_to_tlbn(env, tlb);
    int way = booke206_tlbm_to_way(env, tlb);

    env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;

    env->spr[SPR_BOOKE_MAS1] = tlb->mas1;
    env->spr[SPR_BOOKE_MAS2] = tlb->mas2;
    env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3;
    env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32;
}
void helper_booke206_tlbre(void)
4296
ppcmas_tlb_t *tlb = NULL;
4298
tlb = booke206_cur_tlb(env);
4299
booke206_tlb_to_mas(env, tlb);
4302
/* tlbsx: search all TLBs for a translation of 'address' under the
 * PID/space selected by MAS6. On a hit, the entry is loaded into the MAS
 * registers; on a miss, the MAS registers get MAS4-derived defaults and
 * next-victim information. */
void helper_booke206_tlbsx(target_ulong address)
{
    ppcmas_tlb_t *tlb = NULL;
    int i, j;
    target_phys_addr_t raddr;
    uint32_t spid, sas;

    spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
    sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);

            if (!ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
                continue;
            }

            if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
                continue;
            }

            booke206_tlb_to_mas(env, tlb);
            return;
        }
    }

    /* no entry found, fill with defaults */
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
    }

    env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
                                << MAS1_TID_SHIFT;

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}
static inline void booke206_invalidate_ea_tlb(CPUState *env, int tlbn,
4356
int ways = booke206_tlb_ways(env, tlbn);
4359
for (i = 0; i < ways; i++) {
4360
ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
4361
mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
4362
if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
4363
!(tlb->mas1 & MAS1_IPROT)) {
4364
tlb->mas1 &= ~MAS1_VALID;
4369
/* tlbivax: invalidate by effective address. Bit 3 of the address selects
 * "invalidate all", bit 4 selects TLB1 over TLB0. */
void helper_booke206_tlbivax(target_ulong address)
{
    if (address & 0x4) {
        /* flush all entries */
        if (address & 0x8) {
            /* flush all of TLB1 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
        } else {
            /* flush all of TLB0 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
        }
        return;
    }

    if (address & 0x8) {
        /* flush TLB1 entries */
        booke206_invalidate_ea_tlb(env, 1, address);
        tlb_flush(env, 1);
    } else {
        /* flush TLB0 entries */
        booke206_invalidate_ea_tlb(env, 0, address);
        tlb_flush_page(env, address & MAS2_EPN_MASK);
    }
}
/* tlbflush: flush TLB0 and/or TLB1 according to the type bit mask. */
void helper_booke206_tlbflush(uint32_t type)
{
    int flags = 0;

    if (type & 2) {
        flags |= BOOKE206_FLUSH_TLB1;
    }

    if (type & 4) {
        flags |= BOOKE206_FLUSH_TLB0;
    }

    booke206_flush_tlb(env, flags, 1);
}
#endif /* !CONFIG_USER_ONLY */