/*
 * PowerPC floating point and SPE emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
24
#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
25
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
27
/*****************************************************************************/
28
/* Floating point operations helpers */
29
uint64_t helper_float32_to_float64(CPUPPCState *env, uint32_t arg)
35
d.d = float32_to_float64(f.f, &env->fp_status);
39
uint32_t helper_float64_to_float32(CPUPPCState *env, uint64_t arg)
45
f.f = float64_to_float32(d.d, &env->fp_status);
49
static inline int isden(float64 d)
55
return ((u.ll >> 52) & 0x7FF) == 0;
58
static inline int ppc_float32_get_unbiased_exp(float32 f)
60
return ((f >> 23) & 0xFF) - 127;
63
static inline int ppc_float64_get_unbiased_exp(float64 f)
65
return ((f >> 52) & 0x7FF) - 1023;
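/*
 * FPSCR[FPRF] is the five-bit result-flags field: one class bit plus the
 * four FPCC condition bits (FL, FG, FE, FU). helper_compute_fprf() below
 * derives it from the sign and class of the double-precision result.
 */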
void helper_compute_fprf(CPUPPCState *env, uint64_t arg)
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_any_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d, &env->fp_status)) {
            /* Signaling NaN: flags are undefined */
    } else if (unlikely(float64_is_infinity(farg.d))) {
        if (float64_is_zero(farg.d)) {
            /* Denormalized numbers */
            /* Normalized numbers */
    /* We update FPSCR_FPRF */
    env->fpscr &= ~(0x1F << FPSCR_FPRF);
    env->fpscr |= fprf << FPSCR_FPRF;
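/*
 * Each POWERPC_EXCP_FP_VX* case below sets the matching FPSCR status bit.
 * When floating-point exceptions are enabled (MSR[FE0] or MSR[FE1] set),
 * a program interrupt is raised; otherwise the helper returns the default
 * quiet-NaN result and the exception stays deferred.
 */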
/* Floating-point invalid operations exception */
static inline __attribute__((__always_inline__))
uint64_t float_invalid_op_excp(CPUPPCState *env, int op, int set_fpcc)
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* Exception is deferred */
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        /* Set the result to quiet NaN */
        ret = 0x7FF8000000000000ULL;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        /* Set the result to quiet NaN */
        ret = 0x7FF8000000000000ULL;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    /* Update the floating-point enabled exception summary */
    env->fpscr |= 1 << FPSCR_FEX;
    if (msr_fe0 != 0 || msr_fe1 != 0) {
        /* GETPC() works here because this is inline */
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_FP | op, GETPC());

static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    /* Update the floating-point enabled exception summary */
    env->fpscr |= 1 << FPSCR_FEX;
    if (msr_fe0 != 0 || msr_fe1 != 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
                               raddr);

static inline void float_overflow_excp(CPUPPCState *env)
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    /* XXX: should adjust the result */
    /* Update the floating-point enabled exception summary */
    env->fpscr |= 1 << FPSCR_FEX;
    /* We must update the target FPR before raising the exception */
    cs->exception_index = POWERPC_EXCP_PROGRAM;
    env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    env->fpscr |= 1 << FPSCR_XX;
    env->fpscr |= 1 << FPSCR_FI;

static inline void float_underflow_excp(CPUPPCState *env)
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    /* XXX: should adjust the result */
    /* Update the floating-point enabled exception summary */
    env->fpscr |= 1 << FPSCR_FEX;
    /* We must update the target FPR before raising the exception */
    cs->exception_index = POWERPC_EXCP_PROGRAM;
    env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;

static inline void float_inexact_excp(CPUPPCState *env)
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    /* Update the floating-point enabled exception summary */
    env->fpscr |= 1 << FPSCR_FEX;
    /* We must update the target FPR before raising the exception */
    cs->exception_index = POWERPC_EXCP_PROGRAM;
    env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
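/*
 * FPSCR[RN] selects the rounding mode: 00 nearest-even, 01 toward zero,
 * 10 toward +infinity, 11 toward -infinity; fpscr_set_rounding_mode()
 * mirrors that field into the softfloat status.
 */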
static inline void fpscr_set_rounding_mode(CPUPPCState *env)
    /* Set rounding mode */
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        /* Round toward +infinity */
        rnd_type = float_round_up;
        /* Round toward -infinity */
        rnd_type = float_round_down;
    set_float_rounding_mode(rnd_type, &env->fp_status);

void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
        fpscr_set_rounding_mode(env);

void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
        env->fpscr |= 1 << FPSCR_VX;
        env->error_code = POWERPC_EXCP_FP;
        env->error_code |= POWERPC_EXCP_FP_VXSNAN;
        env->error_code |= POWERPC_EXCP_FP_VXISI;
        env->error_code |= POWERPC_EXCP_FP_VXIDI;
        env->error_code |= POWERPC_EXCP_FP_VXZDZ;
        env->error_code |= POWERPC_EXCP_FP_VXIMZ;
        env->error_code |= POWERPC_EXCP_FP_VXVC;
        env->error_code |= POWERPC_EXCP_FP_VXSOFT;
        env->error_code |= POWERPC_EXCP_FP_VXSQRT;
        env->error_code |= POWERPC_EXCP_FP_VXCVI;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
        fpscr_set_rounding_mode(env);
    /* Update the floating-point enabled exception summary */
    env->fpscr |= 1 << FPSCR_FEX;
    /* We have to update Rc1 before raising the exception */
    cs->exception_index = POWERPC_EXCP_PROGRAM;

void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    target_ulong prev, new;
    new = (target_ulong)arg;
    new &= ~0x60000000LL;
    new |= prev & 0x60000000LL;
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xFLL << (4 * i));
            env->fpscr |= new & (0xFLL << (4 * i));
    /* Update VX and FEX */
        env->fpscr |= 1 << FPSCR_VX;
        env->fpscr &= ~(1 << FPSCR_VX);
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
        env->fpscr &= ~(1 << FPSCR_FEX);
    fpscr_set_rounding_mode(env);
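/*
 * store_fpscr below is a thin non-helper wrapper around
 * helper_store_fpscr(); presumably it exists so that code outside
 * translated-code paths (e.g. gdbstub or migration) can update the FPSCR
 * through the same logic.
 */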
void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
    helper_store_fpscr(env, arg, mask);

static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int status = get_float_exception_flags(&env->fp_status);
    if (status & float_flag_divbyzero) {
        float_zero_divide_excp(env, raddr);
    } else if (status & float_flag_overflow) {
        float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    } else if (status & float_flag_inexact) {
        float_inexact_excp(env);
    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            raise_exception_err_ra(env, cs->exception_index,
                                   env->error_code, raddr);

static inline __attribute__((__always_inline__))
void float_check_status(CPUPPCState *env)
    /* GETPC() works here because this is inline */
    do_float_check_status(env, GETPC());

void helper_float_check_status(CPUPPCState *env)
    do_float_check_status(env, GETPC());

void helper_reset_fpstatus(CPUPPCState *env)
    set_float_exception_flags(0, &env->fp_status);

uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
    CPU_DoubleU farg1, farg2;
    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);

uint64_t helper_fsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
    CPU_DoubleU farg1, farg2;
    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            /* sNaN subtraction */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);

uint64_t helper_fmul(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
    CPU_DoubleU farg1, farg2;
    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            /* sNaN multiplication */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);

uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
    CPU_DoubleU farg1, farg2;
    if (unlikely(float64_is_infinity(farg1.d) &&
                 float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
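/*
 * FPU_FCTI expands to one float-to-integer conversion helper per
 * instruction, e.g. FPU_FCTI(fctiw, int32, 0x80000000U) generates
 * helper_fctiw() around float64_to_int32(). The nanval argument supplies
 * the saturated result used for NaN inputs.
 */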
#define FPU_FCTI(op, cvt, nanval)                                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)                   \
    farg.ll = float64_to_##cvt(farg.d, &env->fp_status);               \
    if (unlikely(env->fp_status.float_exception_flags)) {              \
        if (float64_is_any_nan(arg)) {                                 \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);      \
            if (float64_is_signaling_nan(arg, &env->fp_status)) {      \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); \
        } else if (env->fp_status.float_exception_flags &              \
                   float_flag_invalid) {                               \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);      \
        float_check_status(env);                                       \

FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)

#define FPU_FCFI(op, cvtr, is_single)                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
        float32 tmp = cvtr(arg, &env->fp_status);          \
        farg.d = float32_to_float64(tmp, &env->fp_status); \
        farg.d = cvtr(arg, &env->fp_status);               \
    float_check_status(env);                               \

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)
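/*
 * For the single-precision forms (is_single), the integer is first
 * converted to float32 and then widened, so the value is correctly
 * rounded to single precision while still being stored in double format.
 */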
static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        farg.ll = arg | 0x0008000000000000ULL;
        int inexact = get_float_exception_flags(&env->fp_status) &
                      float_flag_inexact;
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode(env);
        /* fri* does not set FPSCR[XX] */
            env->fp_status.float_exception_flags &= ~float_flag_inexact;
        float_check_status(env);

uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
    return do_fri(env, arg, float_round_ties_away);

uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
    return do_fri(env, arg, float_round_to_zero);

uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
    return do_fri(env, arg, float_round_up);

uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
    return do_fri(env, arg, float_round_down);

uint64_t helper_fmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
    CPU_DoubleU farg1, farg2, farg3;
    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg3.d, &env->fp_status))) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;
        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);

uint64_t helper_fmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
    CPU_DoubleU farg1, farg2, farg3;
    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) &&
                  float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg3.d, &env->fp_status))) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;
        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);

/* fnmadd - fnmadd. */
uint64_t helper_fnmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
    CPU_DoubleU farg1, farg2, farg3;
    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg3.d, &env->fp_status))) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;
        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
    if (likely(!float64_is_any_nan(farg1.d))) {
        farg1.d = float64_chs(farg1.d);

/* fnmsub - fnmsub. */
uint64_t helper_fnmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
    CPU_DoubleU farg1, farg2, farg3;
    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) &&
                  float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg3.d, &env->fp_status))) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;
        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
    if (likely(!float64_is_any_nan(farg1.d))) {
        farg1.d = float64_chs(farg1.d);

uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN rounding to single precision */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

uint64_t helper_fsqrt(CPUPPCState *env, uint64_t arg)
    if (unlikely(float64_is_any_nan(farg.d))) {
        if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
            /* sNaN square root */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            farg.ll = float64_snan_to_qnan(farg.ll);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
        farg.d = float64_sqrt(farg.d, &env->fp_status);

uint64_t helper_fre(CPUPPCState *env, uint64_t arg)
    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN reciprocal */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);

uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN reciprocal */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

/* frsqrte - frsqrte. */
uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg)
    if (unlikely(float64_is_any_nan(farg.d))) {
        if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
            /* sNaN reciprocal square root */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            farg.ll = float64_snan_to_qnan(farg.ll);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);

uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
        !float64_is_any_nan(farg1.d)) {
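/*
 * ftdiv/ftsqrt compute a CR-field nibble: the 0x8 bit is always set,
 * 0x4 carries fg_flag (problematic operand classes: infinity, zero or
 * denormal) and 0x2 carries fe_flag (exponent ranges that could overflow,
 * underflow or lose precision).
 */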
uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);
        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* so must be denormalized. */
    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);

uint32_t helper_ftsqrt(uint64_t frb)
    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
        int e_b = ppc_float64_get_unbiased_exp(frb);
        if (unlikely(float64_is_any_nan(frb))) {
        } else if (unlikely(float64_is_zero(frb))) {
        } else if (unlikely(float64_is_neg(frb))) {
        } else if (!float64_is_zero(frb) && (e_b <= (-1022+52))) {
        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* therefore must be denormalized. */
    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);

void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
    CPU_DoubleU farg1, farg2;
    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
        /* sNaN comparison */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);

void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
    CPU_DoubleU farg1, farg2;
    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
            /* sNaN comparison */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC, 1);
            /* qNaN comparison */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 1);

/* Single-precision floating-point conversions */
static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
    u.f = int32_to_float32(val, &env->vec_status);

static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
    u.f = uint32_to_float32(val, &env->vec_status);

static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
    /* NaNs are not treated the same way as in IEEE 754 */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
    return float32_to_int32(u.f, &env->vec_status);

static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
    /* NaNs are not treated the same way as in IEEE 754 */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
    return float32_to_uint32(u.f, &env->vec_status);

static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
    /* NaNs are not treated the same way as in IEEE 754 */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
    return float32_to_int32_round_to_zero(u.f, &env->vec_status);

static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
    /* NaNs are not treated the same way as in IEEE 754 */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);

static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
    /* NaNs are not treated the same way as in IEEE 754 */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);
    return float32_to_int32(u.f, &env->vec_status);

static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
    /* NaNs are not treated the same way as in IEEE 754 */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);
    return float32_to_uint32(u.f, &env->vec_status);
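/*
 * The *sf/*uf forms above convert between floats and 32-bit fixed-point
 * fractions: the fraction is scaled by 2^32, hence the divide by
 * 1ULL << 32 when converting in and the multiply when converting out.
 */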
#define HELPER_SPE_SINGLE_CONV(name)                          \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val)   \
        return e##name(env, val);                             \

HELPER_SPE_SINGLE_CONV(fscfsi);
HELPER_SPE_SINGLE_CONV(fscfui);
HELPER_SPE_SINGLE_CONV(fscfuf);
HELPER_SPE_SINGLE_CONV(fscfsf);
HELPER_SPE_SINGLE_CONV(fsctsi);
HELPER_SPE_SINGLE_CONV(fsctui);
HELPER_SPE_SINGLE_CONV(fsctsiz);
HELPER_SPE_SINGLE_CONV(fsctuiz);
HELPER_SPE_SINGLE_CONV(fsctsf);
HELPER_SPE_SINGLE_CONV(fsctuf);

#define HELPER_SPE_VECTOR_CONV(name)                          \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)  \
        return ((uint64_t)e##name(env, val >> 32) << 32) |    \
               (uint64_t)e##name(env, val);                   \

HELPER_SPE_VECTOR_CONV(fscfsi);
HELPER_SPE_VECTOR_CONV(fscfui);
HELPER_SPE_VECTOR_CONV(fscfuf);
HELPER_SPE_VECTOR_CONV(fscfsf);
HELPER_SPE_VECTOR_CONV(fsctsi);
HELPER_SPE_VECTOR_CONV(fsctui);
HELPER_SPE_VECTOR_CONV(fsctsiz);
HELPER_SPE_VECTOR_CONV(fsctuiz);
HELPER_SPE_VECTOR_CONV(fsctsf);
HELPER_SPE_VECTOR_CONV(fsctuf);

/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);

static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);

static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);

static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);

#define HELPER_SPE_SINGLE_ARITH(name)                                     \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
        return e##name(env, op1, op2);                                    \

HELPER_SPE_SINGLE_ARITH(fsadd);
HELPER_SPE_SINGLE_ARITH(fssub);
HELPER_SPE_SINGLE_ARITH(fsmul);
HELPER_SPE_SINGLE_ARITH(fsdiv);

#define HELPER_SPE_VECTOR_ARITH(name)                                      \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |      \
               (uint64_t)e##name(env, op1, op2);                           \

HELPER_SPE_VECTOR_ARITH(fsadd);
HELPER_SPE_VECTOR_ARITH(fssub);
HELPER_SPE_VECTOR_ARITH(fsmul);
HELPER_SPE_VECTOR_ARITH(fsdiv);

/* Single-precision floating-point comparisons */
static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;

static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;

static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;

static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmplt(env, op1, op2);

static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpgt(env, op1, op2);

static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpeq(env, op1, op2);

#define HELPER_SINGLE_SPE_CMP(name)                                       \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
        return e##name(env, op1, op2);                                    \

HELPER_SINGLE_SPE_CMP(fststlt);
HELPER_SINGLE_SPE_CMP(fststgt);
HELPER_SINGLE_SPE_CMP(fststeq);
HELPER_SINGLE_SPE_CMP(fscmplt);
HELPER_SINGLE_SPE_CMP(fscmpgt);
HELPER_SINGLE_SPE_CMP(fscmpeq);

static inline uint32_t evcmp_merge(int t0, int t1)
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
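/*
 * evcmp_merge() folds the two per-element results into a CR nibble:
 * for example t0 = 1, t1 = 0 yields 0b1010 (high element true, "any"
 * set, "all" clear).
 */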
#define HELPER_VECTOR_SPE_CMP(name)                                        \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),             \
                           e##name(env, op1, op2));                        \

HELPER_VECTOR_SPE_CMP(fststlt);
HELPER_VECTOR_SPE_CMP(fststgt);
HELPER_VECTOR_SPE_CMP(fststeq);
HELPER_VECTOR_SPE_CMP(fscmplt);
HELPER_VECTOR_SPE_CMP(fscmpgt);
HELPER_VECTOR_SPE_CMP(fscmpeq);

/* Double-precision floating-point conversion */
uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
    u.d = int32_to_float64(val, &env->vec_status);

uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
    u.d = int64_to_float64(val, &env->vec_status);

uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
    u.d = uint32_to_float64(val, &env->vec_status);

uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
    u.d = uint64_to_float64(val, &env->vec_status);

uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
    /* NaNs are not treated the same way as in IEEE 754 */
    if (unlikely(float64_is_any_nan(u.d))) {
    return float64_to_int32(u.d, &env->vec_status);

uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
    /* NaNs are not treated the same way as in IEEE 754 */
    if (unlikely(float64_is_any_nan(u.d))) {
    return float64_to_uint32(u.d, &env->vec_status);

uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
    /* NaNs are not treated the same way as in IEEE 754 */
    if (unlikely(float64_is_any_nan(u.d))) {
    return float64_to_int32_round_to_zero(u.d, &env->vec_status);

uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
    /* NaNs are not treated the same way as in IEEE 754 */
    if (unlikely(float64_is_any_nan(u.d))) {
    return float64_to_int64_round_to_zero(u.d, &env->vec_status);

uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
    /* NaNs are not treated the same way as in IEEE 754 */
    if (unlikely(float64_is_any_nan(u.d))) {
    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);

uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
    /* NaNs are not treated the same way as in IEEE 754 */
    if (unlikely(float64_is_any_nan(u.d))) {
    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);

uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
    /* NaNs are not treated the same way as in IEEE 754 */
    if (unlikely(float64_is_any_nan(u.d))) {
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);
    return float64_to_int32(u.d, &env->vec_status);

uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
    /* NaNs are not treated the same way as in IEEE 754 */
    if (unlikely(float64_is_any_nan(u.d))) {
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);
    return float64_to_uint32(u.d, &env->vec_status);

uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
    u2.f = float64_to_float32(u1.d, &env->vec_status);

uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
    u2.d = float32_to_float64(u1.f, &env->vec_status);

/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);

uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);

uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);

uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);

/* Double-precision floating-point helpers */
uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;

uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;

uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;

uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstlt(env, op1, op2);

uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstgt(env, op1, op2);

uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtsteq(env, op1, op2);
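/*
 * VSX encodes each 6-bit register number as a 5-bit field plus an
 * extension bit elsewhere in the opcode; DECODE_SPLIT below glues the two
 * pieces back together. For example xT(opcode) prepends the TX bit
 * (opcode bit 0) to the 5-bit T field found at bit 21.
 */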
#define DECODE_SPLIT(opcode, shift1, nb1, shift2, nb2) \
    (((((opcode) >> (shift1)) & ((1 << (nb1)) - 1)) << nb2) | \
     (((opcode) >> (shift2)) & ((1 << (nb2)) - 1)))

#define xT(opcode) DECODE_SPLIT(opcode, 0, 1, 21, 5)
#define xA(opcode) DECODE_SPLIT(opcode, 2, 1, 16, 5)
#define xB(opcode) DECODE_SPLIT(opcode, 1, 1, 11, 5)
#define xC(opcode) DECODE_SPLIT(opcode, 3, 1, 6, 5)
#define BF(opcode) (((opcode) >> (31-8)) & 7)

typedef union _ppc_vsr_t {
#if defined(HOST_WORDS_BIGENDIAN)
#define VsrW(i) u32[i]
#define VsrD(i) u64[i]
#else
#define VsrW(i) u32[3-(i)]
#define VsrD(i) u64[1-(i)]
#endif

static void getVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
    vsr->VsrD(0) = env->fpr[n];
    vsr->VsrD(1) = env->vsr[n];
    vsr->u64[0] = env->avr[n-32].u64[0];
    vsr->u64[1] = env->avr[n-32].u64[1];

static void putVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
    env->fpr[n] = vsr->VsrD(0);
    env->vsr[n] = vsr->VsrD(1);
    env->avr[n-32].u64[0] = vsr->u64[0];
    env->avr[n-32].u64[1] = vsr->u64[1];

#define float64_to_float64(x, env) x
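/*
 * The identity macro above lets the templated VSX helpers call
 * float64_to_##tp(...) uniformly even when the element type is float64.
 */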
/* VSX_ADD_SUB - VSX floating point add/subtract
 *   name  - instruction mnemonic
 *   op    - operation (add or sub)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                    \
void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
    ppc_vsr_t xt, xa, xb;                                                    \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_##op(xa.fld, xb.fld, &tstat);                          \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {      \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            xt.fld = helper_frsp(env, xt.fld);                               \
            helper_compute_fprf(env, xt.fld);                                \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \

VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)

/* VSX_MUL - VSX floating point multiply
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
    ppc_vsr_t xt, xa, xb;                                                    \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_mul(xa.fld, xb.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if ((tp##_is_infinity(xa.fld) && tp##_is_zero(xb.fld)) ||        \
                (tp##_is_infinity(xb.fld) && tp##_is_zero(xa.fld))) {        \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            xt.fld = helper_frsp(env, xt.fld);                               \
            helper_compute_fprf(env, xt.fld);                                \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \

VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)

/* VSX_DIV - VSX floating point divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
    ppc_vsr_t xt, xa, xb;                                                    \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_div(xa.fld, xb.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {      \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, sfprf);    \
            } else if (tp##_is_zero(xa.fld) &&                               \
                       tp##_is_zero(xb.fld)) {                               \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            xt.fld = helper_frsp(env, xt.fld);                               \
            helper_compute_fprf(env, xt.fld);                                \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \

VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)

/* VSX_RE - VSX floating point reciprocal estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                               \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(tp##_is_signaling_nan(xb.fld, &env->fp_status))) {      \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);       \
        xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status);                \
            xt.fld = helper_frsp(env, xt.fld);                               \
            helper_compute_fprf(env, xt.fld);                                \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \

VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)

/* VSX_SQRT - VSX floating point square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            xt.fld = helper_frsp(env, xt.fld);                               \
            helper_compute_fprf(env, xt.fld);                                \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \

VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)

/* VSX_RSQRTE - VSX floating point reciprocal square root estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                           \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
        xt.fld = tp##_div(tp##_one, xt.fld, &tstat);                         \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            xt.fld = helper_frsp(env, xt.fld);                               \
            helper_compute_fprf(env, xt.fld);                                \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \

VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)

/* VSX_TDIV - VSX floating point test for divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   emax  - maximum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(tp##_is_infinity(xa.fld) ||                             \
                     tp##_is_infinity(xb.fld) ||                             \
                     tp##_is_zero(xb.fld))) {                                \
            int e_a = ppc_##tp##_get_unbiased_exp(xa.fld);                   \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);                   \
            if (unlikely(tp##_is_any_nan(xa.fld) ||                          \
                         tp##_is_any_nan(xb.fld))) {                         \
            } else if ((e_b <= emin) || (e_b >= (emax-2))) {                 \
            } else if (!tp##_is_zero(xa.fld) &&                              \
                       (((e_a - e_b) >= emax) ||                             \
                        ((e_a - e_b) <= (emin+1)) ||                         \
                        (e_a <= (emin+nbits)))) {                            \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {                \
                /* XB is not zero because of the above check and */          \
                /* so must be denormalized. */                               \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);      \

VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)

/* VSX_TSQRT - VSX floating point test for square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   emax  - maximum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                            \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(tp##_is_infinity(xb.fld) ||                             \
                     tp##_is_zero(xb.fld))) {                                \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);                   \
            if (unlikely(tp##_is_any_nan(xb.fld))) {                         \
            } else if (unlikely(tp##_is_zero(xb.fld))) {                     \
            } else if (unlikely(tp##_is_neg(xb.fld))) {                      \
            } else if (!tp##_is_zero(xb.fld) &&                              \
                       (e_b <= (emin+nbits))) {                              \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {                \
                /* XB is not zero because of the above check and */          \
                /* therefore must be denormalized. */                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);      \

VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)

/* VSX_MADD - VSX floating point multiply/add variations
 *   op       - instruction mnemonic
 *   nels     - number of elements (1, 2 or 4)
 *   tp       - type (float32 or float64)
 *   fld      - vsr_t field (VsrD(*) or VsrW(*))
 *   maddflgs - flags for the float*muladd routine that control the
 *              various forms (madd, msub, nmadd, nmsub)
 *   afrm     - A form (1=A, 0=M)
 */
#define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp)             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
    ppc_vsr_t xt_in, xa, xb, xt_out;                                         \
    if (afrm) { /* AxB + T */                                                \
    } else { /* AxT + B */                                                   \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt_in, env);                                         \
    helper_reset_fpstatus(env);                                              \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) { \
            /* Avoid double rounding errors by rounding the intermediate */  \
            /* result to odd. */                                             \
            set_float_rounding_mode(float_round_to_zero, &tstat);            \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                 \
                                     maddflgs, &tstat);                      \
            xt_out.fld |= (get_float_exception_flags(&tstat) &               \
                           float_flag_inexact) != 0;                         \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                 \
                                     maddflgs, &tstat);                      \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_signaling_nan(xa.fld, &tstat) ||                     \
                tp##_is_signaling_nan(b->fld, &tstat) ||                     \
                tp##_is_signaling_nan(c->fld, &tstat)) {                     \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
                tstat.float_exception_flags &= ~float_flag_invalid;          \
            if ((tp##_is_infinity(xa.fld) && tp##_is_zero(b->fld)) ||        \
                (tp##_is_zero(xa.fld) && tp##_is_infinity(b->fld))) {        \
                xt_out.fld = float64_to_##tp(float_invalid_op_excp(env,      \
                    POWERPC_EXCP_FP_VXIMZ, sfprf), &env->fp_status);         \
                tstat.float_exception_flags &= ~float_flag_invalid;          \
            if ((tstat.float_exception_flags & float_flag_invalid) &&        \
                ((tp##_is_infinity(xa.fld) ||                                \
                  tp##_is_infinity(b->fld)) &&                               \
                 tp##_is_infinity(c->fld))) {                                \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf);    \
            xt_out.fld = helper_frsp(env, xt_out.fld);                       \
            helper_compute_fprf(env, xt_out.fld);                            \
    putVSR(xT(opcode), &xt_out, env);                                        \
    float_check_status(env);                                                 \

#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)
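/*
 * The madd/msub/nmadd/nmsub variants all map onto softfloat's fused
 * multiply-add flags: msub negates the addend (c), nmadd negates the
 * final result, nmsub does both, and plain madd (MADD_FLGS) applies no
 * negation at all.
 */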
2329
VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
2330
VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
2331
VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
2332
VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
2333
VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
2334
VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
2335
VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
2336
VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)
2338
VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
2339
VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
2340
VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
2341
VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
2342
VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
2343
VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
2344
VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
2345
VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)
2347
VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
2348
VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
2349
VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
2350
VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
2351
VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
2352
VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
2353
VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
2354
VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)
2356
VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
2357
VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
2358
VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
2359
VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
2360
VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
2361
VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
2362
VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
2363
VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
2365
/* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
2366
* op - instruction mnemonic
2367
* cmp - comparison operation
2368
* exp - expected result of comparison
2369
* svxvc - set VXVC bit
2371
#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc) \
2372
void helper_##op(CPUPPCState *env, uint32_t opcode) \
2374
ppc_vsr_t xt, xa, xb; \
2375
bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false; \
2377
getVSR(xA(opcode), &xa, env); \
2378
getVSR(xB(opcode), &xb, env); \
2379
getVSR(xT(opcode), &xt, env); \
2381
if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) || \
2382
float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) { \
2383
vxsnan_flag = true; \
2384
if (fpscr_ve == 0 && svxvc) { \
2387
} else if (svxvc) { \
2388
vxvc_flag = float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) || \
2389
float64_is_quiet_nan(xb.VsrD(0), &env->fp_status); \
2391
if (vxsnan_flag) { \
2392
float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
2395
float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0); \
2397
vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag); \
2400
if (float64_##cmp(xb.VsrD(0), xa.VsrD(0), &env->fp_status) == exp) { \
2408
putVSR(xT(opcode), &xt, env); \
2409
helper_float_check_status(env); \
2412
VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
2413
VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
2414
VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
2415
VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
#define VSX_SCALAR_CMP(op, ordered) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xa, xb; \
    uint32_t cc = 0; \
 \
    getVSR(xA(opcode), &xa, env); \
    getVSR(xB(opcode), &xb, env); \
 \
    if (unlikely(float64_is_any_nan(xa.VsrD(0)) || \
                 float64_is_any_nan(xb.VsrD(0)))) { \
        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) || \
            float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) { \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
        } \
        if (ordered) { \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0); \
        } \
        cc = 0x01; \
    } else { \
        if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) { \
            cc = 0x08; \
        } else if (!float64_le(xa.VsrD(0), xb.VsrD(0), \
                               &env->fp_status)) { \
            cc = 0x04; \
        } else { \
            cc = 0x02; \
        } \
    } \
 \
    env->fpscr &= ~(0x0F << FPSCR_FPRF); \
    env->fpscr |= cc << FPSCR_FPRF; \
    env->crf[BF(opcode)] = cc; \
 \
    float_check_status(env); \
}

VSX_SCALAR_CMP(xscmpodp, 1)
VSX_SCALAR_CMP(xscmpudp, 0)
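
/* The 4-bit cc value computed above serves as both the FPCC field and the
 * target CR field: 0x8 = lower, 0x4 = greater, 0x2 = equal,
 * 0x1 = unordered. Only the ordered form (xscmpodp) raises VXVC whenever
 * an operand is a NaN; the unordered form (xscmpudp) just reports
 * "unordered", raising VXSNAN alone when a signaling NaN is involved.
 */
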
/* VSX_MAX_MIN - VSX floating point maximum/minimum
 * name - instruction mnemonic
 * op - operation (max or min)
 * nels - number of elements (1, 2 or 4)
 * tp - type (float32 or float64)
 * fld - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_MAX_MIN(name, op, nels, tp, fld) \
void helper_##name(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xa, xb; \
    int i; \
 \
    getVSR(xA(opcode), &xa, env); \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
 \
    for (i = 0; i < nels; i++) { \
        xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status); \
        if (unlikely(tp##_is_signaling_nan(xa.fld, &env->fp_status) || \
                     tp##_is_signaling_nan(xb.fld, &env->fp_status))) { \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
        } \
    } \
 \
    putVSR(xT(opcode), &xt, env); \
    float_check_status(env); \
}

VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
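
/* maxnum/minnum follow the IEEE 754-2008 maxNum/minNum rules: when exactly
 * one operand is a quiet NaN, the numeric operand wins, so e.g.
 * float64_maxnum(QNaN, 1.0) returns 1.0. Signaling NaNs still trigger the
 * VXSNAN check above.
 */
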
/* VSX_CMP - VSX floating point compare
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * tp - type (float32 or float64)
 * fld - vsr_t field (VsrD(*) or VsrW(*))
 * cmp - comparison operation
 * svxvc - set VXVC bit
 * exp - expected result of comparison
 */
#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xa, xb; \
    int i; \
    int all_true = 1; \
    int all_false = 1; \
 \
    getVSR(xA(opcode), &xa, env); \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
 \
    for (i = 0; i < nels; i++) { \
        if (unlikely(tp##_is_any_nan(xa.fld) || \
                     tp##_is_any_nan(xb.fld))) { \
            if (tp##_is_signaling_nan(xa.fld, &env->fp_status) || \
                tp##_is_signaling_nan(xb.fld, &env->fp_status)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
            } \
            if (svxvc) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0); \
            } \
            xt.fld = 0; \
            all_true = 0; \
        } else { \
            if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == exp) { \
                xt.fld = -1; \
                all_false = 0; \
            } else { \
                xt.fld = 0; \
                all_true = 0; \
            } \
        } \
    } \
 \
    putVSR(xT(opcode), &xt, env); \
    if ((opcode >> (31 - 21)) & 1) { \
        env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0); \
    } \
    float_check_status(env); \
}

VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
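
/* (opcode >> (31 - 21)) & 1 extracts the record bit of the dot forms
 * (bit 21 in the ISA's big-endian bit numbering); when set, CR6
 * summarises the lanes. For example, xvcmpeqdp. on {1.0, 2.0} vs
 * {1.0, 3.0} matches one lane and misses the other, so neither all_true
 * nor all_false survives and CR6 ends up 0.
 */
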
/* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * stp - source type (float32 or float64)
 * ttp - target type (float32 or float64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field (f32 or f64)
 * sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xb; \
    int i; \
 \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
 \
    for (i = 0; i < nels; i++) { \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
        if (unlikely(stp##_is_signaling_nan(xb.sfld, \
                                            &env->fp_status))) { \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld); \
        } \
        if (sfprf) { \
            helper_compute_fprf(env, ttp##_to_float64(xt.tfld, \
                                &env->fp_status)); \
        } \
    } \
 \
    putVSR(xT(opcode), &xt, env); \
    float_check_status(env); \
}

VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2*i), 0)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2*i), VsrD(i), 0)
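
/* The vector forms index the single-precision side with VsrW(2*i), i.e.
 * the even word of each doubleword, which is where xvcvdpsp deposits its
 * results and where xvcvspdp expects its sources in the VSR.
 */
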
uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return (uint64_t)float64_to_float32(xb, &tstat) << 32;
}

uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return float32_to_float64(xb >> 32, &tstat);
}
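
/* The "n" (non-signalling) variants above convert on a local copy of
 * fp_status with the exception flags cleared and never copy it back, so
 * they cannot raise floating-point exceptions or disturb FPSCR/FPRF.
 */
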
/* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * stp - source type (float32 or float64)
 * ttp - target type (int32, uint32, int64 or uint64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field
 * rnan - resulting NaN
 */
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xb; \
    int i; \
 \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
 \
    for (i = 0; i < nels; i++) { \
        if (unlikely(stp##_is_any_nan(xb.sfld))) { \
            if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
            } \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0); \
            xt.tfld = rnan; \
        } else { \
            xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld, \
                          &env->fp_status); \
            if (env->fp_status.float_exception_flags & float_flag_invalid) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0); \
            } \
        } \
    } \
 \
    putVSR(xT(opcode), &xt, env); \
    float_check_status(env); \
}

VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2*i), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2*i), 0U)
VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2*i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2*i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
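
/* rnan is the value substituted for a NaN source: the most negative
 * representable integer for the signed targets and zero for the unsigned
 * ones, which is the architected result of these VXCVI-raising
 * conversions.
 */
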
/* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * stp - source type (int32, uint32, int64 or uint64)
 * ttp - target type (float32 or float64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field
 * sfprf - set FPRF
 * r2sp - round the result to single precision
 */
#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xb; \
    int i; \
 \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
 \
    for (i = 0; i < nels; i++) { \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
        if (r2sp) { \
            xt.tfld = helper_frsp(env, xt.tfld); \
        } \
        if (sfprf) { \
            helper_compute_fprf(env, xt.tfld); \
        } \
    } \
 \
    putVSR(xT(opcode), &xt, env); \
    float_check_status(env); \
}

VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2*i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2*i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2*i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
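
/* For the scalar *sp variants (r2sp == 1) the integer is first converted
 * to double precision and then rounded to single via helper_frsp, so an
 * int64 with more than 53 significant bits is rounded twice. A minimal
 * sketch of the path taken by xscvsxdsp:
 *
 *     f = int64_to_float64(src, &env->fp_status);  // may round once
 *     f = helper_frsp(env, f);                     // rounds again to SP
 */
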
/* For "use current rounding mode", define a value that will not be one of
 * the existing rounding mode enums.
 */
#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
                             float_round_up + float_round_to_zero)
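
/* With the usual softfloat encoding (nearest_even = 0, down = 1, up = 2,
 * to_zero = 3) this sum evaluates to 6, which matches none of the
 * individual enumerators (nor float_round_ties_away = 4), so it is a safe
 * out-of-band sentinel.
 */
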
/* VSX_ROUND - VSX floating point round
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * tp - type (float32 or float64)
 * fld - vsr_t field (VsrD(*) or VsrW(*))
 * rmode - rounding mode
 * sfprf - set FPRF
 */
#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xb; \
    int i; \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
 \
    if (rmode != FLOAT_ROUND_CURRENT) { \
        set_float_rounding_mode(rmode, &env->fp_status); \
    } \
 \
    for (i = 0; i < nels; i++) { \
        if (unlikely(tp##_is_signaling_nan(xb.fld, \
                                           &env->fp_status))) { \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
            xt.fld = tp##_snan_to_qnan(xb.fld); \
        } else { \
            xt.fld = tp##_round_to_int(xb.fld, &env->fp_status); \
        } \
        if (sfprf) { \
            helper_compute_fprf(env, xt.fld); \
        } \
    } \
 \
    /* If this is not a "use current rounding mode" instruction, \
     * then inhibit setting of the XX bit and restore rounding \
     * mode from FPSCR */ \
    if (rmode != FLOAT_ROUND_CURRENT) { \
        fpscr_set_rounding_mode(env); \
        env->fp_status.float_exception_flags &= ~float_flag_inexact; \
    } \
 \
    putVSR(xT(opcode), &xt, env); \
    float_check_status(env); \
}
VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)

VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)

VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
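
/* xsrdpi/xvrdpi/xvrspi use float_round_ties_away, the PowerPC "round to
 * nearest away" mode of these round-to-integer instructions, as opposed
 * to the IEEE default round-to-nearest-even.
 */
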
uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
{
    helper_reset_fpstatus(env);

    uint64_t xt = helper_frsp(env, xb);

    helper_compute_fprf(env, xt);
    float_check_status(env);
    return xt;
}