~pmdj/ubuntu/trusty/qemu/2.9+applesmc+fadtv3

Viewing changes to target-ppc/fpu_helper.c

  • Committer: Phil Dennis-Jordan
  • Date: 2017-07-21 08:03:43 UTC
  • mfrom: (1.1.1)
  • Revision ID: phil@philjordan.eu-20170721080343-2yr2vdj7713czahv
New upstream release 2.9.0.

/*
 *  PowerPC floating point and SPE emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"

#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
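
/*
 * Editor's note: the two macros above quiet a signalling NaN by setting the
 * most-significant fraction bit (bit 51 of a float64, bit 22 of a float32),
 * turning the sNaN payload into the corresponding qNaN.
 */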

/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(CPUPPCState *env, uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

uint32_t helper_float64_to_float32(CPUPPCState *env, uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}

static inline int isden(float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}

static inline int ppc_float32_get_unbiased_exp(float32 f)
{
    return ((f >> 23) & 0xFF) - 127;
}

static inline int ppc_float64_get_unbiased_exp(float64 f)
{
    return ((f >> 52) & 0x7FF) - 1023;
}
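
/*
 * FPRF values assigned below (class bit C plus the FPCC bits FL, FG, FE, FU):
 * 0x11 quiet NaN, 0x09 -infinity, 0x05 +infinity, 0x12 -zero, 0x02 +zero,
 * 0x18 -denormal, 0x14 +denormal, 0x08 -normal, 0x04 +normal.  A signalling
 * NaN leaves the field at 0x00 (the flags are undefined in that case).
 */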

void helper_compute_fprf(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    int isneg;
    int fprf;

    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_any_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d, &env->fp_status)) {
            /* Signaling NaN: flags are undefined */
            fprf = 0x00;
        } else {
            /* Quiet NaN */
            fprf = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg) {
            fprf = 0x09;
        } else {
            fprf = 0x05;
        }
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg) {
                fprf = 0x12;
            } else {
                fprf = 0x02;
            }
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                fprf = 0x10;
            } else {
                /* Normalized numbers */
                fprf = 0x00;
            }
            if (isneg) {
                fprf |= 0x08;
            } else {
                fprf |= 0x04;
            }
        }
    }
    /* We update FPSCR_FPRF */
    env->fpscr &= ~(0x1F << FPSCR_FPRF);
    env->fpscr |= fprf << FPSCR_FPRF;
}

/* Floating-point invalid operations exception */
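/*
 * Sets the FPSCR status bit selected by 'op', plus the VX and FX summaries.
 * For the arithmetic and conversion cases, when the invalid-operation
 * exception is disabled (FPSCR[VE] = 0) the caller receives the default
 * quiet-NaN result 0x7FF8000000000000ULL; when it is enabled and MSR[FE0]
 * or MSR[FE1] is set, a program interrupt is raised instead.
 */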
static inline __attribute__((__always_inline__))
uint64_t float_invalid_op_excp(CPUPPCState *env, int op, int set_fpcc)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        if (set_fpcc) {
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            if (set_fpcc) {
                env->fpscr &= ~(0xF << FPSCR_FPCC);
                env->fpscr |= 0x11 << FPSCR_FPCC;
            }
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            if (set_fpcc) {
                env->fpscr &= ~(0xF << FPSCR_FPCC);
                env->fpscr |= 0x11 << FPSCR_FPCC;
            }
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            /* GETPC() works here because this is inline */
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | op, GETPC());
        }
    }
    return ret;
}

static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
                                   raddr);
        }
    }
}

static inline void float_overflow_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

static inline void float_underflow_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static inline void float_inexact_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
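
/*
 * FPSCR[RN] to softfloat rounding-mode mapping used below:
 * 0 = round to nearest/even, 1 = round toward zero,
 * 2 = round toward +infinity, 3 = round toward -infinity.
 */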

static inline void fpscr_set_rounding_mode(CPUPPCState *env)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}

void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        default:
            break;
        }
    }
}
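
/*
 * When a status bit is set here, the FX summary (and VX for the VX* bits)
 * is raised as well.  Setting an enable bit whose corresponding status bit
 * is already pending takes one of the raise_* paths below, which record the
 * error code and arm a program interrupt via raise_excp.
 */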

void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= FP_FX;
            if (fpscr_ve) {
                goto raise_ve;
            }
            break;
        case FPSCR_OX:
            env->fpscr |= FP_FX;
            if (fpscr_oe) {
                goto raise_oe;
            }
            break;
        case FPSCR_UX:
            env->fpscr |= FP_FX;
            if (fpscr_ue) {
                goto raise_ue;
            }
            break;
        case FPSCR_ZX:
            env->fpscr |= FP_FX;
            if (fpscr_ze) {
                goto raise_ze;
            }
            break;
        case FPSCR_XX:
            env->fpscr |= FP_FX;
            if (fpscr_xe) {
                goto raise_xe;
            }
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= FP_FX;
            if (fpscr_ve != 0) {
                goto raise_ve;
            }
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan) {
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                }
                if (fpscr_vxisi) {
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                }
                if (fpscr_vxidi) {
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                }
                if (fpscr_vxzdz) {
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                }
                if (fpscr_vximz) {
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                }
                if (fpscr_vxvc) {
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                }
                if (fpscr_vxsoft) {
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                }
                if (fpscr_vxsqrt) {
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                }
                if (fpscr_vxcvi) {
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                }
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
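
/*
 * 'mask' selects which 4-bit nibbles of the FPSCR are written: bit i of the
 * mask enables nibble i.  The FEX and VX bits (0x60000000) are never taken
 * from the source value; they are recomputed from the individual status and
 * enable bits afterwards.
 */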

void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    target_ulong prev, new;
    int i;

    prev = env->fpscr;
    new = (target_ulong)arg;
    new &= ~0x60000000LL;
    new |= prev & 0x60000000LL;
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xFLL << (4 * i));
            env->fpscr |= new & (0xFLL << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode(env);
}

void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    helper_store_fpscr(env, arg, mask);
}
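
/*
 * Folds the exception flags accumulated in the softfloat status into the
 * FPSCR (divide-by-zero, overflow, underflow, inexact) and, if a deferred
 * floating-point program interrupt was armed while the target FPR was being
 * updated, raises it here provided MSR[FE0] or MSR[FE1] is set.
 */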

static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int status = get_float_exception_flags(&env->fp_status);

    if (status & float_flag_divbyzero) {
        float_zero_divide_excp(env, raddr);
    } else if (status & float_flag_overflow) {
        float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    } else if (status & float_flag_inexact) {
        float_inexact_excp(env);
    }

    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            raise_exception_err_ra(env, cs->exception_index,
                                   env->error_code, raddr);
        }
    }
}

static inline  __attribute__((__always_inline__))
void float_check_status(CPUPPCState *env)
{
    /* GETPC() works here because this is inline */
    do_float_check_status(env, GETPC());
}

void helper_float_check_status(CPUPPCState *env)
{
    do_float_check_status(env, GETPC());
}

void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}

/* fadd - fadd. */
uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            /* sNaN addition */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fsub - fsub. */
uint64_t helper_fsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            /* sNaN subtraction */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fmul - fmul. */
uint64_t helper_fmul(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            /* sNaN multiplication */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fdiv - fdiv. */
uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) &&
                 float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            /* sNaN division */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
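
/*
 * FPU_FCTI generates the float64 -> integer conversion helpers.  'cvt' names
 * the softfloat conversion routine (e.g. int32_round_to_zero) and 'nanval'
 * is the value substituted when the source is a NaN; invalid conversions set
 * VXCVI, and sNaN inputs additionally set VXSNAN.  For example,
 * FPU_FCTI(fctiw, int32, 0x80000000U) expands to
 * uint64_t helper_fctiw(CPUPPCState *env, uint64_t arg).
 */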


#define FPU_FCTI(op, cvt, nanval)                                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)                   \
{                                                                      \
    CPU_DoubleU farg;                                                  \
                                                                       \
    farg.ll = arg;                                                     \
    farg.ll = float64_to_##cvt(farg.d, &env->fp_status);               \
                                                                       \
    if (unlikely(env->fp_status.float_exception_flags)) {              \
        if (float64_is_any_nan(arg)) {                                 \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);      \
            if (float64_is_signaling_nan(arg, &env->fp_status)) {      \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); \
            }                                                          \
            farg.ll = nanval;                                          \
        } else if (env->fp_status.float_exception_flags &              \
                   float_flag_invalid) {                               \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);      \
        }                                                              \
        float_check_status(env);                                       \
    }                                                                  \
    return farg.ll;                                                    \
 }

FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
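
/*
 * FPU_FCFI generates the integer -> float conversion helpers ('cvtr' is the
 * softfloat conversion).  When 'is_single' is set the value is first rounded
 * to single precision and then widened back to the double format in which
 * the FPRs hold all values.
 */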

#define FPU_FCFI(op, cvtr, is_single)                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
{                                                          \
    CPU_DoubleU farg;                                      \
                                                           \
    if (is_single) {                                       \
        float32 tmp = cvtr(arg, &env->fp_status);          \
        farg.d = float32_to_float64(tmp, &env->fp_status); \
    } else {                                               \
        farg.d = cvtr(arg, &env->fp_status);               \
    }                                                      \
    float_check_status(env);                               \
    return farg.ll;                                        \
}

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)
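
/*
 * Common code for the frin/friz/frip/frim helpers: round to an integral
 * value using the supplied rounding mode, then restore the mode selected by
 * FPSCR[RN].  Any inexact flag raised only by this rounding is cleared
 * again, since fri* must not set FPSCR[XX].
 */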

static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                              int rounding_mode)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN round */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        farg.ll = arg | 0x0008000000000000ULL;
    } else {
        int inexact = get_float_exception_flags(&env->fp_status) &
                      float_flag_inexact;
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode(env);

        /* fri* does not set FPSCR[XX] */
        if (!inexact) {
            env->fp_status.float_exception_flags &= ~float_flag_inexact;
        }
    }
    float_check_status(env);
    return farg.ll;
}

uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}

uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}

uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}

uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}
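
/*
 * The fused multiply-add family below follows the PowerPC definition by
 * performing the multiplication and the addition/subtraction in a 128-bit
 * intermediate format, so the final float64 result is rounded only once.
 * The negated forms (fnmadd/fnmsub) flip the sign of the result unless it
 * is a NaN.
 */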

/* fmadd - fmadd. */
uint64_t helper_fmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                      uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg3.d, &env->fp_status))) {
            /* sNaN operation */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
    }

    return farg1.ll;
}

/* fmsub - fmsub. */
uint64_t helper_fmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                      uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) &&
                  float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg3.d, &env->fp_status))) {
            /* sNaN operation */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
    }
    return farg1.ll;
}

/* fnmadd - fnmadd. */
uint64_t helper_fnmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                       uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg3.d, &env->fp_status))) {
            /* sNaN operation */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }
    return farg1.ll;
}

/* fnmsub - fnmsub. */
uint64_t helper_fnmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                       uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) &&
                  float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg3.d, &env->fp_status))) {
            /* sNaN operation */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }
    return farg1.ll;
}

/* frsp - frsp. */
uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN round to single */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

/* fsqrt - fsqrt. */
uint64_t helper_fsqrt(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_any_nan(farg.d))) {
        if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
            /* sNaN square root */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            farg.ll = float64_snan_to_qnan(farg.ll);
        }
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fre - fre. */
uint64_t helper_fre(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN reciprocal */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    return farg.d;
}

/* fres - fres. */
uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN reciprocal */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

/* frsqrte  - frsqrte. */
uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_any_nan(farg.d))) {
        if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
            /* sNaN reciprocal square root */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            farg.ll = float64_snan_to_qnan(farg.ll);
        }
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    }

    return farg.ll;
}

/* fsel - fsel. */
uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                     uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
        !float64_is_any_nan(farg1.d)) {
        return arg2;
    } else {
        return arg3;
    }
}
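
/*
 * helper_ftdiv and helper_ftsqrt compute the CR-style result nibble for the
 * ftdiv/ftsqrt test instructions: bit 3 is always set, bit 2 carries the
 * fg_flag ("FG") condition and bit 1 the fe_flag ("FE") condition.
 */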

uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* so must be denormalized.                      */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}

uint32_t helper_ftsqrt(uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_zero(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_neg(frb))) {
            fe_flag = 1;
        } else if (!float64_is_zero(frb) && (e_b <= (-1022+52))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* therefore must be denormalized.               */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
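
/*
 * fcmpu/fcmpo encode the comparison result as 0x08 (less than), 0x04
 * (greater than), 0x02 (equal) or 0x01 (unordered, i.e. at least one NaN),
 * and write it both to FPSCR[FPCC] and to the target CR field.  fcmpo
 * additionally reports VXVC for any NaN operand; fcmpu only reports VXSNAN
 * for signalling NaNs.
 */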

void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
        /* sNaN comparison */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
}

void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
            /* sNaN comparison */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC, 1);
        } else {
            /* qNaN comparison */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 1);
        }
    }
}
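
/*
 * The SPE and embedded-FP helpers below operate on env->vec_status, a
 * float_status separate from the one used by the classic FPU helpers, and
 * deliberately return 0 for NaN inputs instead of following IEEE 754.
 */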

/* Single-precision floating-point conversions */
static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN inputs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN inputs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32(u.f, &env->vec_status);
}

static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN inputs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN inputs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}
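
/*
 * The *cfsf/*cfuf/*ctsf/*ctuf variants convert between float32 and 32-bit
 * signed/unsigned fixed-point fractions: from-fraction conversions divide by
 * 2^32 after the integer conversion, to-fraction conversions multiply by
 * 2^32 before it.
 */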

static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN inputs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN inputs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}

#define HELPER_SPE_SINGLE_CONV(name)                              \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val)       \
    {                                                             \
        return e##name(env, val);                                 \
    }
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);

#define HELPER_SPE_VECTOR_CONV(name)                            \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)    \
    {                                                           \
        return ((uint64_t)e##name(env, val >> 32) << 32) |      \
            (uint64_t)e##name(env, val);                        \
    }
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);

/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

#define HELPER_SPE_SINGLE_ARITH(name)                                   \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                   \
        return e##name(env, op1, op2);                                  \
    }
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);

#define HELPER_SPE_VECTOR_ARITH(name)                                   \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                   \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |   \
            (uint64_t)e##name(env, op1, op2);                           \
    }
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);

/* Single-precision floating-point comparisons */
static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmplt(env, op1, op2);
}

static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpgt(env, op1, op2);
}

static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpeq(env, op1, op2);
}

#define HELPER_SINGLE_SPE_CMP(name)                                     \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                   \
        return e##name(env, op1, op2);                                  \
    }
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);
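
/*
 * evcmp_merge combines the per-element results of a vector compare:
 * bit 3 <- t0 (upper element), bit 2 <- t1 (lower element),
 * bit 1 <- t0 | t1, bit 0 <- t0 & t1.
 */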

static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}

#define HELPER_VECTOR_SPE_CMP(name)                                     \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                   \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),          \
                           e##name(env, op1, op2));                     \
    }
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
1498
 
 
1499
 
/* Double-precision floating-point conversion */
1500
 
uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
1501
 
{
1502
 
    CPU_DoubleU u;
1503
 
 
1504
 
    u.d = int32_to_float64(val, &env->vec_status);
1505
 
 
1506
 
    return u.ll;
1507
 
}
1508
 
 
1509
 
uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
1510
 
{
1511
 
    CPU_DoubleU u;
1512
 
 
1513
 
    u.d = int64_to_float64(val, &env->vec_status);
1514
 
 
1515
 
    return u.ll;
1516
 
}
1517
 
 
1518
 
uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
1519
 
{
1520
 
    CPU_DoubleU u;
1521
 
 
1522
 
    u.d = uint32_to_float64(val, &env->vec_status);
1523
 
 
1524
 
    return u.ll;
1525
 
}
1526
 
 
1527
 
uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
1528
 
{
1529
 
    CPU_DoubleU u;
1530
 
 
1531
 
    u.d = uint64_to_float64(val, &env->vec_status);
1532
 
 
1533
 
    return u.ll;
1534
 
}
1535
 
 
1536
 
uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
1537
 
{
1538
 
    CPU_DoubleU u;
1539
 
 
1540
 
    u.ll = val;
1541
 
    /* NaN are not treated the same way IEEE 754 does */
1542
 
    if (unlikely(float64_is_any_nan(u.d))) {
1543
 
        return 0;
1544
 
    }
1545
 
 
1546
 
    return float64_to_int32(u.d, &env->vec_status);
1547
 
}
1548
 
 
1549
 
uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
1550
 
{
1551
 
    CPU_DoubleU u;
1552
 
 
1553
 
    u.ll = val;
1554
 
    /* NaN are not treated the same way IEEE 754 does */
1555
 
    if (unlikely(float64_is_any_nan(u.d))) {
1556
 
        return 0;
1557
 
    }
1558
 
 
1559
 
    return float64_to_uint32(u.d, &env->vec_status);
1560
 
}
1561
 
 
1562
 
uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
1563
 
{
1564
 
    CPU_DoubleU u;
1565
 
 
1566
 
    u.ll = val;
1567
 
    /* NaN are not treated the same way IEEE 754 does */
1568
 
    if (unlikely(float64_is_any_nan(u.d))) {
1569
 
        return 0;
1570
 
    }
1571
 
 
1572
 
    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
1573
 
}
1574
 
 
1575
 
uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
1576
 
{
1577
 
    CPU_DoubleU u;
1578
 
 
1579
 
    u.ll = val;
1580
 
    /* NaN are not treated the same way IEEE 754 does */
1581
 
    if (unlikely(float64_is_any_nan(u.d))) {
1582
 
        return 0;
1583
 
    }
1584
 
 
1585
 
    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
1586
 
}
1587
 
 
1588
 
uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
1589
 
{
1590
 
    CPU_DoubleU u;
1591
 
 
1592
 
    u.ll = val;
1593
 
    /* NaN are not treated the same way IEEE 754 does */
1594
 
    if (unlikely(float64_is_any_nan(u.d))) {
1595
 
        return 0;
1596
 
    }
1597
 
 
1598
 
    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
1599
 
}
1600
 
 
1601
 
uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
1602
 
{
1603
 
    CPU_DoubleU u;
1604
 
 
1605
 
    u.ll = val;
1606
 
    /* NaN are not treated the same way IEEE 754 does */
1607
 
    if (unlikely(float64_is_any_nan(u.d))) {
1608
 
        return 0;
1609
 
    }
1610
 
 
1611
 
    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
1612
 
}
1613
 
 
1614
 
uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
1615
 
{
1616
 
    CPU_DoubleU u;
1617
 
    float64 tmp;
1618
 
 
1619
 
    u.d = int32_to_float64(val, &env->vec_status);
1620
 
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1621
 
    u.d = float64_div(u.d, tmp, &env->vec_status);
1622
 
 
1623
 
    return u.ll;
1624
 
}
1625
 
 
1626
 
uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
1627
 
{
1628
 
    CPU_DoubleU u;
1629
 
    float64 tmp;
1630
 
 
1631
 
    u.d = uint32_to_float64(val, &env->vec_status);
1632
 
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1633
 
    u.d = float64_div(u.d, tmp, &env->vec_status);
1634
 
 
1635
 
    return u.ll;
1636
 
}
1637
 
 
1638
 
uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
1639
 
{
1640
 
    CPU_DoubleU u;
1641
 
    float64 tmp;
1642
 
 
1643
 
    u.ll = val;
1644
 
    /* NaN are not treated the same way IEEE 754 does */
1645
 
    if (unlikely(float64_is_any_nan(u.d))) {
1646
 
        return 0;
1647
 
    }
1648
 
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
1649
 
    u.d = float64_mul(u.d, tmp, &env->vec_status);
1650
 
 
1651
 
    return float64_to_int32(u.d, &env->vec_status);
1652
 
}
1653
 
 
1654
 
uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
1655
 
{
1656
 
    CPU_DoubleU u;
1657
 
    float64 tmp;
1658
 
 
1659
 
    u.ll = val;
1660
 
    /* NaN are not treated the same way IEEE 754 does */
1661
 
    if (unlikely(float64_is_any_nan(u.d))) {
1662
 
        return 0;
1663
 
    }
1664
 
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
1665
 
    u.d = float64_mul(u.d, tmp, &env->vec_status);
1666
 
 
1667
 
    return float64_to_uint32(u.d, &env->vec_status);
1668
 
}
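
/* Editor's note (added, not in the upstream source): helper_efdcfsf,
 * helper_efdcfuf, helper_efdctsf and helper_efdctuf handle the SPE
 * "fractional" formats: the 32-bit operand is a fixed-point fraction scaled
 * by 2^32, hence the divide (on the way in) or multiply (on the way out) by
 * 1ULL << 32.  For example, helper_efdcfuf(env, 0x80000000) yields 0.5.
 */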

uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}

/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* Double-precision floating-point comparison helpers */
uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstlt(env, op1, op2);
}

uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstgt(env, op1, op2);
}

uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtsteq(env, op1, op2);
}

#define DECODE_SPLIT(opcode, shift1, nb1, shift2, nb2) \
    (((((opcode) >> (shift1)) & ((1 << (nb1)) - 1)) << nb2) |    \
     (((opcode) >> (shift2)) & ((1 << (nb2)) - 1)))

#define xT(opcode) DECODE_SPLIT(opcode, 0, 1, 21, 5)
#define xA(opcode) DECODE_SPLIT(opcode, 2, 1, 16, 5)
#define xB(opcode) DECODE_SPLIT(opcode, 1, 1, 11, 5)
#define xC(opcode) DECODE_SPLIT(opcode, 3, 1,  6, 5)
#define BF(opcode) (((opcode) >> (31-8)) & 7)
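
/* Editor's note (added, not in the upstream source): a VSX register number
 * is 6 bits wide and split across the opcode; DECODE_SPLIT glues the 1-bit
 * extension field (used as the high bit) onto the 5-bit register field.
 * For xT(), opcode bit 0 supplies the high bit and bits 25:21 the low five
 * bits, so an opcode with bit 0 set and T = 5 addresses VSR 37 (i.e. avr[5]
 * in getVSR() below).  BF() simply extracts the 3-bit CR field selector,
 * (opcode >> 23) & 7.
 */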

typedef union _ppc_vsr_t {
    uint64_t u64[2];
    uint32_t u32[4];
    float32 f32[4];
    float64 f64[2];
} ppc_vsr_t;

#if defined(HOST_WORDS_BIGENDIAN)
#define VsrW(i) u32[i]
#define VsrD(i) u64[i]
#else
#define VsrW(i) u32[3-(i)]
#define VsrD(i) u64[1-(i)]
#endif

static void getVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
{
    if (n < 32) {
        vsr->VsrD(0) = env->fpr[n];
        vsr->VsrD(1) = env->vsr[n];
    } else {
        vsr->u64[0] = env->avr[n-32].u64[0];
        vsr->u64[1] = env->avr[n-32].u64[1];
    }
}

static void putVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
{
    if (n < 32) {
        env->fpr[n] = vsr->VsrD(0);
        env->vsr[n] = vsr->VsrD(1);
    } else {
        env->avr[n-32].u64[0] = vsr->u64[0];
        env->avr[n-32].u64[1] = vsr->u64[1];
    }
}
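
/* Editor's note (added, not in the upstream source): VSRs 0-31 are assembled
 * from the corresponding FPR (doubleword 0) plus the env->vsr[] extension
 * (doubleword 1), while VSRs 32-63 alias the Altivec registers avr[0..31].
 * The VsrD()/VsrW() accessors hide the host's doubleword/word ordering.
 */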

#define float64_to_float64(x, env) x
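
/* Editor's note (added, not in the upstream source): this identity macro
 * lets the VSX_MADD macro below write float64_to_##tp(...) for both element
 * types: for tp == float32 it resolves to the real conversion, for
 * tp == float64 it expands to the value itself and the status argument is
 * simply dropped.
 */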


/* VSX_ADD_SUB - VSX floating point add/subtract
 *   name  - instruction mnemonic
 *   op    - operation (add or sub)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                    \
void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_##op(xa.fld, xb.fld, &tstat);                          \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {      \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf(env, xt.fld);                                \
        }                                                                    \
    }                                                                        \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
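
/* Editor's note (added, not in the upstream source): in the instantiations
 * above, the scalar single-precision forms (xsaddsp, xssubsp) pass r2sp=1:
 * the operation is still carried out in float64 and the result is then
 * rounded to single precision via helper_frsp().  sfprf=1 is used only by
 * the scalar ops, which are the ones that update FPSCR[FPRF].
 */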

/* VSX_MUL - VSX floating point multiply
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_mul(xa.fld, xb.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if ((tp##_is_infinity(xa.fld) && tp##_is_zero(xb.fld)) ||        \
                (tp##_is_infinity(xb.fld) && tp##_is_zero(xa.fld))) {        \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf(env, xt.fld);                                \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)

/* VSX_DIV - VSX floating point divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                               \
void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    int i;                                                                    \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        xt.fld = tp##_div(xa.fld, xb.fld, &tstat);                            \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {       \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, sfprf);     \
            } else if (tp##_is_zero(xa.fld) &&                                \
                tp##_is_zero(xb.fld)) {                                       \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf);     \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||               \
                tp##_is_signaling_nan(xb.fld, &tstat)) {                      \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);    \
            }                                                                 \
        }                                                                     \
                                                                              \
        if (r2sp) {                                                           \
            xt.fld = helper_frsp(env, xt.fld);                                \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf(env, xt.fld);                                 \
        }                                                                     \
    }                                                                         \
                                                                              \
    putVSR(xT(opcode), &xt, env);                                             \
    float_check_status(env);                                                  \
}

VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)

/* VSX_RE  - VSX floating point reciprocal estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                                \
void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
{                                                                             \
    ppc_vsr_t xt, xb;                                                         \
    int i;                                                                    \
                                                                              \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        if (unlikely(tp##_is_signaling_nan(xb.fld, &env->fp_status))) {       \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);    \
        }                                                                     \
        xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status);                 \
                                                                              \
        if (r2sp) {                                                           \
            xt.fld = helper_frsp(env, xt.fld);                                \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf(env, xt.fld);                                 \
        }                                                                     \
    }                                                                         \
                                                                              \
    putVSR(xT(opcode), &xt, env);                                             \
    float_check_status(env);                                                  \
}

VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)

/* VSX_SQRT - VSX floating point square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf(env, xt.fld);                                \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)

/* VSX_RSQRTE - VSX floating point reciprocal square root estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                           \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
        xt.fld = tp##_div(tp##_one, xt.fld, &tstat);                         \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf(env, xt.fld);                                \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
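
/* Editor's note (added, not in the upstream source): although the estimate
 * instructions only require an approximation, VSX_RE and VSX_RSQRTE compute
 * the exact 1/x and 1/sqrt(x) with full-precision softfloat divide/sqrt,
 * which satisfies (and exceeds) the required accuracy of the estimate ops.
 */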

/* VSX_TDIV - VSX floating point test for divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   emax  - maximum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xa, xb;                                                   \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    getVSR(xA(opcode), &xa, env);                                       \
    getVSR(xB(opcode), &xb, env);                                       \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xa.fld) ||                        \
                     tp##_is_infinity(xb.fld) ||                        \
                     tp##_is_zero(xb.fld))) {                           \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_a = ppc_##tp##_get_unbiased_exp(xa.fld);              \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
                                                                        \
            if (unlikely(tp##_is_any_nan(xa.fld) ||                     \
                         tp##_is_any_nan(xb.fld))) {                    \
                fe_flag = 1;                                            \
            } else if ((e_b <= emin) || (e_b >= (emax-2))) {            \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xa.fld) &&                         \
                       (((e_a - e_b) >= emax) ||                        \
                        ((e_a - e_b) <= (emin+1)) ||                    \
                         (e_a <= (emin+nbits)))) {                      \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
                /* XB is not zero because of the above check and */     \
                /* so must be denormalized.                      */     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
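
/* Editor's note (added, not in the upstream source): the test-for-divide and
 * test-for-square-root helpers never touch FPSCR; they only set the target
 * CR field to 0x8 | (fg_flag << 2) | (fe_flag << 1), which software can
 * test before deciding whether a full-accuracy divide/sqrt is safe to issue.
 */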

/* VSX_TSQRT - VSX floating point test for square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xa, xb;                                                   \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    getVSR(xA(opcode), &xa, env);                                       \
    getVSR(xB(opcode), &xb, env);                                       \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xb.fld) ||                        \
                     tp##_is_zero(xb.fld))) {                           \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
                                                                        \
            if (unlikely(tp##_is_any_nan(xb.fld))) {                    \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_zero(xb.fld))) {                \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_neg(xb.fld))) {                 \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xb.fld) &&                         \
                      (e_b <= (emin+nbits))) {                          \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
                /* XB is not zero because of the above check and */     \
                /* therefore must be denormalized.               */     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)

/* VSX_MADD - VSX floating point multiply/add variations
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   maddflgs - flags for the float*muladd routine that control the
 *           various forms (madd, msub, nmadd, nmsub)
 *   afrm  - A form (1=A, 0=M)
 *   sfprf - set FPRF
 */
#define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp)              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
{                                                                             \
    ppc_vsr_t xt_in, xa, xb, xt_out;                                          \
    ppc_vsr_t *b, *c;                                                         \
    int i;                                                                    \
                                                                              \
    if (afrm) { /* AxB + T */                                                 \
        b = &xb;                                                              \
        c = &xt_in;                                                           \
    } else { /* AxT + B */                                                    \
        b = &xt_in;                                                           \
        c = &xb;                                                              \
    }                                                                         \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt_in, env);                                          \
                                                                              \
    xt_out = xt_in;                                                           \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
            /* Avoid double rounding errors by rounding the intermediate */   \
            /* result to odd.                                            */   \
            set_float_rounding_mode(float_round_to_zero, &tstat);             \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
                                       maddflgs, &tstat);                     \
            xt_out.fld |= (get_float_exception_flags(&tstat) &                \
                              float_flag_inexact) != 0;                       \
        } else {                                                              \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
                                        maddflgs, &tstat);                    \
        }                                                                     \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            if (tp##_is_signaling_nan(xa.fld, &tstat) ||                      \
                tp##_is_signaling_nan(b->fld, &tstat) ||                      \
                tp##_is_signaling_nan(c->fld, &tstat)) {                      \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);    \
                tstat.float_exception_flags &= ~float_flag_invalid;           \
            }                                                                 \
            if ((tp##_is_infinity(xa.fld) && tp##_is_zero(b->fld)) ||         \
                (tp##_is_zero(xa.fld) && tp##_is_infinity(b->fld))) {         \
                xt_out.fld = float64_to_##tp(float_invalid_op_excp(env,       \
                    POWERPC_EXCP_FP_VXIMZ, sfprf), &env->fp_status);          \
                tstat.float_exception_flags &= ~float_flag_invalid;           \
            }                                                                 \
            if ((tstat.float_exception_flags & float_flag_invalid) &&         \
                ((tp##_is_infinity(xa.fld) ||                                 \
                  tp##_is_infinity(b->fld)) &&                                \
                  tp##_is_infinity(c->fld))) {                                \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf);     \
            }                                                                 \
        }                                                                     \
                                                                              \
        if (r2sp) {                                                           \
            xt_out.fld = helper_frsp(env, xt_out.fld);                        \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf(env, xt_out.fld);                             \
        }                                                                     \
    }                                                                         \
    putVSR(xT(opcode), &xt_out, env);                                         \
    float_check_status(env);                                                  \
}

#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)
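
/* Editor's note (added, not in the upstream source): float32_muladd() and
 * float64_muladd() compute (a * b) + c with a single rounding; the flag
 * combinations above select the fused variants:
 *   MADD_FLGS  ->   a*b + c        MSUB_FLGS  ->   a*b - c
 *   NMADD_FLGS -> -(a*b + c)       NMSUB_FLGS -> -(a*b - c)
 */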

VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)

VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)

VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)

VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)

/* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
 *   op    - instruction mnemonic
 *   cmp   - comparison operation
 *   exp   - expected result of comparison
 *   svxvc - set VXVC bit
 */
#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc)                                \
void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false;            \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
                                                                              \
    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||              \
        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {              \
        vxsnan_flag = true;                                                   \
        if (fpscr_ve == 0 && svxvc) {                                         \
            vxvc_flag = true;                                                 \
        }                                                                     \
    } else if (svxvc) {                                                       \
        vxvc_flag = float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||      \
            float64_is_quiet_nan(xb.VsrD(0), &env->fp_status);                \
    }                                                                         \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);                \
    }                                                                         \
    if (vxvc_flag) {                                                          \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);                  \
    }                                                                         \
    vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag);                        \
                                                                              \
    if (!vex_flag) {                                                          \
        if (float64_##cmp(xb.VsrD(0), xa.VsrD(0), &env->fp_status) == exp) {  \
            xt.VsrD(0) = -1;                                                  \
            xt.VsrD(1) = 0;                                                   \
        } else {                                                              \
            xt.VsrD(0) = 0;                                                   \
            xt.VsrD(1) = 0;                                                   \
        }                                                                     \
    }                                                                         \
    putVSR(xT(opcode), &xt, env);                                             \
    helper_float_check_status(env);                                           \
}

VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
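
/* Editor's note (added, not in the upstream source): the comparison above is
 * written as float64_<cmp>(xb, xa) == exp, i.e. with the operands reversed,
 * so xscmpgedp uses "le" (xb <= xa is xa >= xb) and xscmpgtdp uses "lt",
 * while xscmpnedp reuses "eq" with exp == 0.  On a match, doubleword 0 of
 * the target is set to all ones, otherwise to zero.
 */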
2416
 
 
2417
 
#define VSX_SCALAR_CMP(op, ordered)                                      \
2418
 
void helper_##op(CPUPPCState *env, uint32_t opcode)                      \
2419
 
{                                                                        \
2420
 
    ppc_vsr_t xa, xb;                                                    \
2421
 
    uint32_t cc = 0;                                                     \
2422
 
                                                                         \
2423
 
    getVSR(xA(opcode), &xa, env);                                        \
2424
 
    getVSR(xB(opcode), &xb, env);                                        \
2425
 
                                                                         \
2426
 
    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||                       \
2427
 
                 float64_is_any_nan(xb.VsrD(0)))) {                      \
2428
 
        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||     \
2429
 
            float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {     \
2430
 
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);       \
2431
 
        }                                                                \
2432
 
        if (ordered) {                                                   \
2433
 
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);         \
2434
 
        }                                                                \
2435
 
        cc = 1;                                                          \
2436
 
    } else {                                                             \
2437
 
        if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {       \
2438
 
            cc = 8;                                                      \
2439
 
        } else if (!float64_le(xa.VsrD(0), xb.VsrD(0),                   \
2440
 
                               &env->fp_status)) { \
2441
 
            cc = 4;                                                      \
2442
 
        } else {                                                         \
2443
 
            cc = 2;                                                      \
2444
 
        }                                                                \
2445
 
    }                                                                    \
2446
 
                                                                         \
2447
 
    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                 \
2448
 
    env->fpscr |= cc << FPSCR_FPRF;                                      \
2449
 
    env->crf[BF(opcode)] = cc;                                           \
2450
 
                                                                         \
2451
 
    float_check_status(env);                                             \
2452
 
}
2453
 
 
2454
 
VSX_SCALAR_CMP(xscmpodp, 1)
2455
 
VSX_SCALAR_CMP(xscmpudp, 0)

/* VSX_MAX_MIN - VSX floating point maximum/minimum
 *   name  - instruction mnemonic
 *   op    - operation (max or min)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    int i;                                                                    \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status);                  \
        if (unlikely(tp##_is_signaling_nan(xa.fld, &env->fp_status) ||        \
                     tp##_is_signaling_nan(xb.fld, &env->fp_status))) {       \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);            \
        }                                                                     \
    }                                                                         \
                                                                              \
    putVSR(xT(opcode), &xt, env);                                             \
    float_check_status(env);                                                  \
}

VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
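
/* Illustrative expansion (added for clarity, not part of the original
 * source): VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0)) above
 * produces, with the single-iteration loop flattened, roughly:
 *
 *   void helper_xsmaxdp(CPUPPCState *env, uint32_t opcode)
 *   {
 *       ppc_vsr_t xt, xa, xb;
 *       getVSR(xA(opcode), &xa, env);
 *       getVSR(xB(opcode), &xb, env);
 *       getVSR(xT(opcode), &xt, env);
 *       xt.VsrD(0) = float64_maxnum(xa.VsrD(0), xb.VsrD(0), &env->fp_status);
 *       if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||
 *           float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {
 *           float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
 *       }
 *       putVSR(xT(opcode), &xt, env);
 *       float_check_status(env);
 *   }
 *
 * i.e. a maxNum over the single doubleword element, with VXSNAN raised
 * when either input is a signaling NaN.
 */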

/* VSX_CMP - VSX floating point compare
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   cmp   - comparison operation
 *   svxvc - set VXVC bit
 *   exp   - expected result of comparison
 */
#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                       \
{                                                                         \
    ppc_vsr_t xt, xa, xb;                                                 \
    int i;                                                                \
    int all_true = 1;                                                     \
    int all_false = 1;                                                    \
                                                                          \
    getVSR(xA(opcode), &xa, env);                                         \
    getVSR(xB(opcode), &xb, env);                                         \
    getVSR(xT(opcode), &xt, env);                                         \
                                                                          \
    for (i = 0; i < nels; i++) {                                          \
        if (unlikely(tp##_is_any_nan(xa.fld) ||                           \
                     tp##_is_any_nan(xb.fld))) {                          \
            if (tp##_is_signaling_nan(xa.fld, &env->fp_status) ||         \
                tp##_is_signaling_nan(xb.fld, &env->fp_status)) {         \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);    \
            }                                                             \
            if (svxvc) {                                                  \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);      \
            }                                                             \
            xt.fld = 0;                                                   \
            all_true = 0;                                                 \
        } else {                                                          \
            if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == exp) {     \
                xt.fld = -1;                                              \
                all_false = 0;                                            \
            } else {                                                      \
                xt.fld = 0;                                               \
                all_true = 0;                                             \
            }                                                             \
        }                                                                 \
    }                                                                     \
                                                                          \
    putVSR(xT(opcode), &xt, env);                                         \
    if ((opcode >> (31-21)) & 1) {                                        \
        env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);       \
    }                                                                     \
    float_check_status(env);                                              \
}

VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
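
/* Added note (not part of the original source): the per-element test in
 * VSX_CMP is written as tp##_cmp(xb, xa) == exp, with the operands
 * swapped, so e.g. xvcmpgedp evaluates float64_le(xb, xa), i.e. "xa >= xb",
 * and the "ne" forms reuse the eq comparison with exp == 0.  The svxvc
 * flag makes the gt/ge compares raise VXVC for any NaN input, while eq/ne
 * raise only VXSNAN for signaling NaNs.  When bit 21 (IBM numbering) of
 * the opcode is set - extracted above as (opcode >> (31-21)) & 1, i.e. the
 * record forms such as xvcmpeqdp. - CR6 is written with 0x8 if every
 * element compared true and 0x2 if every element compared false.
 */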

/* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf)    \
void helper_##op(CPUPPCState *env, uint32_t opcode)                \
{                                                                  \
    ppc_vsr_t xt, xb;                                              \
    int i;                                                         \
                                                                   \
    getVSR(xB(opcode), &xb, env);                                  \
    getVSR(xT(opcode), &xt, env);                                  \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);        \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,               \
                                            &env->fp_status))) {   \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                 \
        }                                                          \
        if (sfprf) {                                               \
            helper_compute_fprf(env, ttp##_to_float64(xt.tfld,     \
                                &env->fp_status));                 \
        }                                                          \
    }                                                              \
                                                                   \
    putVSR(xT(opcode), &xt, env);                                  \
    float_check_status(env);                                       \
}

VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2*i), 0)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2*i), VsrD(i), 0)
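
/* Added note (not part of the original source): helper_compute_fprf()
 * classifies a float64 value, so when the target type is float32 the
 * freshly converted result is widened again via ttp##_to_float64 purely
 * to derive the FPRF bits.  In the two-element vector forms the
 * single-precision values sit in words 0 and 2 (VsrW(2*i)); the
 * odd-numbered target words are left unchanged by these helpers.
 */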

uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return (uint64_t)float64_to_float32(xb, &tstat) << 32;
}

uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return float32_to_float64(xb >> 32, &tstat);
}
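
/* Added note (not part of the original source): the trailing "n" marks the
 * non-signalling converts.  Both helpers above work on a scratch copy of
 * env->fp_status with the exception flags cleared and never write it back,
 * so these conversions neither raise FP exceptions nor disturb the
 * accumulated status flags.
 */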

/* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (int32, uint32, int64 or uint64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   rnan  - resulting NaN
 */
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(stp##_is_any_nan(xb.sfld))) {                           \
            if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) {          \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);       \
            }                                                                \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);            \
            xt.tfld = rnan;                                                  \
        } else {                                                             \
            xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                \
                          &env->fp_status);                                  \
            if (env->fp_status.float_exception_flags & float_flag_invalid) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);        \
            }                                                                \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2*i), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2*i), 0U)
VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2*i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2*i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
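
/* Added note (not part of the original source): rnan is the integer
 * pattern written back when the source element is a NaN - the most
 * negative representable value (0x8000...) for signed targets, 0 for
 * unsigned targets.  The conversion itself always truncates (the
 * _round_to_zero variants) regardless of FPSCR[RN], and VXCVI is raised
 * when the truncated value does not fit in the target type.
 */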

/* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (int32, uint32, int64 or uint64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfprf - set FPRF
 *   r2sp  - round the intermediate result to single precision
 */
#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)  \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xt, xb;                                                   \
    int i;                                                              \
                                                                        \
    getVSR(xB(opcode), &xb, env);                                       \
    getVSR(xT(opcode), &xt, env);                                       \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);             \
        if (r2sp) {                                                     \
            xt.tfld = helper_frsp(env, xt.tfld);                        \
        }                                                               \
        if (sfprf) {                                                    \
            helper_compute_fprf(env, xt.tfld);                          \
        }                                                               \
    }                                                                   \
                                                                        \
    putVSR(xT(opcode), &xt, env);                                       \
    float_check_status(env);                                            \
}

VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2*i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2*i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2*i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
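
/* Added note (not part of the original source): for xscvsxdsp/xscvuxdsp
 * the result stays in the double-precision field VsrD(0), but r2sp = 1
 * first squashes it to single precision via helper_frsp(); the xvcv...sp
 * vector forms instead convert directly to float32 words.  FPRF is only
 * updated for the scalar (xs...) variants, where sfprf = 1.
 */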

/* For "use current rounding mode", define a value that will not be one of
 * the existing rounding mode enums.
 */
#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
  float_round_up + float_round_to_zero)

/* VSX_ROUND - VSX floating point round
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   rmode - rounding mode
 *   sfprf - set FPRF
 */
#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                     \
void helper_##op(CPUPPCState *env, uint32_t opcode)                    \
{                                                                      \
    ppc_vsr_t xt, xb;                                                  \
    int i;                                                             \
    getVSR(xB(opcode), &xb, env);                                      \
    getVSR(xT(opcode), &xt, env);                                      \
                                                                       \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        set_float_rounding_mode(rmode, &env->fp_status);               \
    }                                                                  \
                                                                       \
    for (i = 0; i < nels; i++) {                                       \
        if (unlikely(tp##_is_signaling_nan(xb.fld,                     \
                                           &env->fp_status))) {        \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);     \
            xt.fld = tp##_snan_to_qnan(xb.fld);                        \
        } else {                                                       \
            xt.fld = tp##_round_to_int(xb.fld, &env->fp_status);       \
        }                                                              \
        if (sfprf) {                                                   \
            helper_compute_fprf(env, xt.fld);                          \
        }                                                              \
    }                                                                  \
                                                                       \
    /* If this is not a "use current rounding mode" instruction,       \
     * then inhibit setting of the XX bit and restore rounding         \
     * mode from FPSCR */                                              \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        fpscr_set_rounding_mode(env);                                  \
        env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
    }                                                                  \
                                                                       \
    putVSR(xT(opcode), &xt, env);                                      \
    float_check_status(env);                                           \
}

VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)

VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)

VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
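
/* Added note (not part of the original source): the mnemonic suffixes map
 * directly to the rmode argument above:
 *   ...i   - float_round_ties_away  (round to nearest, ties away from zero)
 *   ...ic  - FLOAT_ROUND_CURRENT    (use the rounding mode from FPSCR)
 *   ...im  - float_round_down       (toward -infinity)
 *   ...ip  - float_round_up         (toward +infinity)
 *   ...iz  - float_round_to_zero    (truncate)
 */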

uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
{
    helper_reset_fpstatus(env);

    uint64_t xt = helper_frsp(env, xb);

    helper_compute_fprf(env, xt);
    float_check_status(env);
    return xt;
}