/*
 * The implementations contained in this file are heavily based on the
 * implementations found in the Berkeley SoftFloat library. As such, they are
 * licensed under the same 3-clause BSD license:
 *
 * License for Berkeley SoftFloat Release 3e
 *
 * The following applies to the whole of SoftFloat Release 3e as well as to
 * each source file individually.
 *
 * Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 The Regents of the
 * University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions, and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions, and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#extension GL_ARB_gpu_shader_int64 : enable
#extension GL_ARB_shader_bit_encoding : enable
#extension GL_EXT_shader_integer_mix : enable
#extension GL_MESA_shader_integer_functions : enable

/* Software IEEE floating-point rounding mode.
 * GLSL spec section "4.7.1 Range and Precision":
 * "The rounding mode cannot be set and is undefined."
 * But here, we are able to define the rounding mode at compile time.
 */
#define FLOAT_ROUND_NEAREST_EVEN 0
#define FLOAT_ROUND_TO_ZERO      1
#define FLOAT_ROUND_DOWN         2
#define FLOAT_ROUND_UP           3
#define FLOAT_ROUNDING_MODE      FLOAT_ROUND_NEAREST_EVEN

/* Relax propagation of NaN.  Binary operations with a NaN source will still
 * produce a NaN result, but it won't follow strict IEEE rules.
 */
#define RELAXED_NAN_PROPAGATION

/* Absolute value of a Float64 :
 * Clear the sign bit.
 */
uint64_t
__fabs64(uint64_t __a)
{
   uvec2 a = unpackUint2x32(__a);

   a.y &= 0x7FFFFFFFu;

   return packUint2x32(a);
}

/* Returns true if the double-precision floating-point value `a' is a NaN;
 * otherwise returns false.
 */
bool
__is_nan(uint64_t __a)
{
   uvec2 a = unpackUint2x32(__a);
   return (0xFFE00000u <= (a.y<<1)) &&
      ((a.x != 0u) || ((a.y & 0x000FFFFFu) != 0u));
}

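/* Illustrative example (values chosen for this note, not from the original
 * source): for the quiet NaN 0x7FF8000000000000ul, a.y = 0x7FF80000u, so
 * (a.y<<1) = 0xFFF00000u >= 0xFFE00000u and (a.y & 0x000FFFFFu) != 0u, hence
 * the result is true.  For infinity (0x7FF0000000000000ul) the exponent test
 * passes but both fraction terms are zero, so the result is false.
 */
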
/* Negate value of a Float64 :
 * Toggle the sign bit.
 */
uint64_t
__fneg64(uint64_t __a)
{
   uvec2 a = unpackUint2x32(__a);

   a.y ^= (1u << 31);

   return packUint2x32(a);
}

/* Returns ±1.0 with the sign of `a' for nonzero inputs (including NaN), and
 * 0.0 for ±0.
 */
uint64_t
__fsign64(uint64_t __a)
{
   uvec2 a = unpackUint2x32(__a);
   uvec2 retval;
   retval.x = 0u;
   retval.y = mix((a.y & 0x80000000u) | 0x3FF00000u, 0u, (a.y << 1 | a.x) == 0u);
   return packUint2x32(retval);
}

/* Returns the fraction bits of the double-precision floating-point value `a'.*/
uint
__extractFloat64FracLo(uint64_t a)
{
   return unpackUint2x32(a).x;
}

uint
__extractFloat64FracHi(uint64_t a)
{
   return unpackUint2x32(a).y & 0x000FFFFFu;
}

/* Returns the exponent bits of the double-precision floating-point value `a'.*/
int
__extractFloat64Exp(uint64_t __a)
{
   uvec2 a = unpackUint2x32(__a);
   return int((a.y>>20) & 0x7FFu);
}

/* Equality comparison assuming neither input is NaN.  +0 and -0 compare
 * equal.
 */
bool
__feq64_nonnan(uint64_t __a, uint64_t __b)
{
   uvec2 a = unpackUint2x32(__a);
   uvec2 b = unpackUint2x32(__b);
   return (a.x == b.x) &&
          ((a.y == b.y) || ((a.x == 0u) && (((a.y | b.y)<<1) == 0u)));
}

/* Returns true if the double-precision floating-point value `a' is equal to the
 * corresponding value `b', and false otherwise.  The comparison is performed
 * according to the IEEE Standard for Floating-Point Arithmetic.
 */
bool
__feq64(uint64_t a, uint64_t b)
{
   if (__is_nan(a) || __is_nan(b))
      return false;

   return __feq64_nonnan(a, b);
}

/* Returns true if the double-precision floating-point value `a' is not equal
 * to the corresponding value `b', and false otherwise.  The comparison is
 * performed according to the IEEE Standard for Floating-Point Arithmetic.
 */
bool
__fneu64(uint64_t a, uint64_t b)
{
   if (__is_nan(a) || __is_nan(b))
      return true;

   return !__feq64_nonnan(a, b);
}

/* Returns the sign bit of the double-precision floating-point value `a'.*/
uint
__extractFloat64Sign(uint64_t a)
{
   return unpackUint2x32(a).y & 0x80000000u;
}

/* Returns true if the signed 64-bit value formed by concatenating `a0' and
 * `a1' is less than the signed 64-bit value formed by concatenating `b0' and
 * `b1'.  Otherwise, returns false.
 */
bool
ilt64(uint a0, uint a1, uint b0, uint b1)
{
   return (int(a0) < int(b0)) || ((a0 == b0) && (a1 < b1));
}

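/* Illustrative example (values chosen for this note): -1 is
 * (a0, a1) = (0xFFFFFFFFu, 0xFFFFFFFFu) and 1 is (b0, b1) = (0u, 1u).
 * int(a0) < int(b0) is -1 < 0, so ilt64 returns true, matching the signed
 * 64-bit ordering.
 */
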
bool
__flt64_nonnan(uint64_t __a, uint64_t __b)
{
   uvec2 a = unpackUint2x32(__a);
   uvec2 b = unpackUint2x32(__b);

   /* IEEE 754 floating point numbers are specifically designed so that, with
    * two exceptions, values can be compared by bit-casting to signed integers
    * with the same number of bits.
    *
    * From https://en.wikipedia.org/wiki/IEEE_754-1985#Comparing_floating-point_numbers:
    *
    *    When comparing as 2's-complement integers: If the sign bits differ,
    *    the negative number precedes the positive number, so 2's complement
    *    gives the correct result (except that negative zero and positive zero
    *    should be considered equal).  If both values are positive, the 2's
    *    complement comparison again gives the correct result.  Otherwise (two
    *    negative numbers), the correct FP ordering is the opposite of the 2's
    *    complement ordering.
    *
    * The logic implied by the above quotation is:
    *
    *    !both_are_zero(a, b) && (both_negative(a, b) ? a > b : a < b)
    *
    * This is equivalent to
    *
    *    fneu(a, b) && (both_negative(a, b) ? a >= b : a < b)
    *
    *    fneu(a, b) && (both_negative(a, b) ? !(a < b) : a < b)
    *
    *    fneu(a, b) && ((both_negative(a, b) && !(a < b)) ||
    *                   (!both_negative(a, b) && (a < b)))
    *
    * (A && !B) || (!A && B) is (A xor B), which is implemented here using !=.
    *
    *    fneu(a, b) && (both_negative(a, b) != (a < b))
    */
   bool lt = ilt64(a.y, a.x, b.y, b.x);
   bool both_negative = (a.y & b.y & 0x80000000u) != 0u;

   return !__feq64_nonnan(__a, __b) && (lt != both_negative);
}

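/* Illustrative example (values chosen for this note): for a = -2.0
 * (0xC000000000000000ul) and b = -1.0 (0xBFF0000000000000ul), the integer
 * comparison gives lt = false (0xC0000000u compares greater than 0xBFF00000u
 * as a signed int), while both_negative is true, so lt != both_negative
 * yields true: -2.0 < -1.0.
 */
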
/* Returns true if the double-precision floating-point value `a' is less than
 * the corresponding value `b', and false otherwise.  The comparison is
 * performed according to the IEEE Standard for Floating-Point Arithmetic.
 */
bool
__flt64(uint64_t a, uint64_t b)
{
   /* This weird layout matters.  Doing the "obvious" thing results in extra
    * flow control being inserted to implement the short-circuit evaluation
    * rules.  Flow control is bad!
    */
   bool x = !__is_nan(a);
   bool y = !__is_nan(b);
   bool z = __flt64_nonnan(a, b);

   return (x && y && z);
}

/* Returns true if the double-precision floating-point value `a' is greater
 * than or equal to the corresponding value `b', and false otherwise.  The
 * comparison is performed according to the IEEE Standard for Floating-Point
 * Arithmetic.
 */
bool
__fge64(uint64_t a, uint64_t b)
{
   /* This weird layout matters.  Doing the "obvious" thing results in extra
    * flow control being inserted to implement the short-circuit evaluation
    * rules.  Flow control is bad!
    */
   bool x = !__is_nan(a);
   bool y = !__is_nan(b);
   bool z = !__flt64_nonnan(a, b);

   return (x && y && z);
}

uint64_t
__fsat64(uint64_t __a)
{
   uvec2 a = unpackUint2x32(__a);

   /* fsat(NaN) should be zero. */
   if (__is_nan(__a) || int(a.y) < 0)
      return 0ul;

   /* IEEE 754 floating point numbers are specifically designed so that, with
    * two exceptions, values can be compared by bit-casting to signed integers
    * with the same number of bits.
    *
    * From https://en.wikipedia.org/wiki/IEEE_754-1985#Comparing_floating-point_numbers:
    *
    *    When comparing as 2's-complement integers: If the sign bits differ,
    *    the negative number precedes the positive number, so 2's complement
    *    gives the correct result (except that negative zero and positive zero
    *    should be considered equal).  If both values are positive, the 2's
    *    complement comparison again gives the correct result.  Otherwise (two
    *    negative numbers), the correct FP ordering is the opposite of the 2's
    *    complement ordering.
    *
    * We know that both values are not negative, and we know that at least one
    * value is not zero.  Therefore, we can just use the 2's complement
    * comparison ordering.
    */
   if (ilt64(0x3FF00000u, 0x00000000u, a.y, a.x))
      return 0x3FF0000000000000ul;

   return __a;
}

/* Adds the 64-bit value formed by concatenating `a0' and `a1' to the 64-bit
 * value formed by concatenating `b0' and `b1'.  Addition is modulo 2^64, so
 * any carry out is lost.  The result is broken into two 32-bit pieces which
 * are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
 */
void
__add64(uint a0, uint a1, uint b0, uint b1,
        out uint z0Ptr,
        out uint z1Ptr)
{
   uint z1 = a1 + b1;
   z1Ptr = z1;
   z0Ptr = a0 + b0 + uint(z1 < a1);
}

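/* Illustrative example (values chosen for this note): adding 1 to
 * 0x00000000FFFFFFFF, i.e. (a0, a1) = (0u, 0xFFFFFFFFu) and (b0, b1) =
 * (0u, 1u): z1 wraps to 0u, so uint(z1 < a1) = 1u carries into the high word
 * and the result is (z0Ptr, z1Ptr) = (1u, 0u), i.e. 0x0000000100000000.
 */
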
/* Subtracts the 64-bit value formed by concatenating `b0' and `b1' from the
 * 64-bit value formed by concatenating `a0' and `a1'.  Subtraction is modulo
 * 2^64, so any borrow out (carry out) is lost.  The result is broken into two
 * 32-bit pieces which are stored at the locations pointed to by `z0Ptr' and
 * `z1Ptr'.
 */
void
__sub64(uint a0, uint a1, uint b0, uint b1,
        out uint z0Ptr,
        out uint z1Ptr)
{
   z1Ptr = a1 - b1;
   z0Ptr = a0 - b0 - uint(a1 < b1);
}

/* Shifts the 64-bit value formed by concatenating `a0' and `a1' right by the
 * number of bits given in `count'.  If any nonzero bits are shifted off, they
 * are "jammed" into the least significant bit of the result by setting the
 * least significant bit to 1.  The value of `count' can be arbitrarily large;
 * in particular, if `count' is greater than 64, the result will be either 0
 * or 1, depending on whether the concatenation of `a0' and `a1' is zero or
 * nonzero.  The result is broken into two 32-bit pieces which are stored at
 * the locations pointed to by `z0Ptr' and `z1Ptr'.
 */
void
__shift64RightJamming(uint a0,
                      uint a1,
                      int count,
                      out uint z0Ptr,
                      out uint z1Ptr)
{
   uint z0;
   uint z1;
   int negCount = (-count) & 31;

   z0 = mix(0u, a0, count == 0);
   z0 = mix(z0, (a0 >> count), count < 32);

   z1 = uint((a0 | a1) != 0u); /* count >= 64 */
   uint z1_lt64 = (a0>>(count & 31)) | uint(((a0<<negCount) | a1) != 0u);
   z1 = mix(z1, z1_lt64, count < 64);
   z1 = mix(z1, (a0 | uint(a1 != 0u)), count == 32);
   uint z1_lt32 = (a0<<negCount) | (a1>>count) | uint ((a1<<negCount) != 0u);
   z1 = mix(z1, z1_lt32, count < 32);
   z1 = mix(z1, a1, count == 0);

   z1Ptr = z1;
   z0Ptr = z0;
}

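/* Illustrative example (values chosen for this note): for (a0, a1) = (0u, 9u)
 * and count = 2, the plain shift gives 2u (binary 10), but a nonzero bit (the
 * low 1 of binary 1001) was shifted off, so it is jammed into the LSB and the
 * result is (0u, 3u).  The sticky LSB keeps a later round-to-nearest step
 * from treating the value as exactly 2.
 */
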
/* Shifts the 96-bit value formed by concatenating `a0', `a1', and `a2' right
 * by 32 _plus_ the number of bits given in `count'.  The shifted result is
 * at most 64 nonzero bits; these are broken into two 32-bit pieces which are
 * stored at the locations pointed to by `z0Ptr' and `z1Ptr'.  The bits shifted
 * off form a third 32-bit result as follows:  The _last_ bit shifted off is
 * the most-significant bit of the extra result, and the other 31 bits of the
 * extra result are all zero if and only if _all_but_the_last_ bits shifted off
 * were all zero.  This extra result is stored in the location pointed to by
 * `z2Ptr'.  The value of `count' can be arbitrarily large.
 * (This routine makes more sense if `a0', `a1', and `a2' are considered
 * to form a fixed-point value with binary point between `a1' and `a2'.  This
 * fixed-point value is shifted right by the number of bits given in `count',
 * and the integer part of the result is returned at the locations pointed to
 * by `z0Ptr' and `z1Ptr'.  The fractional part of the result may be slightly
 * corrupted as described above, and is returned at the location pointed to by
 * `z2Ptr'.)
 */
void
__shift64ExtraRightJamming(uint a0, uint a1, uint a2,
                           int count,
                           out uint z0Ptr,
                           out uint z1Ptr,
                           out uint z2Ptr)
{
   uint z0 = 0u;
   uint z1;
   uint z2;
   int negCount = (-count) & 31;

   z2 = mix(uint(a0 != 0u), a0, count == 64);
   z2 = mix(z2, a0 << negCount, count < 64);
   z2 = mix(z2, a1 << negCount, count < 32);

   z1 = mix(0u, (a0 >> (count & 31)), count < 64);
   z1 = mix(z1, (a0<<negCount) | (a1>>count), count < 32);

   a2 = mix(a2 | a1, a2, count < 32);
   z0 = mix(z0, a0 >> count, count < 32);
   z2 |= uint(a2 != 0u);

   z0 = mix(z0, 0u, (count == 32));
   z1 = mix(z1, a0, (count == 32));
   z2 = mix(z2, a1, (count == 32));
   z0 = mix(z0, a0, (count == 0));
   z1 = mix(z1, a1, (count == 0));
   z2 = mix(z2, a2, (count == 0));

   z2Ptr = z2;
   z1Ptr = z1;
   z0Ptr = z0;
}

/* Shifts the 64-bit value formed by concatenating `a0' and `a1' left by the
 * number of bits given in `count'.  Any bits shifted off are lost.  The value
 * of `count' must be less than 32.  The result is broken into two 32-bit
 * pieces which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
 */
void
__shortShift64Left(uint a0, uint a1,
                   int count,
                   out uint z0Ptr,
                   out uint z1Ptr)
{
   z1Ptr = a1<<count;
   z0Ptr = mix((a0 << count | (a1 >> ((-count) & 31))), a0, count == 0);
}

/* Packs the sign `zSign', the exponent `zExp', and the significand formed by
 * the concatenation of `zFrac0' and `zFrac1' into a double-precision floating-
 * point value, returning the result.  After being shifted into the proper
 * positions, the three fields `zSign', `zExp', and `zFrac0' are simply added
 * together to form the most significant 32 bits of the result.  This means
 * that any integer portion of `zFrac0' will be added into the exponent.  Since
 * a properly normalized significand will have an integer portion equal to 1,
 * the `zExp' input should be 1 less than the desired result exponent whenever
 * `zFrac0' and `zFrac1' concatenated form a complete, normalized significand.
 */
uint64_t
__packFloat64(uint zSign, int zExp, uint zFrac0, uint zFrac1)
{
   uvec2 z;

   z.y = zSign + (uint(zExp) << 20) + zFrac0;
   z.x = zFrac1;

   return packUint2x32(z);
}

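/* Illustrative example (values chosen for this note): packing 1.0 calls
 * __packFloat64(0u, 0x3FE, 0x00100000u, 0u).  The integer bit of the
 * significand (bit 20 of zFrac0) is added into the exponent field:
 * (0x3FEu << 20) + 0x00100000u = 0x3FF00000u, giving 0x3FF0000000000000ul.
 */
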
/* Takes an abstract floating-point value having sign `zSign', exponent `zExp',
 * and extended significand formed by the concatenation of `zFrac0', `zFrac1',
 * and `zFrac2', and returns the proper double-precision floating-point value
 * corresponding to the abstract input.  Ordinarily, the abstract value is
 * simply rounded and packed into the double-precision format, with the inexact
 * exception raised if the abstract input cannot be represented exactly.
 * However, if the abstract value is too large, the overflow and inexact
 * exceptions are raised and an infinity or maximal finite value is returned.
 * If the abstract value is too small, the input value is rounded to a
 * subnormal number, and the underflow and inexact exceptions are raised if the
 * abstract input cannot be represented exactly as a subnormal double-precision
 * floating-point number.
 *    The input significand must be normalized or smaller.  If the input
 * significand is not normalized, `zExp' must be 0; in that case, the result
 * returned is a subnormal number, and it must not require rounding.  In the
 * usual case that the input significand is normalized, `zExp' must be 1 less
 * than the "true" floating-point exponent.  The handling of underflow and
 * overflow follows the IEEE Standard for Floating-Point Arithmetic.
 */
uint64_t
__roundAndPackFloat64(uint zSign,
                      int zExp,
                      uint zFrac0,
                      uint zFrac1,
                      uint zFrac2)
{
   bool roundNearestEven;
   bool increment;

   roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;
   increment = int(zFrac2) < 0;
   if (!roundNearestEven) {
      if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) {
         increment = false;
      } else {
         if (zSign != 0u)
            increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) &&
               (zFrac2 != 0u);
         else
            increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&
               (zFrac2 != 0u);
      }
   }
   if (0x7FD <= zExp) {
      if ((0x7FD < zExp) ||
         ((zExp == 0x7FD) &&
            (0x001FFFFFu == zFrac0 && 0xFFFFFFFFu == zFrac1) &&
               increment)) {
         if ((FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) ||
            ((zSign != 0u) && (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP)) ||
               ((zSign == 0u) && (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN))) {
            return __packFloat64(zSign, 0x7FE, 0x000FFFFFu, 0xFFFFFFFFu);
         }

         return __packFloat64(zSign, 0x7FF, 0u, 0u);
      }
   }

   if (zExp < 0) {
      __shift64ExtraRightJamming(
         zFrac0, zFrac1, zFrac2, -zExp, zFrac0, zFrac1, zFrac2);
      zExp = 0;

      if (roundNearestEven) {
         increment = int(zFrac2) < 0;
      } else {
         if (zSign != 0u)
            increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) &&
               (zFrac2 != 0u);
         else
            increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&
               (zFrac2 != 0u);
      }
   }

   if (increment) {
      __add64(zFrac0, zFrac1, 0u, 1u, zFrac0, zFrac1);
      /* On a tie (the discarded bits are exactly one half), round to even by
       * clearing the least significant bit.
       */
      zFrac1 &= ~(uint(zFrac2 + zFrac2 == 0u) & uint(roundNearestEven));
   } else {
      zExp = mix(zExp, 0, (zFrac0 | zFrac1) == 0u);
   }

   return __packFloat64(zSign, zExp, zFrac0, zFrac1);
}

uint64_t
__roundAndPackUInt64(uint zSign, uint zFrac0, uint zFrac1, uint zFrac2)
{
   bool roundNearestEven;
   bool increment = false;
   uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;

   roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;

   if (zFrac2 >= 0x80000000u)
      increment = true;

   if (!roundNearestEven) {
      if (zSign != 0u) {
         if ((FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) && (zFrac2 != 0u)) {
            increment = true;
         } else {
            increment = false;
         }
      } else {
         increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&
            (zFrac2 != 0u);
      }
   }

   if (increment) {
      __add64(zFrac0, zFrac1, 0u, 1u, zFrac0, zFrac1);
      if ((zFrac0 | zFrac1) != 0u)
         zFrac1 &= ~(uint(zFrac2 + zFrac2 == 0u) & uint(roundNearestEven));
   }

   return mix(packUint2x32(uvec2(zFrac1, zFrac0)), default_nan,
              (zSign != 0u && (zFrac0 | zFrac1) != 0u));
}

int64_t
__roundAndPackInt64(uint zSign, uint zFrac0, uint zFrac1, uint zFrac2)
{
   bool roundNearestEven;
   bool increment = false;
   int64_t default_NegNaN = -0x7FFFFFFFFFFFFFFEL;
   int64_t default_PosNaN = 0xFFFFFFFFFFFFFFFFL;

   roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;

   if (zFrac2 >= 0x80000000u)
      increment = true;

   if (!roundNearestEven) {
      if (zSign != 0u) {
         increment = ((FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) &&
            (zFrac2 != 0u));
      } else {
         increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&
            (zFrac2 != 0u);
      }
   }

   if (increment) {
      __add64(zFrac0, zFrac1, 0u, 1u, zFrac0, zFrac1);
      if ((zFrac0 | zFrac1) != 0u)
         zFrac1 &= ~(uint(zFrac2 + zFrac2 == 0u) & uint(roundNearestEven));
   }

   int64_t absZ = mix(int64_t(packUint2x32(uvec2(zFrac1, zFrac0))),
                      -int64_t(packUint2x32(uvec2(zFrac1, zFrac0))),
                      zSign != 0u);
   int64_t nan = mix(default_PosNaN, default_NegNaN, zSign != 0u);
   return mix(absZ, nan, ((zSign != 0u) != (absZ < 0)) && bool(absZ));
}

/* Returns the number of leading 0 bits before the most-significant 1 bit of
 * `a'.  If `a' is zero, 32 is returned.
 */
int
__countLeadingZeros32(uint a)
{
   return 31 - findMSB(a);
}

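/* Illustrative example (values chosen for this note): findMSB(0x00100000u) is
 * 20, so this returns 11; findMSB(0u) is -1, giving the documented result of
 * 32.  The "- 11" adjustments in the normalization helpers below come from
 * this: a significand whose integer bit sits at bit 20 has exactly 11 leading
 * zeros.
 */
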
/* Takes an abstract floating-point value having sign `zSign', exponent `zExp',
 * and significand formed by the concatenation of `zFrac0' and `zFrac1', and
 * returns the proper double-precision floating-point value corresponding
 * to the abstract input.  This routine is just like `__roundAndPackFloat64'
 * except that the input significand has fewer bits and does not have to be
 * normalized.  In all cases, `zExp' must be 1 less than the "true" floating-
 * point exponent.
 */
uint64_t
__normalizeRoundAndPackFloat64(uint zSign,
                               int zExp,
                               uint zFrac0,
                               uint zFrac1)
{
   int shiftCount;
   uint zFrac2;

   if (zFrac0 == 0u) {
      zExp -= 32;
      zFrac0 = zFrac1;
      zFrac1 = 0u;
   }

   shiftCount = __countLeadingZeros32(zFrac0) - 11;
   if (0 <= shiftCount) {
      zFrac2 = 0u;
      __shortShift64Left(zFrac0, zFrac1, shiftCount, zFrac0, zFrac1);
   } else {
      __shift64ExtraRightJamming(
         zFrac0, zFrac1, 0u, -shiftCount, zFrac0, zFrac1, zFrac2);
   }
   zExp -= shiftCount;
   return __roundAndPackFloat64(zSign, zExp, zFrac0, zFrac1, zFrac2);
}

/* Takes two double-precision floating-point values `a' and `b', one of which
 * is a NaN, and returns the appropriate NaN result.
 */
uint64_t
__propagateFloat64NaN(uint64_t __a, uint64_t __b)
{
#if defined RELAXED_NAN_PROPAGATION
   uvec2 a = unpackUint2x32(__a);
   uvec2 b = unpackUint2x32(__b);

   return packUint2x32(uvec2(a.x | b.x, a.y | b.y));
#else
   bool aIsNaN = __is_nan(__a);
   bool bIsNaN = __is_nan(__b);
   uvec2 a = unpackUint2x32(__a);
   uvec2 b = unpackUint2x32(__b);
   a.y |= 0x00080000u;
   b.y |= 0x00080000u;

   return packUint2x32(mix(b, mix(a, b, bvec2(bIsNaN, bIsNaN)), bvec2(aIsNaN, aIsNaN)));
#endif
}

/* If a shader is in the soft-fp64 path, it almost certainly has register
 * pressure problems.  Choose a method to exchange two values that does not
 * require a temporary.
 */
#define EXCHANGE(a, b) \
   do {                \
      a ^= b;          \
      b ^= a;          \
      a ^= b;          \
   } while (false)
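
/* Usage note (illustrative): the XOR swap leaves both operands exchanged
 * without spilling a third register, e.g. EXCHANGE(aExp, bExp) swaps the two
 * exponents in place.
 */
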
/* Returns the result of adding the double-precision floating-point values
 * `a' and `b'.  The operation is performed according to the IEEE Standard for
 * Floating-Point Arithmetic.
 */
uint64_t
__fadd64(uint64_t a, uint64_t b)
{
   uint aSign = __extractFloat64Sign(a);
   uint bSign = __extractFloat64Sign(b);
   uint aFracLo = __extractFloat64FracLo(a);
   uint aFracHi = __extractFloat64FracHi(a);
   uint bFracLo = __extractFloat64FracLo(b);
   uint bFracHi = __extractFloat64FracHi(b);
   int aExp = __extractFloat64Exp(a);
   int bExp = __extractFloat64Exp(b);
   int expDiff = aExp - bExp;
   if (aSign == bSign) {
      uint zFrac0;
      uint zFrac1;
      uint zFrac2;
      int zExp;

      if (expDiff == 0) {
         if (aExp == 0x7FF) {
            bool propagate = ((aFracHi | bFracHi) | (aFracLo| bFracLo)) != 0u;
            return mix(a, __propagateFloat64NaN(a, b), propagate);
         }
         __add64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
         if (aExp == 0)
            return __packFloat64(aSign, 0, zFrac0, zFrac1);
         zFrac2 = 0u;
         zFrac0 |= 0x00200000u;
         zExp = aExp;
         __shift64ExtraRightJamming(
            zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);
      } else {
         if (expDiff < 0) {
            EXCHANGE(aFracHi, bFracHi);
            EXCHANGE(aFracLo, bFracLo);
            EXCHANGE(aExp, bExp);
         }

         if (aExp == 0x7FF) {
            bool propagate = (aFracHi | aFracLo) != 0u;
            return mix(__packFloat64(aSign, 0x7ff, 0u, 0u), __propagateFloat64NaN(a, b), propagate);
         }

         expDiff = mix(abs(expDiff), abs(expDiff) - 1, bExp == 0);
         bFracHi = mix(bFracHi | 0x00100000u, bFracHi, bExp == 0);
         __shift64ExtraRightJamming(
            bFracHi, bFracLo, 0u, expDiff, bFracHi, bFracLo, zFrac2);
         zExp = aExp;

         aFracHi |= 0x00100000u;
         __add64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
         --zExp;
         if (!(zFrac0 < 0x00200000u)) {
            __shift64ExtraRightJamming(zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);
            ++zExp;
         }
      }
      return __roundAndPackFloat64(aSign, zExp, zFrac0, zFrac1, zFrac2);
   } else {
      uint zFrac0;
      uint zFrac1;
      int zExp;

      __shortShift64Left(aFracHi, aFracLo, 10, aFracHi, aFracLo);
      __shortShift64Left(bFracHi, bFracLo, 10, bFracHi, bFracLo);
      if (expDiff != 0) {
         if (expDiff < 0) {
            EXCHANGE(aFracHi, bFracHi);
            EXCHANGE(aFracLo, bFracLo);
            EXCHANGE(aExp, bExp);
            aSign ^= 0x80000000u;
         }

         if (aExp == 0x7FF) {
            bool propagate = (aFracHi | aFracLo) != 0u;
            return mix(__packFloat64(aSign, 0x7ff, 0u, 0u), __propagateFloat64NaN(a, b), propagate);
         }

         expDiff = mix(abs(expDiff), abs(expDiff) - 1, bExp == 0);
         bFracHi = mix(bFracHi | 0x40000000u, bFracHi, bExp == 0);
         __shift64RightJamming(bFracHi, bFracLo, expDiff, bFracHi, bFracLo);
         aFracHi |= 0x40000000u;
         __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
         zExp = aExp - 1;
         return __normalizeRoundAndPackFloat64(aSign, zExp - 10, zFrac0, zFrac1);
      }
      if (aExp == 0x7FF) {
         bool propagate = ((aFracHi | bFracHi) | (aFracLo | bFracLo)) != 0u;
         return mix(0xFFFFFFFFFFFFFFFFUL, __propagateFloat64NaN(a, b), propagate);
      }
      bExp = mix(bExp, 1, aExp == 0);
      aExp = mix(aExp, 1, aExp == 0);

      uint sign_of_difference = 0u;
      if (bFracHi < aFracHi) {
         __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
      }
      else if (aFracHi < bFracHi) {
         __sub64(bFracHi, bFracLo, aFracHi, aFracLo, zFrac0, zFrac1);
         sign_of_difference = 0x80000000u;
      }
      else if (bFracLo <= aFracLo) {
         /* It is possible that zFrac0 and zFrac1 may be zero after this. */
         __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
      }
      else {
         __sub64(bFracHi, bFracLo, aFracHi, aFracLo, zFrac0, zFrac1);
         sign_of_difference = 0x80000000u;
      }
      zExp = mix(bExp, aExp, sign_of_difference == 0u);
      aSign ^= sign_of_difference;
      uint64_t retval_0 = __packFloat64(uint(FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) << 31, 0, 0u, 0u);
      uint64_t retval_1 = __normalizeRoundAndPackFloat64(aSign, zExp - 11, zFrac0, zFrac1);
      return mix(retval_0, retval_1, zFrac0 != 0u || zFrac1 != 0u);
   }
}

/* Multiplies the 64-bit value formed by concatenating `a0' and `a1' to the
 * 64-bit value formed by concatenating `b0' and `b1' to obtain a 128-bit
 * product.  The product is broken into four 32-bit pieces which are stored at
 * the locations pointed to by `z0Ptr', `z1Ptr', `z2Ptr', and `z3Ptr'.
 */
void
__mul64To128(uint a0, uint a1, uint b0, uint b1,
             out uint z0Ptr,
             out uint z1Ptr,
             out uint z2Ptr,
             out uint z3Ptr)
{
   uint z0 = 0u;
   uint z1 = 0u;
   uint z2 = 0u;
   uint z3 = 0u;
   uint more1 = 0u;
   uint more2 = 0u;

   umulExtended(a1, b1, z2, z3);
   umulExtended(a1, b0, z1, more2);
   __add64(z1, more2, 0u, z2, z1, z2);
   umulExtended(a0, b0, z0, more1);
   __add64(z0, more1, 0u, z1, z0, z1);
   umulExtended(a0, b1, more1, more2);
   __add64(more1, more2, 0u, z2, more1, z2);
   __add64(z0, z1, 0u, more1, z0, z1);
   z3Ptr = z3;
   z2Ptr = z2;
   z1Ptr = z1;
   z0Ptr = z0;
}

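/* Sketch of the 32-bit partial products above (illustrative):
 *
 *               a0 : a1
 *             x b0 : b1
 *      -----------------
 *               a1 * b1   -> z2 : z3
 *          a1 * b0        -> z1 : z2
 *          a0 * b1        -> z1 : z2
 *     a0 * b0             -> z0 : z1
 *
 * Each product is summed into the running 128-bit result with __add64 so
 * that carries propagate toward z0.
 */
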
/* Normalizes the subnormal double-precision floating-point value represented
 * by the denormalized significand formed by the concatenation of `aFrac0' and
 * `aFrac1'.  The normalized exponent is stored at the location pointed to by
 * `zExpPtr'.  The most significant 21 bits of the normalized significand are
 * stored at the location pointed to by `zFrac0Ptr', and the least significant
 * 32 bits of the normalized significand are stored at the location pointed to
 * by `zFrac1Ptr'.
 */
void
__normalizeFloat64Subnormal(uint aFrac0, uint aFrac1,
                            out int zExpPtr,
                            out uint zFrac0Ptr,
                            out uint zFrac1Ptr)
{
   int shiftCount;
   uint temp_zfrac0, temp_zfrac1;
   shiftCount = __countLeadingZeros32(mix(aFrac0, aFrac1, aFrac0 == 0u)) - 11;
   zExpPtr = mix(1 - shiftCount, -shiftCount - 31, aFrac0 == 0u);

   temp_zfrac0 = mix(aFrac1<<shiftCount, aFrac1>>(-shiftCount), shiftCount < 0);
   temp_zfrac1 = mix(0u, aFrac1<<(shiftCount & 31), shiftCount < 0);

   __shortShift64Left(aFrac0, aFrac1, shiftCount, zFrac0Ptr, zFrac1Ptr);

   zFrac0Ptr = mix(zFrac0Ptr, temp_zfrac0, aFrac0 == 0u);
   zFrac1Ptr = mix(zFrac1Ptr, temp_zfrac1, aFrac0 == 0u);
}

/* Returns the result of multiplying the double-precision floating-point values
 * `a' and `b'.  The operation is performed according to the IEEE Standard for
 * Floating-Point Arithmetic.
 */
uint64_t
__fmul64(uint64_t a, uint64_t b)
{
   uint zFrac0 = 0u;
   uint zFrac1 = 0u;
   uint zFrac2 = 0u;
   uint zFrac3 = 0u;
   int zExp;

   uint aFracLo = __extractFloat64FracLo(a);
   uint aFracHi = __extractFloat64FracHi(a);
   uint bFracLo = __extractFloat64FracLo(b);
   uint bFracHi = __extractFloat64FracHi(b);
   int aExp = __extractFloat64Exp(a);
   uint aSign = __extractFloat64Sign(a);
   int bExp = __extractFloat64Exp(b);
   uint bSign = __extractFloat64Sign(b);
   uint zSign = aSign ^ bSign;
   if (aExp == 0x7FF) {
      if (((aFracHi | aFracLo) != 0u) ||
         ((bExp == 0x7FF) && ((bFracHi | bFracLo) != 0u))) {
         return __propagateFloat64NaN(a, b);
      }
      if ((uint(bExp) | bFracHi | bFracLo) == 0u)
         return 0xFFFFFFFFFFFFFFFFUL;
      return __packFloat64(zSign, 0x7FF, 0u, 0u);
   }
   if (bExp == 0x7FF) {
      /* a cannot be NaN, but is b NaN? */
      if ((bFracHi | bFracLo) != 0u)
#if defined RELAXED_NAN_PROPAGATION
         return b;
#else
         return __propagateFloat64NaN(a, b);
#endif
      if ((uint(aExp) | aFracHi | aFracLo) == 0u)
         return 0xFFFFFFFFFFFFFFFFUL;
      return __packFloat64(zSign, 0x7FF, 0u, 0u);
   }
   if (aExp == 0) {
      if ((aFracHi | aFracLo) == 0u)
         return __packFloat64(zSign, 0, 0u, 0u);
      __normalizeFloat64Subnormal(aFracHi, aFracLo, aExp, aFracHi, aFracLo);
   }
   if (bExp == 0) {
      if ((bFracHi | bFracLo) == 0u)
         return __packFloat64(zSign, 0, 0u, 0u);
      __normalizeFloat64Subnormal(bFracHi, bFracLo, bExp, bFracHi, bFracLo);
   }
   zExp = aExp + bExp - 0x400;
   aFracHi |= 0x00100000u;
   __shortShift64Left(bFracHi, bFracLo, 12, bFracHi, bFracLo);
   __mul64To128(
      aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1, zFrac2, zFrac3);
   __add64(zFrac0, zFrac1, aFracHi, aFracLo, zFrac0, zFrac1);
   zFrac2 |= uint(zFrac3 != 0u);
   if (0x00200000u <= zFrac0) {
      __shift64ExtraRightJamming(
         zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);
      ++zExp;
   }
   return __roundAndPackFloat64(zSign, zExp, zFrac0, zFrac1, zFrac2);
}

uint64_t
__ffma64(uint64_t a, uint64_t b, uint64_t c)
{
   return __fadd64(__fmul64(a, b), c);
}

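/* Note: this is not a fused operation.  The product is rounded by __fmul64
 * before __fadd64 rounds the sum, so results can differ from a true fma in
 * the last bit.
 */
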
/* Shifts the 64-bit value formed by concatenating `a0' and `a1' right by the
 * number of bits given in `count'.  Any bits shifted off are lost.  The value
 * of `count' can be arbitrarily large; in particular, if `count' is greater
 * than 64, the result will be 0.  The result is broken into two 32-bit pieces
 * which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
 */
void
__shift64Right(uint a0, uint a1,
               int count,
               out uint z0Ptr,
               out uint z1Ptr)
{
   uint z0;
   uint z1;
   int negCount = (-count) & 31;

   z0 = 0u;
   z0 = mix(z0, (a0 >> count), count < 32);
   z0 = mix(z0, a0, count == 0);

   z1 = mix(0u, (a0 >> (count & 31)), count < 64);
   z1 = mix(z1, (a0<<negCount) | (a1>>count), count < 32);
   z1 = mix(z1, a1, count == 0);

   z1Ptr = z1;
   z0Ptr = z0;
}

/* Returns the result of converting the double-precision floating-point value
 * `a' to the unsigned integer format.  The conversion is performed according
 * to the IEEE Standard for Floating-Point Arithmetic.
 */
uint
__fp64_to_uint(uint64_t a)
{
   uint aFracLo = __extractFloat64FracLo(a);
   uint aFracHi = __extractFloat64FracHi(a);
   int aExp = __extractFloat64Exp(a);
   uint aSign = __extractFloat64Sign(a);

   if ((aExp == 0x7FF) && ((aFracHi | aFracLo) != 0u))
      return 0xFFFFFFFFu;

   aFracHi |= mix(0u, 0x00100000u, aExp != 0);

   int shiftDist = 0x427 - aExp;
   if (0 < shiftDist)
      __shift64RightJamming(aFracHi, aFracLo, shiftDist, aFracHi, aFracLo);

   if ((aFracHi & 0xFFFFF000u) != 0u)
      return mix(~0u, 0u, aSign != 0u);

   uint z;
   uint zero = 0u;
   __shift64Right(aFracHi, aFracLo, 12, zero, z);

   uint expt = mix(~0u, 0u, aSign != 0u);

   return mix(z, expt, (aSign != 0u) && (z != 0u));
}

uint64_t
__uint_to_fp64(uint a)
{
   if (a == 0u)
      return 0ul;

   int shiftDist = __countLeadingZeros32(a) + 21;

   uint aHigh = 0u;
   uint aLow = 0u;
   int negCount = (-shiftDist) & 31;

   aHigh = mix(0u, a << (shiftDist - 32), shiftDist < 64);
   aLow = 0u;
   aHigh = mix(aHigh, 0u, shiftDist == 0);
   aLow = mix(aLow, a, shiftDist == 0);
   aHigh = mix(aHigh, a >> negCount, shiftDist < 32);
   aLow = mix(aLow, a << shiftDist, shiftDist < 32);

   return __packFloat64(0u, 0x432 - shiftDist, aHigh, aLow);
}

uint64_t
__uint64_to_fp64(uint64_t a)
{
   if (a == 0ul)
      return 0ul;

   uvec2 aFrac = unpackUint2x32(a);
   uint aFracLo = __extractFloat64FracLo(a);
   uint aFracHi = __extractFloat64FracHi(a);

   if ((aFracHi & 0x80000000u) != 0u) {
      __shift64RightJamming(aFracHi, aFracLo, 1, aFracHi, aFracLo);
      return __roundAndPackFloat64(0u, 0x433, aFracHi, aFracLo, 0u);
   }

   return __normalizeRoundAndPackFloat64(0u, 0x432, aFrac.y, aFrac.x);
}

uint64_t
__fp64_to_uint64(uint64_t a)
{
   uint aFracLo = __extractFloat64FracLo(a);
   uint aFracHi = __extractFloat64FracHi(a);
   int aExp = __extractFloat64Exp(a);
   uint aSign = __extractFloat64Sign(a);
   uint zFrac2 = 0u;
   uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;

   aFracHi = mix(aFracHi, aFracHi | 0x00100000u, aExp != 0);
   int shiftCount = 0x433 - aExp;

   if (shiftCount <= 0) {
      if (shiftCount < -11 && aExp == 0x7FF) {
         if ((aFracHi | aFracLo) != 0u)
            return __propagateFloat64NaN(a, a);
         return mix(default_nan, a, aSign == 0u);
      }
      __shortShift64Left(aFracHi, aFracLo, -shiftCount, aFracHi, aFracLo);
   } else {
      __shift64ExtraRightJamming(aFracHi, aFracLo, zFrac2, shiftCount,
                                 aFracHi, aFracLo, zFrac2);
   }

   return __roundAndPackUInt64(aSign, aFracHi, aFracLo, zFrac2);
}

int64_t
__fp64_to_int64(uint64_t a)
{
   uint zFrac2 = 0u;
   uint aFracLo = __extractFloat64FracLo(a);
   uint aFracHi = __extractFloat64FracHi(a);
   int aExp = __extractFloat64Exp(a);
   uint aSign = __extractFloat64Sign(a);
   int64_t default_NegNaN = -0x7FFFFFFFFFFFFFFEL;
   int64_t default_PosNaN = 0xFFFFFFFFFFFFFFFFL;

   aFracHi = mix(aFracHi, aFracHi | 0x00100000u, aExp != 0);
   int shiftCount = 0x433 - aExp;

   if (shiftCount <= 0) {
      if (shiftCount < -11 && aExp == 0x7FF) {
         if ((aFracHi | aFracLo) != 0u)
            return default_NegNaN;
         return mix(default_NegNaN, default_PosNaN, aSign == 0u);
      }
      __shortShift64Left(aFracHi, aFracLo, -shiftCount, aFracHi, aFracLo);
   } else {
      __shift64ExtraRightJamming(aFracHi, aFracLo, zFrac2, shiftCount,
                                 aFracHi, aFracLo, zFrac2);
   }

   return __roundAndPackInt64(aSign, aFracHi, aFracLo, zFrac2);
}

uint64_t
__int64_to_fp64(int64_t a)
{
   if (a == 0l)
      return 0ul;

   uint64_t absA = mix(uint64_t(a), uint64_t(-a), a < 0);
   uint aFracHi = __extractFloat64FracHi(absA);
   uvec2 aFrac = unpackUint2x32(absA);
   uint zSign = uint(unpackInt2x32(a).y) & 0x80000000u;

   if ((aFracHi & 0x80000000u) != 0u) {
      return mix(0ul, __packFloat64(0x80000000u, 0x434, 0u, 0u), a < 0);
   }

   return __normalizeRoundAndPackFloat64(zSign, 0x432, aFrac.y, aFrac.x);
}

/* Returns the result of converting the double-precision floating-point value
 * `a' to the 32-bit two's complement integer format.  The conversion is
 * performed according to the IEEE Standard for Floating-Point Arithmetic---
 * which means in particular that the conversion is rounded according to the
 * current rounding mode.  If `a' is a NaN, the largest positive integer is
 * returned.  Otherwise, if the conversion overflows, the largest integer with
 * the same sign as `a' is returned.
 */
int
__fp64_to_int(uint64_t a)
{
   uint aFracLo = __extractFloat64FracLo(a);
   uint aFracHi = __extractFloat64FracHi(a);
   int aExp = __extractFloat64Exp(a);
   uint aSign = __extractFloat64Sign(a);

   uint absZ = 0u;
   uint aFracExtra = 0u;
   int shiftCount = aExp - 0x413;

   if (0 <= shiftCount) {
      if (0x41E < aExp) {
         if ((aExp == 0x7FF) && bool(aFracHi | aFracLo))
            aSign = 0u;
         return mix(0x7FFFFFFF, 0x80000000, aSign != 0u);
      }
      __shortShift64Left(aFracHi | 0x00100000u, aFracLo, shiftCount, absZ, aFracExtra);
   } else {
      if (aExp < 0x3FF)
         return 0;

      aFracHi |= 0x00100000u;
      aFracExtra = (aFracHi << (shiftCount & 31)) | aFracLo;
      absZ = aFracHi >> (-shiftCount);
   }

   int z = mix(int(absZ), -int(absZ), aSign != 0u);
   int nan = mix(0x7FFFFFFF, 0x80000000, aSign != 0u);
   return mix(z, nan, ((aSign != 0u) != (z < 0)) && bool(z));
}

/* Returns the result of converting the 32-bit two's complement integer `a'
 * to the double-precision floating-point format.  The conversion is performed
 * according to the IEEE Standard for Floating-Point Arithmetic.
 */
uint64_t
__int_to_fp64(int a)
{
   uint zFrac0 = 0u;
   uint zFrac1 = 0u;
   if (a == 0)
      return __packFloat64(0u, 0, 0u, 0u);
   uint zSign = uint(a) & 0x80000000u;
   uint absA = mix(uint(a), uint(-a), a < 0);
   int shiftCount = __countLeadingZeros32(absA) - 11;
   if (0 <= shiftCount) {
      zFrac0 = absA << shiftCount;
      zFrac1 = 0u;
   } else {
      __shift64Right(absA, 0u, -shiftCount, zFrac0, zFrac1);
   }
   return __packFloat64(zSign, 0x412 - shiftCount, zFrac0, zFrac1);
}

bool
__fp64_to_bool(uint64_t a)
{
   return !__feq64_nonnan(__fabs64(a), 0ul);
}

uint64_t
__bool_to_fp64(bool a)
{
   return packUint2x32(uvec2(0x00000000u, uint(-int(a) & 0x3ff00000)));
}

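/* Illustrative example (values chosen for this note): for a = true,
 * -int(a) = -1 = 0xFFFFFFFF, so -int(a) & 0x3ff00000 = 0x3FF00000 and the
 * function returns 0x3FF0000000000000ul (1.0); for a = false the mask yields
 * 0ul (0.0).
 */
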
/* Packs the sign `zSign', exponent `zExp', and significand `zFrac' into a
 * single-precision floating-point value, returning the result.  After being
 * shifted into the proper positions, the three fields are simply added
 * together to form the result.  This means that any integer portion of `zFrac'
 * will be added into the exponent.  Since a properly normalized significand
 * will have an integer portion equal to 1, the `zExp' input should be 1 less
 * than the desired result exponent whenever `zFrac' is a complete, normalized
 * significand.
 */
float
__packFloat32(uint zSign, int zExp, uint zFrac)
{
   return uintBitsToFloat(zSign + (uint(zExp)<<23) + zFrac);
}

/* Takes an abstract floating-point value having sign `zSign', exponent `zExp',
 * and significand `zFrac', and returns the proper single-precision floating-
 * point value corresponding to the abstract input.  Ordinarily, the abstract
 * value is simply rounded and packed into the single-precision format, with
 * the inexact exception raised if the abstract input cannot be represented
 * exactly.  However, if the abstract value is too large, the overflow and
 * inexact exceptions are raised and an infinity or maximal finite value is
 * returned.  If the abstract value is too small, the input value is rounded to
 * a subnormal number, and the underflow and inexact exceptions are raised if
 * the abstract input cannot be represented exactly as a subnormal single-
 * precision floating-point number.
 *    The input significand `zFrac' has its binary point between bits 30
 * and 29, which is 7 bits to the left of the usual location.  This shifted
 * significand must be normalized or smaller.  If `zFrac' is not normalized,
 * `zExp' must be 0; in that case, the result returned is a subnormal number,
 * and it must not require rounding.  In the usual case that `zFrac' is
 * normalized, `zExp' must be 1 less than the "true" floating-point exponent.
 * The handling of underflow and overflow follows the IEEE Standard for
 * Floating-Point Arithmetic.
 */
float
__roundAndPackFloat32(uint zSign, int zExp, uint zFrac)
{
   bool roundNearestEven;
   int roundIncrement;
   int roundBits;

   roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;
   roundIncrement = 0x40;
   if (!roundNearestEven) {
      if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) {
         roundIncrement = 0;
      } else {
         roundIncrement = 0x7F;
         if (zSign != 0u) {
            if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP)
               roundIncrement = 0;
         } else {
            if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN)
               roundIncrement = 0;
         }
      }
   }

   roundBits = int(zFrac & 0x7Fu);
   if (0xFDu <= uint(zExp)) {
      if ((0xFD < zExp) || ((zExp == 0xFD) && (int(zFrac) + roundIncrement) < 0))
         return __packFloat32(zSign, 0xFF, 0u) - float(roundIncrement == 0);
      int count = -zExp;
      bool zexp_lt0 = zExp < 0;
      uint zFrac_lt0 = mix(uint(zFrac != 0u), (zFrac>>count) | uint((zFrac<<((-count) & 31)) != 0u), (-zExp) < 32);
      zFrac = mix(zFrac, zFrac_lt0, zexp_lt0);
      roundBits = mix(roundBits, int(zFrac) & 0x7f, zexp_lt0);
      zExp = mix(zExp, 0, zexp_lt0);
   }

   zFrac = (zFrac + uint(roundIncrement))>>7;
   zFrac &= ~uint(((roundBits ^ 0x40) == 0) && roundNearestEven);

   return __packFloat32(zSign, mix(zExp, 0, zFrac == 0u), zFrac);
}

/* Returns the result of converting the double-precision floating-point value
 * `a' to the single-precision floating-point format.  The conversion is
 * performed according to the IEEE Standard for Floating-Point Arithmetic.
 */
float
__fp64_to_fp32(uint64_t __a)
{
   uvec2 a = unpackUint2x32(__a);
   uint zFrac = 0u;
   uint allZero = 0u;

   uint aFracLo = __extractFloat64FracLo(__a);
   uint aFracHi = __extractFloat64FracHi(__a);
   int aExp = __extractFloat64Exp(__a);
   uint aSign = __extractFloat64Sign(__a);
   if (aExp == 0x7FF) {
      __shortShift64Left(a.y, a.x, 12, a.y, a.x);
      float rval = uintBitsToFloat(aSign | 0x7FC00000u | (a.y>>9));
      rval = mix(__packFloat32(aSign, 0xFF, 0u), rval, (aFracHi | aFracLo) != 0u);
      return rval;
   }
   __shift64RightJamming(aFracHi, aFracLo, 22, allZero, zFrac);
   zFrac = mix(zFrac, zFrac | 0x40000000u, aExp != 0);
   return __roundAndPackFloat32(aSign, aExp - 0x381, zFrac);
}

/* Returns the result of converting the single-precision floating-point value
 * `a' to the double-precision floating-point format.
 */
uint64_t
__fp32_to_fp64(float f)
{
   uint a = floatBitsToUint(f);
   uint aFrac = a & 0x007FFFFFu;
   int aExp = int((a>>23) & 0xFFu);
   uint aSign = a & 0x80000000u;
   uint zFrac0 = 0u;
   uint zFrac1 = 0u;

   if (aExp == 0xFF) {
      if (aFrac != 0u) {
         uint nanLo = 0u;
         uint nanHi = a << 9;
         __shift64Right(nanHi, nanLo, 12, nanHi, nanLo);
         nanHi |= aSign | 0x7FF80000u;
         return packUint2x32(uvec2(nanLo, nanHi));
      }
      return __packFloat64(aSign, 0x7FF, 0u, 0u);
   }

   if (aExp == 0) {
      if (aFrac == 0u)
         return __packFloat64(aSign, 0, 0u, 0u);
      /* Normalize subnormal */
      int shiftCount = __countLeadingZeros32(aFrac) - 8;
      aFrac <<= shiftCount;
      aExp = 1 - shiftCount;
   }

   __shift64Right(aFrac, 0u, 3, zFrac0, zFrac1);
   return __packFloat64(aSign, aExp + 0x380, zFrac0, zFrac1);
}

/* Adds the 96-bit value formed by concatenating `a0', `a1', and `a2' to the
 * 96-bit value formed by concatenating `b0', `b1', and `b2'.  Addition is
 * modulo 2^96, so any carry out is lost.  The result is broken into three
 * 32-bit pieces which are stored at the locations pointed to by `z0Ptr',
 * `z1Ptr', and `z2Ptr'.
 */
void
__add96(uint a0, uint a1, uint a2,
        uint b0, uint b1, uint b2,
        out uint z0Ptr,
        out uint z1Ptr,
        out uint z2Ptr)
{
   uint z2 = a2 + b2;
   uint carry1 = uint(z2 < a2);
   uint z1 = a1 + b1;
   uint carry0 = uint(z1 < a1);
   uint z0 = a0 + b0;
   z1 += carry1;
   z0 += uint(z1 < carry1);
   z0 += carry0;
   z2Ptr = z2;
   z1Ptr = z1;
   z0Ptr = z0;
}

/* Subtracts the 96-bit value formed by concatenating `b0', `b1', and `b2' from
 * the 96-bit value formed by concatenating `a0', `a1', and `a2'.  Subtraction
 * is modulo 2^96, so any borrow out (carry out) is lost.  The result is broken
 * into three 32-bit pieces which are stored at the locations pointed to by
 * `z0Ptr', `z1Ptr', and `z2Ptr'.
 */
void
__sub96(uint a0, uint a1, uint a2,
        uint b0, uint b1, uint b2,
        out uint z0Ptr,
        out uint z1Ptr,
        out uint z2Ptr)
{
   uint z2 = a2 - b2;
   uint borrow1 = uint(a2 < b2);
   uint z1 = a1 - b1;
   uint borrow0 = uint(a1 < b1);
   uint z0 = a0 - b0;
   z0 -= uint(z1 < borrow1);
   z1 -= borrow1;
   z0 -= borrow0;
   z2Ptr = z2;
   z1Ptr = z1;
   z0Ptr = z0;
}

/* Returns an approximation to the 32-bit integer quotient obtained by dividing
 * `b' into the 64-bit value formed by concatenating `a0' and `a1'.  The
 * divisor `b' must be at least 2^31.  If q is the exact quotient truncated
 * toward zero, the approximation returned lies between q and q + 2 inclusive.
 * If the exact quotient q is larger than 32 bits, the maximum positive 32-bit
 * unsigned integer is returned.
 */
uint
__estimateDiv64To32(uint a0, uint a1, uint b)
{
   uint b0;
   uint b1;
   uint rem0 = 0u;
   uint rem1 = 0u;
   uint term0 = 0u;
   uint term1 = 0u;
   uint z;

   if (b <= a0)
      return 0xFFFFFFFFu;
   b0 = b>>16;
   z = (b0<<16 <= a0) ? 0xFFFF0000u : (a0 / b0)<<16;
   umulExtended(b, z, term0, term1);
   __sub64(a0, a1, term0, term1, rem0, rem1);
   while (int(rem0) < 0) {
      z -= 0x10000u;
      b1 = b<<16;
      __add64(rem0, rem1, b0, b1, rem0, rem1);
   }
   rem0 = (rem0<<16) | (rem1>>16);
   z |= (b0<<16 <= rem0) ? 0xFFFFu : rem0 / b0;
   return z;
}

/* Adjustment tables used by `__estimateSqrt32', indexed by the four
 * significand bits below the leading one.  The constants are the standard
 * adjustment tables from the Berkeley SoftFloat `estimateSqrt32' routine.
 */
int
__sqrtOddAdjustments(int index)
{
   const int adjustments[16] = int[16](0x0004, 0x0022, 0x005D, 0x00B1,
                                       0x011D, 0x019F, 0x0236, 0x02E0,
                                       0x039C, 0x0468, 0x0545, 0x0631,
                                       0x072B, 0x0832, 0x0946, 0x0A67);
   return adjustments[index];
}

int
__sqrtEvenAdjustments(int index)
{
   const int adjustments[16] = int[16](0x0A2D, 0x08AF, 0x075A, 0x0629,
                                       0x051A, 0x0429, 0x0356, 0x029E,
                                       0x0200, 0x0179, 0x0109, 0x00AF,
                                       0x0068, 0x0034, 0x0012, 0x0002);
   return adjustments[index];
}

/* Returns an approximation to the square root of the 32-bit significand given
 * by `a'.  Considered as an integer, `a' must be at least 2^31.  If bit 0 of
 * `aExp' (the least significant bit) is 1, the integer returned approximates
 * 2^31*sqrt(`a'/2^31), where `a' is considered an integer.  If bit 0 of `aExp'
 * is 0, the integer returned approximates 2^31*sqrt(`a'/2^30).  In either
 * case, the approximation returned lies strictly within +/-2 of the exact
 * value.
 */
uint
__estimateSqrt32(int aExp, uint a)
{
   uint z;

   int index = int(a>>27 & 15u);
   if ((aExp & 1) != 0) {
      z = 0x4000u + (a>>17) - __sqrtOddAdjustments(index);
      z = ((a / z)<<14) + (z<<15);
      a >>= 1;
   } else {
      z = 0x8000u + (a>>17) - __sqrtEvenAdjustments(index);
      z = a / z + z;
      z = (0x20000u <= z) ? 0xFFFF8000u : (z<<15);
      if (z <= a)
         return uint(int(a)>>1);
   }
   return ((__estimateDiv64To32(a, 0u, z))>>1) + (z>>1);
}

/* Returns the square root of the double-precision floating-point value `a'.
 * The operation is performed according to the IEEE Standard for Floating-Point
 * Arithmetic.
 */
uint64_t
__fsqrt64(uint64_t a)
{
   uint zFrac0 = 0u;
   uint zFrac1 = 0u;
   uint zFrac2 = 0u;
   uint doubleZFrac0 = 0u;
   uint rem0 = 0u;
   uint rem1 = 0u;
   uint rem2 = 0u;
   uint rem3 = 0u;
   uint term0 = 0u;
   uint term1 = 0u;
   uint term2 = 0u;
   uint term3 = 0u;
   uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;

   uint aFracLo = __extractFloat64FracLo(a);
   uint aFracHi = __extractFloat64FracHi(a);
   int aExp = __extractFloat64Exp(a);
   uint aSign = __extractFloat64Sign(a);
   if (aExp == 0x7FF) {
      if ((aFracHi | aFracLo) != 0u)
         return __propagateFloat64NaN(a, a);
      if (aSign == 0u)
         return a;
      return default_nan;
   }
   if (aSign != 0u) {
      if ((uint(aExp) | aFracHi | aFracLo) == 0u)
         return a;
      return default_nan;
   }
   if (aExp == 0) {
      if ((aFracHi | aFracLo) == 0u)
         return __packFloat64(0u, 0, 0u, 0u);
      __normalizeFloat64Subnormal(aFracHi, aFracLo, aExp, aFracHi, aFracLo);
   }
   int zExp = ((aExp - 0x3FF)>>1) + 0x3FE;
   aFracHi |= 0x00100000u;
   __shortShift64Left(aFracHi, aFracLo, 11, term0, term1);
   zFrac0 = (__estimateSqrt32(aExp, term0)>>1) + 1u;
   if (zFrac0 == 0u)
      zFrac0 = 0x7FFFFFFFu;
   doubleZFrac0 = zFrac0 + zFrac0;
   __shortShift64Left(aFracHi, aFracLo, 9 - (aExp & 1), aFracHi, aFracLo);
   umulExtended(zFrac0, zFrac0, term0, term1);
   __sub64(aFracHi, aFracLo, term0, term1, rem0, rem1);
   while (int(rem0) < 0) {
      --zFrac0;
      doubleZFrac0 -= 2u;
      __add64(rem0, rem1, 0u, doubleZFrac0 | 1u, rem0, rem1);
   }
   zFrac1 = __estimateDiv64To32(rem1, 0u, doubleZFrac0);
   if ((zFrac1 & 0x1FFu) <= 5u) {
      if (zFrac1 == 0u)
         zFrac1 = 1u;
      umulExtended(doubleZFrac0, zFrac1, term1, term2);
      __sub64(rem1, 0u, term1, term2, rem1, rem2);
      umulExtended(zFrac1, zFrac1, term2, term3);
      __sub96(rem1, rem2, 0u, 0u, term2, term3, rem1, rem2, rem3);
      while (int(rem1) < 0) {
         --zFrac1;
         __shortShift64Left(0u, zFrac1, 1, term2, term3);
         term3 |= 1u;
         term2 |= doubleZFrac0;
         __add96(rem1, rem2, rem3, 0u, term2, term3, rem1, rem2, rem3);
      }
      zFrac1 |= uint((rem1 | rem2 | rem3) != 0u);
   }
   __shift64ExtraRightJamming(zFrac0, zFrac1, 0u, 10, zFrac0, zFrac1, zFrac2);
   return __roundAndPackFloat64(0u, zExp, zFrac0, zFrac1, zFrac2);
}

uint64_t
__ftrunc64(uint64_t __a)
{
   uvec2 a = unpackUint2x32(__a);
   int aExp = __extractFloat64Exp(__a);
   uint zLo;
   uint zHi;

   int unbiasedExp = aExp - 1023;
   int fracBits = 52 - unbiasedExp;
   uint maskLo = mix(~0u << fracBits, 0u, fracBits >= 32);
   uint maskHi = mix(~0u << (fracBits - 32), ~0u, fracBits < 33);
   zLo = a.x & maskLo;
   zHi = a.y & maskHi;

   zLo = mix(zLo, 0u, unbiasedExp < 0);
   zHi = mix(zHi, 0u, unbiasedExp < 0);
   zLo = mix(zLo, a.x, unbiasedExp > 52);
   zHi = mix(zHi, a.y, unbiasedExp > 52);
   return packUint2x32(uvec2(zLo, zHi));
}

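/* Illustrative example (values chosen for this note): for __a = 3.5
 * (0x400C000000000000ul), unbiasedExp = 1 and fracBits = 51, so maskLo = 0u
 * and maskHi = ~0u << 19 = 0xFFF80000u.  Masking clears the 0.5 bit and
 * yields 0x4008000000000000ul, i.e. 3.0.
 */
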
uint64_t
__ffloor64(uint64_t a)
{
   /* The big assumption is that when 'a' is NaN, __ftrunc64(a) returns a.
    * Based on that assumption, NaN values that don't have the sign bit will
    * safely return NaN (identity).  This is guarded by
    * RELAXED_NAN_PROPAGATION because otherwise the NaN should have the
    * "signal" bit set.  The __fadd64 will ensure that occurs.
    */
   bool is_positive =
#if defined RELAXED_NAN_PROPAGATION
      int(unpackUint2x32(a).y) >= 0
#else
      __fge64(a, 0ul)
#endif
      ;
   uint64_t tr = __ftrunc64(a);

   if (is_positive || __feq64(tr, a)) {
      return tr;
   } else {
      return __fadd64(tr, 0xbff0000000000000ul /* -1.0 */);
   }
}

uint64_t
__fround64(uint64_t __a)
{
   uvec2 a = unpackUint2x32(__a);
   int unbiasedExp = __extractFloat64Exp(__a) - 1023;
   uint aHi = a.y;
   uint aLo = a.x;

   if (unbiasedExp < 20) {
      if (unbiasedExp < 0) {
         if ((aHi & 0x80000000u) != 0u && aLo == 0u) {
            return 0ul;
         }
         aHi &= 0x80000000u;
         if ((a.y & 0x000FFFFFu) == 0u && a.x == 0u) {
            aLo = 0u;
            return packUint2x32(uvec2(aLo, aHi));
         }
         aHi = mix(aHi, (aHi | 0x3FF00000u), unbiasedExp == -1);
         aLo = 0u;
      } else {
         uint maskExp = 0x000FFFFFu >> unbiasedExp;
         uint lastBit = maskExp + 1;
         aHi += 0x00080000u >> unbiasedExp;
         if ((aHi & maskExp) == 0u)
            aHi &= ~lastBit;
         aHi &= ~maskExp;
         aLo = 0u;
      }
   } else if (unbiasedExp > 51 || unbiasedExp == 1024) {
      return __a;
   } else {
      uint maskExp = 0xFFFFFFFFu >> (unbiasedExp - 20);
      if ((aLo & maskExp) == 0u)
         return __a;
      uint tmp = aLo + (1u << (51 - unbiasedExp));
      if (tmp < aLo)
         aHi += 1u;
      aLo = tmp;
      aLo &= ~maskExp;
   }

   return packUint2x32(uvec2(aLo, aHi));
}

uint64_t
__fmin64(uint64_t a, uint64_t b)
{
   /* This weird layout matters.  Doing the "obvious" thing results in extra
    * flow control being inserted to implement the short-circuit evaluation
    * rules.  Flow control is bad!
    */
   bool b_nan = __is_nan(b);
   bool a_lt_b = __flt64_nonnan(a, b);
   bool a_nan = __is_nan(a);

   return (b_nan || a_lt_b) && !a_nan ? a : b;
}

uint64_t
__fmax64(uint64_t a, uint64_t b)
{
   /* This weird layout matters.  Doing the "obvious" thing results in extra
    * flow control being inserted to implement the short-circuit evaluation
    * rules.  Flow control is bad!
    */
   bool b_nan = __is_nan(b);
   bool a_lt_b = __flt64_nonnan(a, b);
   bool a_nan = __is_nan(a);

   return (b_nan || a_lt_b) && !a_nan ? b : a;
}

uint64_t
__ffract64(uint64_t a)
{
   return __fadd64(a, __fneg64(__ffloor64(a)));
}