/**************************************************************************
 *
 * Copyright 2008 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/**
 * Math utilities and approximations for common math functions.
 * Reduced precision is usually acceptable in shaders...
 *
 * "fast" is used in the names of functions which are low-precision,
 * or at least lower-precision than the normal C lib functions.
 */
#ifndef U_MATH_H
#define U_MATH_H

#include <assert.h>
#include <math.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include "u_endian.h" /* for UTIL_ARCH_BIG_ENDIAN */

#ifndef M_SQRT2
#define M_SQRT2 1.41421356237309504880
#endif

/* Bit-level access to float and double values, used throughout this
 * header.
 */
union fi {
   float f;
   int32_t i;
   uint32_t ui;
};

union di {
   double d;
   int64_t i;
   uint64_t ui;
};
/**
 * Initialize math module.  This should be called before using any
 * other functions in this module.
 */
extern void
util_init_math(void);


/**
 * Extract the IEEE float32 exponent.
 */
static inline signed
util_get_float32_exponent(float x)
{
   union fi f;

   f.f = x;

   return ((f.ui >> 23) & 0xff) - 127;
}
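/* Example (not in the original header): 8.0f is 2^3, stored with biased
 * exponent 130, so util_get_float32_exponent(8.0f) == 130 - 127 == 3.
 */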
#define LOG2_TABLE_SIZE_LOG2 8
#define LOG2_TABLE_SCALE (1 << LOG2_TABLE_SIZE_LOG2)
#define LOG2_TABLE_SIZE (LOG2_TABLE_SCALE + 1)
extern float log2_table[LOG2_TABLE_SIZE];
/**
 * Fast approximation to log2(x).
 */
static inline float
util_fast_log2(float x)
{
   union fi num;
   float epart, mpart;
   num.f = x;
   epart = (float)(((num.i & 0x7f800000) >> 23) - 127);
   /* mpart = log2_table[mantissa*LOG2_TABLE_SCALE + 0.5] */
   mpart = log2_table[((num.i & 0x007fffff) + (1 << (22 - LOG2_TABLE_SIZE_LOG2))) >> (23 - LOG2_TABLE_SIZE_LOG2)];
   return epart + mpart;
}
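/* Illustrative usage (not part of the original header; the helper name is
 * hypothetical).  The approximation splits x into an integer exponent part
 * and a table-interpolated mantissa part, so powers of two come out nearly
 * exact while other values carry table-sized error:
 */
static inline void
u_math_fast_log2_example(void)
{
   assert(fabsf(util_fast_log2(8.0f) - 3.0f) < 0.01f);
   assert(fabsf(util_fast_log2(10.0f) - 3.3219f) < 0.01f);
}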
/**
 * Floor(x), returned as int.
 */
static inline int
util_ifloor(float f)
{
#if defined(USE_X86_ASM) && defined(__GNUC__) && defined(__i386__)
   /*
    * IEEE floor for computers that round to nearest or even.
    * 'f' must be between -4194304 and 4194303.
    * This floor operation is done by "(iround(f + .5) + iround(f - .5)) >> 1",
    * but uses some IEEE specific tricks for better speed.
    * Contributed by Josh Vanderhoof
    */
   int ai, bi;
   double af, bf;
   af = (3 << 22) + 0.5 + (double)f;
   bf = (3 << 22) + 0.5 - (double)f;
   /* GCC generates an extra fstp/fld without this. */
   __asm__ ("fstps %0" : "=m" (ai) : "t" (af) : "st");
   __asm__ ("fstps %0" : "=m" (bi) : "t" (bf) : "st");
   return (ai - bi) >> 1;
#else
   int ai, bi;
   double af, bf;
   union fi u;
   af = (3 << 22) + 0.5 + (double) f;
   bf = (3 << 22) + 0.5 - (double) f;
   u.f = (float) af; ai = u.i;
   u.f = (float) bf; bi = u.i;
   return (ai - bi) >> 1;
#endif
}
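/* Worked example of the bias trick above (not in the original header):
 * with f = 2.7, both sums land in [2^23, 2^24), where float spacing is 1,
 * so rounding to nearest strips the fraction:
 *    af = 12582912.5 + 2.7 -> 12582915.0f
 *    bf = 12582912.5 - 2.7 -> 12582910.0f
 * Both results lie in the same binade, so their bit patterns differ by
 * exactly their integer difference: (ai - bi) >> 1 = 5 >> 1 = 2 = floor(2.7).
 */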
/**
 * Round float to nearest int.
 */
static inline int
util_iround(float f)
{
#if defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86)
   int r;
   __asm__ ("fistpl %0" : "=m" (r) : "t" (f) : "st");
   return r;
#elif defined(PIPE_CC_MSVC) && defined(PIPE_ARCH_X86)
   int r;
   _asm {
      fld f
      fistp r
   }
   return r;
#else
   if (f >= 0.0f)
      return (int) (f + 0.5f);
   else
      return (int) (f - 0.5f);
#endif
}
/**
 * Approximate floating point comparison
 */
static inline bool
util_is_approx(float a, float b, float tol)
{
   return fabsf(b - a) <= tol;
}
/**
 * util_is_X_inf_or_nan = test if x is NaN or +/- Inf
 * util_is_X_nan        = test if x is NaN
 * util_X_inf_sign      = return +1 for +Inf, -1 for -Inf, or 0 for not Inf
 *
 * NaN can be checked with x != x, however this fails with the fast math flag.
 */
static inline bool
util_is_inf_or_nan(float x)
{
   union fi tmp;
   tmp.f = x;
   return (tmp.ui & 0x7f800000) == 0x7f800000;
}


static inline bool
util_is_nan(float x)
{
   union fi tmp;
   tmp.f = x;
   return (tmp.ui & 0x7fffffff) > 0x7f800000;
}


static inline int
util_inf_sign(float x)
{
   union fi tmp;
   tmp.f = x;
   if ((tmp.ui & 0x7fffffff) != 0x7f800000) {
      return 0;
   }

   return (x < 0) ? -1 : 1;
}
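/* Illustrative usage (not in the original header; the helper name is
 * hypothetical): exercises the float classification helpers above.
 */
static inline void
u_math_float_class_example(void)
{
   assert(util_is_inf_or_nan(INFINITY));
   assert(!util_is_nan(INFINITY));
   assert(util_inf_sign(-INFINITY) == -1);
   assert(util_inf_sign(1.0f) == 0);
}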
static inline bool
util_is_double_inf_or_nan(double x)
{
   union di tmp;
   tmp.d = x;
   return (tmp.ui & 0x7ff0000000000000ULL) == 0x7ff0000000000000ULL;
}


static inline bool
util_is_double_nan(double x)
{
   union di tmp;
   tmp.d = x;
   return (tmp.ui & 0x7fffffffffffffffULL) > 0x7ff0000000000000ULL;
}


static inline int
util_double_inf_sign(double x)
{
   union di tmp;
   tmp.d = x;
   if ((tmp.ui & 0x7fffffffffffffffULL) != 0x7ff0000000000000ULL) {
      return 0;
   }

   return (x < 0) ? -1 : 1;
}
static inline bool
util_is_half_inf_or_nan(int16_t x)
{
   return (x & 0x7c00) == 0x7c00;
}


static inline bool
util_is_half_nan(int16_t x)
{
   return (x & 0x7fff) > 0x7c00;
}


static inline int
util_half_inf_sign(int16_t x)
{
   if ((x & 0x7fff) != 0x7c00) {
      return 0;
   }

   return (x < 0) ? -1 : 1;
}
/**
 * Return float bits.
 */
static inline unsigned
fui(float f)
{
   union fi fi;
   fi.f = f;
   return fi.ui;
}
/**
 * Convert uint8_t to float in [0, 1].
 */
static inline float
ubyte_to_float(uint8_t ub)
{
   return (float) ub * (1.0f / 255.0f);
}
/**
 * Convert float in [0,1] to uint8_t in [0,255] with clamping.
 */
static inline uint8_t
float_to_ubyte(float f)
{
   /* return 0 for NaN too */
   if (!(f > 0.0f)) {
      return (uint8_t) 0;
   }
   else if (f >= 1.0f) {
      return (uint8_t) 255;
   }
   else {
      union fi tmp;
      tmp.f = f;
      tmp.f = tmp.f * (255.0f/256.0f) + 32768.0f;
      return (uint8_t) tmp.i;
   }
}
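/* Worked example of the bias trick above (not in the original header):
 * for f in (0, 1), f * (255/256) + 32768 lands in [32768, 32769), where
 * float spacing is 1/256, so the mantissa's low 8 bits hold round(f * 255).
 * E.g. f = 0.5 gives 32768.498..., whose low mantissa bits are 128, so the
 * cast yields 128 == round(0.5 * 255).
 */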
/**
 * Convert uint16_t to float in [0, 1].
 */
static inline float
ushort_to_float(uint16_t us)
{
   return (float) us * (1.0f / 65535.0f);
}


/**
 * Convert float in [0,1] to uint16_t in [0,65535] with clamping.
 */
static inline uint16_t
float_to_ushort(float f)
{
   /* return 0 for NaN too */
   if (!(f > 0.0f)) {
      return (uint16_t) 0;
   }
   else if (f >= 1.0f) {
      return (uint16_t) 65535;
   }
   else {
      union fi tmp;
      tmp.f = f;
      tmp.f = tmp.f * (65535.0f/65536.0f) + 128.0f;
      return (uint16_t) tmp.i;
   }
}
/* Convert int8_t in [-128,127] to float in [-1,1]. */
static inline float
byte_to_float_tex(int8_t b)
{
   return (b == -128) ? -1.0F : b * 1.0F / 127.0F;
}

/* Convert float in [-1,1] to int8_t in [-127,127]. */
static inline int8_t
float_to_byte_tex(float f)
{
   return (int8_t) (127.0F * f);
}
/**
 * Returns floor(log2(n)), and 0 when n == 0.
 */
static inline unsigned
util_logbase2(unsigned n)
{
#if defined(HAVE___BUILTIN_CLZ)
   return ((sizeof(unsigned) * 8 - 1) - __builtin_clz(n | 1));
#else
   unsigned pos = 0;
   if (n >= 1<<16) { n >>= 16; pos += 16; }
   if (n >= 1<< 8) { n >>=  8; pos +=  8; }
   if (n >= 1<< 4) { n >>=  4; pos +=  4; }
   if (n >= 1<< 2) { n >>=  2; pos +=  2; }
   if (n >= 1<< 1) {           pos +=  1; }
   return pos;
#endif
}
static inline uint64_t
util_logbase2_64(uint64_t n)
{
#if defined(HAVE___BUILTIN_CLZLL)
   return ((sizeof(uint64_t) * 8 - 1) - __builtin_clzll(n | 1));
#else
   uint64_t pos = 0ull;
   if (n >= 1ull<<32) { n >>= 32; pos += 32; }
   if (n >= 1ull<<16) { n >>= 16; pos += 16; }
   if (n >= 1ull<< 8) { n >>=  8; pos +=  8; }
   if (n >= 1ull<< 4) { n >>=  4; pos +=  4; }
   if (n >= 1ull<< 2) { n >>=  2; pos +=  2; }
   if (n >= 1ull<< 1) {           pos +=  1; }
   return pos;
#endif
}
/**
 * Returns the ceiling of log n base 2, and 0 when n == 0. Equivalently,
 * returns the smallest x such that n <= 2**x.
 */
static inline unsigned
util_logbase2_ceil(unsigned n)
{
   if (n <= 1)
      return 0;

   return 1 + util_logbase2(n - 1);
}


static inline uint64_t
util_logbase2_ceil64(uint64_t n)
{
   if (n <= 1)
      return 0;

   return 1ull + util_logbase2_64(n - 1);
}
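/* Illustrative usage (not in the original header; the helper name is
 * hypothetical): floor vs. ceiling behaviour around a power of two.
 */
static inline void
u_math_log2_example(void)
{
   assert(util_logbase2(256) == 8);       /* floor */
   assert(util_logbase2_ceil(256) == 8);  /* exact power: same result */
   assert(util_logbase2_ceil(257) == 9);  /* rounds up */
}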
/* Power-of-two predicates used below; zero counts as a power of two in
 * the *_or_zero variants.
 */
static inline bool
util_is_power_of_two_or_zero(unsigned v)
{
   return (v & (v - 1)) == 0;
}

static inline bool
util_is_power_of_two_or_zero64(uint64_t v)
{
   return (v & (v - 1)) == 0;
}

static inline bool
util_is_power_of_two_nonzero(unsigned v)
{
   return v != 0 && (v & (v - 1)) == 0;
}


/**
 * Returns the smallest power of two >= x
 */
static inline unsigned
util_next_power_of_two(unsigned x)
{
#if defined(HAVE___BUILTIN_CLZ)
   if (x <= 1)
      return 1;

   return (1 << ((sizeof(unsigned) * 8) - __builtin_clz(x - 1)));
#else
   unsigned val = x;

   if (x <= 1)
      return 1;

   if (util_is_power_of_two_or_zero(x))
      return x;

   val--;
   val = (val >> 1) | val;
   val = (val >> 2) | val;
   val = (val >> 4) | val;
   val = (val >> 8) | val;
   val = (val >> 16) | val;
   val++;
   return val;
#endif
}
static inline uint64_t
util_next_power_of_two64(uint64_t x)
{
#if defined(HAVE___BUILTIN_CLZLL)
   if (x <= 1)
      return 1;

   return (1ull << ((sizeof(uint64_t) * 8) - __builtin_clzll(x - 1)));
#else
   uint64_t val = x;

   if (x <= 1)
      return 1;

   if (util_is_power_of_two_or_zero64(x))
      return x;

   val--;
   val = (val >> 1)  | val;
   val = (val >> 2)  | val;
   val = (val >> 4)  | val;
   val = (val >> 8)  | val;
   val = (val >> 16) | val;
   val = (val >> 32) | val;
   val++;
   return val;
#endif
}
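/* Example (not in the original header): the shift-or cascade smears the
 * top set bit downward, so val becomes all-ones below it and val + 1 is
 * the next power of two:
 *    util_next_power_of_two(17) == 32
 *    util_next_power_of_two(32) == 32   (already a power of two)
 */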
/**
 * Reverse bits in a word.
 * Algorithm taken from:
 * http://stackoverflow.com/questions/9144800/c-reverse-bits-in-unsigned-integer
 */
static inline unsigned
util_bitreverse(unsigned n)
{
   n = ((n >> 1) & 0x55555555u) | ((n & 0x55555555u) << 1);
   n = ((n >> 2) & 0x33333333u) | ((n & 0x33333333u) << 2);
   n = ((n >> 4) & 0x0f0f0f0fu) | ((n & 0x0f0f0f0fu) << 4);
   n = ((n >> 8) & 0x00ff00ffu) | ((n & 0x00ff00ffu) << 8);
   n = ((n >> 16) & 0xffffu) | ((n & 0xffffu) << 16);
   return n;
}
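/* Example (not in the original header): each line swaps progressively
 * larger groups (bits, pairs, nibbles, bytes, halfwords), so bit 0 ends
 * up at bit 31:
 *    util_bitreverse(0x00000001) == 0x80000000
 *    util_bitreverse(0x0000ffff) == 0xffff0000
 */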
/**
 * Convert from little endian to CPU byte order.
 */
#if UTIL_ARCH_BIG_ENDIAN
#define util_le64_to_cpu(x) util_bswap64(x)
#define util_le32_to_cpu(x) util_bswap32(x)
#define util_le16_to_cpu(x) util_bswap16(x)
#else
#define util_le64_to_cpu(x) (x)
#define util_le32_to_cpu(x) (x)
#define util_le16_to_cpu(x) (x)
#endif

#define util_cpu_to_le64(x) util_le64_to_cpu(x)
#define util_cpu_to_le32(x) util_le32_to_cpu(x)
#define util_cpu_to_le16(x) util_le16_to_cpu(x)
/**
 * Reverse byte order of a 32 bit word.
 */
static inline uint32_t
util_bswap32(uint32_t n)
{
#if defined(HAVE___BUILTIN_BSWAP32)
   return __builtin_bswap32(n);
#else
   return (n >> 24) |
          ((n >> 8) & 0x0000ff00) |
          ((n << 8) & 0x00ff0000) |
          (n << 24);
#endif
}


/**
 * Reverse byte order of a 64bit word.
 */
static inline uint64_t
util_bswap64(uint64_t n)
{
#if defined(HAVE___BUILTIN_BSWAP64)
   return __builtin_bswap64(n);
#else
   return ((uint64_t)util_bswap32((uint32_t)n) << 32) |
          util_bswap32((n >> 32));
#endif
}
/**
 * Reverse byte order of a 16 bit word.
 */
static inline uint16_t
util_bswap16(uint16_t n)
{
   return (n >> 8) |
          (n << 8);
}
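/* Example (not in the original header):
 *    util_bswap16(0x1234)     == 0x3412
 *    util_bswap32(0x12345678) == 0x78563412
 */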
/**
 * Sign-extend the least significant 'width' bits of 'val' to int64_t.
 */
static inline int64_t
util_sign_extend(uint64_t val, unsigned width)
{
   assert(width > 0);
   if (val & (UINT64_C(1) << (width - 1))) {
      return -(int64_t)((UINT64_C(1) << width) - val);
   } else {
      return val;
   }
}
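/* Worked example (not in the original header): the low 4 bits of 0xF are
 * 1111, which as a 4-bit two's-complement value is -1:
 *    util_sign_extend(0xF, 4) == -(int64_t)(16 - 15) == -1
 *    util_sign_extend(0x7, 4) == 7   (sign bit clear, value unchanged)
 */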
static inline void*
util_memcpy_cpu_to_le32(void * restrict dest, const void * restrict src, size_t n)
{
#if UTIL_ARCH_BIG_ENDIAN
   size_t i, e;
   assert(n % 4 == 0);

   for (i = 0, e = n / 4; i < e; i++) {
      uint32_t * restrict d = (uint32_t* restrict)dest;
      const uint32_t * restrict s = (const uint32_t* restrict)src;
      d[i] = util_bswap32(s[i]);
   }
   return dest;
#else
   return memcpy(dest, src, n);
#endif
}
/**
 * Clamp X to [MIN, MAX].
 * This is a macro to allow float, int, uint, etc. types.
 * We arbitrarily turn NaN into MIN.
 */
#define CLAMP( X, MIN, MAX )  ( (X)>(MIN) ? ((X)>(MAX) ? (MAX) : (X)) : (MIN) )

/* Syntax sugar occurring frequently in graphics code */
#define SATURATE( X ) CLAMP(X, 0.0f, 1.0f)
#define MIN2( A, B )   ( (A)<(B) ? (A) : (B) )
#define MAX2( A, B )   ( (A)>(B) ? (A) : (B) )

#define MIN3( A, B, C ) ((A) < (B) ? MIN2(A, C) : MIN2(B, C))
#define MAX3( A, B, C ) ((A) > (B) ? MAX2(A, C) : MAX2(B, C))

#define MIN4( A, B, C, D ) ((A) < (B) ? MIN3(A, C, D) : MIN3(B, C, D))
#define MAX4( A, B, C, D ) ((A) > (B) ? MAX3(A, C, D) : MAX3(B, C, D))
/**
 * Align a value up to an alignment value
 *
 * If \c value is not already aligned to the requested alignment value, it
 * will be rounded up.
 *
 * \param value  Value to be rounded
 * \param alignment  Alignment value to be used.  This must be a power of two.
 *
 * \sa ROUND_DOWN_TO()
 */
static inline uintptr_t
ALIGN(uintptr_t value, int32_t alignment)
{
   assert(util_is_power_of_two_nonzero(alignment));
   return (((value) + (alignment) - 1) & ~((alignment) - 1));
}
/**
 * Like ALIGN(), but works with a non-power-of-two alignment.
 */
static inline uintptr_t
ALIGN_NPOT(uintptr_t value, int32_t alignment)
{
   assert(alignment > 0);
   return (value + alignment - 1) / alignment * alignment;
}


/**
 * Align a value down to an alignment value
 *
 * If \c value is not already aligned to the requested alignment value, it
 * will be rounded down.
 *
 * \param value  Value to be rounded
 * \param alignment  Alignment value to be used.  This must be a power of two.
 *
 * \sa ALIGN()
 */
static inline uint64_t
ROUND_DOWN_TO(uint64_t value, int32_t alignment)
{
   assert(util_is_power_of_two_nonzero(alignment));
   return ((value) & ~(alignment - 1));
}
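/* Illustrative usage (not in the original header; the helper name is
 * hypothetical): rounding 13 with various alignments.
 */
static inline void
u_math_align_example(void)
{
   assert(ALIGN(13, 8) == 16);          /* up to the next multiple of 8 */
   assert(ROUND_DOWN_TO(13, 8) == 8);   /* down to the previous multiple */
   assert(ALIGN_NPOT(13, 6) == 18);     /* non-power-of-two alignment */
}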
/**
 * Align a value; only works with power-of-two alignments.
 */
static inline int
align(int value, int alignment)
{
   return (value + alignment - 1) & ~(alignment - 1);
}

static inline uint64_t
align64(uint64_t value, unsigned alignment)
{
   return (value + alignment - 1) & ~((uint64_t)alignment - 1);
}
/**
 * Works like align but on npot alignments.
 */
static inline size_t
util_align_npot(size_t value, size_t alignment)
{
   if (value % alignment)
      return value + (alignment - (value % alignment));
   return value;
}
static inline unsigned
u_minify(unsigned value, unsigned levels)
{
   return MAX2(1, value >> levels);
}
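/* Example (not in the original header): mipmap dimensions for a 100-texel
 * axis: u_minify(100, 1) == 50, u_minify(100, 6) == 1, and deeper levels
 * stay clamped to 1.
 */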
#define COPY_4V( DST, SRC )         \
do {                                \
   (DST)[0] = (SRC)[0];             \
   (DST)[1] = (SRC)[1];             \
   (DST)[2] = (SRC)[2];             \
   (DST)[3] = (SRC)[3];             \
} while (0)


#define COPY_4FV( DST, SRC ) COPY_4V(DST, SRC)


#define ASSIGN_4V( DST, V0, V1, V2, V3 ) \
do {                                     \
   (DST)[0] = (V0);                      \
   (DST)[1] = (V1);                      \
   (DST)[2] = (V2);                      \
   (DST)[3] = (V3);                      \
} while (0)
/* Convert a float to unsigned fixed point with 'frac_bits' fractional
 * bits; negative inputs clamp to 0.
 */
static inline uint32_t
util_unsigned_fixed(float value, unsigned frac_bits)
{
   return value < 0 ? 0 : (uint32_t)(value * (1<<frac_bits));
}

/* Convert a float to signed fixed point with 'frac_bits' fractional bits. */
static inline int32_t
util_signed_fixed(float value, unsigned frac_bits)
{
   return (int32_t)(value * (1<<frac_bits));
}
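/* Worked example (not in the original header): with 4 fractional bits the
 * scale factor is 1 << 4 = 16, so 2.5 encodes as 2.5 * 16 = 40 (binary
 * 10.1000 fixed point):
 *    util_unsigned_fixed(2.5f, 4) == 40
 *    util_signed_fixed(-2.5f, 4)  == -40
 */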
unsigned
util_fpstate_get(void);

unsigned
util_fpstate_set_denorms_to_zero(unsigned current_fpstate);

void
util_fpstate_set(unsigned fpstate);
/**
 * For indexed draw calls, return true if the vertex count to be drawn is
 * much lower than the vertex count that has to be uploaded, meaning
 * that the driver should flatten indices instead of trying to upload
 * a too big range.
 *
 * This is used by vertex upload code in u_vbuf and glthread.
 */
static inline bool
util_is_vbo_upload_ratio_too_large(unsigned draw_vertex_count,
                                   unsigned upload_vertex_count)
{
   if (draw_vertex_count > 1024)
      return upload_vertex_count > draw_vertex_count * 4;
   else if (draw_vertex_count > 32)
      return upload_vertex_count > draw_vertex_count * 8;
   else
      return upload_vertex_count > draw_vertex_count * 16;
}
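/* Example (not in the original header): a draw of 100 vertices falls in
 * the middle bucket, so uploads are flagged once they exceed 8x the draw
 * count:
 *    util_is_vbo_upload_ratio_too_large(100, 900) -> true  (900 > 800)
 *    util_is_vbo_upload_ratio_too_large(100, 700) -> false (700 <= 800)
 */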
bool util_invert_mat4x4(float *out, const float *m);
/* Quantize the lod bias value to reduce the number of sampler state
 * variants in gallium because apps use it for smooth mipmap transitions,
 * thrashing cso_cache and degrading performance.
 *
 * This quantization matches the AMD hw specification, so having more
 * precision would have no effect anyway.
 */
static inline float
util_quantize_lod_bias(float lod)
{
   lod = CLAMP(lod, -16, 16);
   return roundf(lod * 256) / 256;
}
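/* Worked example (not in the original header): a bias of 0.333 snaps to
 * the nearest 1/256 step:
 *    roundf(0.333f * 256) / 256 = roundf(85.248f) / 256 = 85 / 256
 *                               = 0.33203125f
 */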
#endif /* U_MATH_H */