~ubuntu-branches/ubuntu/gutsy/vnc4/gutsy

« back to all changes in this revision

Viewing changes to unix/xc/extras/Mesa/src/math/m_debug_util.h

  • Committer: Bazaar Package Importer
  • Author(s): Ola Lundqvist
  • Date: 2006-05-15 20:35:17 UTC
  • mfrom: (1.1.2 upstream)
  • Revision ID: james.westby@ubuntu.com-20060515203517-l4lre1ku942mn26k
Tags: 4.1.1+X4.3.0-10
* Correction of critical security issue. Thanks to Martin Kogler
  <e9925248@student.tuwien.ac.at>, who informed me about the issue
  and provided the patch.
  This flaw was originally found by Steve Wiseman of intelliadmin.com.
* Applied patch from Javier Kohen <jkohen@users.sourceforge.net> that
  informs the user that only the first 8 characters of the password
  will actually be used when more than 8 characters are typed, closes:
  #355619.

Show diffs side-by-side

added added

removed removed

Lines of Context:
 
1
 
 
2
/*
 
3
 * Mesa 3-D graphics library
 
4
 * Version:  3.5
 
5
 *
 
6
 * Copyright (C) 1999-2001  Brian Paul   All Rights Reserved.
 
7
 *
 
8
 * Permission is hereby granted, free of charge, to any person obtaining a
 
9
 * copy of this software and associated documentation files (the "Software"),
 
10
 * to deal in the Software without restriction, including without limitation
 
11
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 
12
 * and/or sell copies of the Software, and to permit persons to whom the
 
13
 * Software is furnished to do so, subject to the following conditions:
 
14
 *
 
15
 * The above copyright notice and this permission notice shall be included
 
16
 * in all copies or substantial portions of the Software.
 
17
 *
 
18
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 
19
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 
20
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 
21
 * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
 
22
 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 
23
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
24
 *
 
25
 * Authors:
 
26
 *    Gareth Hughes <gareth@valinux.com>
 
27
 */
 
28
 
 
29
#ifndef __M_DEBUG_UTIL_H__
 
30
#define __M_DEBUG_UTIL_H__
 
31
 
 
32
 
 
33
#ifdef DEBUG  /* This code only used for debugging */
 
34
 
 
35
 
 
36
/* Comment this out to deactivate the cycle counter.
 
37
 * NOTE: it works only on CPUs which know the 'rdtsc' command (586 or higher)
 
38
 * (hope, you don't try to debug Mesa on a 386 ;)
 
39
 */
 
40
#if defined(__GNUC__) && \
 
41
    ((defined(__i386__) && defined(USE_X86_ASM)) || \
 
42
     (defined(__sparc__) && defined(USE_SPARC_ASM)))
 
43
#define  RUN_DEBUG_BENCHMARK
 
44
#endif
 
45
 
 
46
#define TEST_COUNT              128     /* size of the tested vector array   */
 
47
 
 
48
#define REQUIRED_PRECISION      10      /* allow 4 bits to miss              */
 
49
#define MAX_PRECISION           24      /* max. precision possible           */
 
50
 
 
51
 
 
52
#ifdef  RUN_DEBUG_BENCHMARK
 
53
/* Overhead of profiling counter in cycles.  Automatically adjusted to
 
54
 * your machine at run time - counter initialization should give very
 
55
 * consistent results.
 
56
 */
 
57
extern long counter_overhead;
 
58
 
 
59
/* This is the value of the environment variable MESA_PROFILE, and is
 
60
 * used to determine if we should benchmark the functions as well as
 
61
 * verify their correctness.
 
62
 */
 
63
extern char *mesa_profile;
 
64
 
 
65
/* Modify the the number of tests if you like.
 
66
 * We take the minimum of all results, because every error should be
 
67
 * positive (time used by other processes, task switches etc).
 
68
 * It is assumed that all calculations are done in the cache.
 
69
 */
 
70
 
 
71
#if defined(__i386__)
 
72
 
 
73
#if 1 /* PPro, PII, PIII version */
 
74
 
 
75
/* Profiling on the P6 architecture requires a little more work, due to
 
76
 * the internal out-of-order execution.  We must perform a serializing
 
77
 * 'cpuid' instruction before and after the 'rdtsc' instructions to make
 
78
 * sure no other uops are executed when we sample the timestamp counter.
 
79
 */
 
80
#define  INIT_COUNTER()                                                 \
 
81
   do {                                                                 \
 
82
      int cycle_i;                                                      \
 
83
      counter_overhead = LONG_MAX;                                      \
 
84
      for ( cycle_i = 0 ; cycle_i < 8 ; cycle_i++ ) {                   \
 
85
         long cycle_tmp1 = 0, cycle_tmp2 = 0;                           \
 
86
         __asm__ __volatile__ ( "push %%ebx       \n"                   \
 
87
                                "xor %%eax, %%eax \n"                   \
 
88
                                "cpuid            \n"                   \
 
89
                                "rdtsc            \n"                   \
 
90
                                "mov %%eax, %0    \n"                   \
 
91
                                "xor %%eax, %%eax \n"                   \
 
92
                                "cpuid            \n"                   \
 
93
                                "pop %%ebx        \n"                   \
 
94
                                "push %%ebx       \n"                   \
 
95
                                "xor %%eax, %%eax \n"                   \
 
96
                                "cpuid            \n"                   \
 
97
                                "rdtsc            \n"                   \
 
98
                                "mov %%eax, %1    \n"                   \
 
99
                                "xor %%eax, %%eax \n"                   \
 
100
                                "cpuid            \n"                   \
 
101
                                "pop %%ebx        \n"                   \
 
102
                                : "=m" (cycle_tmp1), "=m" (cycle_tmp2)  \
 
103
                                : : "eax", "ecx", "edx" );              \
 
104
         if ( counter_overhead > (cycle_tmp2 - cycle_tmp1) ) {          \
 
105
            counter_overhead = cycle_tmp2 - cycle_tmp1;                 \
 
106
         }                                                              \
 
107
      }                                                                 \
 
108
   } while (0)
 
109
 
 
110
#define  BEGIN_RACE(x)                                                  \
 
111
   x = LONG_MAX;                                                        \
 
112
   for ( cycle_i = 0 ; cycle_i < 10 ; cycle_i++ ) {                     \
 
113
      long cycle_tmp1 = 0, cycle_tmp2 = 0;                              \
 
114
      __asm__ __volatile__ ( "push %%ebx       \n"                      \
 
115
                             "xor %%eax, %%eax \n"                      \
 
116
                             "cpuid            \n"                      \
 
117
                             "rdtsc            \n"                      \
 
118
                             "mov %%eax, %0    \n"                      \
 
119
                             "xor %%eax, %%eax \n"                      \
 
120
                             "cpuid            \n"                      \
 
121
                             "pop %%ebx        \n"                      \
 
122
                             : "=m" (cycle_tmp1)                        \
 
123
                             : : "eax", "ecx", "edx" );
 
124
 
 
125
#define END_RACE(x)                                                     \
 
126
      __asm__ __volatile__ ( "push %%ebx       \n"                      \
 
127
                             "xor %%eax, %%eax \n"                      \
 
128
                             "cpuid            \n"                      \
 
129
                             "rdtsc            \n"                      \
 
130
                             "mov %%eax, %0    \n"                      \
 
131
                             "xor %%eax, %%eax \n"                      \
 
132
                             "cpuid            \n"                      \
 
133
                             "pop %%ebx        \n"                      \
 
134
                             : "=m" (cycle_tmp2)                        \
 
135
                             : : "eax", "ecx", "edx" );                 \
 
136
      if ( x > (cycle_tmp2 - cycle_tmp1) ) {                            \
 
137
         x = cycle_tmp2 - cycle_tmp1;                                   \
 
138
      }                                                                 \
 
139
   }                                                                    \
 
140
   x -= counter_overhead;
 
141
 
 
142
#else /* PPlain, PMMX version */
 
143
 
 
144
/* To ensure accurate results, we stall the pipelines with the
 
145
 * non-pairable 'cdq' instruction.  This ensures all the code being
 
146
 * profiled is complete when the 'rdtsc' instruction executes.
 
147
 */
 
148
#define  INIT_COUNTER(x)                                                \
 
149
   do {                                                                 \
 
150
      int cycle_i;                                                      \
 
151
      x = LONG_MAX;                                                     \
 
152
      for ( cycle_i = 0 ; cycle_i < 32 ; cycle_i++ ) {                  \
 
153
         long cycle_tmp1, cycle_tmp2, dummy;                            \
 
154
         __asm__ ( "mov %%eax, %0" : "=a" (cycle_tmp1) );               \
 
155
         __asm__ ( "mov %%eax, %0" : "=a" (cycle_tmp2) );               \
 
156
         __asm__ ( "cdq" );                                             \
 
157
         __asm__ ( "cdq" );                                             \
 
158
         __asm__ ( "rdtsc" : "=a" (cycle_tmp1), "=d" (dummy) );         \
 
159
         __asm__ ( "cdq" );                                             \
 
160
         __asm__ ( "cdq" );                                             \
 
161
         __asm__ ( "rdtsc" : "=a" (cycle_tmp2), "=d" (dummy) );         \
 
162
         if ( x > (cycle_tmp2 - cycle_tmp1) )                           \
 
163
            x = cycle_tmp2 - cycle_tmp1;                                \
 
164
      }                                                                 \
 
165
   } while (0)
 
166
 
 
167
#define  BEGIN_RACE(x)                                                  \
 
168
   x = LONG_MAX;                                                        \
 
169
   for ( cycle_i = 0 ; cycle_i < 16 ; cycle_i++ ) {                     \
 
170
      long cycle_tmp1, cycle_tmp2, dummy;                               \
 
171
      __asm__ ( "mov %%eax, %0" : "=a" (cycle_tmp1) );                  \
 
172
      __asm__ ( "mov %%eax, %0" : "=a" (cycle_tmp2) );                  \
 
173
      __asm__ ( "cdq" );                                                \
 
174
      __asm__ ( "cdq" );                                                \
 
175
      __asm__ ( "rdtsc" : "=a" (cycle_tmp1), "=d" (dummy) );
 
176
 
 
177
 
 
178
#define END_RACE(x)                                                     \
 
179
      __asm__ ( "cdq" );                                                \
 
180
      __asm__ ( "cdq" );                                                \
 
181
      __asm__ ( "rdtsc" : "=a" (cycle_tmp2), "=d" (dummy) );            \
 
182
      if ( x > (cycle_tmp2 - cycle_tmp1) )                              \
 
183
         x = cycle_tmp2 - cycle_tmp1;                                   \
 
184
   }                                                                    \
 
185
   x -= counter_overhead;
 
186
 
 
187
#endif
 
188
 
 
189
#elif defined(__sparc__)
 
190
 
 
191
#define  INIT_COUNTER() \
 
192
         do { counter_overhead = 5; } while(0)
 
193
 
 
194
#define  BEGIN_RACE(x)                                                        \
 
195
x = LONG_MAX;                                                                 \
 
196
for (cycle_i = 0; cycle_i <10; cycle_i++) {                                   \
 
197
   register long cycle_tmp1 asm("l0");                                        \
 
198
   register long cycle_tmp2 asm("l1");                                        \
 
199
   /* rd %tick, %l0 */                                                        \
 
200
   __asm__ __volatile__ (".word 0xa1410000" : "=r" (cycle_tmp1));  /*  save timestamp   */
 
201
 
 
202
#define END_RACE(x)                                                           \
 
203
   /* rd %tick, %l1 */                                                        \
 
204
   __asm__ __volatile__ (".word 0xa3410000" : "=r" (cycle_tmp2));             \
 
205
   if (x > (cycle_tmp2-cycle_tmp1)) x = cycle_tmp2 - cycle_tmp1;              \
 
206
}                                                                             \
 
207
x -= counter_overhead;
 
208
 
 
209
#else
 
210
#error Your processor is not supported for RUN_XFORM_BENCHMARK
 
211
#endif
 
212
 
 
213
#else
 
214
 
 
215
#define BEGIN_RACE(x)
 
216
#define END_RACE(x)
 
217
 
 
218
#endif
 
219
 
 
220
 
 
221
/* =============================================================
 
222
 * Helper functions
 
223
 */
 
224
 
 
225
static GLfloat rnd( void )
 
226
{
 
227
   GLfloat f = (GLfloat)rand() / (GLfloat)RAND_MAX;
 
228
   GLfloat gran = (GLfloat)(1 << 13);
 
229
 
 
230
   f = (GLfloat)(GLint)(f * gran) / gran;
 
231
 
 
232
   return f * 2.0 - 1.0;
 
233
}
 
234
 
 
235
static int significand_match( GLfloat a, GLfloat b )
 
236
{
 
237
   GLfloat d = a - b;
 
238
   int a_ex, b_ex, d_ex;
 
239
 
 
240
   if ( d == 0.0F ) {
 
241
      return MAX_PRECISION;   /* Exact match */
 
242
   }
 
243
 
 
244
   if ( a == 0.0F || b == 0.0F ) {
 
245
      /* It would probably be better to check if the
 
246
       * non-zero number is denormalized and return
 
247
       * the index of the highest set bit here.
 
248
       */
 
249
      return 0;
 
250
   }
 
251
 
 
252
   frexp( a, &a_ex );
 
253
   frexp( b, &b_ex );
 
254
   frexp( d, &d_ex );
 
255
 
 
256
   if ( a_ex < b_ex ) {
 
257
      return a_ex - d_ex;
 
258
   } else {
 
259
      return b_ex - d_ex;
 
260
   }
 
261
}
 
262
 
 
263
enum { NIL = 0, ONE = 1, NEG = -1, VAR = 2 };
 
264
 
 
265
static void init_matrix( GLfloat *m )
 
266
{
 
267
   m[0] = 63.0; m[4] = 43.0; m[ 8] = 29.0; m[12] = 43.0;
 
268
   m[1] = 55.0; m[5] = 17.0; m[ 9] = 31.0; m[13] =  7.0;
 
269
   m[2] = 44.0; m[6] =  9.0; m[10] =  7.0; m[14] =  3.0;
 
270
   m[3] = 11.0; m[7] = 23.0; m[11] = 91.0; m[15] =  9.0;
 
271
}
 
272
 
 
273
 
 
274
/* Ensure our arrays are correctly aligned.
 
275
 */
 
276
#if defined(__GNUC__)
 
277
#  define ALIGN16       __attribute__ ((aligned (16)))
 
278
#elif defined(__MSC__)
 
279
#  define ALIGN16       __declspec(align(16)) /* GH: Does this work? */
 
280
#else
 
281
#  warning "ALIGN16 will not 16-byte align!\n"
 
282
#  define ALIGN16
 
283
#endif
 
284
 
 
285
 
 
286
#endif /* DEBUG */
 
287
 
 
288
#endif /* __M_DEBUG_UTIL_H__ */