HDK
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
VM_BasicAVXFunc.h
Go to the documentation of this file.
1 /*
2  * PROPRIETARY INFORMATION. This software is proprietary to
3  * Side Effects Software Inc., and is not to be reproduced,
4  * transmitted, or disclosed in any way without written permission.
5  *
6  * NAME: VM_BasicAVXFunc.h ( VM Library, C++)
7  *
8  * COMMENTS:
9  */
10 
11 #ifndef __VM_BasicFunc__
12 #define __VM_BasicFunc__
13 
#include "VM_API.h"

#include <SYS/SYS_Math.h>
#include <SYS/SYS_Types.h>

#include <string.h>
17 
18 // Uncomment this define to "test" with the basic instructions. These are
19 // brain-dead slow, so they shouldn't be used in actual code.
20 //#define CPU_HAS_AVX_INSTR 1
21 
22 class v8si;
23 
24 class v8sf {
25 public:
26  v8sf() {}
27  v8sf(float a) { f[0] = f[1] = f[2] = f[3] = f[4] = f[5] = f[6] = f[7] = a; }
28  v8sf(float a0, float a1, float a2, float a3,
29  float a4, float a5, float a6, float a7)
30  {
31  f[0] = a0;
32  f[1] = a1;
33  f[2] = a2;
34  f[3] = a3;
35  f[4] = a4;
36  f[5] = a5;
37  f[6] = a6;
38  f[7] = a7;
39  }
40 
41  operator v8si() const;
42  fpreal32 f[8];
43 };
44 
45 class v8si {
46 public:
47  v8si() {}
48  v8si(int32 a) { i[0] = i[1] = i[2] = i[3] = i[4] = i[5] = i[6] = i[7] = a; }
49  v8si(int32 a0, int32 a1, int32 a2, int32 a3,
50  int32 a4, int32 a5, int32 a6, int32 a7)
51  {
52  i[0] = a0;
53  i[1] = a1;
54  i[2] = a2;
55  i[3] = a3;
56  i[4] = a4;
57  i[5] = a5;
58  i[6] = a6;
59  i[7] = a7;
60  }
61 
62  operator v8sf() const;
63  int32 i[8];
64 };
65 
66 inline
67 v8sf::operator v8si() const
68 {
69  return *(v8si *)this;
70 }
71 
72 inline
73 v8si::operator v8sf() const
74 {
75  return *(v8sf *)this;
76 }
77 
// Convert between the two vector types via their conversion operators.
// Argument and expansion are parenthesized so an expression argument
// binds as intended — the original "(v8sf)A" would parse V8SF(a + b) as
// ((v8sf)a) + b.
#define V8SF(A)	((v8sf)(A))
#define V8SI(A)	((v8si)(A))
80 
// Lane-wise float OP float, with the (truncated) result stored into the
// integer lanes of a v8si.  Expands to a full statement sequence ending
// in a return, so it must be the body of a function with locals a and b.
#define vm_BASIC_IFF_AVX(OP) \
	    v8si r; \
	    for (int vm_j = 0; vm_j < 8; vm_j++) \
		r.i[vm_j] = a.f[vm_j] OP b.f[vm_j]; \
	    return r;
92 
// Lane-wise float comparison producing an integer mask: each result lane
// is all-ones (0xFFFFFFFF) when "a OP b" holds for that lane, else 0.
#define vm_BASIC_CF_AVX(OP) \
	    v8si r; \
	    for (int vm_j = 0; vm_j < 8; vm_j++) \
		r.i[vm_j] = a.f[vm_j] OP b.f[vm_j] ? 0xFFFFFFFF : 0; \
	    return r;
104 
// Lane-wise integer comparison producing an integer mask: each result
// lane is all-ones (0xFFFFFFFF) when "a OP b" holds, else 0.
#define vm_BASIC_CI_AVX(OP) \
	    v8si r; \
	    for (int vm_j = 0; vm_j < 8; vm_j++) \
		r.i[vm_j] = a.i[vm_j] OP b.i[vm_j] ? 0xFFFFFFFF : 0; \
	    return r;
116 
// Lane-wise integer OP integer -> integer.
#define vm_BASIC_III_AVX(OP) \
	    v8si r; \
	    for (int vm_j = 0; vm_j < 8; vm_j++) \
		r.i[vm_j] = a.i[vm_j] OP b.i[vm_j]; \
	    return r;
128 
// Lane-wise float OP float -> float.
#define vm_BASIC_FFF_AVX(OP) \
	    v8sf r; \
	    for (int vm_j = 0; vm_j < 8; vm_j++) \
		r.f[vm_j] = a.f[vm_j] OP b.f[vm_j]; \
	    return r;
140 
// Lane-wise ternary float expression: a OP1 b OP2 c (e.g. (*, +) gives
// the multiply-add a*b + c, subject to normal operator precedence).
#define vm_BASIC_FFFF_AVX(OP1, OP2) \
	    v8sf r; \
	    for (int vm_j = 0; vm_j < 8; vm_j++) \
		r.f[vm_j] = a.f[vm_j] OP1 b.f[vm_j] OP2 c.f[vm_j]; \
	    return r;
152 
// Apply a unary scalar function (or prefix expression such as "1/" or
// "-") to every lane.  FUNC is pasted directly in front of "(lane)".
#define vm_BASIC_UFuncF_AVX(FUNC) \
	    v8sf r; \
	    for (int vm_j = 0; vm_j < 8; vm_j++) \
		r.f[vm_j] = FUNC(a.f[vm_j]); \
	    return r;
164 
// Apply a binary scalar function (e.g. SYSmin/SYSmax) lane-by-lane.
#define vm_BASIC_UFuncFF_AVX(FUNC) \
	    v8sf r; \
	    for (int vm_j = 0; vm_j < 8; vm_j++) \
		r.f[vm_j] = FUNC(a.f[vm_j], b.f[vm_j]); \
	    return r;
176 
177 static inline v8si vm_clt_avx(const v8sf &a, const v8sf &b){ vm_BASIC_CF_AVX(<) }
178 static inline v8si vm_cle_avx(const v8sf &a, const v8sf &b){ vm_BASIC_CF_AVX(<=) }
179 static inline v8si vm_cgt_avx(const v8sf &a, const v8sf &b){ vm_BASIC_CF_AVX(>) }
180 static inline v8si vm_cge_avx(const v8sf &a, const v8sf &b){ vm_BASIC_CF_AVX(>=) }
181 static inline v8si vm_ceq_avx(const v8sf &a, const v8sf &b){ vm_BASIC_CF_AVX(==) }
182 static inline v8si vm_cne_avx(const v8sf &a, const v8sf &b){ vm_BASIC_CF_AVX(!=) }
183 
184 static inline v8si vm_clt_avx(const v8si &a, const v8si &b){ vm_BASIC_CI_AVX(<) }
185 static inline v8si vm_cle_avx(const v8si &a, const v8si &b){ vm_BASIC_CI_AVX(<=) }
186 static inline v8si vm_cgt_avx(const v8si &a, const v8si &b){ vm_BASIC_CI_AVX(>) }
187 static inline v8si vm_cge_avx(const v8si &a, const v8si &b){ vm_BASIC_CI_AVX(>=) }
188 static inline v8si vm_ceq_avx(const v8si &a, const v8si &b){ vm_BASIC_CI_AVX(==) }
189 static inline v8si vm_cne_avx(const v8si &a, const v8si &b){ vm_BASIC_CI_AVX(!=) }
190 
191 static inline v8si vm_add_avx(const v8si &a, const v8si &b){ vm_BASIC_III_AVX(+) }
192 static inline v8si vm_sub_avx(const v8si &a, const v8si &b){ vm_BASIC_III_AVX(-) }
193 
194 static inline v8sf vm_add_avx(const v8sf &a, const v8sf &b){ vm_BASIC_FFF_AVX(+) }
195 static inline v8sf vm_sub_avx(const v8sf &a, const v8sf &b){ vm_BASIC_FFF_AVX(-) }
196 static inline v8sf vm_mul_avx(const v8sf &a, const v8sf &b){ vm_BASIC_FFF_AVX(*) }
197 static inline v8sf vm_div_avx(const v8sf &a, const v8sf &b){ vm_BASIC_FFF_AVX(/) }
198 
199 static inline v8si vm_and_avx(const v8si &a, const v8si &b){ vm_BASIC_III_AVX(&) }
200 static inline v8si vm_or_avx(const v8si &a, const v8si &b){ vm_BASIC_III_AVX(|) }
201 static inline v8si vm_xor_avx(const v8si &a, const v8si &b){ vm_BASIC_III_AVX(^) }
202 
203 static inline v8si vm_andnot_avx(const v8si &a, const v8si &b)
204 {
205  v8si r;
206  r.i[0] = ~a.i[0] & b.i[0];
207  r.i[1] = ~a.i[1] & b.i[1];
208  r.i[2] = ~a.i[2] & b.i[2];
209  r.i[3] = ~a.i[3] & b.i[3];
210  r.i[4] = ~a.i[4] & b.i[4];
211  r.i[5] = ~a.i[5] & b.i[5];
212  r.i[6] = ~a.i[6] & b.i[6];
213  r.i[7] = ~a.i[7] & b.i[7];
214  return r;
215 }
216 
217 static inline v8sf
218 vm_min_avx(const v8sf &a, const v8sf &b) { vm_BASIC_UFuncFF_AVX(SYSmin) }
219 static inline v8sf
220 vm_max_avx(const v8sf &a, const v8sf &b) { vm_BASIC_UFuncFF_AVX(SYSmax) }
221 
222 static inline v8sf
223 vm_madd_avx(const v8sf &a, const v8sf &b, const v8sf &c) { vm_BASIC_FFFF_AVX(*, +) }
224 
225 static inline v8sf vm_sqrt_avx(const v8sf &a) { vm_BASIC_UFuncF_AVX(SYSsqrt) }
226 static inline v8sf vm_isqrt_avx(const v8sf &a) { vm_BASIC_UFuncF_AVX(1/SYSsqrt) }
227 static inline v8sf vm_negate_avx(const v8sf &a) { vm_BASIC_UFuncF_AVX(-) }
228 static inline v8sf vm_reciprocal_avx(const v8sf &a) { vm_BASIC_UFuncF_AVX(1/) }
229 static inline v8sf vm_abs_avx(const v8sf &a) { vm_BASIC_UFuncF_AVX(SYSabs) }
230 
231 static inline v8si vm_floor_avx(const v8sf &a)
232 {
233  v8si r;
234  r.i[0] = (int)SYSfastFloor(a.f[0]);
235  r.i[1] = (int)SYSfastFloor(a.f[1]);
236  r.i[2] = (int)SYSfastFloor(a.f[2]);
237  r.i[3] = (int)SYSfastFloor(a.f[3]);
238  r.i[4] = (int)SYSfastFloor(a.f[4]);
239  r.i[5] = (int)SYSfastFloor(a.f[5]);
240  r.i[6] = (int)SYSfastFloor(a.f[6]);
241  r.i[7] = (int)SYSfastFloor(a.f[7]);
242  return r;
243 }
244 
245 static inline v8si vm_intcast_avx(const v8sf &a)
246 {
247  v8si r;
248  r.i[0] = (int)a.f[0];
249  r.i[1] = (int)a.f[1];
250  r.i[2] = (int)a.f[2];
251  r.i[3] = (int)a.f[3];
252  r.i[4] = (int)a.f[4];
253  r.i[5] = (int)a.f[5];
254  r.i[6] = (int)a.f[6];
255  r.i[7] = (int)a.f[7];
256  return r;
257 }
258 
259 static inline v8sf vm_floatcast_avx(const v8si &a)
260 {
261  v8sf r;
262  r.f[0] = (float)a.i[0];
263  r.f[1] = (float)a.i[1];
264  r.f[2] = (float)a.i[2];
265  r.f[3] = (float)a.i[3];
266  r.f[4] = (float)a.i[4];
267  r.f[5] = (float)a.i[5];
268  r.f[6] = (float)a.i[6];
269  r.f[7] = (float)a.i[7];
270  return r;
271 }
272 
273 static inline v8si
274 vm_splats_avx(uint32 a)
275 {
276  return v8si(a);
277 }
278 
279 static inline v8si
280 vm_splats_avx(int32 a)
281 {
282  return v8si(a);
283 }
284 
285 static inline v8sf
286 vm_splats_avx(float a)
287 {
288  return v8sf(a);
289 }
290 
291 static inline v8si
292 vm_splats_avx(uint32 a, uint32 b, uint32 c, uint32 d,
293  uint32 e, uint32 f, uint32 g, uint32 h)
294 {
295  return v8si(a, b, c, d, e, f, g, h);
296 }
297 
298 static inline v8si
299 vm_splats_avx(int32 a, int32 b, int32 c, int32 d,
300  int32 e, int32 f, int32 g, int32 h)
301 {
302  return v8si(a, b, c, d, e, f, g, h);
303 }
304 
305 static inline v8sf
306 vm_splats_avx(float a, float b, float c, float d,
307  float e, float f, float g, float h)
308 {
309  return v8sf(a, b, c, d, e, f, g, h);
310 }
311 
312 static inline bool
313 vm_allbits_avx(const v8si &a)
314 {
315  return (a.i[0] & a.i[1] & a.i[2] & a.i[3] &
316  a.i[4] & a.i[5] & a.i[6] & a.i[7]) == 0xFFFFFFFF;
317 }
318 
319 template <int A, int B, int C, int D>
320 static inline v8sf
321 vm_shuffle_avx(const v8sf &v)
322 {
323  v8sf vec;
324 
325  vec.f[0] = v.f[A];
326  vec.f[1] = v.f[B];
327  vec.f[2] = v.f[C];
328  vec.f[3] = v.f[D];
329  vec.f[4] = v.f[A];
330  vec.f[5] = v.f[B];
331  vec.f[6] = v.f[C];
332  vec.f[7] = v.f[D];
333 
334  return vec;
335 }
336 
337 static inline v8si
338 vm_load_avx(const int32 v[8])
339 {
340  return v8si(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);
341 }
342 
343 static inline v8sf
344 vm_load_avx(const float v[8])
345 {
346  return v8sf(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);
347 }
348 
349 static inline void
350 vm_store_avx(float dst[8], v8sf value)
351 {
352  dst[0] = value.f[0];
353  dst[1] = value.f[1];
354  dst[2] = value.f[2];
355  dst[3] = value.f[3];
356  dst[4] = value.f[4];
357  dst[5] = value.f[5];
358  dst[6] = value.f[6];
359  dst[7] = value.f[7];
360 }
361 
362 
363 static inline v8si
364 vm_insert_avx(const v8si v, int32 a, int n)
365 {
366  v8si vec;
367 
368  vec = v;
369  vec.i[n] = a;
370  return vec;
371 }
372 
373 static inline v8sf
374 vm_insert_avx(const v8sf v, float a, int n)
375 {
376  v8sf vec;
377 
378  vec = v;
379  vec.f[n] = a;
380  return vec;
381 }
382 
383 static inline int
384 vm_extract_avx(const v8si v, int n)
385 {
386  return v.i[n];
387 }
388 
389 static inline float
390 vm_extract_avx(const v8sf v, int n)
391 {
392  return v.f[n];
393 }
394 
395 static inline v8si
396 vm_shiftleft_avx(const v8si& a, int c)
397 {
398  v8si r = a;
399  ((uint32*)r.i)[0] <<= c;
400  ((uint32*)r.i)[1] <<= c;
401  ((uint32*)r.i)[2] <<= c;
402  ((uint32*)r.i)[3] <<= c;
403  ((uint32*)r.i)[4] <<= c;
404  ((uint32*)r.i)[5] <<= c;
405  ((uint32*)r.i)[6] <<= c;
406  ((uint32*)r.i)[7] <<= c;
407  return r;
408 }
409 
410 static inline v8si
411 vm_shiftright_avx(const v8si& a, int c)
412 {
413  v8si r = a;
414  ((uint32*)r.i)[0] >>= c;
415  ((uint32*)r.i)[1] >>= c;
416  ((uint32*)r.i)[2] >>= c;
417  ((uint32*)r.i)[3] >>= c;
418  ((uint32*)r.i)[4] >>= c;
419  ((uint32*)r.i)[5] >>= c;
420  ((uint32*)r.i)[6] >>= c;
421  ((uint32*)r.i)[7] >>= c;
422  return r;
423 }
424 
// Map the generic VM_*_AVX opcode macros onto the basic (scalar-emulated)
// implementations above, mirroring the names exported by the real AVX
// wrappers so VM code compiles against either backend.

// Lane access, broadcast, and memory transfer.
#define VM_EXTRACT_AVX vm_extract_avx
#define VM_INSERT_AVX vm_insert_avx
#define VM_SPLATS_AVX vm_splats_avx
#define VM_LOAD_AVX vm_load_avx
#define VM_STORE_AVX vm_store_avx

// Float comparisons (results are all-ones/zero lane masks).
#define VM_CMPLT_AVX vm_clt_avx
#define VM_CMPLE_AVX vm_cle_avx
#define VM_CMPGT_AVX vm_cgt_avx
#define VM_CMPGE_AVX vm_cge_avx
#define VM_CMPEQ_AVX vm_ceq_avx
#define VM_CMPNE_AVX vm_cne_avx

// Integer comparisons (resolve to the v8si overloads).
#define VM_ICMPLT_AVX vm_clt_avx
#define VM_ICMPGT_AVX vm_cgt_avx
#define VM_ICMPEQ_AVX vm_ceq_avx

// Integer arithmetic.
#define VM_IADD_AVX vm_add_avx
#define VM_ISUB_AVX vm_sub_avx

// Float arithmetic.
#define VM_ADD_AVX vm_add_avx
#define VM_SUB_AVX vm_sub_avx
#define VM_MUL_AVX vm_mul_avx
#define VM_DIV_AVX vm_div_avx
#define VM_FDIV_AVX vm_div_avx
#define VM_NEG_AVX vm_negate_avx
#define VM_SQRT_AVX vm_sqrt_avx
#define VM_FSQRT_AVX vm_sqrt_avx
#define VM_ISQRT_AVX vm_isqrt_avx
#define VM_ABS_AVX vm_abs_avx

#define VM_MADD_AVX vm_madd_avx
#define VM_INVERT_AVX vm_reciprocal_avx

#define VM_MIN_AVX vm_min_avx
#define VM_MAX_AVX vm_max_avx

// Bitwise operations on integer vectors.
#define VM_AND_AVX vm_and_avx
#define VM_ANDNOT_AVX vm_andnot_avx
#define VM_OR_AVX vm_or_avx
#define VM_XOR_AVX vm_xor_avx

#define VM_ALLBITS_AVX vm_allbits_avx

#define VM_SHUFFLE_AVX vm_shuffle_avx

// Floor prologue/epilogue are no-ops in this basic backend.
// NOTE(review): the SIMD backends presumably use these hooks to set and
// restore the FP rounding mode — confirm against VM_AVXFunc.h.
#define VM_P_FLOOR_AVX()
#define VM_FLOOR_AVX vm_floor_avx
#define VM_E_FLOOR_AVX()

// float -> int truncation.
#define VM_INT_AVX vm_intcast_avx

// int -> float conversion.
#define VM_IFLOAT_AVX vm_floatcast_avx

// Bit shifting: A = v8si, C = int shift count.
#define VM_SHIFTLEFT_AVX(A,C) vm_shiftleft_avx(A,C)
#define VM_SHIFTRIGHT_AVX(A,C) vm_shiftright_avx(A,C)
482 
483 static SYS_FORCE_INLINE void
484 vm_sincos_avx(v8sf x, v8sf *s, v8sf *c)
485 {
486  for (int i = 0; i < 8; i++)
487  SYSsincos(x.f[i], s->f + i, c->f + i);
488 }
489 
490 static SYS_FORCE_INLINE v8sf
491 vm_sin_avx(v8sf x)
492 {
493  v8sf s,c;
494  vm_sincos_avx(x,&s,&c);
495  return s;
496 }
497 
498 static SYS_FORCE_INLINE v8sf
499 vm_cos_avx(v8sf x)
500 {
501  v8sf s,c;
502  vm_sincos_avx(x,&s,&c);
503  return c;
504 }
505 
506 static SYS_FORCE_INLINE v8sf
507 vm_tan_avx(v8sf x)
508 {
509  v8sf s,c;
510  vm_sincos_avx(x,&s,&c);
511  return vm_div_avx(s, c);
512 }
513 
// Trig opcode mappings onto the basic implementations above.
#define VM_SINCOS_AVX vm_sincos_avx
#define VM_SIN_AVX vm_sin_avx
#define VM_COS_AVX vm_cos_avx
#define VM_TAN_AVX vm_tan_avx
518 
519 #endif
#define SYSmax(a, b)
Definition: SYS_Math.h:1570
typedef int(APIENTRYP RE_PFNGLXSWAPINTERVALSGIPROC)(int)
#define vm_BASIC_FFFF_AVX(OP1, OP2)
int int32
Definition: SYS_Types.h:39
v8sf(float a0, float a1, float a2, float a3, float a4, float a5, float a6, float a7)
const GLdouble * v
Definition: glcorearb.h:837
GLboolean GLboolean g
Definition: glcorearb.h:1222
GLboolean GLboolean GLboolean GLboolean a
Definition: glcorearb.h:1222
GLdouble s
Definition: glad.h:3009
#define SYSabs(a)
Definition: SYS_Math.h:1572
float fpreal32
Definition: SYS_Types.h:200
#define vm_BASIC_CF_AVX(OP)
__m256 v8sf
Definition: VM_AVXFunc.h:22
__m256i v8si
Definition: VM_AVXFunc.h:23
#define vm_BASIC_UFuncFF_AVX(FUNC)
GLdouble n
Definition: glcorearb.h:2008
GLfloat f
Definition: glcorearb.h:1926
v8si(int32 a0, int32 a1, int32 a2, int32 a3, int32 a4, int32 a5, int32 a6, int32 a7)
IMATH_NAMESPACE::V2f float
v8si(int32 a)
#define SYS_FORCE_INLINE
Definition: SYS_Inline.h:45
int32 i[8]
GLboolean GLboolean GLboolean b
Definition: glcorearb.h:1222
GLint GLenum GLint x
Definition: glcorearb.h:409
#define vm_BASIC_UFuncF_AVX(FUNC)
GLfloat GLfloat GLfloat GLfloat h
Definition: glcorearb.h:2002
GLenum GLenum dst
Definition: glcorearb.h:1793
#define vm_BASIC_FFF_AVX(OP)
#define vm_BASIC_CI_AVX(OP)
unsigned int uint32
Definition: SYS_Types.h:40
#define vm_BASIC_III_AVX(OP)
Definition: core.h:1131
GLboolean r
Definition: glcorearb.h:1222
v8sf(float a)
#define SYSmin(a, b)
Definition: SYS_Math.h:1571
fpreal32 f[8]