///////////////////////////////////////////////////////////////////////////////////
/// OpenGL Mathematics (glm.g-truc.net)
///
/// Copyright (c) 2005 - 2012 G-Truc Creation (www.g-truc.net)
/// Permission is hereby granted, free of charge, to any person obtaining a copy
/// of this software and associated documentation files (the "Software"), to deal
/// in the Software without restriction, including without limitation the rights
/// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
/// copies of the Software, and to permit persons to whom the Software is
/// furnished to do so, subject to the following conditions:
///
/// The above copyright notice and this permission notice shall be included in
/// all copies or substantial portions of the Software.
///
/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
/// THE SOFTWARE.
///
/// @file glm/core/intrinsic_common.inl
/// @date 2009-06-05 / 2011-06-15
/// @author Christophe Riccio
///////////////////////////////////////////////////////////////////////////////////
32
static const __m128 GLM_VAR_USED _m128_rad_ps = _mm_set_ps1(3.141592653589793238462643383279f / 180.f);
33
static const __m128 GLM_VAR_USED _m128_deg_ps = _mm_set_ps1(180.f / 3.141592653589793238462643383279f);
35
template <typename matType>
36
GLM_FUNC_QUALIFIER matType sse_comp_mul_ps
43
out[0] = _mm_mul_ps(in1[0], in2[0]);
44
out[1] = _mm_mul_ps(in1[1], in2[1]);
45
out[2] = _mm_mul_ps(in1[2], in2[2]);
46
out[3] = _mm_mul_ps(in1[3], in2[3]);
49
GLM_FUNC_QUALIFIER void sse_add_ps(__m128 in1[4], __m128 in2[4], __m128 out[4])
52
out[0] = _mm_add_ps(in1[0], in2[0]);
53
out[1] = _mm_add_ps(in1[1], in2[1]);
54
out[2] = _mm_add_ps(in1[2], in2[2]);
55
out[3] = _mm_add_ps(in1[3], in2[3]);
59
GLM_FUNC_QUALIFIER void sse_sub_ps(__m128 in1[4], __m128 in2[4], __m128 out[4])
62
out[0] = _mm_sub_ps(in1[0], in2[0]);
63
out[1] = _mm_sub_ps(in1[1], in2[1]);
64
out[2] = _mm_sub_ps(in1[2], in2[2]);
65
out[3] = _mm_sub_ps(in1[3], in2[3]);
69
GLM_FUNC_QUALIFIER __m128 sse_mul_ps(__m128 m[4], __m128 v)
71
__m128 v0 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 0, 0, 0));
72
__m128 v1 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(1, 1, 1, 1));
73
__m128 v2 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(2, 2, 2, 2));
74
__m128 v3 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(3, 3, 3, 3));
76
__m128 m0 = _mm_mul_ps(m[0], v0);
77
__m128 m1 = _mm_mul_ps(m[1], v1);
78
__m128 m2 = _mm_mul_ps(m[2], v2);
79
__m128 m3 = _mm_mul_ps(m[3], v3);
81
__m128 a0 = _mm_add_ps(m0, m1);
82
__m128 a1 = _mm_add_ps(m2, m3);
83
__m128 a2 = _mm_add_ps(a0, a1);
88
GLM_FUNC_QUALIFIER __m128 sse_mul_ps(__m128 v, __m128 m[4])
95
__m128 m0 = _mm_mul_ps(v, i0);
96
__m128 m1 = _mm_mul_ps(v, i1);
97
__m128 m2 = _mm_mul_ps(v, i2);
98
__m128 m3 = _mm_mul_ps(v, i3);
100
__m128 u0 = _mm_unpacklo_ps(m0, m1);
101
__m128 u1 = _mm_unpackhi_ps(m0, m1);
102
__m128 a0 = _mm_add_ps(u0, u1);
104
__m128 u2 = _mm_unpacklo_ps(m2, m3);
105
__m128 u3 = _mm_unpackhi_ps(m2, m3);
106
__m128 a1 = _mm_add_ps(u2, u3);
108
__m128 f0 = _mm_movelh_ps(a0, a1);
109
__m128 f1 = _mm_movehl_ps(a1, a0);
110
__m128 f2 = _mm_add_ps(f0, f1);
115
GLM_FUNC_QUALIFIER void sse_mul_ps(__m128 const in1[4], __m128 const in2[4], __m128 out[4])
118
__m128 e0 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(0, 0, 0, 0));
119
__m128 e1 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(1, 1, 1, 1));
120
__m128 e2 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(2, 2, 2, 2));
121
__m128 e3 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(3, 3, 3, 3));
123
__m128 m0 = _mm_mul_ps(in1[0], e0);
124
__m128 m1 = _mm_mul_ps(in1[1], e1);
125
__m128 m2 = _mm_mul_ps(in1[2], e2);
126
__m128 m3 = _mm_mul_ps(in1[3], e3);
128
__m128 a0 = _mm_add_ps(m0, m1);
129
__m128 a1 = _mm_add_ps(m2, m3);
130
__m128 a2 = _mm_add_ps(a0, a1);
136
__m128 e0 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(0, 0, 0, 0));
137
__m128 e1 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(1, 1, 1, 1));
138
__m128 e2 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(2, 2, 2, 2));
139
__m128 e3 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(3, 3, 3, 3));
141
__m128 m0 = _mm_mul_ps(in1[0], e0);
142
__m128 m1 = _mm_mul_ps(in1[1], e1);
143
__m128 m2 = _mm_mul_ps(in1[2], e2);
144
__m128 m3 = _mm_mul_ps(in1[3], e3);
146
__m128 a0 = _mm_add_ps(m0, m1);
147
__m128 a1 = _mm_add_ps(m2, m3);
148
__m128 a2 = _mm_add_ps(a0, a1);
154
__m128 e0 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(0, 0, 0, 0));
155
__m128 e1 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(1, 1, 1, 1));
156
__m128 e2 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(2, 2, 2, 2));
157
__m128 e3 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(3, 3, 3, 3));
159
__m128 m0 = _mm_mul_ps(in1[0], e0);
160
__m128 m1 = _mm_mul_ps(in1[1], e1);
161
__m128 m2 = _mm_mul_ps(in1[2], e2);
162
__m128 m3 = _mm_mul_ps(in1[3], e3);
164
__m128 a0 = _mm_add_ps(m0, m1);
165
__m128 a1 = _mm_add_ps(m2, m3);
166
__m128 a2 = _mm_add_ps(a0, a1);
172
//(__m128&)_mm_shuffle_epi32(__m128i&)in2[0], _MM_SHUFFLE(3, 3, 3, 3))
173
__m128 e0 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(0, 0, 0, 0));
174
__m128 e1 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(1, 1, 1, 1));
175
__m128 e2 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(2, 2, 2, 2));
176
__m128 e3 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(3, 3, 3, 3));
178
__m128 m0 = _mm_mul_ps(in1[0], e0);
179
__m128 m1 = _mm_mul_ps(in1[1], e1);
180
__m128 m2 = _mm_mul_ps(in1[2], e2);
181
__m128 m3 = _mm_mul_ps(in1[3], e3);
183
__m128 a0 = _mm_add_ps(m0, m1);
184
__m128 a1 = _mm_add_ps(m2, m3);
185
__m128 a2 = _mm_add_ps(a0, a1);
191
GLM_FUNC_QUALIFIER void sse_transpose_ps(__m128 const in[4], __m128 out[4])
193
__m128 tmp0 = _mm_shuffle_ps(in[0], in[1], 0x44);
194
__m128 tmp2 = _mm_shuffle_ps(in[0], in[1], 0xEE);
195
__m128 tmp1 = _mm_shuffle_ps(in[2], in[3], 0x44);
196
__m128 tmp3 = _mm_shuffle_ps(in[2], in[3], 0xEE);
198
out[0] = _mm_shuffle_ps(tmp0, tmp1, 0x88);
199
out[1] = _mm_shuffle_ps(tmp0, tmp1, 0xDD);
200
out[2] = _mm_shuffle_ps(tmp2, tmp3, 0x88);
201
out[3] = _mm_shuffle_ps(tmp2, tmp3, 0xDD);
204
GLM_FUNC_QUALIFIER __m128 sse_slow_det_ps(__m128 const in[4])
208
// valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
209
// valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
210
// valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3];
211
// valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3];
213
__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
214
__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
216
__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
217
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
218
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
219
__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
221
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
222
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
223
Fac0 = _mm_sub_ps(Mul00, Mul01);
228
// valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
229
// valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
230
// valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3];
231
// valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3];
233
__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
234
__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
236
__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
237
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
238
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
239
__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
241
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
242
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
243
Fac1 = _mm_sub_ps(Mul00, Mul01);
249
// valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
250
// valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
251
// valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2];
252
// valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2];
254
__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
255
__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
257
__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
258
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
259
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
260
__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
262
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
263
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
264
Fac2 = _mm_sub_ps(Mul00, Mul01);
269
// valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
270
// valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
271
// valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3];
272
// valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3];
274
__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
275
__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
277
__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
278
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
279
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
280
__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
282
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
283
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
284
Fac3 = _mm_sub_ps(Mul00, Mul01);
289
// valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
290
// valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
291
// valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2];
292
// valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2];
294
__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
295
__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
297
__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
298
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
299
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
300
__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
302
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
303
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
304
Fac4 = _mm_sub_ps(Mul00, Mul01);
309
// valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
310
// valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
311
// valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1];
312
// valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1];
314
__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
315
__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
317
__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
318
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
319
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
320
__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
322
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
323
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
324
Fac5 = _mm_sub_ps(Mul00, Mul01);
327
__m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f);
328
__m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f);
334
__m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0));
335
__m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0));
341
__m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1));
342
__m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0));
348
__m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2));
349
__m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0));
355
__m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3));
356
__m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0));
359
// + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]),
360
// - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]),
361
// + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]),
362
// - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]),
363
__m128 Mul00 = _mm_mul_ps(Vec1, Fac0);
364
__m128 Mul01 = _mm_mul_ps(Vec2, Fac1);
365
__m128 Mul02 = _mm_mul_ps(Vec3, Fac2);
366
__m128 Sub00 = _mm_sub_ps(Mul00, Mul01);
367
__m128 Add00 = _mm_add_ps(Sub00, Mul02);
368
__m128 Inv0 = _mm_mul_ps(SignB, Add00);
371
// - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]),
372
// + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]),
373
// - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]),
374
// + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]),
375
__m128 Mul03 = _mm_mul_ps(Vec0, Fac0);
376
__m128 Mul04 = _mm_mul_ps(Vec2, Fac3);
377
__m128 Mul05 = _mm_mul_ps(Vec3, Fac4);
378
__m128 Sub01 = _mm_sub_ps(Mul03, Mul04);
379
__m128 Add01 = _mm_add_ps(Sub01, Mul05);
380
__m128 Inv1 = _mm_mul_ps(SignA, Add01);
383
// + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]),
384
// - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]),
385
// + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]),
386
// - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]),
387
__m128 Mul06 = _mm_mul_ps(Vec0, Fac1);
388
__m128 Mul07 = _mm_mul_ps(Vec1, Fac3);
389
__m128 Mul08 = _mm_mul_ps(Vec3, Fac5);
390
__m128 Sub02 = _mm_sub_ps(Mul06, Mul07);
391
__m128 Add02 = _mm_add_ps(Sub02, Mul08);
392
__m128 Inv2 = _mm_mul_ps(SignB, Add02);
395
// - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]),
396
// + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]),
397
// - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]),
398
// + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3]));
399
__m128 Mul09 = _mm_mul_ps(Vec0, Fac2);
400
__m128 Mul10 = _mm_mul_ps(Vec1, Fac4);
401
__m128 Mul11 = _mm_mul_ps(Vec2, Fac5);
402
__m128 Sub03 = _mm_sub_ps(Mul09, Mul10);
403
__m128 Add03 = _mm_add_ps(Sub03, Mul11);
404
__m128 Inv3 = _mm_mul_ps(SignA, Add03);
406
__m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0));
407
__m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0));
408
__m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0));
410
// valType Determinant = m[0][0] * Inverse[0][0]
411
// + m[0][1] * Inverse[1][0]
412
// + m[0][2] * Inverse[2][0]
413
// + m[0][3] * Inverse[3][0];
414
__m128 Det0 = sse_dot_ps(in[0], Row2);
418
GLM_FUNC_QUALIFIER __m128 sse_detd_ps
423
// _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(
425
//T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
426
//T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
427
//T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
428
//T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
429
//T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
430
//T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
433
__m128 Swp2A = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(0, 1, 1, 2)));
434
__m128 Swp3A = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(3, 2, 3, 3)));
435
__m128 MulA = _mm_mul_ps(Swp2A, Swp3A);
438
__m128 Swp2B = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(3, 2, 3, 3)));
439
__m128 Swp3B = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(0, 1, 1, 2)));
440
__m128 MulB = _mm_mul_ps(Swp2B, Swp3B);
442
// Columns subtraction
443
__m128 SubE = _mm_sub_ps(MulA, MulB);
446
__m128 Swp2C = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(0, 0, 1, 2)));
447
__m128 Swp3C = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(1, 2, 0, 0)));
448
__m128 MulC = _mm_mul_ps(Swp2C, Swp3C);
449
__m128 SubF = _mm_sub_ps(_mm_movehl_ps(MulC, MulC), MulC);
451
//detail::tvec4<T> DetCof(
452
// + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02),
453
// - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04),
454
// + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05),
455
// - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05));
457
__m128 SubFacA = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubE), _MM_SHUFFLE(2, 1, 0, 0)));
458
__m128 SwpFacA = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(0, 0, 0, 1)));
459
__m128 MulFacA = _mm_mul_ps(SwpFacA, SubFacA);
461
__m128 SubTmpB = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(0, 0, 3, 1));
462
__m128 SubFacB = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubTmpB), _MM_SHUFFLE(3, 1, 1, 0)));//SubF[0], SubE[3], SubE[3], SubE[1];
463
__m128 SwpFacB = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(1, 1, 2, 2)));
464
__m128 MulFacB = _mm_mul_ps(SwpFacB, SubFacB);
466
__m128 SubRes = _mm_sub_ps(MulFacA, MulFacB);
468
__m128 SubTmpC = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(1, 0, 2, 2));
469
__m128 SubFacC = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubTmpC), _MM_SHUFFLE(3, 3, 2, 0)));
470
__m128 SwpFacC = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(2, 3, 3, 3)));
471
__m128 MulFacC = _mm_mul_ps(SwpFacC, SubFacC);
473
__m128 AddRes = _mm_add_ps(SubRes, MulFacC);
474
__m128 DetCof = _mm_mul_ps(AddRes, _mm_setr_ps( 1.0f,-1.0f, 1.0f,-1.0f));
476
//return m[0][0] * DetCof[0]
477
// + m[0][1] * DetCof[1]
478
// + m[0][2] * DetCof[2]
479
// + m[0][3] * DetCof[3];
481
return sse_dot_ps(m[0], DetCof);
484
GLM_FUNC_QUALIFIER __m128 sse_det_ps
489
// _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(add)
491
//T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
492
//T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
493
//T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
494
//T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
495
//T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
496
//T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
499
__m128 Swp2A = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(0, 1, 1, 2));
500
__m128 Swp3A = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(3, 2, 3, 3));
501
__m128 MulA = _mm_mul_ps(Swp2A, Swp3A);
504
__m128 Swp2B = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(3, 2, 3, 3));
505
__m128 Swp3B = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(0, 1, 1, 2));
506
__m128 MulB = _mm_mul_ps(Swp2B, Swp3B);
508
// Columns subtraction
509
__m128 SubE = _mm_sub_ps(MulA, MulB);
512
__m128 Swp2C = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(0, 0, 1, 2));
513
__m128 Swp3C = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(1, 2, 0, 0));
514
__m128 MulC = _mm_mul_ps(Swp2C, Swp3C);
515
__m128 SubF = _mm_sub_ps(_mm_movehl_ps(MulC, MulC), MulC);
517
//detail::tvec4<T> DetCof(
518
// + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02),
519
// - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04),
520
// + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05),
521
// - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05));
523
__m128 SubFacA = _mm_shuffle_ps(SubE, SubE, _MM_SHUFFLE(2, 1, 0, 0));
524
__m128 SwpFacA = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(0, 0, 0, 1));
525
__m128 MulFacA = _mm_mul_ps(SwpFacA, SubFacA);
527
__m128 SubTmpB = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(0, 0, 3, 1));
528
__m128 SubFacB = _mm_shuffle_ps(SubTmpB, SubTmpB, _MM_SHUFFLE(3, 1, 1, 0));//SubF[0], SubE[3], SubE[3], SubE[1];
529
__m128 SwpFacB = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(1, 1, 2, 2));
530
__m128 MulFacB = _mm_mul_ps(SwpFacB, SubFacB);
532
__m128 SubRes = _mm_sub_ps(MulFacA, MulFacB);
534
__m128 SubTmpC = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(1, 0, 2, 2));
535
__m128 SubFacC = _mm_shuffle_ps(SubTmpC, SubTmpC, _MM_SHUFFLE(3, 3, 2, 0));
536
__m128 SwpFacC = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(2, 3, 3, 3));
537
__m128 MulFacC = _mm_mul_ps(SwpFacC, SubFacC);
539
__m128 AddRes = _mm_add_ps(SubRes, MulFacC);
540
__m128 DetCof = _mm_mul_ps(AddRes, _mm_setr_ps( 1.0f,-1.0f, 1.0f,-1.0f));
542
//return m[0][0] * DetCof[0]
543
// + m[0][1] * DetCof[1]
544
// + m[0][2] * DetCof[2]
545
// + m[0][3] * DetCof[3];
547
return sse_dot_ps(m[0], DetCof);
550
GLM_FUNC_QUALIFIER void sse_inverse_ps(__m128 const in[4], __m128 out[4])
554
// valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
555
// valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
556
// valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3];
557
// valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3];
559
__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
560
__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
562
__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
563
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
564
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
565
__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
567
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
568
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
569
Fac0 = _mm_sub_ps(Mul00, Mul01);
574
// valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
575
// valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
576
// valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3];
577
// valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3];
579
__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
580
__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
582
__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
583
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
584
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
585
__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
587
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
588
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
589
Fac1 = _mm_sub_ps(Mul00, Mul01);
595
// valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
596
// valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
597
// valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2];
598
// valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2];
600
__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
601
__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
603
__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
604
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
605
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
606
__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
608
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
609
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
610
Fac2 = _mm_sub_ps(Mul00, Mul01);
615
// valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
616
// valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
617
// valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3];
618
// valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3];
620
__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
621
__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
623
__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
624
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
625
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
626
__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
628
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
629
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
630
Fac3 = _mm_sub_ps(Mul00, Mul01);
635
// valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
636
// valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
637
// valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2];
638
// valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2];
640
__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
641
__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
643
__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
644
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
645
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
646
__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
648
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
649
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
650
Fac4 = _mm_sub_ps(Mul00, Mul01);
655
// valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
656
// valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
657
// valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1];
658
// valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1];
660
__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
661
__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
663
__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
664
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
665
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
666
__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
668
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
669
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
670
Fac5 = _mm_sub_ps(Mul00, Mul01);
673
__m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f);
674
__m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f);
680
__m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0));
681
__m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0));
687
__m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1));
688
__m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0));
694
__m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2));
695
__m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0));
701
__m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3));
702
__m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0));
705
// + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]),
706
// - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]),
707
// + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]),
708
// - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]),
709
__m128 Mul00 = _mm_mul_ps(Vec1, Fac0);
710
__m128 Mul01 = _mm_mul_ps(Vec2, Fac1);
711
__m128 Mul02 = _mm_mul_ps(Vec3, Fac2);
712
__m128 Sub00 = _mm_sub_ps(Mul00, Mul01);
713
__m128 Add00 = _mm_add_ps(Sub00, Mul02);
714
__m128 Inv0 = _mm_mul_ps(SignB, Add00);
717
// - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]),
718
// + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]),
719
// - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]),
720
// + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]),
721
__m128 Mul03 = _mm_mul_ps(Vec0, Fac0);
722
__m128 Mul04 = _mm_mul_ps(Vec2, Fac3);
723
__m128 Mul05 = _mm_mul_ps(Vec3, Fac4);
724
__m128 Sub01 = _mm_sub_ps(Mul03, Mul04);
725
__m128 Add01 = _mm_add_ps(Sub01, Mul05);
726
__m128 Inv1 = _mm_mul_ps(SignA, Add01);
729
// + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]),
730
// - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]),
731
// + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]),
732
// - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]),
733
__m128 Mul06 = _mm_mul_ps(Vec0, Fac1);
734
__m128 Mul07 = _mm_mul_ps(Vec1, Fac3);
735
__m128 Mul08 = _mm_mul_ps(Vec3, Fac5);
736
__m128 Sub02 = _mm_sub_ps(Mul06, Mul07);
737
__m128 Add02 = _mm_add_ps(Sub02, Mul08);
738
__m128 Inv2 = _mm_mul_ps(SignB, Add02);
741
// - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]),
742
// + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]),
743
// - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]),
744
// + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3]));
745
__m128 Mul09 = _mm_mul_ps(Vec0, Fac2);
746
__m128 Mul10 = _mm_mul_ps(Vec1, Fac4);
747
__m128 Mul11 = _mm_mul_ps(Vec2, Fac5);
748
__m128 Sub03 = _mm_sub_ps(Mul09, Mul10);
749
__m128 Add03 = _mm_add_ps(Sub03, Mul11);
750
__m128 Inv3 = _mm_mul_ps(SignA, Add03);
752
__m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0));
753
__m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0));
754
__m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0));
756
// valType Determinant = m[0][0] * Inverse[0][0]
757
// + m[0][1] * Inverse[1][0]
758
// + m[0][2] * Inverse[2][0]
759
// + m[0][3] * Inverse[3][0];
760
__m128 Det0 = sse_dot_ps(in[0], Row2);
761
__m128 Rcp0 = _mm_div_ps(one, Det0);
762
//__m128 Rcp0 = _mm_rcp_ps(Det0);
764
// Inverse /= Determinant;
765
out[0] = _mm_mul_ps(Inv0, Rcp0);
766
out[1] = _mm_mul_ps(Inv1, Rcp0);
767
out[2] = _mm_mul_ps(Inv2, Rcp0);
768
out[3] = _mm_mul_ps(Inv3, Rcp0);
771
GLM_FUNC_QUALIFIER void sse_inverse_fast_ps(__m128 const in[4], __m128 out[4])
775
// valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
776
// valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
777
// valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3];
778
// valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3];
780
__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
781
__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
783
__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
784
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
785
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
786
__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
788
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
789
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
790
Fac0 = _mm_sub_ps(Mul00, Mul01);
795
// valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
796
// valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
797
// valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3];
798
// valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3];
800
__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
801
__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
803
__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
804
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
805
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
806
__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
808
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
809
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
810
Fac1 = _mm_sub_ps(Mul00, Mul01);
816
// valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
817
// valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
818
// valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2];
819
// valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2];
821
__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
822
__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
824
__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
825
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
826
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
827
__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
829
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
830
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
831
Fac2 = _mm_sub_ps(Mul00, Mul01);
836
// valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
837
// valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
838
// valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3];
839
// valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3];
841
__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
842
__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
844
__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
845
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
846
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
847
__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
849
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
850
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
851
Fac3 = _mm_sub_ps(Mul00, Mul01);
856
// valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
857
// valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
858
// valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2];
859
// valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2];
861
__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
862
__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
864
__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
865
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
866
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
867
__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
869
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
870
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
871
Fac4 = _mm_sub_ps(Mul00, Mul01);
876
// valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
877
// valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
878
// valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1];
879
// valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1];
881
__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
882
__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
884
__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
885
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
886
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
887
__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
889
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
890
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
891
Fac5 = _mm_sub_ps(Mul00, Mul01);
894
__m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f);
895
__m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f);
901
__m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0));
902
__m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0));
908
__m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1));
909
__m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0));
915
__m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2));
916
__m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0));
922
__m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3));
923
__m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0));
926
// + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]),
927
// - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]),
928
// + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]),
929
// - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]),
930
__m128 Mul00 = _mm_mul_ps(Vec1, Fac0);
931
__m128 Mul01 = _mm_mul_ps(Vec2, Fac1);
932
__m128 Mul02 = _mm_mul_ps(Vec3, Fac2);
933
__m128 Sub00 = _mm_sub_ps(Mul00, Mul01);
934
__m128 Add00 = _mm_add_ps(Sub00, Mul02);
935
__m128 Inv0 = _mm_mul_ps(SignB, Add00);
938
// - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]),
939
// + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]),
940
// - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]),
941
// + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]),
942
__m128 Mul03 = _mm_mul_ps(Vec0, Fac0);
943
__m128 Mul04 = _mm_mul_ps(Vec2, Fac3);
944
__m128 Mul05 = _mm_mul_ps(Vec3, Fac4);
945
__m128 Sub01 = _mm_sub_ps(Mul03, Mul04);
946
__m128 Add01 = _mm_add_ps(Sub01, Mul05);
947
__m128 Inv1 = _mm_mul_ps(SignA, Add01);
950
// + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]),
951
// - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]),
952
// + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]),
953
// - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]),
954
__m128 Mul06 = _mm_mul_ps(Vec0, Fac1);
955
__m128 Mul07 = _mm_mul_ps(Vec1, Fac3);
956
__m128 Mul08 = _mm_mul_ps(Vec3, Fac5);
957
__m128 Sub02 = _mm_sub_ps(Mul06, Mul07);
958
__m128 Add02 = _mm_add_ps(Sub02, Mul08);
959
__m128 Inv2 = _mm_mul_ps(SignB, Add02);
962
// - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]),
963
// + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]),
964
// - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]),
965
// + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3]));
966
__m128 Mul09 = _mm_mul_ps(Vec0, Fac2);
967
__m128 Mul10 = _mm_mul_ps(Vec1, Fac4);
968
__m128 Mul11 = _mm_mul_ps(Vec2, Fac5);
969
__m128 Sub03 = _mm_sub_ps(Mul09, Mul10);
970
__m128 Add03 = _mm_add_ps(Sub03, Mul11);
971
__m128 Inv3 = _mm_mul_ps(SignA, Add03);
973
__m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0));
974
__m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0));
975
__m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0));
977
// valType Determinant = m[0][0] * Inverse[0][0]
978
// + m[0][1] * Inverse[1][0]
979
// + m[0][2] * Inverse[2][0]
980
// + m[0][3] * Inverse[3][0];
981
__m128 Det0 = sse_dot_ps(in[0], Row2);
982
__m128 Rcp0 = _mm_rcp_ps(Det0);
983
//__m128 Rcp0 = _mm_div_ps(one, Det0);
984
// Inverse /= Determinant;
985
out[0] = _mm_mul_ps(Inv0, Rcp0);
986
out[1] = _mm_mul_ps(Inv1, Rcp0);
987
out[2] = _mm_mul_ps(Inv2, Rcp0);
988
out[3] = _mm_mul_ps(Inv3, Rcp0);
991
GLM_FUNC_QUALIFIER void sse_rotate_ps(__m128 const in[4], float Angle, float const v[3], __m128 out[4])
{
	// Builds a 4x4 rotation matrix of 'Angle' degrees (converted to radians
	// below) around axis 'v' (normalized here), then stores in * Rotation
	// into 'out'. 'in' and 'out' are column-major 4x4 matrices, one column
	// per __m128.
	float a = glm::radians(Angle);
	float c = cos(a);
	float s = sin(a);

	// Load the axis into an SSE register and normalize it.
	glm::vec4 AxisA(v[0], v[1], v[2], float(0));
	__m128 AxisB = _mm_set_ps(AxisA.w, AxisA.z, AxisA.y, AxisA.x);
	__m128 AxisC = detail::sse_nrm_ps(AxisB);

	// Broadcast cos(a) and sin(a) across all four lanes.
	__m128 Cos0 = _mm_set_ss(c);
	__m128 CosA = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(0, 0, 0, 0));
	__m128 Sin0 = _mm_set_ss(s);
	__m128 SinA = _mm_shuffle_ps(Sin0, Sin0, _MM_SHUFFLE(0, 0, 0, 0));

	// detail::tvec3<valType> temp = (valType(1) - c) * axis;
	__m128 Temp0 = _mm_sub_ps(one, CosA);
	__m128 Temp1 = _mm_mul_ps(Temp0, AxisC);
	// NOTE(review): Temp1 is not used below; the rows are built from AxisC
	// directly, so the (1 - c) factor of the scalar formula appears to be
	// missing, and SinA0 carries +s in every lane rather than the per-lane
	// signs shown in the commented-out _mm_set_ps calls. Kept as-is to
	// preserve existing behavior - verify against the scalar glm::rotate.

	//Rotate[0][0] = c + temp[0] * axis[0];
	//Rotate[0][1] = 0 + temp[0] * axis[1] + s * axis[2];
	//Rotate[0][2] = 0 + temp[0] * axis[2] - s * axis[1];
	__m128 Axis0 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(0, 0, 0, 0));
	__m128 TmpA0 = _mm_mul_ps(Axis0, AxisC);
	__m128 CosA0 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 1, 1, 0));
	__m128 TmpA1 = _mm_add_ps(CosA0, TmpA0);
	__m128 SinA0 = SinA;//_mm_set_ps(0.0f, s, -s, 0.0f);
	__m128 TmpA2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 1, 2, 3));
	__m128 TmpA3 = _mm_mul_ps(SinA0, TmpA2);
	__m128 TmpA4 = _mm_add_ps(TmpA1, TmpA3);

	//Rotate[1][0] = 0 + temp[1] * axis[0] - s * axis[2];
	//Rotate[1][1] = c + temp[1] * axis[1];
	//Rotate[1][2] = 0 + temp[1] * axis[2] + s * axis[0];
	__m128 Axis1 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(1, 1, 1, 1));
	__m128 TmpB0 = _mm_mul_ps(Axis1, AxisC);
	__m128 CosA1 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 1, 0, 1));
	__m128 TmpB1 = _mm_add_ps(CosA1, TmpB0);
	__m128 SinB0 = SinA;//_mm_set_ps(-s, 0.0f, s, 0.0f);
	__m128 TmpB2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 0, 3, 2));
	__m128 TmpB3 = _mm_mul_ps(SinA0, TmpB2);
	__m128 TmpB4 = _mm_add_ps(TmpB1, TmpB3);

	//Rotate[2][0] = 0 + temp[2] * axis[0] + s * axis[1];
	//Rotate[2][1] = 0 + temp[2] * axis[1] - s * axis[0];
	//Rotate[2][2] = c + temp[2] * axis[2];
	__m128 Axis2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(2, 2, 2, 2));
	__m128 TmpC0 = _mm_mul_ps(Axis2, AxisC);
	__m128 CosA2 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 0, 1, 1));
	__m128 TmpC1 = _mm_add_ps(CosA2, TmpC0);
	__m128 SinC0 = SinA;//_mm_set_ps(s, -s, 0.0f, 0.0f);
	__m128 TmpC2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 3, 0, 1));
	__m128 TmpC3 = _mm_mul_ps(SinA0, TmpC2);
	__m128 TmpC4 = _mm_add_ps(TmpC1, TmpC3);

	// Assemble the rotation matrix: the three rotation columns computed
	// above plus the homogeneous (0, 0, 0, 1) column.
	__m128 Result[4];
	Result[0] = TmpA4;
	Result[1] = TmpB4;
	Result[2] = TmpC4;
	Result[3] = _mm_set_ps(1, 0, 0, 0);

	//detail::tmat4x4<valType> Result(detail::tmat4x4<valType>::null);
	//Result[0] = m[0] * Rotate[0][0] + m[1] * Rotate[0][1] + m[2] * Rotate[0][2];
	//Result[1] = m[0] * Rotate[1][0] + m[1] * Rotate[1][1] + m[2] * Rotate[1][2];
	//Result[2] = m[0] * Rotate[2][0] + m[1] * Rotate[2][1] + m[2] * Rotate[2][2];
	sse_mul_ps(in, Result, out);
}
1061
GLM_FUNC_QUALIFIER void sse_outer_ps(__m128 const & c, __m128 const & r, __m128 out[4])
1063
out[0] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(0, 0, 0, 0)));
1064
out[1] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(1, 1, 1, 1)));
1065
out[2] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(2, 2, 2, 2)));
1066
out[3] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(3, 3, 3, 3)));