// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2001 Intel Corporation
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.

// The SSE code for the 4x4 float and double matrix inverse in this file
// comes from the following Intel library:
// http://software.intel.com/en-us/articles/optimized-matrix-library-for-use-with-the-intel-pentiumr-4-processors-sse2-instructions/
//
// Here is the respective copyright and license statement:
//
//   Copyright (c) 2001 Intel Corporation.
//
// Permission is granted to use, copy, distribute and prepare derivative works
// of this library for any purpose and without fee, provided, that the above
// copyright notice and this statement appear in all copies.
// Intel makes no representations about the suitability of this software for
// any purpose, and specifically disclaims all warranties.
// See LEGAL.TXT for all the legal information.

#ifndef EIGEN_INVERSE_SSE_H
#define EIGEN_INVERSE_SSE_H

namespace internal {

template<typename MatrixType, typename ResultType>
struct compute_inverse_size4<Architecture::SSE, float, MatrixType, ResultType>
{
  enum {
    MatrixAlignment     = bool(MatrixType::Flags&AlignedBit),
    ResultAlignment     = bool(ResultType::Flags&AlignedBit),
    StorageOrdersMatch  = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit)
  };

  static void run(const MatrixType& matrix, ResultType& result)
  {
    EIGEN_ALIGN16 const int _Sign_PNNP[4] = { 0x00000000, 0x80000000, 0x80000000, 0x00000000 };

    // Load the full matrix into registers
    __m128 _L1 = matrix.template packet<MatrixAlignment>( 0);
    __m128 _L2 = matrix.template packet<MatrixAlignment>( 4);
    __m128 _L3 = matrix.template packet<MatrixAlignment>( 8);
    __m128 _L4 = matrix.template packet<MatrixAlignment>(12);

    // The inverse is calculated using the "divide and conquer" technique. The
    // original matrix is divided into four 2x2 sub-matrices. Since each
    // register holds four matrix elements, each of the smaller matrices is
    // represented by a single register. Hence we get better locality in the
    // calculations.

    __m128 A, B, C, D; // the four sub-matrices
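
    // With the four 2x2 sub-matrices denoted A, B, C and D, and X# standing
    // for the adjugate of a 2x2 block X (so that X*X# = X#*X = |X|*I), the
    // kernel below evaluates the partitioned inverse used by Intel's
    // reference implementation:
    //   det  = |A|*|D| + |B|*|C| - trace((A#*B)*(D#*C))
    //   iA   = A*|D| - B*(D#*C)        iB = C*|B| - D*(B#*A)
    //   iC   = B*|C| - A*(C#*D)        iD = D*|A| - C*(A#*B)
    //   inverse = 1/det * [ iA# iB# ; iC# iD# ]
    // Each sub-matrix fits in a single SSE register, so every step is a few
    // shuffles, multiplies and adds.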
    if(!StorageOrdersMatch)
    {
      A = _mm_unpacklo_ps(_L1, _L2);
      B = _mm_unpacklo_ps(_L3, _L4);
      C = _mm_unpackhi_ps(_L1, _L2);
      D = _mm_unpackhi_ps(_L3, _L4);
    }
    else
    {
      A = _mm_movelh_ps(_L1, _L2);
      B = _mm_movehl_ps(_L2, _L1);
      C = _mm_movelh_ps(_L3, _L4);
      D = _mm_movehl_ps(_L4, _L3);
    }

    __m128 iA, iB, iC, iD,                 // partial inverses of the sub-matrices
           DC, AB;
    __m128 dA, dB, dC, dD;                 // determinants of the sub-matrices
    __m128 det, d, d1, d2;
    __m128 rd;                             // reciprocal of the determinant

    //  AB = A# * B
    AB = _mm_mul_ps(_mm_shuffle_ps(A,A,0x0F), B);
    AB = _mm_sub_ps(AB,_mm_mul_ps(_mm_shuffle_ps(A,A,0xA5), _mm_shuffle_ps(B,B,0x4E)));
    //  DC = D# * C
    DC = _mm_mul_ps(_mm_shuffle_ps(D,D,0x0F), C);
    DC = _mm_sub_ps(DC,_mm_mul_ps(_mm_shuffle_ps(D,D,0xA5), _mm_shuffle_ps(C,C,0x4E)));

    //  dA = |A|
    dA = _mm_mul_ps(_mm_shuffle_ps(A, A, 0x5F),A);
    dA = _mm_sub_ss(dA, _mm_movehl_ps(dA,dA));
    //  dB = |B|
    dB = _mm_mul_ps(_mm_shuffle_ps(B, B, 0x5F),B);
    dB = _mm_sub_ss(dB, _mm_movehl_ps(dB,dB));
    //  dC = |C|
    dC = _mm_mul_ps(_mm_shuffle_ps(C, C, 0x5F),C);
    dC = _mm_sub_ss(dC, _mm_movehl_ps(dC,dC));
    //  dD = |D|
    dD = _mm_mul_ps(_mm_shuffle_ps(D, D, 0x5F),D);
    dD = _mm_sub_ss(dD, _mm_movehl_ps(dD,dD));

    //  d = trace(AB*DC) = trace(A#*B*D#*C)
    d = _mm_mul_ps(_mm_shuffle_ps(DC,DC,0xD8),AB);

    //  iD = C*A#*B
    iD = _mm_mul_ps(_mm_shuffle_ps(C,C,0xA0), _mm_movelh_ps(AB,AB));
    iD = _mm_add_ps(iD,_mm_mul_ps(_mm_shuffle_ps(C,C,0xF5), _mm_movehl_ps(AB,AB)));
    //  iA = B*D#*C
    iA = _mm_mul_ps(_mm_shuffle_ps(B,B,0xA0), _mm_movelh_ps(DC,DC));
    iA = _mm_add_ps(iA,_mm_mul_ps(_mm_shuffle_ps(B,B,0xF5), _mm_movehl_ps(DC,DC)));

    //  d = trace(AB*DC) = trace(A#*B*D#*C) [continue]
    d  = _mm_add_ps(d, _mm_movehl_ps(d, d));
    d  = _mm_add_ss(d, _mm_shuffle_ps(d, d, 1));
    d1 = _mm_mul_ss(dA,dD);
    d2 = _mm_mul_ss(dB,dC);

    //  iD = D*|A| - C*A#*B
    iD = _mm_sub_ps(_mm_mul_ps(D,_mm_shuffle_ps(dA,dA,0)), iD);

    //  iA = A*|D| - B*D#*C;
    iA = _mm_sub_ps(_mm_mul_ps(A,_mm_shuffle_ps(dD,dD,0)), iA);

    //  det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C)
    det = _mm_sub_ss(_mm_add_ss(d1,d2),d);
    rd  = _mm_div_ss(_mm_set_ss(1.0f), det);

//     #ifdef ZERO_SINGULAR
//     rd = _mm_and_ps(_mm_cmpneq_ss(det,_mm_setzero_ps()), rd);
//     #endif
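
    // Note: no singularity check is performed. If det is exactly zero, rd
    // becomes infinite and the result is filled with non-finite values;
    // enabling the ZERO_SINGULAR variant above would zero rd instead.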

    //  iB = D * (A#B)# = D*B#*A
    iB = _mm_mul_ps(D, _mm_shuffle_ps(AB,AB,0x33));
    iB = _mm_sub_ps(iB, _mm_mul_ps(_mm_shuffle_ps(D,D,0xB1), _mm_shuffle_ps(AB,AB,0x66)));
    //  iC = A * (D#C)# = A*C#*D
    iC = _mm_mul_ps(A, _mm_shuffle_ps(DC,DC,0x33));
    iC = _mm_sub_ps(iC, _mm_mul_ps(_mm_shuffle_ps(A,A,0xB1), _mm_shuffle_ps(DC,DC,0x66)));

    rd = _mm_shuffle_ps(rd,rd,0);
    rd = _mm_xor_ps(rd, _mm_load_ps((float*)_Sign_PNNP));

    //  iB = C*|B| - D*B#*A
    iB = _mm_sub_ps(_mm_mul_ps(C,_mm_shuffle_ps(dB,dB,0)), iB);

    //  iC = B*|C| - A*C#*D;
    iC = _mm_sub_ps(_mm_mul_ps(B,_mm_shuffle_ps(dC,dC,0)), iC);

    //  iX = iX / det
    iA = _mm_mul_ps(rd,iA);
    iB = _mm_mul_ps(rd,iB);
    iC = _mm_mul_ps(rd,iC);
    iD = _mm_mul_ps(rd,iD);

    result.template writePacket<ResultAlignment>( 0, _mm_shuffle_ps(iA,iB,0x77));
    result.template writePacket<ResultAlignment>( 4, _mm_shuffle_ps(iA,iB,0x22));
    result.template writePacket<ResultAlignment>( 8, _mm_shuffle_ps(iC,iD,0x77));
    result.template writePacket<ResultAlignment>(12, _mm_shuffle_ps(iC,iD,0x22));
  }

};
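
// Usage note: these specializations are not called directly. A minimal sketch,
// assuming an SSE-enabled Eigen 3 build, is simply
//   Eigen::Matrix4f m  = Eigen::Matrix4f::Random();
//   Eigen::Matrix4f mi = m.inverse();   // dispatched to compute_inverse_size4
// and likewise for Matrix4d with the double specialization below.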

template<typename MatrixType, typename ResultType>
struct compute_inverse_size4<Architecture::SSE, double, MatrixType, ResultType>
{
  enum {
    MatrixAlignment     = bool(MatrixType::Flags&AlignedBit),
    ResultAlignment     = bool(ResultType::Flags&AlignedBit),
    StorageOrdersMatch  = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit)
  };

  static void run(const MatrixType& matrix, ResultType& result)
  {
    const EIGEN_ALIGN16 long long int _Sign_NP[2] = { 0x8000000000000000ll, 0x0000000000000000ll };
    const EIGEN_ALIGN16 long long int _Sign_PN[2] = { 0x0000000000000000ll, 0x8000000000000000ll };

    // The inverse is calculated using the "divide and conquer" technique. The
    // original matrix is divided into four 2x2 sub-matrices. Since each
    // register holds two matrix elements, each of the smaller matrices
    // consists of two registers. Hence we get better locality in the
    // calculations.

    // the four sub-matrices
    __m128d A1, A2, B1, B2, C1, C2, D1, D2;
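
    // The partitioned-inverse formulas are the same as in the float
    // specialization above; the only difference is that a 2x2 double block no
    // longer fits in one register, so each block is split across two __m128d
    // registers (suffixes 1 and 2) and every block operation is written out
    // twice.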

    if(StorageOrdersMatch)
    {
      A1 = matrix.template packet<MatrixAlignment>( 0); B1 = matrix.template packet<MatrixAlignment>( 2);
      A2 = matrix.template packet<MatrixAlignment>( 4); B2 = matrix.template packet<MatrixAlignment>( 6);
      C1 = matrix.template packet<MatrixAlignment>( 8); D1 = matrix.template packet<MatrixAlignment>(10);
      C2 = matrix.template packet<MatrixAlignment>(12); D2 = matrix.template packet<MatrixAlignment>(14);
    }
    else
    {
      __m128d tmp;
      A1 = matrix.template packet<MatrixAlignment>( 0); C1 = matrix.template packet<MatrixAlignment>( 2);
      A2 = matrix.template packet<MatrixAlignment>( 4); C2 = matrix.template packet<MatrixAlignment>( 6);
      tmp = A1;
      A1 = _mm_unpacklo_pd(A1,A2);
      A2 = _mm_unpackhi_pd(tmp,A2);
      tmp = C1;
      C1 = _mm_unpacklo_pd(C1,C2);
      C2 = _mm_unpackhi_pd(tmp,C2);

      B1 = matrix.template packet<MatrixAlignment>( 8); D1 = matrix.template packet<MatrixAlignment>(10);
      B2 = matrix.template packet<MatrixAlignment>(12); D2 = matrix.template packet<MatrixAlignment>(14);
      tmp = B1;
      B1 = _mm_unpacklo_pd(B1,B2);
      B2 = _mm_unpackhi_pd(tmp,B2);
      tmp = D1;
      D1 = _mm_unpacklo_pd(D1,D2);
      D2 = _mm_unpackhi_pd(tmp,D2);
    }

    __m128d iA1, iA2, iB1, iB2, iC1, iC2, iD1, iD2,     // partial inverses of the sub-matrices
            DC1, DC2, AB1, AB2;
    __m128d dA, dB, dC, dD;     // determinants of the sub-matrices
    __m128d det, d1, d2, rd;

    //  dA = |A|
    dA = _mm_shuffle_pd(A2, A2, 1);
    dA = _mm_mul_pd(A1, dA);
    dA = _mm_sub_sd(dA, _mm_shuffle_pd(dA,dA,3));
    //  dB = |B|
    dB = _mm_shuffle_pd(B2, B2, 1);
    dB = _mm_mul_pd(B1, dB);
    dB = _mm_sub_sd(dB, _mm_shuffle_pd(dB,dB,3));

    //  AB = A# * B
    AB1 = _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,3));
    AB2 = _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,0));
    AB1 = _mm_sub_pd(AB1, _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,3)));
    AB2 = _mm_sub_pd(AB2, _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,0)));

    //  dC = |C|
    dC = _mm_shuffle_pd(C2, C2, 1);
    dC = _mm_mul_pd(C1, dC);
    dC = _mm_sub_sd(dC, _mm_shuffle_pd(dC,dC,3));
    //  dD = |D|
    dD = _mm_shuffle_pd(D2, D2, 1);
    dD = _mm_mul_pd(D1, dD);
    dD = _mm_sub_sd(dD, _mm_shuffle_pd(dD,dD,3));

    //  DC = D# * C
    DC1 = _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,3));
    DC2 = _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,0));
    DC1 = _mm_sub_pd(DC1, _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,3)));
    DC2 = _mm_sub_pd(DC2, _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,0)));

    //  rd = trace(AB*DC) = trace(A#*B*D#*C)
    d1 = _mm_mul_pd(AB1, _mm_shuffle_pd(DC1, DC2, 0));
    d2 = _mm_mul_pd(AB2, _mm_shuffle_pd(DC1, DC2, 3));
    rd = _mm_add_pd(d1, d2);
    rd = _mm_add_sd(rd, _mm_shuffle_pd(rd, rd,3));

    //  iD = C*A#*B
    iD1 = _mm_mul_pd(AB1, _mm_shuffle_pd(C1,C1,0));
    iD2 = _mm_mul_pd(AB1, _mm_shuffle_pd(C2,C2,0));
    iD1 = _mm_add_pd(iD1, _mm_mul_pd(AB2, _mm_shuffle_pd(C1,C1,3)));
    iD2 = _mm_add_pd(iD2, _mm_mul_pd(AB2, _mm_shuffle_pd(C2,C2,3)));

    //  iA = B*D#*C
    iA1 = _mm_mul_pd(DC1, _mm_shuffle_pd(B1,B1,0));
    iA2 = _mm_mul_pd(DC1, _mm_shuffle_pd(B2,B2,0));
    iA1 = _mm_add_pd(iA1, _mm_mul_pd(DC2, _mm_shuffle_pd(B1,B1,3)));
    iA2 = _mm_add_pd(iA2, _mm_mul_pd(DC2, _mm_shuffle_pd(B2,B2,3)));

    //  iD = D*|A| - C*A#*B
    dA = _mm_shuffle_pd(dA,dA,0);
    iD1 = _mm_sub_pd(_mm_mul_pd(D1, dA), iD1);
    iD2 = _mm_sub_pd(_mm_mul_pd(D2, dA), iD2);

    //  iA = A*|D| - B*D#*C;
    dD = _mm_shuffle_pd(dD,dD,0);
    iA1 = _mm_sub_pd(_mm_mul_pd(A1, dD), iA1);
    iA2 = _mm_sub_pd(_mm_mul_pd(A2, dD), iA2);

    d1 = _mm_mul_sd(dA, dD);
    d2 = _mm_mul_sd(dB, dC);

    //  iB = D * (A#B)# = D*B#*A
    iB1 = _mm_mul_pd(D1, _mm_shuffle_pd(AB2,AB1,1));
    iB2 = _mm_mul_pd(D2, _mm_shuffle_pd(AB2,AB1,1));
    iB1 = _mm_sub_pd(iB1, _mm_mul_pd(_mm_shuffle_pd(D1,D1,1), _mm_shuffle_pd(AB2,AB1,2)));
    iB2 = _mm_sub_pd(iB2, _mm_mul_pd(_mm_shuffle_pd(D2,D2,1), _mm_shuffle_pd(AB2,AB1,2)));

    //  det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C)
    det = _mm_add_sd(d1, d2);
    det = _mm_sub_sd(det, rd);

    //  iC = A * (D#C)# = A*C#*D
    iC1 = _mm_mul_pd(A1, _mm_shuffle_pd(DC2,DC1,1));
    iC2 = _mm_mul_pd(A2, _mm_shuffle_pd(DC2,DC1,1));
    iC1 = _mm_sub_pd(iC1, _mm_mul_pd(_mm_shuffle_pd(A1,A1,1), _mm_shuffle_pd(DC2,DC1,2)));
    iC2 = _mm_sub_pd(iC2, _mm_mul_pd(_mm_shuffle_pd(A2,A2,1), _mm_shuffle_pd(DC2,DC1,2)));

    rd = _mm_div_sd(_mm_set_sd(1.0), det);
//     #ifdef ZERO_SINGULAR
//     rd = _mm_and_pd(_mm_cmpneq_sd(det,_mm_setzero_pd()), rd);
//     #endif
    rd = _mm_shuffle_pd(rd,rd,0);

    //  iB = C*|B| - D*B#*A
    dB = _mm_shuffle_pd(dB,dB,0);
    iB1 = _mm_sub_pd(_mm_mul_pd(C1, dB), iB1);
    iB2 = _mm_sub_pd(_mm_mul_pd(C2, dB), iB2);

    d1 = _mm_xor_pd(rd, _mm_load_pd((double*)_Sign_PN));
    d2 = _mm_xor_pd(rd, _mm_load_pd((double*)_Sign_NP));

    //  iC = B*|C| - A*C#*D;
    dC = _mm_shuffle_pd(dC,dC,0);
    iC1 = _mm_sub_pd(_mm_mul_pd(B1, dC), iC1);
    iC2 = _mm_sub_pd(_mm_mul_pd(B2, dC), iC2);
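
    // Final assembly: each 2x2 block of the result is the adjugate of the
    // corresponding iX, scaled by 1/det. The adjugate of [a b; c d] is
    // [d -b; -c a], so the shuffles below pick the swapped entries while the
    // _Sign_PN / _Sign_NP masks baked into d1 and d2 provide the sign flips
    // together with the division by the determinant.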

    result.template writePacket<ResultAlignment>( 0, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 3), d1));     // iA# / det
    result.template writePacket<ResultAlignment>( 4, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 0), d2));
    result.template writePacket<ResultAlignment>( 2, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 3), d1));     // iB# / det
    result.template writePacket<ResultAlignment>( 6, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 0), d2));
    result.template writePacket<ResultAlignment>( 8, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 3), d1));     // iC# / det
    result.template writePacket<ResultAlignment>(12, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 0), d2));
    result.template writePacket<ResultAlignment>(10, _mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 3), d1));     // iD# / det
    result.template writePacket<ResultAlignment>(14, _mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 0), d2));
  }

};

} // end namespace internal

#endif // EIGEN_INVERSE_SSE_H