// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.

#ifndef EIGEN_GENERAL_MATRIX_VECTOR_H
#define EIGEN_GENERAL_MATRIX_VECTOR_H

namespace internal {

/* Optimized col-major matrix * vector product:
 * This algorithm processes 4 columns at once, which both reduces
 * the number of loads/stores of the result by a factor of 4 and
 * shortens instruction-dependency chains. Moreover, we know that all
 * bands have the same alignment pattern.
 *
 * Mixing type logic: C += alpha * A * B
 *  |  A  |  B  |alpha| comments
 *  |real |cplx |cplx | no vectorization
 *  |real |cplx |real | alpha is converted to a cplx when calling the run function, no vectorization
 *  |cplx |real |cplx | invalid, the caller has to do tmp = A * B; C += alpha*tmp
 *  |cplx |real |real | optimal case, vectorization possible via real-cplx mul
 */
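// For intuition, a plain scalar version of the 4-columns-at-once accumulation
// implemented below would look roughly like this (illustrative sketch only,
// phrased in terms of the run() arguments; packet loads, alignment handling
// and conjugation are what the real kernel adds on top):
//
//   for (Index i=0; i<cols; i+=4)            // 4 columns per pass
//     for (Index j=0; j<rows; ++j)           // whole result stripe per pass
//       res[j] += alpha * (lhs[j+(i+0)*lhsStride] * rhs[(i+0)*rhsIncr]
//                        + lhs[j+(i+1)*lhsStride] * rhs[(i+1)*rhsIncr]
//                        + lhs[j+(i+2)*lhsStride] * rhs[(i+2)*rhsIncr]
//                        + lhs[j+(i+3)*lhsStride] * rhs[(i+3)*rhsIncr]);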
template<typename Index, typename LhsScalar, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs>
struct general_matrix_vector_product<Index,LhsScalar,ColMajor,ConjugateLhs,RhsScalar,ConjugateRhs>
{
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  enum {
    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable
                && int(packet_traits<LhsScalar>::size)==int(packet_traits<RhsScalar>::size),
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1
  };

  typedef typename packet_traits<LhsScalar>::type _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type _RhsPacket;
  typedef typename packet_traits<ResScalar>::type _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  EIGEN_DONT_INLINE static void run(
    Index rows, Index cols,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsIncr,
    ResScalar* res, Index
    #ifdef EIGEN_INTERNAL_DEBUGGING
      resIncr
    #endif
    , RhsScalar alpha)
  {
    eigen_internal_assert(resIncr==1);
    #ifdef _EIGEN_ACCUMULATE_PACKETS
    #error _EIGEN_ACCUMULATE_PACKETS has already been defined
    #endif
    #define _EIGEN_ACCUMULATE_PACKETS(A0,A13,A2) \
      pstore(&res[j], \
        padd(pload<ResPacket>(&res[j]), \
          padd( \
            padd(pcj.pmul(EIGEN_CAT(ploa , A0)<LhsPacket>(&lhs0[j]),  ptmp0), \
                 pcj.pmul(EIGEN_CAT(ploa , A13)<LhsPacket>(&lhs1[j]), ptmp1)), \
            padd(pcj.pmul(EIGEN_CAT(ploa , A2)<LhsPacket>(&lhs2[j]),  ptmp2), \
                 pcj.pmul(EIGEN_CAT(ploa , A13)<LhsPacket>(&lhs3[j]), ptmp3)) )))
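
    // For reference, _EIGEN_ACCUMULATE_PACKETS(d,du,d) expands (modulo
    // spacing) to one packet-wide update of res[j..j+ResPacketSize):
    //   pstore(&res[j], padd(pload<ResPacket>(&res[j]),
    //     padd(padd(pcj.pmul(pload <LhsPacket>(&lhs0[j]), ptmp0),
    //               pcj.pmul(ploadu<LhsPacket>(&lhs1[j]), ptmp1)),
    //          padd(pcj.pmul(pload <LhsPacket>(&lhs2[j]), ptmp2),
    //               pcj.pmul(ploadu<LhsPacket>(&lhs3[j]), ptmp3)))));
    // i.e. the A0/A13/A2 tokens select aligned (pload) vs unaligned (ploadu)
    // loads per column via EIGEN_CAT(ploa, d) / EIGEN_CAT(ploa, du).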

    conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
    conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
    if(ConjugateRhs)
      alpha = conj(alpha);

    enum { AllAligned = 0, EvenAligned, FirstAligned, NoneAligned };
    const Index columnsAtOnce = 4;
    const Index peels = 2;
    const Index LhsPacketAlignedMask = LhsPacketSize-1;
    const Index ResPacketAlignedMask = ResPacketSize-1;
    const Index PeelAlignedMask = ResPacketSize*peels-1;
    const Index size = rows;

    // How many coeffs of the result do we have to skip to be aligned.
    // Here we assume data are at least aligned on the base scalar type.
    Index alignedStart = first_aligned(res,size);
    Index alignedSize = ResPacketSize>1 ? alignedStart + ((size-alignedStart) & ~ResPacketAlignedMask) : 0;
    const Index peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart;

    const Index alignmentStep = LhsPacketSize>1 ? (LhsPacketSize - lhsStride % LhsPacketSize) & LhsPacketAlignedMask : 0;
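    // Worked example (assuming 4-wide packets, LhsPacketSize==ResPacketSize==4):
    // with size==13 and alignedStart==3, alignedSize = 3 + ((13-3) & ~3) = 11,
    // so packets cover res[3..10] and res[11..12] fall to the scalar epilogue;
    // with lhsStride==7, alignmentStep = (4 - 7%4) & 3 = 1, i.e. the offset of
    // the first aligned entry grows by one scalar per column.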
    Index alignmentPattern = alignmentStep==0 ? AllAligned
                           : alignmentStep==(LhsPacketSize/2) ? EvenAligned
                           : alignmentStep==1 ? FirstAligned
                           : NoneAligned;

    // we cannot assume the first element is aligned because of sub-matrices
    const Index lhsAlignmentOffset = first_aligned(lhs,size);

    // find how many columns we have to skip to be aligned with the result (if possible)
    Index skipColumns = 0;
    // if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats)
    if( (size_t(lhs)%sizeof(LhsScalar)) || (size_t(res)%sizeof(ResScalar)) )
    {
      alignedSize = 0;
      alignedStart = 0;
    }
    else if (LhsPacketSize>1)
    {
      eigen_internal_assert(size_t(lhs+lhsAlignmentOffset)%sizeof(LhsPacket)==0 || size<LhsPacketSize);

      while (skipColumns<LhsPacketSize &&
             alignedStart != ((lhsAlignmentOffset + alignmentStep*skipColumns)%LhsPacketSize))
        ++skipColumns;
      if (skipColumns==LhsPacketSize)
      {
        // nothing can be aligned, no need to skip any column
        alignmentPattern = NoneAligned;
        skipColumns = 0;
      }
      else
      {
        skipColumns = (std::min)(skipColumns,cols);
        // note that the skipped columns are processed later.
      }

      eigen_internal_assert(  (alignmentPattern==NoneAligned)
                            || (skipColumns + columnsAtOnce >= cols)
                            || LhsPacketSize > size
                            || (size_t(lhs+alignedStart+lhsStride*skipColumns)%sizeof(LhsPacket))==0);
    }
    else if(Vectorizable)
    {
      alignedStart = 0;
      alignedSize = size;
      alignmentPattern = AllAligned;
    }

    Index offset1 = (FirstAligned && alignmentStep==1?3:1);
    Index offset3 = (FirstAligned && alignmentStep==1?1:3);
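    // Note: when the FirstAligned pattern is used with a one-scalar alignment
    // step, the 2nd and 4th columns are processed in swapped order (offset1==3,
    // offset3==1) so that each column's actual misalignment matches the fixed
    // palign<1>/palign<2>/palign<3> shifts hard-coded in the peeled loop below.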

    Index columnBound = ((cols-skipColumns)/columnsAtOnce)*columnsAtOnce + skipColumns;
    for (Index i=skipColumns; i<columnBound; i+=columnsAtOnce)
    {
      RhsPacket ptmp0 = pset1<RhsPacket>(alpha*rhs[i*rhsIncr]),
                ptmp1 = pset1<RhsPacket>(alpha*rhs[(i+offset1)*rhsIncr]),
                ptmp2 = pset1<RhsPacket>(alpha*rhs[(i+2)*rhsIncr]),
                ptmp3 = pset1<RhsPacket>(alpha*rhs[(i+offset3)*rhsIncr]);

      // keeping dedicated column pointers helps a lot in generating better binary code
      const LhsScalar *lhs0 = lhs + i*lhsStride,     *lhs1 = lhs + (i+offset1)*lhsStride,
                      *lhs2 = lhs + (i+2)*lhsStride, *lhs3 = lhs + (i+offset3)*lhsStride;

      if (Vectorizable)
      {
        /* explicit vectorization */
        // process initial unaligned coeffs
        for (Index j=0; j<alignedStart; ++j)
        {
          res[j] = cj.pmadd(lhs0[j], pfirst(ptmp0), res[j]);
          res[j] = cj.pmadd(lhs1[j], pfirst(ptmp1), res[j]);
          res[j] = cj.pmadd(lhs2[j], pfirst(ptmp2), res[j]);
          res[j] = cj.pmadd(lhs3[j], pfirst(ptmp3), res[j]);
        }

        if (alignedSize>alignedStart)
        {
          switch(alignmentPattern)
          {
            case AllAligned:
              for (Index j = alignedStart; j<alignedSize; j+=ResPacketSize)
                _EIGEN_ACCUMULATE_PACKETS(d,d,d);
              break;
            case EvenAligned:
              for (Index j = alignedStart; j<alignedSize; j+=ResPacketSize)
                _EIGEN_ACCUMULATE_PACKETS(d,du,d);
              break;
            case FirstAligned:
              if (peels>1)
              {
                LhsPacket A00, A01, A02, A03, A10, A11, A12, A13;
                ResPacket T0, T1;

                A01 = pload<LhsPacket>(&lhs1[alignedStart-1]);
                A02 = pload<LhsPacket>(&lhs2[alignedStart-2]);
                A03 = pload<LhsPacket>(&lhs3[alignedStart-3]);
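
                // palign<k>(A,B) in effect shifts the A:B concatenation left
                // by k scalars and stores the leading packet back into A; e.g.
                // with 4-wide packets, A={a0,a1,a2,a3} and B={b0,b1,b2,b3},
                // palign<1>(A,B) yields A={a1,a2,a3,b0}. Two aligned loads
                // plus one such shift thus emulate one unaligned load, which
                // is much cheaper than a genuine unaligned load on most SIMD
                // ISAs.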
                for (Index j = alignedStart; j<peeledSize; j+=peels*ResPacketSize)
                {
                  A11 = pload<LhsPacket>(&lhs1[j-1+LhsPacketSize]);  palign<1>(A01,A11);
                  A12 = pload<LhsPacket>(&lhs2[j-2+LhsPacketSize]);  palign<2>(A02,A12);
                  A13 = pload<LhsPacket>(&lhs3[j-3+LhsPacketSize]);  palign<3>(A03,A13);

                  A00 = pload<LhsPacket>(&lhs0[j]);
                  A10 = pload<LhsPacket>(&lhs0[j+LhsPacketSize]);
                  T0  = pcj.pmadd(A00, ptmp0, pload<ResPacket>(&res[j]));
                  T1  = pcj.pmadd(A10, ptmp0, pload<ResPacket>(&res[j+ResPacketSize]));

                  T0  = pcj.pmadd(A01, ptmp1, T0);
                  A01 = pload<LhsPacket>(&lhs1[j-1+2*LhsPacketSize]);  palign<1>(A11,A01);
                  T0  = pcj.pmadd(A02, ptmp2, T0);
                  A02 = pload<LhsPacket>(&lhs2[j-2+2*LhsPacketSize]);  palign<2>(A12,A02);
                  T0  = pcj.pmadd(A03, ptmp3, T0);
                  pstore(&res[j],T0);
                  A03 = pload<LhsPacket>(&lhs3[j-3+2*LhsPacketSize]);  palign<3>(A13,A03);
                  T1  = pcj.pmadd(A11, ptmp1, T1);
                  T1  = pcj.pmadd(A12, ptmp2, T1);
                  T1  = pcj.pmadd(A13, ptmp3, T1);
                  pstore(&res[j+ResPacketSize],T1);
                }
              }
              for (Index j = peeledSize; j<alignedSize; j+=ResPacketSize)
                _EIGEN_ACCUMULATE_PACKETS(d,du,du);
              break;
            default:
              for (Index j = alignedStart; j<alignedSize; j+=ResPacketSize)
                _EIGEN_ACCUMULATE_PACKETS(du,du,du);
              break;
          }
        }
      } // end explicit vectorization

      /* process remaining coeffs (or all if there is no explicit vectorization) */
      for (Index j=alignedSize; j<size; ++j)
      {
        res[j] = cj.pmadd(lhs0[j], pfirst(ptmp0), res[j]);
        res[j] = cj.pmadd(lhs1[j], pfirst(ptmp1), res[j]);
        res[j] = cj.pmadd(lhs2[j], pfirst(ptmp2), res[j]);
        res[j] = cj.pmadd(lhs3[j], pfirst(ptmp3), res[j]);
      }
    }

    // process remaining first and last columns (at most columnsAtOnce-1)
    Index end = cols;
    Index start = columnBound;
    do
    {
      for (Index k=start; k<end; ++k)
      {
        RhsPacket ptmp0 = pset1<RhsPacket>(alpha*rhs[k*rhsIncr]);
        const LhsScalar* lhs0 = lhs + k*lhsStride;

        if (Vectorizable)
        {
          /* explicit vectorization */
          // process first unaligned result's coeffs
          for (Index j=0; j<alignedStart; ++j)
            res[j] += cj.pmul(lhs0[j], pfirst(ptmp0));
          // process aligned result's coeffs
          if ((size_t(lhs0+alignedStart)%sizeof(LhsPacket))==0)
            // lhs0 is known to be aligned here, so an aligned packet load is safe
            for (Index i = alignedStart;i<alignedSize;i+=ResPacketSize)
              pstore(&res[i], pcj.pmadd(pload<LhsPacket>(&lhs0[i]), ptmp0, pload<ResPacket>(&res[i])));
          else
            for (Index i = alignedStart;i<alignedSize;i+=ResPacketSize)
              pstore(&res[i], pcj.pmadd(ploadu<LhsPacket>(&lhs0[i]), ptmp0, pload<ResPacket>(&res[i])));
        }

        // process remaining scalars (or all if no explicit vectorization)
        for (Index i=alignedSize; i<size; ++i)
          res[i] += cj.pmul(lhs0[i], pfirst(ptmp0));
      }
      if (skipColumns)
      {
        start = 0;
        end = skipColumns;
        skipColumns = 0;
      }
      else
        break;
    } while(Vectorizable);
    #undef _EIGEN_ACCUMULATE_PACKETS
  }
};

/* Optimized row-major matrix * vector product:
 * This algorithm processes 4 rows at once, which both reduces
 * the number of loads/stores of the result by a factor of 4 and
 * shortens instruction-dependency chains. Moreover, we know that all
 * bands have the same alignment pattern.
 *
 * Mixing type logic:
 *  - alpha is always a complex (or converted to a complex)
 *  - no vectorization
 */
template<typename Index, typename LhsScalar, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs>
struct general_matrix_vector_product<Index,LhsScalar,RowMajor,ConjugateLhs,RhsScalar,ConjugateRhs>
{
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  enum {
    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable
                && int(packet_traits<LhsScalar>::size)==int(packet_traits<RhsScalar>::size),
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1
  };

  typedef typename packet_traits<LhsScalar>::type _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type _RhsPacket;
  typedef typename packet_traits<ResScalar>::type _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  EIGEN_DONT_INLINE static void run(
    Index rows, Index cols,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsIncr,
    ResScalar* res, Index resIncr,
    RhsScalar alpha)
  {
    EIGEN_UNUSED_VARIABLE(rhsIncr);
    eigen_internal_assert(rhsIncr==1);
    #ifdef _EIGEN_ACCUMULATE_PACKETS
    #error _EIGEN_ACCUMULATE_PACKETS has already been defined
    #endif

    #define _EIGEN_ACCUMULATE_PACKETS(A0,A13,A2) {\
      RhsPacket b = pload<RhsPacket>(&rhs[j]); \
      ptmp0 = pcj.pmadd(EIGEN_CAT(ploa,A0) <LhsPacket>(&lhs0[j]), b, ptmp0); \
      ptmp1 = pcj.pmadd(EIGEN_CAT(ploa,A13)<LhsPacket>(&lhs1[j]), b, ptmp1); \
      ptmp2 = pcj.pmadd(EIGEN_CAT(ploa,A2) <LhsPacket>(&lhs2[j]), b, ptmp2); \
      ptmp3 = pcj.pmadd(EIGEN_CAT(ploa,A13)<LhsPacket>(&lhs3[j]), b, ptmp3); }
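
    // Each expansion of this macro folds one aligned rhs packet into the four
    // per-row accumulators ptmp0..ptmp3; as in the col-major kernel, the
    // A0/A13/A2 tokens pick pload (aligned) or ploadu (unaligned) for each
    // lhs row, so the same body serves all alignment patterns.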

    conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
    conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;

    enum { AllAligned=0, EvenAligned=1, FirstAligned=2, NoneAligned=3 };
    const Index rowsAtOnce = 4;
    const Index peels = 2;
    const Index RhsPacketAlignedMask = RhsPacketSize-1;
    const Index LhsPacketAlignedMask = LhsPacketSize-1;
    const Index PeelAlignedMask = RhsPacketSize*peels-1;
    const Index depth = cols;

    // How many coeffs of the result do we have to skip to be aligned.
    // Here we assume data are at least aligned on the base scalar type;
    // if that's not the case then vectorization is discarded, see below.
    Index alignedStart = first_aligned(rhs, depth);
    Index alignedSize = RhsPacketSize>1 ? alignedStart + ((depth-alignedStart) & ~RhsPacketAlignedMask) : 0;
    const Index peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart;

    const Index alignmentStep = LhsPacketSize>1 ? (LhsPacketSize - lhsStride % LhsPacketSize) & LhsPacketAlignedMask : 0;
    Index alignmentPattern = alignmentStep==0 ? AllAligned
                           : alignmentStep==(LhsPacketSize/2) ? EvenAligned
                           : alignmentStep==1 ? FirstAligned
                           : NoneAligned;

    // we cannot assume the first element is aligned because of sub-matrices
    const Index lhsAlignmentOffset = first_aligned(lhs,depth);

    // find how many rows we have to skip to be aligned with rhs (if possible)
    Index skipRows = 0;
    // if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats)
    if( (sizeof(LhsScalar)!=sizeof(RhsScalar)) || (size_t(lhs)%sizeof(LhsScalar)) || (size_t(rhs)%sizeof(RhsScalar)) )
    {
      alignedSize = 0;
      alignedStart = 0;
    }
    else if (LhsPacketSize>1)
    {
      eigen_internal_assert(size_t(lhs+lhsAlignmentOffset)%sizeof(LhsPacket)==0 || depth<LhsPacketSize);

      while (skipRows<LhsPacketSize &&
             alignedStart != ((lhsAlignmentOffset + alignmentStep*skipRows)%LhsPacketSize))
        ++skipRows;
      if (skipRows==LhsPacketSize)
      {
        // nothing can be aligned, no need to skip any row
        alignmentPattern = NoneAligned;
        skipRows = 0;
      }
      else
      {
        skipRows = (std::min)(skipRows,Index(rows));
        // note that the skipped rows are processed later.
      }

      eigen_internal_assert(  alignmentPattern==NoneAligned
                            || LhsPacketSize==1
                            || (skipRows + rowsAtOnce >= rows)
                            || LhsPacketSize > depth
                            || (size_t(lhs+alignedStart+lhsStride*skipRows)%sizeof(LhsPacket))==0);
    }
    else if(Vectorizable)
    {
      alignedStart = 0;
      alignedSize = depth;
      alignmentPattern = AllAligned;
    }

    Index offset1 = (FirstAligned && alignmentStep==1?3:1);
    Index offset3 = (FirstAligned && alignmentStep==1?1:3);

    Index rowBound = ((rows-skipRows)/rowsAtOnce)*rowsAtOnce + skipRows;
    for (Index i=skipRows; i<rowBound; i+=rowsAtOnce)
    {
      EIGEN_ALIGN16 ResScalar tmp0 = ResScalar(0);
      ResScalar tmp1 = ResScalar(0), tmp2 = ResScalar(0), tmp3 = ResScalar(0);

      // keeping dedicated row pointers helps the compiler generate good binary code
      const LhsScalar *lhs0 = lhs + i*lhsStride,     *lhs1 = lhs + (i+offset1)*lhsStride,
                      *lhs2 = lhs + (i+2)*lhsStride, *lhs3 = lhs + (i+offset3)*lhsStride;

      if (Vectorizable)
      {
        /* explicit vectorization */
        ResPacket ptmp0 = pset1<ResPacket>(ResScalar(0)), ptmp1 = pset1<ResPacket>(ResScalar(0)),
                  ptmp2 = pset1<ResPacket>(ResScalar(0)), ptmp3 = pset1<ResPacket>(ResScalar(0));

        // process initial unaligned coeffs
        // FIXME this loop gets vectorized by the compiler!
        for (Index j=0; j<alignedStart; ++j)
        {
          RhsScalar b = rhs[j];
          tmp0 += cj.pmul(lhs0[j],b); tmp1 += cj.pmul(lhs1[j],b);
          tmp2 += cj.pmul(lhs2[j],b); tmp3 += cj.pmul(lhs3[j],b);
        }

        if (alignedSize>alignedStart)
        {
          switch(alignmentPattern)
          {
            case AllAligned:
              for (Index j = alignedStart; j<alignedSize; j+=RhsPacketSize)
                _EIGEN_ACCUMULATE_PACKETS(d,d,d);
              break;
            case EvenAligned:
              for (Index j = alignedStart; j<alignedSize; j+=RhsPacketSize)
                _EIGEN_ACCUMULATE_PACKETS(d,du,d);
              break;
            case FirstAligned:
              if (peels>1)
              {
                /* Here we process 4 rows with two peeled iterations to hide
                 * the overhead of unaligned loads. Moreover, unaligned loads
                 * are handled using special shift/move operations between the
                 * two aligned packets overlapping the desired unaligned packet.
                 * This is *much* more efficient than basic unaligned loads.
                 */
                LhsPacket A01, A02, A03, A11, A12, A13;
                A01 = pload<LhsPacket>(&lhs1[alignedStart-1]);
                A02 = pload<LhsPacket>(&lhs2[alignedStart-2]);
                A03 = pload<LhsPacket>(&lhs3[alignedStart-3]);

                for (Index j = alignedStart; j<peeledSize; j+=peels*RhsPacketSize)
                {
                  RhsPacket b = pload<RhsPacket>(&rhs[j]);
                  A11 = pload<LhsPacket>(&lhs1[j-1+LhsPacketSize]);  palign<1>(A01,A11);
                  A12 = pload<LhsPacket>(&lhs2[j-2+LhsPacketSize]);  palign<2>(A02,A12);
                  A13 = pload<LhsPacket>(&lhs3[j-3+LhsPacketSize]);  palign<3>(A03,A13);

                  ptmp0 = pcj.pmadd(pload<LhsPacket>(&lhs0[j]), b, ptmp0);
                  ptmp1 = pcj.pmadd(A01, b, ptmp1);
                  A01 = pload<LhsPacket>(&lhs1[j-1+2*LhsPacketSize]);  palign<1>(A11,A01);
                  ptmp2 = pcj.pmadd(A02, b, ptmp2);
                  A02 = pload<LhsPacket>(&lhs2[j-2+2*LhsPacketSize]);  palign<2>(A12,A02);
                  ptmp3 = pcj.pmadd(A03, b, ptmp3);
                  A03 = pload<LhsPacket>(&lhs3[j-3+2*LhsPacketSize]);  palign<3>(A13,A03);

                  b = pload<RhsPacket>(&rhs[j+RhsPacketSize]);
                  ptmp0 = pcj.pmadd(pload<LhsPacket>(&lhs0[j+LhsPacketSize]), b, ptmp0);
                  ptmp1 = pcj.pmadd(A11, b, ptmp1);
                  ptmp2 = pcj.pmadd(A12, b, ptmp2);
                  ptmp3 = pcj.pmadd(A13, b, ptmp3);
                }
              }
              for (Index j = peeledSize; j<alignedSize; j+=RhsPacketSize)
                _EIGEN_ACCUMULATE_PACKETS(d,du,du);
              break;
            default:
              for (Index j = alignedStart; j<alignedSize; j+=RhsPacketSize)
                _EIGEN_ACCUMULATE_PACKETS(du,du,du);
              break;
          }
          tmp0 += predux(ptmp0);
          tmp1 += predux(ptmp1);
          tmp2 += predux(ptmp2);
          tmp3 += predux(ptmp3);
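          // (predux() horizontally adds the lanes of a packet, e.g. with
          // 4-wide packets predux({x0,x1,x2,x3}) == x0+x1+x2+x3, folding the
          // packet accumulators back into the scalar ones.)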
        }
      } // end explicit vectorization

      // process remaining coeffs (or all if no explicit vectorization)
      // FIXME this loop gets vectorized by the compiler!
      for (Index j=alignedSize; j<depth; ++j)
      {
        RhsScalar b = rhs[j];
        tmp0 += cj.pmul(lhs0[j],b); tmp1 += cj.pmul(lhs1[j],b);
        tmp2 += cj.pmul(lhs2[j],b); tmp3 += cj.pmul(lhs3[j],b);
      }
      res[i*resIncr]           += alpha*tmp0;
      res[(i+offset1)*resIncr] += alpha*tmp1;
      res[(i+2)*resIncr]       += alpha*tmp2;
      res[(i+offset3)*resIncr] += alpha*tmp3;
    }

    // process remaining first and last rows (at most rowsAtOnce-1)
    Index end = rows;
    Index start = rowBound;
    do
    {
      for (Index i=start; i<end; ++i)
      {
        EIGEN_ALIGN16 ResScalar tmp0 = ResScalar(0);
        ResPacket ptmp0 = pset1<ResPacket>(tmp0);
        const LhsScalar* lhs0 = lhs + i*lhsStride;
        // process first unaligned result's coeffs
        // FIXME this loop gets vectorized by the compiler!
        for (Index j=0; j<alignedStart; ++j)
          tmp0 += cj.pmul(lhs0[j], rhs[j]);

        if (alignedSize>alignedStart)
        {
          // process aligned rhs coeffs
          if ((size_t(lhs0+alignedStart)%sizeof(LhsPacket))==0)
            for (Index j = alignedStart;j<alignedSize;j+=RhsPacketSize)
              ptmp0 = pcj.pmadd(pload<LhsPacket>(&lhs0[j]), pload<RhsPacket>(&rhs[j]), ptmp0);
          else
            for (Index j = alignedStart;j<alignedSize;j+=RhsPacketSize)
              ptmp0 = pcj.pmadd(ploadu<LhsPacket>(&lhs0[j]), pload<RhsPacket>(&rhs[j]), ptmp0);
          tmp0 += predux(ptmp0);
        }

        // process remaining scalars
        // FIXME this loop gets vectorized by the compiler!
        for (Index j=alignedSize; j<depth; ++j)
          tmp0 += cj.pmul(lhs0[j], rhs[j]);
        res[i*resIncr] += alpha*tmp0;
      }
      if (skipRows)
      {
        start = 0;
        end = skipRows;
        skipRows = 0;
      }
      else
        break;
    } while(Vectorizable);

    #undef _EIGEN_ACCUMULATE_PACKETS
  }
};

} // end namespace internal

#endif // EIGEN_GENERAL_MATRIX_VECTOR_H