#ifndef EIGEN_GENERAL_BLOCK_PANEL_H
#define EIGEN_GENERAL_BLOCK_PANEL_H
template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs=false, bool _ConjRhs=false>
class gebp_traits;

/** \internal \returns b if a<=0, and returns a otherwise. */
inline std::ptrdiff_t manage_caching_sizes_helper(std::ptrdiff_t a, std::ptrdiff_t b)
{
  return a<=0 ? b : a;
}
#if EIGEN_ARCH_i386_OR_x86_64
const std::ptrdiff_t defaultL1CacheSize = 32*1024;
const std::ptrdiff_t defaultL2CacheSize = 256*1024;
const std::ptrdiff_t defaultL3CacheSize = 2*1024*1024;
#else
const std::ptrdiff_t defaultL1CacheSize = 16*1024;
const std::ptrdiff_t defaultL2CacheSize = 512*1024;
const std::ptrdiff_t defaultL3CacheSize = 512*1024;
#endif
struct CacheSizes {
  CacheSizes(): m_l1(-1),m_l2(-1),m_l3(-1) {
    int l1CacheSize, l2CacheSize, l3CacheSize;
    queryCacheSizes(l1CacheSize, l2CacheSize, l3CacheSize);
    m_l1 = manage_caching_sizes_helper(l1CacheSize, defaultL1CacheSize);
    m_l2 = manage_caching_sizes_helper(l2CacheSize, defaultL2CacheSize);
    m_l3 = manage_caching_sizes_helper(l3CacheSize, defaultL3CacheSize);
  }

  std::ptrdiff_t m_l1;
  std::ptrdiff_t m_l2;
  std::ptrdiff_t m_l3;
};
inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1, std::ptrdiff_t* l2, std::ptrdiff_t* l3)
{
  static CacheSizes m_cacheSizes;

  if(action==SetAction)
  {
    eigen_internal_assert(l1!=0 && l2!=0);
    m_cacheSizes.m_l1 = *l1;
    m_cacheSizes.m_l2 = *l2;
    m_cacheSizes.m_l3 = *l3;
  }
  else if(action==GetAction)
  {
    eigen_internal_assert(l1!=0 && l2!=0);
    *l1 = m_cacheSizes.m_l1;
    *l2 = m_cacheSizes.m_l2;
    *l3 = m_cacheSizes.m_l3;
  }
  else
  {
    eigen_internal_assert(false);
  }
}
template<typename LhsScalar, typename RhsScalar, int KcFactor>
void evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index num_threads = 1)
{
  typedef gebp_traits<LhsScalar,RhsScalar> Traits;

  // Query the cache sizes the blocking heuristic is based on.
  std::ptrdiff_t l1, l2, l3;
  manage_caching_sizes(GetAction, &l1, &l2, &l3);

  if (num_threads > 1) {
    typedef typename Traits::ResScalar ResScalar;
    enum {
      kdiv = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)),
      ksub = Traits::mr * Traits::nr * sizeof(ResScalar),
      k_mask = (0xffffffff/8)*8,

      mr = Traits::mr,
      mr_mask = (0xffffffff/mr)*mr,

      nr = Traits::nr,
      nr_mask = (0xffffffff/nr)*nr
    };
    // Increasing k gives more time to prefetch the content of the "C" registers,
    // but there is no point in making it larger than 320.
    const Index k_cache = (std::min<Index>)((l1-ksub)/kdiv, 320);
    if (k_cache < k) {
      k = k_cache & k_mask;
      eigen_internal_assert(k > 0);
    }

    const Index n_cache = (l2-l1) / (nr * sizeof(RhsScalar) * k);
    const Index n_per_thread = numext::div_ceil(n, num_threads);
    if (n_cache <= n_per_thread) {
      // Don't exceed the capacity of the l2 cache.
      eigen_internal_assert(n_cache >= static_cast<Index>(nr));
      n = n_cache & nr_mask;
      eigen_internal_assert(n > 0);
    } else {
      n = (std::min<Index>)(n, (n_per_thread + nr - 1) & nr_mask);
    }
    // Adjust the row-blocking m using the per-thread share of the remaining cache.
    const Index m_cache = (l3-l2) / (sizeof(LhsScalar) * k * num_threads);
    const Index m_per_thread = numext::div_ceil(m, num_threads);
    if(m_cache < m_per_thread && m_cache >= static_cast<Index>(mr)) {
      m = m_cache & mr_mask;
      eigen_internal_assert(m > 0);
    } else {
      m = (std::min<Index>)(m, (m_per_thread + mr - 1) & mr_mask);
    }
  }
  else
  {
    // In unit tests we do not want to use extra large matrices,
    // so the cache sizes are artificially reduced to stress the blocking strategy.
#ifdef EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS
    l1 = 9*1024;
    l2 = 32*1024;
    l3 = 512*1024;
#endif

    // Early return for small problems because the computations below are time
    // consuming for small problems.
    if((std::max)(k,(std::max)(m,n))<48)
      return;
    typedef typename Traits::ResScalar ResScalar;
    enum {
      k_peeling = 8,
      k_div = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)),
      k_sub = Traits::mr * Traits::nr * sizeof(ResScalar)
    };

    // ---- 1st level of blocking on L1, yields kc ----
    const Index max_kc = ((l1-k_sub)/k_div) & (~(k_peeling-1));
    const Index old_k = k;
    if(k>max_kc)
    {
      // We are really blocking on the third dimension:
      // -> reduce blocking size to make sure the last block is as large as possible
      //    while keeping the same number of sweeps over the result.
      k = (k%max_kc)==0 ? max_kc
                        : max_kc - k_peeling * ((max_kc-1-(k%max_kc))/(k_peeling*(k/max_kc+1)));

      eigen_internal_assert(((old_k/k) == (old_k/max_kc)) && "the number of sweeps has to remain the same");
    }
    // ---- 2nd level of blocking on max(L2,L3), yields nc ----
#ifdef EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS
    const Index actual_l2 = l3;
#else
    const Index actual_l2 = 1572864; // == 1.5 MB
#endif

    // Here, nc is chosen such that a kc x nc block of the rhs fits within half of L2;
    // the other half is implicitly reserved for the result and the lhs coefficients.
    Index max_nc;
    const Index lhs_bytes = m * k * sizeof(LhsScalar);
    const Index remaining_l1 = l1- k_sub - lhs_bytes;
    if(remaining_l1 >= Index(Traits::nr*sizeof(RhsScalar))*k)
    {
      // L1 blocking
      max_nc = remaining_l1 / (k*sizeof(RhsScalar));
    }
    else
    {
      // L2 blocking
      max_nc = (3*actual_l2)/(2*2*max_kc*sizeof(RhsScalar));
    }
    // WARNING: below we assume that Traits::nr is a power of two.
    Index nc = std::min<Index>(actual_l2/(2*k*sizeof(RhsScalar)), max_nc) & (~(Traits::nr-1));
    if(n>nc)
    {
      // We are really blocking over the columns:
      // -> reduce blocking size to make sure the last block is as large as possible
      //    while keeping the same number of sweeps over the packed lhs.
      n = (n%nc)==0 ? nc
                    : (nc - Traits::nr * ((nc-(n%nc))/(Traits::nr*(n/nc+1))));
    }
    else if(old_k==k)
    {
      // So far, no blocking at all, i.e., kc==k and nc==n.
      // In this case, block over the rows so that the packed lhs data stays in L1/L2.
      Index problem_size = k*n*sizeof(LhsScalar);
      Index actual_lm = actual_l2;
      Index max_mc = m;
      if(problem_size<=1024)
      {
        // problem is small enough to keep in L1
        actual_lm = l1;
      }
      else if(l3!=0 && problem_size<=32768)
      {
        // we have both L2 and L3, and the problem is small enough to be kept in L2
        actual_lm = l2;
        max_mc = (std::min<Index>)(576,max_mc);
      }
      Index mc = (std::min<Index>)(actual_lm/(3*k*sizeof(LhsScalar)), max_mc);
      if (mc > Traits::mr) mc -= mc % Traits::mr;
      else if (mc==0) return;
      m = (m%mc)==0 ? mc
                    : (mc - Traits::mr * ((mc-(m%mc))/(Traits::mr*(m/mc+1))));
    }
  }
}
inline bool useSpecificBlockingSizes(Index& k, Index& m, Index& n)
{
#ifdef EIGEN_TEST_SPECIFIC_BLOCKING_SIZES
  if (EIGEN_TEST_SPECIFIC_BLOCKING_SIZES) {
    k = std::min<Index>(k, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_K);
    m = std::min<Index>(m, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_M);
    n = std::min<Index>(n, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_N);
    return true;
  }
#else
  EIGEN_UNUSED_VARIABLE(k)
  EIGEN_UNUSED_VARIABLE(m)
  EIGEN_UNUSED_VARIABLE(n)
#endif
  return false;
}
template<typename LhsScalar, typename RhsScalar, int KcFactor>
void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_threads = 1)
{
  if (!useSpecificBlockingSizes(k, m, n)) {
    evaluateProductBlockingSizesHeuristic<LhsScalar, RhsScalar, KcFactor>(k, m, n, num_threads);
  }

  typedef gebp_traits<LhsScalar,RhsScalar> Traits;
  enum {
    kr = 8,
    mr = Traits::mr,
    nr = Traits::nr
  };
  if (k > kr) k -= k % kr;
  if (m > mr) m -= m % mr;
  if (n > nr) n -= n % nr;
}

template<typename LhsScalar, typename RhsScalar>
inline void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_threads = 1)
{
  computeProductBlockingSizes<LhsScalar,RhsScalar,1>(k, m, n, num_threads);
}
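// Illustrative usage sketch (not part of Eigen itself): given a GEMM problem of size
// (rows, depth, cols), the blocking sizes are obtained by passing the full sizes in and
// letting the heuristic shrink them, e.g.
//
//   Index kc = depth, mc = rows, nc = cols;                    // start from the full problem
//   internal::computeProductBlockingSizes<float,float>(kc, mc, nc, /*num_threads=*/1);
//   // kc, mc, nc now hold the depth/row/column block sizes used when packing panels
//   // for gebp_kernel below.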
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
  #define CJMADD(CJ,A,B,C,T)  C = CJ.pmadd(A,B,C);
#else

  template<typename CJ, typename A, typename B, typename C, typename T>
  struct gebp_madd_selector {
    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, A& a, B& b, C& c, T& /*t*/)
    {
      c = cj.pmadd(a,b,c);
    }
  };

  template<typename CJ, typename T>
  struct gebp_madd_selector<CJ,T,T,T,T> {
    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, T& a, T& b, T& c, T& t)
    {
      t = b; t = cj.pmul(a,t); c = padd(c,t);
    }
  };

  template<typename CJ, typename A, typename B, typename C, typename T>
  EIGEN_STRONG_INLINE void gebp_madd(const CJ& cj, A& a, B& b, C& c, T& t)
  {
    gebp_madd_selector<CJ,A,B,C,T>::run(cj,a,b,c,t);
  }

  #define CJMADD(CJ,A,B,C,T)  gebp_madd(CJ,A,B,C,T);
#endif
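// Illustrative note (not part of Eigen itself): in the scalar fallback paths of
// gebp_kernel below, CJMADD(cj,A0,B_0,C0,T0) accumulates C0 += A0*B_0 through the
// conjugation helper `cj`. When all four operands share the same scalar type and no
// fused multiply-add is available, it expands to
//
//   T0 = B_0; T0 = cj.pmul(A0,T0); C0 = padd(C0,T0);
//
// with T0 acting as an explicit scratch register.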
template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs, bool _ConjRhs>
class gebp_traits
{
public:
  typedef _LhsScalar LhsScalar;
  typedef _RhsScalar RhsScalar;
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = _ConjRhs,
    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,

    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,

    // register block size along the N direction
    nr = 4,

    default_mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize,
#if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX)
    // with a single-instruction madd we can afford a larger register block along M
    mr = Vectorizable ? 3*LhsPacketSize : default_mr,
#else
    mr = default_mr,
#endif

    LhsProgress = LhsPacketSize,
    RhsProgress = 1
  };
  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
  typedef typename packet_traits<ResScalar>::type  _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  typedef ResPacket AccPacket;

  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
  {
    p = pset1<ResPacket>(ResScalar(0));
  }

  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    pbroadcast4(b, b0, b1, b2, b3);
  }

  template<typename RhsPacketType>
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketType& dest) const
  {
    dest = pset1<RhsPacketType>(*b);
  }

  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = ploadquad<RhsPacket>(b);
  }

  template<typename LhsPacketType>
  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacketType& dest) const
  {
    dest = pload<LhsPacketType>(a);
  }

  template<typename LhsPacketType>
  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacketType& dest) const
  {
    dest = ploadu<LhsPacketType>(a);
  }

  template<typename LhsPacketType, typename RhsPacketType, typename AccPacketType>
  EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, AccPacketType& tmp) const
  {
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
    EIGEN_UNUSED_VARIABLE(tmp);
    c = pmadd(a,b,c);
#else
    tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp);
#endif
  }

  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    r = pmadd(c,alpha,r);
  }

  template<typename ResPacketHalf>
  EIGEN_STRONG_INLINE void acc(const ResPacketHalf& c, const ResPacketHalf& alpha, ResPacketHalf& r) const
  {
    r = pmadd(c,alpha,r);
  }
};
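// Illustrative sketch (not part of Eigen itself): gebp_kernel drives the traits API
// roughly as follows for one packet-sized row block and one rhs coefficient,
// accumulating c += a*b and finally r += alpha*c (blA/blB denote packed lhs/rhs
// pointers and r0 a result LinearMapper, as in the kernel below):
//
//   AccPacket c;   traits.initAcc(c);
//   LhsPacket a;   traits.loadLhs(blA, a);         // aligned packet of packed lhs
//   RhsPacket b, t; traits.loadRhs(blB, b);        // broadcast one rhs coefficient
//   traits.madd(a, b, c, t);                       // c += a*b (t is scratch without FMA)
//   ResPacket r = r0.loadPacket(0);
//   traits.acc(c, pset1<ResPacket>(alpha), r);     // r += alpha*c
//   r0.storePacket(0, r);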
template<typename RealScalar, bool _ConjLhs>
class gebp_traits<std::complex<RealScalar>, RealScalar, _ConjLhs, false>
{
public:
  typedef std::complex<RealScalar> LhsScalar;
  typedef RealScalar RhsScalar;
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = false,
    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,

    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
    nr = 4,
#if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX)
    mr = 3*LhsPacketSize,
#else
    mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize,
#endif

    LhsProgress = LhsPacketSize,
    RhsProgress = 1
  };
  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
  typedef typename packet_traits<ResScalar>::type  _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  typedef ResPacket AccPacket;

  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
  {
    p = pset1<ResPacket>(ResScalar(0));
  }

  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = pset1<RhsPacket>(*b);
  }

  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = pset1<RhsPacket>(*b);
  }

  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = pload<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploadu<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    pbroadcast4(b, b0, b1, b2, b3);
  }

  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
  {
    madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
  {
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
    EIGEN_UNUSED_VARIABLE(tmp);
    c.v = pmadd(a.v,b,c.v);
#else
    tmp = b; tmp = pmul(a.v,tmp); c.v = padd(c.v,tmp);
#endif
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
  {
    c += a * b;
  }

  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    r = cj.pmadd(c,alpha,r);
  }

protected:
  conj_helper<ResPacket,ResPacket,ConjLhs,false> cj;
};
template<typename Packet>
struct DoublePacket
{
  Packet first;
  Packet second;
};

template<typename Packet>
DoublePacket<Packet> padd(const DoublePacket<Packet> &a, const DoublePacket<Packet> &b)
{
  DoublePacket<Packet> res;
  res.first  = padd(a.first, b.first);
  res.second = padd(a.second,b.second);
  return res;
}

template<typename Packet>
const DoublePacket<Packet>& predux4(const DoublePacket<Packet> &a)
{
  return a;
}

template<typename Packet>
struct unpacket_traits<DoublePacket<Packet> > {
  typedef DoublePacket<Packet> half;
};
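// Illustrative note (not part of Eigen itself): for complex*complex products the rhs
// coefficient is broadcast into a DoublePacket holding pset1(real(b)) and pset1(imag(b)),
// so the kernel can accumulate the two partial products with plain real arithmetic and
// only recombine them once per result block (see acc() in the traits below). Roughly:
//
//   DoublePacket<RealPacket> b;   // b.first = pset1(real(*rhs)), b.second = pset1(imag(*rhs))
//   c.first  += a * b.first;      // interleaved real partials
//   c.second += a * b.second;     // interleaved imaginary partials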
template<typename RealScalar, bool _ConjLhs, bool _ConjRhs>
class gebp_traits<std::complex<RealScalar>, std::complex<RealScalar>, _ConjLhs, _ConjRhs >
{
public:
  typedef std::complex<RealScalar>  Scalar;
  typedef std::complex<RealScalar>  LhsScalar;
  typedef std::complex<RealScalar>  RhsScalar;
  typedef std::complex<RealScalar>  ResScalar;

  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = _ConjRhs,
    Vectorizable = packet_traits<RealScalar>::Vectorizable
                && packet_traits<Scalar>::Vectorizable,
    RealPacketSize  = Vectorizable ? packet_traits<RealScalar>::size : 1,
    ResPacketSize   = Vectorizable ? packet_traits<ResScalar>::size : 1,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,

    nr = 4,
    mr = ResPacketSize,

    LhsProgress = ResPacketSize,
    RhsProgress = 1
  };
  typedef typename packet_traits<RealScalar>::type RealPacket;
  typedef typename packet_traits<Scalar>::type     ScalarPacket;
  typedef DoublePacket<RealPacket> DoublePacketType;

  typedef typename conditional<Vectorizable,RealPacket,      Scalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,DoublePacketType,Scalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,ScalarPacket,    Scalar>::type ResPacket;
  typedef typename conditional<Vectorizable,DoublePacketType,Scalar>::type AccPacket;

  EIGEN_STRONG_INLINE void initAcc(Scalar& p) { p = Scalar(0); }

  EIGEN_STRONG_INLINE void initAcc(DoublePacketType& p)
  {
    p.first   = pset1<RealPacket>(RealScalar(0));
    p.second  = pset1<RealPacket>(RealScalar(0));
  }

  // Scalar path
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, ResPacket& dest) const
  {
    dest = pset1<ResPacket>(*b);
  }

  // Vectorized path
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, DoublePacketType& dest) const
  {
    dest.first  = pset1<RealPacket>(real(*b));
    dest.second = pset1<RealPacket>(imag(*b));
  }

  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, ResPacket& dest) const
  {
    loadRhs(b,dest);
  }
  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, DoublePacketType& dest) const
  {
    eigen_internal_assert(unpacket_traits<ScalarPacket>::size<=4);
    loadRhs(b,dest);
  }

  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    loadRhs(b+0, b0);
    loadRhs(b+1, b1);
    loadRhs(b+2, b2);
    loadRhs(b+3, b3);
  }

  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, DoublePacketType& b0, DoublePacketType& b1)
  {
    loadRhs(b+0, b0);
    loadRhs(b+1, b1);
  }

  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsScalar& b0, RhsScalar& b1)
  {
    loadRhs(b+0, b0);
    loadRhs(b+1, b1);
  }

  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = pload<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a));
  }

  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploadu<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a));
  }

  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, DoublePacketType& c, RhsPacket& /*tmp*/) const
  {
    c.first   = padd(pmul(a,b.first), c.first);
    c.second  = padd(pmul(a,b.second),c.second);
  }

  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, ResPacket& c, RhsPacket& /*tmp*/) const
  {
    c = cj.pmadd(a,b,c);
  }

  EIGEN_STRONG_INLINE void acc(const Scalar& c, const Scalar& alpha, Scalar& r) const { r += alpha * c; }

  EIGEN_STRONG_INLINE void acc(const DoublePacketType& c, const ResPacket& alpha, ResPacket& r) const
  {
    // assemble the complex result from the two real accumulators
    ResPacket tmp;
    if((!ConjLhs)&&(!ConjRhs))
    {
      tmp = pcplxflip(pconj(ResPacket(c.second)));
      tmp = padd(ResPacket(c.first),tmp);
    }
    else if((!ConjLhs)&&(ConjRhs))
    {
      tmp = pconj(pcplxflip(ResPacket(c.second)));
      tmp = padd(ResPacket(c.first),tmp);
    }
    else if((ConjLhs)&&(!ConjRhs))
    {
      tmp = pcplxflip(ResPacket(c.second));
      tmp = padd(pconj(ResPacket(c.first)),tmp);
    }
    else if((ConjLhs)&&(ConjRhs))
    {
      tmp = pcplxflip(ResPacket(c.second));
      tmp = psub(pconj(ResPacket(c.first)),tmp);
    }

    r = pmadd(tmp,alpha,r);
  }

protected:
  conj_helper<LhsScalar,RhsScalar,ConjLhs,ConjRhs> cj;
};
template<typename RealScalar, bool _ConjRhs>
class gebp_traits<RealScalar, std::complex<RealScalar>, false, _ConjRhs >
{
public:
  typedef std::complex<RealScalar>  Scalar;
  typedef RealScalar  LhsScalar;
  typedef Scalar      RhsScalar;
  typedef Scalar      ResScalar;

  enum {
    ConjLhs = false,
    ConjRhs = _ConjRhs,
    Vectorizable = packet_traits<RealScalar>::Vectorizable
                && packet_traits<Scalar>::Vectorizable,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,

    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
    nr = 4,
    mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*ResPacketSize,

    LhsProgress = ResPacketSize,
    RhsProgress = 1
  };
  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
  typedef typename packet_traits<ResScalar>::type  _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  typedef ResPacket AccPacket;

  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
  {
    p = pset1<ResPacket>(ResScalar(0));
  }

  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = pset1<RhsPacket>(*b);
  }

  void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    pbroadcast4(b, b0, b1, b2, b3);
  }

  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploaddup<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
  {
    eigen_internal_assert(unpacket_traits<RhsPacket>::size<=4);
    loadRhs(b,dest);
  }

  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploaddup<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
  {
    madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
  {
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
    EIGEN_UNUSED_VARIABLE(tmp);
    c.v = pmadd(a,b.v,c.v);
#else
    tmp = b; tmp.v = pmul(a,tmp.v); c = padd(c,tmp);
#endif
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
  {
    c += a * b;
  }

  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    r = cj.pmadd(alpha,c,r);
  }

protected:
  conj_helper<ResPacket,ResPacket,false,ConjRhs> cj;
};
// Helper for the rotating kernel below: default (non-rotating) implementation.
template <typename GebpKernel, bool UseRotatingKernel = GebpKernel::UseRotatingKernel>
struct PossiblyRotatingKernelHelper
{
  typedef typename GebpKernel::Traits Traits;
  typedef typename Traits::RhsScalar RhsScalar;
  typedef typename Traits::RhsPacket RhsPacket;
  typedef typename Traits::AccPacket AccPacket;

  const Traits& traits;
  PossiblyRotatingKernelHelper(const Traits& t) : traits(t) {}

  template <size_t K, size_t Index>
  void loadOrRotateRhs(RhsPacket& to, const RhsScalar* from) const
  {
    traits.loadRhs(from + (Index+4*K)*Traits::RhsProgress, to);
  }

  void unrotateResult(AccPacket&, AccPacket&, AccPacket&, AccPacket&)
  {
  }
};
// Rotating implementation: load the rhs once per group of four and rotate it in registers.
template <typename GebpKernel>
struct PossiblyRotatingKernelHelper<GebpKernel, true>
{
  typedef typename GebpKernel::Traits Traits;
  typedef typename Traits::RhsScalar RhsScalar;
  typedef typename Traits::RhsPacket RhsPacket;
  typedef typename Traits::AccPacket AccPacket;

  const Traits& traits;
  PossiblyRotatingKernelHelper(const Traits& t) : traits(t) {}

  template <size_t K, size_t Index>
  void loadOrRotateRhs(RhsPacket& to, const RhsScalar* from) const
  {
    if (Index == 0) {
      to = pload<RhsPacket>(from + 4*K*Traits::RhsProgress);
    } else {
      EIGEN_ASM_COMMENT("Do not reorder code, we're very tight on registers");
      to = protate<1>(to);
    }
  }

  void unrotateResult(AccPacket& res0, AccPacket& res1, AccPacket& res2, AccPacket& res3)
  {
    PacketBlock<AccPacket> resblock;
    resblock.packet[0] = res0;
    resblock.packet[1] = res1;
    resblock.packet[2] = res2;
    resblock.packet[3] = res3;
    ptranspose(resblock);
    resblock.packet[3] = protate<1>(resblock.packet[3]);
    resblock.packet[2] = protate<2>(resblock.packet[2]);
    resblock.packet[1] = protate<3>(resblock.packet[1]);
    ptranspose(resblock);
    res0 = resblock.packet[0];
    res1 = resblock.packet[1];
    res2 = resblock.packet[2];
    res3 = resblock.packet[3];
  }
};
template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
struct gebp_kernel
{
  typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> Traits;
  typedef typename Traits::ResScalar ResScalar;
  typedef typename Traits::LhsPacket LhsPacket;
  typedef typename Traits::RhsPacket RhsPacket;
  typedef typename Traits::ResPacket ResPacket;
  typedef typename Traits::AccPacket AccPacket;

  typedef gebp_traits<RhsScalar,LhsScalar,ConjugateRhs,ConjugateLhs> SwappedTraits;
  typedef typename SwappedTraits::ResScalar SResScalar;
  typedef typename SwappedTraits::LhsPacket SLhsPacket;
  typedef typename SwappedTraits::RhsPacket SRhsPacket;
  typedef typename SwappedTraits::ResPacket SResPacket;
  typedef typename SwappedTraits::AccPacket SAccPacket;

  typedef typename DataMapper::LinearMapper LinearMapper;

  enum {
    Vectorizable  = Traits::Vectorizable,
    LhsProgress   = Traits::LhsProgress,
    RhsProgress   = Traits::RhsProgress,
    ResPacketSize = Traits::ResPacketSize
  };

  static const bool UseRotatingKernel =
    EIGEN_ARCH_ARM &&
    internal::is_same<LhsScalar, float>::value &&
    internal::is_same<RhsScalar, float>::value &&
    internal::is_same<ResScalar, float>::value &&
    Traits::LhsPacketSize == 4 &&
    Traits::RhsPacketSize == 4 &&
    Traits::ResPacketSize == 4;

  void operator()(const DataMapper& res, const LhsScalar* blockA, const RhsScalar* blockB,
                  Index rows, Index depth, Index cols, ResScalar alpha,
                  Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0);
};
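// Illustrative call sequence (not part of Eigen itself): the GEMM driver first packs an
// mc x kc panel of the lhs and a kc x nc panel of the rhs, then hands both packed blocks
// to this kernel. Roughly (LhsMapper/RhsMapper/ResMapper stand for whatever block-mapper
// types the caller uses):
//
//   gemm_pack_lhs<float, Index, LhsMapper, Traits::mr, Traits::LhsProgress, ColMajor>()
//       (blockA, lhs.getSubMapper(i2,0), kc, mc);
//   gemm_pack_rhs<float, Index, RhsMapper, Traits::nr, ColMajor>()
//       (blockB, rhs.getSubMapper(0,j2), kc, nc);
//   gebp_kernel<float, float, Index, ResMapper, Traits::mr, Traits::nr, false, false>()
//       (res.getSubMapper(i2,j2), blockA, blockB, mc, kc, nc, alpha);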
template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,ConjugateRhs>
  ::operator()(const DataMapper& res, const LhsScalar* blockA, const RhsScalar* blockB,
               Index rows, Index depth, Index cols, ResScalar alpha,
               Index strideA, Index strideB, Index offsetA, Index offsetB)
{
  Traits traits;
  SwappedTraits straits;

  if(strideA==-1) strideA = depth;
  if(strideB==-1) strideB = depth;
  conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
  Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
  const Index peeled_mc3 = mr>=3*Traits::LhsProgress ? (rows/(3*LhsProgress))*(3*LhsProgress) : 0;
  const Index peeled_mc2 = mr>=2*Traits::LhsProgress ? peeled_mc3+((rows-peeled_mc3)/(2*LhsProgress))*(2*LhsProgress) : 0;
  const Index peeled_mc1 = mr>=1*Traits::LhsProgress ? (rows/(1*LhsProgress))*(1*LhsProgress) : 0;
  enum { pk = 8 }; // peeling factor along the depth dimension
  const Index peeled_kc  = depth & ~(pk-1);
  const Index prefetch_res_offset = 32/sizeof(ResScalar);
  //---------- Process 3 * LhsProgress rows at once ----------
  if(mr>=3*Traits::LhsProgress)
  {
    PossiblyRotatingKernelHelper<gebp_kernel> possiblyRotatingKernelHelper(traits);

    // Loop on each largest micro horizontal panel of the lhs (3*Traits::LhsProgress x depth).
    // If depth is small, several row panels are processed for the same rhs panel.
    const Index l1 = defaultL1CacheSize; // in bytes
    const Index actual_panel_rows = (3*LhsProgress) * std::max<Index>(1,( (l1 - sizeof(ResScalar)*mr*nr - depth*nr*sizeof(RhsScalar)) / (depth * sizeof(LhsScalar) * 3*LhsProgress) ));
    for(Index i1=0; i1<peeled_mc3; i1+=actual_panel_rows)
    {
      const Index actual_panel_end = (std::min)(i1+actual_panel_rows, peeled_mc3);
      for(Index j2=0; j2<packet_cols4; j2+=nr)
      {
        for(Index i=i1; i<actual_panel_end; i+=3*LhsProgress)
        {
          // A 3*Traits::LhsProgress x nr micro block of res, entirely held in registers.
          const LhsScalar* blA = &blockA[i*strideA+offsetA*(3*LhsProgress)];

          // gets res block as registers
          AccPacket C0, C1, C2,  C3,
                    C4, C5, C6,  C7,
                    C8, C9, C10, C11;
          traits.initAcc(C0);  traits.initAcc(C1);  traits.initAcc(C2);  traits.initAcc(C3);
          traits.initAcc(C4);  traits.initAcc(C5);  traits.initAcc(C6);  traits.initAcc(C7);
          traits.initAcc(C8);  traits.initAcc(C9);  traits.initAcc(C10); traits.initAcc(C11);

          LinearMapper r0 = res.getLinearMapper(i, j2 + 0);
          LinearMapper r1 = res.getLinearMapper(i, j2 + 1);
          LinearMapper r2 = res.getLinearMapper(i, j2 + 2);
          LinearMapper r3 = res.getLinearMapper(i, j2 + 3);

          // performs "inner" products
          const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
          LhsPacket A0, A1, A2;
          RhsPacket B_0, T0;
          for(Index k=0; k<peeled_kc; k+=pk)
          {
            EIGEN_ASM_COMMENT("begin gebp micro kernel 3pX4");

#define EIGEN_GEBP_ONESTEP(K) \
            do { \
              EIGEN_ASM_COMMENT("begin step of gebp micro kernel 3pX4"); \
              EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
              internal::prefetch(blA+(3*K+16)*LhsProgress); \
              if (EIGEN_ARCH_ARM) internal::prefetch(blB+(4*K+16)*RhsProgress); \
              traits.loadLhs(&blA[(0+3*K)*LhsProgress], A0); \
              traits.loadLhs(&blA[(1+3*K)*LhsProgress], A1); \
              traits.loadLhs(&blA[(2+3*K)*LhsProgress], A2); \
              possiblyRotatingKernelHelper.template loadOrRotateRhs<K, 0>(B_0, blB); \
              traits.madd(A0, B_0, C0, T0); \
              traits.madd(A1, B_0, C4, T0); \
              traits.madd(A2, B_0, C8, B_0); \
              possiblyRotatingKernelHelper.template loadOrRotateRhs<K, 1>(B_0, blB); \
              traits.madd(A0, B_0, C1, T0); \
              traits.madd(A1, B_0, C5, T0); \
              traits.madd(A2, B_0, C9, B_0); \
              possiblyRotatingKernelHelper.template loadOrRotateRhs<K, 2>(B_0, blB); \
              traits.madd(A0, B_0, C2, T0); \
              traits.madd(A1, B_0, C6, T0); \
              traits.madd(A2, B_0, C10, B_0); \
              possiblyRotatingKernelHelper.template loadOrRotateRhs<K, 3>(B_0, blB); \
              traits.madd(A0, B_0, C3, T0); \
              traits.madd(A1, B_0, C7, T0); \
              traits.madd(A2, B_0, C11, B_0); \
              EIGEN_ASM_COMMENT("end step of gebp micro kernel 3pX4"); \
            } while(false)

            internal::prefetch(blB);
            EIGEN_GEBP_ONESTEP(0);
            EIGEN_GEBP_ONESTEP(1);
            EIGEN_GEBP_ONESTEP(2);
            EIGEN_GEBP_ONESTEP(3);
            EIGEN_GEBP_ONESTEP(4);
            EIGEN_GEBP_ONESTEP(5);
            EIGEN_GEBP_ONESTEP(6);
            EIGEN_GEBP_ONESTEP(7);

            blB += pk*4*RhsProgress;
            blA += pk*3*Traits::LhsProgress;

            EIGEN_ASM_COMMENT("end gebp micro kernel 3pX4");
          }
          // process the remainder of the peeled depth loop
          for(Index k=peeled_kc; k<depth; k++)
          {
            EIGEN_GEBP_ONESTEP(0);
            blB += 4*RhsProgress;
            blA += 3*Traits::LhsProgress;
          }
#undef EIGEN_GEBP_ONESTEP

          possiblyRotatingKernelHelper.unrotateResult(C0, C1, C2, C3);
          possiblyRotatingKernelHelper.unrotateResult(C4, C5, C6, C7);
          possiblyRotatingKernelHelper.unrotateResult(C8, C9, C10, C11);

          ResPacket R0, R1, R2;
          ResPacket alphav = pset1<ResPacket>(alpha);

          R0 = r0.loadPacket(0 * Traits::ResPacketSize);
          R1 = r0.loadPacket(1 * Traits::ResPacketSize);
          R2 = r0.loadPacket(2 * Traits::ResPacketSize);
          traits.acc(C0, alphav, R0);
          traits.acc(C4, alphav, R1);
          traits.acc(C8, alphav, R2);
          r0.storePacket(0 * Traits::ResPacketSize, R0);
          r0.storePacket(1 * Traits::ResPacketSize, R1);
          r0.storePacket(2 * Traits::ResPacketSize, R2);

          R0 = r1.loadPacket(0 * Traits::ResPacketSize);
          R1 = r1.loadPacket(1 * Traits::ResPacketSize);
          R2 = r1.loadPacket(2 * Traits::ResPacketSize);
          traits.acc(C1, alphav, R0);
          traits.acc(C5, alphav, R1);
          traits.acc(C9, alphav, R2);
          r1.storePacket(0 * Traits::ResPacketSize, R0);
          r1.storePacket(1 * Traits::ResPacketSize, R1);
          r1.storePacket(2 * Traits::ResPacketSize, R2);

          R0 = r2.loadPacket(0 * Traits::ResPacketSize);
          R1 = r2.loadPacket(1 * Traits::ResPacketSize);
          R2 = r2.loadPacket(2 * Traits::ResPacketSize);
          traits.acc(C2, alphav, R0);
          traits.acc(C6, alphav, R1);
          traits.acc(C10, alphav, R2);
          r2.storePacket(0 * Traits::ResPacketSize, R0);
          r2.storePacket(1 * Traits::ResPacketSize, R1);
          r2.storePacket(2 * Traits::ResPacketSize, R2);

          R0 = r3.loadPacket(0 * Traits::ResPacketSize);
          R1 = r3.loadPacket(1 * Traits::ResPacketSize);
          R2 = r3.loadPacket(2 * Traits::ResPacketSize);
          traits.acc(C3, alphav, R0);
          traits.acc(C7, alphav, R1);
          traits.acc(C11, alphav, R2);
          r3.storePacket(0 * Traits::ResPacketSize, R0);
          r3.storePacket(1 * Traits::ResPacketSize, R1);
          r3.storePacket(2 * Traits::ResPacketSize, R2);
        }
      }
      // Deal with the remaining columns of the rhs, one at a time
      for(Index j2=packet_cols4; j2<cols; j2++)
      {
        for(Index i=i1; i<actual_panel_end; i+=3*LhsProgress)
        {
          const LhsScalar* blA = &blockA[i*strideA+offsetA*(3*Traits::LhsProgress)];

          // gets res block as registers
          AccPacket C0, C4, C8;
          traits.initAcc(C0);
          traits.initAcc(C4);
          traits.initAcc(C8);

          LinearMapper r0 = res.getLinearMapper(i, j2);

          // performs "inner" products
          const RhsScalar* blB = &blockB[j2*strideB+offsetB];
          LhsPacket A0, A1, A2;
          RhsPacket B_0;

          for(Index k=0; k<peeled_kc; k+=pk)
          {
            EIGEN_ASM_COMMENT("begin gebp micro kernel 3pX1");

#define EIGEN_GEBGP_ONESTEP(K) \
            do { \
              EIGEN_ASM_COMMENT("begin step of gebp micro kernel 3pX1"); \
              EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
              traits.loadLhs(&blA[(0+3*K)*LhsProgress], A0); \
              traits.loadLhs(&blA[(1+3*K)*LhsProgress], A1); \
              traits.loadLhs(&blA[(2+3*K)*LhsProgress], A2); \
              traits.loadRhs(&blB[(0+K)*RhsProgress], B_0); \
              traits.madd(A0, B_0, C0, B_0); \
              traits.madd(A1, B_0, C4, B_0); \
              traits.madd(A2, B_0, C8, B_0); \
              EIGEN_ASM_COMMENT("end step of gebp micro kernel 3pX1"); \
            } while(false)

            EIGEN_GEBGP_ONESTEP(0);
            EIGEN_GEBGP_ONESTEP(1);
            EIGEN_GEBGP_ONESTEP(2);
            EIGEN_GEBGP_ONESTEP(3);
            EIGEN_GEBGP_ONESTEP(4);
            EIGEN_GEBGP_ONESTEP(5);
            EIGEN_GEBGP_ONESTEP(6);
            EIGEN_GEBGP_ONESTEP(7);

            blB += pk*RhsProgress;
            blA += pk*3*Traits::LhsProgress;

            EIGEN_ASM_COMMENT("end gebp micro kernel 3pX1");
          }

          // process the remainder of the peeled depth loop
          for(Index k=peeled_kc; k<depth; k++)
          {
            EIGEN_GEBGP_ONESTEP(0);
            blB += RhsProgress;
            blA += 3*Traits::LhsProgress;
          }
#undef EIGEN_GEBGP_ONESTEP

          ResPacket R0, R1, R2;
          ResPacket alphav = pset1<ResPacket>(alpha);

          R0 = r0.loadPacket(0 * Traits::ResPacketSize);
          R1 = r0.loadPacket(1 * Traits::ResPacketSize);
          R2 = r0.loadPacket(2 * Traits::ResPacketSize);
          traits.acc(C0, alphav, R0);
          traits.acc(C4, alphav, R1);
          traits.acc(C8, alphav, R2);
          r0.storePacket(0 * Traits::ResPacketSize, R0);
          r0.storePacket(1 * Traits::ResPacketSize, R1);
          r0.storePacket(2 * Traits::ResPacketSize, R2);
        }
      }
    }
  }
  //---------- Process 2 * LhsProgress rows at once ----------
  if(mr>=2*Traits::LhsProgress)
  {
    const Index l1 = defaultL1CacheSize; // in bytes
    Index actual_panel_rows = (2*LhsProgress) * std::max<Index>(1,( (l1 - sizeof(ResScalar)*mr*nr - depth*nr*sizeof(RhsScalar)) / (depth * sizeof(LhsScalar) * 2*LhsProgress) ));

    for(Index i1=peeled_mc3; i1<peeled_mc2; i1+=actual_panel_rows)
    {
      Index actual_panel_end = (std::min)(i1+actual_panel_rows, peeled_mc2);
      for(Index j2=0; j2<packet_cols4; j2+=nr)
      {
        for(Index i=i1; i<actual_panel_end; i+=2*LhsProgress)
        {
          // A 2*Traits::LhsProgress x nr micro block of res, entirely held in registers.
          const LhsScalar* blA = &blockA[i*strideA+offsetA*(2*Traits::LhsProgress)];

          // gets res block as registers
          AccPacket C0, C1, C2, C3,
                    C4, C5, C6, C7;
          traits.initAcc(C0); traits.initAcc(C1); traits.initAcc(C2); traits.initAcc(C3);
          traits.initAcc(C4); traits.initAcc(C5); traits.initAcc(C6); traits.initAcc(C7);

          LinearMapper r0 = res.getLinearMapper(i, j2 + 0);
          LinearMapper r1 = res.getLinearMapper(i, j2 + 1);
          LinearMapper r2 = res.getLinearMapper(i, j2 + 2);
          LinearMapper r3 = res.getLinearMapper(i, j2 + 3);

          r0.prefetch(prefetch_res_offset);
          r1.prefetch(prefetch_res_offset);
          r2.prefetch(prefetch_res_offset);
          r3.prefetch(prefetch_res_offset);

          // performs "inner" products
          const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
          LhsPacket A0, A1;
          for(Index k=0; k<peeled_kc; k+=pk)
          {
            EIGEN_ASM_COMMENT("begin gebp micro kernel 2pX4");
            RhsPacket B_0, B1, B2, B3, T0;

#define EIGEN_GEBGP_ONESTEP(K) \
            do { \
              EIGEN_ASM_COMMENT("begin step of gebp micro kernel 2pX4"); \
              EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
              traits.loadLhs(&blA[(0+2*K)*LhsProgress], A0); \
              traits.loadLhs(&blA[(1+2*K)*LhsProgress], A1); \
              traits.broadcastRhs(&blB[(0+4*K)*RhsProgress], B_0, B1, B2, B3); \
              traits.madd(A0, B_0, C0, T0); \
              traits.madd(A1, B_0, C4, B_0); \
              traits.madd(A0, B1, C1, T0); \
              traits.madd(A1, B1, C5, B1); \
              traits.madd(A0, B2, C2, T0); \
              traits.madd(A1, B2, C6, B2); \
              traits.madd(A0, B3, C3, T0); \
              traits.madd(A1, B3, C7, B3); \
              EIGEN_ASM_COMMENT("end step of gebp micro kernel 2pX4"); \
            } while(false)

            internal::prefetch(blB+(48+0));
            EIGEN_GEBGP_ONESTEP(0);
            EIGEN_GEBGP_ONESTEP(1);
            EIGEN_GEBGP_ONESTEP(2);
            EIGEN_GEBGP_ONESTEP(3);
            internal::prefetch(blB+(48+16));
            EIGEN_GEBGP_ONESTEP(4);
            EIGEN_GEBGP_ONESTEP(5);
            EIGEN_GEBGP_ONESTEP(6);
            EIGEN_GEBGP_ONESTEP(7);

            blB += pk*4*RhsProgress;
            blA += pk*(2*Traits::LhsProgress);

            EIGEN_ASM_COMMENT("end gebp micro kernel 2pX4");
          }
          // process the remainder of the peeled depth loop
          for(Index k=peeled_kc; k<depth; k++)
          {
            RhsPacket B_0, B1, B2, B3, T0;
            EIGEN_GEBGP_ONESTEP(0);
            blB += 4*RhsProgress;
            blA += 2*Traits::LhsProgress;
          }
#undef EIGEN_GEBGP_ONESTEP

          ResPacket R0, R1, R2, R3;
          ResPacket alphav = pset1<ResPacket>(alpha);

          R0 = r0.loadPacket(0 * Traits::ResPacketSize);
          R1 = r0.loadPacket(1 * Traits::ResPacketSize);
          R2 = r1.loadPacket(0 * Traits::ResPacketSize);
          R3 = r1.loadPacket(1 * Traits::ResPacketSize);
          traits.acc(C0, alphav, R0);
          traits.acc(C4, alphav, R1);
          traits.acc(C1, alphav, R2);
          traits.acc(C5, alphav, R3);
          r0.storePacket(0 * Traits::ResPacketSize, R0);
          r0.storePacket(1 * Traits::ResPacketSize, R1);
          r1.storePacket(0 * Traits::ResPacketSize, R2);
          r1.storePacket(1 * Traits::ResPacketSize, R3);

          R0 = r2.loadPacket(0 * Traits::ResPacketSize);
          R1 = r2.loadPacket(1 * Traits::ResPacketSize);
          R2 = r3.loadPacket(0 * Traits::ResPacketSize);
          R3 = r3.loadPacket(1 * Traits::ResPacketSize);
          traits.acc(C2, alphav, R0);
          traits.acc(C6, alphav, R1);
          traits.acc(C3, alphav, R2);
          traits.acc(C7, alphav, R3);
          r2.storePacket(0 * Traits::ResPacketSize, R0);
          r2.storePacket(1 * Traits::ResPacketSize, R1);
          r3.storePacket(0 * Traits::ResPacketSize, R2);
          r3.storePacket(1 * Traits::ResPacketSize, R3);
        }
      }
      // Deal with the remaining columns of the rhs, one at a time
      for(Index j2=packet_cols4; j2<cols; j2++)
      {
        for(Index i=i1; i<actual_panel_end; i+=2*LhsProgress)
        {
          const LhsScalar* blA = &blockA[i*strideA+offsetA*(2*Traits::LhsProgress)];

          // gets res block as registers
          AccPacket C0, C4;
          traits.initAcc(C0);
          traits.initAcc(C4);

          LinearMapper r0 = res.getLinearMapper(i, j2);
          r0.prefetch(prefetch_res_offset);

          // performs "inner" products
          const RhsScalar* blB = &blockB[j2*strideB+offsetB];
          LhsPacket A0, A1;
          RhsPacket B_0, B1;

          for(Index k=0; k<peeled_kc; k+=pk)
          {
            EIGEN_ASM_COMMENT("begin gebp micro kernel 2pX1");

#define EIGEN_GEBGP_ONESTEP(K) \
            do { \
              EIGEN_ASM_COMMENT("begin step of gebp micro kernel 2pX1"); \
              EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
              traits.loadLhs(&blA[(0+2*K)*LhsProgress], A0); \
              traits.loadLhs(&blA[(1+2*K)*LhsProgress], A1); \
              traits.loadRhs(&blB[(0+K)*RhsProgress], B_0); \
              traits.madd(A0, B_0, C0, B1); \
              traits.madd(A1, B_0, C4, B_0); \
              EIGEN_ASM_COMMENT("end step of gebp micro kernel 2pX1"); \
            } while(false)

            EIGEN_GEBGP_ONESTEP(0);
            EIGEN_GEBGP_ONESTEP(1);
            EIGEN_GEBGP_ONESTEP(2);
            EIGEN_GEBGP_ONESTEP(3);
            EIGEN_GEBGP_ONESTEP(4);
            EIGEN_GEBGP_ONESTEP(5);
            EIGEN_GEBGP_ONESTEP(6);
            EIGEN_GEBGP_ONESTEP(7);

            blB += pk*RhsProgress;
            blA += pk*2*Traits::LhsProgress;

            EIGEN_ASM_COMMENT("end gebp micro kernel 2pX1");
          }

          // process the remainder of the peeled depth loop
          for(Index k=peeled_kc; k<depth; k++)
          {
            EIGEN_GEBGP_ONESTEP(0);
            blB += RhsProgress;
            blA += 2*Traits::LhsProgress;
          }
#undef EIGEN_GEBGP_ONESTEP

          ResPacket R0, R1;
          ResPacket alphav = pset1<ResPacket>(alpha);

          R0 = r0.loadPacket(0 * Traits::ResPacketSize);
          R1 = r0.loadPacket(1 * Traits::ResPacketSize);
          traits.acc(C0, alphav, R0);
          traits.acc(C4, alphav, R1);
          r0.storePacket(0 * Traits::ResPacketSize, R0);
          r0.storePacket(1 * Traits::ResPacketSize, R1);
        }
      }
    }
  }
  //---------- Process 1 * LhsProgress rows at once ----------
  if(mr>=1*Traits::LhsProgress)
  {
    for(Index i=peeled_mc2; i<peeled_mc1; i+=1*LhsProgress)
    {
      for(Index j2=0; j2<packet_cols4; j2+=nr)
      {
        const LhsScalar* blA = &blockA[i*strideA+offsetA*(1*Traits::LhsProgress)];

        // gets res block as registers
        AccPacket C0, C1, C2, C3;
        traits.initAcc(C0); traits.initAcc(C1); traits.initAcc(C2); traits.initAcc(C3);

        LinearMapper r0 = res.getLinearMapper(i, j2 + 0);
        LinearMapper r1 = res.getLinearMapper(i, j2 + 1);
        LinearMapper r2 = res.getLinearMapper(i, j2 + 2);
        LinearMapper r3 = res.getLinearMapper(i, j2 + 3);

        r0.prefetch(prefetch_res_offset);
        r1.prefetch(prefetch_res_offset);
        r2.prefetch(prefetch_res_offset);
        r3.prefetch(prefetch_res_offset);

        // performs "inner" products
        const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
        LhsPacket A0;
        for(Index k=0; k<peeled_kc; k+=pk)
        {
          EIGEN_ASM_COMMENT("begin gebp micro kernel 1pX4");
          RhsPacket B_0, B1, B2, B3;

#define EIGEN_GEBGP_ONESTEP(K) \
          do { \
            EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1pX4"); \
            EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
            traits.loadLhs(&blA[(0+1*K)*LhsProgress], A0); \
            traits.broadcastRhs(&blB[(0+4*K)*RhsProgress], B_0, B1, B2, B3); \
            traits.madd(A0, B_0, C0, B_0); \
            traits.madd(A0, B1, C1, B1); \
            traits.madd(A0, B2, C2, B2); \
            traits.madd(A0, B3, C3, B3); \
            EIGEN_ASM_COMMENT("end step of gebp micro kernel 1pX4"); \
          } while(false)

          internal::prefetch(blB+(48+0));
          EIGEN_GEBGP_ONESTEP(0);
          EIGEN_GEBGP_ONESTEP(1);
          EIGEN_GEBGP_ONESTEP(2);
          EIGEN_GEBGP_ONESTEP(3);
          internal::prefetch(blB+(48+16));
          EIGEN_GEBGP_ONESTEP(4);
          EIGEN_GEBGP_ONESTEP(5);
          EIGEN_GEBGP_ONESTEP(6);
          EIGEN_GEBGP_ONESTEP(7);

          blB += pk*4*RhsProgress;
          blA += pk*1*LhsProgress;

          EIGEN_ASM_COMMENT("end gebp micro kernel 1pX4");
        }
        // process the remainder of the peeled depth loop
        for(Index k=peeled_kc; k<depth; k++)
        {
          RhsPacket B_0, B1, B2, B3;
          EIGEN_GEBGP_ONESTEP(0);
          blB += 4*RhsProgress;
          blA += 1*LhsProgress;
        }
#undef EIGEN_GEBGP_ONESTEP

        ResPacket R0, R1;
        ResPacket alphav = pset1<ResPacket>(alpha);

        R0 = r0.loadPacket(0 * Traits::ResPacketSize);
        R1 = r1.loadPacket(0 * Traits::ResPacketSize);
        traits.acc(C0, alphav, R0);
        traits.acc(C1, alphav, R1);
        r0.storePacket(0 * Traits::ResPacketSize, R0);
        r1.storePacket(0 * Traits::ResPacketSize, R1);

        R0 = r2.loadPacket(0 * Traits::ResPacketSize);
        R1 = r3.loadPacket(0 * Traits::ResPacketSize);
        traits.acc(C2, alphav, R0);
        traits.acc(C3, alphav, R1);
        r2.storePacket(0 * Traits::ResPacketSize, R0);
        r3.storePacket(0 * Traits::ResPacketSize, R1);
      }
      // Deal with the remaining columns of the rhs, one at a time
      for(Index j2=packet_cols4; j2<cols; j2++)
      {
        const LhsScalar* blA = &blockA[i*strideA+offsetA*(1*Traits::LhsProgress)];

        // gets res block as registers
        AccPacket C0;
        traits.initAcc(C0);

        LinearMapper r0 = res.getLinearMapper(i, j2);

        // performs "inner" products
        const RhsScalar* blB = &blockB[j2*strideB+offsetB];
        LhsPacket A0;
        RhsPacket B_0;

        for(Index k=0; k<peeled_kc; k+=pk)
        {
          EIGEN_ASM_COMMENT("begin gebp micro kernel 1pX1");

#define EIGEN_GEBGP_ONESTEP(K) \
          do { \
            EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1pX1"); \
            EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
            traits.loadLhs(&blA[(0+1*K)*LhsProgress], A0); \
            traits.loadRhs(&blB[(0+K)*RhsProgress], B_0); \
            traits.madd(A0, B_0, C0, B_0); \
            EIGEN_ASM_COMMENT("end step of gebp micro kernel 1pX1"); \
          } while(false)

          EIGEN_GEBGP_ONESTEP(0);
          EIGEN_GEBGP_ONESTEP(1);
          EIGEN_GEBGP_ONESTEP(2);
          EIGEN_GEBGP_ONESTEP(3);
          EIGEN_GEBGP_ONESTEP(4);
          EIGEN_GEBGP_ONESTEP(5);
          EIGEN_GEBGP_ONESTEP(6);
          EIGEN_GEBGP_ONESTEP(7);

          blB += pk*RhsProgress;
          blA += pk*1*Traits::LhsProgress;

          EIGEN_ASM_COMMENT("end gebp micro kernel 1pX1");
        }

        // process the remainder of the peeled depth loop
        for(Index k=peeled_kc; k<depth; k++)
        {
          EIGEN_GEBGP_ONESTEP(0);
          blB += RhsProgress;
          blA += 1*Traits::LhsProgress;
        }
#undef EIGEN_GEBGP_ONESTEP

        ResPacket R0;
        ResPacket alphav = pset1<ResPacket>(alpha);
        R0 = r0.loadPacket(0 * Traits::ResPacketSize);
        traits.acc(C0, alphav, R0);
        r0.storePacket(0 * Traits::ResPacketSize, R0);
      }
    }
  }
  //---------- Process remaining rows, one at a time ----------
  for(Index j2=0; j2<packet_cols4; j2+=nr)
  {
    for(Index i=peeled_mc1; i<rows; i+=1)
    {
      const LhsScalar* blA = &blockA[i*strideA+offsetA];
      const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];

      // If the swapped traits advance by a multiple of 4, process this single lhs row
      // through the swapped (lhs/rhs roles exchanged) vectorized path.
      if( (SwappedTraits::LhsProgress % 4)==0 )
      {
        SAccPacket C0, C1, C2, C3;
        straits.initAcc(C0);
        straits.initAcc(C1);
        straits.initAcc(C2);
        straits.initAcc(C3);

        const Index spk   = (std::max)(1,SwappedTraits::LhsProgress/4);
        const Index endk  = (depth/spk)*spk;
        const Index endk4 = (depth/(spk*4))*(spk*4);

        Index k=0;
        for(; k<endk4; k+=4*spk)
        {
          SLhsPacket A0, A1;
          SRhsPacket B_0, B_1;

          straits.loadLhsUnaligned(blB+0*SwappedTraits::LhsProgress, A0);
          straits.loadLhsUnaligned(blB+1*SwappedTraits::LhsProgress, A1);

          straits.loadRhsQuad(blA+0*spk, B_0);
          straits.loadRhsQuad(blA+1*spk, B_1);
          straits.madd(A0,B_0,C0,B_0);
          straits.madd(A1,B_1,C1,B_1);

          straits.loadLhsUnaligned(blB+2*SwappedTraits::LhsProgress, A0);
          straits.loadLhsUnaligned(blB+3*SwappedTraits::LhsProgress, A1);
          straits.loadRhsQuad(blA+2*spk, B_0);
          straits.loadRhsQuad(blA+3*spk, B_1);
          straits.madd(A0,B_0,C2,B_0);
          straits.madd(A1,B_1,C3,B_1);

          blB += 4*SwappedTraits::LhsProgress;
          blA += 4*spk;
        }
        C0 = padd(padd(C0,C1),padd(C2,C3));
        for(; k<endk; k+=spk)
        {
          SLhsPacket A0;
          SRhsPacket B_0;

          straits.loadLhsUnaligned(blB, A0);
          straits.loadRhsQuad(blA, B_0);
          straits.madd(A0,B_0,C0,B_0);

          blB += SwappedTraits::LhsProgress;
          blA += spk;
        }
        if(SwappedTraits::LhsProgress==8)
        {
          // Special case: the accumulation register C0 has to be reduced to a half packet first.
          typedef typename conditional<SwappedTraits::LhsProgress==8,typename unpacket_traits<SResPacket>::half,SResPacket>::type SResPacketHalf;
          typedef typename conditional<SwappedTraits::LhsProgress==8,typename unpacket_traits<SLhsPacket>::half,SLhsPacket>::type SLhsPacketHalf;
          typedef typename conditional<SwappedTraits::LhsProgress==8,typename unpacket_traits<SLhsPacket>::half,SRhsPacket>::type SRhsPacketHalf;
          typedef typename conditional<SwappedTraits::LhsProgress==8,typename unpacket_traits<SAccPacket>::half,SAccPacket>::type SAccPacketHalf;

          SResPacketHalf R = res.template gatherPacket<SResPacketHalf>(i, j2);
          SResPacketHalf alphav = pset1<SResPacketHalf>(alpha);

          if(depth-endk>0)
          {
            // handle the remaining rows of the rhs, which amount to a half packet
            SLhsPacketHalf a0;
            SRhsPacketHalf b0;
            straits.loadLhsUnaligned(blB, a0);
            straits.loadRhs(blA, b0);
            SAccPacketHalf c0 = predux4(C0);
            straits.madd(a0,b0,c0,b0);
            straits.acc(c0, alphav, R);
          }
          else
          {
            straits.acc(predux4(C0), alphav, R);
          }
          res.scatterPacket(i, j2, R);
        }
        else
        {
          SResPacket R = res.template gatherPacket<SResPacket>(i, j2);
          SResPacket alphav = pset1<SResPacket>(alpha);
          straits.acc(C0, alphav, R);
          res.scatterPacket(i, j2, R);
        }
      }
      else // pure scalar path
      {
        // get a 1 x 4 res block as registers
        ResScalar C0(0), C1(0), C2(0), C3(0);
        for(Index k=0; k<depth; k++)
        {
          LhsScalar A0 = blA[k];
          RhsScalar B_0 = blB[0], B_1 = blB[1];
          CJMADD(cj,A0,B_0,C0, B_0);
          CJMADD(cj,A0,B_1,C1, B_1);
          B_0 = blB[2]; B_1 = blB[3];
          CJMADD(cj,A0,B_0,C2, B_0);
          CJMADD(cj,A0,B_1,C3, B_1);
          blB += 4;
        }
        res(i, j2 + 0) += alpha * C0;
        res(i, j2 + 1) += alpha * C1;
        res(i, j2 + 2) += alpha * C2;
        res(i, j2 + 3) += alpha * C3;
      }
    }
  }
  // Process the remaining columns of the rhs with a plain scalar loop.
  for(Index j2=packet_cols4; j2<cols; j2++)
  {
    for(Index i=peeled_mc1; i<rows; i+=1)
    {
      const LhsScalar* blA = &blockA[i*strideA+offsetA];
      ResScalar C0(0);
      const RhsScalar* blB = &blockB[j2*strideB+offsetB];
      for(Index k=0; k<depth; k++)
      {
        LhsScalar A0 = blA[k];
        RhsScalar B_0 = blB[k];
        CJMADD(cj, A0, B_0, C0, B_0);
      }
      res(i, j2) += alpha * C0;
    }
  }
}
template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
struct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, ColMajor, Conjugate, PanelMode>
{
  typedef typename DataMapper::LinearMapper LinearMapper;
  EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
};
template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, ColMajor, Conjugate, PanelMode>
  ::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
{
  typedef typename packet_traits<Scalar>::type Packet;
  enum { PacketSize = packet_traits<Scalar>::size };

  EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
  EIGEN_UNUSED_VARIABLE(stride);
  EIGEN_UNUSED_VARIABLE(offset);
  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
  eigen_assert( ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) || (Pack1<=4) );
  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
  Index count = 0;

  const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;
  const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;
  const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;
  const Index peeled_mc0 = Pack2>=1*PacketSize ? peeled_mc1
                         : Pack2>1             ? (rows/Pack2)*Pack2 : 0;

  Index i=0;
  // Pack 3 packets at once
  if(Pack1>=3*PacketSize)
  {
    for(; i<peeled_mc3; i+=3*PacketSize)
    {
      if(PanelMode) count += (3*PacketSize) * offset;

      for(Index k=0; k<depth; k++)
      {
        Packet A, B, C;
        A = lhs.loadPacket(i+0*PacketSize, k);
        B = lhs.loadPacket(i+1*PacketSize, k);
        C = lhs.loadPacket(i+2*PacketSize, k);
        pstore(blockA+count, cj.pconj(A)); count+=PacketSize;
        pstore(blockA+count, cj.pconj(B)); count+=PacketSize;
        pstore(blockA+count, cj.pconj(C)); count+=PacketSize;
      }
      if(PanelMode) count += (3*PacketSize) * (stride-offset-depth);
    }
  }
  // Pack 2 packets at once
  if(Pack1>=2*PacketSize)
  {
    for(; i<peeled_mc2; i+=2*PacketSize)
    {
      if(PanelMode) count += (2*PacketSize) * offset;

      for(Index k=0; k<depth; k++)
      {
        Packet A, B;
        A = lhs.loadPacket(i+0*PacketSize, k);
        B = lhs.loadPacket(i+1*PacketSize, k);
        pstore(blockA+count, cj.pconj(A)); count+=PacketSize;
        pstore(blockA+count, cj.pconj(B)); count+=PacketSize;
      }
      if(PanelMode) count += (2*PacketSize) * (stride-offset-depth);
    }
  }
  // Pack 1 packet at once
  if(Pack1>=1*PacketSize)
  {
    for(; i<peeled_mc1; i+=1*PacketSize)
    {
      if(PanelMode) count += (1*PacketSize) * offset;

      for(Index k=0; k<depth; k++)
      {
        Packet A;
        A = lhs.loadPacket(i+0*PacketSize, k);
        pstore(blockA+count, cj.pconj(A));
        count+=PacketSize;
      }
      if(PanelMode) count += (1*PacketSize) * (stride-offset-depth);
    }
  }
  // Pack scalars
  if(Pack2<PacketSize && Pack2>1)
  {
    for(; i<peeled_mc0; i+=Pack2)
    {
      if(PanelMode) count += Pack2 * offset;
      for(Index k=0; k<depth; k++)
        for(Index w=0; w<Pack2; w++)
          blockA[count++] = cj(lhs(i+w, k));
      if(PanelMode) count += Pack2 * (stride-offset-depth);
    }
  }
  for(; i<rows; i++)
  {
    if(PanelMode) count += offset;
    for(Index k=0; k<depth; k++)
      blockA[count++] = cj(lhs(i, k));
    if(PanelMode) count += (stride-offset-depth);
  }
}
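// Illustrative layout note (not part of Eigen itself): for Pack1 = 3*PacketSize the loops
// above emit, for each column k, three consecutive packets taken from rows
// [i, i+3*PacketSize) of the lhs, so blockA holds the panel row-block by row-block with
// the depth dimension innermost:
//
//   blockA = [ A(i..i+3P-1, 0), A(i..i+3P-1, 1), ..., A(i..i+3P-1, depth-1),
//              A(i+3P..i+6P-1, 0), ... ]
//
// which is exactly the access order used by the 3pX4 micro kernel above.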
template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
struct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, RowMajor, Conjugate, PanelMode>
{
  typedef typename DataMapper::LinearMapper LinearMapper;
  EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
};
template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, RowMajor, Conjugate, PanelMode>
  ::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
{
  typedef typename packet_traits<Scalar>::type Packet;
  enum { PacketSize = packet_traits<Scalar>::size };

  EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
  EIGEN_UNUSED_VARIABLE(stride);
  EIGEN_UNUSED_VARIABLE(offset);
  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
  Index count = 0;

  // Pack rows in groups of `pack` scalars, starting from Pack1 and decreasing.
  int pack = Pack1;
  Index i = 0;
  while(pack>0)
  {
    Index remaining_rows = rows-i;
    Index peeled_mc = i+(remaining_rows/pack)*pack;
    for(; i<peeled_mc; i+=pack)
    {
      if(PanelMode) count += pack * offset;

      const Index peeled_k = (depth/PacketSize)*PacketSize;
      Index k=0;
      if(pack>=PacketSize)
      {
        for(; k<peeled_k; k+=PacketSize)
        {
          for (Index m = 0; m < pack; m += PacketSize)
          {
            PacketBlock<Packet> kernel;
            for (int p = 0; p < PacketSize; ++p) kernel.packet[p] = lhs.loadPacket(i+p+m, k);
            ptranspose(kernel);
            for (int p = 0; p < PacketSize; ++p) pstore(blockA+count+m+(pack)*p, cj.pconj(kernel.packet[p]));
          }
          count += PacketSize*pack;
        }
      }
      for(; k<depth; k++)
      {
        Index w=0;
        for(; w<pack-3; w+=4)
        {
          Scalar a(cj(lhs(i+w+0, k))),
                 b(cj(lhs(i+w+1, k))),
                 c(cj(lhs(i+w+2, k))),
                 d(cj(lhs(i+w+3, k)));
          blockA[count++] = a;
          blockA[count++] = b;
          blockA[count++] = c;
          blockA[count++] = d;
        }
        if(pack%4)
          for(;w<pack;++w)
            blockA[count++] = cj(lhs(i+w, k));
      }
      if(PanelMode) count += pack * (stride-offset-depth);
    }

    pack -= PacketSize;
    if(pack<Pack2 && (pack+PacketSize)!=Pack2)
      pack = Pack2;
  }

  for(; i<rows; i++)
  {
    if(PanelMode) count += offset;
    for(Index k=0; k<depth; k++)
      blockA[count++] = cj(lhs(i, k));
    if(PanelMode) count += (stride-offset-depth);
  }
}
template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
struct gemm_pack_rhs<Scalar, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode>
{
  typedef typename packet_traits<Scalar>::type Packet;
  typedef typename DataMapper::LinearMapper LinearMapper;
  enum { PacketSize = packet_traits<Scalar>::size };
  EIGEN_DONT_INLINE void operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0);
};
template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode>
  ::operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)
{
  EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS COLMAJOR");
  EIGEN_UNUSED_VARIABLE(stride);
  EIGEN_UNUSED_VARIABLE(offset);
  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
  Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
  Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
  Index count = 0;
  const Index peeled_k = (depth/PacketSize)*PacketSize;
  // Pack the columns in groups of 4 (nr==4)
  for(Index j2=packet_cols8; j2<packet_cols4; j2+=4)
  {
    // skip what we have before
    if(PanelMode) count += 4 * offset;
    const LinearMapper dm0 = rhs.getLinearMapper(0, j2 + 0);
    const LinearMapper dm1 = rhs.getLinearMapper(0, j2 + 1);
    const LinearMapper dm2 = rhs.getLinearMapper(0, j2 + 2);
    const LinearMapper dm3 = rhs.getLinearMapper(0, j2 + 3);

    Index k=0;
    if((PacketSize%4)==0)
    {
      for(; k<peeled_k; k+=PacketSize) {
        PacketBlock<Packet,(PacketSize%4)==0?4:PacketSize> kernel;
        kernel.packet[0] = dm0.loadPacket(k);
        kernel.packet[1%PacketSize] = dm1.loadPacket(k);
        kernel.packet[2%PacketSize] = dm2.loadPacket(k);
        kernel.packet[3%PacketSize] = dm3.loadPacket(k);
        ptranspose(kernel);
        pstoreu(blockB+count+0*PacketSize, cj.pconj(kernel.packet[0]));
        pstoreu(blockB+count+1*PacketSize, cj.pconj(kernel.packet[1%PacketSize]));
        pstoreu(blockB+count+2*PacketSize, cj.pconj(kernel.packet[2%PacketSize]));
        pstoreu(blockB+count+3*PacketSize, cj.pconj(kernel.packet[3%PacketSize]));
        count+=4*PacketSize;
      }
    }
    for(; k<depth; k++)
    {
      blockB[count+0] = cj(dm0(k));
      blockB[count+1] = cj(dm1(k));
      blockB[count+2] = cj(dm2(k));
      blockB[count+3] = cj(dm3(k));
      count += 4;
    }
    // skip what we have after
    if(PanelMode) count += 4 * (stride-offset-depth);
  }
  // copy the remaining columns one at a time (nr==1)
  for(Index j2=packet_cols4; j2<cols; ++j2)
  {
    if(PanelMode) count += offset;
    const LinearMapper dm0 = rhs.getLinearMapper(0, j2);
    for(Index k=0; k<depth; k++)
    {
      blockB[count] = cj(dm0(k));
      count += 1;
    }
    if(PanelMode) count += (stride-offset-depth);
  }
}
template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
struct gemm_pack_rhs<Scalar, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode>
{
  typedef typename packet_traits<Scalar>::type Packet;
  typedef typename DataMapper::LinearMapper LinearMapper;
  enum { PacketSize = packet_traits<Scalar>::size };
  EIGEN_DONT_INLINE void operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0);
};
template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode>
  ::operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)
{
  EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS ROWMAJOR");
  EIGEN_UNUSED_VARIABLE(stride);
  EIGEN_UNUSED_VARIABLE(offset);
  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
  Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
  Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
  Index count = 0;
  // Pack the columns in groups of 4 (nr==4)
  for(Index j2=packet_cols8; j2<packet_cols4; j2+=4)
  {
    // skip what we have before
    if(PanelMode) count += 4 * offset;
    for(Index k=0; k<depth; k++)
    {
      if (PacketSize==4) {
        Packet A = rhs.loadPacket(k, j2);
        pstoreu(blockB+count, cj.pconj(A));
        count += PacketSize;
      } else {
        const LinearMapper dm0 = rhs.getLinearMapper(k, j2);
        blockB[count+0] = cj(dm0(0));
        blockB[count+1] = cj(dm0(1));
        blockB[count+2] = cj(dm0(2));
        blockB[count+3] = cj(dm0(3));
        count += 4;
      }
    }
    // skip what we have after
    if(PanelMode) count += 4 * (stride-offset-depth);
  }
  // copy the remaining columns one at a time (nr==1)
  for(Index j2=packet_cols4; j2<cols; ++j2)
  {
    if(PanelMode) count += offset;
    for(Index k=0; k<depth; k++)
    {
      blockB[count] = cj(rhs(k, j2));
      count += 1;
    }
    if(PanelMode) count += stride-offset-depth;
  }
}
/** \returns the currently set level 1 cpu cache size (in bytes) used to estimate the ideal blocking size parameters. */
inline std::ptrdiff_t l1CacheSize()
{
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
  return l1;
}

/** \returns the currently set level 2 cpu cache size (in bytes) used to estimate the ideal blocking size parameters. */
inline std::ptrdiff_t l2CacheSize()
{
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
  return l2;
}

/** Set the cpu L1, L2 and L3 cache sizes (in bytes). These values are used to adjust the
  * blocking sizes for the algorithms working per blocks. */
inline void setCpuCacheSizes(std::ptrdiff_t l1, std::ptrdiff_t l2, std::ptrdiff_t l3)
{
  internal::manage_caching_sizes(SetAction, &l1, &l2, &l3);
}
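// Illustrative usage sketch (not part of Eigen itself): the functions above are the public
// knobs for the blocking heuristic. For instance, to make the heuristic assume a 64KB L1
// and a 1MB L2 cache and then read the values back:
//
//   Eigen::setCpuCacheSizes(64*1024, 1024*1024, 8*1024*1024);
//   std::ptrdiff_t l1 = Eigen::l1CacheSize();  // 64*1024
//   std::ptrdiff_t l2 = Eigen::l2CacheSize();  // 1024*1024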
#endif // EIGEN_GENERAL_BLOCK_PANEL_H