Eigen 3.2.92
AVX/PacketMath.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner (benoit.steiner.goog@gmail.com)
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PACKET_MATH_AVX_H
#define EIGEN_PACKET_MATH_AVX_H

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
#endif

#ifdef __FMA__
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif
#endif

typedef __m256  Packet8f;
typedef __m256i Packet8i;
typedef __m256d Packet4d;

template<> struct is_arithmetic<__m256>  { enum { value = true }; };
template<> struct is_arithmetic<__m256i> { enum { value = true }; };
template<> struct is_arithmetic<__m256d> { enum { value = true }; };

#define _EIGEN_DECLARE_CONST_Packet8f(NAME,X) \
  const Packet8f p8f_##NAME = pset1<Packet8f>(X)

#define _EIGEN_DECLARE_CONST_Packet4d(NAME,X) \
  const Packet4d p4d_##NAME = pset1<Packet4d>(X)

#define _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(NAME,X) \
  const Packet8f p8f_##NAME = _mm256_castsi256_ps(pset1<Packet8i>(X))

#define _EIGEN_DECLARE_CONST_Packet8i(NAME,X) \
  const Packet8i p8i_##NAME = pset1<Packet8i>(X)

template<> struct packet_traits<float> : default_packet_traits
{
  typedef Packet8f type;
  typedef Packet4f half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 8,
    HasHalfPacket = 1,

    HasDiv   = 1,
    HasSin   = EIGEN_FAST_MATH,
    HasCos   = 0,
    HasLog   = 1,
    HasExp   = 1,
    HasSqrt  = 1,
    HasRsqrt = 1,
    HasBlend = 1,
    HasRound = 1,
    HasFloor = 1,
    HasCeil  = 1
  };
};
template<> struct packet_traits<double> : default_packet_traits
{
  typedef Packet4d type;
  typedef Packet2d half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,
    HasHalfPacket = 1,

    HasDiv   = 1,
    HasExp   = 1,
    HasSqrt  = 1,
    HasRsqrt = 1,
    HasBlend = 1,
    HasRound = 1,
    HasFloor = 1,
    HasCeil  = 1
  };
};

/* Proper support for integers is only provided by AVX2. In the meantime, we'll
   use SSE instructions and packets to deal with integers.
template<> struct packet_traits<int> : default_packet_traits
{
  typedef Packet8i type;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 8
  };
};
*/

template<> struct unpacket_traits<Packet8f> { typedef float  type; typedef Packet4f half; enum {size=8, alignment=Aligned32}; };
template<> struct unpacket_traits<Packet4d> { typedef double type; typedef Packet2d half; enum {size=4, alignment=Aligned32}; };
template<> struct unpacket_traits<Packet8i> { typedef int    type; typedef Packet4i half; enum {size=8, alignment=Aligned32}; };

template<> EIGEN_STRONG_INLINE Packet8f pset1<Packet8f>(const float&  from) { return _mm256_set1_ps(from); }
template<> EIGEN_STRONG_INLINE Packet4d pset1<Packet4d>(const double& from) { return _mm256_set1_pd(from); }
template<> EIGEN_STRONG_INLINE Packet8i pset1<Packet8i>(const int&    from) { return _mm256_set1_epi32(from); }

template<> EIGEN_STRONG_INLINE Packet8f pload1<Packet8f>(const float*  from) { return _mm256_broadcast_ss(from); }
template<> EIGEN_STRONG_INLINE Packet4d pload1<Packet4d>(const double* from) { return _mm256_broadcast_sd(from); }

template<> EIGEN_STRONG_INLINE Packet8f plset<Packet8f>(const float& a) { return _mm256_add_ps(_mm256_set1_ps(a), _mm256_set_ps(7.0,6.0,5.0,4.0,3.0,2.0,1.0,0.0)); }
template<> EIGEN_STRONG_INLINE Packet4d plset<Packet4d>(const double& a) { return _mm256_add_pd(_mm256_set1_pd(a), _mm256_set_pd(3.0,2.0,1.0,0.0)); }

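// Illustrative sketch (not part of Eigen; example_iota8 is a hypothetical
// helper): plset builds an arithmetic ramp, handy for generating per-lane
// indices.
inline Packet8f example_iota8(float start)
{
  return plset<Packet8f>(start);  // {start, start+1, ..., start+7}
}
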
template<> EIGEN_STRONG_INLINE Packet8f padd<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_add_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d padd<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_add_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f psub<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_sub_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d psub<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_sub_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f pnegate(const Packet8f& a)
{
  return _mm256_sub_ps(_mm256_set1_ps(0.0),a);
}
template<> EIGEN_STRONG_INLINE Packet4d pnegate(const Packet4d& a)
{
  return _mm256_sub_pd(_mm256_set1_pd(0.0),a);
}

template<> EIGEN_STRONG_INLINE Packet8f pconj(const Packet8f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4d pconj(const Packet4d& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet8i pconj(const Packet8i& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet8f pmul<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_mul_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pmul<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_mul_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f pdiv<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_div_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pdiv<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_div_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet8i pdiv<Packet8i>(const Packet8i& /*a*/, const Packet8i& /*b*/)
{
  eigen_assert(false && "packet integer division is not supported by AVX");
  return pset1<Packet8i>(0);
}

#ifdef __FMA__
template<> EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
#if EIGEN_COMP_GNUC || EIGEN_COMP_CLANG
  // Clang generates a vfmadd213ps instruction plus some extra register moves
  // (vmovaps), and gcc generates a vfmadd132ps instruction, so we force a
  // vfmadd231ps instruction here, since the most common use case is to
  // accumulate the result of the product.
  Packet8f res = c;
  __asm__("vfmadd231ps %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
  return res;
#else
  return _mm256_fmadd_ps(a,b,c);
#endif
}
template<> EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
#if EIGEN_COMP_GNUC || EIGEN_COMP_CLANG
  // See above.
  Packet4d res = c;
  __asm__("vfmadd231pd %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
  return res;
#else
  return _mm256_fmadd_pd(a,b,c);
#endif
}
#endif

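// Illustrative sketch (not part of Eigen; example_accumulate is a
// hypothetical helper): the accumulation pattern the code above is tuned
// for. With FMA enabled, c + a*b compiles to a single vfmadd231ps; without
// FMA, the generic pmadd falls back to padd(pmul(a,b),c).
inline Packet8f example_accumulate(const Packet8f& a, const Packet8f& b, const Packet8f& c)
{
  return pmadd(a, b, c);  // c + a*b, fused when FMA is available
}
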
template<> EIGEN_STRONG_INLINE Packet8f pmin<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_min_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pmin<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_min_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f pmax<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_max_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pmax<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_max_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f pround<Packet8f>(const Packet8f& a) { return _mm256_round_ps(a, _MM_FROUND_CUR_DIRECTION); }
template<> EIGEN_STRONG_INLINE Packet4d pround<Packet4d>(const Packet4d& a) { return _mm256_round_pd(a, _MM_FROUND_CUR_DIRECTION); }

template<> EIGEN_STRONG_INLINE Packet8f pceil<Packet8f>(const Packet8f& a) { return _mm256_ceil_ps(a); }
template<> EIGEN_STRONG_INLINE Packet4d pceil<Packet4d>(const Packet4d& a) { return _mm256_ceil_pd(a); }

template<> EIGEN_STRONG_INLINE Packet8f pfloor<Packet8f>(const Packet8f& a) { return _mm256_floor_ps(a); }
template<> EIGEN_STRONG_INLINE Packet4d pfloor<Packet4d>(const Packet4d& a) { return _mm256_floor_pd(a); }

template<> EIGEN_STRONG_INLINE Packet8f pand<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_and_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pand<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_and_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f por<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_or_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d por<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_or_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f pxor<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_xor_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pxor<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_xor_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f pandnot<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_andnot_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pandnot<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_andnot_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f pload<Packet8f>(const float*   from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_ps(from); }
template<> EIGEN_STRONG_INLINE Packet4d pload<Packet4d>(const double*  from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_pd(from); }
template<> EIGEN_STRONG_INLINE Packet8i pload<Packet8i>(const int*     from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from)); }

template<> EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float*  from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_ps(from); }
template<> EIGEN_STRONG_INLINE Packet4d ploadu<Packet4d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_pd(from); }
template<> EIGEN_STRONG_INLINE Packet8i ploadu<Packet8i>(const int*    from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from)); }

// Loads 4 floats from memory and returns the packet {a0, a0, a1, a1, a2, a2, a3, a3}
template<> EIGEN_STRONG_INLINE Packet8f ploaddup<Packet8f>(const float* from)
{
  // TODO try to find a way to avoid the need of a temporary register
//   Packet8f tmp  = _mm256_castps128_ps256(_mm_loadu_ps(from));
//   tmp = _mm256_insertf128_ps(tmp, _mm_movehl_ps(_mm256_castps256_ps128(tmp),_mm256_castps256_ps128(tmp)), 1);
//   return _mm256_unpacklo_ps(tmp,tmp);

  // _mm256_insertf128_ps is very slow on Haswell, thus:
  Packet8f tmp = _mm256_broadcast_ps((const __m128*)(const void*)from);
  // mimic an "inplace" permutation of the lower 128bits using a blend
  tmp = _mm256_blend_ps(tmp,_mm256_castps128_ps256(_mm_permute_ps( _mm256_castps256_ps128(tmp), _MM_SHUFFLE(1,0,1,0))), 15);
  // then we can perform a consistent permutation on the global register to get everything in shape:
  return _mm256_permute_ps(tmp, _MM_SHUFFLE(3,3,2,2));
}
// Loads 2 doubles from memory and returns the packet {a0, a0, a1, a1}
template<> EIGEN_STRONG_INLINE Packet4d ploaddup<Packet4d>(const double* from)
{
  Packet4d tmp = _mm256_broadcast_pd((const __m128d*)(const void*)from);
  return _mm256_permute_pd(tmp, 3<<2);
}

// Loads 2 floats from memory and returns the packet {a0, a0, a0, a0, a1, a1, a1, a1}
template<> EIGEN_STRONG_INLINE Packet8f ploadquad<Packet8f>(const float* from)
{
  Packet8f tmp = _mm256_castps128_ps256(_mm_broadcast_ss(from));
  return _mm256_insertf128_ps(tmp, _mm_broadcast_ss(from+1), 1);
}

template<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet8f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet8i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(reinterpret_cast<__m256i*>(to), from); }

template<> EIGEN_STRONG_INLINE void pstoreu<float>(float*   to, const Packet8f& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int*       to, const Packet8i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }

// NOTE: leverage _mm256_i32gather_ps and _mm256_i32gather_pd if AVX2 instructions are available
// NOTE: for the record the following seems to be slower: return _mm256_i32gather_ps(from, _mm256_set1_epi32(stride), 4);
template<> EIGEN_DEVICE_FUNC inline Packet8f pgather<float, Packet8f>(const float* from, Index stride)
{
  return _mm256_set_ps(from[7*stride], from[6*stride], from[5*stride], from[4*stride],
                       from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}
template<> EIGEN_DEVICE_FUNC inline Packet4d pgather<double, Packet4d>(const double* from, Index stride)
{
  return _mm256_set_pd(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}

template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet8f>(float* to, const Packet8f& from, Index stride)
{
  __m128 low = _mm256_extractf128_ps(from, 0);
  to[stride*0] = _mm_cvtss_f32(low);
  to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1));
  to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 2));
  to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3));

  __m128 high = _mm256_extractf128_ps(from, 1);
  to[stride*4] = _mm_cvtss_f32(high);
  to[stride*5] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1));
  to[stride*6] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 2));
  to[stride*7] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3));
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet4d>(double* to, const Packet4d& from, Index stride)
{
  __m128d low = _mm256_extractf128_pd(from, 0);
  to[stride*0] = _mm_cvtsd_f64(low);
  to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1));
  __m128d high = _mm256_extractf128_pd(from, 1);
  to[stride*2] = _mm_cvtsd_f64(high);
  to[stride*3] = _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1));
}

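// Illustrative sketch (not part of Eigen; example_copy_strided8 is a
// hypothetical helper): pgather/pscatter move one packet's worth of strided
// data, e.g. a column of a row-major matrix whose stride is the row length.
inline void example_copy_strided8(const float* from, Index from_stride,
                                  float* to, Index to_stride)
{
  Packet8f p = pgather<float, Packet8f>(from, from_stride);
  pscatter<float, Packet8f>(to, p, to_stride);
}
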
template<> EIGEN_STRONG_INLINE void pstore1<Packet8f>(float* to, const float& a)
{
  Packet8f pa = pset1<Packet8f>(a);
  pstore(to, pa);
}
template<> EIGEN_STRONG_INLINE void pstore1<Packet4d>(double* to, const double& a)
{
  Packet4d pa = pset1<Packet4d>(a);
  pstore(to, pa);
}
template<> EIGEN_STRONG_INLINE void pstore1<Packet8i>(int* to, const int& a)
{
  Packet8i pa = pset1<Packet8i>(a);
  pstore(to, pa);
}

template<> EIGEN_STRONG_INLINE void prefetch<float>(const float*   addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }

template<> EIGEN_STRONG_INLINE float  pfirst<Packet8f>(const Packet8f& a) {
  return _mm_cvtss_f32(_mm256_castps256_ps128(a));
}
template<> EIGEN_STRONG_INLINE double pfirst<Packet4d>(const Packet4d& a) {
  return _mm_cvtsd_f64(_mm256_castpd256_pd128(a));
}
template<> EIGEN_STRONG_INLINE int    pfirst<Packet8i>(const Packet8i& a) {
  return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
}


template<> EIGEN_STRONG_INLINE Packet8f preverse(const Packet8f& a)
{
  __m256 tmp = _mm256_shuffle_ps(a,a,0x1b);
  return _mm256_permute2f128_ps(tmp, tmp, 1);
}
template<> EIGEN_STRONG_INLINE Packet4d preverse(const Packet4d& a)
{
  __m256d tmp = _mm256_shuffle_pd(a,a,5);
  return _mm256_permute2f128_pd(tmp, tmp, 1);
  // Equivalent alternative:
  //   __m256d swap_halves = _mm256_permute2f128_pd(a,a,1);
  //   return _mm256_permute_pd(swap_halves,5);
}

// pabs: clear the sign bit of each lane
template<> EIGEN_STRONG_INLINE Packet8f pabs(const Packet8f& a)
{
  const Packet8f mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
  return _mm256_and_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4d pabs(const Packet4d& a)
{
  const Packet4d mask = _mm256_castsi256_pd(_mm256_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
  return _mm256_and_pd(a,mask);
}

// preduxp should be ok
// FIXME: why is this ok? why isn't the simple implementation working as expected?
template<> EIGEN_STRONG_INLINE Packet8f preduxp<Packet8f>(const Packet8f* vecs)
{
  __m256 hsum1 = _mm256_hadd_ps(vecs[0], vecs[1]);
  __m256 hsum2 = _mm256_hadd_ps(vecs[2], vecs[3]);
  __m256 hsum3 = _mm256_hadd_ps(vecs[4], vecs[5]);
  __m256 hsum4 = _mm256_hadd_ps(vecs[6], vecs[7]);

  __m256 hsum5 = _mm256_hadd_ps(hsum1, hsum1);
  __m256 hsum6 = _mm256_hadd_ps(hsum2, hsum2);
  __m256 hsum7 = _mm256_hadd_ps(hsum3, hsum3);
  __m256 hsum8 = _mm256_hadd_ps(hsum4, hsum4);

  __m256 perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
  __m256 perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
  __m256 perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
  __m256 perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);

  __m256 sum1 = _mm256_add_ps(perm1, hsum5);
  __m256 sum2 = _mm256_add_ps(perm2, hsum6);
  __m256 sum3 = _mm256_add_ps(perm3, hsum7);
  __m256 sum4 = _mm256_add_ps(perm4, hsum8);

  __m256 blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
  __m256 blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);

  __m256 final = _mm256_blend_ps(blend1, blend2, 0xf0);
  return final;
}
template<> EIGEN_STRONG_INLINE Packet4d preduxp<Packet4d>(const Packet4d* vecs)
{
  Packet4d tmp0, tmp1;

  tmp0 = _mm256_hadd_pd(vecs[0], vecs[1]);
  tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));

  tmp1 = _mm256_hadd_pd(vecs[2], vecs[3]);
  tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));

  return _mm256_blend_pd(tmp0, tmp1, 0xC);
}

template<> EIGEN_STRONG_INLINE float predux<Packet8f>(const Packet8f& a)
{
  Packet8f tmp0 = _mm256_hadd_ps(a,_mm256_permute2f128_ps(a,a,1));
  tmp0 = _mm256_hadd_ps(tmp0,tmp0);
  return pfirst(_mm256_hadd_ps(tmp0, tmp0));
}
template<> EIGEN_STRONG_INLINE double predux<Packet4d>(const Packet4d& a)
{
  Packet4d tmp0 = _mm256_hadd_pd(a,_mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_hadd_pd(tmp0,tmp0));
}

template<> EIGEN_STRONG_INLINE Packet4f predux4<Packet8f>(const Packet8f& a)
{
  return _mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1));
}

template<> EIGEN_STRONG_INLINE float predux_mul<Packet8f>(const Packet8f& a)
{
  Packet8f tmp;
  tmp = _mm256_mul_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}
template<> EIGEN_STRONG_INLINE double predux_mul<Packet4d>(const Packet4d& a)
{
  Packet4d tmp;
  tmp = _mm256_mul_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_mul_pd(tmp, _mm256_shuffle_pd(tmp,tmp,1)));
}

template<> EIGEN_STRONG_INLINE float predux_min<Packet8f>(const Packet8f& a)
{
  Packet8f tmp = _mm256_min_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}
template<> EIGEN_STRONG_INLINE double predux_min<Packet4d>(const Packet4d& a)
{
  Packet4d tmp = _mm256_min_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_min_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
}

template<> EIGEN_STRONG_INLINE float predux_max<Packet8f>(const Packet8f& a)
{
  Packet8f tmp = _mm256_max_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}

template<> EIGEN_STRONG_INLINE double predux_max<Packet4d>(const Packet4d& a)
{
  Packet4d tmp = _mm256_max_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_max_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
}

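// Illustrative sketch (not part of Eigen; example_reduce8 is a hypothetical
// helper): the horizontal reductions above collapse one packet into a
// scalar, here the sum, minimum and maximum of eight floats.
inline void example_reduce8(const float* x, float& sum, float& mn, float& mx)
{
  Packet8f p = ploadu<Packet8f>(x);
  sum = predux<Packet8f>(p);
  mn  = predux_min<Packet8f>(p);
  mx  = predux_max<Packet8f>(p);
}
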
template<int Offset>
struct palign_impl<Offset,Packet8f>
{
  static EIGEN_STRONG_INLINE void run(Packet8f& first, const Packet8f& second)
  {
    if (Offset==1)
    {
      first = _mm256_blend_ps(first, second, 1);
      Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(0,3,2,1));
      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
      first = _mm256_blend_ps(tmp1, tmp2, 0x88);
    }
    else if (Offset==2)
    {
      first = _mm256_blend_ps(first, second, 3);
      Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(1,0,3,2));
      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
      first = _mm256_blend_ps(tmp1, tmp2, 0xcc);
    }
    else if (Offset==3)
    {
      first = _mm256_blend_ps(first, second, 7);
      Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(2,1,0,3));
      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
      first = _mm256_blend_ps(tmp1, tmp2, 0xee);
    }
    else if (Offset==4)
    {
      first = _mm256_blend_ps(first, second, 15);
      Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(3,2,1,0));
      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
      first = _mm256_permute_ps(tmp2, _MM_SHUFFLE(3,2,1,0));
    }
    else if (Offset==5)
    {
      first = _mm256_blend_ps(first, second, 31);
      first = _mm256_permute2f128_ps(first, first, 1);
      Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(0,3,2,1));
      first = _mm256_permute2f128_ps(tmp, tmp, 1);
      first = _mm256_blend_ps(tmp, first, 0x88);
    }
    else if (Offset==6)
    {
      first = _mm256_blend_ps(first, second, 63);
      first = _mm256_permute2f128_ps(first, first, 1);
      Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(1,0,3,2));
      first = _mm256_permute2f128_ps(tmp, tmp, 1);
      first = _mm256_blend_ps(tmp, first, 0xcc);
    }
    else if (Offset==7)
    {
      first = _mm256_blend_ps(first, second, 127);
      first = _mm256_permute2f128_ps(first, first, 1);
      Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(2,1,0,3));
      first = _mm256_permute2f128_ps(tmp, tmp, 1);
      first = _mm256_blend_ps(tmp, first, 0xee);
    }
  }
};

template<int Offset>
struct palign_impl<Offset,Packet4d>
{
  static EIGEN_STRONG_INLINE void run(Packet4d& first, const Packet4d& second)
  {
    if (Offset==1)
    {
      first = _mm256_blend_pd(first, second, 1);
      __m256d tmp = _mm256_permute_pd(first, 5);
      first = _mm256_permute2f128_pd(tmp, tmp, 1);
      first = _mm256_blend_pd(tmp, first, 0xA);
    }
    else if (Offset==2)
    {
      first = _mm256_blend_pd(first, second, 3);
      first = _mm256_permute2f128_pd(first, first, 1);
    }
    else if (Offset==3)
    {
      first = _mm256_blend_pd(first, second, 7);
      __m256d tmp = _mm256_permute_pd(first, 5);
      first = _mm256_permute2f128_pd(tmp, tmp, 1);
      first = _mm256_blend_pd(tmp, first, 5);
    }
  }
};

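// Illustrative sketch (not part of Eigen; example_palign2 is a hypothetical
// helper): palign_impl shifts a sliding window across two consecutive
// packets. For first = {a0,a1,a2,a3} and second = {b0,b1,b2,b3}, an offset
// of 2 yields {a2,a3,b0,b1}.
inline Packet4d example_palign2(const Packet4d& a, const Packet4d& b)
{
  Packet4d first = a;
  palign_impl<2, Packet4d>::run(first, b);
  return first;
}
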
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet8f,8>& kernel) {
  __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T4 = _mm256_unpacklo_ps(kernel.packet[4], kernel.packet[5]);
  __m256 T5 = _mm256_unpackhi_ps(kernel.packet[4], kernel.packet[5]);
  __m256 T6 = _mm256_unpacklo_ps(kernel.packet[6], kernel.packet[7]);
  __m256 T7 = _mm256_unpackhi_ps(kernel.packet[6], kernel.packet[7]);
  __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
  __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
  __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
  __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));
  __m256 S4 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(1,0,1,0));
  __m256 S5 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(3,2,3,2));
  __m256 S6 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(1,0,1,0));
  __m256 S7 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(3,2,3,2));
  kernel.packet[0] = _mm256_permute2f128_ps(S0, S4, 0x20);
  kernel.packet[1] = _mm256_permute2f128_ps(S1, S5, 0x20);
  kernel.packet[2] = _mm256_permute2f128_ps(S2, S6, 0x20);
  kernel.packet[3] = _mm256_permute2f128_ps(S3, S7, 0x20);
  kernel.packet[4] = _mm256_permute2f128_ps(S0, S4, 0x31);
  kernel.packet[5] = _mm256_permute2f128_ps(S1, S5, 0x31);
  kernel.packet[6] = _mm256_permute2f128_ps(S2, S6, 0x31);
  kernel.packet[7] = _mm256_permute2f128_ps(S3, S7, 0x31);
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet8f,4>& kernel) {
  __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);

  __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
  __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
  __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
  __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));

  kernel.packet[0] = _mm256_permute2f128_ps(S0, S1, 0x20);
  kernel.packet[1] = _mm256_permute2f128_ps(S2, S3, 0x20);
  kernel.packet[2] = _mm256_permute2f128_ps(S0, S1, 0x31);
  kernel.packet[3] = _mm256_permute2f128_ps(S2, S3, 0x31);
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4d,4>& kernel) {
  __m256d T0 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 15);
  __m256d T1 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);
  __m256d T2 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 15);
  __m256d T3 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);

  kernel.packet[1] = _mm256_permute2f128_pd(T0, T2, 32);
  kernel.packet[3] = _mm256_permute2f128_pd(T0, T2, 49);
  kernel.packet[0] = _mm256_permute2f128_pd(T1, T3, 32);
  kernel.packet[2] = _mm256_permute2f128_pd(T1, T3, 49);
}

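// Illustrative sketch (not part of Eigen; example_transpose4x4 is a
// hypothetical helper): transposing a 4x4 block of doubles entirely in
// registers, as one would when packing blocks for matrix products.
inline void example_transpose4x4(double* data)  // data points to a row-major 4x4 block
{
  PacketBlock<Packet4d,4> block;
  for (int i = 0; i < 4; ++i) block.packet[i] = ploadu<Packet4d>(data + 4*i);
  ptranspose(block);
  for (int i = 0; i < 4; ++i) pstoreu(data + 4*i, block.packet[i]);
}
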
template<> EIGEN_STRONG_INLINE Packet8f pblend(const Selector<8>& ifPacket, const Packet8f& thenPacket, const Packet8f& elsePacket) {
  const __m256 zero = _mm256_setzero_ps();
  const __m256 select = _mm256_set_ps(ifPacket.select[7], ifPacket.select[6], ifPacket.select[5], ifPacket.select[4], ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m256 false_mask = _mm256_cmp_ps(select, zero, _CMP_EQ_UQ);
  return _mm256_blendv_ps(thenPacket, elsePacket, false_mask);
}
template<> EIGEN_STRONG_INLINE Packet4d pblend(const Selector<4>& ifPacket, const Packet4d& thenPacket, const Packet4d& elsePacket) {
  const __m256d zero = _mm256_setzero_pd();
  const __m256d select = _mm256_set_pd(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m256d false_mask = _mm256_cmp_pd(select, zero, _CMP_EQ_UQ);
  return _mm256_blendv_pd(thenPacket, elsePacket, false_mask);
}

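// Illustrative sketch (not part of Eigen; example_select is a hypothetical
// helper): pblend picks each lane from thenPacket where the selector is
// non-zero, and from elsePacket otherwise.
inline Packet4d example_select(const Packet4d& thenPacket, const Packet4d& elsePacket)
{
  Selector<4> mask;
  mask.select[0] = true;  mask.select[1] = false;
  mask.select[2] = true;  mask.select[3] = false;
  return pblend(mask, thenPacket, elsePacket);  // {then0, else1, then2, else3}
}
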
} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PACKET_MATH_AVX_H