Eigen  3.3.3
PacketMath.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner (benoit.steiner.goog@gmail.com)
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PACKET_MATH_AVX512_H
#define EIGEN_PACKET_MATH_AVX512_H

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
#endif

#ifdef __FMA__
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif
#endif

typedef __m512 Packet16f;
typedef __m512i Packet16i;
typedef __m512d Packet8d;

template <>
struct is_arithmetic<__m512> {
  enum { value = true };
};
template <>
struct is_arithmetic<__m512i> {
  enum { value = true };
};
template <>
struct is_arithmetic<__m512d> {
  enum { value = true };
};

template<> struct packet_traits<float>  : default_packet_traits
{
  typedef Packet16f type;
  typedef Packet8f half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 16,
    HasHalfPacket = 1,
#if EIGEN_GNUC_AT_LEAST(5, 3)
#ifdef EIGEN_VECTORIZE_AVX512DQ
    HasLog = 1,
#endif
    HasExp = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
#endif
    HasDiv = 1
  };
};
template<> struct packet_traits<double> : default_packet_traits
{
  typedef Packet8d type;
  typedef Packet4d half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 8,
    HasHalfPacket = 1,
#if EIGEN_GNUC_AT_LEAST(5, 3)
    HasSqrt = 1,
    HasRsqrt = EIGEN_FAST_MATH,
#endif
    HasDiv = 1
  };
};

/* TODO Implement AVX512 for integers
template<> struct packet_traits<int>    : default_packet_traits
{
  typedef Packet16i type;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=8
  };
};
*/

template <>
struct unpacket_traits<Packet16f> {
  typedef float type;
  typedef Packet8f half;
  enum { size = 16, alignment=Aligned64 };
};
template <>
struct unpacket_traits<Packet8d> {
  typedef double type;
  typedef Packet4d half;
  enum { size = 8, alignment=Aligned64 };
};
template <>
struct unpacket_traits<Packet16i> {
  typedef int type;
  typedef Packet8i half;
  enum { size = 16, alignment=Aligned64 };
};

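// Illustrative note, not part of Eigen's sources: generic kernels consult
// these traits at compile time to pick the packet type and its width, along
// the lines of
//
//   typedef packet_traits<float>::type Packet;            // Packet16f here
//   enum { PacketSize = unpacket_traits<Packet>::size };  // 16
//
// so the same generic loop body vectorizes over 16 floats or 8 doubles.
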
template <>
EIGEN_STRONG_INLINE Packet16f pset1<Packet16f>(const float& from) {
  return _mm512_set1_ps(from);
}
template <>
EIGEN_STRONG_INLINE Packet8d pset1<Packet8d>(const double& from) {
  return _mm512_set1_pd(from);
}
template <>
EIGEN_STRONG_INLINE Packet16i pset1<Packet16i>(const int& from) {
  return _mm512_set1_epi32(from);
}

template <>
EIGEN_STRONG_INLINE Packet16f pload1<Packet16f>(const float* from) {
  return _mm512_broadcastss_ps(_mm_load_ps1(from));
}
template <>
EIGEN_STRONG_INLINE Packet8d pload1<Packet8d>(const double* from) {
  return _mm512_broadcastsd_pd(_mm_load_pd1(from));
}

template <>
EIGEN_STRONG_INLINE Packet16f plset<Packet16f>(const float& a) {
  return _mm512_add_ps(
      _mm512_set1_ps(a),
      _mm512_set_ps(15.0f, 14.0f, 13.0f, 12.0f, 11.0f, 10.0f, 9.0f, 8.0f, 7.0f, 6.0f, 5.0f,
                    4.0f, 3.0f, 2.0f, 1.0f, 0.0f));
}
template <>
EIGEN_STRONG_INLINE Packet8d plset<Packet8d>(const double& a) {
  return _mm512_add_pd(_mm512_set1_pd(a),
                       _mm512_set_pd(7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0));
}

template <>
EIGEN_STRONG_INLINE Packet16f padd<Packet16f>(const Packet16f& a,
                                              const Packet16f& b) {
  return _mm512_add_ps(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet8d padd<Packet8d>(const Packet8d& a,
                                            const Packet8d& b) {
  return _mm512_add_pd(a, b);
}

template <>
EIGEN_STRONG_INLINE Packet16f psub<Packet16f>(const Packet16f& a,
                                              const Packet16f& b) {
  return _mm512_sub_ps(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet8d psub<Packet8d>(const Packet8d& a,
                                            const Packet8d& b) {
  return _mm512_sub_pd(a, b);
}

template <>
EIGEN_STRONG_INLINE Packet16f pnegate(const Packet16f& a) {
  return _mm512_sub_ps(_mm512_set1_ps(0.0), a);
}
template <>
EIGEN_STRONG_INLINE Packet8d pnegate(const Packet8d& a) {
  return _mm512_sub_pd(_mm512_set1_pd(0.0), a);
}

template <>
EIGEN_STRONG_INLINE Packet16f pconj(const Packet16f& a) {
  return a;
}
template <>
EIGEN_STRONG_INLINE Packet8d pconj(const Packet8d& a) {
  return a;
}
template <>
EIGEN_STRONG_INLINE Packet16i pconj(const Packet16i& a) {
  return a;
}

template <>
EIGEN_STRONG_INLINE Packet16f pmul<Packet16f>(const Packet16f& a,
                                              const Packet16f& b) {
  return _mm512_mul_ps(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet8d pmul<Packet8d>(const Packet8d& a,
                                            const Packet8d& b) {
  return _mm512_mul_pd(a, b);
}

template <>
EIGEN_STRONG_INLINE Packet16f pdiv<Packet16f>(const Packet16f& a,
                                              const Packet16f& b) {
  return _mm512_div_ps(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet8d pdiv<Packet8d>(const Packet8d& a,
                                            const Packet8d& b) {
  return _mm512_div_pd(a, b);
}

#ifdef __FMA__
template <>
EIGEN_STRONG_INLINE Packet16f pmadd(const Packet16f& a, const Packet16f& b,
                                    const Packet16f& c) {
  return _mm512_fmadd_ps(a, b, c);
}
template <>
EIGEN_STRONG_INLINE Packet8d pmadd(const Packet8d& a, const Packet8d& b,
                                   const Packet8d& c) {
  return _mm512_fmadd_pd(a, b, c);
}
#endif

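// Illustrative note, not part of Eigen's sources: per lane i in [0,16),
// pmadd(a, b, c)[i] == a[i] * b[i] + c[i], computed with a single rounding
// step by the FMA instruction. When __FMA__ is not defined, callers get the
// generic padd(pmul(a, b), c) fallback, which rounds twice.
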
template <>
EIGEN_STRONG_INLINE Packet16f pmin<Packet16f>(const Packet16f& a,
                                              const Packet16f& b) {
  return _mm512_min_ps(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet8d pmin<Packet8d>(const Packet8d& a,
                                            const Packet8d& b) {
  return _mm512_min_pd(a, b);
}

template <>
EIGEN_STRONG_INLINE Packet16f pmax<Packet16f>(const Packet16f& a,
                                              const Packet16f& b) {
  return _mm512_max_ps(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet8d pmax<Packet8d>(const Packet8d& a,
                                            const Packet8d& b) {
  return _mm512_max_pd(a, b);
}

template <>
EIGEN_STRONG_INLINE Packet16f pand<Packet16f>(const Packet16f& a,
                                              const Packet16f& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
  return _mm512_and_ps(a, b);
#else
  Packet16f res = _mm512_undefined_ps();
  Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);
  Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);
  res = _mm512_insertf32x4(res, _mm_and_ps(lane0_a, lane0_b), 0);

  Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);
  Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);
  res = _mm512_insertf32x4(res, _mm_and_ps(lane1_a, lane1_b), 1);

  Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);
  Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);
  res = _mm512_insertf32x4(res, _mm_and_ps(lane2_a, lane2_b), 2);

  Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);
  Packet4f lane3_b = _mm512_extractf32x4_ps(b, 3);
  res = _mm512_insertf32x4(res, _mm_and_ps(lane3_a, lane3_b), 3);

  return res;
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8d pand<Packet8d>(const Packet8d& a,
                                            const Packet8d& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
  return _mm512_and_pd(a, b);
#else
  Packet8d res = _mm512_undefined_pd();
  Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);
  Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);
  res = _mm512_insertf64x4(res, _mm256_and_pd(lane0_a, lane0_b), 0);

  Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);
  Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);
  res = _mm512_insertf64x4(res, _mm256_and_pd(lane1_a, lane1_b), 1);

  return res;
#endif
}
template <>
EIGEN_STRONG_INLINE Packet16f por<Packet16f>(const Packet16f& a,
                                             const Packet16f& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
  return _mm512_or_ps(a, b);
#else
  Packet16f res = _mm512_undefined_ps();
  Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);
  Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);
  res = _mm512_insertf32x4(res, _mm_or_ps(lane0_a, lane0_b), 0);

  Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);
  Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);
  res = _mm512_insertf32x4(res, _mm_or_ps(lane1_a, lane1_b), 1);

  Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);
  Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);
  res = _mm512_insertf32x4(res, _mm_or_ps(lane2_a, lane2_b), 2);

  Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);
  Packet4f lane3_b = _mm512_extractf32x4_ps(b, 3);
  res = _mm512_insertf32x4(res, _mm_or_ps(lane3_a, lane3_b), 3);

  return res;
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8d por<Packet8d>(const Packet8d& a,
                                           const Packet8d& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
  return _mm512_or_pd(a, b);
#else
  Packet8d res = _mm512_undefined_pd();
  Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);
  Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);
  res = _mm512_insertf64x4(res, _mm256_or_pd(lane0_a, lane0_b), 0);

  Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);
  Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);
  res = _mm512_insertf64x4(res, _mm256_or_pd(lane1_a, lane1_b), 1);

  return res;
#endif
}

template <>
EIGEN_STRONG_INLINE Packet16f pxor<Packet16f>(const Packet16f& a,
                                              const Packet16f& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
  return _mm512_xor_ps(a, b);
#else
  Packet16f res = _mm512_undefined_ps();
  Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);
  Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);
  res = _mm512_insertf32x4(res, _mm_xor_ps(lane0_a, lane0_b), 0);

  Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);
  Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);
  res = _mm512_insertf32x4(res, _mm_xor_ps(lane1_a, lane1_b), 1);

  Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);
  Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);
  res = _mm512_insertf32x4(res, _mm_xor_ps(lane2_a, lane2_b), 2);

  Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);
  Packet4f lane3_b = _mm512_extractf32x4_ps(b, 3);
  res = _mm512_insertf32x4(res, _mm_xor_ps(lane3_a, lane3_b), 3);

  return res;
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8d pxor<Packet8d>(const Packet8d& a,
                                            const Packet8d& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
  return _mm512_xor_pd(a, b);
#else
  Packet8d res = _mm512_undefined_pd();
  Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);
  Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);
  res = _mm512_insertf64x4(res, _mm256_xor_pd(lane0_a, lane0_b), 0);

  Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);
  Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);
  res = _mm512_insertf64x4(res, _mm256_xor_pd(lane1_a, lane1_b), 1);

  return res;
#endif
}

template <>
EIGEN_STRONG_INLINE Packet16f pandnot<Packet16f>(const Packet16f& a,
                                                 const Packet16f& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
  return _mm512_andnot_ps(a, b);
#else
  Packet16f res = _mm512_undefined_ps();
  Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);
  Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);
  res = _mm512_insertf32x4(res, _mm_andnot_ps(lane0_a, lane0_b), 0);

  Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);
  Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);
  res = _mm512_insertf32x4(res, _mm_andnot_ps(lane1_a, lane1_b), 1);

  Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);
  Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);
  res = _mm512_insertf32x4(res, _mm_andnot_ps(lane2_a, lane2_b), 2);

  Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);
  Packet4f lane3_b = _mm512_extractf32x4_ps(b, 3);
  res = _mm512_insertf32x4(res, _mm_andnot_ps(lane3_a, lane3_b), 3);

  return res;
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8d pandnot<Packet8d>(const Packet8d& a,
                                               const Packet8d& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
  return _mm512_andnot_pd(a, b);
#else
  Packet8d res = _mm512_undefined_pd();
  Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);
  Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);
  res = _mm512_insertf64x4(res, _mm256_andnot_pd(lane0_a, lane0_b), 0);

  Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);
  Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);
  res = _mm512_insertf64x4(res, _mm256_andnot_pd(lane1_a, lane1_b), 1);

  return res;
#endif
}

template <>
EIGEN_STRONG_INLINE Packet16f pload<Packet16f>(const float* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_ps(from);
}
template <>
EIGEN_STRONG_INLINE Packet8d pload<Packet8d>(const double* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_pd(from);
}
template <>
EIGEN_STRONG_INLINE Packet16i pload<Packet16i>(const int* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_si512(
      reinterpret_cast<const __m512i*>(from));
}

template <>
EIGEN_STRONG_INLINE Packet16f ploadu<Packet16f>(const float* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_ps(from);
}
template <>
EIGEN_STRONG_INLINE Packet8d ploadu<Packet8d>(const double* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_pd(from);
}
template <>
EIGEN_STRONG_INLINE Packet16i ploadu<Packet16i>(const int* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_si512(
      reinterpret_cast<const __m512i*>(from));
}

// Loads 8 floats from memory and returns the packet
// {a0, a0, a1, a1, a2, a2, a3, a3, a4, a4, a5, a5, a6, a6, a7, a7}
template <>
EIGEN_STRONG_INLINE Packet16f ploaddup<Packet16f>(const float* from) {
  Packet8f lane0 = _mm256_broadcast_ps((const __m128*)(const void*)from);
  // mimic an "inplace" permutation of the lower 128 bits using a blend
  lane0 = _mm256_blend_ps(
      lane0, _mm256_castps128_ps256(_mm_permute_ps(
                 _mm256_castps256_ps128(lane0), _MM_SHUFFLE(1, 0, 1, 0))),
      15);
  // then we can perform a consistent permutation on the global register to get
  // everything in shape:
  lane0 = _mm256_permute_ps(lane0, _MM_SHUFFLE(3, 3, 2, 2));

  Packet8f lane1 = _mm256_broadcast_ps((const __m128*)(const void*)(from + 4));
  // mimic an "inplace" permutation of the lower 128 bits using a blend
  lane1 = _mm256_blend_ps(
      lane1, _mm256_castps128_ps256(_mm_permute_ps(
                 _mm256_castps256_ps128(lane1), _MM_SHUFFLE(1, 0, 1, 0))),
      15);
  // then we can perform a consistent permutation on the global register to get
  // everything in shape:
  lane1 = _mm256_permute_ps(lane1, _MM_SHUFFLE(3, 3, 2, 2));

#ifdef EIGEN_VECTORIZE_AVX512DQ
  Packet16f res = _mm512_undefined_ps();
  res = _mm512_insertf32x8(res, lane0, 0);
  res = _mm512_insertf32x8(res, lane1, 1);
  return res;
#else
  Packet16f res = _mm512_undefined_ps();
  res = _mm512_insertf32x4(res, _mm256_extractf128_ps(lane0, 0), 0);
  res = _mm512_insertf32x4(res, _mm256_extractf128_ps(lane0, 1), 1);
  res = _mm512_insertf32x4(res, _mm256_extractf128_ps(lane1, 0), 2);
  res = _mm512_insertf32x4(res, _mm256_extractf128_ps(lane1, 1), 3);
  return res;
#endif
}
// Loads 4 doubles from memory and returns the packet
// {a0, a0, a1, a1, a2, a2, a3, a3}
template <>
EIGEN_STRONG_INLINE Packet8d ploaddup<Packet8d>(const double* from) {
  Packet4d lane0 = _mm256_broadcast_pd((const __m128d*)(const void*)from);
  lane0 = _mm256_permute_pd(lane0, 3 << 2);

  Packet4d lane1 = _mm256_broadcast_pd((const __m128d*)(const void*)(from + 2));
  lane1 = _mm256_permute_pd(lane1, 3 << 2);

  Packet8d res = _mm512_undefined_pd();
  res = _mm512_insertf64x4(res, lane0, 0);
  return _mm512_insertf64x4(res, lane1, 1);
}

// Loads 4 floats from memory and returns the packet
// {a0, a0, a0, a0, a1, a1, a1, a1, a2, a2, a2, a2, a3, a3, a3, a3}
template <>
EIGEN_STRONG_INLINE Packet16f ploadquad<Packet16f>(const float* from) {
  Packet16f tmp = _mm512_undefined_ps();
  tmp = _mm512_insertf32x4(tmp, _mm_load_ps1(from), 0);
  tmp = _mm512_insertf32x4(tmp, _mm_load_ps1(from + 1), 1);
  tmp = _mm512_insertf32x4(tmp, _mm_load_ps1(from + 2), 2);
  tmp = _mm512_insertf32x4(tmp, _mm_load_ps1(from + 3), 3);
  return tmp;
}
// Loads 2 doubles from memory and returns the packet
// {a0, a0, a0, a0, a1, a1, a1, a1}
template <>
EIGEN_STRONG_INLINE Packet8d ploadquad<Packet8d>(const double* from) {
  Packet8d tmp = _mm512_undefined_pd();
  Packet2d tmp0 = _mm_load_pd1(from);
  Packet2d tmp1 = _mm_load_pd1(from + 1);
  Packet4d lane0 = _mm256_broadcastsd_pd(tmp0);
  Packet4d lane1 = _mm256_broadcastsd_pd(tmp1);
  tmp = _mm512_insertf64x4(tmp, lane0, 0);
  return _mm512_insertf64x4(tmp, lane1, 1);
}

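// Worked example, not from Eigen's sources: given double data[4] = {1, 2, 3, 4},
// ploaddup<Packet8d>(data)  yields {1, 1, 2, 2, 3, 3, 4, 4}, while
// ploadquad<Packet8d>(data) reads only two doubles and yields
// {1, 1, 1, 1, 2, 2, 2, 2}.
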
template <>
EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet16f& from) {
  EIGEN_DEBUG_ALIGNED_STORE _mm512_store_ps(to, from);
}
template <>
EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet8d& from) {
  EIGEN_DEBUG_ALIGNED_STORE _mm512_store_pd(to, from);
}
template <>
EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet16i& from) {
  EIGEN_DEBUG_ALIGNED_STORE _mm512_store_si512(reinterpret_cast<__m512i*>(to),
                                               from);
}

template <>
EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet16f& from) {
  EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_ps(to, from);
}
template <>
EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet8d& from) {
  EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_pd(to, from);
}
template <>
EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet16i& from) {
  EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_si512(
      reinterpret_cast<__m512i*>(to), from);
}

template <>
EIGEN_DEVICE_FUNC inline Packet16f pgather<float, Packet16f>(const float* from,
                                                             Index stride) {
  Packet16i stride_vector = _mm512_set1_epi32(stride);
  Packet16i stride_multiplier =
      _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);

  return _mm512_i32gather_ps(indices, from, 4);
}
template <>
EIGEN_DEVICE_FUNC inline Packet8d pgather<double, Packet8d>(const double* from,
                                                            Index stride) {
  Packet8i stride_vector = _mm256_set1_epi32(stride);
  Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
  Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);

  return _mm512_i32gather_pd(indices, from, 8);
}

template <>
EIGEN_DEVICE_FUNC inline void pscatter<float, Packet16f>(float* to,
                                                         const Packet16f& from,
                                                         Index stride) {
  Packet16i stride_vector = _mm512_set1_epi32(stride);
  Packet16i stride_multiplier =
      _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);
  _mm512_i32scatter_ps(to, indices, from, 4);
}
template <>
EIGEN_DEVICE_FUNC inline void pscatter<double, Packet8d>(double* to,
                                                         const Packet8d& from,
                                                         Index stride) {
  Packet8i stride_vector = _mm256_set1_epi32(stride);
  Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
  Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);
  _mm512_i32scatter_pd(to, indices, from, 8);
}

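// A minimal usage sketch (hypothetical helper, not part of Eigen): pgather
// reads from[0], from[stride], ..., from[15*stride], which makes it a natural
// fit for walking one column of a row-major matrix. Assumes rows is a
// multiple of 16.
inline void example_gather_column(float* dst, const float* src, Index rows,
                                  Index stride) {
  for (Index i = 0; i < rows; i += 16) {
    // Gather 16 strided elements into one packet, then store them contiguously.
    pstoreu(dst + i, pgather<float, Packet16f>(src + i * stride, stride));
  }
}
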
template <>
EIGEN_STRONG_INLINE void pstore1<Packet16f>(float* to, const float& a) {
  Packet16f pa = pset1<Packet16f>(a);
  pstore(to, pa);
}
template <>
EIGEN_STRONG_INLINE void pstore1<Packet8d>(double* to, const double& a) {
  Packet8d pa = pset1<Packet8d>(a);
  pstore(to, pa);
}
template <>
EIGEN_STRONG_INLINE void pstore1<Packet16i>(int* to, const int& a) {
  Packet16i pa = pset1<Packet16i>(a);
  pstore(to, pa);
}

template<> EIGEN_STRONG_INLINE void prefetch<float>(const float*   addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }

template <>
EIGEN_STRONG_INLINE float pfirst<Packet16f>(const Packet16f& a) {
  return _mm_cvtss_f32(_mm512_extractf32x4_ps(a, 0));
}
template <>
EIGEN_STRONG_INLINE double pfirst<Packet8d>(const Packet8d& a) {
  return _mm_cvtsd_f64(_mm256_extractf128_pd(_mm512_extractf64x4_pd(a, 0), 0));
}
template <>
EIGEN_STRONG_INLINE int pfirst<Packet16i>(const Packet16i& a) {
  return _mm_extract_epi32(_mm512_extracti32x4_epi32(a, 0), 0);
}

template<> EIGEN_STRONG_INLINE Packet16f preverse(const Packet16f& a)
{
  return _mm512_permutexvar_ps(_mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), a);
}

template<> EIGEN_STRONG_INLINE Packet8d preverse(const Packet8d& a)
{
  return _mm512_permutexvar_pd(_mm512_set_epi32(0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7), a);
}

template<> EIGEN_STRONG_INLINE Packet16f pabs(const Packet16f& a)
{
  // there is no _mm512_abs_ps intrinsic, so clear the sign bit by hand
  return _mm512_castsi512_ps(
      _mm512_and_si512(_mm512_castps_si512(a), _mm512_set1_epi32(0x7fffffff)));
}
template <>
EIGEN_STRONG_INLINE Packet8d pabs(const Packet8d& a) {
  // there is no _mm512_abs_pd intrinsic, so clear the sign bit by hand
  return _mm512_castsi512_pd(_mm512_and_si512(
      _mm512_castpd_si512(a), _mm512_set1_epi64(0x7fffffffffffffff)));
}

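// Worked example of the masking above, not from Eigen's sources: -3.5f has
// bit pattern 0xC0600000; ANDing with 0x7FFFFFFF clears the sign bit and
// leaves 0x40600000, which is +3.5f.
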
#ifdef EIGEN_VECTORIZE_AVX512DQ
// AVX512DQ provides _mm512_extractf32x8_ps to extract a __m256 from a __m512.
#define EIGEN_EXTRACT_8f_FROM_16f(INPUT, OUTPUT)        \
  __m256 OUTPUT##_0 = _mm512_extractf32x8_ps(INPUT, 0); \
  __m256 OUTPUT##_1 = _mm512_extractf32x8_ps(INPUT, 1);
#else
// Plain AVX512F does not, so assemble each __m256 from two 128-bit extracts.
#define EIGEN_EXTRACT_8f_FROM_16f(INPUT, OUTPUT)                \
  __m256 OUTPUT##_0 = _mm256_insertf128_ps(                     \
      _mm256_castps128_ps256(_mm512_extractf32x4_ps(INPUT, 0)), \
      _mm512_extractf32x4_ps(INPUT, 1), 1);                     \
  __m256 OUTPUT##_1 = _mm256_insertf128_ps(                     \
      _mm256_castps128_ps256(_mm512_extractf32x4_ps(INPUT, 2)), \
      _mm512_extractf32x4_ps(INPUT, 3), 1);
#endif

#ifdef EIGEN_VECTORIZE_AVX512DQ
#define EIGEN_INSERT_8f_INTO_16f(OUTPUT, INPUTA, INPUTB) \
  OUTPUT = _mm512_insertf32x8(OUTPUT, INPUTA, 0);        \
  OUTPUT = _mm512_insertf32x8(OUTPUT, INPUTB, 1);
#else
#define EIGEN_INSERT_8f_INTO_16f(OUTPUT, INPUTA, INPUTB)                    \
  OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTA, 0), 0); \
  OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTA, 1), 1); \
  OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTB, 0), 2); \
  OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTB, 1), 3);
#endif
template<> EIGEN_STRONG_INLINE Packet16f preduxp<Packet16f>(const Packet16f* vecs)
{
  EIGEN_EXTRACT_8f_FROM_16f(vecs[0], vecs0);
  EIGEN_EXTRACT_8f_FROM_16f(vecs[1], vecs1);
  EIGEN_EXTRACT_8f_FROM_16f(vecs[2], vecs2);
  EIGEN_EXTRACT_8f_FROM_16f(vecs[3], vecs3);
  EIGEN_EXTRACT_8f_FROM_16f(vecs[4], vecs4);
  EIGEN_EXTRACT_8f_FROM_16f(vecs[5], vecs5);
  EIGEN_EXTRACT_8f_FROM_16f(vecs[6], vecs6);
  EIGEN_EXTRACT_8f_FROM_16f(vecs[7], vecs7);
  EIGEN_EXTRACT_8f_FROM_16f(vecs[8], vecs8);
  EIGEN_EXTRACT_8f_FROM_16f(vecs[9], vecs9);
  EIGEN_EXTRACT_8f_FROM_16f(vecs[10], vecs10);
  EIGEN_EXTRACT_8f_FROM_16f(vecs[11], vecs11);
  EIGEN_EXTRACT_8f_FROM_16f(vecs[12], vecs12);
  EIGEN_EXTRACT_8f_FROM_16f(vecs[13], vecs13);
  EIGEN_EXTRACT_8f_FROM_16f(vecs[14], vecs14);
  EIGEN_EXTRACT_8f_FROM_16f(vecs[15], vecs15);

  __m256 hsum1 = _mm256_hadd_ps(vecs0_0, vecs1_0);
  __m256 hsum2 = _mm256_hadd_ps(vecs2_0, vecs3_0);
  __m256 hsum3 = _mm256_hadd_ps(vecs4_0, vecs5_0);
  __m256 hsum4 = _mm256_hadd_ps(vecs6_0, vecs7_0);

  __m256 hsum5 = _mm256_hadd_ps(hsum1, hsum1);
  __m256 hsum6 = _mm256_hadd_ps(hsum2, hsum2);
  __m256 hsum7 = _mm256_hadd_ps(hsum3, hsum3);
  __m256 hsum8 = _mm256_hadd_ps(hsum4, hsum4);

  __m256 perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
  __m256 perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
  __m256 perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
  __m256 perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);

  __m256 sum1 = _mm256_add_ps(perm1, hsum5);
  __m256 sum2 = _mm256_add_ps(perm2, hsum6);
  __m256 sum3 = _mm256_add_ps(perm3, hsum7);
  __m256 sum4 = _mm256_add_ps(perm4, hsum8);

  __m256 blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
  __m256 blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);

  __m256 final = _mm256_blend_ps(blend1, blend2, 0xf0);

  hsum1 = _mm256_hadd_ps(vecs0_1, vecs1_1);
  hsum2 = _mm256_hadd_ps(vecs2_1, vecs3_1);
  hsum3 = _mm256_hadd_ps(vecs4_1, vecs5_1);
  hsum4 = _mm256_hadd_ps(vecs6_1, vecs7_1);

  hsum5 = _mm256_hadd_ps(hsum1, hsum1);
  hsum6 = _mm256_hadd_ps(hsum2, hsum2);
  hsum7 = _mm256_hadd_ps(hsum3, hsum3);
  hsum8 = _mm256_hadd_ps(hsum4, hsum4);

  perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
  perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
  perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
  perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);

  sum1 = _mm256_add_ps(perm1, hsum5);
  sum2 = _mm256_add_ps(perm2, hsum6);
  sum3 = _mm256_add_ps(perm3, hsum7);
  sum4 = _mm256_add_ps(perm4, hsum8);

  blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
  blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);

  final = padd(final, _mm256_blend_ps(blend1, blend2, 0xf0));

  hsum1 = _mm256_hadd_ps(vecs8_0, vecs9_0);
  hsum2 = _mm256_hadd_ps(vecs10_0, vecs11_0);
  hsum3 = _mm256_hadd_ps(vecs12_0, vecs13_0);
  hsum4 = _mm256_hadd_ps(vecs14_0, vecs15_0);

  hsum5 = _mm256_hadd_ps(hsum1, hsum1);
  hsum6 = _mm256_hadd_ps(hsum2, hsum2);
  hsum7 = _mm256_hadd_ps(hsum3, hsum3);
  hsum8 = _mm256_hadd_ps(hsum4, hsum4);

  perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
  perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
  perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
  perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);

  sum1 = _mm256_add_ps(perm1, hsum5);
  sum2 = _mm256_add_ps(perm2, hsum6);
  sum3 = _mm256_add_ps(perm3, hsum7);
  sum4 = _mm256_add_ps(perm4, hsum8);

  blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
  blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);

  __m256 final_1 = _mm256_blend_ps(blend1, blend2, 0xf0);

  hsum1 = _mm256_hadd_ps(vecs8_1, vecs9_1);
  hsum2 = _mm256_hadd_ps(vecs10_1, vecs11_1);
  hsum3 = _mm256_hadd_ps(vecs12_1, vecs13_1);
  hsum4 = _mm256_hadd_ps(vecs14_1, vecs15_1);

  hsum5 = _mm256_hadd_ps(hsum1, hsum1);
  hsum6 = _mm256_hadd_ps(hsum2, hsum2);
  hsum7 = _mm256_hadd_ps(hsum3, hsum3);
  hsum8 = _mm256_hadd_ps(hsum4, hsum4);

  perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
  perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
  perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
  perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);

  sum1 = _mm256_add_ps(perm1, hsum5);
  sum2 = _mm256_add_ps(perm2, hsum6);
  sum3 = _mm256_add_ps(perm3, hsum7);
  sum4 = _mm256_add_ps(perm4, hsum8);

  blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
  blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);

  final_1 = padd(final_1, _mm256_blend_ps(blend1, blend2, 0xf0));

  __m512 final_output = _mm512_undefined_ps();

  EIGEN_INSERT_8f_INTO_16f(final_output, final, final_1);
  return final_output;
}

template<> EIGEN_STRONG_INLINE Packet8d preduxp<Packet8d>(const Packet8d* vecs)
{
  Packet4d vecs0_0 = _mm512_extractf64x4_pd(vecs[0], 0);
  Packet4d vecs0_1 = _mm512_extractf64x4_pd(vecs[0], 1);

  Packet4d vecs1_0 = _mm512_extractf64x4_pd(vecs[1], 0);
  Packet4d vecs1_1 = _mm512_extractf64x4_pd(vecs[1], 1);

  Packet4d vecs2_0 = _mm512_extractf64x4_pd(vecs[2], 0);
  Packet4d vecs2_1 = _mm512_extractf64x4_pd(vecs[2], 1);

  Packet4d vecs3_0 = _mm512_extractf64x4_pd(vecs[3], 0);
  Packet4d vecs3_1 = _mm512_extractf64x4_pd(vecs[3], 1);

  Packet4d vecs4_0 = _mm512_extractf64x4_pd(vecs[4], 0);
  Packet4d vecs4_1 = _mm512_extractf64x4_pd(vecs[4], 1);

  Packet4d vecs5_0 = _mm512_extractf64x4_pd(vecs[5], 0);
  Packet4d vecs5_1 = _mm512_extractf64x4_pd(vecs[5], 1);

  Packet4d vecs6_0 = _mm512_extractf64x4_pd(vecs[6], 0);
  Packet4d vecs6_1 = _mm512_extractf64x4_pd(vecs[6], 1);

  Packet4d vecs7_0 = _mm512_extractf64x4_pd(vecs[7], 0);
  Packet4d vecs7_1 = _mm512_extractf64x4_pd(vecs[7], 1);

  Packet4d tmp0, tmp1;

  tmp0 = _mm256_hadd_pd(vecs0_0, vecs1_0);
  tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));

  tmp1 = _mm256_hadd_pd(vecs2_0, vecs3_0);
  tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));

  __m256d final_0 = _mm256_blend_pd(tmp0, tmp1, 0xC);

  tmp0 = _mm256_hadd_pd(vecs0_1, vecs1_1);
  tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));

  tmp1 = _mm256_hadd_pd(vecs2_1, vecs3_1);
  tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));

  final_0 = padd(final_0, _mm256_blend_pd(tmp0, tmp1, 0xC));

  tmp0 = _mm256_hadd_pd(vecs4_0, vecs5_0);
  tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));

  tmp1 = _mm256_hadd_pd(vecs6_0, vecs7_0);
  tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));

  __m256d final_1 = _mm256_blend_pd(tmp0, tmp1, 0xC);

  tmp0 = _mm256_hadd_pd(vecs4_1, vecs5_1);
  tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));

  tmp1 = _mm256_hadd_pd(vecs6_1, vecs7_1);
  tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));

  final_1 = padd(final_1, _mm256_blend_pd(tmp0, tmp1, 0xC));

  __m512d final_output = _mm512_undefined_pd();
  final_output = _mm512_insertf64x4(final_output, final_0, 0);

  return _mm512_insertf64x4(final_output, final_1, 1);
}

template <>
EIGEN_STRONG_INLINE float predux<Packet16f>(const Packet16f& a) {
  //#ifdef EIGEN_VECTORIZE_AVX512DQ
#if 0
  Packet8f lane0 = _mm512_extractf32x8_ps(a, 0);
  Packet8f lane1 = _mm512_extractf32x8_ps(a, 1);
  Packet8f sum = padd(lane0, lane1);
  Packet8f tmp0 = _mm256_hadd_ps(sum, _mm256_permute2f128_ps(sum, sum, 1));
  tmp0 = _mm256_hadd_ps(tmp0, tmp0);
  return pfirst(_mm256_hadd_ps(tmp0, tmp0));
#else
  Packet4f lane0 = _mm512_extractf32x4_ps(a, 0);
  Packet4f lane1 = _mm512_extractf32x4_ps(a, 1);
  Packet4f lane2 = _mm512_extractf32x4_ps(a, 2);
  Packet4f lane3 = _mm512_extractf32x4_ps(a, 3);
  Packet4f sum = padd(padd(lane0, lane1), padd(lane2, lane3));
  sum = _mm_hadd_ps(sum, sum);
  sum = _mm_hadd_ps(sum, _mm_permute_ps(sum, 1));
  return pfirst(sum);
#endif
}
template <>
EIGEN_STRONG_INLINE double predux<Packet8d>(const Packet8d& a) {
  Packet4d lane0 = _mm512_extractf64x4_pd(a, 0);
  Packet4d lane1 = _mm512_extractf64x4_pd(a, 1);
  Packet4d sum = padd(lane0, lane1);
  Packet4d tmp0 = _mm256_hadd_pd(sum, _mm256_permute2f128_pd(sum, sum, 1));
  return pfirst(_mm256_hadd_pd(tmp0, tmp0));
}

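// A minimal usage sketch (hypothetical helper, not part of Eigen) combining
// the vertical and horizontal primitives above into a dot product. Assumes n
// is a multiple of 16; an FMA-enabled build could use pmadd instead of the
// portable pmul/padd pair shown here.
inline float example_dot(const float* x, const float* y, int n) {
  Packet16f acc = pset1<Packet16f>(0.0f);
  for (int i = 0; i < n; i += 16) {
    acc = padd(acc, pmul(ploadu<Packet16f>(x + i), ploadu<Packet16f>(y + i)));
  }
  return predux(acc);  // horizontal sum of the 16 partial sums
}
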
template <>
EIGEN_STRONG_INLINE Packet8f predux_downto4<Packet16f>(const Packet16f& a) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
  Packet8f lane0 = _mm512_extractf32x8_ps(a, 0);
  Packet8f lane1 = _mm512_extractf32x8_ps(a, 1);
  return padd(lane0, lane1);
#else
  Packet4f lane0 = _mm512_extractf32x4_ps(a, 0);
  Packet4f lane1 = _mm512_extractf32x4_ps(a, 1);
  Packet4f lane2 = _mm512_extractf32x4_ps(a, 2);
  Packet4f lane3 = _mm512_extractf32x4_ps(a, 3);
  Packet4f sum0 = padd(lane0, lane2);
  Packet4f sum1 = padd(lane1, lane3);
  return _mm256_insertf128_ps(_mm256_castps128_ps256(sum0), sum1, 1);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet4d predux_downto4<Packet8d>(const Packet8d& a) {
  Packet4d lane0 = _mm512_extractf64x4_pd(a, 0);
  Packet4d lane1 = _mm512_extractf64x4_pd(a, 1);
  Packet4d res = padd(lane0, lane1);
  return res;
}

template <>
EIGEN_STRONG_INLINE float predux_mul<Packet16f>(const Packet16f& a) {
//#ifdef EIGEN_VECTORIZE_AVX512DQ
#if 0
  Packet8f lane0 = _mm512_extractf32x8_ps(a, 0);
  Packet8f lane1 = _mm512_extractf32x8_ps(a, 1);
  Packet8f prod8 = pmul(lane0, lane1);
  prod8 = pmul(prod8, _mm256_permute2f128_ps(prod8, prod8, 1));
  Packet4f res = _mm256_castps256_ps128(prod8);
  res = pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
  return pfirst(pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
#else
  Packet4f lane0 = _mm512_extractf32x4_ps(a, 0);
  Packet4f lane1 = _mm512_extractf32x4_ps(a, 1);
  Packet4f lane2 = _mm512_extractf32x4_ps(a, 2);
  Packet4f lane3 = _mm512_extractf32x4_ps(a, 3);
  Packet4f res = pmul(pmul(lane0, lane1), pmul(lane2, lane3));
  res = pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
  return pfirst(pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
#endif
}
template <>
EIGEN_STRONG_INLINE double predux_mul<Packet8d>(const Packet8d& a) {
  Packet4d lane0 = _mm512_extractf64x4_pd(a, 0);
  Packet4d lane1 = _mm512_extractf64x4_pd(a, 1);
  Packet4d res = pmul(lane0, lane1);
  res = pmul(res, _mm256_permute2f128_pd(res, res, 1));
  return pfirst(pmul(res, _mm256_shuffle_pd(res, res, 1)));
}

template <>
EIGEN_STRONG_INLINE float predux_min<Packet16f>(const Packet16f& a) {
  Packet4f lane0 = _mm512_extractf32x4_ps(a, 0);
  Packet4f lane1 = _mm512_extractf32x4_ps(a, 1);
  Packet4f lane2 = _mm512_extractf32x4_ps(a, 2);
  Packet4f lane3 = _mm512_extractf32x4_ps(a, 3);
  Packet4f res = _mm_min_ps(_mm_min_ps(lane0, lane1), _mm_min_ps(lane2, lane3));
  res = _mm_min_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
  return pfirst(_mm_min_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
}
template <>
EIGEN_STRONG_INLINE double predux_min<Packet8d>(const Packet8d& a) {
  Packet4d lane0 = _mm512_extractf64x4_pd(a, 0);
  Packet4d lane1 = _mm512_extractf64x4_pd(a, 1);
  Packet4d res = _mm256_min_pd(lane0, lane1);
  res = _mm256_min_pd(res, _mm256_permute2f128_pd(res, res, 1));
  return pfirst(_mm256_min_pd(res, _mm256_shuffle_pd(res, res, 1)));
}

template <>
EIGEN_STRONG_INLINE float predux_max<Packet16f>(const Packet16f& a) {
  Packet4f lane0 = _mm512_extractf32x4_ps(a, 0);
  Packet4f lane1 = _mm512_extractf32x4_ps(a, 1);
  Packet4f lane2 = _mm512_extractf32x4_ps(a, 2);
  Packet4f lane3 = _mm512_extractf32x4_ps(a, 3);
  Packet4f res = _mm_max_ps(_mm_max_ps(lane0, lane1), _mm_max_ps(lane2, lane3));
  res = _mm_max_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
  return pfirst(_mm_max_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
}
template <>
EIGEN_STRONG_INLINE double predux_max<Packet8d>(const Packet8d& a) {
  Packet4d lane0 = _mm512_extractf64x4_pd(a, 0);
  Packet4d lane1 = _mm512_extractf64x4_pd(a, 1);
  Packet4d res = _mm256_max_pd(lane0, lane1);
  res = _mm256_max_pd(res, _mm256_permute2f128_pd(res, res, 1));
  return pfirst(_mm256_max_pd(res, _mm256_shuffle_pd(res, res, 1)));
}

template <int Offset>
struct palign_impl<Offset, Packet16f> {
  static EIGEN_STRONG_INLINE void run(Packet16f& first,
                                      const Packet16f& second) {
    if (Offset != 0) {
      __m512i first_idx = _mm512_set_epi32(
          Offset + 15, Offset + 14, Offset + 13, Offset + 12, Offset + 11,
          Offset + 10, Offset + 9, Offset + 8, Offset + 7, Offset + 6,
          Offset + 5, Offset + 4, Offset + 3, Offset + 2, Offset + 1, Offset);

      __m512i second_idx =
          _mm512_set_epi32(Offset - 1, Offset - 2, Offset - 3, Offset - 4,
                           Offset - 5, Offset - 6, Offset - 7, Offset - 8,
                           Offset - 9, Offset - 10, Offset - 11, Offset - 12,
                           Offset - 13, Offset - 14, Offset - 15, Offset - 16);

      unsigned short mask = 0xFFFF;
      mask <<= (16 - Offset);

      first = _mm512_permutexvar_ps(first_idx, first);
      Packet16f tmp = _mm512_permutexvar_ps(second_idx, second);
      first = _mm512_mask_blend_ps(mask, first, tmp);
    }
  }
};
template <int Offset>
struct palign_impl<Offset, Packet8d> {
  static EIGEN_STRONG_INLINE void run(Packet8d& first, const Packet8d& second) {
    if (Offset != 0) {
      __m512i first_idx = _mm512_set_epi32(
          0, Offset + 7, 0, Offset + 6, 0, Offset + 5, 0, Offset + 4, 0,
          Offset + 3, 0, Offset + 2, 0, Offset + 1, 0, Offset);

      __m512i second_idx = _mm512_set_epi32(
          0, Offset - 1, 0, Offset - 2, 0, Offset - 3, 0, Offset - 4, 0,
          Offset - 5, 0, Offset - 6, 0, Offset - 7, 0, Offset - 8);

      unsigned char mask = 0xFF;
      mask <<= (8 - Offset);

      first = _mm512_permutexvar_pd(first_idx, first);
      Packet8d tmp = _mm512_permutexvar_pd(second_idx, second);
      first = _mm512_mask_blend_pd(mask, first, tmp);
    }
  }
};

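// Illustrative note, not part of Eigen's sources: palign_impl shifts the pair
// (first, second) left by Offset elements; e.g. for Offset == 3 on Packet16f,
// first becomes {first[3], ..., first[15], second[0], second[1], second[2]}.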

#define PACK_OUTPUT(OUTPUT, INPUT, INDEX, STRIDE) \
  EIGEN_INSERT_8f_INTO_16f(OUTPUT[INDEX], INPUT[INDEX], INPUT[INDEX + STRIDE]);

EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet16f, 16>& kernel) {
  __m512 T0 = _mm512_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
  __m512 T1 = _mm512_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
  __m512 T2 = _mm512_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
  __m512 T3 = _mm512_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
  __m512 T4 = _mm512_unpacklo_ps(kernel.packet[4], kernel.packet[5]);
  __m512 T5 = _mm512_unpackhi_ps(kernel.packet[4], kernel.packet[5]);
  __m512 T6 = _mm512_unpacklo_ps(kernel.packet[6], kernel.packet[7]);
  __m512 T7 = _mm512_unpackhi_ps(kernel.packet[6], kernel.packet[7]);
  __m512 T8 = _mm512_unpacklo_ps(kernel.packet[8], kernel.packet[9]);
  __m512 T9 = _mm512_unpackhi_ps(kernel.packet[8], kernel.packet[9]);
  __m512 T10 = _mm512_unpacklo_ps(kernel.packet[10], kernel.packet[11]);
  __m512 T11 = _mm512_unpackhi_ps(kernel.packet[10], kernel.packet[11]);
  __m512 T12 = _mm512_unpacklo_ps(kernel.packet[12], kernel.packet[13]);
  __m512 T13 = _mm512_unpackhi_ps(kernel.packet[12], kernel.packet[13]);
  __m512 T14 = _mm512_unpacklo_ps(kernel.packet[14], kernel.packet[15]);
  __m512 T15 = _mm512_unpackhi_ps(kernel.packet[14], kernel.packet[15]);
  __m512 S0 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
  __m512 S1 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
  __m512 S2 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
  __m512 S3 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
  __m512 S4 = _mm512_shuffle_ps(T4, T6, _MM_SHUFFLE(1, 0, 1, 0));
  __m512 S5 = _mm512_shuffle_ps(T4, T6, _MM_SHUFFLE(3, 2, 3, 2));
  __m512 S6 = _mm512_shuffle_ps(T5, T7, _MM_SHUFFLE(1, 0, 1, 0));
  __m512 S7 = _mm512_shuffle_ps(T5, T7, _MM_SHUFFLE(3, 2, 3, 2));
  __m512 S8 = _mm512_shuffle_ps(T8, T10, _MM_SHUFFLE(1, 0, 1, 0));
  __m512 S9 = _mm512_shuffle_ps(T8, T10, _MM_SHUFFLE(3, 2, 3, 2));
  __m512 S10 = _mm512_shuffle_ps(T9, T11, _MM_SHUFFLE(1, 0, 1, 0));
  __m512 S11 = _mm512_shuffle_ps(T9, T11, _MM_SHUFFLE(3, 2, 3, 2));
  __m512 S12 = _mm512_shuffle_ps(T12, T14, _MM_SHUFFLE(1, 0, 1, 0));
  __m512 S13 = _mm512_shuffle_ps(T12, T14, _MM_SHUFFLE(3, 2, 3, 2));
  __m512 S14 = _mm512_shuffle_ps(T13, T15, _MM_SHUFFLE(1, 0, 1, 0));
  __m512 S15 = _mm512_shuffle_ps(T13, T15, _MM_SHUFFLE(3, 2, 3, 2));

  EIGEN_EXTRACT_8f_FROM_16f(S0, S0);
  EIGEN_EXTRACT_8f_FROM_16f(S1, S1);
  EIGEN_EXTRACT_8f_FROM_16f(S2, S2);
  EIGEN_EXTRACT_8f_FROM_16f(S3, S3);
  EIGEN_EXTRACT_8f_FROM_16f(S4, S4);
  EIGEN_EXTRACT_8f_FROM_16f(S5, S5);
  EIGEN_EXTRACT_8f_FROM_16f(S6, S6);
  EIGEN_EXTRACT_8f_FROM_16f(S7, S7);
  EIGEN_EXTRACT_8f_FROM_16f(S8, S8);
  EIGEN_EXTRACT_8f_FROM_16f(S9, S9);
  EIGEN_EXTRACT_8f_FROM_16f(S10, S10);
  EIGEN_EXTRACT_8f_FROM_16f(S11, S11);
  EIGEN_EXTRACT_8f_FROM_16f(S12, S12);
  EIGEN_EXTRACT_8f_FROM_16f(S13, S13);
  EIGEN_EXTRACT_8f_FROM_16f(S14, S14);
  EIGEN_EXTRACT_8f_FROM_16f(S15, S15);

  PacketBlock<Packet8f, 32> tmp;

  tmp.packet[0] = _mm256_permute2f128_ps(S0_0, S4_0, 0x20);
  tmp.packet[1] = _mm256_permute2f128_ps(S1_0, S5_0, 0x20);
  tmp.packet[2] = _mm256_permute2f128_ps(S2_0, S6_0, 0x20);
  tmp.packet[3] = _mm256_permute2f128_ps(S3_0, S7_0, 0x20);
  tmp.packet[4] = _mm256_permute2f128_ps(S0_0, S4_0, 0x31);
  tmp.packet[5] = _mm256_permute2f128_ps(S1_0, S5_0, 0x31);
  tmp.packet[6] = _mm256_permute2f128_ps(S2_0, S6_0, 0x31);
  tmp.packet[7] = _mm256_permute2f128_ps(S3_0, S7_0, 0x31);

  tmp.packet[8] = _mm256_permute2f128_ps(S0_1, S4_1, 0x20);
  tmp.packet[9] = _mm256_permute2f128_ps(S1_1, S5_1, 0x20);
  tmp.packet[10] = _mm256_permute2f128_ps(S2_1, S6_1, 0x20);
  tmp.packet[11] = _mm256_permute2f128_ps(S3_1, S7_1, 0x20);
  tmp.packet[12] = _mm256_permute2f128_ps(S0_1, S4_1, 0x31);
  tmp.packet[13] = _mm256_permute2f128_ps(S1_1, S5_1, 0x31);
  tmp.packet[14] = _mm256_permute2f128_ps(S2_1, S6_1, 0x31);
  tmp.packet[15] = _mm256_permute2f128_ps(S3_1, S7_1, 0x31);

  // Second set of _m256 outputs
  tmp.packet[16] = _mm256_permute2f128_ps(S8_0, S12_0, 0x20);
  tmp.packet[17] = _mm256_permute2f128_ps(S9_0, S13_0, 0x20);
  tmp.packet[18] = _mm256_permute2f128_ps(S10_0, S14_0, 0x20);
  tmp.packet[19] = _mm256_permute2f128_ps(S11_0, S15_0, 0x20);
  tmp.packet[20] = _mm256_permute2f128_ps(S8_0, S12_0, 0x31);
  tmp.packet[21] = _mm256_permute2f128_ps(S9_0, S13_0, 0x31);
  tmp.packet[22] = _mm256_permute2f128_ps(S10_0, S14_0, 0x31);
  tmp.packet[23] = _mm256_permute2f128_ps(S11_0, S15_0, 0x31);

  tmp.packet[24] = _mm256_permute2f128_ps(S8_1, S12_1, 0x20);
  tmp.packet[25] = _mm256_permute2f128_ps(S9_1, S13_1, 0x20);
  tmp.packet[26] = _mm256_permute2f128_ps(S10_1, S14_1, 0x20);
  tmp.packet[27] = _mm256_permute2f128_ps(S11_1, S15_1, 0x20);
  tmp.packet[28] = _mm256_permute2f128_ps(S8_1, S12_1, 0x31);
  tmp.packet[29] = _mm256_permute2f128_ps(S9_1, S13_1, 0x31);
  tmp.packet[30] = _mm256_permute2f128_ps(S10_1, S14_1, 0x31);
  tmp.packet[31] = _mm256_permute2f128_ps(S11_1, S15_1, 0x31);

  // Pack them into the output
  PACK_OUTPUT(kernel.packet, tmp.packet, 0, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 1, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 2, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 3, 16);

  PACK_OUTPUT(kernel.packet, tmp.packet, 4, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 5, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 6, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 7, 16);

  PACK_OUTPUT(kernel.packet, tmp.packet, 8, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 9, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 10, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 11, 16);

  PACK_OUTPUT(kernel.packet, tmp.packet, 12, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 13, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 14, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 15, 16);
}
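// Illustrative note, not part of Eigen's sources: viewing kernel.packet[0..15]
// as the rows of a 16x16 float matrix, the routine above transposes it in
// registers, so row i afterwards holds what column i held before.
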
#define PACK_OUTPUT_2(OUTPUT, INPUT, INDEX, STRIDE)         \
  EIGEN_INSERT_8f_INTO_16f(OUTPUT[INDEX], INPUT[2 * INDEX], \
                           INPUT[2 * INDEX + STRIDE]);

EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet16f, 4>& kernel) {
  __m512 T0 = _mm512_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
  __m512 T1 = _mm512_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
  __m512 T2 = _mm512_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
  __m512 T3 = _mm512_unpackhi_ps(kernel.packet[2], kernel.packet[3]);

  __m512 S0 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
  __m512 S1 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
  __m512 S2 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
  __m512 S3 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));

  EIGEN_EXTRACT_8f_FROM_16f(S0, S0);
  EIGEN_EXTRACT_8f_FROM_16f(S1, S1);
  EIGEN_EXTRACT_8f_FROM_16f(S2, S2);
  EIGEN_EXTRACT_8f_FROM_16f(S3, S3);

  PacketBlock<Packet8f, 8> tmp;

  tmp.packet[0] = _mm256_permute2f128_ps(S0_0, S1_0, 0x20);
  tmp.packet[1] = _mm256_permute2f128_ps(S2_0, S3_0, 0x20);
  tmp.packet[2] = _mm256_permute2f128_ps(S0_0, S1_0, 0x31);
  tmp.packet[3] = _mm256_permute2f128_ps(S2_0, S3_0, 0x31);

  tmp.packet[4] = _mm256_permute2f128_ps(S0_1, S1_1, 0x20);
  tmp.packet[5] = _mm256_permute2f128_ps(S2_1, S3_1, 0x20);
  tmp.packet[6] = _mm256_permute2f128_ps(S0_1, S1_1, 0x31);
  tmp.packet[7] = _mm256_permute2f128_ps(S2_1, S3_1, 0x31);

  PACK_OUTPUT_2(kernel.packet, tmp.packet, 0, 1);
  PACK_OUTPUT_2(kernel.packet, tmp.packet, 1, 1);
  PACK_OUTPUT_2(kernel.packet, tmp.packet, 2, 1);
  PACK_OUTPUT_2(kernel.packet, tmp.packet, 3, 1);
}

#define PACK_OUTPUT_SQ_D(OUTPUT, INPUT, INDEX, STRIDE)                \
  OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[INDEX], 0); \
  OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[INDEX + STRIDE], 1);

#define PACK_OUTPUT_D(OUTPUT, INPUT, INDEX, STRIDE)                         \
  OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[(2 * INDEX)], 0); \
  OUTPUT[INDEX] =                                                           \
      _mm512_insertf64x4(OUTPUT[INDEX], INPUT[(2 * INDEX) + STRIDE], 1);

EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8d, 4>& kernel) {
  __m512d T0 = _mm512_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);
  __m512d T1 = _mm512_shuffle_pd(kernel.packet[0], kernel.packet[1], 0xff);
  __m512d T2 = _mm512_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);
  __m512d T3 = _mm512_shuffle_pd(kernel.packet[2], kernel.packet[3], 0xff);

  PacketBlock<Packet4d, 8> tmp;

  tmp.packet[0] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
                                         _mm512_extractf64x4_pd(T2, 0), 0x20);
  tmp.packet[1] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
                                         _mm512_extractf64x4_pd(T3, 0), 0x20);
  tmp.packet[2] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
                                         _mm512_extractf64x4_pd(T2, 0), 0x31);
  tmp.packet[3] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
                                         _mm512_extractf64x4_pd(T3, 0), 0x31);

  tmp.packet[4] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
                                         _mm512_extractf64x4_pd(T2, 1), 0x20);
  tmp.packet[5] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
                                         _mm512_extractf64x4_pd(T3, 1), 0x20);
  tmp.packet[6] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
                                         _mm512_extractf64x4_pd(T2, 1), 0x31);
  tmp.packet[7] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
                                         _mm512_extractf64x4_pd(T3, 1), 0x31);

  PACK_OUTPUT_D(kernel.packet, tmp.packet, 0, 1);
  PACK_OUTPUT_D(kernel.packet, tmp.packet, 1, 1);
  PACK_OUTPUT_D(kernel.packet, tmp.packet, 2, 1);
  PACK_OUTPUT_D(kernel.packet, tmp.packet, 3, 1);
}

EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8d, 8>& kernel) {
  __m512d T0 = _mm512_unpacklo_pd(kernel.packet[0], kernel.packet[1]);
  __m512d T1 = _mm512_unpackhi_pd(kernel.packet[0], kernel.packet[1]);
  __m512d T2 = _mm512_unpacklo_pd(kernel.packet[2], kernel.packet[3]);
  __m512d T3 = _mm512_unpackhi_pd(kernel.packet[2], kernel.packet[3]);
  __m512d T4 = _mm512_unpacklo_pd(kernel.packet[4], kernel.packet[5]);
  __m512d T5 = _mm512_unpackhi_pd(kernel.packet[4], kernel.packet[5]);
  __m512d T6 = _mm512_unpacklo_pd(kernel.packet[6], kernel.packet[7]);
  __m512d T7 = _mm512_unpackhi_pd(kernel.packet[6], kernel.packet[7]);

  PacketBlock<Packet4d, 16> tmp;

  tmp.packet[0] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
                                         _mm512_extractf64x4_pd(T2, 0), 0x20);
  tmp.packet[1] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
                                         _mm512_extractf64x4_pd(T3, 0), 0x20);
  tmp.packet[2] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
                                         _mm512_extractf64x4_pd(T2, 0), 0x31);
  tmp.packet[3] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
                                         _mm512_extractf64x4_pd(T3, 0), 0x31);

  tmp.packet[4] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
                                         _mm512_extractf64x4_pd(T2, 1), 0x20);
  tmp.packet[5] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
                                         _mm512_extractf64x4_pd(T3, 1), 0x20);
  tmp.packet[6] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
                                         _mm512_extractf64x4_pd(T2, 1), 0x31);
  tmp.packet[7] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
                                         _mm512_extractf64x4_pd(T3, 1), 0x31);

  tmp.packet[8] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 0),
                                         _mm512_extractf64x4_pd(T6, 0), 0x20);
  tmp.packet[9] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 0),
                                         _mm512_extractf64x4_pd(T7, 0), 0x20);
  tmp.packet[10] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 0),
                                          _mm512_extractf64x4_pd(T6, 0), 0x31);
  tmp.packet[11] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 0),
                                          _mm512_extractf64x4_pd(T7, 0), 0x31);

  tmp.packet[12] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 1),
                                          _mm512_extractf64x4_pd(T6, 1), 0x20);
  tmp.packet[13] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 1),
                                          _mm512_extractf64x4_pd(T7, 1), 0x20);
  tmp.packet[14] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 1),
                                          _mm512_extractf64x4_pd(T6, 1), 0x31);
  tmp.packet[15] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 1),
                                          _mm512_extractf64x4_pd(T7, 1), 0x31);

  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 0, 8);
  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 1, 8);
  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 2, 8);
  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 3, 8);

  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 4, 8);
  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 5, 8);
  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 6, 8);
  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 7, 8);
}
template <>
EIGEN_STRONG_INLINE Packet16f pblend(const Selector<16>& /*ifPacket*/,
                                     const Packet16f& /*thenPacket*/,
                                     const Packet16f& /*elsePacket*/) {
  assert(false && "To be implemented");
  return Packet16f();
}
template <>
EIGEN_STRONG_INLINE Packet8d pblend(const Selector<8>& /*ifPacket*/,
                                    const Packet8d& /*thenPacket*/,
                                    const Packet8d& /*elsePacket*/) {
  assert(false && "To be implemented");
  return Packet8d();
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PACKET_MATH_AVX512_H