Eigen 3.3.3
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010 Konstantinos Margaritis <markos@freevec.org>
// Heavily based on Gael's SSE version.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PACKET_MATH_NEON_H
#define EIGEN_PACKET_MATH_NEON_H

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif

#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
#endif

#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#if EIGEN_ARCH_ARM64
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
#else
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
#endif
#endif

typedef float32x2_t Packet2f;
typedef float32x4_t Packet4f;
typedef int32x4_t   Packet4i;
typedef int32x2_t   Packet2i;
typedef uint32x4_t  Packet4ui;

#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
  const Packet4f p4f_##NAME = pset1<Packet4f>(X)

#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
  const Packet4f p4f_##NAME = vreinterpretq_f32_u32(pset1<int32_t>(X))

#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
  const Packet4i p4i_##NAME = pset1<Packet4i>(X)
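// For instance, _EIGEN_DECLARE_CONST_Packet4f(ONE, 1.0f) expands to
//   const Packet4f p4f_ONE = pset1<Packet4f>(1.0f);
// i.e. a packet constant with all four lanes set to 1.0f.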
// arm64 does have the pld instruction. If available, let's trust the
// __builtin_prefetch built-in function, which is available on LLVM and GCC
// (at least).
#if EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC
#define EIGEN_ARM_PREFETCH(ADDR) __builtin_prefetch(ADDR);
#elif defined __pld
#define EIGEN_ARM_PREFETCH(ADDR) __pld(ADDR)
#elif !EIGEN_ARCH_ARM64
#define EIGEN_ARM_PREFETCH(ADDR) __asm__ __volatile__ ( " pld [%[addr]]\n" :: [addr] "r" (ADDR) : "cc" );
#else
// by default no explicit prefetching
#define EIGEN_ARM_PREFETCH(ADDR)
#endif

template<> struct packet_traits<float> : default_packet_traits
{
  typedef Packet4f type;
  typedef Packet4f half; // Packet2f intrinsics not implemented yet
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,
    HasHalfPacket = 0, // Packet2f intrinsics not implemented yet

    HasDiv = 1,
    // FIXME check the Has*
    HasSin = 0,
    HasCos = 0,
    HasLog = 0,
    HasExp = 1,
    HasSqrt = 0
  };
};
template<> struct packet_traits<int32_t> : default_packet_traits
{
  typedef Packet4i type;
  typedef Packet4i half; // Packet2i intrinsics not implemented yet
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,
    HasHalfPacket = 0 // Packet2i intrinsics not implemented yet
    // FIXME check the Has*
  };
};

#if EIGEN_GNUC_AT_MOST(4,4) && !EIGEN_COMP_LLVM
// workaround for a gcc 4.2, 4.3 and 4.4 compilation issue
EIGEN_STRONG_INLINE float32x4_t vld1q_f32(const float* x) { return ::vld1q_f32((const float32_t*)x); }
EIGEN_STRONG_INLINE float32x2_t vld1_f32(const float* x) { return ::vld1_f32((const float32_t*)x); }
EIGEN_STRONG_INLINE float32x2_t vld1_dup_f32(const float* x) { return ::vld1_dup_f32((const float32_t*)x); }
EIGEN_STRONG_INLINE void vst1q_f32(float* to, float32x4_t from) { ::vst1q_f32((float32_t*)to, from); }
EIGEN_STRONG_INLINE void vst1_f32(float* to, float32x2_t from) { ::vst1_f32((float32_t*)to, from); }
#endif

template<> struct unpacket_traits<Packet4f> { typedef float   type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; };
template<> struct unpacket_traits<Packet4i> { typedef int32_t type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; };

template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { return vdupq_n_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int32_t& from) { return vdupq_n_s32(from); }

template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a)
{
  const float32_t f[] = {0, 1, 2, 3};
  Packet4f countdown = vld1q_f32(f);
  return vaddq_f32(pset1<Packet4f>(a), countdown);
}
template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int32_t& a)
{
  const int32_t i[] = {0, 1, 2, 3};
  Packet4i countdown = vld1q_s32(i);
  return vaddq_s32(pset1<Packet4i>(a), countdown);
}
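// In other words, plset(a) yields the packet {a, a+1, a+2, a+3}.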
template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return vaddq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return vaddq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return vsubq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return vsubq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return vnegq_f32(a); }
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return vnegq_s32(a); }

template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmulq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmulq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)
{
#if EIGEN_ARCH_ARM64
  return vdivq_f32(a,b);
#else
  Packet4f inv, restep, div;

  // NEON does not offer a divide instruction; we have to use a reciprocal approximation.
  // However, NEON, in contrast to other SIMD engines (AltiVec/SSE), offers
  // a reciprocal estimate AND a reciprocal step, which saves a few instructions.
  // vrecpeq_f32() returns an estimate of 1/b, which we then fine-tune with
  // Newton-Raphson and vrecpsq_f32().
  inv = vrecpeq_f32(b);

  // This returns a correction factor, by which we have to multiply inv to get a better
  // approximation of 1/b.
  restep = vrecpsq_f32(b, inv);
  inv = vmulq_f32(restep, inv);

  // Finally, multiply a by 1/b to get the desired result of the division.
  div = vmulq_f32(a, inv);

  return div;
#endif
}

template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
{ eigen_assert(false && "packet integer division is not supported by NEON");
  return pset1<Packet4i>(0);
}
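// Since vrecpsq_f32(b, x) computes (2 - b*x), each Newton-Raphson step roughly
// doubles the number of correct bits of the reciprocal estimate. The following
// helper is an editorial sketch (hypothetical name, not part of Eigen) showing
// how a second refinement step could buy extra accuracy for two more instructions:
EIGEN_STRONG_INLINE Packet4f pdiv_refined_sketch(const Packet4f& a, const Packet4f& b)
{
  Packet4f inv = vrecpeq_f32(b);              // initial estimate of 1/b
  inv = vmulq_f32(vrecpsq_f32(b, inv), inv);  // first Newton-Raphson step: inv *= (2 - b*inv)
  inv = vmulq_f32(vrecpsq_f32(b, inv), inv);  // second step, further refining the estimate
  return vmulq_f32(a, inv);
}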
// Clang/ARM wrongly advertises __ARM_FEATURE_FMA even when it's not available,
// then implements a slow software scalar fallback calling fmaf()!
// Filed LLVM bug:
//   https://llvm.org/bugs/show_bug.cgi?id=27216
#if (defined __ARM_FEATURE_FMA) && !(EIGEN_COMP_CLANG && EIGEN_ARCH_ARM)
// See bug 936.
// FMA is available on VFPv4, i.e. when compiling with -mfpu=neon-vfpv4.
// FMA is a true fused multiply-add, i.e. only 1 rounding at the end, no intermediate rounding.
// MLA is not fused, i.e. it does 2 roundings.
// In addition to giving better accuracy, FMA also gives better performance here on a Krait (Nexus 4):
// MLA: 10 GFlop/s ; FMA: 12 GFlop/s.
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vfmaq_f32(c,a,b); }
#else
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) {
#if EIGEN_COMP_CLANG && EIGEN_ARCH_ARM
  // Clang/ARM will replace VMLA with VMUL+VADD for at least some values of -mcpu,
  // e.g. -mcpu=cortex-a8 and -mcpu=cortex-a7. Since the former is the default on
  // -march=armv7-a, that is a very common case.
  // See e.g. this thread:
  //   http://lists.llvm.org/pipermail/llvm-dev/2013-December/068806.html
  // Filed LLVM bug:
  //   https://llvm.org/bugs/show_bug.cgi?id=27219
  Packet4f r = c;
  asm volatile(
    "vmla.f32 %q[r], %q[a], %q[b]"
    : [r] "+w" (r)
    : [a] "w" (a),
      [b] "w" (b)
    : );
  return r;
#else
  return vmlaq_f32(c,a,b);
#endif
}
#endif

// No FMA instruction for int, so use MLA unconditionally.
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return vmlaq_s32(c,a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return vminq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vminq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmaxq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmaxq_s32(a,b); }

// Logical operations are not supported for float, so we have to go through
// reinterpret casts using NEON intrinsics.
template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b)
{
  return vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
}
template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vandq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b)
{
  return vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
}
template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vorrq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b)
{
  return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
}
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return veorq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b)
{
  return vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
}
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vbicq_s32(a,b); }
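// Note that pandnot(a,b) computes a & ~b, which is exactly what the BIC
// (bit clear) instruction behind vbic does.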
template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int32_t* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s32(from); }

template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int32_t* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s32(from); }

template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
{
  float32x2_t lo, hi;
  lo = vld1_dup_f32(from);
  hi = vld1_dup_f32(from+1);
  return vcombine_f32(lo, hi);
}
template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int32_t* from)
{
  int32x2_t lo, hi;
  lo = vld1_dup_s32(from);
  hi = vld1_dup_s32(from+1);
  return vcombine_s32(lo, hi);
}

template<> EIGEN_STRONG_INLINE void pstore<float>  (float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f32(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<int32_t>(int32_t* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_s32(to, from); }

template<> EIGEN_STRONG_INLINE void pstoreu<float>  (float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<int32_t>(int32_t* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); }

template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
{
  Packet4f res = pset1<Packet4f>(0.f);
  res = vsetq_lane_f32(from[0*stride], res, 0);
  res = vsetq_lane_f32(from[1*stride], res, 1);
  res = vsetq_lane_f32(from[2*stride], res, 2);
  res = vsetq_lane_f32(from[3*stride], res, 3);
  return res;
}
template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int32_t, Packet4i>(const int32_t* from, Index stride)
{
  Packet4i res = pset1<Packet4i>(0);
  res = vsetq_lane_s32(from[0*stride], res, 0);
  res = vsetq_lane_s32(from[1*stride], res, 1);
  res = vsetq_lane_s32(from[2*stride], res, 2);
  res = vsetq_lane_s32(from[3*stride], res, 3);
  return res;
}

template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
{
  to[stride*0] = vgetq_lane_f32(from, 0);
  to[stride*1] = vgetq_lane_f32(from, 1);
  to[stride*2] = vgetq_lane_f32(from, 2);
  to[stride*3] = vgetq_lane_f32(from, 3);
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<int32_t, Packet4i>(int32_t* to, const Packet4i& from, Index stride)
{
  to[stride*0] = vgetq_lane_s32(from, 0);
  to[stride*1] = vgetq_lane_s32(from, 1);
  to[stride*2] = vgetq_lane_s32(from, 2);
  to[stride*3] = vgetq_lane_s32(from, 3);
}

template<> EIGEN_STRONG_INLINE void prefetch<float>  (const float* addr) { EIGEN_ARM_PREFETCH(addr); }
template<> EIGEN_STRONG_INLINE void prefetch<int32_t>(const int32_t* addr) { EIGEN_ARM_PREFETCH(addr); }
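// A quick reference for the access primitives above: ploaddup(from) returns
// {from[0], from[0], from[1], from[1]}, and with stride == 3,
// pgather<float, Packet4f>(p, 3) collects {p[0], p[3], p[6], p[9]} while
// pscatter performs the inverse strided store.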
// FIXME: only store the first 2 elements?
template<> EIGEN_STRONG_INLINE float   pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x[4]; vst1q_f32(x, a); return x[0]; }
template<> EIGEN_STRONG_INLINE int32_t pfirst<Packet4i>(const Packet4i& a) { int32_t EIGEN_ALIGN16 x[4]; vst1q_s32(x, a); return x[0]; }

template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) {
  float32x2_t a_lo, a_hi;
  Packet4f a_r64;

  a_r64 = vrev64q_f32(a);
  a_lo = vget_low_f32(a_r64);
  a_hi = vget_high_f32(a_r64);
  return vcombine_f32(a_hi, a_lo);
}
template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) {
  int32x2_t a_lo, a_hi;
  Packet4i a_r64;

  a_r64 = vrev64q_s32(a);
  a_lo = vget_low_s32(a_r64);
  a_hi = vget_high_s32(a_r64);
  return vcombine_s32(a_hi, a_lo);
}

template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vabsq_f32(a); }
template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vabsq_s32(a); }

template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
  float32x2_t a_lo, a_hi, sum;

  a_lo = vget_low_f32(a);
  a_hi = vget_high_f32(a);
  sum = vpadd_f32(a_lo, a_hi);
  sum = vpadd_f32(sum, sum);
  return vget_lane_f32(sum, 0);
}

template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
  float32x4x2_t vtrn1, vtrn2, res1, res2;
  Packet4f sum1, sum2, sum;

  // NEON zip performs interleaving of the supplied vectors.
  // We perform two interleaves in a row to acquire the transposed vectors.
  vtrn1 = vzipq_f32(vecs[0], vecs[2]);
  vtrn2 = vzipq_f32(vecs[1], vecs[3]);
  res1 = vzipq_f32(vtrn1.val[0], vtrn2.val[0]);
  res2 = vzipq_f32(vtrn1.val[1], vtrn2.val[1]);

  // Add the resulting vectors.
  sum1 = vaddq_f32(res1.val[0], res1.val[1]);
  sum2 = vaddq_f32(res2.val[0], res2.val[1]);
  sum = vaddq_f32(sum1, sum2);

  return sum;
}

template<> EIGEN_STRONG_INLINE int32_t predux<Packet4i>(const Packet4i& a)
{
  int32x2_t a_lo, a_hi, sum;

  a_lo = vget_low_s32(a);
  a_hi = vget_high_s32(a);
  sum = vpadd_s32(a_lo, a_hi);
  sum = vpadd_s32(sum, sum);
  return vget_lane_s32(sum, 0);
}
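// vpadd sums adjacent lane pairs, so the two pairwise passes in predux reduce
// the four lanes to a single scalar: {a0+a1, a2+a3}, then a0+a1+a2+a3.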
template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
{
  int32x4x2_t vtrn1, vtrn2, res1, res2;
  Packet4i sum1, sum2, sum;

  // NEON zip performs interleaving of the supplied vectors.
  // We perform two interleaves in a row to acquire the transposed vectors.
  vtrn1 = vzipq_s32(vecs[0], vecs[2]);
  vtrn2 = vzipq_s32(vecs[1], vecs[3]);
  res1 = vzipq_s32(vtrn1.val[0], vtrn2.val[0]);
  res2 = vzipq_s32(vtrn1.val[1], vtrn2.val[1]);

  // Add the resulting vectors.
  sum1 = vaddq_s32(res1.val[0], res1.val[1]);
  sum2 = vaddq_s32(res2.val[0], res2.val[1]);
  sum = vaddq_s32(sum1, sum2);

  return sum;
}

// Other reduction functions:
// mul
template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
{
  float32x2_t a_lo, a_hi, prod;

  // Get a_lo = |a1|a2| and a_hi = |a3|a4|
  a_lo = vget_low_f32(a);
  a_hi = vget_high_f32(a);
  // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
  prod = vmul_f32(a_lo, a_hi);
  // Multiply prod with its swapped value |a2*a4|a1*a3|
  prod = vmul_f32(prod, vrev64_f32(prod));

  return vget_lane_f32(prod, 0);
}
template<> EIGEN_STRONG_INLINE int32_t predux_mul<Packet4i>(const Packet4i& a)
{
  int32x2_t a_lo, a_hi, prod;

  // Get a_lo = |a1|a2| and a_hi = |a3|a4|
  a_lo = vget_low_s32(a);
  a_hi = vget_high_s32(a);
  // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
  prod = vmul_s32(a_lo, a_hi);
  // Multiply prod with its swapped value |a2*a4|a1*a3|
  prod = vmul_s32(prod, vrev64_s32(prod));

  return vget_lane_s32(prod, 0);
}

// min
template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
{
  float32x2_t a_lo, a_hi, min;

  a_lo = vget_low_f32(a);
  a_hi = vget_high_f32(a);
  min = vpmin_f32(a_lo, a_hi);
  min = vpmin_f32(min, min);

  return vget_lane_f32(min, 0);
}

template<> EIGEN_STRONG_INLINE int32_t predux_min<Packet4i>(const Packet4i& a)
{
  int32x2_t a_lo, a_hi, min;

  a_lo = vget_low_s32(a);
  a_hi = vget_high_s32(a);
  min = vpmin_s32(a_lo, a_hi);
  min = vpmin_s32(min, min);

  return vget_lane_s32(min, 0);
}

// max
template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
{
  float32x2_t a_lo, a_hi, max;

  a_lo = vget_low_f32(a);
  a_hi = vget_high_f32(a);
  max = vpmax_f32(a_lo, a_hi);
  max = vpmax_f32(max, max);

  return vget_lane_f32(max, 0);
}

template<> EIGEN_STRONG_INLINE int32_t predux_max<Packet4i>(const Packet4i& a)
{
  int32x2_t a_lo, a_hi, max;

  a_lo = vget_low_s32(a);
  a_hi = vget_high_s32(a);
  max = vpmax_s32(a_lo, a_hi);
  max = vpmax_s32(max, max);

  return vget_lane_s32(max, 0);
}
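// Reference semantics of the reductions above: predux_mul(a) == a0*a1*a2*a3,
// predux_min/predux_max return the smallest/largest lane, and preduxp(vecs)
// packs {predux(vecs[0]), ..., predux(vecs[3])} into a single packet.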
// This PALIGN_NEON business is to work around a bug in LLVM Clang 3.0 causing
// spurious compilation errors; see bug 347 and this LLVM bug:
//   http://llvm.org/bugs/show_bug.cgi?id=11074
#define PALIGN_NEON(Offset,Type,Command) \
template<>\
struct palign_impl<Offset,Type>\
{\
  EIGEN_STRONG_INLINE static void run(Type& first, const Type& second)\
  {\
    if (Offset!=0)\
      first = Command(first, second, Offset);\
  }\
};\

PALIGN_NEON(0,Packet4f,vextq_f32)
PALIGN_NEON(1,Packet4f,vextq_f32)
PALIGN_NEON(2,Packet4f,vextq_f32)
PALIGN_NEON(3,Packet4f,vextq_f32)
PALIGN_NEON(0,Packet4i,vextq_s32)
PALIGN_NEON(1,Packet4i,vextq_s32)
PALIGN_NEON(2,Packet4i,vextq_s32)
PALIGN_NEON(3,Packet4i,vextq_s32)

#undef PALIGN_NEON

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4f,4>& kernel) {
  float32x4x2_t tmp1 = vzipq_f32(kernel.packet[0], kernel.packet[1]);
  float32x4x2_t tmp2 = vzipq_f32(kernel.packet[2], kernel.packet[3]);

  kernel.packet[0] = vcombine_f32(vget_low_f32(tmp1.val[0]), vget_low_f32(tmp2.val[0]));
  kernel.packet[1] = vcombine_f32(vget_high_f32(tmp1.val[0]), vget_high_f32(tmp2.val[0]));
  kernel.packet[2] = vcombine_f32(vget_low_f32(tmp1.val[1]), vget_low_f32(tmp2.val[1]));
  kernel.packet[3] = vcombine_f32(vget_high_f32(tmp1.val[1]), vget_high_f32(tmp2.val[1]));
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4i,4>& kernel) {
  int32x4x2_t tmp1 = vzipq_s32(kernel.packet[0], kernel.packet[1]);
  int32x4x2_t tmp2 = vzipq_s32(kernel.packet[2], kernel.packet[3]);
  kernel.packet[0] = vcombine_s32(vget_low_s32(tmp1.val[0]), vget_low_s32(tmp2.val[0]));
  kernel.packet[1] = vcombine_s32(vget_high_s32(tmp1.val[0]), vget_high_s32(tmp2.val[0]));
  kernel.packet[2] = vcombine_s32(vget_low_s32(tmp1.val[1]), vget_low_s32(tmp2.val[1]));
  kernel.packet[3] = vcombine_s32(vget_high_s32(tmp1.val[1]), vget_high_s32(tmp2.val[1]));
}
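// The following helper is an editorial sketch (hypothetical name, not part of
// Eigen) showing how ptranspose is typically used: transposing a row-major
// 4x4 float block in place, assuming `mat` is 16-byte aligned.
inline void transpose4x4_sketch(float* mat)
{
  PacketBlock<Packet4f,4> block;
  for (int i = 0; i < 4; ++i) block.packet[i] = pload<Packet4f>(mat + 4*i); // load the 4 rows
  ptranspose(block); // block.packet[i] now holds what was column i
  for (int i = 0; i < 4; ++i) pstore<float>(mat + 4*i, block.packet[i]);    // store back as rows
}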
//---------- double ----------

// Clang 3.5 in the iOS toolchain has an ICE triggered by NEON intrinsics for double.
// Confirmed at least with __apple_build_version__ = 6000054.
#ifdef __apple_build_version__
// Let's hope that by the time __apple_build_version__ hits the 601* range, the bug will be fixed.
// https://gist.github.com/yamaya/2924292 suggests that the first 3 digits are only updated with
// major toolchain updates.
#define EIGEN_APPLE_DOUBLE_NEON_BUG (__apple_build_version__ < 6010000)
#else
#define EIGEN_APPLE_DOUBLE_NEON_BUG 0
#endif

#if EIGEN_ARCH_ARM64 && !EIGEN_APPLE_DOUBLE_NEON_BUG

// Bug 907: work around missing declarations of the following two functions in the ADK.
// Defining these functions as templates ensures that if these intrinsics are
// already defined in arm_neon.h, then our workaround doesn't cause a conflict
// and has lower priority in overload resolution.
template <typename T>
uint64x2_t vreinterpretq_u64_f64(T a)
{
  return (uint64x2_t) a;
}

template <typename T>
float64x2_t vreinterpretq_f64_u64(T a)
{
  return (float64x2_t) a;
}

typedef float64x2_t Packet2d;
typedef float64x1_t Packet1d;

template<> struct packet_traits<double> : default_packet_traits
{
  typedef Packet2d type;
  typedef Packet2d half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 2,
    HasHalfPacket = 0,

    HasDiv = 1,
    // FIXME check the Has*
    HasSin = 0,
    HasCos = 0,
    HasLog = 0,
    HasExp = 0,
    HasSqrt = 0
  };
};

template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16}; typedef Packet2d half; };

template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return vdupq_n_f64(from); }

template<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a)
{
  const double countdown_raw[] = {0.0, 1.0};
  const Packet2d countdown = vld1q_f64(countdown_raw);
  return vaddq_f64(pset1<Packet2d>(a), countdown);
}
template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return vaddq_f64(a,b); }

template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return vsubq_f64(a,b); }

template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) { return vnegq_f64(a); }

template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return vmulq_f64(a,b); }

template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return vdivq_f64(a,b); }
#ifdef __ARM_FEATURE_FMA
// See bug 936. See the comment above about FMA for float.
template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vfmaq_f64(c,a,b); }
#else
template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vmlaq_f64(c,a,b); }
#endif

template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return vminq_f64(a,b); }

template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return vmaxq_f64(a,b); }

// Logical operations are not supported for double, so we have to go through
// reinterpret casts using NEON intrinsics.
template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b)
{
  return vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
}

template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b)
{
  return vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
}

template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b)
{
  return vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
}

template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b)
{
  return vreinterpretq_f64_u64(vbicq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
}

template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f64(from); }

template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f64(from); }

template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
{
  return vld1q_dup_f64(from);
}
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f64(to, from); }

template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f64(to, from); }

template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
{
  Packet2d res = pset1<Packet2d>(0.0);
  res = vsetq_lane_f64(from[0*stride], res, 0);
  res = vsetq_lane_f64(from[1*stride], res, 1);
  return res;
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
{
  to[stride*0] = vgetq_lane_f64(from, 0);
  to[stride*1] = vgetq_lane_f64(from, 1);
}
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { EIGEN_ARM_PREFETCH(addr); }
// FIXME: only store the first 2 elements?
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(a, 0); }

template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a) { return vcombine_f64(vget_high_f64(a), vget_low_f64(a)); }

template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a) { return vabsq_f64(a); }

#if EIGEN_COMP_CLANG && defined(__apple_build_version__)
// workaround for an ICE, see bug 907
template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return (vget_low_f64(a) + vget_high_f64(a))[0]; }
#else
template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return vget_lane_f64(vget_low_f64(a) + vget_high_f64(a), 0); }
#endif

template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
{
  float64x2_t trn1, trn2;

  // NEON zip performs interleaving of the supplied vectors.
  // For two 2-lane vectors, a single zip1/zip2 pair yields the transpose.
  trn1 = vzip1q_f64(vecs[0], vecs[1]);
  trn2 = vzip2q_f64(vecs[0], vecs[1]);

  // Add the resulting vectors.
  return vaddq_f64(trn1, trn2);
}
// Other reduction functions:
// mul
#if EIGEN_COMP_CLANG && defined(__apple_build_version__)
template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a) { return (vget_low_f64(a) * vget_high_f64(a))[0]; }
#else
template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a) { return vget_lane_f64(vget_low_f64(a) * vget_high_f64(a), 0); }
#endif

// min
template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(vpminq_f64(a, a), 0); }

// max
template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(vpmaxq_f64(a, a), 0); }

// This PALIGN_NEON business is to work around a bug in LLVM Clang 3.0 causing
// spurious compilation errors; see bug 347 and this LLVM bug:
//   http://llvm.org/bugs/show_bug.cgi?id=11074
#define PALIGN_NEON(Offset,Type,Command) \
template<>\
struct palign_impl<Offset,Type>\
{\
  EIGEN_STRONG_INLINE static void run(Type& first, const Type& second)\
  {\
    if (Offset!=0)\
      first = Command(first, second, Offset);\
  }\
};\

PALIGN_NEON(0,Packet2d,vextq_f64)
PALIGN_NEON(1,Packet2d,vextq_f64)
#undef PALIGN_NEON

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet2d,2>& kernel) {
  float64x2_t trn1 = vzip1q_f64(kernel.packet[0], kernel.packet[1]);
  float64x2_t trn2 = vzip2q_f64(kernel.packet[0], kernel.packet[1]);

  kernel.packet[0] = trn1;
  kernel.packet[1] = trn2;
}
#endif // EIGEN_ARCH_ARM64

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PACKET_MATH_NEON_H
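// ---------------------------------------------------------------------------
// [Editorial usage sketch, not part of Eigen] A typical round trip through the
// packet primitives defined above, assuming 16-byte-aligned buffers:
//
//   float EIGEN_ALIGN16 a[4] = {1.f, 2.f, 3.f, 4.f};
//   float EIGEN_ALIGN16 b[4] = {5.f, 6.f, 7.f, 8.f};
//   Packet4f pa = pload<Packet4f>(a);
//   Packet4f pb = pload<Packet4f>(b);
//   pstore(a, padd(pa, pb)); // a now holds {6, 8, 10, 12}
// ---------------------------------------------------------------------------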