Eigen 3.3.3: `Eigen/src/Core/arch/AVX512/MathFunctions.h`
```cpp
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Pedro Gonnet (pedro.gonnet@gmail.com)
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef THIRD_PARTY_EIGEN3_EIGEN_SRC_CORE_ARCH_AVX512_MATHFUNCTIONS_H_
#define THIRD_PARTY_EIGEN3_EIGEN_SRC_CORE_ARCH_AVX512_MATHFUNCTIONS_H_

namespace Eigen {

namespace internal {

// Disable the code for older versions of gcc that don't support many of the
// required avx512 intrinsics.
#if EIGEN_GNUC_AT_LEAST(5, 3)

#define _EIGEN_DECLARE_CONST_Packet16f(NAME, X) \
  const Packet16f p16f_##NAME = pset1<Packet16f>(X)

#define _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(NAME, X) \
  const Packet16f p16f_##NAME = (__m512)pset1<Packet16i>(X)

#define _EIGEN_DECLARE_CONST_Packet8d(NAME, X) \
  const Packet8d p8d_##NAME = pset1<Packet8d>(X)

#define _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(NAME, X) \
  const Packet8d p8d_##NAME = _mm512_castsi512_pd(_mm512_set1_epi64(X))

// Natural logarithm
// Computes log(x) as log(2^e * m) = C*e + log(m), where the constant
// C = log(2) and m is in the range [sqrt(1/2),sqrt(2)). In this range, the
// logarithm can be easily approximated by a polynomial centered on m=1 for
// stability.
#if defined(EIGEN_VECTORIZE_AVX512DQ)
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
plog<Packet16f>(const Packet16f& _x) {
  Packet16f x = _x;
  _EIGEN_DECLARE_CONST_Packet16f(1, 1.0f);
  _EIGEN_DECLARE_CONST_Packet16f(half, 0.5f);
  _EIGEN_DECLARE_CONST_Packet16f(126f, 126.0f);

  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(inv_mant_mask, ~0x7f800000);

  // The smallest non-denormalized float number.
  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(min_norm_pos, 0x00800000);
  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(minus_inf, 0xff800000);
  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(nan, 0x7fc00000);

  // Polynomial coefficients.
  _EIGEN_DECLARE_CONST_Packet16f(cephes_SQRTHF, 0.707106781186547524f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p0, 7.0376836292E-2f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p1, -1.1514610310E-1f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p2, 1.1676998740E-1f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p3, -1.2420140846E-1f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p4, +1.4249322787E-1f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p5, -1.6668057665E-1f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p6, +2.0000714765E-1f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p7, -2.4999993993E-1f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p8, +3.3333331174E-1f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_q1, -2.12194440e-4f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_q2, 0.693359375f);

  // invalid_mask is set to true when x is negative or NaN.
  __mmask16 invalid_mask =
      _mm512_cmp_ps_mask(x, _mm512_setzero_ps(), _CMP_NGE_UQ);
  __mmask16 iszero_mask =
      _mm512_cmp_ps_mask(x, _mm512_setzero_ps(), _CMP_EQ_UQ);

  // Truncate input values to the minimum positive normal.
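  // Zeros, denormals and negative inputs are clamped to the smallest positive
  // normal here so that the exponent extraction below stays well defined; the
  // proper results (-inf for zero, NaN for negative or NaN arguments) are
  // blended back in at the end via iszero_mask and invalid_mask.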
  x = pmax(x, p16f_min_norm_pos);

  // Extract the shifted exponents.
  Packet16f emm0 = _mm512_cvtepi32_ps(_mm512_srli_epi32((__m512i)x, 23));
  Packet16f e = _mm512_sub_ps(emm0, p16f_126f);

  // Set the exponents to -1, i.e. the values are now in the range [0.5,1).
  x = _mm512_and_ps(x, p16f_inv_mant_mask);
  x = _mm512_or_ps(x, p16f_half);

  // part2: Shift the inputs from the range [0.5,1) to [sqrt(1/2),sqrt(2))
  // and shift by -1. The values are then centered around 0, which improves
  // the stability of the polynomial evaluation.
  //   if( x < SQRTHF ) {
  //     e -= 1;
  //     x = x + x - 1.0;
  //   } else { x = x - 1.0; }
  __mmask16 mask = _mm512_cmp_ps_mask(x, p16f_cephes_SQRTHF, _CMP_LT_OQ);
  Packet16f tmp = _mm512_mask_blend_ps(mask, _mm512_setzero_ps(), x);
  x = psub(x, p16f_1);
  e = psub(e, _mm512_mask_blend_ps(mask, _mm512_setzero_ps(), p16f_1));
  x = padd(x, tmp);

  Packet16f x2 = pmul(x, x);
  Packet16f x3 = pmul(x2, x);

  // Evaluate the polynomial approximant of degree 8 in three parts, probably
  // to improve instruction-level parallelism.
  Packet16f y, y1, y2;
  y = pmadd(p16f_cephes_log_p0, x, p16f_cephes_log_p1);
  y1 = pmadd(p16f_cephes_log_p3, x, p16f_cephes_log_p4);
  y2 = pmadd(p16f_cephes_log_p6, x, p16f_cephes_log_p7);
  y = pmadd(y, x, p16f_cephes_log_p2);
  y1 = pmadd(y1, x, p16f_cephes_log_p5);
  y2 = pmadd(y2, x, p16f_cephes_log_p8);
  y = pmadd(y, x3, y1);
  y = pmadd(y, x3, y2);
  y = pmul(y, x3);

  // Add the logarithm of the exponent back to the result of the interpolation.
  y1 = pmul(e, p16f_cephes_log_q1);
  tmp = pmul(x2, p16f_half);
  y = padd(y, y1);
  x = psub(x, tmp);
  y2 = pmul(e, p16f_cephes_log_q2);
  x = padd(x, y);
  x = padd(x, y2);

  // Filter out invalid inputs, i.e. negative arguments yield NaN and zero
  // arguments yield -inf.
  return _mm512_mask_blend_ps(
      iszero_mask, _mm512_mask_blend_ps(invalid_mask, x, p16f_nan),
      p16f_minus_inf);
}
#endif

// Exponential function. Works by writing "x = m*log(2) + r" where
// "m = floor(x/log(2)+1/2)" and "r" is the remainder. The result is then
// "exp(x) = 2^m*exp(r)", where r is in the range [-log(2)/2, log(2)/2].
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
pexp<Packet16f>(const Packet16f& _x) {
  _EIGEN_DECLARE_CONST_Packet16f(1, 1.0f);
  _EIGEN_DECLARE_CONST_Packet16f(half, 0.5f);
  _EIGEN_DECLARE_CONST_Packet16f(127, 127.0f);

  _EIGEN_DECLARE_CONST_Packet16f(exp_hi, 88.3762626647950f);
  _EIGEN_DECLARE_CONST_Packet16f(exp_lo, -88.3762626647949f);

  _EIGEN_DECLARE_CONST_Packet16f(cephes_LOG2EF, 1.44269504088896341f);

  _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p0, 1.9875691500E-4f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p1, 1.3981999507E-3f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p2, 8.3334519073E-3f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p3, 4.1665795894E-2f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p4, 1.6666665459E-1f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p5, 5.0000001201E-1f);

  // Clamp x.
  Packet16f x = pmax(pmin(_x, p16f_exp_hi), p16f_exp_lo);

  // Express exp(x) as exp(m*ln(2) + r), start by extracting
  // m = floor(x/ln(2) + 0.5).
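  // Since m is the integer nearest to x/ln(2), the remainder r = x - m*ln(2)
  // computed below satisfies |r| <= ln(2)/2 ~= 0.3466, the range over which
  // the short polynomial approximation of exp(r) used below is accurate.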
  Packet16f m = _mm512_floor_ps(pmadd(x, p16f_cephes_LOG2EF, p16f_half));

  // Get r = x - m*ln(2). Note that we can do this without losing more than one
  // ulp precision due to the FMA instruction.
  _EIGEN_DECLARE_CONST_Packet16f(nln2, -0.6931471805599453f);
  Packet16f r = _mm512_fmadd_ps(m, p16f_nln2, x);
  Packet16f r2 = pmul(r, r);

  // TODO(gonnet): Split into odd/even polynomials and try to exploit
  // instruction-level parallelism.
  Packet16f y = p16f_cephes_exp_p0;
  y = pmadd(y, r, p16f_cephes_exp_p1);
  y = pmadd(y, r, p16f_cephes_exp_p2);
  y = pmadd(y, r, p16f_cephes_exp_p3);
  y = pmadd(y, r, p16f_cephes_exp_p4);
  y = pmadd(y, r, p16f_cephes_exp_p5);
  y = pmadd(y, r2, r);
  y = padd(y, p16f_1);

  // Build emm0 = 2^m.
  Packet16i emm0 = _mm512_cvttps_epi32(padd(m, p16f_127));
  emm0 = _mm512_slli_epi32(emm0, 23);

  // Return 2^m * exp(r).
  return pmax(pmul(y, _mm512_castsi512_ps(emm0)), _x);
}

/*template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d
pexp<Packet8d>(const Packet8d& _x) {
  Packet8d x = _x;

  _EIGEN_DECLARE_CONST_Packet8d(1, 1.0);
  _EIGEN_DECLARE_CONST_Packet8d(2, 2.0);

  _EIGEN_DECLARE_CONST_Packet8d(exp_hi, 709.437);
  _EIGEN_DECLARE_CONST_Packet8d(exp_lo, -709.436139303);

  _EIGEN_DECLARE_CONST_Packet8d(cephes_LOG2EF, 1.4426950408889634073599);

  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_p0, 1.26177193074810590878e-4);
  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_p1, 3.02994407707441961300e-2);
  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_p2, 9.99999999999999999910e-1);

  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q0, 3.00198505138664455042e-6);
  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q1, 2.52448340349684104192e-3);
  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q2, 2.27265548208155028766e-1);
  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q3, 2.00000000000000000009e0);

  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_C1, 0.693145751953125);
  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_C2, 1.42860682030941723212e-6);

  // clamp x
  x = pmax(pmin(x, p8d_exp_hi), p8d_exp_lo);

  // Express exp(x) as exp(g + n*log(2)).
  const Packet8d n =
      _mm512_mul_round_pd(p8d_cephes_LOG2EF, x, _MM_FROUND_TO_NEAREST_INT);

  // Get the remainder modulo log(2), i.e. the "g" described above. Subtract
  // n*log(2) out in two steps, i.e. n*C1 + n*C2, C1+C2=log2 to get the last
  // digits right.
  const Packet8d nC1 = pmul(n, p8d_cephes_exp_C1);
  const Packet8d nC2 = pmul(n, p8d_cephes_exp_C2);
  x = psub(x, nC1);
  x = psub(x, nC2);

  const Packet8d x2 = pmul(x, x);

  // Evaluate the numerator polynomial of the rational interpolant.
  Packet8d px = p8d_cephes_exp_p0;
  px = pmadd(px, x2, p8d_cephes_exp_p1);
  px = pmadd(px, x2, p8d_cephes_exp_p2);
  px = pmul(px, x);

  // Evaluate the denominator polynomial of the rational interpolant.
  Packet8d qx = p8d_cephes_exp_q0;
  qx = pmadd(qx, x2, p8d_cephes_exp_q1);
  qx = pmadd(qx, x2, p8d_cephes_exp_q2);
  qx = pmadd(qx, x2, p8d_cephes_exp_q3);

  // I don't really get this bit, copied from the SSE2 routines, so...
  // TODO(gonnet): Figure out what is going on here, perhaps find a better
  // rational interpolant?
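  // (The next two lines appear to implement the classic Cephes identity
  // exp(g) = 1 + 2*P/(Q - P): since exp(g) = (1 + tanh(g/2)) / (1 - tanh(g/2))
  // and px/qx = g*P(g^2)/Q(g^2) approximates tanh(g/2), the quotient
  // px/(qx - px) recovers (exp(g) - 1)/2.)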
  x = _mm512_div_pd(px, psub(qx, px));
  x = pmadd(p8d_2, x, p8d_1);

  // Build e=2^n.
  const Packet8d e = _mm512_castsi512_pd(_mm512_slli_epi64(
      _mm512_add_epi64(_mm512_cvtpd_epi64(n), _mm512_set1_epi64(1023)), 52));

  // Construct the result 2^n * exp(g) = e * x. The max is used to catch
  // non-finite values in the input.
  return pmax(pmul(x, e), _x);
}*/

// Functions for sqrt.
// The EIGEN_FAST_MATH version uses the _mm512_rsqrt14_ps/_mm512_rsqrt14_pd
// approximation and one (float) or two (double) steps of Newton's method, at a
// small cost in accuracy compared to the exact _mm512_sqrt_* solution. The
// main advantage of this approach is not just speed, but also the fact that it
// can be inlined and pipelined with other computations, further reducing its
// effective latency.
#if EIGEN_FAST_MATH
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
psqrt<Packet16f>(const Packet16f& _x) {
  _EIGEN_DECLARE_CONST_Packet16f(one_point_five, 1.5f);
  _EIGEN_DECLARE_CONST_Packet16f(minus_half, -0.5f);
  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(flt_min, 0x00800000);

  Packet16f neg_half = pmul(_x, p16f_minus_half);

  // Select only the inverse sqrt of positive normal inputs (denormals are
  // flushed to zero and cause infs as well).
  __mmask16 non_zero_mask = _mm512_cmp_ps_mask(_x, p16f_flt_min, _CMP_GE_OQ);
  Packet16f x = _mm512_mask_blend_ps(non_zero_mask, _mm512_setzero_ps(),
                                     _mm512_rsqrt14_ps(_x));

  // Do a single step of Newton's iteration.
  x = pmul(x, pmadd(neg_half, pmul(x, x), p16f_one_point_five));

  // Multiply the original _x by its reciprocal square root to extract the
  // square root.
  return pmul(_x, x);
}

template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d
psqrt<Packet8d>(const Packet8d& _x) {
  _EIGEN_DECLARE_CONST_Packet8d(one_point_five, 1.5);
  _EIGEN_DECLARE_CONST_Packet8d(minus_half, -0.5);
  _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(dbl_min, 0x0010000000000000LL);

  Packet8d neg_half = pmul(_x, p8d_minus_half);

  // Select only the inverse sqrt of positive normal inputs (denormals are
  // flushed to zero and cause infs as well).
  __mmask8 non_zero_mask = _mm512_cmp_pd_mask(_x, p8d_dbl_min, _CMP_GE_OQ);
  Packet8d x = _mm512_mask_blend_pd(non_zero_mask, _mm512_setzero_pd(),
                                    _mm512_rsqrt14_pd(_x));

  // Do a first step of Newton's iteration.
  x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five));

  // Do a second step of Newton's iteration.
  x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five));

  // Multiply the original _x by its reciprocal square root to extract the
  // square root.
  return pmul(_x, x);
}
#else
template <>
EIGEN_STRONG_INLINE Packet16f psqrt<Packet16f>(const Packet16f& x) {
  return _mm512_sqrt_ps(x);
}
template <>
EIGEN_STRONG_INLINE Packet8d psqrt<Packet8d>(const Packet8d& x) {
  return _mm512_sqrt_pd(x);
}
#endif

// Functions for rsqrt.
// Almost identical to the sqrt routine, just leave out the last multiplication
// and fill in NaN/Inf where needed.
// Note that this function only exists as an iterative version for doubles
// since there is no instruction for directly computing the reciprocal square
// root in AVX-512.
#if EIGEN_FAST_MATH
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
prsqrt<Packet16f>(const Packet16f& _x) {
  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(inf, 0x7f800000);
  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(nan, 0x7fc00000);
  _EIGEN_DECLARE_CONST_Packet16f(one_point_five, 1.5f);
  _EIGEN_DECLARE_CONST_Packet16f(minus_half, -0.5f);
  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(flt_min, 0x00800000);

  Packet16f neg_half = pmul(_x, p16f_minus_half);

  // Select only the inverse sqrt of positive normal inputs (denormals are
  // flushed to zero and cause infs as well).
  __mmask16 le_zero_mask = _mm512_cmp_ps_mask(_x, p16f_flt_min, _CMP_LT_OQ);
  Packet16f x = _mm512_mask_blend_ps(le_zero_mask, _mm512_rsqrt14_ps(_x),
                                     _mm512_setzero_ps());

  // Fill in NaNs and Infs for the negative/zero entries.
  __mmask16 neg_mask = _mm512_cmp_ps_mask(_x, _mm512_setzero_ps(), _CMP_LT_OQ);
  Packet16f infs_and_nans = _mm512_mask_blend_ps(
      neg_mask,
      _mm512_mask_blend_ps(le_zero_mask, _mm512_setzero_ps(), p16f_inf),
      p16f_nan);

  // Do a single step of Newton's iteration.
  x = pmul(x, pmadd(neg_half, pmul(x, x), p16f_one_point_five));

  // Insert NaNs and Infs in all the right places.
  return _mm512_mask_blend_ps(le_zero_mask, x, infs_and_nans);
}

template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d
prsqrt<Packet8d>(const Packet8d& _x) {
  _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(inf, 0x7ff0000000000000LL);
  _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(nan, 0x7ff1000000000000LL);
  _EIGEN_DECLARE_CONST_Packet8d(one_point_five, 1.5);
  _EIGEN_DECLARE_CONST_Packet8d(minus_half, -0.5);
  _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(dbl_min, 0x0010000000000000LL);

  Packet8d neg_half = pmul(_x, p8d_minus_half);

  // Select only the inverse sqrt of positive normal inputs (denormals are
  // flushed to zero and cause infs as well).
  __mmask8 le_zero_mask = _mm512_cmp_pd_mask(_x, p8d_dbl_min, _CMP_LT_OQ);
  Packet8d x = _mm512_mask_blend_pd(le_zero_mask, _mm512_rsqrt14_pd(_x),
                                    _mm512_setzero_pd());

  // Fill in NaNs and Infs for the negative/zero entries.
  __mmask8 neg_mask = _mm512_cmp_pd_mask(_x, _mm512_setzero_pd(), _CMP_LT_OQ);
  Packet8d infs_and_nans = _mm512_mask_blend_pd(
      neg_mask,
      _mm512_mask_blend_pd(le_zero_mask, _mm512_setzero_pd(), p8d_inf),
      p8d_nan);

  // Do a first step of Newton's iteration.
  x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five));

  // Do a second step of Newton's iteration.
  x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five));

  // Insert NaNs and Infs in all the right places.
  return _mm512_mask_blend_pd(le_zero_mask, x, infs_and_nans);
}
#else
template <>
EIGEN_STRONG_INLINE Packet16f prsqrt<Packet16f>(const Packet16f& x) {
  return _mm512_rsqrt28_ps(x);
}
#endif
#endif

}  // end namespace internal

}  // end namespace Eigen

#endif  // THIRD_PARTY_EIGEN3_EIGEN_SRC_CORE_ARCH_AVX512_MATHFUNCTIONS_H_
```
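In the EIGEN_FAST_MATH branches above, psqrt and prsqrt refine the roughly 14-bit `_mm512_rsqrt14_*` estimate of 1/sqrt(a) with Newton steps of the form x = x*(1.5 - 0.5*a*x*x), each of which roughly doubles the number of correct bits. For reference, the sketch below (not part of the file) shows one way these internal packet routines could be exercised against the scalar standard library; it assumes an Eigen 3.3 build with AVX-512 enabled (e.g. `-mavx512f -mavx512dq`) so that `Packet16f` exists, and `Eigen::internal` is not a stable public API, so treat the calls as illustrative of this version only.

```cpp
// Minimal usage sketch (illustrative, not part of Eigen): load 16 floats into
// a Packet16f, run the vectorized exp/log/sqrt, and print the difference from
// the scalar std:: results.
#include <Eigen/Core>
#include <cmath>
#include <cstdio>

int main() {
  using namespace Eigen::internal;

  alignas(64) float in[16], e[16], l[16], s[16];
  for (int i = 0; i < 16; ++i) in[i] = 0.25f * (i + 1);  // positive inputs

  Packet16f x = pload<Packet16f>(in);  // aligned load into a __m512 packet
  pstore(e, pexp(x));    // dispatches to pexp<Packet16f> above
  pstore(l, plog(x));    // plog<Packet16f> requires EIGEN_VECTORIZE_AVX512DQ
  pstore(s, psqrt(x));   // rsqrt14 + Newton step when EIGEN_FAST_MATH is set

  for (int i = 0; i < 16; ++i) {
    std::printf("x=%5.2f  dexp=%11.3e  dlog=%11.3e  dsqrt=%11.3e\n", in[i],
                e[i] - std::exp(in[i]), l[i] - std::log(in[i]),
                s[i] - std::sqrt(in[i]));
  }
  return 0;
}
```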