Eigen 3.3.3 (SparseLU_gemm_kernel.h)
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSELU_GEMM_KERNEL_H
#define EIGEN_SPARSELU_GEMM_KERNEL_H

namespace Eigen {

namespace internal {

/** \internal
  * A general matrix-matrix product kernel (C += A*B) tuned for the SparseLU
  * factorization: A, B, and C are dense column-major panels, and lda and ldc
  * are expected to be multiples of the packet size so that every column of A
  * and C shares the same alignment (see the assertion below).
  */
template<typename Scalar>
EIGEN_DONT_INLINE
void sparselu_gemm(Index m, Index n, Index d, const Scalar* A, Index lda, const Scalar* B, Index ldb, Scalar* C, Index ldc)
{
  using namespace Eigen::internal;

  typedef typename packet_traits<Scalar>::type Packet;
  enum {
    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
    PacketSize = packet_traits<Scalar>::size,
    PM = 8,                             // peeling in M
    RN = 2,                             // register blocking
    RK = NumberOfRegisters>=16 ? 4 : 2, // register blocking
    BM = 4096/sizeof(Scalar),           // number of rows of A-C per chunk
    SM = PM*PacketSize                  // step along M
  };
  Index d_end = (d/RK)*RK;    // number of columns of A (rows of B) suitable for full register blocking
  Index n_end = (n/RN)*RN;    // number of columns of B-C suitable for processing RN columns at once
  Index i0 = internal::first_default_aligned(A,m);

  eigen_internal_assert(((lda%PacketSize)==0) && ((ldc%PacketSize)==0) && (i0==internal::first_default_aligned(C,m)));

  // handle the non-aligned rows of A and C without any optimization:
  for(Index i=0; i<i0; ++i)
  {
    for(Index j=0; j<n; ++j)
    {
      Scalar c = C[i+j*ldc];
      for(Index k=0; k<d; ++k)
        c += B[k+j*ldb] * A[i+k*lda];
      C[i+j*ldc] = c;
    }
  }
  // process the remaining rows per chunk of BM rows
  for(Index ib=i0; ib<m; ib+=BM)
  {
    Index actual_b = std::min<Index>(BM, m-ib);             // actual number of rows
    Index actual_b_end1 = (actual_b/SM)*SM;                 // actual number of rows suitable for peeling
    Index actual_b_end2 = (actual_b/PacketSize)*PacketSize; // actual number of rows suitable for vectorization

    // Let's process two columns of B-C at once
    for(Index j=0; j<n_end; j+=RN)
    {
      const Scalar* Bc0 = B+(j+0)*ldb;
      const Scalar* Bc1 = B+(j+1)*ldb;

      for(Index k=0; k<d_end; k+=RK)
      {
        // load and expand a RN x RK block of B
        Packet b00, b10, b20, b30, b01, b11, b21, b31;
                  { b00 = pset1<Packet>(Bc0[0]); }
                  { b10 = pset1<Packet>(Bc0[1]); }
        if(RK==4) { b20 = pset1<Packet>(Bc0[2]); }
        if(RK==4) { b30 = pset1<Packet>(Bc0[3]); }
                  { b01 = pset1<Packet>(Bc1[0]); }
                  { b11 = pset1<Packet>(Bc1[1]); }
        if(RK==4) { b21 = pset1<Packet>(Bc1[2]); }
        if(RK==4) { b31 = pset1<Packet>(Bc1[3]); }

        Packet a0, a1, a2, a3, c0, c1, t0, t1;

        const Scalar* A0 = A+ib+(k+0)*lda;
        const Scalar* A1 = A+ib+(k+1)*lda;
        const Scalar* A2 = A+ib+(k+2)*lda;
        const Scalar* A3 = A+ib+(k+3)*lda;

        Scalar* C0 = C+ib+(j+0)*ldc;
        Scalar* C1 = C+ib+(j+1)*ldc;

        a0 = pload<Packet>(A0);
        a1 = pload<Packet>(A1);
        if(RK==4)
        {
          a2 = pload<Packet>(A2);
          a3 = pload<Packet>(A3);
        }
        else
        {
          // workaround "may be used uninitialized in this function" warning
          a2 = a3 = a0;
        }
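
        // KMADD(c, a, b, tmp) computes c += a*b on packets through an explicit
        // temporary (tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp)), presumably
        // spelled out this way to pin down one mul and one add per step on
        // architectures without FMA. WORK(I) then updates one PacketSize-wide
        // slice of the two destination columns C0/C1: it reloads the current
        // C packets, accumulates the products of the A packets with the
        // broadcast B coefficients, and preloads the A packets of the next
        // slice so that loads overlap with the arithmetic.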
#define KMADD(c, a, b, tmp) {tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp);}
#define WORK(I) \
          c0 = pload<Packet>(C0+i+(I)*PacketSize);   \
          c1 = pload<Packet>(C1+i+(I)*PacketSize);   \
          KMADD(c0, a0, b00, t0)                     \
          KMADD(c1, a0, b01, t1)                     \
          a0 = pload<Packet>(A0+i+(I+1)*PacketSize); \
          KMADD(c0, a1, b10, t0)                     \
          KMADD(c1, a1, b11, t1)                     \
          a1 = pload<Packet>(A1+i+(I+1)*PacketSize); \
          if(RK==4){ KMADD(c0, a2, b20, t0) }        \
          if(RK==4){ KMADD(c1, a2, b21, t1) }        \
          if(RK==4){ a2 = pload<Packet>(A2+i+(I+1)*PacketSize); } \
          if(RK==4){ KMADD(c0, a3, b30, t0) }        \
          if(RK==4){ KMADD(c1, a3, b31, t1) }        \
          if(RK==4){ a3 = pload<Packet>(A3+i+(I+1)*PacketSize); } \
          pstore(C0+i+(I)*PacketSize, c0);           \
          pstore(C1+i+(I)*PacketSize, c1)

        // process rows of A' - C' with aggressive vectorization and peeling
        for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
        {
          EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL1");
          prefetch((A0+i+(5)*PacketSize));
          prefetch((A1+i+(5)*PacketSize));
          if(RK==4) prefetch((A2+i+(5)*PacketSize));
          if(RK==4) prefetch((A3+i+(5)*PacketSize));

          WORK(0);
          WORK(1);
          WORK(2);
          WORK(3);
          WORK(4);
          WORK(5);
          WORK(6);
          WORK(7);
        }
        // process the remaining rows with vectorization only
        for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize)
        {
          WORK(0);
        }
#undef WORK
        // process the remaining rows without vectorization
        for(Index i=actual_b_end2; i<actual_b; ++i)
        {
          if(RK==4)
          {
            C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]+A2[i]*Bc0[2]+A3[i]*Bc0[3];
            C1[i] += A0[i]*Bc1[0]+A1[i]*Bc1[1]+A2[i]*Bc1[2]+A3[i]*Bc1[3];
          }
          else
          {
            C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1];
            C1[i] += A0[i]*Bc1[0]+A1[i]*Bc1[1];
          }
        }

        Bc0 += RK;
        Bc1 += RK;
      } // peeled loop on k
    } // peeled loop on the columns j
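    // At this point at most one column of B-C remains (RN==2); it is handled
    // below as a matrix-vector product. Likewise, the RK-blocked loops leave
    // rd = d-d_end < RK trailing columns of A (rows of B), which are folded
    // in at the very end through simple Map-based vector updates.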
    // process the last column (we now perform a matrix-vector product)
    if((n-n_end)>0)
    {
      const Scalar* Bc0 = B+(n-1)*ldb;

      for(Index k=0; k<d_end; k+=RK)
      {
        // load and expand a 1 x RK block of B
        Packet b00, b10, b20, b30;
        b00 = pset1<Packet>(Bc0[0]);
        b10 = pset1<Packet>(Bc0[1]);
        if(RK==4) b20 = pset1<Packet>(Bc0[2]);
        if(RK==4) b30 = pset1<Packet>(Bc0[3]);

        Packet a0, a1, a2, a3, c0, t0/*, t1*/;

        const Scalar* A0 = A+ib+(k+0)*lda;
        const Scalar* A1 = A+ib+(k+1)*lda;
        const Scalar* A2 = A+ib+(k+2)*lda;
        const Scalar* A3 = A+ib+(k+3)*lda;

        Scalar* C0 = C+ib+(n_end)*ldc;

        a0 = pload<Packet>(A0);
        a1 = pload<Packet>(A1);
        if(RK==4)
        {
          a2 = pload<Packet>(A2);
          a3 = pload<Packet>(A3);
        }
        else
        {
          // workaround "may be used uninitialized in this function" warning
          a2 = a3 = a0;
        }

#define WORK(I) \
        c0 = pload<Packet>(C0+i+(I)*PacketSize);   \
        KMADD(c0, a0, b00, t0)                     \
        a0 = pload<Packet>(A0+i+(I+1)*PacketSize); \
        KMADD(c0, a1, b10, t0)                     \
        a1 = pload<Packet>(A1+i+(I+1)*PacketSize); \
        if(RK==4){ KMADD(c0, a2, b20, t0) }        \
        if(RK==4){ a2 = pload<Packet>(A2+i+(I+1)*PacketSize); } \
        if(RK==4){ KMADD(c0, a3, b30, t0) }        \
        if(RK==4){ a3 = pload<Packet>(A3+i+(I+1)*PacketSize); } \
        pstore(C0+i+(I)*PacketSize, c0);

        // aggressive vectorization and peeling
        for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
        {
          EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL2");
          WORK(0);
          WORK(1);
          WORK(2);
          WORK(3);
          WORK(4);
          WORK(5);
          WORK(6);
          WORK(7);
        }
        // vectorization only
        for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize)
        {
          WORK(0);
        }
        // remaining scalars
        for(Index i=actual_b_end2; i<actual_b; ++i)
        {
          if(RK==4)
            C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]+A2[i]*Bc0[2]+A3[i]*Bc0[3];
          else
            C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1];
        }

        Bc0 += RK;
#undef WORK
      }
    }

    // process the last columns of A, corresponding to the last rows of B
    Index rd = d-d_end;
    if(rd>0)
    {
      for(Index j=0; j<n; ++j)
      {
        enum {
          Alignment = PacketSize>1 ? Aligned : 0
        };
        typedef Map<Matrix<Scalar,Dynamic,1>, Alignment > MapVector;
        typedef Map<const Matrix<Scalar,Dynamic,1>, Alignment > ConstMapVector;
        if(rd==1)       MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b);

        else if(rd==2)  MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b)
                                                        + B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b);

        else            MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b)
                                                        + B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b)
                                                        + B[2+d_end+j*ldb] * ConstMapVector(A+(d_end+2)*lda+ib, actual_b);
      }
    }

  } // blocking on the rows of A and C
}
#undef KMADD

} // namespace internal

} // namespace Eigen

#endif // EIGEN_SPARSELU_GEMM_KERNEL_H
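
As a quick sanity check, the kernel can be compared against Eigen's regular dense product. The driver below is a minimal sketch, not part of the original file: it assumes that including <Eigen/SparseLU> makes internal::sparselu_gemm visible, and that heap-allocated MatrixXd storage meets the kernel's alignment expectations (lda and ldc multiples of the packet size, with A and C equally aligned).

// Hypothetical stand-alone check of sparselu_gemm (C += A*B).
#include <iostream>
#include <Eigen/Dense>
#include <Eigen/SparseLU>   // assumed to pull in SparseLU_gemm_kernel.h

int main()
{
  const Eigen::Index m = 16, n = 3, d = 5;  // m: a multiple of the packet size
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(m, d);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(d, n);
  Eigen::MatrixXd C = Eigen::MatrixXd::Random(m, n);
  Eigen::MatrixXd ref = C + A*B;            // reference result

  // column-major, contiguous panels: lda = ldc = m, ldb = d
  Eigen::internal::sparselu_gemm<double>(m, n, d, A.data(), m, B.data(), d, C.data(), m);

  std::cout << "max abs error: " << (C - ref).cwiseAbs().maxCoeff() << "\n";
  return 0;
}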