Eigen 3.3.3
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
// Copyright (C) 2012-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.


#ifndef EIGEN_SPARSE_LU_H
#define EIGEN_SPARSE_LU_H

namespace Eigen {

template <typename _MatrixType, typename _OrderingType = COLAMDOrdering<typename _MatrixType::StorageIndex> > class SparseLU;
template <typename MappedSparseMatrixType> struct SparseLUMatrixLReturnType;
template <typename MatrixLType, typename MatrixUType> struct SparseLUMatrixUReturnType;

template <typename _MatrixType, typename _OrderingType>
class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >, public internal::SparseLUImpl<typename _MatrixType::Scalar, typename _MatrixType::StorageIndex>
{
  protected:
    typedef SparseSolverBase<SparseLU<_MatrixType,_OrderingType> > APIBase;
    using APIBase::m_isInitialized;
  public:
    using APIBase::_solve_impl;

    typedef _MatrixType MatrixType;
    typedef _OrderingType OrderingType;
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::RealScalar RealScalar;
    typedef typename MatrixType::StorageIndex StorageIndex;
    typedef SparseMatrix<Scalar,ColMajor,StorageIndex> NCMatrix;
    typedef internal::MappedSuperNodalMatrix<Scalar, StorageIndex> SCMatrix;
    typedef Matrix<Scalar,Dynamic,1> ScalarVector;
    typedef Matrix<StorageIndex,Dynamic,1> IndexVector;
    typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType;
    typedef internal::SparseLUImpl<Scalar, StorageIndex> Base;

    enum {
      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
    };

  public:
    SparseLU():m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
    {
      initperfvalues();
    }
    explicit SparseLU(const MatrixType& matrix)
      : m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
    {
      initperfvalues();
      compute(matrix);
    }

    ~SparseLU()
    {
      // Free all explicit dynamic pointers
    }

    void analyzePattern (const MatrixType& matrix);
    void factorize (const MatrixType& matrix);
    void simplicialfactorize(const MatrixType& matrix);

    void compute (const MatrixType& matrix)
    {
      // Analyze
      analyzePattern(matrix);
      // Factorize
      factorize(matrix);
    }

    inline Index rows() const { return m_mat.rows(); }
    inline Index cols() const { return m_mat.cols(); }
    void isSymmetric(bool sym)
    {
      m_symmetricmode = sym;
    }

    SparseLUMatrixLReturnType<SCMatrix> matrixL() const
    {
      return SparseLUMatrixLReturnType<SCMatrix>(m_Lstore);
    }
    SparseLUMatrixUReturnType<SCMatrix,MappedSparseMatrix<Scalar,ColMajor,StorageIndex> > matrixU() const
    {
      return SparseLUMatrixUReturnType<SCMatrix, MappedSparseMatrix<Scalar,ColMajor,StorageIndex> >(m_Lstore, m_Ustore);
    }

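    // Editor's note (not part of the original header): a minimal usage sketch.
    // Assuming a square, compressed SparseMatrix<double> A and a VectorXd b of
    // matching size, the typical call sequence is roughly:
    //
    //   SparseLU<SparseMatrix<double> > lu;
    //   lu.analyzePattern(A);          // symbolic step, reusable for matrices with the same pattern
    //   lu.factorize(A);               // numeric step (or lu.compute(A) to do both)
    //   if(lu.info() == Success)
    //   {
    //     VectorXd x = lu.solve(b);    // solves A*x = b
    //   }
    //
    // The objects returned by matrixL()/matrixU() above are lightweight views on the
    // internal supernodal storage; they provide solveInPlace() rather than a full
    // SparseMatrix interface.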
    inline const PermutationType& rowsPermutation() const
    {
      return m_perm_r;
    }
    inline const PermutationType& colsPermutation() const
    {
      return m_perm_c;
    }
    void setPivotThreshold(const RealScalar& thresh)
    {
      m_diagpivotthresh = thresh;
    }

#ifdef EIGEN_PARSED_BY_DOXYGEN
    template<typename Rhs>
    inline const Solve<SparseLU, Rhs> solve(const MatrixBase<Rhs>& B) const;
#endif // EIGEN_PARSED_BY_DOXYGEN

    ComputationInfo info() const
    {
      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
      return m_info;
    }

    std::string lastErrorMessage() const
    {
      return m_lastError;
    }

    template<typename Rhs, typename Dest>
    bool _solve_impl(const MatrixBase<Rhs> &B, MatrixBase<Dest> &X_base) const
    {
      Dest& X(X_base.derived());
      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first");
      EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,
                          THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);

      // Permute the right hand side to form X = Pr*B
      // on return, X is overwritten by the computed solution
      X.resize(B.rows(),B.cols());

      // this ugly const_cast_derived() helps to detect aliasing when applying the permutations
      for(Index j = 0; j < B.cols(); ++j)
        X.col(j) = rowsPermutation() * B.const_cast_derived().col(j);

      // Forward substitution with L, then backward substitution with U
      this->matrixL().solveInPlace(X);
      this->matrixU().solveInPlace(X);

      // Permute back the solution
      for (Index j = 0; j < B.cols(); ++j)
        X.col(j) = colsPermutation().inverse() * X.col(j);

      return true;
    }

    Scalar absDeterminant()
    {
      using std::abs;
      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
      // Initialize with the determinant of the row matrix
      Scalar det = Scalar(1.);
      // Note that the diagonal blocks of U are stored in supernodes,
      // which are available in the L part :)
      for (Index j = 0; j < this->cols(); ++j)
      {
        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
        {
          if(it.index() == j)
          {
            det *= abs(it.value());
            break;
          }
        }
      }
      return det;
    }

    Scalar logAbsDeterminant() const
    {
      using std::log;
      using std::abs;

      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
      Scalar det = Scalar(0.);
      for (Index j = 0; j < this->cols(); ++j)
      {
        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
        {
          if(it.row() < j) continue;
          if(it.row() == j)
          {
            det += log(abs(it.value()));
            break;
          }
        }
      }
      return det;
    }

    Scalar signDeterminant()
    {
      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
      // Initialize with the determinant of the row matrix
      Index det = 1;
      // Note that the diagonal blocks of U are stored in supernodes,
      // which are available in the L part :)
      for (Index j = 0; j < this->cols(); ++j)
      {
        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
        {
          if(it.index() == j)
          {
            if(it.value()<0)
              det = -det;
            else if(it.value()==0)
              return 0;
            break;
          }
        }
      }
      return det * m_detPermR * m_detPermC;
    }

    Scalar determinant()
    {
      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
      // Initialize with the determinant of the row matrix
      Scalar det = Scalar(1.);
      // Note that the diagonal blocks of U are stored in supernodes,
      // which are available in the L part :)
      for (Index j = 0; j < this->cols(); ++j)
      {
        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
        {
          if(it.index() == j)
          {
            det *= it.value();
            break;
          }
        }
      }
      return (m_detPermR * m_detPermC) > 0 ? det : -det;
    }

  protected:
    // Functions
    void initperfvalues()
    {
      m_perfv.panel_size = 16;
      m_perfv.relax = 1;
      m_perfv.maxsuper = 128;
      m_perfv.rowblk = 16;
      m_perfv.colblk = 8;
      m_perfv.fillfactor = 20;
    }

    // Variables
    mutable ComputationInfo m_info;
    bool m_factorizationIsOk;
    bool m_analysisIsOk;
    std::string m_lastError;
    NCMatrix m_mat;                 // The input (permuted) matrix
    SCMatrix m_Lstore;              // The lower triangular matrix (supernodal)
    MappedSparseMatrix<Scalar,ColMajor,StorageIndex> m_Ustore; // The upper triangular matrix
    PermutationType m_perm_c;       // Column permutation
    PermutationType m_perm_r;       // Row permutation
    IndexVector m_etree;            // Column elimination tree

    typename Base::GlobalLU_t m_glu;

    // SparseLU options
    bool m_symmetricmode;
    // values for performance
    internal::perfvalues m_perfv;
    RealScalar m_diagpivotthresh;   // Specifies the threshold used for a diagonal entry to be an acceptable pivot
    Index m_nnzL, m_nnzU;           // Nonzeros in L and U factors
    Index m_detPermR, m_detPermC;   // Determinants of the permutation matrices
  private:
    // Disable copy constructor
    SparseLU (const SparseLU& );

}; // End class SparseLU



// Functions needed by the analysis phase
template <typename MatrixType, typename OrderingType>
void SparseLU<MatrixType, OrderingType>::analyzePattern(const MatrixType& mat)
{

  //TODO It is possible as in SuperLU to compute row and column scaling vectors to equilibrate the matrix mat.

  // Firstly, copy the whole input matrix.
  m_mat = mat;

  // Compute fill-in ordering
  OrderingType ord;
  ord(m_mat,m_perm_c);

  // Apply the permutation to the columns of the input matrix
  if (m_perm_c.size())
  {
    m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers. FIXME : This vector is filled but not subsequently used.
    // Then, permute only the column pointers
    ei_declare_aligned_stack_constructed_variable(StorageIndex,outerIndexPtr,mat.cols()+1,mat.isCompressed()?const_cast<StorageIndex*>(mat.outerIndexPtr()):0);

    // If the input matrix 'mat' is uncompressed, then the outer-indices do not match the ones of m_mat, and a copy is thus needed.
    if(!mat.isCompressed())
      IndexVector::Map(outerIndexPtr, mat.cols()+1) = IndexVector::Map(m_mat.outerIndexPtr(),mat.cols()+1);

    // Apply the permutation and compute the nnz per column.
    for (Index i = 0; i < mat.cols(); i++)
    {
      m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
      m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
    }
  }

  // Compute the column elimination tree of the permuted matrix
  IndexVector firstRowElt;
  internal::coletree(m_mat, m_etree,firstRowElt);

  // In symmetric mode, do not do postorder here
  if (!m_symmetricmode) {
    IndexVector post, iwork;
    // Post order etree
    internal::treePostorder(StorageIndex(m_mat.cols()), m_etree, post);


    // Renumber etree in postorder
    Index m = m_mat.cols();
    iwork.resize(m+1);
    for (Index i = 0; i < m; ++i) iwork(post(i)) = post(m_etree(i));
    m_etree = iwork;

    // Postmultiply A*Pc by post, i.e. reorder the matrix according to the postorder of the etree
    PermutationType post_perm(m);
    for (Index i = 0; i < m; i++)
      post_perm.indices()(i) = post(i);

    // Combine the two permutations : postorder the permutation for future use
    if(m_perm_c.size()) {
      m_perm_c = post_perm * m_perm_c;
    }

  } // end postordering

  m_analysisIsOk = true;
}

// Functions needed by the numerical factorization phase

template <typename MatrixType, typename OrderingType>
void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
{
  using internal::emptyIdxLU;
  eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");
  eigen_assert((matrix.rows() == matrix.cols()) && "Only for squared matrices");

  typedef typename IndexVector::Scalar StorageIndex;

  m_isInitialized = true;


  // Apply the column permutation computed in analyzepattern()
  //   m_mat = matrix * m_perm_c.inverse();
  m_mat = matrix;
  if (m_perm_c.size())
  {
    m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers.
    // Then, permute only the column pointers
    const StorageIndex * outerIndexPtr;
    if (matrix.isCompressed()) outerIndexPtr = matrix.outerIndexPtr();
    else
    {
      StorageIndex* outerIndexPtr_t = new StorageIndex[matrix.cols()+1];
      for(Index i = 0; i <= matrix.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i];
      outerIndexPtr = outerIndexPtr_t;
    }
    for (Index i = 0; i < matrix.cols(); i++)
    {
      m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
      m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
    }
    if(!matrix.isCompressed()) delete[] outerIndexPtr;
  }
  else
  { //FIXME This should not be needed if the empty permutation is handled transparently
    m_perm_c.resize(matrix.cols());
    for(StorageIndex i = 0; i < matrix.cols(); ++i) m_perm_c.indices()(i) = i;
  }

  Index m = m_mat.rows();
  Index n = m_mat.cols();
  Index nnz = m_mat.nonZeros();
  Index maxpanel = m_perfv.panel_size * m;
  // Allocate working storage common to the factor routines
  Index lwork = 0;
  Index info = Base::memInit(m, n, nnz, lwork, m_perfv.fillfactor, m_perfv.panel_size, m_glu);
  if (info)
  {
    m_lastError = "UNABLE TO ALLOCATE WORKING MEMORY\n\n" ;
    m_factorizationIsOk = false;
    return ;
  }

  // Set up pointers for integer working arrays
  IndexVector segrep(m); segrep.setZero();
  IndexVector parent(m); parent.setZero();
  IndexVector xplore(m); xplore.setZero();
  IndexVector repfnz(maxpanel);
  IndexVector panel_lsub(maxpanel);
  IndexVector xprune(n); xprune.setZero();
  IndexVector marker(m*internal::LUNoMarker); marker.setZero();

  repfnz.setConstant(-1);
  panel_lsub.setConstant(-1);

  // Set up pointers for scalar working arrays
  ScalarVector dense;
  dense.setZero(maxpanel);
  ScalarVector tempv;
  tempv.setZero(internal::LUnumTempV(m, m_perfv.panel_size, m_perfv.maxsuper, /*m_perfv.rowblk*/m) );

  // Compute the inverse of perm_c
  PermutationType iperm_c(m_perm_c.inverse());

  // Identify initial relaxed snodes
  IndexVector relax_end(n);
  if ( m_symmetricmode == true )
    Base::heap_relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);
  else
    Base::relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);


  m_perm_r.resize(m);
  m_perm_r.indices().setConstant(-1);
  marker.setConstant(-1);
  m_detPermR = 1; // Record the determinant of the row permutation
  m_glu.supno(0) = emptyIdxLU; m_glu.xsup.setConstant(0);
  m_glu.xsup(0) = m_glu.xlsub(0) = m_glu.xusub(0) = m_glu.xlusup(0) = Index(0);

  // Work on one 'panel' at a time. A panel is one of the following :
  //  (a) a relaxed supernode at the bottom of the etree, or
  //  (b) panel_size contiguous columns, <panel_size> defined by the user
  Index jcol;
  IndexVector panel_histo(n);
  Index pivrow; // Pivotal row number in the original row matrix
  Index nseg1; // Number of segments in U-column above panel row jcol
  Index nseg; // Number of segments in each U-column
  Index irep;
  Index i, k, jj;
  for (jcol = 0; jcol < n; )
  {
    // Adjust panel size so that a panel won't overlap with the next relaxed snode.
    Index panel_size = m_perfv.panel_size; // upper bound on panel width
    for (k = jcol + 1; k < (std::min)(jcol+panel_size, n); k++)
    {
      if (relax_end(k) != emptyIdxLU)
      {
        panel_size = k - jcol;
        break;
      }
    }
    if (k == n)
      panel_size = n - jcol;

    // Symbolic outer factorization on a panel of columns
    Base::panel_dfs(m, panel_size, jcol, m_mat, m_perm_r.indices(), nseg1, dense, panel_lsub, segrep, repfnz, xprune, marker, parent, xplore, m_glu);

    // Numeric sup-panel updates in topological order
    Base::panel_bmod(m, panel_size, jcol, nseg1, dense, tempv, segrep, repfnz, m_glu);

    // Sparse LU within the panel, and below the panel diagonal
    for ( jj = jcol; jj< jcol + panel_size; jj++)
    {
      k = (jj - jcol) * m; // Column index for w-wide arrays

      nseg = nseg1; // begin after all the panel segments
      // Depth-first-search for the current column
      VectorBlock<IndexVector> panel_lsubk(panel_lsub, k, m);
      VectorBlock<IndexVector> repfnz_k(repfnz, k, m);
      info = Base::column_dfs(m, jj, m_perm_r.indices(), m_perfv.maxsuper, nseg, panel_lsubk, segrep, repfnz_k, xprune, marker, parent, xplore, m_glu);
      if ( info )
      {
        m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_DFS() ";
        m_info = NumericalIssue;
        m_factorizationIsOk = false;
        return;
      }
      // Numeric updates to this column
      VectorBlock<ScalarVector> dense_k(dense, k, m);
      VectorBlock<IndexVector> segrep_k(segrep, nseg1, m-nseg1);
      info = Base::column_bmod(jj, (nseg - nseg1), dense_k, tempv, segrep_k, repfnz_k, jcol, m_glu);
      if ( info )
      {
        m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_BMOD() ";
        m_info = NumericalIssue;
        m_factorizationIsOk = false;
        return;
      }

      // Copy the U-segments to ucol(*)
      info = Base::copy_to_ucol(jj, nseg, segrep, repfnz_k ,m_perm_r.indices(), dense_k, m_glu);
      if ( info )
      {
        m_lastError = "UNABLE TO EXPAND MEMORY IN COPY_TO_UCOL() ";
        m_info = NumericalIssue;
        m_factorizationIsOk = false;
        return;
      }

      // Form the L-segment
      info = Base::pivotL(jj, m_diagpivotthresh, m_perm_r.indices(), iperm_c.indices(), pivrow, m_glu);
      if ( info )
      {
        m_lastError = "THE MATRIX IS STRUCTURALLY SINGULAR ... ZERO COLUMN AT ";
        std::ostringstream returnInfo;
        returnInfo << info;
        m_lastError += returnInfo.str();
        m_info = NumericalIssue;
        m_factorizationIsOk = false;
        return;
      }

      // Update the determinant of the row permutation matrix
      // FIXME: the following test is not correct, we should probably take iperm_c into account and pivrow is not directly the row pivot.
      if (pivrow != jj) m_detPermR = -m_detPermR;

      // Prune columns (0:jj-1) using column jj
      Base::pruneL(jj, m_perm_r.indices(), pivrow, nseg, segrep, repfnz_k, xprune, m_glu);

      // Reset repfnz for this column
      for (i = 0; i < nseg; i++)
      {
        irep = segrep(i);
        repfnz_k(irep) = emptyIdxLU;
      }
    } // end SparseLU within the panel
    jcol += panel_size; // Move to the next panel
  } // end for -- end elimination

  m_detPermR = m_perm_r.determinant();
  m_detPermC = m_perm_c.determinant();

  // Count the number of nonzeros in factors
  Base::countnz(n, m_nnzL, m_nnzU, m_glu);
  // Apply permutation to the L subscripts
  Base::fixupL(n, m_perm_r.indices(), m_glu);

  // Create supernode matrix L
  m_Lstore.setInfos(m, n, m_glu.lusup, m_glu.xlusup, m_glu.lsub, m_glu.xlsub, m_glu.supno, m_glu.xsup);
  // Create the column major upper sparse matrix U;
  new (&m_Ustore) MappedSparseMatrix<Scalar, ColMajor, StorageIndex> ( m, n, m_nnzU, m_glu.xusub.data(), m_glu.usub.data(), m_glu.ucol.data() );

  m_info = Success;
  m_factorizationIsOk = true;
}

template<typename MappedSupernodalType>
struct SparseLUMatrixLReturnType : internal::no_assignment_operator
{
  typedef typename MappedSupernodalType::Scalar Scalar;
  explicit SparseLUMatrixLReturnType(const MappedSupernodalType& mapL) : m_mapL(mapL)
  { }
  Index rows() { return m_mapL.rows(); }
  Index cols() { return m_mapL.cols(); }
  template<typename Dest>
  void solveInPlace( MatrixBase<Dest> &X) const
  {
    m_mapL.solveInPlace(X);
  }
  const MappedSupernodalType& m_mapL;
};

template<typename MatrixLType, typename MatrixUType>
struct SparseLUMatrixUReturnType : internal::no_assignment_operator
{
  typedef typename MatrixLType::Scalar Scalar;
  SparseLUMatrixUReturnType(const MatrixLType& mapL, const MatrixUType& mapU)
    : m_mapL(mapL),m_mapU(mapU)
  { }
  Index rows() { return m_mapL.rows(); }
  Index cols() { return m_mapL.cols(); }

  template<typename Dest> void solveInPlace(MatrixBase<Dest> &X) const
  {
    Index nrhs = X.cols();
    Index n = X.rows();
    // Backward solve with U
    for (Index k = m_mapL.nsuper(); k >= 0; k--)
    {
      Index fsupc = m_mapL.supToCol()[k];
      Index lda = m_mapL.colIndexPtr()[fsupc+1] - m_mapL.colIndexPtr()[fsupc]; // leading dimension
      Index nsupc = m_mapL.supToCol()[k+1] - fsupc;
      Index luptr = m_mapL.colIndexPtr()[fsupc];

      if (nsupc == 1)
      {
        for (Index j = 0; j < nrhs; j++)
        {
          X(fsupc, j) /= m_mapL.valuePtr()[luptr];
        }
      }
      else
      {
        Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
        Map< Matrix<Scalar,Dynamic,Dest::ColsAtCompileTime, ColMajor>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
        U = A.template triangularView<Upper>().solve(U);
      }

      for (Index j = 0; j < nrhs; ++j)
      {
        for (Index jcol = fsupc; jcol < fsupc + nsupc; jcol++)
        {
          typename MatrixUType::InnerIterator it(m_mapU, jcol);
          for ( ; it; ++it)
          {
            Index irow = it.index();
            X(irow, j) -= X(jcol, j) * it.value();
          }
        }
      }
    } // End For U-solve
  }
  const MatrixLType& m_mapL;
  const MatrixUType& m_mapU;
};

} // End namespace Eigen

#endif
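Editor's note: the solve path implemented by _solve_impl above can also be driven by hand through the public accessors. The sketch below is illustrative only and is not part of the original header; the function name solveWithFactors and the variables lu and b are placeholders, and lu is assumed to hold an already computed factorization of a square SparseMatrix<double>.

  #include <Eigen/SparseLU>

  using namespace Eigen;

  // Mirrors SparseLU::_solve_impl: apply the row permutation, perform the two
  // triangular solves with the supernodal factors, then undo the fill-reducing
  // column permutation.
  VectorXd solveWithFactors(const SparseLU<SparseMatrix<double> >& lu, const VectorXd& b)
  {
    VectorXd x = lu.rowsPermutation() * b;        // x = Pr * b
    lu.matrixL().solveInPlace(x);                 // forward substitution with L
    lu.matrixU().solveInPlace(x);                 // backward substitution with U
    return lu.colsPermutation().inverse() * x;    // x = Pc^-1 * x
  }

In practice one simply calls lu.compute(A) followed by lu.solve(b); the helper above only makes the roles of rowsPermutation(), colsPermutation(), matrixL() and matrixU() explicit.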