Eigen 3.3.3 (SparseMatrix.h)
```cpp
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEMATRIX_H
#define EIGEN_SPARSEMATRIX_H

namespace Eigen {

namespace internal {
template<typename _Scalar, int _Options, typename _StorageIndex>
struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
{
  typedef _Scalar Scalar;
  typedef _StorageIndex StorageIndex;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = Dynamic,
    Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};

template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
{
  typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType;
  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;

  typedef _Scalar Scalar;
  typedef Dense StorageKind;
  typedef _StorageIndex StorageIndex;
  typedef MatrixXpr XprKind;

  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = 1,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = 1,
    Flags = LvalueBit
  };
};

template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
 : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
{
  enum {
    Flags = 0
  };
};

} // end namespace internal

template<typename _Scalar, int _Options, typename _StorageIndex>
class SparseMatrix
  : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
{
    typedef SparseCompressedBase<SparseMatrix> Base;
    using Base::convert_index;
    friend class SparseVector<_Scalar,0,_StorageIndex>;
  public:
    using Base::isCompressed;
    using Base::nonZeros;
    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
    using Base::operator+=;
    using Base::operator-=;

    typedef MappedSparseMatrix<Scalar,Flags> Map;
    typedef Diagonal<SparseMatrix> DiagonalReturnType;
    typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
    typedef typename Base::InnerIterator InnerIterator;
    typedef typename Base::ReverseInnerIterator ReverseInnerIterator;

    using Base::IsRowMajor;
    typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
    enum {
      Options = _Options
    };

    typedef typename Base::IndexVector IndexVector;
    typedef typename Base::ScalarVector ScalarVector;
  protected:
    typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;

    Index m_outerSize;
    Index m_innerSize;
    StorageIndex* m_outerIndex;
    StorageIndex* m_innerNonZeros;     // optional, if null then the data is compressed
    Storage m_data;

  public:

    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }

    inline Index innerSize() const { return m_innerSize; }
    inline Index outerSize() const { return m_outerSize; }

    inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
    inline Scalar* valuePtr() { return m_data.valuePtr(); }

    inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
    inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }

    inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
    inline StorageIndex* outerIndexPtr() { return m_outerIndex; }

    inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
    inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }

    inline Storage& data() { return m_data; }
    inline const Storage& data() const { return m_data; }

    inline Scalar coeff(Index row, Index col) const
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
    }

    inline Scalar& coeffRef(Index row, Index col)
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      Index start = m_outerIndex[outer];
      Index end = m_innerNonZeros ?
                    m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
      if(end<=start)
        return insert(row,col);
      const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
      if((p<end) && (m_data.index(p)==inner))
        return m_data.value(p);
      else
        return insert(row,col);
    }

    Scalar& insert(Index row, Index col);

  public:

    inline void setZero()
    {
      m_data.clear();
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
      if(m_innerNonZeros)
        memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
    }

    inline void reserve(Index reserveSize)
    {
      eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
      m_data.reserve(reserveSize);
    }

    #ifdef EIGEN_PARSED_BY_DOXYGEN
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes);
    #else
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
    #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
        typename
    #endif
        SizesType::value_type())
    {
      EIGEN_UNUSED_VARIABLE(enableif);
      reserveInnerVectors(reserveSizes);
    }
    #endif // EIGEN_PARSED_BY_DOXYGEN
  protected:
    template<class SizesType>
    inline void reserveInnerVectors(const SizesType& reserveSizes)
    {
      if(isCompressed())
      {
        Index totalReserveSize = 0;
        // turn the matrix into non-compressed mode
        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();

        // temporarily use m_innerSizes to hold the new starting points.
        StorageIndex* newOuterIndex = m_innerNonZeros;

        StorageIndex count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
          totalReserveSize += reserveSizes[j];
        }
        m_data.reserve(totalReserveSize);
        StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
          for(Index i=innerNNZ-1; i>=0; --i)
          {
            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
          }
          previousOuterIndex = m_outerIndex[j];
          m_outerIndex[j] = newOuterIndex[j];
          m_innerNonZeros[j] = innerNNZ;
        }
        m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];

        m_data.resize(m_outerIndex[m_outerSize]);
      }
      else
      {
        StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
        if (!newOuterIndex) internal::throw_std_bad_alloc();

        StorageIndex count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
          StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
          count += toReserve + m_innerNonZeros[j];
        }
        newOuterIndex[m_outerSize] = count;

        m_data.resize(count);
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          Index offset = newOuterIndex[j] - m_outerIndex[j];
          if(offset>0)
          {
            StorageIndex innerNNZ = m_innerNonZeros[j];
            for(Index i=innerNNZ-1; i>=0; --i)
            {
              m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
              m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
            }
          }
        }

        std::swap(m_outerIndex, newOuterIndex);
        std::free(newOuterIndex);
      }

    }
  public:

    //--- low level purely coherent filling ---

    inline Scalar& insertBack(Index row, Index col)
    {
      return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
    }

    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
    {
      eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
      eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(Scalar(0), inner);
      return m_data.value(p);
    }

    inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
    {
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(Scalar(0), inner);
      return m_data.value(p);
    }

    inline void startVec(Index outer)
    {
      eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
      eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
      m_outerIndex[outer+1] = m_outerIndex[outer];
    }

    inline void finalize()
    {
      if(isCompressed())
      {
        StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
        Index i = m_outerSize;
        // find the last filled column
        while (i>=0 && m_outerIndex[i]==0)
          --i;
        ++i;
        while (i<=m_outerSize)
        {
          m_outerIndex[i] = size;
          ++i;
        }
      }
    }

    //---

    template<typename InputIterators>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end);

    template<typename InputIterators,typename DupFunctor>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);

    void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }

    template<typename DupFunctor>
    void collapseDuplicates(DupFunctor dup_func = DupFunctor());

    //---

    Scalar& insertByOuterInner(Index j, Index i)
    {
      return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
    }

    void makeCompressed()
    {
      if(isCompressed())
        return;

      eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);

      Index oldStart = m_outerIndex[1];
      m_outerIndex[1] = m_innerNonZeros[0];
      for(Index j=1; j<m_outerSize; ++j)
      {
        Index nextOldStart = m_outerIndex[j+1];
        Index offset = oldStart - m_outerIndex[j];
        if(offset>0)
        {
          for(Index k=0; k<m_innerNonZeros[j]; ++k)
          {
            m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
            m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
          }
        }
        m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
        oldStart = nextOldStart;
      }
      std::free(m_innerNonZeros);
      m_innerNonZeros = 0;
      m_data.resize(m_outerIndex[m_outerSize]);
      m_data.squeeze();
    }

    void uncompress()
    {
      if(m_innerNonZeros != 0)
        return;
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      for (Index i = 0; i < m_outerSize; i++)
      {
        m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
      }
    }

    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
    {
      prune(default_prunning_func(reference,epsilon));
    }

    template<typename KeepFunc>
    void prune(const KeepFunc& keep = KeepFunc())
    {
      // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
      makeCompressed();

      StorageIndex k = 0;
      for(Index j=0; j<m_outerSize; ++j)
      {
        Index previousStart = m_outerIndex[j];
        m_outerIndex[j] = k;
        Index end = m_outerIndex[j+1];
        for(Index i=previousStart; i<end; ++i)
        {
          if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
          {
            m_data.value(k) = m_data.value(i);
            m_data.index(k) = m_data.index(i);
            ++k;
          }
        }
      }
      m_outerIndex[m_outerSize] = k;
      m_data.resize(k,0);
    }

    void conservativeResize(Index rows, Index cols)
    {
      // No change
      if (this->rows() == rows && this->cols() == cols) return;

      // If one dimension is null, then there is nothing to be preserved
      if(rows==0 || cols==0) return resize(rows,cols);

      Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
      Index outerChange = IsRowMajor ?
                            rows - this->rows() : cols - this->cols();
      StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);

      // Deals with inner non zeros
      if (m_innerNonZeros)
      {
        // Resize m_innerNonZeros
        StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
        if (!newInnerNonZeros) internal::throw_std_bad_alloc();
        m_innerNonZeros = newInnerNonZeros;

        for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
          m_innerNonZeros[i] = 0;
      }
      else if (innerChange < 0)
      {
        // Inner size decreased: allocate a new m_innerNonZeros
        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();
        for(Index i = 0; i < m_outerSize; i++)
          m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
      }

      // Change the m_innerNonZeros in case of a decrease of inner size
      if (m_innerNonZeros && innerChange < 0)
      {
        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
        {
          StorageIndex &n = m_innerNonZeros[i];
          StorageIndex start = m_outerIndex[i];
          while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
        }
      }

      m_innerSize = newInnerSize;

      // Re-allocate outer index structure if necessary
      if (outerChange == 0)
        return;

      StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
      if (!newOuterIndex) internal::throw_std_bad_alloc();
      m_outerIndex = newOuterIndex;
      if (outerChange > 0)
      {
        StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
        for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
          m_outerIndex[i] = last;
      }
      m_outerSize += outerChange;
    }

    void resize(Index rows, Index cols)
    {
      const Index outerSize = IsRowMajor ? rows : cols;
      m_innerSize = IsRowMajor ?
                      cols : rows;
      m_data.clear();
      if (m_outerSize != outerSize || m_outerSize==0)
      {
        std::free(m_outerIndex);
        m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
        if (!m_outerIndex) internal::throw_std_bad_alloc();

        m_outerSize = outerSize;
      }
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
    }

    void resizeNonZeros(Index size)
    {
      m_data.resize(size);
    }

    const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }

    DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }

    inline SparseMatrix()
      : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(0, 0);
    }

    inline SparseMatrix(Index rows, Index cols)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(rows, cols);
    }

    template<typename OtherDerived>
    inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
      check_template_parameters();
      const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
      if (needToTranspose)
        *this = other.derived();
      else
      {
        #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
          EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        #endif
        internal::call_assignment_no_alias(*this, other.derived());
      }
    }

    template<typename OtherDerived, unsigned int UpLo>
    inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      Base::operator=(other);
    }

    inline SparseMatrix(const SparseMatrix& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }

    template<typename OtherDerived>
    SparseMatrix(const ReturnByValue<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      initAssignment(other);
      other.evalTo(*this);
    }

    template<typename OtherDerived>
    explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }

    inline void swap(SparseMatrix& other)
    {
      //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
      std::swap(m_outerIndex, other.m_outerIndex);
      std::swap(m_innerSize, other.m_innerSize);
      std::swap(m_outerSize, other.m_outerSize);
      std::swap(m_innerNonZeros, other.m_innerNonZeros);
      m_data.swap(other.m_data);
    }

    inline void setIdentity()
    {
      eigen_assert(rows() ==
                   cols() && "ONLY FOR SQUARED MATRICES");
      this->m_data.resize(rows());
      Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
      Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
      Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
      std::free(m_innerNonZeros);
      m_innerNonZeros = 0;
    }

    inline SparseMatrix& operator=(const SparseMatrix& other)
    {
      if (other.isRValue())
      {
        swap(other.const_cast_derived());
      }
      else if(this!=&other)
      {
        #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
          EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        #endif
        initAssignment(other);
        if(other.isCompressed())
        {
          internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
          m_data = other.m_data;
        }
        else
        {
          Base::operator=(other);
        }
      }
      return *this;
    }

    #ifndef EIGEN_PARSED_BY_DOXYGEN
    template<typename OtherDerived>
    inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
    { return Base::operator=(other.derived()); }
    #endif // EIGEN_PARSED_BY_DOXYGEN

    template<typename OtherDerived>
    EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);

    friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
    {
      EIGEN_DBG_SPARSE(
        s << "Nonzero entries:\n";
        if(m.isCompressed())
        {
          for (Index i=0; i<m.nonZeros(); ++i)
            s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
        }
        else
        {
          for (Index i=0; i<m.outerSize(); ++i)
          {
            Index p = m.m_outerIndex[i];
            Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
            Index k=p;
            for (; k<pe; ++k) {
              s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
            }
            for (; k<m.m_outerIndex[i+1]; ++k) {
              s << "(_,_) ";
            }
          }
        }
        s << std::endl;
        s << std::endl;
        s << "Outer pointers:\n";
        for (Index i=0; i<m.outerSize(); ++i) {
          s << m.m_outerIndex[i] << " ";
        }
        s << " $" << std::endl;
        if(!m.isCompressed())
        {
          s << "Inner non zeros:\n";
          for (Index i=0; i<m.outerSize(); ++i) {
            s << m.m_innerNonZeros[i] << " ";
          }
          s << " $" << std::endl;
        }
        s << std::endl;
      );
      s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
      return s;
    }

    inline ~SparseMatrix()
    {
      std::free(m_outerIndex);
      std::free(m_innerNonZeros);
    }

    Scalar sum() const;

# ifdef EIGEN_SPARSEMATRIX_PLUGIN
#   include EIGEN_SPARSEMATRIX_PLUGIN
# endif

  protected:

    template<typename Other>
    void initAssignment(const Other& other)
    {
      resize(other.rows(), other.cols());
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
    }

    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);

    class SingletonVector
    {
        StorageIndex m_index;
        StorageIndex m_value;
      public:
        typedef StorageIndex value_type;
        SingletonVector(Index i, Index v)
          : m_index(convert_index(i)), m_value(convert_index(v))
        {}

        StorageIndex operator[](Index i) const { return i==m_index ?
                                                        m_value : 0; }
    };

    EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);

  public:
    EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      eigen_assert(!isCompressed());
      eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));

      Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
      m_data.index(p) = convert_index(inner);
      return (m_data.value(p) = 0);
    }

  private:
    static void check_template_parameters()
    {
      EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
      EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
    }

    struct default_prunning_func {
      default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
      inline bool operator() (const Index&, const Index&, const Scalar& value) const
      {
        return !internal::isMuchSmallerThan(value, reference, epsilon);
      }
      Scalar reference;
      RealScalar epsilon;
    };
};

namespace internal {

template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
{
  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
  typedef typename SparseMatrixType::Scalar Scalar;
  typedef typename SparseMatrixType::StorageIndex StorageIndex;
  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());

  if(begin!=end)
  {
    // pass 1: count the nnz per inner-vector
    typename SparseMatrixType::IndexVector wi(trMat.outerSize());
    wi.setZero();
    for(InputIterator it(begin); it!=end; ++it)
    {
      eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
      wi(IsRowMajor ?
              it->col() : it->row())++;
    }

    // pass 2: insert all the elements into trMat
    trMat.reserve(wi);
    for(InputIterator it(begin); it!=end; ++it)
      trMat.insertBackUncompressed(it->row(),it->col()) = it->value();

    // pass 3:
    trMat.collapseDuplicates(dup_func);
  }

  // pass 4: transposed copy -> implicit sorting
  mat = trMat;
}

}


template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators>
void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
}

template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators,typename DupFunctor>
void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
}

template<typename Scalar, int _Options, typename _StorageIndex>
template<typename DupFunctor>
void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
{
  eigen_assert(!isCompressed());
  // TODO, in practice we should be able to use m_innerNonZeros for that task
  IndexVector wi(innerSize());
  wi.fill(-1);
  StorageIndex count = 0;
  // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
  for(Index j=0; j<outerSize(); ++j)
  {
    StorageIndex start = count;
    Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
    for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
    {
      Index i = m_data.index(k);
      if(wi(i)>=start)
      {
        // we already meet this entry => accumulate it
        m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
      }
      else
      {
        m_data.value(count) = m_data.value(k);
        m_data.index(count) = m_data.index(k);
        wi(i) = count;
        ++count;
      }
    }
    m_outerIndex[j] = start;
  }
  m_outerIndex[m_outerSize] = count;

  // turn the matrix into compressed form
  std::free(m_innerNonZeros);
  m_innerNonZeros = 0;
  m_data.resize(m_outerIndex[m_outerSize]);
}

template<typename Scalar, int _Options, typename _StorageIndex>
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif

  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
  if (needToTranspose)
  {
#ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
    EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
#endif
    // two passes algorithm:
    // 1 - compute the
    //     number of coeffs per dest inner vector
    // 2 - do the actual copy/eval
    // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
    typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
    typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
    typedef internal::evaluator<_OtherCopy> OtherCopyEval;
    OtherCopy otherCopy(other.derived());
    OtherCopyEval otherCopyEval(otherCopy);

    SparseMatrix dest(other.rows(),other.cols());
    Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();

    // pass 1
    // FIXME the above copy could be merged with that pass
    for (Index j=0; j<otherCopy.outerSize(); ++j)
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
        ++dest.m_outerIndex[it.index()];

    // prefix sum
    StorageIndex count = 0;
    IndexVector positions(dest.outerSize());
    for (Index j=0; j<dest.outerSize(); ++j)
    {
      StorageIndex tmp = dest.m_outerIndex[j];
      dest.m_outerIndex[j] = count;
      positions[j] = count;
      count += tmp;
    }
    dest.m_outerIndex[dest.outerSize()] = count;
    // alloc
    dest.m_data.resize(count);
    // pass 2
    for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
    {
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
      {
        Index pos = positions[it.index()]++;
        dest.m_data.index(pos) = j;
        dest.m_data.value(pos) = it.value();
      }
    }
    this->swap(dest);
    return *this;
  }
  else
  {
    if(other.isRValue())
    {
      initAssignment(other.derived());
    }
    // there is no special optimization
    return Base::operator=(other.derived());
  }
}

template<typename _Scalar, int _Options, typename _StorageIndex>
typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
{
  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ?
                        col : row;

  if(isCompressed())
  {
    if(nonZeros()==0)
    {
      // reserve space if not already done
      if(m_data.allocatedSize()==0)
        m_data.reserve(2*m_innerSize);

      // turn the matrix into non-compressed mode
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();

      memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));

      // pack all inner-vectors to the end of the pre-allocated space
      // and allocate the entire free-space to the first inner-vector
      StorageIndex end = convert_index(m_data.allocatedSize());
      for(Index j=1; j<=m_outerSize; ++j)
        m_outerIndex[j] = end;
    }
    else
    {
      // turn the matrix into non-compressed mode
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();
      for(Index j=0; j<m_outerSize; ++j)
        m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
    }
  }

  // check whether we can do a fast "push back" insertion
  Index data_end = m_data.allocatedSize();

  // First case: we are filling a new inner vector which is packed at the end.
  // We assume that all remaining inner-vectors are also empty and packed to the end.
  if(m_outerIndex[outer]==data_end)
  {
    eigen_internal_assert(m_innerNonZeros[outer]==0);

    // pack previous empty inner-vectors to end of the used-space
    // and allocate the entire free-space to the current inner-vector.
    StorageIndex p = convert_index(m_data.size());
    Index j = outer;
    while(j>=0 && m_innerNonZeros[j]==0)
      m_outerIndex[j--] = p;

    // push back the new element
    ++m_innerNonZeros[outer];
    m_data.append(Scalar(0), inner);

    // check for reallocation
    if(data_end != m_data.allocatedSize())
    {
      // m_data has been reallocated
      //  -> move remaining inner-vectors back to the end of the free-space
      //     so that the entire free-space is allocated to the current inner-vector.
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }
    return m_data.value(p);
  }

  // Second case: the next inner-vector is packed to the end
  // and the current inner-vector end match the used-space.
  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
  {
    eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);

    // add space for the new element
    ++m_innerNonZeros[outer];
    m_data.resize(m_data.size()+1);

    // check for reallocation
    if(data_end != m_data.allocatedSize())
    {
      // m_data has been reallocated
      //  -> move remaining inner-vectors back to the end of the free-space
      //     so that the entire free-space is allocated to the current inner-vector.
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }

    // and insert it at the right position (sorted insertion)
    Index startId = m_outerIndex[outer];
    Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
    while ( (p > startId) && (m_data.index(p-1) > inner) )
    {
      m_data.index(p) = m_data.index(p-1);
      m_data.value(p) = m_data.value(p-1);
      --p;
    }

    m_data.index(p) = convert_index(inner);
    return (m_data.value(p) = 0);
  }

  if(m_data.size() != m_data.allocatedSize())
  {
    // make sure the matrix is compatible to random un-compressed insertion:
    m_data.resize(m_data.allocatedSize());
    this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
  }

  return insertUncompressed(row,col);
}

template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
{
  eigen_assert(!isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const StorageIndex inner = convert_index(IsRowMajor ? col : row);

  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
  StorageIndex innerNNZ = m_innerNonZeros[outer];
  if(innerNNZ>=room)
  {
    // this inner vector is full, we need to reallocate the whole buffer :(
    reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
  }

  Index startId = m_outerIndex[outer];
  Index p = startId + m_innerNonZeros[outer];
  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }
  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");

  m_innerNonZeros[outer]++;

  m_data.index(p) = inner;
  return (m_data.value(p) = 0);
}

template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
{
  eigen_assert(isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  Index previousOuter = outer;
  if (m_outerIndex[outer+1]==0)
  {
    // we start a new inner vector
    while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
    {
      m_outerIndex[previousOuter] = convert_index(m_data.size());
      --previousOuter;
    }
    m_outerIndex[outer+1] = m_outerIndex[outer];
  }

  // here we have to handle the tricky case where the outerIndex array
  // starts with: [ 0 0 0 0 0 1 ...] and we are inserted in, e.g.,
  // the 2nd inner vector...
  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
                && (std::size_t(m_outerIndex[outer+1]) == m_data.size());

  std::size_t startId = m_outerIndex[outer];
  // FIXME let's make sure sizeof(long int) == sizeof(std::size_t)
  std::size_t p = m_outerIndex[outer+1];
  ++m_outerIndex[outer+1];

  double reallocRatio = 1;
  if (m_data.allocatedSize()<=m_data.size())
  {
    // if there is no preallocated memory, let's reserve a minimum of 32 elements
    if (m_data.size()==0)
    {
      m_data.reserve(32);
    }
    else
    {
      // we need to reallocate the data, to reduce multiple reallocations
      // we use a smart resize algorithm based on the current filling ratio
      // in addition, we use double to avoid integers overflows
      double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
      reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
      // furthermore we bound the realloc ratio to:
      //   1) reduce multiple minor realloc when the matrix is almost filled
      //   2) avoid to allocate too much memory when the matrix is almost empty
      reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
    }
  }
  m_data.resize(m_data.size()+1,reallocRatio);

  if (!isLastVec)
  {
    if (previousOuter==-1)
    {
      // oops wrong guess.
      // let's correct the outer offsets
      for (Index k=0; k<=(outer+1); ++k)
        m_outerIndex[k] = 0;
      Index k=outer+1;
      while(m_outerIndex[k]==0)
        m_outerIndex[k++] = 1;
      while (k<=m_outerSize && m_outerIndex[k]!=0)
        m_outerIndex[k++]++;
      p = 0;
      --k;
      k = m_outerIndex[k]-1;
      while (k>0)
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
    else
    {
      // we are not inserting into the last inner vec
      // update outer indices:
      Index j = outer+2;
      while (j<=m_outerSize && m_outerIndex[j]!=0)
        m_outerIndex[j++]++;
      --j;
      // shift data of last vecs:
      Index k = m_outerIndex[j]-1;
      while (k>=Index(p))
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
  }

  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }

  m_data.index(p) = inner;
  return (m_data.value(p) = 0);
}

namespace internal {

template<typename _Scalar, int _Options, typename _StorageIndex>
struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
  : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
{
  typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
  typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
  evaluator() : Base() {}
  explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
};

}

} // end namespace Eigen

#endif // EIGEN_SPARSEMATRIX_H
```
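The usage sketches below are editorial additions, not part of SparseMatrix.h; they only exercise the public interface declared in the listing above. This first one shows the triplet route implemented by setFromTriplets() and internal::set_from_triplets(): duplicate (row, col) pairs are combined with the default sum functor, and the result comes out sorted and compressed. The triplet list name is illustrative only.

```cpp
#include <Eigen/SparseCore>
#include <iostream>
#include <vector>

int main()
{
  typedef Eigen::Triplet<double> T;    // (row, col, value)
  std::vector<T> triplets;
  triplets.push_back(T(0, 0,  3.0));
  triplets.push_back(T(1, 2, -1.5));
  triplets.push_back(T(1, 2,  0.5));   // duplicate of (1,2): summed by the default DupFunctor
  triplets.push_back(T(4, 1,  2.0));

  Eigen::SparseMatrix<double> A(5, 3); // column-major storage by default
  A.setFromTriplets(triplets.begin(), triplets.end());

  std::cout << A.nonZeros() << "\n";   // 3 stored coefficients
  std::cout << A.coeff(1, 2) << "\n";  // -1 (the two (1,2) triplets were summed)
  return 0;
}
```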
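When the pattern is built coefficient by coefficient instead, the usual sequence is reserve() with a per-column size hint, insert() for entries that do not exist yet, and makeCompressed() to return to compressed storage. The helper below is a hypothetical sketch assembling a tridiagonal matrix.

```cpp
#include <Eigen/SparseCore>

// Hypothetical helper: assemble an n x n tridiagonal matrix with reserve()/insert().
Eigen::SparseMatrix<double> buildTridiagonal(int n)
{
  Eigen::SparseMatrix<double> A(n, n);
  A.reserve(Eigen::VectorXi::Constant(n, 3));     // expect about 3 nonzeros per column

  for (int j = 0; j < n; ++j)
    for (int i = j - 1; i <= j + 1; ++i)
      if (i >= 0 && i < n)
        A.insert(i, j) = (i == j) ? 2.0 : -1.0;   // insert() requires that (i,j) is not stored yet

  A.makeCompressed();                             // drop the reserved slack, back to compressed mode
  return A;
}
```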
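Random access has three entry points with different costs, sketched below from the behaviour of coeff(), coeffRef() and insert() in the listing: coeff() is read-only and returns zero for absent entries, coeffRef() searches first and inserts only when needed, and insert() assumes the coefficient is not stored yet.

```cpp
#include <Eigen/SparseCore>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(4, 4);

  A.insert(0, 1) = 2.0;                 // ok: (0,1) was not stored before
  A.coeffRef(0, 1) += 1.0;              // updates the existing entry in place
  double x = A.coeff(2, 3);             // read-only: returns 0, does not create an entry

  std::cout << A.coeff(0, 1) << "\n";   // 3
  std::cout << x << "\n";               // 0
  std::cout << A.nonZeros() << "\n";    // 1
  return 0;
}
```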
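Traversal of the stored entries goes through the inherited InnerIterator typedef, one inner vector (a column for the default column-major layout) at a time; printNonZeros is a hypothetical helper name.

```cpp
#include <Eigen/SparseCore>
#include <iostream>

// Hypothetical helper: print every stored coefficient of a sparse matrix.
void printNonZeros(const Eigen::SparseMatrix<double>& A)
{
  for (int k = 0; k < A.outerSize(); ++k)                                 // one outer (column) at a time
    for (Eigen::SparseMatrix<double>::InnerIterator it(A, k); it; ++it)   // stored entries of that column
      std::cout << "(" << it.row() << "," << it.col() << ") = " << it.value() << "\n";
}
```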
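prune() comes in the two flavours declared above: a (reference, epsilon) pair that drops entries much smaller than the reference value, and a generic keep-functor version. Both leave the matrix compressed because they call makeCompressed() first. The functor variant is shown with a C++11 lambda; dropNoise is a hypothetical helper name.

```cpp
#include <Eigen/SparseCore>

// Hypothetical helper: two ways of removing unwanted stored entries.
void dropNoise(Eigen::SparseMatrix<double>& A)
{
  // Keep a(i,j) only if it is not much smaller than 1.0 (default epsilon).
  A.prune(1.0);

  // Keep-functor form: retain strictly positive coefficients only.
  A.prune([](Eigen::Index /*row*/, Eigen::Index /*col*/, double value) {
    return value > 0.0;
  });
}
```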
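Finally, after makeCompressed(), valuePtr(), innerIndexPtr() and outerIndexPtr() expose the three standard compressed-column (or compressed-row) arrays, which is convenient for handing the matrix to external libraries. The sketch assumes the default StorageIndex of int; exportCSC is a hypothetical helper name.

```cpp
#include <Eigen/SparseCore>
#include <cassert>

// Hypothetical helper: read the raw CSC arrays of a compressed column-major matrix.
void exportCSC(Eigen::SparseMatrix<double>& A)
{
  A.makeCompressed();                            // ensure the inner-nonzero array is released

  const double* values = A.valuePtr();           // nonZeros() coefficient values
  const int*    inner  = A.innerIndexPtr();      // row index of each stored value
  const int*    outer  = A.outerIndexPtr();      // outerSize()+1 column start offsets

  assert(outer[A.outerSize()] == A.nonZeros());  // last offset equals the number of stored entries
  (void)values; (void)inner;
}
```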