Eigen 3.3.3: SparseBlock.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_BLOCK_H
#define EIGEN_SPARSE_BLOCK_H

namespace Eigen {

// Subset of columns or rows
template<typename XprType, int BlockRows, int BlockCols>
class BlockImpl<XprType,BlockRows,BlockCols,true,Sparse>
  : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,true> >
{
    typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
    typedef Block<XprType, BlockRows, BlockCols, true> BlockType;
  public:
    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
  protected:
    enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
    typedef SparseMatrixBase<BlockType> Base;
    using Base::convert_index;
  public:
    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)

    inline BlockImpl(XprType& xpr, Index i)
      : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
    {}

    inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
    {}

    EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
    EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

    Index nonZeros() const
    {
      typedef internal::evaluator<XprType> EvaluatorType;
      EvaluatorType matEval(m_matrix);
      Index nnz = 0;
      Index end = m_outerStart + m_outerSize.value();
      for(Index j=m_outerStart; j<end; ++j)
        for(typename EvaluatorType::InnerIterator it(matEval, j); it; ++it)
          ++nnz;
      return nnz;
    }

    inline const Scalar coeff(Index row, Index col) const
    {
      return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
    }

    inline const Scalar coeff(Index index) const
    {
      return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
    }

    inline const XprType& nestedExpression() const { return m_matrix; }
    inline XprType& nestedExpression() { return m_matrix; }
    Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
    Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
    Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
    Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

  protected:

    typename internal::ref_selector<XprType>::non_const_type m_matrix;
    Index m_outerStart;
    const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;

  protected:
    // Disable assignment with clear error message.
    // Note that simply removing operator= yields compilation errors with ICC+MSVC
    template<typename T>
    BlockImpl& operator=(const T&)
    {
      EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
      return *this;
    }
};
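// A minimal usage sketch of the class above; `A` and the sizes are
// illustrative, not part of this file. Taking an inner panel of a generic
// sparse expression yields a read-only sparse expression:
//
//   SparseMatrix<double> A(100, 100);
//   // ... fill A ...
//   Index nnz = (A * 2.0).middleCols(10, 5).nonZeros();
//
// nonZeros() here genuinely iterates over the 5 selected inner vectors, as
// implemented above, since a general expression stores no explicit count.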

/***************************************************************************
* specialization for SparseMatrix
***************************************************************************/

namespace internal {

template<typename SparseMatrixType, int BlockRows, int BlockCols>
class sparse_matrix_block_impl
  : public SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> >
{
    typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _MatrixTypeNested;
    typedef Block<SparseMatrixType, BlockRows, BlockCols, true> BlockType;
    typedef SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> > Base;
    using Base::convert_index;
  public:
    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
  protected:
    typedef typename Base::IndexVector IndexVector;
    enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
  public:

    inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index i)
      : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
    {}

    inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
    {}

    template<typename OtherDerived>
    inline BlockType& operator=(const SparseMatrixBase<OtherDerived>& other)
    {
      typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _NestedMatrixType;
      _NestedMatrixType& matrix = m_matrix;
      // This assignment is slow if this vector set is not empty
      // and/or it is not at the end of the nonzeros of the underlying matrix.

      // 1 - eval to a temporary to avoid transposition and/or aliasing issues
      Ref<const SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, StorageIndex> > tmp(other.derived());
      eigen_internal_assert(tmp.outerSize()==m_outerSize.value());

      // 2 - let's check whether there is enough allocated memory
      Index nnz        = tmp.nonZeros();
      Index start      = m_outerStart==0 ? 0 : m_matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block
      Index end        = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()];   // ending position of the current block
      Index block_size = end - start;                                                  // available room in the current block
      Index tail_size  = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end;

      Index free_size  = m_matrix.isCompressed()
                       ? Index(matrix.data().allocatedSize()) + block_size
                       : block_size;

      Index tmp_start = tmp.outerIndexPtr()[0];

      bool update_trailing_pointers = false;
      if(nnz>free_size)
      {
        // realloc manually to reduce copies
        typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz);

        internal::smart_copy(m_matrix.valuePtr(),      m_matrix.valuePtr() + start,      newdata.valuePtr());
        internal::smart_copy(m_matrix.innerIndexPtr(), m_matrix.innerIndexPtr() + start, newdata.indexPtr());

        internal::smart_copy(tmp.valuePtr() + tmp_start,      tmp.valuePtr() + tmp_start + nnz,      newdata.valuePtr() + start);
        internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, newdata.indexPtr() + start);

        internal::smart_copy(matrix.valuePtr()+end,      matrix.valuePtr()+end + tail_size,      newdata.valuePtr()+start+nnz);
        internal::smart_copy(matrix.innerIndexPtr()+end, matrix.innerIndexPtr()+end + tail_size, newdata.indexPtr()+start+nnz);

        newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz);

        matrix.data().swap(newdata);

        update_trailing_pointers = true;
      }
      else
      {
        if(m_matrix.isCompressed())
        {
          // no need to realloc, simply copy the tail at its respective position and insert tmp
          matrix.data().resize(start + nnz + tail_size);

          internal::smart_memmove(matrix.valuePtr()+end,      matrix.valuePtr() + end+tail_size,      matrix.valuePtr() + start+nnz);
          internal::smart_memmove(matrix.innerIndexPtr()+end, matrix.innerIndexPtr() + end+tail_size, matrix.innerIndexPtr() + start+nnz);

          update_trailing_pointers = true;
        }

        internal::smart_copy(tmp.valuePtr() + tmp_start,      tmp.valuePtr() + tmp_start + nnz,      matrix.valuePtr() + start);
        internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, matrix.innerIndexPtr() + start);
      }

      // update outer index pointers and innerNonZeros
      if(IsVectorAtCompileTime)
      {
        if(!m_matrix.isCompressed())
          matrix.innerNonZeroPtr()[m_outerStart] = StorageIndex(nnz);
        matrix.outerIndexPtr()[m_outerStart] = StorageIndex(start);
      }
      else
      {
        StorageIndex p = StorageIndex(start);
        for(Index k=0; k<m_outerSize.value(); ++k)
        {
          StorageIndex nnz_k = internal::convert_index<StorageIndex>(tmp.innerVector(k).nonZeros());
          if(!m_matrix.isCompressed())
            matrix.innerNonZeroPtr()[m_outerStart+k] = nnz_k;
          matrix.outerIndexPtr()[m_outerStart+k] = p;
          p += nnz_k;
        }
      }

      if(update_trailing_pointers)
      {
        StorageIndex offset = internal::convert_index<StorageIndex>(nnz - block_size);
        for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
        {
          matrix.outerIndexPtr()[k] += offset;
        }
      }

      return derived();
    }

    inline BlockType& operator=(const BlockType& other)
    {
      return operator=<BlockType>(other);
    }
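    // A minimal usage sketch of the assignment path above; `A`, `B` and the
    // sizes are illustrative, not part of this file. Overwriting a set of
    // inner vectors of a SparseMatrix routes through this operator=:
    //
    //   SparseMatrix<double> A(n, n);   // column-major: inner vectors are columns
    //   SparseMatrix<double> B(n, 3);
    //   // ... fill A and B ...
    //   A.middleCols(j, 3) = B;         // replaces columns j, j+1, j+2 of A
    //
    // If the new non-zeros do not fit in the currently allocated storage the
    // value/index arrays are reallocated wholesale; otherwise only the tail
    // behind the block is moved, which is why assigning to the last columns
    // of a compressed matrix is the cheapest case.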

    inline const Scalar* valuePtr() const
    { return m_matrix.valuePtr(); }
    inline Scalar* valuePtr()
    { return m_matrix.valuePtr(); }

    inline const StorageIndex* innerIndexPtr() const
    { return m_matrix.innerIndexPtr(); }
    inline StorageIndex* innerIndexPtr()
    { return m_matrix.innerIndexPtr(); }

    inline const StorageIndex* outerIndexPtr() const
    { return m_matrix.outerIndexPtr() + m_outerStart; }
    inline StorageIndex* outerIndexPtr()
    { return m_matrix.outerIndexPtr() + m_outerStart; }

    inline const StorageIndex* innerNonZeroPtr() const
    { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }
    inline StorageIndex* innerNonZeroPtr()
    { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }

    bool isCompressed() const { return m_matrix.innerNonZeroPtr()==0; }
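    // How the accessors above map the block onto the parent's compressed
    // storage: the block shares the parent's value and inner-index arrays
    // unchanged, and only the outer-index pointer is shifted by m_outerStart.
    // For a compressed column-major parent, the stored entries of block
    // column k are
    //
    //   valuePtr()[outerIndexPtr()[k]] ... valuePtr()[outerIndexPtr()[k+1]-1]
    //
    // (the outer-index values are absolute offsets into the shared arrays),
    // which lets the block be consumed by any code expecting a compressed
    // sparse storage layout.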

    inline Scalar& coeffRef(Index row, Index col)
    {
      return m_matrix.coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
    }

    inline const Scalar coeff(Index row, Index col) const
    {
      return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
    }

    inline const Scalar coeff(Index index) const
    {
      return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
    }

    const Scalar& lastCoeff() const
    {
      EIGEN_STATIC_ASSERT_VECTOR_ONLY(sparse_matrix_block_impl);
      eigen_assert(Base::nonZeros()>0);
      // Last stored coefficient of the single inner vector spanned by this block.
      if(m_matrix.isCompressed())
        return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
      else
        return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1];
    }

    EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
    EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

    inline const SparseMatrixType& nestedExpression() const { return m_matrix; }
    inline SparseMatrixType& nestedExpression() { return m_matrix; }
    Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
    Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
    Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
    Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

  protected:

    typename internal::ref_selector<SparseMatrixType>::non_const_type m_matrix;
    Index m_outerStart;
    const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;

};

} // namespace internal

template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
class BlockImpl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
  : public internal::sparse_matrix_block_impl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
{
  public:
    typedef _StorageIndex StorageIndex;
    typedef SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;
    typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
    inline BlockImpl(SparseMatrixType& xpr, Index i)
      : Base(xpr, i)
    {}

    inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : Base(xpr, startRow, startCol, blockRows, blockCols)
    {}

    using Base::operator=;
};

template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
class BlockImpl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
  : public internal::sparse_matrix_block_impl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
{
  public:
    typedef _StorageIndex StorageIndex;
    typedef const SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;
    typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
    inline BlockImpl(SparseMatrixType& xpr, Index i)
      : Base(xpr, i)
    {}

    inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : Base(xpr, startRow, startCol, blockRows, blockCols)
    {}

    using Base::operator=;
  private:
    template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr, Index i);
    template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr);
};

//----------

// Returns the outer-th column (resp. row) of the matrix if it is col-major (resp. row-major).
template<typename Derived>
typename SparseMatrixBase<Derived>::InnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer)
{ return InnerVectorReturnType(derived(), outer); }

// Returns the outer-th column (resp. row) of the matrix if it is col-major (resp. row-major). Read-only.
template<typename Derived>
const typename SparseMatrixBase<Derived>::ConstInnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer) const
{ return ConstInnerVectorReturnType(derived(), outer); }

// Returns the outerSize columns (resp. rows) starting at outerStart, as one block.
template<typename Derived>
typename SparseMatrixBase<Derived>::InnerVectorsReturnType
SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
{
  return Block<Derived,Dynamic,Dynamic,true>(derived(),
                                             IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
                                             IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);

}

// Returns the outerSize columns (resp. rows) starting at outerStart, as one block. Read-only.
template<typename Derived>
const typename SparseMatrixBase<Derived>::ConstInnerVectorsReturnType
SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
{
  return Block<const Derived,Dynamic,Dynamic,true>(derived(),
                                                   IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
                                                   IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);

}
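// A minimal usage sketch; `A` and the indices are illustrative. innerVector
// and innerVectors are the cheap way to address whole columns of a
// column-major matrix (or rows of a row-major one):
//
//   SparseMatrix<double> A(rows, cols);
//   // ... fill A ...
//   double s  = A.innerVector(j).sum();           // column j as an expression
//   Index nnz = A.innerVectors(j, 3).nonZeros();  // columns j..j+2 as one block
//
// Both return inner-panel Block expressions, so they hit the writable
// specialization above when A is a plain non-const SparseMatrix.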

// Generic implementation of sparse Block expression.
// Read-only.
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
class BlockImpl<XprType,BlockRows,BlockCols,InnerPanel,Sparse>
  : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,InnerPanel> >, internal::no_assignment_operator
{
    typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
    typedef SparseMatrixBase<BlockType> Base;
    using Base::convert_index;
  public:
    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)

    typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;

    // Column or row constructor
    inline BlockImpl(XprType& xpr, Index i)
      : m_matrix(xpr),
        m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? convert_index(i) : 0),
        m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? convert_index(i) : 0),
        m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
        m_blockCols(BlockCols==1 ? 1 : xpr.cols())
    {}

    // Generic constructor
    inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : m_matrix(xpr), m_startRow(convert_index(startRow)), m_startCol(convert_index(startCol)), m_blockRows(convert_index(blockRows)), m_blockCols(convert_index(blockCols))
    {}

    inline Index rows() const { return m_blockRows.value(); }
    inline Index cols() const { return m_blockCols.value(); }

    inline Scalar& coeffRef(Index row, Index col)
    {
      return m_matrix.coeffRef(row + m_startRow.value(), col + m_startCol.value());
    }

    inline const Scalar coeff(Index row, Index col) const
    {
      return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value());
    }

    inline Scalar& coeffRef(Index index)
    {
      return m_matrix.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
                               m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
    }

    inline const Scalar coeff(Index index) const
    {
      return m_matrix.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
                            m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
    }

    inline const XprType& nestedExpression() const { return m_matrix; }
    inline XprType& nestedExpression() { return m_matrix; }
    Index startRow() const { return m_startRow.value(); }
    Index startCol() const { return m_startCol.value(); }
    Index blockRows() const { return m_blockRows.value(); }
    Index blockCols() const { return m_blockCols.value(); }

  protected:
    // friend class internal::GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel>;
    friend struct internal::unary_evaluator<Block<XprType,BlockRows,BlockCols,InnerPanel>, internal::IteratorBased, Scalar >;

    Index nonZeros() const { return Dynamic; }

    typename internal::ref_selector<XprType>::non_const_type m_matrix;
    const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
    const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
    const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
    const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;

  protected:
    // Disable assignment with clear error message.
    // Note that simply removing operator= yields compilation errors with ICC+MSVC
    template<typename T>
    BlockImpl& operator=(const T&)
    {
      EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
      return *this;
    }

};
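// Blocks that are not inner panels are read-only expressions, consumed
// through the iterator-based evaluator defined below. A sketch (`A` and the
// indices are illustrative):
//
//   SparseMatrix<double> A(rows, cols);
//   // ... fill A ...
//   double s = A.block(i, j, r, c).sum();  // iterates the block's non-zeros
//   // A.block(i, j, r, c) = B;            // rejected at compile time by the
//                                          // static assertion above
//
// Only inner panels of a plain SparseMatrix (sparse_matrix_block_impl above)
// support assignment.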

namespace internal {

template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased >
  : public evaluator_base<Block<ArgType,BlockRows,BlockCols,InnerPanel> >
{
    class InnerVectorInnerIterator;
    class OuterVectorInnerIterator;
  public:
    typedef Block<ArgType,BlockRows,BlockCols,InnerPanel> XprType;
    typedef typename XprType::StorageIndex StorageIndex;
    typedef typename XprType::Scalar Scalar;

    enum {
      IsRowMajor = XprType::IsRowMajor,

      // The block is an "outer vector" when it spans a fixed inner index
      // across the outer direction, e.g. a single row of a column-major matrix.
      OuterVector =  (BlockCols==1 && ArgType::IsRowMajor)
                     | // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&".
                       // revert to || as soon as not needed anymore.
                     (BlockRows==1 && !ArgType::IsRowMajor),

      CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
      Flags = XprType::Flags
    };

    typedef typename internal::conditional<OuterVector,OuterVectorInnerIterator,InnerVectorInnerIterator>::type InnerIterator;

    explicit unary_evaluator(const XprType& op)
      : m_argImpl(op.nestedExpression()), m_block(op)
    {}

    inline Index nonZerosEstimate() const {
      Index nnz = m_block.nonZeros();
      if(nnz<0)
        return m_argImpl.nonZerosEstimate() * m_block.size() / m_block.nestedExpression().size();
      return nnz;
    }

  protected:
    typedef typename evaluator<ArgType>::InnerIterator EvalIterator;

    evaluator<ArgType> m_argImpl;
    const XprType &m_block;
};

template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::InnerVectorInnerIterator
  : public EvalIterator
{
    enum { IsRowMajor = unary_evaluator::IsRowMajor };
    const XprType& m_block;
    Index m_end;
  public:

    EIGEN_STRONG_INLINE InnerVectorInnerIterator(const unary_evaluator& aEval, Index outer)
      : EvalIterator(aEval.m_argImpl, outer + (IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol())),
        m_block(aEval.m_block),
        m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows())
    {
      // Skip the non-zeros that lie before the block's inner range.
      while( (EvalIterator::operator bool()) && (EvalIterator::index() < (IsRowMajor ? m_block.startCol() : m_block.startRow())) )
        EvalIterator::operator++();
    }

    inline StorageIndex index() const { return EvalIterator::index() - convert_index<StorageIndex>(IsRowMajor ? m_block.startCol() : m_block.startRow()); }
    inline Index outer() const { return EvalIterator::outer() - (IsRowMajor ? m_block.startRow() : m_block.startCol()); }
    inline Index row() const { return EvalIterator::row() - m_block.startRow(); }
    inline Index col() const { return EvalIterator::col() - m_block.startCol(); }

    inline operator bool() const { return EvalIterator::operator bool() && EvalIterator::index() < m_end; }
};
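// The evaluator above picks between the two iterator flavours at compile
// time: InnerVectorInnerIterator (just defined) walks along the storage
// order and merely clips the parent's iterator to the block's range, while
// OuterVectorInnerIterator (below) must probe one inner index in every
// outer vector. As a worked example of nonZerosEstimate(): a block covering
// a quarter of a parent holding roughly 1000 stored entries is estimated at
// roughly 250 non-zeros; this is only a sizing hint, not an exact count.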

template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::OuterVectorInnerIterator
{
    enum { IsRowMajor = unary_evaluator::IsRowMajor };
    const unary_evaluator& m_eval;
    Index m_outerPos;
    const Index m_innerIndex;
    Index m_end;
    EvalIterator m_it;
  public:

    EIGEN_STRONG_INLINE OuterVectorInnerIterator(const unary_evaluator& aEval, Index outer)
      : m_eval(aEval),
        m_outerPos( (IsRowMajor ? aEval.m_block.startCol() : aEval.m_block.startRow()) ),
        m_innerIndex(IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol()),
        m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows()),
        m_it(m_eval.m_argImpl, m_outerPos)
    {
      EIGEN_UNUSED_VARIABLE(outer);
      eigen_assert(outer==0);

      while(m_it && m_it.index() < m_innerIndex) ++m_it;
      if((!m_it) || (m_it.index()!=m_innerIndex))
        ++(*this);
    }

    inline StorageIndex index() const { return convert_index<StorageIndex>(m_outerPos - (IsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow())); }
    inline Index outer() const { return 0; }
    inline Index row() const { return IsRowMajor ? 0 : index(); }
    inline Index col() const { return IsRowMajor ? index() : 0; }

    inline Scalar value() const { return m_it.value(); }
    inline Scalar& valueRef() { return m_it.valueRef(); }

    inline OuterVectorInnerIterator& operator++()
    {
      // search next non-zero entry
      while(++m_outerPos<m_end)
      {
        // Restart iterator at the next inner-vector:
        m_it.~EvalIterator();
        ::new (&m_it) EvalIterator(m_eval.m_argImpl, m_outerPos);
        // search for the key m_innerIndex in the current outer-vector
        while(m_it && m_it.index() < m_innerIndex) ++m_it;
        if(m_it && m_it.index()==m_innerIndex) break;
      }
      return *this;
    }

    inline operator bool() const { return m_outerPos < m_end; }
};
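// Cost note for OuterVectorInnerIterator: each operator++ re-creates the
// wrapped iterator (via placement new) on the next outer vector and scans it
// for the single inner index it needs, so traversing one row of a
// column-major matrix touches every column in the block's range. Prefer
// iterating along the storage order when possible; `A` is illustrative:
//
//   SparseMatrix<double> A(n, n);  // column-major
//   double r = A.row(i).sum();     // slow: probes all n columns
//   double c = A.col(j).sum();     // fast: one contiguous index range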

template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
struct unary_evaluator<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
  : evaluator<SparseCompressedBase<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >
{
  typedef Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType;
  typedef evaluator<SparseCompressedBase<XprType> > Base;
  explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
};

template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
struct unary_evaluator<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
  : evaluator<SparseCompressedBase<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >
{
  typedef Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType;
  typedef evaluator<SparseCompressedBase<XprType> > Base;
  explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
};

} // end namespace internal


} // end namespace Eigen

#endif // EIGEN_SPARSE_BLOCK_H
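// Putting the pieces together, a minimal end-to-end sketch; all names and
// sizes are illustrative, and this commentary is not part of the header:
//
//   #include <Eigen/SparseCore>
//   using namespace Eigen;
//
//   int main()
//   {
//     SparseMatrix<double> A(8, 8), B(8, 2);
//     A.insert(0, 2) = 1.0;  B.insert(3, 0) = 2.0;
//     A.makeCompressed();    B.makeCompressed();
//
//     A.middleCols(2, 2) = B;                // writable inner panel (sparse_matrix_block_impl)
//     double s = A.block(1, 1, 4, 4).sum();  // read-only generic block, iterator-based
//     Index n = A.innerVector(2).nonZeros(); // single column of a col-major matrix
//     return int(s) + int(n);
//   }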