Eigen 3.3.3 — annotated source listing: Eigen/src/SparseCore/SparseAssign.h
00001 // This file is part of Eigen, a lightweight C++ template library 00002 // for linear algebra. 00003 // 00004 // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr> 00005 // 00006 // This Source Code Form is subject to the terms of the Mozilla 00007 // Public License v. 2.0. If a copy of the MPL was not distributed 00008 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 00009 00010 #ifndef EIGEN_SPARSEASSIGN_H 00011 #define EIGEN_SPARSEASSIGN_H 00012 00013 namespace Eigen { 00014 00015 template<typename Derived> 00016 template<typename OtherDerived> 00017 Derived& SparseMatrixBase<Derived>::operator=(const EigenBase<OtherDerived> &other) 00018 { 00019 internal::call_assignment_no_alias(derived(), other.derived()); 00020 return derived(); 00021 } 00022 00023 template<typename Derived> 00024 template<typename OtherDerived> 00025 Derived& SparseMatrixBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other) 00026 { 00027 // TODO use the evaluator mechanism 00028 other.evalTo(derived()); 00029 return derived(); 00030 } 00031 00032 template<typename Derived> 00033 template<typename OtherDerived> 00034 inline Derived& SparseMatrixBase<Derived>::operator=(const SparseMatrixBase<OtherDerived>& other) 00035 { 00036 // by default sparse evaluation do not alias, so we can safely bypass the generic call_assignment routine 00037 internal::Assignment<Derived,OtherDerived,internal::assign_op<Scalar,typename OtherDerived::Scalar> > 00038 ::run(derived(), other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>()); 00039 return derived(); 00040 } 00041 00042 template<typename Derived> 00043 inline Derived& SparseMatrixBase<Derived>::operator=(const Derived& other) 00044 { 00045 internal::call_assignment_no_alias(derived(), other.derived()); 00046 return derived(); 00047 } 00048 00049 namespace internal { 00050 00051 template<> 00052 struct storage_kind_to_evaluator_kind<Sparse> { 00053 typedef IteratorBased Kind; 
00054 }; 00055 00056 template<> 00057 struct storage_kind_to_shape<Sparse> { 00058 typedef SparseShape Shape; 00059 }; 00060 00061 struct Sparse2Sparse {}; 00062 struct Sparse2Dense {}; 00063 00064 template<> struct AssignmentKind<SparseShape, SparseShape> { typedef Sparse2Sparse Kind; }; 00065 template<> struct AssignmentKind<SparseShape, SparseTriangularShape> { typedef Sparse2Sparse Kind; }; 00066 template<> struct AssignmentKind<DenseShape, SparseShape> { typedef Sparse2Dense Kind; }; 00067 template<> struct AssignmentKind<DenseShape, SparseTriangularShape> { typedef Sparse2Dense Kind; }; 00068 00069 00070 template<typename DstXprType, typename SrcXprType> 00071 void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src) 00072 { 00073 typedef typename DstXprType::Scalar Scalar; 00074 typedef internal::evaluator<DstXprType> DstEvaluatorType; 00075 typedef internal::evaluator<SrcXprType> SrcEvaluatorType; 00076 00077 SrcEvaluatorType srcEvaluator(src); 00078 00079 const bool transpose = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit); 00080 const Index outerEvaluationSize = (SrcEvaluatorType::Flags&RowMajorBit) ? 
src.rows() : src.cols(); 00081 if ((!transpose) && src.isRValue()) 00082 { 00083 // eval without temporary 00084 dst.resize(src.rows(), src.cols()); 00085 dst.setZero(); 00086 dst.reserve((std::max)(src.rows(),src.cols())*2); 00087 for (Index j=0; j<outerEvaluationSize; ++j) 00088 { 00089 dst.startVec(j); 00090 for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it) 00091 { 00092 Scalar v = it.value(); 00093 dst.insertBackByOuterInner(j,it.index()) = v; 00094 } 00095 } 00096 dst.finalize(); 00097 } 00098 else 00099 { 00100 // eval through a temporary 00101 eigen_assert(( ((internal::traits<DstXprType>::SupportedAccessPatterns & OuterRandomAccessPattern)==OuterRandomAccessPattern) || 00102 (!((DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit)))) && 00103 "the transpose operation is supposed to be handled in SparseMatrix::operator="); 00104 00105 enum { Flip = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit) }; 00106 00107 00108 DstXprType temp(src.rows(), src.cols()); 00109 00110 temp.reserve((std::max)(src.rows(),src.cols())*2); 00111 for (Index j=0; j<outerEvaluationSize; ++j) 00112 { 00113 temp.startVec(j); 00114 for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it) 00115 { 00116 Scalar v = it.value(); 00117 temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v; 00118 } 00119 } 00120 temp.finalize(); 00121 00122 dst = temp.markAsRValue(); 00123 } 00124 } 00125 00126 // Generic Sparse to Sparse assignment 00127 template< typename DstXprType, typename SrcXprType, typename Functor> 00128 struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Sparse> 00129 { 00130 static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/) 00131 { 00132 assign_sparse_to_sparse(dst.derived(), src.derived()); 00133 } 00134 }; 00135 00136 // Generic Sparse to Dense 
assignment 00137 template< typename DstXprType, typename SrcXprType, typename Functor> 00138 struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense> 00139 { 00140 static void run(DstXprType &dst, const SrcXprType &src, const Functor &func) 00141 { 00142 if(internal::is_same<Functor,internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> >::value) 00143 dst.setZero(); 00144 00145 internal::evaluator<SrcXprType> srcEval(src); 00146 resize_if_allowed(dst, src, func); 00147 internal::evaluator<DstXprType> dstEval(dst); 00148 00149 const Index outerEvaluationSize = (internal::evaluator<SrcXprType>::Flags&RowMajorBit) ? src.rows() : src.cols(); 00150 for (Index j=0; j<outerEvaluationSize; ++j) 00151 for (typename internal::evaluator<SrcXprType>::InnerIterator i(srcEval,j); i; ++i) 00152 func.assignCoeff(dstEval.coeffRef(i.row(),i.col()), i.value()); 00153 } 00154 }; 00155 00156 // Specialization for "dst = dec.solve(rhs)" 00157 // NOTE we need to specialize it for Sparse2Sparse to avoid ambiguous specialization error 00158 template<typename DstXprType, typename DecType, typename RhsType, typename Scalar> 00159 struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar,Scalar>, Sparse2Sparse> 00160 { 00161 typedef Solve<DecType,RhsType> SrcXprType; 00162 static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &) 00163 { 00164 Index dstRows = src.rows(); 00165 Index dstCols = src.cols(); 00166 if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) 00167 dst.resize(dstRows, dstCols); 00168 00169 src.dec()._solve_impl(src.rhs(), dst); 00170 } 00171 }; 00172 00173 struct Diagonal2Sparse {}; 00174 00175 template<> struct AssignmentKind<SparseShape,DiagonalShape> { typedef Diagonal2Sparse Kind; }; 00176 00177 template< typename DstXprType, typename SrcXprType, typename Functor> 00178 struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Sparse> 00179 { 00180 typedef typename 
DstXprType::StorageIndex StorageIndex; 00181 typedef typename DstXprType::Scalar Scalar; 00182 typedef Array<StorageIndex,Dynamic,1> ArrayXI; 00183 typedef Array<Scalar,Dynamic,1> ArrayXS; 00184 template<int Options> 00185 static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/) 00186 { 00187 Index dstRows = src.rows(); 00188 Index dstCols = src.cols(); 00189 if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) 00190 dst.resize(dstRows, dstCols); 00191 00192 Index size = src.diagonal().size(); 00193 dst.makeCompressed(); 00194 dst.resizeNonZeros(size); 00195 Map<ArrayXI>(dst.innerIndexPtr(), size).setLinSpaced(0,StorageIndex(size)-1); 00196 Map<ArrayXI>(dst.outerIndexPtr(), size+1).setLinSpaced(0,StorageIndex(size)); 00197 Map<ArrayXS>(dst.valuePtr(), size) = src.diagonal(); 00198 } 00199 00200 template<typename DstDerived> 00201 static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/) 00202 { 00203 dst.diagonal() = src.diagonal(); 00204 } 00205 00206 static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/) 00207 { dst.diagonal() += src.diagonal(); } 00208 00209 static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/) 00210 { dst.diagonal() -= src.diagonal(); } 00211 }; 00212 } // end namespace internal 00213 00214 } // end namespace Eigen 00215 00216 #endif // EIGEN_SPARSEASSIGN_H