// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
#define EIGEN_SPARSE_SELFADJOINTVIEW_H

namespace Eigen {

namespace internal {

template<typename MatrixType, unsigned int Mode>
struct traits<SparseSelfAdjointView<MatrixType,Mode> > : traits<MatrixType> {
};

template<int SrcMode,int DstMode,typename MatrixType,int DestOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);

template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);

}

template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
  : public EigenBase<SparseSelfAdjointView<MatrixType,_Mode> >
{
  public:

    enum {
      Mode = _Mode,
      RowsAtCompileTime = internal::traits<SparseSelfAdjointView>::RowsAtCompileTime,
      ColsAtCompileTime = internal::traits<SparseSelfAdjointView>::ColsAtCompileTime
    };

    typedef EigenBase<SparseSelfAdjointView> Base;
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::StorageIndex StorageIndex;
    typedef Matrix<StorageIndex,Dynamic,1> VectorI;
    typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;
    typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;

    explicit inline SparseSelfAdjointView(MatrixType& matrix) : m_matrix(matrix)
    {
      eigen_assert(rows()==cols() && "SelfAdjointView is only for square matrices");
    }

    inline Index rows() const { return m_matrix.rows(); }
    inline Index cols() const { return m_matrix.cols(); }

    const _MatrixTypeNested& matrix() const { return m_matrix; }
    typename internal::remove_reference<MatrixTypeNested>::type& matrix() { return m_matrix; }

    template<typename OtherDerived>
    Product<SparseSelfAdjointView, OtherDerived>
    operator*(const SparseMatrixBase<OtherDerived>& rhs) const
    {
      return Product<SparseSelfAdjointView, OtherDerived>(*this, rhs.derived());
    }

    template<typename OtherDerived> friend
    Product<OtherDerived, SparseSelfAdjointView>
    operator*(const SparseMatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
    {
      return Product<OtherDerived, SparseSelfAdjointView>(lhs.derived(), rhs);
    }

    template<typename OtherDerived>
    Product<SparseSelfAdjointView,OtherDerived>
    operator*(const MatrixBase<OtherDerived>& rhs) const
    {
      return Product<SparseSelfAdjointView,OtherDerived>(*this, rhs.derived());
    }

    template<typename OtherDerived> friend
    Product<OtherDerived,SparseSelfAdjointView>
    operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
    {
      return Product<OtherDerived,SparseSelfAdjointView>(lhs.derived(), rhs);
    }

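    // Usage sketch for the operator* overloads above (editorial, not from the
    // Eigen docs; `A`, `x` are hypothetical names). Only one triangular half
    // of `A` needs to be stored; the view multiplies as the full matrix:
    //
    //   #include <Eigen/Sparse>
    //   using namespace Eigen;
    //   SparseMatrix<double> A(3,3);
    //   A.insert(0,0) = 4; A.insert(1,0) = 1; A.insert(2,2) = 2; // lower half
    //   A.makeCompressed();
    //   VectorXd x = VectorXd::Ones(3);
    //   VectorXd y = A.selfadjointView<Lower>() * x;                // view * dense
    //   RowVectorXd z = x.transpose() * A.selfadjointView<Lower>(); // dense * view
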
    template<typename DerivedU>
    SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));

    // TODO implement twists in a more evaluator-friendly fashion
    SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) const
    {
      return SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode>(m_matrix, perm);
    }
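
    // Usage sketch for twistedBy (editorial; `A`, `P` are hypothetical): it
    // returns an expression of the permuted selfadjoint matrix P*A*P^-1,
    // typically assigned to another selfadjoint view or to a full sparse
    // matrix:
    //
    //   PermutationMatrix<Dynamic,Dynamic,int> P(A.rows());
    //   P.setIdentity();                       // any permutation works
    //   SparseMatrix<double> B(A.rows(),A.cols());
    //   B.selfadjointView<Upper>() = A.selfadjointView<Lower>().twistedBy(P);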

    template<typename SrcMatrixType,int SrcMode>
    SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcMode>& permutedMatrix)
    {
      internal::call_assignment_no_alias_no_transpose(*this, permutedMatrix);
      return *this;
    }

    SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)
    {
      PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
      return *this = src.twistedBy(pnull);
    }

    template<typename SrcMatrixType,unsigned int SrcMode>
    SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcMode>& src)
    {
      PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
      return *this = src.twistedBy(pnull);
    }

    void resize(Index rows, Index cols)
    {
      EIGEN_ONLY_USED_FOR_DEBUG(rows);
      EIGEN_ONLY_USED_FOR_DEBUG(cols);
      eigen_assert(rows == this->rows() && cols == this->cols()
                && "SparseSelfAdjointView::resize() does not actually allow one to resize.");
    }

  protected:

    MatrixTypeNested m_matrix;
    //mutable VectorI m_countPerRow;
    //mutable VectorI m_countPerCol;
  private:
    template<typename Dest> void evalTo(Dest &) const;
};

/***************************************************************************
* Implementation of SparseMatrixBase methods
***************************************************************************/

template<typename Derived>
template<unsigned int UpLo>
typename SparseMatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView() const
{
  return SparseSelfAdjointView<const Derived, UpLo>(derived());
}

template<typename Derived>
template<unsigned int UpLo>
typename SparseMatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView()
{
  return SparseSelfAdjointView<Derived, UpLo>(derived());
}
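
// A minimal sketch (editorial; `M` is a hypothetical SparseMatrix<double>
// holding only its lower triangle): assigning the view to a plain sparse
// matrix materializes both halves, via the SparseSelfAdjoint2Sparse
// assignment path defined below:
//
//   SparseMatrix<double> full;
//   full = M.selfadjointView<Lower>();   // full symmetric matrix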

/***************************************************************************
* Implementation of SparseSelfAdjointView methods
***************************************************************************/

template<typename MatrixType, unsigned int Mode>
template<typename DerivedU>
SparseSelfAdjointView<MatrixType,Mode>&
SparseSelfAdjointView<MatrixType,Mode>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
{
  SparseMatrix<Scalar,(MatrixType::Flags&RowMajorBit)?RowMajor:ColMajor> tmp = u * u.adjoint();
  if(alpha==Scalar(0))
    m_matrix = tmp.template triangularView<Mode>();
  else
    m_matrix += alpha * tmp.template triangularView<Mode>();

  return *this;
}
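
// Usage sketch for rankUpdate (editorial; `A` and `u` are hypothetical and
// assumed initialized): it updates the stored triangular half with
// alpha*u*u^*. Note the special case above: alpha==0 *replaces* the stored
// half by the triangular part of u*u^* rather than leaving it unchanged.
//
//   SparseMatrix<double> A(n,n), u(n,k);
//   A.selfadjointView<Lower>().rankUpdate(u, 2.0);  // A += 2*u*u^T (lower half)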

namespace internal {

// TODO currently a selfadjoint expression has the form SelfAdjointView<.,.>
//      in the future selfadjoint-ness should be defined by the expression traits
//      such that Transpose<SelfAdjointView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work)
template<typename MatrixType, unsigned int Mode>
struct evaluator_traits<SparseSelfAdjointView<MatrixType,Mode> >
{
  typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
  typedef SparseSelfAdjointShape Shape;
};

struct SparseSelfAdjoint2Sparse {};

template<> struct AssignmentKind<SparseShape,SparseSelfAdjointShape> { typedef SparseSelfAdjoint2Sparse Kind; };
template<> struct AssignmentKind<SparseSelfAdjointShape,SparseShape> { typedef Sparse2Sparse Kind; };

template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse>
{
  typedef typename DstXprType::StorageIndex StorageIndex;
  typedef internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> AssignOpType;

  template<typename DestScalar,int StorageOrder>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignOpType&/*func*/)
  {
    internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), dst);
  }

  // FIXME: the handling of += and -= in sparse matrices should be cleaned up
  // so that the next two overloads could be reduced to this generic one:
  template<typename DestScalar,int StorageOrder,typename AssignFunc>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignFunc& func)
  {
    SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
    run(tmp, src, AssignOpType());
    call_assignment_no_alias_no_transpose(dst, tmp, func);
  }

  template<typename DestScalar,int StorageOrder>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,
                  const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>& /* func */)
  {
    SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
    run(tmp, src, AssignOpType());
    dst += tmp;
  }

  template<typename DestScalar,int StorageOrder>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,
                  const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>& /* func */)
  {
    SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
    run(tmp, src, AssignOpType());
    dst -= tmp;
  }

  template<typename DestScalar>
  static void run(DynamicSparseMatrix<DestScalar,ColMajor,StorageIndex>& dst, const SrcXprType &src, const AssignOpType&/*func*/)
  {
    // TODO: evaluate directly into dst
    SparseMatrix<DestScalar,ColMajor,StorageIndex> tmp(dst.rows(),dst.cols());
    internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), tmp);
    dst = tmp;
  }
};

} // end namespace internal

/***************************************************************************
* Implementation of sparse self-adjoint times dense matrix
***************************************************************************/

namespace internal {

template<int Mode, typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
  EIGEN_ONLY_USED_FOR_DEBUG(alpha);

  typedef typename internal::nested_eval<SparseLhsType,DenseRhsType::MaxColsAtCompileTime>::type SparseLhsTypeNested;
  typedef typename internal::remove_all<SparseLhsTypeNested>::type SparseLhsTypeNestedCleaned;
  typedef evaluator<SparseLhsTypeNestedCleaned> LhsEval;
  typedef typename LhsEval::InnerIterator LhsIterator;
  typedef typename SparseLhsType::Scalar LhsScalar;

  enum {
    LhsIsRowMajor = (LhsEval::Flags&RowMajorBit)==RowMajorBit,
    ProcessFirstHalf =
              ((Mode&(Upper|Lower))==(Upper|Lower))
          || ( (Mode&Upper) && !LhsIsRowMajor)
          || ( (Mode&Lower) && LhsIsRowMajor),
    ProcessSecondHalf = !ProcessFirstHalf
  };

  SparseLhsTypeNested lhs_nested(lhs);
  LhsEval lhsEval(lhs_nested);

  // work on one column at a time
  for (Index k=0; k<rhs.cols(); ++k)
  {
    for (Index j=0; j<lhs.outerSize(); ++j)
    {
      LhsIterator i(lhsEval,j);
      // handle diagonal coeff
      if (ProcessSecondHalf)
      {
        while (i && i.index()<j) ++i;
        if(i && i.index()==j)
        {
          res(j,k) += alpha * i.value() * rhs(j,k);
          ++i;
        }
      }

      // rhs coefficient premultiplied by alpha, reused by the scattered updates below
      typename ScalarBinaryOpTraits<AlphaType, typename DenseRhsType::Scalar>::ReturnType rhs_j(alpha*rhs(j,k));
      // accumulator for the partial scalar product
      typename DenseResType::Scalar res_j(0);
      for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
      {
        LhsScalar lhs_ij = i.value();
        if(!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij);
        res_j += lhs_ij * rhs(i.index(),k);
        res(i.index(),k) += numext::conj(lhs_ij) * rhs_j;
      }
      res(j,k) += alpha * res_j;

      // handle diagonal coeff
      if (ProcessFirstHalf && i && (i.index()==j))
        res(j,k) += alpha * i.value() * rhs(j,k);
    }
  }
}
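
// How the single traversal above covers the full matrix (editorial sketch):
// with only one triangular half stored, each stored off-diagonal coefficient
// a_ij contributes twice per right-hand-side column k,
//
//   res(i,k) += alpha * a_ij * rhs(j,k);        // stored half
//   res(j,k) += alpha * conj(a_ij) * rhs(i,k);  // mirrored half, a_ji = conj(a_ij)
//
// while the diagonal coefficient is applied once, before or after the
// off-diagonal run depending on ProcessFirstHalf/ProcessSecondHalf. The net
// effect is res += alpha * sym(lhs) * rhs, matching a dense reference such as
// MatrixXd(A).selfadjointView<Lower>() * x (hypothetical names).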


template<typename LhsView, typename Rhs, int ProductType>
struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType>
: generic_product_impl_base<LhsView, Rhs, generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType> >
{
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs, const typename Dest::Scalar& alpha)
  {
    typedef typename LhsView::_MatrixTypeNested Lhs;
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(lhsView.matrix());
    RhsNested rhsNested(rhs);

    internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, alpha);
  }
};

template<typename Lhs, typename RhsView, int ProductType>
struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType>
: generic_product_impl_base<Lhs, RhsView, generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType> >
{
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView, const typename Dest::Scalar& alpha)
  {
    typedef typename RhsView::_MatrixTypeNested Rhs;
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhsView.matrix());

    // transpose everything
    Transpose<Dest> dstT(dst);
    internal::sparse_selfadjoint_time_dense_product<RhsView::Mode>(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
  }
};

// NOTE: these two overloads are needed to evaluate the sparse selfadjoint view into a full sparse matrix
// TODO: maybe the copy could be handled by generic_product_impl so that these overloads would not be needed anymore

template<typename LhsView, typename Rhs, int ProductTag>
struct product_evaluator<Product<LhsView, Rhs, DefaultProduct>, ProductTag, SparseSelfAdjointShape, SparseShape>
  : public evaluator<typename Product<typename Rhs::PlainObject, Rhs, DefaultProduct>::PlainObject>
{
  typedef Product<LhsView, Rhs, DefaultProduct> XprType;
  typedef typename XprType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;

  product_evaluator(const XprType& xpr)
    : m_lhs(xpr.lhs()), m_result(xpr.rows(), xpr.cols())
  {
    ::new (static_cast<Base*>(this)) Base(m_result);
    generic_product_impl<typename Rhs::PlainObject, Rhs, SparseShape, SparseShape, ProductTag>::evalTo(m_result, m_lhs, xpr.rhs());
  }

protected:
  typename Rhs::PlainObject m_lhs;
  PlainObject m_result;
};

template<typename Lhs, typename RhsView, int ProductTag>
struct product_evaluator<Product<Lhs, RhsView, DefaultProduct>, ProductTag, SparseShape, SparseSelfAdjointShape>
  : public evaluator<typename Product<Lhs, typename Lhs::PlainObject, DefaultProduct>::PlainObject>
{
  typedef Product<Lhs, RhsView, DefaultProduct> XprType;
  typedef typename XprType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;

  product_evaluator(const XprType& xpr)
    : m_rhs(xpr.rhs()), m_result(xpr.rows(), xpr.cols())
  {
    ::new (static_cast<Base*>(this)) Base(m_result);
    generic_product_impl<Lhs, typename Lhs::PlainObject, SparseShape, SparseShape, ProductTag>::evalTo(m_result, xpr.lhs(), m_rhs);
  }

protected:
  typename Lhs::PlainObject m_rhs;
  PlainObject m_result;
};

} // namespace internal

/***************************************************************************
* Implementation of symmetric copies and permutations
***************************************************************************/
namespace internal {

template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef typename MatrixType::Scalar Scalar;
  typedef SparseMatrix<Scalar,DestOrder,StorageIndex> Dest;
  typedef Matrix<StorageIndex,Dynamic,1> VectorI;
  typedef evaluator<MatrixType> MatEval;
  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;

  MatEval matEval(mat);
  Dest& dest(_dest.derived());
  enum {
    StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
  };

  Index size = mat.rows();
  VectorI count;
  count.resize(size);
  count.setZero();
  dest.resize(size,size);
  for(Index j = 0; j<size; ++j)
  {
    Index jp = perm ? perm[j] : j;
    for(MatIterator it(matEval,j); it; ++it)
    {
      Index i = it.index();
      Index r = it.row();
      Index c = it.col();
      Index ip = perm ? perm[i] : i;
      if(Mode==(Upper|Lower))
        count[StorageOrderMatch ? jp : ip]++;
      else if(r==c)
        count[ip]++;
      else if(( Mode==Lower && r>c) || ( Mode==Upper && r<c))
      {
        count[ip]++;
        count[jp]++;
      }
    }
  }
  Index nnz = count.sum();

  // reserve space
  dest.resizeNonZeros(nnz);
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];

  // copy data
  for(StorageIndex j = 0; j<size; ++j)
  {
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = internal::convert_index<StorageIndex>(it.index());
      Index r = it.row();
      Index c = it.col();

      StorageIndex jp = perm ? perm[j] : j;
      StorageIndex ip = perm ? perm[i] : i;

      if(Mode==(Upper|Lower))
      {
        Index k = count[StorageOrderMatch ? jp : ip]++;
        dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
        dest.valuePtr()[k] = it.value();
      }
      else if(r==c)
      {
        Index k = count[ip]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
      }
      else if(( (Mode&Lower)==Lower && r>c) || ( (Mode&Upper)==Upper && r<c))
      {
        if(!StorageOrderMatch)
          std::swap(ip,jp);
        Index k = count[jp]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
        k = count[ip]++;
        dest.innerIndexPtr()[k] = jp;
        dest.valuePtr()[k] = numext::conj(it.value());
      }
    }
  }
}
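
// Reached through the public API, the routine above expands compact
// selfadjoint storage into a full sparse matrix in two passes (count, then
// scatter with conjugated mirror entries), e.g. (editorial sketch; `A` and
// `P` are hypothetical):
//
//   SparseMatrix<double> full;
//   full = A.selfadjointView<Lower>().twistedBy(P);   // P*A*P^-1, both halves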

template<int _SrcMode,int _DstMode,typename MatrixType,int DstOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef typename MatrixType::Scalar Scalar;
  SparseMatrix<Scalar,DstOrder,StorageIndex>& dest(_dest.derived());
  typedef Matrix<StorageIndex,Dynamic,1> VectorI;
  typedef evaluator<MatrixType> MatEval;
  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;

  enum {
    SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
    StorageOrderMatch = int(SrcOrder) == int(DstOrder),
    DstMode = DstOrder==RowMajor ? (_DstMode==Upper ? Lower : Upper) : _DstMode,
    SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
  };

  MatEval matEval(mat);

  Index size = mat.rows();
  VectorI count(size);
  count.setZero();
  dest.resize(size,size);
  for(StorageIndex j = 0; j<size; ++j)
  {
    StorageIndex jp = perm ? perm[j] : j;
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = it.index();
      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
        continue;

      StorageIndex ip = perm ? perm[i] : i;
      count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
    }
  }
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  dest.resizeNonZeros(dest.outerIndexPtr()[size]);
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];

  for(StorageIndex j = 0; j<size; ++j)
  {
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = it.index();
      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
        continue;

      StorageIndex jp = perm ? perm[j] : j;
      StorageIndex ip = perm ? perm[i] : i;

      Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
      dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);

      if(!StorageOrderMatch) std::swap(ip,jp);
      if( ((int(DstMode)==int(Lower) && ip<jp) || (int(DstMode)==int(Upper) && ip>jp)))
        dest.valuePtr()[k] = numext::conj(it.value());
      else
        dest.valuePtr()[k] = it.value();
    }
  }
}
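
// In contrast, permute_symm_to_symm keeps compact storage: only one
// triangular half of the permuted matrix is written. Through the public API
// (editorial sketch; `A`, `P` are hypothetical):
//
//   SparseMatrix<double> B(A.rows(),A.cols());
//   B.selfadjointView<Upper>() = A.selfadjointView<Lower>().twistedBy(P);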

} // end namespace internal

// TODO implement twists in a more evaluator-friendly fashion

namespace internal {

template<typename MatrixType, int Mode>
struct traits<SparseSymmetricPermutationProduct<MatrixType,Mode> > : traits<MatrixType> {
};

} // end namespace internal

template<typename MatrixType,int Mode>
class SparseSymmetricPermutationProduct
  : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,Mode> >
{
  public:
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::StorageIndex StorageIndex;
    enum {
      RowsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::RowsAtCompileTime,
      ColsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::ColsAtCompileTime
    };
  protected:
    typedef PermutationMatrix<Dynamic,Dynamic,StorageIndex> Perm;
  public:
    typedef Matrix<StorageIndex,Dynamic,1> VectorI;
    typedef typename MatrixType::Nested MatrixTypeNested;
    typedef typename internal::remove_all<MatrixTypeNested>::type NestedExpression;

    SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
      : m_matrix(mat), m_perm(perm)
    {}

    inline Index rows() const { return m_matrix.rows(); }
    inline Index cols() const { return m_matrix.cols(); }

    const NestedExpression& matrix() const { return m_matrix; }
    const Perm& perm() const { return m_perm; }

  protected:
    MatrixTypeNested m_matrix;
    const Perm& m_perm;

};
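
// Editorial note: SparseSymmetricPermutationProduct is the expression type
// returned by SparseSelfAdjointView::twistedBy(). It holds the permutation
// by reference (m_perm above), so the permutation must outlive the
// expression; in practice the expression is assigned immediately, e.g.
// dst.selfadjointView<Upper>() = A.selfadjointView<Lower>().twistedBy(P);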

namespace internal {

template<typename DstXprType, typename MatrixType, int Mode, typename Scalar>
struct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType,Mode>, internal::assign_op<Scalar,typename MatrixType::Scalar>, Sparse2Sparse>
{
  typedef SparseSymmetricPermutationProduct<MatrixType,Mode> SrcXprType;
  typedef typename DstXprType::StorageIndex DstIndex;
  template<int Options>
  static void run(SparseMatrix<Scalar,Options,DstIndex> &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
  {
    // internal::permute_symm_to_fullsymm<Mode>(m_matrix,_dest,m_perm.indices().data());
    SparseMatrix<Scalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
    internal::permute_symm_to_fullsymm<Mode>(src.matrix(),tmp,src.perm().indices().data());
    dst = tmp;
  }

  template<typename DestType,unsigned int DestMode>
  static void run(SparseSelfAdjointView<DestType,DestMode>& dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
  {
    internal::permute_symm_to_symm<Mode,DestMode>(src.matrix(),dst.matrix(),src.perm().indices().data());
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H