Eigen 3.3.3
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2011-2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.


#ifndef EIGEN_COREEVALUATORS_H
#define EIGEN_COREEVALUATORS_H

namespace Eigen {

namespace internal {

// This class returns the evaluator kind from the expression storage kind.
// Default assumes index based accessors
template<typename StorageKind>
struct storage_kind_to_evaluator_kind {
  typedef IndexBased Kind;
};

// This class returns the evaluator shape from the expression storage kind.
// It can be Dense, Sparse, Triangular, Diagonal, SelfAdjoint, Band, etc.
template<typename StorageKind> struct storage_kind_to_shape;

template<> struct storage_kind_to_shape<Dense>                  { typedef DenseShape Shape;           };
template<> struct storage_kind_to_shape<SolverStorage>          { typedef SolverShape Shape;          };
template<> struct storage_kind_to_shape<PermutationStorage>     { typedef PermutationShape Shape;     };
template<> struct storage_kind_to_shape<TranspositionsStorage>  { typedef TranspositionsShape Shape;  };

// Evaluators have to be specialized with respect to various criteria such as:
//  - storage/structure/shape
//  - scalar type
//  - etc.
// Therefore, we need specializations of evaluator providing additional template arguments for each kind of evaluator.
// We currently distinguish the following kinds of evaluators:
//  - unary_evaluator    for expressions taking only one argument (CwiseUnaryOp, CwiseUnaryView, Transpose, MatrixWrapper, ArrayWrapper, Reverse, Replicate)
//  - binary_evaluator   for expressions taking two arguments (CwiseBinaryOp)
//  - ternary_evaluator  for expressions taking three arguments (CwiseTernaryOp)
//  - product_evaluator  for linear algebra products (Product); special case of binary_evaluator because it requires additional tags for dispatching.
//  - mapbase_evaluator  for Map, Block, Ref
//  - block_evaluator    for Block (special dispatching to a mapbase_evaluator or unary_evaluator)

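// [Example, for illustration only; not part of the library] The evaluators defined in
// this file are what Eigen's assignment/redux kernels use to read an expression
// coefficient by coefficient (or packet by packet) without creating temporaries.
// Assuming a program that includes <Eigen/Dense>; the names a, b, xpr, ev are made up:
//
//   Eigen::MatrixXd a = Eigen::MatrixXd::Random(3,3), b = Eigen::MatrixXd::Random(3,3);
//   const auto xpr = a + b;                              // expression only, nothing computed yet
//   Eigen::internal::evaluator<decltype(xpr)> ev(xpr);   // dispatched through the machinery below
//   double v = ev.coeff(1,2);                            // computes a(1,2) + b(1,2) on the fly
//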
template< typename T,
          typename Arg1Kind   = typename evaluator_traits<typename T::Arg1>::Kind,
          typename Arg2Kind   = typename evaluator_traits<typename T::Arg2>::Kind,
          typename Arg3Kind   = typename evaluator_traits<typename T::Arg3>::Kind,
          typename Arg1Scalar = typename traits<typename T::Arg1>::Scalar,
          typename Arg2Scalar = typename traits<typename T::Arg2>::Scalar,
          typename Arg3Scalar = typename traits<typename T::Arg3>::Scalar> struct ternary_evaluator;

template< typename T,
          typename LhsKind   = typename evaluator_traits<typename T::Lhs>::Kind,
          typename RhsKind   = typename evaluator_traits<typename T::Rhs>::Kind,
          typename LhsScalar = typename traits<typename T::Lhs>::Scalar,
          typename RhsScalar = typename traits<typename T::Rhs>::Scalar> struct binary_evaluator;

template< typename T,
          typename Kind   = typename evaluator_traits<typename T::NestedExpression>::Kind,
          typename Scalar = typename T::Scalar> struct unary_evaluator;

// evaluator_traits<T> contains traits for evaluator<T>

template<typename T>
struct evaluator_traits_base
{
  // by default, get evaluator kind and shape from storage
  typedef typename storage_kind_to_evaluator_kind<typename traits<T>::StorageKind>::Kind Kind;
  typedef typename storage_kind_to_shape<typename traits<T>::StorageKind>::Shape Shape;
};

// Default evaluator traits
template<typename T>
struct evaluator_traits : public evaluator_traits_base<T>
{
};

template<typename T, typename Shape = typename evaluator_traits<T>::Shape >
struct evaluator_assume_aliasing {
  static const bool value = false;
};

// By default, we assume a unary expression:
template<typename T>
struct evaluator : public unary_evaluator<T>
{
  typedef unary_evaluator<T> Base;
  EIGEN_DEVICE_FUNC explicit evaluator(const T& xpr) : Base(xpr) {}
};


// TODO: Think about const-correctness
template<typename T>
struct evaluator<const T>
  : evaluator<T>
{
  EIGEN_DEVICE_FUNC
  explicit evaluator(const T& xpr) : evaluator<T>(xpr) {}
};

// ---------- base class for all evaluators ----------

template<typename ExpressionType>
struct evaluator_base : public noncopyable
{
  // TODO that's not very nice to have to propagate all these traits. They are currently only needed to handle outer,inner indices.
  typedef traits<ExpressionType> ExpressionTraits;

  enum {
    Alignment = 0
  };
};

// -------------------- Matrix and Array --------------------
//
// evaluator<PlainObjectBase> is a common base class for the
// Matrix and Array evaluators.
// Here we directly specialize evaluator. This is not really a unary expression, and it is, by definition, dense,
// so no need for more sophisticated dispatching.

template<typename Derived>
struct evaluator<PlainObjectBase<Derived> >
  : evaluator_base<Derived>
{
  typedef PlainObjectBase<Derived> PlainObjectType;
  typedef typename PlainObjectType::Scalar Scalar;
  typedef typename PlainObjectType::CoeffReturnType CoeffReturnType;

  enum {
    IsRowMajor = PlainObjectType::IsRowMajor,
    IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime,
    RowsAtCompileTime = PlainObjectType::RowsAtCompileTime,
    ColsAtCompileTime = PlainObjectType::ColsAtCompileTime,

    CoeffReadCost = NumTraits<Scalar>::ReadCost,
    Flags = traits<Derived>::EvaluatorFlags,
    Alignment = traits<Derived>::Alignment
  };

  EIGEN_DEVICE_FUNC evaluator()
    : m_data(0),
      m_outerStride(IsVectorAtCompileTime ? 0
                    : int(IsRowMajor) ? ColsAtCompileTime
                    : RowsAtCompileTime)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  EIGEN_DEVICE_FUNC explicit evaluator(const PlainObjectType& m)
    : m_data(m.data()), m_outerStride(IsVectorAtCompileTime ? 0 : m.outerStride())
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    if (IsRowMajor)
      return m_data[row * m_outerStride.value() + col];
    else
      return m_data[row + col * m_outerStride.value()];
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return m_data[index];
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index row, Index col)
  {
    if (IsRowMajor)
      return const_cast<Scalar*>(m_data)[row * m_outerStride.value() + col];
    else
      return const_cast<Scalar*>(m_data)[row + col * m_outerStride.value()];
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index index)
  {
    return const_cast<Scalar*>(m_data)[index];
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index row, Index col) const
  {
    if (IsRowMajor)
      return ploadt<PacketType, LoadMode>(m_data + row * m_outerStride.value() + col);
    else
      return ploadt<PacketType, LoadMode>(m_data + row + col * m_outerStride.value());
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index index) const
  {
    return ploadt<PacketType, LoadMode>(m_data + index);
  }

  template<int StoreMode,typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index row, Index col, const PacketType& x)
  {
    if (IsRowMajor)
      return pstoret<Scalar, PacketType, StoreMode>
                    (const_cast<Scalar*>(m_data) + row * m_outerStride.value() + col, x);
    else
      return pstoret<Scalar, PacketType, StoreMode>
                    (const_cast<Scalar*>(m_data) + row + col * m_outerStride.value(), x);
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketType& x)
  {
    return pstoret<Scalar, PacketType, StoreMode>(const_cast<Scalar*>(m_data) + index, x);
  }

protected:
  const Scalar *m_data;

  // We do not need to know the outer stride for vectors
  variable_if_dynamic<Index, IsVectorAtCompileTime ? 0
                             : int(IsRowMajor) ? ColsAtCompileTime
                             : RowsAtCompileTime> m_outerStride;
};

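// [Example, for illustration only; not part of the library] For a plain Matrix or Array
// the evaluator above is just a data pointer plus an outer stride, so coeff(row,col)
// reduces to pointer arithmetic. Names m and ev are made up:
//
//   Eigen::Matrix<float,2,3> m;          // column-major by default, outer stride == 2
//   m << 1, 2, 3,
//        4, 5, 6;
//   Eigen::internal::evaluator<Eigen::Matrix<float,2,3> > ev(m);
//   float x = ev.coeff(1,2);             // m_data[1 + 2*2] == m(1,2) == 6
//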
template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
struct evaluator<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
  : evaluator<PlainObjectBase<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >
{
  typedef Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;

  EIGEN_DEVICE_FUNC evaluator() {}

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& m)
    : evaluator<PlainObjectBase<XprType> >(m)
  { }
};

template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
struct evaluator<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
  : evaluator<PlainObjectBase<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >
{
  typedef Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;

  EIGEN_DEVICE_FUNC evaluator() {}

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& m)
    : evaluator<PlainObjectBase<XprType> >(m)
  { }
};

// -------------------- Transpose --------------------

template<typename ArgType>
struct unary_evaluator<Transpose<ArgType>, IndexBased>
  : evaluator_base<Transpose<ArgType> >
{
  typedef Transpose<ArgType> XprType;

  enum {
    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
    Flags = evaluator<ArgType>::Flags ^ RowMajorBit,
    Alignment = evaluator<ArgType>::Alignment
  };

  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {}

  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    return m_argImpl.coeff(col, row);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return m_argImpl.coeff(index);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index row, Index col)
  {
    return m_argImpl.coeffRef(col, row);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  typename XprType::Scalar& coeffRef(Index index)
  {
    return m_argImpl.coeffRef(index);
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index row, Index col) const
  {
    return m_argImpl.template packet<LoadMode,PacketType>(col, row);
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index index) const
  {
    return m_argImpl.template packet<LoadMode,PacketType>(index);
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index row, Index col, const PacketType& x)
  {
    m_argImpl.template writePacket<StoreMode,PacketType>(col, row, x);
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketType& x)
  {
    m_argImpl.template writePacket<StoreMode,PacketType>(index, x);
  }

protected:
  evaluator<ArgType> m_argImpl;
};

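// [Example, for illustration only; not part of the library] The transpose evaluator
// above copies nothing: it swaps the (row,col) arguments and flips RowMajorBit before
// forwarding to the nested evaluator. Names m, t, ev are made up:
//
//   Eigen::Matrix2d m;
//   m << 1, 2,
//        3, 4;
//   const auto t = m.transpose();                        // Transpose expression, no data moved
//   Eigen::internal::evaluator<decltype(t)> ev(t);
//   double v = ev.coeff(0,1);                            // forwards to coeff(1,0) of m, i.e. 3
//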
// -------------------- CwiseNullaryOp --------------------
// Like Matrix and Array, this is not really a unary expression, so we directly specialize evaluator.
// Likewise, there is no need for more sophisticated dispatching here.

template<typename Scalar,typename NullaryOp,
         bool has_nullary = has_nullary_operator<NullaryOp>::value,
         bool has_unary   = has_unary_operator<NullaryOp>::value,
         bool has_binary  = has_binary_operator<NullaryOp>::value>
struct nullary_wrapper
{
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { return op(i,j); }
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); }

  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { return op.template packetOp<T>(i,j); }
  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); }
};

template<typename Scalar,typename NullaryOp>
struct nullary_wrapper<Scalar,NullaryOp,true,false,false>
{
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType=0, IndexType=0) const { return op(); }
  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType=0, IndexType=0) const { return op.template packetOp<T>(); }
};

template<typename Scalar,typename NullaryOp>
struct nullary_wrapper<Scalar,NullaryOp,false,false,true>
{
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j=0) const { return op(i,j); }
  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j=0) const { return op.template packetOp<T>(i,j); }
};

// We need the following specialization for vector-only functors assigned to a runtime vector,
// for instance, using linspace and assigning a RowVectorXd to a MatrixXd or even a row of a MatrixXd.
// In this case, i==0 and j is used for the actual iteration.
template<typename Scalar,typename NullaryOp>
struct nullary_wrapper<Scalar,NullaryOp,false,true,false>
{
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {
    eigen_assert(i==0 || j==0);
    return op(i+j);
  }
  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {
    eigen_assert(i==0 || j==0);
    return op.template packetOp<T>(i+j);
  }

  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); }
  template <typename T, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); }
};

template<typename Scalar,typename NullaryOp>
struct nullary_wrapper<Scalar,NullaryOp,false,false,false> {};

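// [Example, for illustration only; not part of the library] The nullary_wrapper
// specializations above select, at compile time, the call form a nullary functor
// actually provides. A rough mapping (the functor classes involved are internal
// details and may differ between versions):
//
//   MatrixXd::Constant(2,2, 3.0)    // functor exposes op()      -> <.,.,true,false,false>
//   VectorXd::LinSpaced(5, 0., 1.)  // functor exposes op(i)     -> <.,.,false,true,false> (i+j trick)
//   MatrixXd::Identity(3,3)         // functor exposes op(i,j)   -> <.,.,false,false,true>
//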
#if 0 && EIGEN_COMP_MSVC>0
// Disable this ugly workaround. This is now handled in traits<Ref>::match,
// but this piece of code might still become handy if some other weird compilation
// errors pop up again.

// MSVC exhibits a weird compilation error when
// compiling:
//    Eigen::MatrixXf A = MatrixXf::Random(3,3);
//    Ref<const MatrixXf> R = 2.f*A;
// and that has_*ary_operator<scalar_constant_op<float>> have not been instantiated yet.
// The "problem" is that evaluator<2.f*A> is instantiated by traits<Ref>::match<2.f*A>
// and at that time has_*ary_operator<T> returns true regardless of T.
// Then nullary_wrapper is badly instantiated as nullary_wrapper<.,.,true,true,true>.
// The trick is thus to defer the proper instantiation of nullary_wrapper when coeff(),
// and packet() are really instantiated as implemented below:

// This is a simple wrapper around Index to enforce the re-instantiation of
// has_*ary_operator when needed.
template<typename T> struct nullary_wrapper_workaround_msvc {
  nullary_wrapper_workaround_msvc(const T&);
  operator T()const;
};

template<typename Scalar,typename NullaryOp>
struct nullary_wrapper<Scalar,NullaryOp,true,true,true>
{
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {
    return nullary_wrapper<Scalar,NullaryOp,
    has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().operator()(op,i,j);
  }
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const {
    return nullary_wrapper<Scalar,NullaryOp,
    has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().operator()(op,i);
  }

  template <typename T, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {
    return nullary_wrapper<Scalar,NullaryOp,
    has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i,j);
  }
  template <typename T, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const {
    return nullary_wrapper<Scalar,NullaryOp,
    has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i);
  }
};
#endif // MSVC workaround

template<typename NullaryOp, typename PlainObjectType>
struct evaluator<CwiseNullaryOp<NullaryOp,PlainObjectType> >
  : evaluator_base<CwiseNullaryOp<NullaryOp,PlainObjectType> >
{
  typedef CwiseNullaryOp<NullaryOp,PlainObjectType> XprType;
  typedef typename internal::remove_all<PlainObjectType>::type PlainObjectTypeCleaned;

  enum {
    CoeffReadCost = internal::functor_traits<NullaryOp>::Cost,

    Flags = (evaluator<PlainObjectTypeCleaned>::Flags
          &  (  HereditaryBits
              | (functor_has_linear_access<NullaryOp>::ret ? LinearAccessBit : 0)
              | (functor_traits<NullaryOp>::PacketAccess   ? PacketAccessBit : 0)))
          | (functor_traits<NullaryOp>::IsRepeatable ? 0 : EvalBeforeNestingBit),
    Alignment = AlignedMax
  };

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& n)
    : m_functor(n.functor()), m_wrapper()
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  typedef typename XprType::CoeffReturnType CoeffReturnType;

  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(IndexType row, IndexType col) const
  {
    return m_wrapper(m_functor, row, col);
  }

  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(IndexType index) const
  {
    return m_wrapper(m_functor,index);
  }

  template<int LoadMode, typename PacketType, typename IndexType>
  EIGEN_STRONG_INLINE
  PacketType packet(IndexType row, IndexType col) const
  {
    return m_wrapper.template packetOp<PacketType>(m_functor, row, col);
  }

  template<int LoadMode, typename PacketType, typename IndexType>
  EIGEN_STRONG_INLINE
  PacketType packet(IndexType index) const
  {
    return m_wrapper.template packetOp<PacketType>(m_functor, index);
  }

protected:
  const NullaryOp m_functor;
  const internal::nullary_wrapper<CoeffReturnType,NullaryOp> m_wrapper;
};

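// [Example, for illustration only; not part of the library] A CwiseNullaryOp stores no
// coefficients, only its functor, so the evaluator above routes every coeff()/packet()
// call through nullary_wrapper. Names c and ev are made up:
//
//   const auto c = Eigen::MatrixXd::Constant(4, 4, 2.5); // CwiseNullaryOp expression
//   Eigen::internal::evaluator<decltype(c)> ev(c);
//   double v = ev.coeff(3, 1);                           // invokes the stored functor: 2.5
//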
// -------------------- CwiseUnaryOp --------------------

template<typename UnaryOp, typename ArgType>
struct unary_evaluator<CwiseUnaryOp<UnaryOp, ArgType>, IndexBased >
  : evaluator_base<CwiseUnaryOp<UnaryOp, ArgType> >
{
  typedef CwiseUnaryOp<UnaryOp, ArgType> XprType;

  enum {
    CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost,

    Flags = evaluator<ArgType>::Flags
          & (HereditaryBits | LinearAccessBit | (functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0)),
    Alignment = evaluator<ArgType>::Alignment
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  explicit unary_evaluator(const XprType& op)
    : m_functor(op.functor()),
      m_argImpl(op.nestedExpression())
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  typedef typename XprType::CoeffReturnType CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    return m_functor(m_argImpl.coeff(row, col));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return m_functor(m_argImpl.coeff(index));
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index row, Index col) const
  {
    return m_functor.packetOp(m_argImpl.template packet<LoadMode, PacketType>(row, col));
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index index) const
  {
    return m_functor.packetOp(m_argImpl.template packet<LoadMode, PacketType>(index));
  }

protected:
  const UnaryOp m_functor;
  evaluator<ArgType> m_argImpl;
};

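// [Example, for illustration only; not part of the library] A unary expression
// evaluates as functor(nested coefficient); its cost is the nested cost plus the
// functor's, and its flags are the nested flags masked as above. Names a, e, ev are made up:
//
//   Eigen::ArrayXd a = Eigen::ArrayXd::LinSpaced(4, -2.0, 1.0);
//   const auto e = a.abs();                              // CwiseUnaryOp around a
//   Eigen::internal::evaluator<decltype(e)> ev(e);
//   double v = ev.coeff(0);                              // |a(0)| == 2.0
//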
// -------------------- CwiseTernaryOp --------------------

// this is a ternary expression
template<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
struct evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
  : public ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
{
  typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType;
  typedef ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> > Base;

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {}
};

template<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
struct ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>, IndexBased, IndexBased>
  : evaluator_base<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
{
  typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType;

  enum {
    CoeffReadCost = evaluator<Arg1>::CoeffReadCost + evaluator<Arg2>::CoeffReadCost + evaluator<Arg3>::CoeffReadCost + functor_traits<TernaryOp>::Cost,

    Arg1Flags = evaluator<Arg1>::Flags,
    Arg2Flags = evaluator<Arg2>::Flags,
    Arg3Flags = evaluator<Arg3>::Flags,
    SameType = is_same<typename Arg1::Scalar,typename Arg2::Scalar>::value && is_same<typename Arg1::Scalar,typename Arg3::Scalar>::value,
    StorageOrdersAgree = (int(Arg1Flags)&RowMajorBit)==(int(Arg2Flags)&RowMajorBit) && (int(Arg1Flags)&RowMajorBit)==(int(Arg3Flags)&RowMajorBit),
    Flags0 = (int(Arg1Flags) | int(Arg2Flags) | int(Arg3Flags)) & (
        HereditaryBits
      | (int(Arg1Flags) & int(Arg2Flags) & int(Arg3Flags) &
          ( (StorageOrdersAgree ? LinearAccessBit : 0)
          | (functor_traits<TernaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
          )
        )
      ),
    Flags = (Flags0 & ~RowMajorBit) | (Arg1Flags & RowMajorBit),
    Alignment = EIGEN_PLAIN_ENUM_MIN(
        EIGEN_PLAIN_ENUM_MIN(evaluator<Arg1>::Alignment, evaluator<Arg2>::Alignment),
        evaluator<Arg3>::Alignment)
  };

  EIGEN_DEVICE_FUNC explicit ternary_evaluator(const XprType& xpr)
    : m_functor(xpr.functor()),
      m_arg1Impl(xpr.arg1()),
      m_arg2Impl(xpr.arg2()),
      m_arg3Impl(xpr.arg3())
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<TernaryOp>::Cost);
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  typedef typename XprType::CoeffReturnType CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    return m_functor(m_arg1Impl.coeff(row, col), m_arg2Impl.coeff(row, col), m_arg3Impl.coeff(row, col));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return m_functor(m_arg1Impl.coeff(index), m_arg2Impl.coeff(index), m_arg3Impl.coeff(index));
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index row, Index col) const
  {
    return m_functor.packetOp(m_arg1Impl.template packet<LoadMode,PacketType>(row, col),
                              m_arg2Impl.template packet<LoadMode,PacketType>(row, col),
                              m_arg3Impl.template packet<LoadMode,PacketType>(row, col));
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index index) const
  {
    return m_functor.packetOp(m_arg1Impl.template packet<LoadMode,PacketType>(index),
                              m_arg2Impl.template packet<LoadMode,PacketType>(index),
                              m_arg3Impl.template packet<LoadMode,PacketType>(index));
  }

protected:
  const TernaryOp m_functor;
  evaluator<Arg1> m_arg1Impl;
  evaluator<Arg2> m_arg2Impl;
  evaluator<Arg3> m_arg3Impl;
};

// -------------------- CwiseBinaryOp --------------------

// this is a binary expression
template<typename BinaryOp, typename Lhs, typename Rhs>
struct evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
  : public binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
{
  typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
  typedef binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> > Base;

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {}
};

template<typename BinaryOp, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IndexBased>
  : evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
{
  typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;

  enum {
    CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,

    LhsFlags = evaluator<Lhs>::Flags,
    RhsFlags = evaluator<Rhs>::Flags,
    SameType = is_same<typename Lhs::Scalar,typename Rhs::Scalar>::value,
    StorageOrdersAgree = (int(LhsFlags)&RowMajorBit)==(int(RhsFlags)&RowMajorBit),
    Flags0 = (int(LhsFlags) | int(RhsFlags)) & (
        HereditaryBits
      | (int(LhsFlags) & int(RhsFlags) &
          ( (StorageOrdersAgree ? LinearAccessBit : 0)
          | (functor_traits<BinaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
          )
        )
      ),
    Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit),
    Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<Lhs>::Alignment,evaluator<Rhs>::Alignment)
  };

  EIGEN_DEVICE_FUNC explicit binary_evaluator(const XprType& xpr)
    : m_functor(xpr.functor()),
      m_lhsImpl(xpr.lhs()),
      m_rhsImpl(xpr.rhs())
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  typedef typename XprType::CoeffReturnType CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    return m_functor(m_lhsImpl.coeff(row, col), m_rhsImpl.coeff(row, col));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return m_functor(m_lhsImpl.coeff(index), m_rhsImpl.coeff(index));
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index row, Index col) const
  {
    return m_functor.packetOp(m_lhsImpl.template packet<LoadMode,PacketType>(row, col),
                              m_rhsImpl.template packet<LoadMode,PacketType>(row, col));
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index index) const
  {
    return m_functor.packetOp(m_lhsImpl.template packet<LoadMode,PacketType>(index),
                              m_rhsImpl.template packet<LoadMode,PacketType>(index));
  }

protected:
  const BinaryOp m_functor;
  evaluator<Lhs> m_lhsImpl;
  evaluator<Rhs> m_rhsImpl;
};

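// [Example, for illustration only; not part of the library] The binary evaluator above
// merges the operands' flags: PacketAccessBit survives only if both sides agree on
// storage order, share the same scalar type, and the functor is vectorizable.
// Names a, b, sum, ev are made up:
//
//   Eigen::MatrixXf a(2,2), b(2,2);
//   a.setOnes();  b.setConstant(2.f);
//   const auto sum = a + b;                              // CwiseBinaryOp expression
//   Eigen::internal::evaluator<decltype(sum)> ev(sum);
//   float v = ev.coeff(1,1);                             // a(1,1) + b(1,1) == 3.f
//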
// -------------------- CwiseUnaryView --------------------

template<typename UnaryOp, typename ArgType>
struct unary_evaluator<CwiseUnaryView<UnaryOp, ArgType>, IndexBased>
  : evaluator_base<CwiseUnaryView<UnaryOp, ArgType> >
{
  typedef CwiseUnaryView<UnaryOp, ArgType> XprType;

  enum {
    CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost,

    Flags = (evaluator<ArgType>::Flags & (HereditaryBits | LinearAccessBit | DirectAccessBit)),

    Alignment = 0 // FIXME it is not very clear why alignment is necessarily lost...
  };

  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& op)
    : m_unaryOp(op.functor()),
      m_argImpl(op.nestedExpression())
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    return m_unaryOp(m_argImpl.coeff(row, col));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return m_unaryOp(m_argImpl.coeff(index));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index row, Index col)
  {
    return m_unaryOp(m_argImpl.coeffRef(row, col));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index index)
  {
    return m_unaryOp(m_argImpl.coeffRef(index));
  }

protected:
  const UnaryOp m_unaryOp;
  evaluator<ArgType> m_argImpl;
};

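// [Example, for illustration only; not part of the library] Unlike CwiseUnaryOp, a
// CwiseUnaryView exposes lvalues: its functor returns a reference into the argument's
// storage, which is why coeffRef() exists above and DirectAccessBit can be preserved.
//
//   Eigen::MatrixXcd m = Eigen::MatrixXcd::Zero(2,2);
//   m.real() = Eigen::MatrixXd::Ones(2,2);               // assignment writes through coeffRef() above
//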
// -------------------- Map --------------------

// FIXME perhaps the PlainObjectType could be provided by Derived::PlainObject ?
// but that might complicate template specialization
template<typename Derived, typename PlainObjectType>
struct mapbase_evaluator;

template<typename Derived, typename PlainObjectType>
struct mapbase_evaluator : evaluator_base<Derived>
{
  typedef Derived XprType;
  typedef typename XprType::PointerType PointerType;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  enum {
    IsRowMajor = XprType::RowsAtCompileTime,
    ColsAtCompileTime = XprType::ColsAtCompileTime,
    CoeffReadCost = NumTraits<Scalar>::ReadCost
  };

  EIGEN_DEVICE_FUNC explicit mapbase_evaluator(const XprType& map)
    : m_data(const_cast<PointerType>(map.data())),
      m_innerStride(map.innerStride()),
      m_outerStride(map.outerStride())
  {
    EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(evaluator<Derived>::Flags&PacketAccessBit, internal::inner_stride_at_compile_time<Derived>::ret==1),
                        PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    return m_data[col * colStride() + row * rowStride()];
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return m_data[index * m_innerStride.value()];
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index row, Index col)
  {
    return m_data[col * colStride() + row * rowStride()];
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index index)
  {
    return m_data[index * m_innerStride.value()];
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index row, Index col) const
  {
    PointerType ptr = m_data + row * rowStride() + col * colStride();
    return internal::ploadt<PacketType, LoadMode>(ptr);
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index index) const
  {
    return internal::ploadt<PacketType, LoadMode>(m_data + index * m_innerStride.value());
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index row, Index col, const PacketType& x)
  {
    PointerType ptr = m_data + row * rowStride() + col * colStride();
    return internal::pstoret<Scalar, PacketType, StoreMode>(ptr, x);
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketType& x)
  {
    internal::pstoret<Scalar, PacketType, StoreMode>(m_data + index * m_innerStride.value(), x);
  }
protected:
  EIGEN_DEVICE_FUNC
  inline Index rowStride() const { return XprType::IsRowMajor ? m_outerStride.value() : m_innerStride.value(); }
  EIGEN_DEVICE_FUNC
  inline Index colStride() const { return XprType::IsRowMajor ? m_innerStride.value() : m_outerStride.value(); }

  PointerType m_data;
  const internal::variable_if_dynamic<Index, XprType::InnerStrideAtCompileTime> m_innerStride;
  const internal::variable_if_dynamic<Index, XprType::OuterStrideAtCompileTime> m_outerStride;
};

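// [Example, for illustration only; not part of the library] mapbase_evaluator addresses
// external memory through the stored inner/outer strides: coeff(row,col) reads
// m_data[col*colStride() + row*rowStride()]. The Map evaluator defined next builds on it.
// Names buf, m, ev are made up:
//
//   float buf[8] = {0,1,2,3,4,5,6,7};
//   Eigen::Map<Eigen::MatrixXf, 0, Eigen::OuterStride<> > m(buf, 2, 3, Eigen::OuterStride<>(3));
//   Eigen::internal::evaluator<decltype(m)> ev(m);
//   float x = ev.coeff(1, 2);                            // buf[2*3 + 1*1] == buf[7] == 7
//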
template<typename PlainObjectType, int MapOptions, typename StrideType>
struct evaluator<Map<PlainObjectType, MapOptions, StrideType> >
  : public mapbase_evaluator<Map<PlainObjectType, MapOptions, StrideType>, PlainObjectType>
{
  typedef Map<PlainObjectType, MapOptions, StrideType> XprType;
  typedef typename XprType::Scalar Scalar;
  // TODO: should check for smaller packet types once we can handle multi-sized packet types
  typedef typename packet_traits<Scalar>::type PacketScalar;

  enum {
    InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0
                             ? int(PlainObjectType::InnerStrideAtCompileTime)
                             : int(StrideType::InnerStrideAtCompileTime),
    OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0
                             ? int(PlainObjectType::OuterStrideAtCompileTime)
                             : int(StrideType::OuterStrideAtCompileTime),
    HasNoInnerStride = InnerStrideAtCompileTime == 1,
    HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0,
    HasNoStride = HasNoInnerStride && HasNoOuterStride,
    IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic,

    PacketAccessMask = bool(HasNoInnerStride) ? ~int(0) : ~int(PacketAccessBit),
    LinearAccessMask = bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime) ? ~int(0) : ~int(LinearAccessBit),
    Flags = int( evaluator<PlainObjectType>::Flags) & (LinearAccessMask&PacketAccessMask),

    Alignment = int(MapOptions)&int(AlignedMask)
  };

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& map)
    : mapbase_evaluator<XprType, PlainObjectType>(map)
  { }
};

// -------------------- Ref --------------------

template<typename PlainObjectType, int RefOptions, typename StrideType>
struct evaluator<Ref<PlainObjectType, RefOptions, StrideType> >
  : public mapbase_evaluator<Ref<PlainObjectType, RefOptions, StrideType>, PlainObjectType>
{
  typedef Ref<PlainObjectType, RefOptions, StrideType> XprType;

  enum {
    Flags = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Flags,
    Alignment = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Alignment
  };

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& ref)
    : mapbase_evaluator<XprType, PlainObjectType>(ref)
  { }
};

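// [Example, for illustration only; not part of the library] The enum logic above demotes
// a Map's flags according to its compile-time strides: a non-unit inner stride masks out
// PacketAccessBit, and LinearAccessBit is kept only for vectors or maps without custom
// strides. For instance, with a made-up alias:
//
//   typedef Eigen::Map<Eigen::VectorXf, 0, Eigen::InnerStride<2> > StridedVec;
//   // evaluator<StridedVec>::Flags has PacketAccessBit cleared (HasNoInnerStride is false),
//   // while LinearAccessBit is kept because VectorXf is a vector at compile time.
//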
// -------------------- Block --------------------

template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel,
         bool HasDirectAccess = internal::has_direct_access<ArgType>::ret> struct block_evaluator;

template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
  : block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel>
{
  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
  typedef typename XprType::Scalar Scalar;
  // TODO: should check for smaller packet types once we can handle multi-sized packet types
  typedef typename packet_traits<Scalar>::type PacketScalar;

  enum {
    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,

    RowsAtCompileTime = traits<XprType>::RowsAtCompileTime,
    ColsAtCompileTime = traits<XprType>::ColsAtCompileTime,
    MaxRowsAtCompileTime = traits<XprType>::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = traits<XprType>::MaxColsAtCompileTime,

    ArgTypeIsRowMajor = (int(evaluator<ArgType>::Flags)&RowMajorBit) != 0,
    IsRowMajor = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? 1
               : (MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1) ? 0
               : ArgTypeIsRowMajor,
    HasSameStorageOrderAsArgType = (IsRowMajor == ArgTypeIsRowMajor),
    InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
    InnerStrideAtCompileTime = HasSameStorageOrderAsArgType
                             ? int(inner_stride_at_compile_time<ArgType>::ret)
                             : int(outer_stride_at_compile_time<ArgType>::ret),
    OuterStrideAtCompileTime = HasSameStorageOrderAsArgType
                             ? int(outer_stride_at_compile_time<ArgType>::ret)
                             : int(inner_stride_at_compile_time<ArgType>::ret),
    MaskPacketAccessBit = (InnerStrideAtCompileTime == 1) ? PacketAccessBit : 0,

    FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (evaluator<ArgType>::Flags&LinearAccessBit))) ? LinearAccessBit : 0,
    FlagsRowMajorBit = XprType::Flags&RowMajorBit,
    Flags0 = evaluator<ArgType>::Flags & ( (HereditaryBits & ~RowMajorBit) |
                                           DirectAccessBit |
                                           MaskPacketAccessBit),
    Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit,

    PacketAlignment = unpacket_traits<PacketScalar>::alignment,
    Alignment0 = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % int(PacketAlignment)) == 0)) ? int(PacketAlignment) : 0,
    Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ArgType>::Alignment, Alignment0)
  };
  typedef block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel> block_evaluator_type;
  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& block) : block_evaluator_type(block)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }
};

// no direct-access => dispatch to a unary evaluator
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /*HasDirectAccess*/ false>
  : unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
{
  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;

  EIGEN_DEVICE_FUNC explicit block_evaluator(const XprType& block)
    : unary_evaluator<XprType>(block)
  {}
};

template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, IndexBased>
  : evaluator_base<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
{
  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;

  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& block)
    : m_argImpl(block.nestedExpression()),
      m_startRow(block.startRow()),
      m_startCol(block.startCol())
  { }

  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  enum {
    RowsAtCompileTime = XprType::RowsAtCompileTime
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    return m_argImpl.coeff(m_startRow.value() + row, m_startCol.value() + col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return coeff(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index row, Index col)
  {
    return m_argImpl.coeffRef(m_startRow.value() + row, m_startCol.value() + col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index index)
  {
    return coeffRef(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index row, Index col) const
  {
    return m_argImpl.template packet<LoadMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col);
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index index) const
  {
    return packet<LoadMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
                                       RowsAtCompileTime == 1 ? index : 0);
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index row, Index col, const PacketType& x)
  {
    return m_argImpl.template writePacket<StoreMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col, x);
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketType& x)
  {
    return writePacket<StoreMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
                                             RowsAtCompileTime == 1 ? index : 0,
                                             x);
  }

protected:
  evaluator<ArgType> m_argImpl;
  const variable_if_dynamic<Index, (ArgType::RowsAtCompileTime == 1 && BlockRows==1) ? 0 : Dynamic> m_startRow;
  const variable_if_dynamic<Index, (ArgType::ColsAtCompileTime == 1 && BlockCols==1) ? 0 : Dynamic> m_startCol;
};

// TODO: This evaluator does not actually use the child evaluator;
// all action is via the data() as returned by the Block expression.

template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /* HasDirectAccess */ true>
  : mapbase_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>,
                      typename Block<ArgType, BlockRows, BlockCols, InnerPanel>::PlainObject>
{
  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
  typedef typename XprType::Scalar Scalar;

  EIGEN_DEVICE_FUNC explicit block_evaluator(const XprType& block)
    : mapbase_evaluator<XprType, typename XprType::PlainObject>(block)
  {
    // TODO: for the 3.3 release, this should be turned to an internal assertion, but let's keep it as is for the beta lifetime
    eigen_assert(((internal::UIntPtr(block.data()) % EIGEN_PLAIN_ENUM_MAX(1,evaluator<XprType>::Alignment)) == 0) && "data is not aligned");
  }
};

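// [Example, for illustration only; not part of the library] Block evaluation has two
// paths, chosen by HasDirectAccess above: blocks of storage-backed objects reuse
// mapbase_evaluator on the block's data pointer, while blocks of general expressions
// shift the requested indices by startRow/startCol and forward to the nested evaluator.
// Names a, b, blk, ev are made up:
//
//   Eigen::Matrix4i a = Eigen::Matrix4i::Identity(), b = Eigen::Matrix4i::Identity();
//   const auto blk = (a + b).block(1,1,3,3);             // no direct access: index-shifting path
//   Eigen::internal::evaluator<decltype(blk)> ev(blk);
//   int v = ev.coeff(0,0);                               // reads (a+b)(1,1) == 2
//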
// -------------------- Select --------------------
// NOTE shall we introduce a ternary_evaluator?

// TODO enable vectorization for Select
template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
struct evaluator<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
  : evaluator_base<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
{
  typedef Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> XprType;
  enum {
    CoeffReadCost = evaluator<ConditionMatrixType>::CoeffReadCost
                  + EIGEN_PLAIN_ENUM_MAX(evaluator<ThenMatrixType>::CoeffReadCost,
                                         evaluator<ElseMatrixType>::CoeffReadCost),

    Flags = (unsigned int)evaluator<ThenMatrixType>::Flags & evaluator<ElseMatrixType>::Flags & HereditaryBits,

    Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ThenMatrixType>::Alignment, evaluator<ElseMatrixType>::Alignment)
  };

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& select)
    : m_conditionImpl(select.conditionMatrix()),
      m_thenImpl(select.thenMatrix()),
      m_elseImpl(select.elseMatrix())
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  typedef typename XprType::CoeffReturnType CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    if (m_conditionImpl.coeff(row, col))
      return m_thenImpl.coeff(row, col);
    else
      return m_elseImpl.coeff(row, col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    if (m_conditionImpl.coeff(index))
      return m_thenImpl.coeff(index);
    else
      return m_elseImpl.coeff(index);
  }

protected:
  evaluator<ConditionMatrixType> m_conditionImpl;
  evaluator<ThenMatrixType> m_thenImpl;
  evaluator<ElseMatrixType> m_elseImpl;
};

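// [Example, for illustration only; not part of the library] The Select evaluator keeps
// three nested evaluators and, for each coefficient, evaluates the condition and then
// only the branch that is actually needed. Names x, clamped, ev are made up:
//
//   Eigen::ArrayXf x = Eigen::ArrayXf::LinSpaced(4, -1.f, 2.f);
//   const auto clamped = (x > 0.f).select(x, 0.f);       // Select expression
//   Eigen::internal::evaluator<decltype(clamped)> ev(clamped);
//   float v = ev.coeff(0);                               // x(0) == -1 fails the test, yields 0.f
//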
// -------------------- Replicate --------------------

template<typename ArgType, int RowFactor, int ColFactor>
struct unary_evaluator<Replicate<ArgType, RowFactor, ColFactor> >
  : evaluator_base<Replicate<ArgType, RowFactor, ColFactor> >
{
  typedef Replicate<ArgType, RowFactor, ColFactor> XprType;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  enum {
    Factor = (RowFactor==Dynamic || ColFactor==Dynamic) ? Dynamic : RowFactor*ColFactor
  };
  typedef typename internal::nested_eval<ArgType,Factor>::type ArgTypeNested;
  typedef typename internal::remove_all<ArgTypeNested>::type ArgTypeNestedCleaned;

  enum {
    CoeffReadCost = evaluator<ArgTypeNestedCleaned>::CoeffReadCost,
    LinearAccessMask = XprType::IsVectorAtCompileTime ? LinearAccessBit : 0,
    Flags = (evaluator<ArgTypeNestedCleaned>::Flags & (HereditaryBits|LinearAccessMask) & ~RowMajorBit) | (traits<XprType>::Flags & RowMajorBit),

    Alignment = evaluator<ArgTypeNestedCleaned>::Alignment
  };

  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& replicate)
    : m_arg(replicate.nestedExpression()),
      m_argImpl(m_arg),
      m_rows(replicate.nestedExpression().rows()),
      m_cols(replicate.nestedExpression().cols())
  {}

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    // try to avoid using modulo; this is a pure optimization strategy
    const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0
                           : RowFactor==1 ? row
                           : row % m_rows.value();
    const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0
                           : ColFactor==1 ? col
                           : col % m_cols.value();

    return m_argImpl.coeff(actual_row, actual_col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    // try to avoid using modulo; this is a pure optimization strategy
    const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1
                             ? (ColFactor==1 ? index : index%m_cols.value())
                             : (RowFactor==1 ? index : index%m_rows.value());

    return m_argImpl.coeff(actual_index);
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index row, Index col) const
  {
    const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0
                           : RowFactor==1 ? row
                           : row % m_rows.value();
    const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0
                           : ColFactor==1 ? col
                           : col % m_cols.value();

    return m_argImpl.template packet<LoadMode,PacketType>(actual_row, actual_col);
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index index) const
  {
    const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1
                             ? (ColFactor==1 ? index : index%m_cols.value())
                             : (RowFactor==1 ? index : index%m_rows.value());

    return m_argImpl.template packet<LoadMode,PacketType>(actual_index);
  }

protected:
  const ArgTypeNested m_arg;
  evaluator<ArgTypeNestedCleaned> m_argImpl;
  const variable_if_dynamic<Index, ArgType::RowsAtCompileTime> m_rows;
  const variable_if_dynamic<Index, ArgType::ColsAtCompileTime> m_cols;
};

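// [Example, for illustration only; not part of the library] The Replicate evaluator maps
// an index in the replicated grid back into the original expression, falling back to the
// % operator only when it cannot be avoided at compile time (factor equal to 1, or a
// single row/column). Names v, r, ev are made up:
//
//   Eigen::Vector3i v(10, 20, 30);
//   const auto r = v.replicate(2, 1);                    // 6x1 expression repeating v twice
//   Eigen::internal::evaluator<decltype(r)> ev(r);
//   int x = ev.coeff(4, 0);                              // 4 % 3 == 1 -> v(1) == 20
//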
// -------------------- PartialReduxExpr --------------------

template< typename ArgType, typename MemberOp, int Direction>
struct evaluator<PartialReduxExpr<ArgType, MemberOp, Direction> >
  : evaluator_base<PartialReduxExpr<ArgType, MemberOp, Direction> >
{
  typedef PartialReduxExpr<ArgType, MemberOp, Direction> XprType;
  typedef typename internal::nested_eval<ArgType,1>::type ArgTypeNested;
  typedef typename internal::remove_all<ArgTypeNested>::type ArgTypeNestedCleaned;
  typedef typename ArgType::Scalar InputScalar;
  typedef typename XprType::Scalar Scalar;
  enum {
    TraversalSize = Direction==int(Vertical) ? int(ArgType::RowsAtCompileTime) : int(ArgType::ColsAtCompileTime)
  };
  typedef typename MemberOp::template Cost<InputScalar,int(TraversalSize)> CostOpType;
  enum {
    CoeffReadCost = TraversalSize==Dynamic ? HugeCost
                  : TraversalSize * evaluator<ArgType>::CoeffReadCost + int(CostOpType::value),

    Flags = (traits<XprType>::Flags&RowMajorBit) | (evaluator<ArgType>::Flags&(HereditaryBits&(~RowMajorBit))) | LinearAccessBit,

    Alignment = 0 // FIXME this will need to be improved once PartialReduxExpr is vectorized
  };

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType xpr)
    : m_arg(xpr.nestedExpression()), m_functor(xpr.functor())
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(TraversalSize==Dynamic ? HugeCost : int(CostOpType::value));
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  typedef typename XprType::CoeffReturnType CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  const Scalar coeff(Index i, Index j) const
  {
    if (Direction==Vertical)
      return m_functor(m_arg.col(j));
    else
      return m_functor(m_arg.row(i));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  const Scalar coeff(Index index) const
  {
    if (Direction==Vertical)
      return m_functor(m_arg.col(index));
    else
      return m_functor(m_arg.row(index));
  }

protected:
  typename internal::add_const_on_value_type<ArgTypeNested>::type m_arg;
  const MemberOp m_functor;
};

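// [Example, for illustration only; not part of the library] A partial reduction keeps the
// whole nested expression and re-runs the member functor on one column (or row) per
// requested coefficient, which is why CoeffReadCost scales with TraversalSize.
// Names m, s, ev are made up:
//
//   Eigen::MatrixXd m(2,3);
//   m << 1, 2, 3,
//        4, 5, 6;
//   const auto s = m.colwise().sum();                    // PartialReduxExpr, Direction == Vertical
//   Eigen::internal::evaluator<decltype(s)> ev(s);
//   double v = ev.coeff(2);                              // sums column 2: 3 + 6 == 9
//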
// -------------------- MatrixWrapper and ArrayWrapper --------------------
//
// evaluator_wrapper_base<T> is a common base class for the
// MatrixWrapper and ArrayWrapper evaluators.

template<typename XprType>
struct evaluator_wrapper_base
  : evaluator_base<XprType>
{
  typedef typename remove_all<typename XprType::NestedExpressionType>::type ArgType;
  enum {
    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
    Flags = evaluator<ArgType>::Flags,
    Alignment = evaluator<ArgType>::Alignment
  };

  EIGEN_DEVICE_FUNC explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {}

  typedef typename ArgType::Scalar Scalar;
  typedef typename ArgType::CoeffReturnType CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    return m_argImpl.coeff(row, col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return m_argImpl.coeff(index);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index row, Index col)
  {
    return m_argImpl.coeffRef(row, col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index index)
  {
    return m_argImpl.coeffRef(index);
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index row, Index col) const
  {
    return m_argImpl.template packet<LoadMode,PacketType>(row, col);
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index index) const
  {
    return m_argImpl.template packet<LoadMode,PacketType>(index);
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index row, Index col, const PacketType& x)
  {
    m_argImpl.template writePacket<StoreMode>(row, col, x);
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketType& x)
  {
    m_argImpl.template writePacket<StoreMode>(index, x);
  }

protected:
  evaluator<ArgType> m_argImpl;
};

template<typename TArgType>
struct unary_evaluator<MatrixWrapper<TArgType> >
  : evaluator_wrapper_base<MatrixWrapper<TArgType> >
{
  typedef MatrixWrapper<TArgType> XprType;

  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& wrapper)
    : evaluator_wrapper_base<MatrixWrapper<TArgType> >(wrapper.nestedExpression())
  { }
};

template<typename TArgType>
struct unary_evaluator<ArrayWrapper<TArgType> >
  : evaluator_wrapper_base<ArrayWrapper<TArgType> >
{
  typedef ArrayWrapper<TArgType> XprType;

  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& wrapper)
    : evaluator_wrapper_base<ArrayWrapper<TArgType> >(wrapper.nestedExpression())
  { }
};

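// [Example, for illustration only; not part of the library] MatrixWrapper and
// ArrayWrapper only switch the API flavor of an expression (.matrix() / .array());
// their evaluators above forward every access verbatim to the wrapped expression.
// Names m, aw, ev are made up:
//
//   Eigen::MatrixXd m = Eigen::MatrixXd::Ones(2,2);
//   const auto aw = m.array();                           // ArrayWrapper around m, no copy
//   Eigen::internal::evaluator<decltype(aw)> ev(aw);
//   double v = ev.coeff(0,0);                            // reads m(0,0) through the wrapper
//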
PacketSize : 1 01486 }; 01487 typedef internal::reverse_packet_cond<PacketType,ReversePacket> reverse_packet; 01488 return reverse_packet::run(m_argImpl.template packet<LoadMode,PacketType>( 01489 ReverseRow ? m_rows.value() - row - OffsetRow : row, 01490 ReverseCol ? m_cols.value() - col - OffsetCol : col)); 01491 } 01492 01493 template<int LoadMode, typename PacketType> 01494 EIGEN_STRONG_INLINE 01495 PacketType packet(Index index) const 01496 { 01497 enum { PacketSize = unpacket_traits<PacketType>::size }; 01498 return preverse(m_argImpl.template packet<LoadMode,PacketType>(m_rows.value() * m_cols.value() - index - PacketSize)); 01499 } 01500 01501 template<int LoadMode, typename PacketType> 01502 EIGEN_STRONG_INLINE 01503 void writePacket(Index row, Index col, const PacketType& x) 01504 { 01505 // FIXME we could factorize some code with packet(i,j) 01506 enum { 01507 PacketSize = unpacket_traits<PacketType>::size, 01508 OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1, 01509 OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1 01510 }; 01511 typedef internal::reverse_packet_cond<PacketType,ReversePacket> reverse_packet; 01512 m_argImpl.template writePacket<LoadMode>( 01513 ReverseRow ? m_rows.value() - row - OffsetRow : row, 01514 ReverseCol ? m_cols.value() - col - OffsetCol : col, 01515 reverse_packet::run(x)); 01516 } 01517 01518 template<int LoadMode, typename PacketType> 01519 EIGEN_STRONG_INLINE 01520 void writePacket(Index index, const PacketType& x) 01521 { 01522 enum { PacketSize = unpacket_traits<PacketType>::size }; 01523 m_argImpl.template writePacket<LoadMode> 01524 (m_rows.value() * m_cols.value() - index - PacketSize, preverse(x)); 01525 } 01526 01527 protected: 01528 evaluator<ArgType> m_argImpl; 01529 01530 // If we do not reverse rows, then we do not need to know the number of rows; same for columns 01531 // Nonetheless, in this case it is important to set to 1 such that the coeff(index) method works fine for vectors. 01532 const variable_if_dynamic<Index, ReverseRow ? ArgType::RowsAtCompileTime : 1> m_rows; 01533 const variable_if_dynamic<Index, ReverseCol ? 
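// Illustrative sketch (not part of the library source): the Reverse evaluator only remaps
// indices, so coeff(row, col) reads the mirrored coefficient of the nested expression.
// For instance, with Direction==BothDirections:
//
//   Eigen::Matrix2f m;
//   m << 1, 2,
//        3, 4;
//   auto r = m.reverse();                             // Reverse<Matrix2f, BothDirections>
//   Eigen::internal::evaluator<decltype(r)> eval(r);
//   // eval.coeff(0, 0) reads m(1, 1), i.e. 4
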
// -------------------- Diagonal --------------------

template<typename ArgType, int DiagIndex>
struct evaluator<Diagonal<ArgType, DiagIndex> >
  : evaluator_base<Diagonal<ArgType, DiagIndex> >
{
  typedef Diagonal<ArgType, DiagIndex> XprType;

  enum {
    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,

    Flags = (unsigned int)(evaluator<ArgType>::Flags & (HereditaryBits | DirectAccessBit) & ~RowMajorBit) | LinearAccessBit,

    Alignment = 0
  };

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& diagonal)
    : m_argImpl(diagonal.nestedExpression()),
      m_index(diagonal.index())
  { }

  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index) const
  {
    return m_argImpl.coeff(row + rowOffset(), row + colOffset());
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return m_argImpl.coeff(index + rowOffset(), index + colOffset());
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index row, Index)
  {
    return m_argImpl.coeffRef(row + rowOffset(), row + colOffset());
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index index)
  {
    return m_argImpl.coeffRef(index + rowOffset(), index + colOffset());
  }

protected:
  evaluator<ArgType> m_argImpl;
  const internal::variable_if_dynamicindex<Index, XprType::DiagIndex> m_index;

private:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value() > 0 ? 0 : -m_index.value(); }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value() > 0 ? m_index.value() : 0; }
};

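// Illustrative sketch (not part of the library source): the Diagonal evaluator maps the 1D
// index i to (i + rowOffset(), i + colOffset()) in the nested expression, where a positive
// DiagIndex selects a super-diagonal (column offset) and a negative one a sub-diagonal (row offset):
//
//   Eigen::Matrix3f m = Eigen::Matrix3f::Identity();
//   auto d = m.diagonal<1>();                         // Diagonal<Matrix3f, 1>, first super-diagonal
//   Eigen::internal::evaluator<decltype(d)> eval(d);
//   // eval.coeff(0) reads m(0, 1): rowOffset()==0, colOffset()==1
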
//----------------------------------------------------------------------
// deprecated code
//----------------------------------------------------------------------

// -------------------- EvalToTemp --------------------

// expression class for evaluating nested expression to a temporary

template<typename ArgType> class EvalToTemp;

template<typename ArgType>
struct traits<EvalToTemp<ArgType> >
  : public traits<ArgType>
{ };

template<typename ArgType>
class EvalToTemp
  : public dense_xpr_base<EvalToTemp<ArgType> >::type
{
 public:

  typedef typename dense_xpr_base<EvalToTemp>::type Base;
  EIGEN_GENERIC_PUBLIC_INTERFACE(EvalToTemp)

  explicit EvalToTemp(const ArgType& arg)
    : m_arg(arg)
  { }

  const ArgType& arg() const
  {
    return m_arg;
  }

  Index rows() const
  {
    return m_arg.rows();
  }

  Index cols() const
  {
    return m_arg.cols();
  }

 private:
  const ArgType& m_arg;
};

template<typename ArgType>
struct evaluator<EvalToTemp<ArgType> >
  : public evaluator<typename ArgType::PlainObject>
{
  typedef EvalToTemp<ArgType>            XprType;
  typedef typename ArgType::PlainObject  PlainObject;
  typedef evaluator<PlainObject>         Base;

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr)
    : m_result(xpr.arg())
  {
    ::new (static_cast<Base*>(this)) Base(m_result);
  }

  // This constructor is used when nesting an EvalTo evaluator in another evaluator
  EIGEN_DEVICE_FUNC evaluator(const ArgType& arg)
    : m_result(arg)
  {
    ::new (static_cast<Base*>(this)) Base(m_result);
  }

protected:
  PlainObject m_result;
};

} // namespace internal

} // end namespace Eigen

#endif // EIGEN_COREEVALUATORS_H