Eigen-unsupported 3.3.3 · TensorAssign.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
#define EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H

namespace Eigen {

/** \class TensorAssign
  * \ingroup CXX11_Tensor_Module
  *
  * \brief The tensor assignment class.
  *
  * This class represents the assignment of the values resulting from the evaluation of
  * the rhs expression to the memory locations denoted by the lhs expression.
  */
namespace internal {
template<typename LhsXprType, typename RhsXprType>
struct traits<TensorAssignOp<LhsXprType, RhsXprType> >
{
  typedef typename LhsXprType::Scalar Scalar;
  typedef typename traits<LhsXprType>::StorageKind StorageKind;
  typedef typename promote_index_type<typename traits<LhsXprType>::Index,
                                      typename traits<RhsXprType>::Index>::type Index;
  typedef typename LhsXprType::Nested LhsNested;
  typedef typename RhsXprType::Nested RhsNested;
  typedef typename remove_reference<LhsNested>::type _LhsNested;
  typedef typename remove_reference<RhsNested>::type _RhsNested;
  static const std::size_t NumDimensions = internal::traits<LhsXprType>::NumDimensions;
  static const int Layout = internal::traits<LhsXprType>::Layout;

  enum {
    Flags = 0
  };
};

template<typename LhsXprType, typename RhsXprType>
struct eval<TensorAssignOp<LhsXprType, RhsXprType>, Eigen::Dense>
{
  typedef const TensorAssignOp<LhsXprType, RhsXprType>& type;
};

template<typename LhsXprType, typename RhsXprType>
struct nested<TensorAssignOp<LhsXprType, RhsXprType>, 1, typename eval<TensorAssignOp<LhsXprType, RhsXprType> >::type>
{
  typedef TensorAssignOp<LhsXprType, RhsXprType> type;
};

}  // end namespace internal



template<typename LhsXprType, typename RhsXprType>
class TensorAssignOp : public TensorBase<TensorAssignOp<LhsXprType, RhsXprType> >
{
  public:
    typedef typename Eigen::internal::traits<TensorAssignOp>::Scalar Scalar;
    typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
    typedef typename LhsXprType::CoeffReturnType CoeffReturnType;
    typedef typename Eigen::internal::nested<TensorAssignOp>::type Nested;
    typedef typename Eigen::internal::traits<TensorAssignOp>::StorageKind StorageKind;
    typedef typename Eigen::internal::traits<TensorAssignOp>::Index Index;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorAssignOp(LhsXprType& lhs, const RhsXprType& rhs)
        : m_lhs_xpr(lhs), m_rhs_xpr(rhs) {}

    /** \returns the nested expressions */
    EIGEN_DEVICE_FUNC
    typename internal::remove_all<typename LhsXprType::Nested>::type&
    lhsExpression() const { return *((typename internal::remove_all<typename LhsXprType::Nested>::type*)&m_lhs_xpr); }

    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename RhsXprType::Nested>::type&
    rhsExpression() const { return m_rhs_xpr; }

  protected:
    typename internal::remove_all<typename LhsXprType::Nested>::type& m_lhs_xpr;
    const typename internal::remove_all<typename RhsXprType::Nested>::type& m_rhs_xpr;
};


template<typename LeftArgType, typename RightArgType, typename Device>
struct TensorEvaluator<const TensorAssignOp<LeftArgType, RightArgType>, Device>
{
  typedef TensorAssignOp<LeftArgType, RightArgType> XprType;
  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef typename TensorEvaluator<RightArgType, Device>::Dimensions Dimensions;
  static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;

  enum {
    IsAligned = TensorEvaluator<LeftArgType, Device>::IsAligned & TensorEvaluator<RightArgType, Device>::IsAligned,
    PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess & TensorEvaluator<RightArgType, Device>::PacketAccess,
    Layout = TensorEvaluator<LeftArgType, Device>::Layout,
    RawAccess = TensorEvaluator<LeftArgType, Device>::RawAccess
  };

  EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) :
      m_leftImpl(op.lhsExpression(), device),
      m_rightImpl(op.rhsExpression(), device)
  {
    EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) == static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE);
  }

  EIGEN_DEVICE_FUNC const Dimensions& dimensions() const
  {
    // The dimensions of the lhs and the rhs tensors should be equal to prevent
    // overflows and ensure the result is fully initialized.
    // TODO: use left impl instead if right impl dimensions are known at compile time.
    return m_rightImpl.dimensions();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
    eigen_assert(dimensions_match(m_leftImpl.dimensions(), m_rightImpl.dimensions()));
    m_leftImpl.evalSubExprsIfNeeded(NULL);
    // If the lhs provides raw access to its storage area (i.e. if m_leftImpl.data() returns a non
    // null value), attempt to evaluate the rhs expression in place. Returns true iff in place
    // evaluation isn't supported and the caller still needs to manually assign the values generated
    // by the rhs to the lhs.
    return m_rightImpl.evalSubExprsIfNeeded(m_leftImpl.data());
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
    m_leftImpl.cleanup();
    m_rightImpl.cleanup();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalScalar(Index i) {
    m_leftImpl.coeffRef(i) = m_rightImpl.coeff(i);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalPacket(Index i) {
    const int LhsStoreMode = TensorEvaluator<LeftArgType, Device>::IsAligned ? Aligned : Unaligned;
    const int RhsLoadMode = TensorEvaluator<RightArgType, Device>::IsAligned ? Aligned : Unaligned;
    m_leftImpl.template writePacket<LhsStoreMode>(i, m_rightImpl.template packet<RhsLoadMode>(i));
  }
  EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
  {
    return m_leftImpl.coeff(index);
  }
  template<int LoadMode>
  EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const
  {
    return m_leftImpl.template packet<LoadMode>(index);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
  costPerCoeff(bool vectorized) const {
    // We assume that evalPacket or evalScalar is called to perform the
    // assignment and account for the cost of the write here, but reduce left
    // cost by one load because we are using m_leftImpl.coeffRef.
    TensorOpCost left = m_leftImpl.costPerCoeff(vectorized);
    return m_rightImpl.costPerCoeff(vectorized) +
           TensorOpCost(
               numext::maxi(0.0, left.bytes_loaded() - sizeof(CoeffReturnType)),
               left.bytes_stored(), left.compute_cycles()) +
           TensorOpCost(0, sizeof(CoeffReturnType), 0, vectorized, PacketSize);
  }

  /// required by sycl in order to extract the accessor
  const TensorEvaluator<LeftArgType, Device>& left_impl() const { return m_leftImpl; }
  /// required by sycl in order to extract the accessor
  const TensorEvaluator<RightArgType, Device>& right_impl() const { return m_rightImpl; }

  EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return m_leftImpl.data(); }

 private:
  TensorEvaluator<LeftArgType, Device> m_leftImpl;
  TensorEvaluator<RightArgType, Device> m_rightImpl;
};

} // end namespace Eigen


#endif // EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
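For context, here is a minimal sketch of how this operation surfaces in user code: assigning one tensor expression to another builds a `TensorAssignOp` behind the scenes, which is then evaluated by the evaluator defined in this header. The tensor names below are illustrative, not part of this file.

```cpp
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  Eigen::Tensor<float, 2> a(2, 3);
  Eigen::Tensor<float, 2> b(2, 3);
  b.setConstant(1.5f);

  // operator= wraps the lhs and the rhs expression in a TensorAssignOp and
  // hands it to Eigen's internal executor, which drives the evaluator above.
  a = b * 2.0f;

  return 0;
}
```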
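And a sketch of driving the assign evaluator by hand, approximating the scalar path of Eigen's internal executor. It only uses the members defined in this header (`evalSubExprsIfNeeded`, `evalScalar`, `cleanup`) plus `DefaultDevice`; normal user code would never do this, and the variable names are hypothetical.

```cpp
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  Eigen::Tensor<float, 1> lhs(8);
  Eigen::Tensor<float, 1> rhs(8);
  rhs.setRandom();

  // Build the assignment expression explicitly (normally done by operator=).
  typedef Eigen::TensorAssignOp<Eigen::Tensor<float, 1>,
                                const Eigen::Tensor<float, 1> > Assign;
  Assign assign(lhs, rhs);

  // Evaluate with the default single-threaded CPU device.
  Eigen::DefaultDevice device;
  Eigen::TensorEvaluator<const Assign, Eigen::DefaultDevice> eval(assign, device);

  // If the rhs could not be evaluated directly into the lhs storage, copy
  // coefficient by coefficient (a real executor may use evalPacket instead).
  if (eval.evalSubExprsIfNeeded(NULL)) {
    const Eigen::Index total = lhs.size();
    for (Eigen::Index i = 0; i < total; ++i) {
      eval.evalScalar(i);
    }
  }
  eval.cleanup();
  return 0;
}
```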