TensorChipping.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H
#define EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H

namespace Eigen {

namespace internal {
template<DenseIndex DimId, typename XprType>
struct traits<TensorChippingOp<DimId, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions - 1;
  static const int Layout = XprTraits::Layout;
};

template<DenseIndex DimId, typename XprType>
struct eval<TensorChippingOp<DimId, XprType>, Eigen::Dense>
{
  typedef const TensorChippingOp<DimId, XprType>& type;
};

template<DenseIndex DimId, typename XprType>
struct nested<TensorChippingOp<DimId, XprType>, 1, typename eval<TensorChippingOp<DimId, XprType> >::type>
{
  typedef TensorChippingOp<DimId, XprType> type;
};

template <DenseIndex DimId>
struct DimensionId
{
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DimensionId(DenseIndex dim) {
    eigen_assert(dim == DimId);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex actualDim() const {
    return DimId;
  }
};
template <>
struct DimensionId<Dynamic>
{
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DimensionId(DenseIndex dim) : actual_dim(dim) {
    eigen_assert(dim >= 0);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex actualDim() const {
    return actual_dim;
  }
 private:
  const DenseIndex actual_dim;
};
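// Added commentary: DimensionId records which dimension is being chipped.
// When DimId is fixed at compile time, the primary template above keeps no
// state and simply returns DimId; the DimensionId<Dynamic> specialization
// instead carries the dimension as a runtime member, at the cost of one
// DenseIndex of storage per expression.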


}  // end namespace internal



template<DenseIndex DimId, typename XprType>
class TensorChippingOp : public TensorBase<TensorChippingOp<DimId, XprType> >
{
  public:
  typedef typename Eigen::internal::traits<TensorChippingOp>::Scalar Scalar;
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorChippingOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorChippingOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorChippingOp>::Index Index;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorChippingOp(const XprType& expr, const Index offset, const Index dim)
      : m_xpr(expr), m_offset(offset), m_dim(dim) {
  }

  EIGEN_DEVICE_FUNC
  const Index offset() const { return m_offset; }
  EIGEN_DEVICE_FUNC
  const Index dim() const { return m_dim.actualDim(); }

  EIGEN_DEVICE_FUNC
  const typename internal::remove_all<typename XprType::Nested>::type&
  expression() const { return m_xpr; }

  EIGEN_DEVICE_FUNC
  EIGEN_STRONG_INLINE TensorChippingOp& operator = (const TensorChippingOp& other)
  {
    typedef TensorAssignOp<TensorChippingOp, const TensorChippingOp> Assign;
    Assign assign(*this, other);
    internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    return *this;
  }

  template<typename OtherDerived>
  EIGEN_DEVICE_FUNC
  EIGEN_STRONG_INLINE TensorChippingOp& operator = (const OtherDerived& other)
  {
    typedef TensorAssignOp<TensorChippingOp, const OtherDerived> Assign;
    Assign assign(*this, other);
    internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    return *this;
  }

  protected:
    typename XprType::Nested m_xpr;
    const Index m_offset;
    const internal::DimensionId<DimId> m_dim;
};
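// Usage sketch (illustration only, not part of the original header):
// TensorChippingOp is normally created through TensorBase::chip(), which
// removes one dimension of a tensor by fixing it at a given offset.
// Assuming a rank-3 float tensor:
//
//   Eigen::Tensor<float, 3> input(4, 5, 6);
//   input.setRandom();
//   Eigen::Tensor<float, 2> slice = input.chip(2, 1);  // 4 x 6, slice(i, k) == input(i, 2, k)
//   Eigen::Tensor<float, 2> same  = input.chip<1>(2);  // same slice, dimension fixed at compile time (DimId = 1)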


// Eval as rvalue
template<DenseIndex DimId, typename ArgType, typename Device>
struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
{
  typedef TensorChippingOp<DimId, ArgType> XprType;
  static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  static const int NumDims = NumInputDims-1;
  typedef typename XprType::Index Index;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;


  enum {
    // Alignment can't be guaranteed at compile time since it depends on the
    // slice offsets.
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,  // to be implemented
    RawAccess = false
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device), m_dim(op.dim()), m_device(device)
  {
    EIGEN_STATIC_ASSERT((NumInputDims >= 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
    eigen_assert(NumInputDims > m_dim.actualDim());

    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    eigen_assert(op.offset() < input_dims[m_dim.actualDim()]);

    int j = 0;
    for (int i = 0; i < NumInputDims; ++i) {
      if (i != m_dim.actualDim()) {
        m_dimensions[j] = input_dims[i];
        ++j;
      }
    }

    m_stride = 1;
    m_inputStride = 1;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = 0; i < m_dim.actualDim(); ++i) {
        m_stride *= input_dims[i];
        m_inputStride *= input_dims[i];
      }
    } else {
      for (int i = NumInputDims-1; i > m_dim.actualDim(); --i) {
        m_stride *= input_dims[i];
        m_inputStride *= input_dims[i];
      }
    }
    m_inputStride *= input_dims[m_dim.actualDim()];
    m_inputOffset = m_stride * op.offset();
  }
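  // Worked example (added commentary): for a ColMajor 4x5x6 input chipped
  // along dimension 1 at offset 2, the constructor above computes
  //   m_stride      = 4       (product of the input dims before the chipped one)
  //   m_inputStride = 4 * 5   (m_stride times the size of the chipped dimension)
  //   m_inputOffset = 4 * 2   (m_stride times the requested offset)
  // so each run of m_stride consecutive output coefficients maps to
  // consecutive input coefficients, and successive runs skip m_inputStride
  // coefficients in the input.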

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    return m_impl.coeff(srcCoeff(index));
  }

  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());

    if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == 0) ||
        (static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == NumInputDims-1)) {
      // m_stride is equal to 1, so let's avoid the integer division.
      eigen_assert(m_stride == 1);
      Index inputIndex = index * m_inputStride + m_inputOffset;
      EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
      for (int i = 0; i < PacketSize; ++i) {
        values[i] = m_impl.coeff(inputIndex);
        inputIndex += m_inputStride;
      }
      PacketReturnType rslt = internal::pload<PacketReturnType>(values);
      return rslt;
    } else if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumInputDims - 1) ||
               (static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) {
      // m_stride is always greater than index, so let's avoid the integer division.
      eigen_assert(m_stride > index);
      return m_impl.template packet<LoadMode>(index + m_inputOffset);
    } else {
      const Index idx = index / m_stride;
      const Index rem = index - idx * m_stride;
      if (rem + PacketSize <= m_stride) {
        Index inputIndex = idx * m_inputStride + m_inputOffset + rem;
        return m_impl.template packet<LoadMode>(inputIndex);
      } else {
        // Crossing the stride boundary; fall back to the slow path.
        EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
        for (int i = 0; i < PacketSize; ++i) {
          values[i] = coeff(index);
          ++index;
        }
        PacketReturnType rslt = internal::pload<PacketReturnType>(values);
        return rslt;
      }
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
  costPerCoeff(bool vectorized) const {
    double cost = 0;
    if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) &&
         m_dim.actualDim() == 0) ||
        (static_cast<int>(Layout) == static_cast<int>(RowMajor) &&
         m_dim.actualDim() == NumInputDims - 1)) {
      cost += TensorOpCost::MulCost<Index>() + TensorOpCost::AddCost<Index>();
    } else if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) &&
                m_dim.actualDim() == NumInputDims - 1) ||
               (static_cast<int>(Layout) == static_cast<int>(RowMajor) &&
                m_dim.actualDim() == 0)) {
      cost += TensorOpCost::AddCost<Index>();
    } else {
      cost += 3 * TensorOpCost::MulCost<Index>() + TensorOpCost::DivCost<Index>() +
              3 * TensorOpCost::AddCost<Index>();
    }

    return m_impl.costPerCoeff(vectorized) +
           TensorOpCost(0, 0, cost, vectorized, PacketSize);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType* data() const {
    CoeffReturnType* result = const_cast<CoeffReturnType*>(m_impl.data());
    if (((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumDims) ||
         (static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) &&
        result) {
      return result + m_inputOffset;
    } else {
      return NULL;
    }
  }
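  // Added commentary: data() exposes a raw pointer only when the chipped
  // dimension is the outermost one (the last dimension in ColMajor, the first
  // in RowMajor) and the wrapped evaluator itself has raw data. Only in that
  // case is the slice a single contiguous block of the underlying buffer,
  // starting m_inputOffset coefficients into it; otherwise NULL is returned.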

 protected:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const
  {
    Index inputIndex;
    if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == 0) ||
        (static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == NumInputDims-1)) {
      // m_stride is equal to 1, so let's avoid the integer division.
      eigen_assert(m_stride == 1);
      inputIndex = index * m_inputStride + m_inputOffset;
    } else if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumInputDims-1) ||
               (static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) {
      // m_stride is always greater than index, so let's avoid the integer division.
      eigen_assert(m_stride > index);
      inputIndex = index + m_inputOffset;
    } else {
      const Index idx = index / m_stride;
      inputIndex = idx * m_inputStride + m_inputOffset;
      index -= idx * m_stride;
      inputIndex += index;
    }
    return inputIndex;
  }
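  // Added commentary: in the general case the mapping implemented above is
  //   srcCoeff(index) = (index / m_stride) * m_inputStride
  //                   + (index % m_stride) + m_inputOffset
  // The two special-cased branches are this same formula with m_stride == 1
  // (chipping the innermost dimension) and with index < m_stride (chipping
  // the outermost dimension), which lets the integer division be skipped.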

  Dimensions m_dimensions;
  Index m_stride;
  Index m_inputOffset;
  Index m_inputStride;
  TensorEvaluator<ArgType, Device> m_impl;
  const internal::DimensionId<DimId> m_dim;
  const Device& m_device;
};


// Eval as lvalue
template<DenseIndex DimId, typename ArgType, typename Device>
struct TensorEvaluator<TensorChippingOp<DimId, ArgType>, Device>
  : public TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
{
  typedef TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device> Base;
  typedef TensorChippingOp<DimId, ArgType> XprType;
  static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  static const int NumDims = NumInputDims-1;
  typedef typename XprType::Index Index;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;

  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    RawAccess = false
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
    : Base(op, device)
    { }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
  {
    return this->m_impl.coeffRef(this->srcCoeff(index));
  }

  template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketReturnType& x)
  {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)

    if ((static_cast<int>(this->Layout) == static_cast<int>(ColMajor) && this->m_dim.actualDim() == 0) ||
        (static_cast<int>(this->Layout) == static_cast<int>(RowMajor) && this->m_dim.actualDim() == NumInputDims-1)) {
      // m_stride is equal to 1, so let's avoid the integer division.
      eigen_assert(this->m_stride == 1);
      EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
      internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
      Index inputIndex = index * this->m_inputStride + this->m_inputOffset;
      for (int i = 0; i < PacketSize; ++i) {
        this->m_impl.coeffRef(inputIndex) = values[i];
        inputIndex += this->m_inputStride;
      }
    } else if ((static_cast<int>(this->Layout) == static_cast<int>(ColMajor) && this->m_dim.actualDim() == NumInputDims-1) ||
               (static_cast<int>(this->Layout) == static_cast<int>(RowMajor) && this->m_dim.actualDim() == 0)) {
      // m_stride is always greater than index, so let's avoid the integer division.
      eigen_assert(this->m_stride > index);
      this->m_impl.template writePacket<StoreMode>(index + this->m_inputOffset, x);
    } else {
      const Index idx = index / this->m_stride;
      const Index rem = index - idx * this->m_stride;
      if (rem + PacketSize <= this->m_stride) {
        const Index inputIndex = idx * this->m_inputStride + this->m_inputOffset + rem;
        this->m_impl.template writePacket<StoreMode>(inputIndex, x);
      } else {
        // Crossing the stride boundary; fall back to the slow path.
        EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
        internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
        for (int i = 0; i < PacketSize; ++i) {
          this->coeffRef(index) = values[i];
          ++index;
        }
      }
    }
  }
};
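// Usage sketch (illustration only): because chipping also provides this
// lvalue evaluator, a chip can appear on the left-hand side of an assignment.
// Assuming the same rank-3 tensor as in the earlier sketch:
//
//   Eigen::Tensor<float, 2> page(4, 5);
//   page.setConstant(1.0f);
//   input.chip(0, 2) = page;  // overwrite the slice input(:, :, 0)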


} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H