Eigen-unsupported 3.3.3
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H
#define EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H

namespace Eigen {

template<typename Scalar_, typename Dimensions_, int Options_, typename IndexType>
class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> >
{
  public:
    typedef TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> Self;
    typedef TensorBase<TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> > Base;
    typedef typename Eigen::internal::nested<Self>::type Nested;
    typedef typename internal::traits<Self>::StorageKind StorageKind;
    typedef typename internal::traits<Self>::Index Index;
    typedef Scalar_ Scalar;
    typedef typename NumTraits<Scalar>::Real RealScalar;
    typedef typename Base::CoeffReturnType CoeffReturnType;

    static const int Options = Options_;

    enum {
      IsAligned = bool(EIGEN_MAX_ALIGN_BYTES>0),
      Layout = Options_ & RowMajor ? RowMajor : ColMajor,
      CoordAccess = true,
      RawAccess = true
    };

    typedef Dimensions_ Dimensions;
    static const std::size_t NumIndices = Dimensions::count;

  protected:
    TensorStorage<Scalar, Dimensions, Options> m_storage;

  public:
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index             rank()                   const { return NumIndices; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index             dimension(std::size_t n) const { return m_storage.dimensions()[n]; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions()             const { return m_storage.dimensions(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index             size()                   const { return m_storage.size(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar            *data()                        { return m_storage.data(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar      *data()                  const { return m_storage.data(); }

    // This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
    // work, because that uses base().coeffRef() - and we don't yet
    // implement a similar class hierarchy
    inline Self& base()             { return *this; }
    inline const Self& base() const { return *this; }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index firstIndex, IndexTypes... otherIndices) const
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeff(array<Index, NumIndices>{{firstIndex, otherIndices...}});
    }
#endif

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
    {
      eigen_internal_assert(checkIndexRange(indices));
      return m_storage.data()[linearizedIndex(indices)];
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
    {
      eigen_internal_assert(index >= 0 && index < size());
      return m_storage.data()[index];
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& coeff() const
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return m_storage.data()[0];
    }


#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index firstIndex, IndexTypes... otherIndices)
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeffRef(array<Index, NumIndices>{{firstIndex, otherIndices...}});
    }
#endif

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
    {
      eigen_internal_assert(checkIndexRange(indices));
      return m_storage.data()[linearizedIndex(indices)];
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
    {
      eigen_internal_assert(index >= 0 && index < size());
      return m_storage.data()[index];
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& coeffRef()
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return m_storage.data()[0];
    }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) const
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return this->operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
    }
#else
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
    {
      if (Options&RowMajor) {
        const Index index = i1 + i0 * m_storage.dimensions()[1];
        return m_storage.data()[index];
      } else {
        const Index index = i0 + i1 * m_storage.dimensions()[0];
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
    {
      if (Options&RowMajor) {
        const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
    {
      if (Options&RowMajor) {
        const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
    {
      if (Options&RowMajor) {
        const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
        return m_storage.data()[index];
      }
    }
#endif


    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
    {
      eigen_assert(checkIndexRange(indices));
      return coeff(indices);
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const
    {
      eigen_internal_assert(index >= 0 && index < size());
      return coeff(index);
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()() const
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeff();
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const
    {
      // The bracket operator is only for vectors, use the parenthesis operator instead.
      EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeff(index);
    }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index firstIndex, IndexTypes... otherIndices)
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
    }
#else
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
    {
      if (Options&RowMajor) {
        const Index index = i1 + i0 * m_storage.dimensions()[1];
        return m_storage.data()[index];
      } else {
        const Index index = i0 + i1 * m_storage.dimensions()[0];
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
    {
      if (Options&RowMajor) {
        const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
    {
      if (Options&RowMajor) {
        const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
    {
      if (Options&RowMajor) {
        const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
        return m_storage.data()[index];
      }
    }
#endif

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
    {
      eigen_assert(checkIndexRange(indices));
      return coeffRef(indices);
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index index)
    {
      eigen_assert(index >= 0 && index < size());
      return coeffRef(index);
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()()
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeffRef();
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator[](Index index)
    {
      // The bracket operator is only for vectors, use the parenthesis operator instead
      EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeffRef(index);
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorFixedSize()
      : m_storage()
    {
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorFixedSize(const Self& other)
      : m_storage(other.m_storage)
    {
    }

#if EIGEN_HAS_RVALUE_REFERENCES
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFixedSize(Self&& other)
      : m_storage(other.m_storage)
    {
    }
#endif

    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorFixedSize(const TensorBase<OtherDerived, ReadOnlyAccessors>& other)
    {
      typedef TensorAssignOp<TensorFixedSize, const OtherDerived> Assign;
      Assign assign(*this, other.derived());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    }
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorFixedSize(const TensorBase<OtherDerived, WriteAccessors>& other)
    {
      typedef TensorAssignOp<TensorFixedSize, const OtherDerived> Assign;
      Assign assign(*this, other.derived());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorFixedSize& operator=(const TensorFixedSize& other)
    {
      // FIXME: check that the dimensions of other match the dimensions of *this.
      // Unfortunately this isn't possible yet when the rhs is an expression.
      typedef TensorAssignOp<Self, const TensorFixedSize> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorFixedSize& operator=(const OtherDerived& other)
    {
      // FIXME: check that the dimensions of other match the dimensions of *this.
      // Unfortunately this isn't possible yet when the rhs is an expression.
      typedef TensorAssignOp<Self, const OtherDerived> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

  protected:
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE bool checkIndexRange(const array<Index, NumIndices>& /*indices*/) const
    {
      using internal::array_apply_and_reduce;
      using internal::array_zip_and_reduce;
      using internal::greater_equal_zero_op;
      using internal::logical_and_op;
      using internal::lesser_op;

      return true;
      // check whether the indices are all >= 0
      /* array_apply_and_reduce<logical_and_op, greater_equal_zero_op>(indices) &&
         // check whether the indices fit in the dimensions
         array_zip_and_reduce<logical_and_op, lesser_op>(indices, m_storage.dimensions()); */
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Index linearizedIndex(const array<Index, NumIndices>& indices) const
    {
      if (Options&RowMajor) {
        return m_storage.dimensions().IndexOfRowMajor(indices);
      } else {
        return m_storage.dimensions().IndexOfColMajor(indices);
      }
    }
};


} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H
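
The sketch below is not part of TensorFixedSize.h; it is a minimal usage example, assuming the standard unsupported Tensor module header and the Sizes<> dimension type from the same module. It shows how the class above is typically instantiated, how the operator() overloads are used for coefficient access, and how expression assignment flows through the TensorAssignOp / TensorExecutor path seen in the constructors and operator= definitions.

// Minimal usage sketch (illustrative, not from the file above).
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  // Dimensions are encoded in the type, so the storage is fixed-size
  // and no dynamic allocation is needed.
  Eigen::TensorFixedSize<float, Eigen::Sizes<3, 4> > a, b, c;

  a.setConstant(1.0f);  // bulk initializers come from TensorBase
  b.setConstant(2.0f);

  // Coefficient access uses the operator() overloads defined above;
  // the number of indices must equal the rank (2 here), otherwise
  // EIGEN_STATIC_ASSERT fires at compile time.
  a(0, 0) = 5.0f;

  // Expression assignment is evaluated by TensorExecutor on DefaultDevice.
  c = a + b;

  std::cout << "c(0, 0) = " << c(0, 0) << "\n";       // 7
  std::cout << "rank = " << c.rank()
            << ", size = " << c.size() << "\n";       // rank = 2, size = 12
  return 0;
}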