Eigen  3.3.3
AmbiVector.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_AMBIVECTOR_H
#define EIGEN_AMBIVECTOR_H

namespace Eigen {

namespace internal {

/** \internal
  * Hybrid sparse/dense vector class designed for intensive read-write operations.
  *
  * See BasicSparseLLT and SparseProduct for usage examples.
  */
template<typename _Scalar, typename _StorageIndex>
class AmbiVector
{
  public:
    typedef _Scalar Scalar;
    typedef _StorageIndex StorageIndex;
    typedef typename NumTraits<Scalar>::Real RealScalar;

    explicit AmbiVector(Index size)
      : m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
    {
      resize(size);
    }

    void init(double estimatedDensity);
    void init(int mode);

    Index nonZeros() const;

    /** Specifies a sub-vector to work on */
    void setBounds(Index start, Index end) { m_start = convert_index(start); m_end = convert_index(end); }

    void setZero();

    void restart();
    Scalar& coeffRef(Index i);
    Scalar& coeff(Index i);

    class Iterator;

    ~AmbiVector() { delete[] m_buffer; }

    void resize(Index size)
    {
      if (m_allocatedSize < size)
        reallocate(size);
      m_size = convert_index(size);
    }

    StorageIndex size() const { return m_size; }

  protected:
    StorageIndex convert_index(Index idx)
    {
      return internal::convert_index<StorageIndex>(idx);
    }

    void reallocate(Index size)
    {
      // If the vector is not too large, allocate a bit more memory than strictly
      // needed so that a fully dense vector can still be handled in sparse mode.
      delete[] m_buffer;
      if (size<1000)
      {
        Index allocSize = (size * sizeof(ListEl) + sizeof(Scalar) - 1)/sizeof(Scalar);
        m_allocatedElements = convert_index((allocSize*sizeof(Scalar))/sizeof(ListEl));
        m_allocatedSize = convert_index(allocSize); // record capacity so resize() can skip needless reallocations
        m_buffer = new Scalar[allocSize];
      }
      else
      {
        m_allocatedElements = convert_index((size*sizeof(Scalar))/sizeof(ListEl));
        m_allocatedSize = convert_index(size);
        m_buffer = new Scalar[size];
      }
      m_size = convert_index(size);
      m_start = 0;
      m_end = m_size;
    }

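    // Worked example of the size computation above (editorial sketch; assumes
    // Scalar==double and StorageIndex==int, giving sizeof(Scalar)==8 and, with
    // padding, sizeof(ListEl)==16):
    //   size == 100  ->  allocSize = (100*16 + 7)/8 = 200 Scalars (1600 bytes),
    //                    m_allocatedElements = 1600/16 = 100 nodes, i.e. one
    //                    linked-list node per coefficient, so even a fully
    //                    dense vector fits in sparse mode;
    //   size == 4000 ->  only 4000 Scalars (32000 bytes) are allocated, giving
    //                    m_allocatedElements = 2000 nodes, and reallocateSparse()
    //                    grows the node storage on demand.
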
    void reallocateSparse()
    {
      // grow the linked-list storage by 1.5x, capped at one node per coefficient
      Index copyElements = m_allocatedElements;
      m_allocatedElements = (std::min)(StorageIndex(m_allocatedElements*1.5),m_size);
      Index allocSize = m_allocatedElements * sizeof(ListEl);
      allocSize = (allocSize + sizeof(Scalar) - 1)/sizeof(Scalar);
      Scalar* newBuffer = new Scalar[allocSize];
      std::memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
      delete[] m_buffer;
      m_buffer = newBuffer;
    }


  protected:
    // element type of the linked list
    struct ListEl
    {
      StorageIndex next;
      StorageIndex index;
      Scalar value;
    };

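    // Note (editorial): in sparse mode the Scalar buffer (m_buffer below) is
    // reinterpret_cast to an array of ListEl nodes, which is why all allocation
    // sizes above are computed in whole ListEl units; this assumes new Scalar[]
    // returns storage suitably aligned for ListEl, which holds for the
    // standard scalar types.
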
    // buffer used to store the data in both dense and sparse modes
    Scalar* m_buffer;
    Scalar m_zero;
    StorageIndex m_size;
    StorageIndex m_start;
    StorageIndex m_end;
    StorageIndex m_allocatedSize;
    StorageIndex m_allocatedElements;
    StorageIndex m_mode;

    // linked list mode
    StorageIndex m_llStart;
    StorageIndex m_llCurrent;
    StorageIndex m_llSize;
};

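// A minimal usage sketch (editorial; AmbiVector is an internal helper, not
// public Eigen API, and the indices/values here are made up):
//
//   internal::AmbiVector<double,int> vec(100);
//   vec.init(0.01);             // estimated density < 10% -> sparse mode
//   vec.setZero();
//   vec.restart();
//   vec.coeffRef(3)  += 1.0;    // write indices in increasing order...
//   vec.coeffRef(42) += 2.0;    // ...or call restart() before going backwards
//   for (internal::AmbiVector<double,int>::Iterator it(vec); it; ++it)
//     std::cout << it.index() << " : " << it.value() << "\n";
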
/** \returns the number of non zeros in the current sub vector */
template<typename _Scalar,typename _StorageIndex>
Index AmbiVector<_Scalar,_StorageIndex>::nonZeros() const
{
  if (m_mode==IsSparse)
    return m_llSize;
  else
    return m_end - m_start;
}

template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::init(double estimatedDensity)
{
  if (estimatedDensity>0.1)
    init(IsDense);
  else
    init(IsSparse);
}

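// The 10% threshold above is a heuristic trade-off: once roughly one
// coefficient in ten is expected to be nonzero, a plain dense sweep of the
// subvector beats maintaining a linked list. For instance, init(0.3) selects
// dense mode while init(0.01) selects sparse mode.
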
template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::init(int mode)
{
  m_mode = mode;
  if (m_mode==IsSparse)
  {
    m_llSize = 0;
    m_llStart = -1;
  }
}

/** Must be called whenever we might perform a write access
  * with an index smaller than the previous one.
  *
  * Don't worry, this function is extremely cheap.
  */
template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::restart()
{
  m_llCurrent = m_llStart;
}

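// Example of the required access pattern (editorial sketch):
//
//   vec.coeffRef(7) = 1.0;    // advances the cached position m_llCurrent
//   vec.restart();            // rewind to the head of the list...
//   vec.coeffRef(2) = 2.0;    // ...so a smaller index can now be written;
//                             // without restart() this asserts in coeffRef()
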
/** Sets all coefficients of current subvector to zero */
template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::setZero()
{
  if (m_mode==IsDense)
  {
    for (Index i=m_start; i<m_end; ++i)
      m_buffer[i] = Scalar(0);
  }
  else
  {
    eigen_assert(m_mode==IsSparse);
    m_llSize = 0;
    m_llStart = -1;
  }
}

template<typename _Scalar,typename _StorageIndex>
_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeffRef(Index i)
{
  if (m_mode==IsDense)
    return m_buffer[i];
  else
  {
    ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
    // TODO factorize the following code to reduce code generation
    eigen_assert(m_mode==IsSparse);
    if (m_llSize==0)
    {
      // this is the first element
      m_llStart = 0;
      m_llCurrent = 0;
      ++m_llSize;
      llElements[0].value = Scalar(0);
      llElements[0].index = convert_index(i);
      llElements[0].next = -1;
      return llElements[0].value;
    }
    else if (i<llElements[m_llStart].index)
    {
      if (m_llSize>=m_allocatedElements)
      {
        // make room for the new node, mirroring the insertion branch below
        reallocateSparse();
        llElements = reinterpret_cast<ListEl*>(m_buffer);
      }
      // this is going to be the new first element of the list
      ListEl& el = llElements[m_llSize];
      el.value = Scalar(0);
      el.index = convert_index(i);
      el.next = m_llStart;
      m_llStart = m_llSize;
      ++m_llSize;
      m_llCurrent = m_llStart;
      return el.value;
    }
    else
    {
      StorageIndex nextel = llElements[m_llCurrent].next;
      eigen_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index");
      while (nextel >= 0 && llElements[nextel].index<=i)
      {
        m_llCurrent = nextel;
        nextel = llElements[nextel].next;
      }

      if (llElements[m_llCurrent].index==i)
      {
        // the coefficient already exists and we found it!
        return llElements[m_llCurrent].value;
      }
      else
      {
        if (m_llSize>=m_allocatedElements)
        {
          reallocateSparse();
          llElements = reinterpret_cast<ListEl*>(m_buffer);
        }
        eigen_internal_assert(m_llSize<m_allocatedElements && "internal error: overflow in sparse mode");
        // let's insert a new coefficient
        ListEl& el = llElements[m_llSize];
        el.value = Scalar(0);
        el.index = convert_index(i);
        el.next = llElements[m_llCurrent].next;
        llElements[m_llCurrent].next = m_llSize;
        ++m_llSize;
        return el.value;
      }
    }
  }
}

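// Note on complexity (editorial): because m_llCurrent caches the last visited
// node, a sweep that writes monotonically increasing indices, e.g.
//
//   vec.restart();
//   for (Index k=0; k<nnz; ++k)        // idx[] sorted in ascending order
//     vec.coeffRef(idx[k]) += val[k];
//
// traverses each list node at most once, i.e. O(nnz) overall rather than
// O(nnz^2) for repeated searches from the head.
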
template<typename _Scalar,typename _StorageIndex>
_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeff(Index i)
{
  if (m_mode==IsDense)
    return m_buffer[i];
  else
  {
    ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
    eigen_assert(m_mode==IsSparse);
    if ((m_llSize==0) || (i<llElements[m_llStart].index))
    {
      return m_zero;
    }
    else
    {
      Index elid = m_llStart;
      while (elid >= 0 && llElements[elid].index<i)
        elid = llElements[elid].next;

      if (elid >= 0 && llElements[elid].index==i)
        return llElements[elid].value;
      else
        return m_zero;
    }
  }
}

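// Caution (editorial): coeff() returns a non-const reference, and a miss
// resolves to the shared m_zero member; writing through that reference would
// corrupt the zero returned by every later miss. Use coeffRef() for writes.
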
template<typename _Scalar,typename _StorageIndex>
class AmbiVector<_Scalar,_StorageIndex>::Iterator
{
  public:
    typedef _Scalar Scalar;
    typedef typename NumTraits<Scalar>::Real RealScalar;

    /** Default constructor
      * \param vec the vector on which we iterate
      * \param epsilon the minimal value used to prune zero coefficients.
      * In practice, all coefficients having a magnitude smaller than \a epsilon
      * are skipped.
      */
    explicit Iterator(const AmbiVector& vec, const RealScalar& epsilon = 0)
      : m_vector(vec)
    {
      using std::abs;
      m_epsilon = epsilon;
      m_isDense = m_vector.m_mode==IsDense;
      if (m_isDense)
      {
        m_currentEl = 0;   // this is to avoid a compilation warning
        m_cachedValue = 0; // this is to avoid a compilation warning
        m_cachedIndex = m_vector.m_start-1;
        ++(*this);
      }
      else
      {
        ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
        m_currentEl = m_vector.m_llStart;
        while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon)
          m_currentEl = llElements[m_currentEl].next;
        if (m_currentEl<0)
        {
          m_cachedValue = 0; // this is to avoid a compilation warning
          m_cachedIndex = -1;
        }
        else
        {
          m_cachedIndex = llElements[m_currentEl].index;
          m_cachedValue = llElements[m_currentEl].value;
        }
      }
    }

    StorageIndex index() const { return m_cachedIndex; }
    Scalar value() const { return m_cachedValue; }

    operator bool() const { return m_cachedIndex>=0; }

    Iterator& operator++()
    {
      using std::abs;
      if (m_isDense)
      {
        do {
          ++m_cachedIndex;
        } while (m_cachedIndex<m_vector.m_end && abs(m_vector.m_buffer[m_cachedIndex])<=m_epsilon);
        if (m_cachedIndex<m_vector.m_end)
          m_cachedValue = m_vector.m_buffer[m_cachedIndex];
        else
          m_cachedIndex=-1;
      }
      else
      {
        ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
        do {
          m_currentEl = llElements[m_currentEl].next;
        } while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon);
        if (m_currentEl<0)
        {
          m_cachedIndex = -1;
        }
        else
        {
          m_cachedIndex = llElements[m_currentEl].index;
          m_cachedValue = llElements[m_currentEl].value;
        }
      }
      return *this;
    }

  protected:
    const AmbiVector& m_vector; // the target vector
    StorageIndex m_currentEl;   // the current element in sparse/linked-list mode
    RealScalar m_epsilon;       // epsilon used to prune zero coefficients
    StorageIndex m_cachedIndex; // current coordinate
    Scalar m_cachedValue;       // current value
    bool m_isDense;             // mode of the vector
};

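// Iteration sketch (editorial): collect the coefficients whose magnitude
// exceeds a pruning threshold, visiting them in increasing index order in both
// modes. This mirrors how Eigen's pruning sparse-sparse product consumes an
// AmbiVector; `res`, `j` and `tolerance` are assumed context:
//
//   for (internal::AmbiVector<double,int>::Iterator it(tempVector, tolerance); it; ++it)
//     res.insertBackByOuterInner(j, it.index()) = it.value();
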
} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_AMBIVECTOR_H