escript Revision
Data.h
Go to the documentation of this file.
00001 
00002 /*****************************************************************************
00003 *
00004 * Copyright (c) 2003-2014 by University of Queensland
00005 * http://www.uq.edu.au
00006 *
00007 * Primary Business: Queensland, Australia
00008 * Licensed under the Open Software License version 3.0
00009 * http://www.opensource.org/licenses/osl-3.0.php
00010 *
00011 * Development until 2012 by Earth Systems Science Computational Center (ESSCC)
00012 * Development 2012-2013 by School of Earth Sciences
00013 * Development from 2014 by Centre for Geoscience Computing (GeoComp)
00014 *
00015 *****************************************************************************/
00016 
00017 
00020 #ifndef DATA_H
00021 #define DATA_H
00022 #include "system_dep.h"
00023 
00024 #include "DataTypes.h"
00025 #include "DataAbstract.h"
00026 #include "DataAlgorithm.h"
00027 #include "FunctionSpace.h"
00028 #include "BinaryOp.h"
00029 #include "UnaryOp.h"
00030 #include "DataException.h"
00031 
00032 
00033 
00034 #include "DataC.h"
00035 
00036 #ifdef _OPENMP
00037 #include <omp.h>
00038 #endif
00039 
00040 #include "esysUtils/Esys_MPI.h"
#include <algorithm>
#include <cmath>
#include <sstream>
#include <string>
00044 
00045 #include <boost/shared_ptr.hpp>
00046 #include <boost/python/object.hpp>
00047 #include <boost/python/tuple.hpp>
00048 
00049 namespace escript {
00050 
00051 //
00052 // Forward declaration for various implementations of Data.
00053 class DataConstant;
00054 class DataTagged;
00055 class DataExpanded;
00056 class DataLazy;
00057 
00071 class Data {
00072 
00073   public:
00074 
00075   // These typedefs allow function names to be cast to pointers
00076   // to functions of the appropriate type when calling unaryOp etc.
00077   typedef double (*UnaryDFunPtr)(double);
00078   typedef double (*BinaryDFunPtr)(double,double);
00079 
00080 
00090   ESCRIPT_DLL_API
00091   Data();
00092 
00098   ESCRIPT_DLL_API
00099   Data(const Data& inData);
00100 
00107   ESCRIPT_DLL_API
00108   Data(const Data& inData,
00109        const FunctionSpace& what);
00110 
00115   ESCRIPT_DLL_API
00116   Data(const DataTypes::ValueType& value,
00117          const DataTypes::ShapeType& shape,
00118                  const FunctionSpace& what=FunctionSpace(),
00119                  bool expanded=false);
00120 
00132   ESCRIPT_DLL_API
00133   Data(double value,
00134        const DataTypes::ShapeType& dataPointShape=DataTypes::ShapeType(),
00135        const FunctionSpace& what=FunctionSpace(),
00136        bool expanded=false);
00137 
00145   ESCRIPT_DLL_API
00146   Data(const Data& inData,
00147        const DataTypes::RegionType& region);
00148 
00159   ESCRIPT_DLL_API
00160   Data(const boost::python::object& value,
00161        const FunctionSpace& what=FunctionSpace(),
00162        bool expanded=false);
00163 
00174   ESCRIPT_DLL_API     
00175   Data(const WrappedArray& w, const FunctionSpace& what,
00176            bool expanded=false);       
00177        
00178 
00188   ESCRIPT_DLL_API
00189   Data(const boost::python::object& value,
00190        const Data& other);
00191 
00196   ESCRIPT_DLL_API
00197   Data(double value,
00198        const boost::python::tuple& shape=boost::python::make_tuple(),
00199        const FunctionSpace& what=FunctionSpace(),
00200        bool expanded=false);
00201 
00206   ESCRIPT_DLL_API
00207   explicit Data(DataAbstract* underlyingdata);
00208 
00212   ESCRIPT_DLL_API
00213   explicit Data(DataAbstract_ptr underlyingdata);
00214 
00219   ESCRIPT_DLL_API
00220   ~Data();
00221 
00225   ESCRIPT_DLL_API
00226   void
00227   copy(const Data& other);
00228 
00232   ESCRIPT_DLL_API
00233   Data
00234   copySelf();
00235 
00236 
00240   ESCRIPT_DLL_API
00241   Data
00242   delay();
00243 
00247   ESCRIPT_DLL_API
00248   void 
00249   delaySelf();
00250 
00251 
00261   ESCRIPT_DLL_API
00262   void
00263   setProtection();
00264 
00270   ESCRIPT_DLL_API
00271   bool
00272   isProtected() const;
00273 
00274 
00279   ESCRIPT_DLL_API
00280   const boost::python::object
00281   getValueOfDataPointAsTuple(int dataPointNo);
00282 
00287   ESCRIPT_DLL_API
00288   void
00289   setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);
00290 
00295   ESCRIPT_DLL_API
00296   void
00297   setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);
00298 
00303   ESCRIPT_DLL_API
00304   void
00305   setValueOfDataPoint(int dataPointNo, const double);
00306 
00310   ESCRIPT_DLL_API
00311   const boost::python::object
00312   getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);
00313 
00314   
00318   ESCRIPT_DLL_API
00319   void
00320   setTupleForGlobalDataPoint(int id, int proc, boost::python::object);
00321   
00327   ESCRIPT_DLL_API
00328   int
00329   getTagNumber(int dpno);
00330 
00335   ESCRIPT_DLL_API
00336   escriptDataC
00337   getDataC();
00338 
00339 
00340 
00345   ESCRIPT_DLL_API
00346   escriptDataC
00347   getDataC() const;
00348 
00349 
00354   ESCRIPT_DLL_API
00355   std::string
00356   toString() const;
00357 
00362   ESCRIPT_DLL_API
00363   void
00364   expand();
00365 
00372   ESCRIPT_DLL_API
00373   void
00374   tag();
00375 
00380   ESCRIPT_DLL_API
00381   void
00382   resolve();
00383 
00384 
00392   ESCRIPT_DLL_API
00393   void
00394   requireWrite();
00395 
00401   ESCRIPT_DLL_API
00402   bool
00403   isExpanded() const;
00404 
00410   ESCRIPT_DLL_API
00411   bool
00412   actsExpanded() const;
00413   
00414 
00419   ESCRIPT_DLL_API
00420   bool
00421   isTagged() const;
00422 
00427   ESCRIPT_DLL_API
00428   bool
00429   isConstant() const;
00430 
00434   ESCRIPT_DLL_API
00435   bool
00436   isLazy() const;
00437 
00441   ESCRIPT_DLL_API
00442   bool
00443   isReady() const;
00444 
00450   ESCRIPT_DLL_API
00451   bool
00452   isEmpty() const;
00453 
00458   ESCRIPT_DLL_API
00459   inline
00460   const FunctionSpace&
00461   getFunctionSpace() const
00462   {
00463     return m_data->getFunctionSpace();
00464   }
00465 
00470   ESCRIPT_DLL_API
00471   inline
00472 //   const AbstractDomain&
00473   const_Domain_ptr
00474   getDomain() const
00475   {
00476      return getFunctionSpace().getDomain();
00477   }
00478 
00479 
00485   ESCRIPT_DLL_API
00486   inline
00487 //   const AbstractDomain&
00488   Domain_ptr
00489   getDomainPython() const
00490   {
00491      return getFunctionSpace().getDomainPython();
00492   }
00493 
00498   ESCRIPT_DLL_API
00499   inline
00500   unsigned int
00501   getDataPointRank() const
00502   {
00503     return m_data->getRank();
00504   }
00505 
00510   ESCRIPT_DLL_API
00511   inline
00512   int
00513   getNumDataPoints() const
00514   {
00515     return getNumSamples() * getNumDataPointsPerSample();
00516   }
00521   ESCRIPT_DLL_API
00522   inline
00523   int
00524   getNumSamples() const
00525   {
00526     return m_data->getNumSamples();
00527   }
00528 
00533   ESCRIPT_DLL_API
00534   inline
00535   int
00536   getNumDataPointsPerSample() const
00537   {
00538     return m_data->getNumDPPSample();
00539   }
00540 
00546   ESCRIPT_DLL_API
00547   inline
00548   bool numSamplesEqual(int numDataPointsPerSample, int numSamples) const
00549   {
00550     return (isEmpty() ||
00551             (numDataPointsPerSample==getNumDataPointsPerSample() && numSamples==getNumSamples()));
00552   }
00553 
00559   inline
00560   bool isDataPointShapeEqual(int rank, const int* dimensions) const
00561   {
00562     if (isEmpty())
00563       return true;
00564     const DataTypes::ShapeType givenShape(&dimensions[0],&dimensions[rank]);
00565     return (getDataPointShape()==givenShape);
00566   }
00567 
00572   ESCRIPT_DLL_API
00573   int
00574   getNoValues() const
00575   {
00576     return m_data->getNoValues();
00577   }
00578 
00579 
00584   ESCRIPT_DLL_API
00585   void
00586   dump(const std::string fileName) const;
00587 
00594   ESCRIPT_DLL_API
00595   const boost::python::object
00596   toListOfTuples(bool scalarastuple=true);
00597 
00598 
00606   ESCRIPT_DLL_API
00607   inline
00608   const DataAbstract::ValueType::value_type*
00609   getSampleDataRO(DataAbstract::ValueType::size_type sampleNo) const;
00610 
00611 
00619   ESCRIPT_DLL_API
00620   inline
00621   DataAbstract::ValueType::value_type*
00622   getSampleDataRW(DataAbstract::ValueType::size_type sampleNo);
00623 
00624 
00631   ESCRIPT_DLL_API
00632   inline
00633   DataAbstract::ValueType::value_type*
00634   getSampleDataByTag(int tag)
00635   {
00636     return m_data->getSampleDataByTag(tag);
00637   }
00638 
00645   ESCRIPT_DLL_API
00646   DataTypes::ValueType::const_reference
00647   getDataPointRO(int sampleNo, int dataPointNo);
00648 
00655   ESCRIPT_DLL_API
00656   DataTypes::ValueType::reference
00657   getDataPointRW(int sampleNo, int dataPointNo);
00658 
00659 
00660 
00665   ESCRIPT_DLL_API
00666   inline
00667   DataTypes::ValueType::size_type
00668   getDataOffset(int sampleNo,
00669                int dataPointNo)
00670   {
00671       return m_data->getPointOffset(sampleNo,dataPointNo);
00672   }
00673 
00678   ESCRIPT_DLL_API
00679   inline
00680   const DataTypes::ShapeType&
00681   getDataPointShape() const
00682   {
00683     return m_data->getShape();
00684   }
00685 
00690   ESCRIPT_DLL_API
00691   const boost::python::tuple
00692   getShapeTuple() const;
00693 
00699   ESCRIPT_DLL_API
00700   int
00701   getDataPointSize() const;
00702 
00707   ESCRIPT_DLL_API
00708   DataTypes::ValueType::size_type
00709   getLength() const;
00710 
00715   ESCRIPT_DLL_API
00716   bool
00717   hasNoSamples() const
00718   {
00719     return getLength()==0;
00720   }
00721 
00730   ESCRIPT_DLL_API
00731   void
00732   setTaggedValueByName(std::string name,
00733                        const boost::python::object& value);
00734 
00744   ESCRIPT_DLL_API
00745   void
00746   setTaggedValue(int tagKey,
00747                  const boost::python::object& value);
00748 
00759   ESCRIPT_DLL_API
00760   void
00761   setTaggedValueFromCPP(int tagKey,
00762             const DataTypes::ShapeType& pointshape,
00763                         const DataTypes::ValueType& value,
00764             int dataOffset=0);
00765 
00766 
00767 
00772   ESCRIPT_DLL_API
00773   void
00774   copyWithMask(const Data& other,
00775                const Data& mask);
00776 
00786   ESCRIPT_DLL_API
00787   void
00788   setToZero();
00789 
00796   ESCRIPT_DLL_API
00797   Data
00798   interpolate(const FunctionSpace& functionspace) const;
00799 
00800   ESCRIPT_DLL_API
00801   Data
00802   interpolateFromTable3D(const WrappedArray& table, double Amin, double Astep,
00803                        double undef, Data& B, double Bmin, double Bstep, Data& C, 
00804             double Cmin, double Cstep, bool check_boundaries);
00805 
00806   ESCRIPT_DLL_API
00807   Data
00808   interpolateFromTable2D(const WrappedArray& table, double Amin, double Astep,
00809                        double undef, Data& B, double Bmin, double Bstep,bool check_boundaries);
00810 
00811   ESCRIPT_DLL_API
00812   Data
00813   interpolateFromTable1D(const WrappedArray& table, double Amin, double Astep,
00814                        double undef,bool check_boundaries);
00815 
00816 
00817   ESCRIPT_DLL_API
00818   Data
00819   interpolateFromTable3DP(boost::python::object table, double Amin, double Astep,
00820                         Data& B, double Bmin, double Bstep, Data& C, double Cmin, double Cstep, double undef,bool check_boundaries);
00821 
00822 
00823   ESCRIPT_DLL_API
00824   Data
00825   interpolateFromTable2DP(boost::python::object table, double Amin, double Astep,
00826                         Data& B, double Bmin, double Bstep, double undef,bool check_boundaries);
00827 
00828   ESCRIPT_DLL_API
00829   Data
00830   interpolateFromTable1DP(boost::python::object table, double Amin, double Astep,
00831                         double undef,bool check_boundaries);
00832   
00833   ESCRIPT_DLL_API
00834   Data
00835   nonuniforminterp(boost::python::object in, boost::python::object out, bool check_boundaries);
00836 
00837   ESCRIPT_DLL_API
00838   Data
00839   nonuniformslope(boost::python::object in, boost::python::object out, bool check_boundaries);  
00840   
00847   ESCRIPT_DLL_API
00848   Data
00849   gradOn(const FunctionSpace& functionspace) const;
00850 
00851   ESCRIPT_DLL_API
00852   Data
00853   grad() const;
00854 
00859   ESCRIPT_DLL_API
00860   boost::python::object
00861   integrateToTuple_const() const;
00862 
00863 
00868   ESCRIPT_DLL_API
00869   boost::python::object
00870   integrateToTuple();
00871 
00872 
00873 
00879   ESCRIPT_DLL_API
00880   Data
00881   oneOver() const;
00887   ESCRIPT_DLL_API
00888   Data
00889   wherePositive() const;
00890 
00896   ESCRIPT_DLL_API
00897   Data
00898   whereNegative() const;
00899 
00905   ESCRIPT_DLL_API
00906   Data
00907   whereNonNegative() const;
00908 
00914   ESCRIPT_DLL_API
00915   Data
00916   whereNonPositive() const;
00917 
00923   ESCRIPT_DLL_API
00924   Data
00925   whereZero(double tol=0.0) const;
00926 
00932   ESCRIPT_DLL_API
00933   Data
00934   whereNonZero(double tol=0.0) const;
00935 
00947   ESCRIPT_DLL_API
00948   double
00949   Lsup();
00950 
00951   ESCRIPT_DLL_API
00952   double
00953   Lsup_const() const;
00954 
00955 
00967   ESCRIPT_DLL_API
00968   double
00969   sup();
00970 
00971   ESCRIPT_DLL_API
00972   double
00973   sup_const() const;
00974 
00975 
00987   ESCRIPT_DLL_API
00988   double
00989   inf();
00990 
00991   ESCRIPT_DLL_API
00992   double
00993   inf_const() const;
00994 
00995 
00996 
01002   ESCRIPT_DLL_API
01003   Data
01004   abs() const;
01005 
01011   ESCRIPT_DLL_API
01012   Data
01013   maxval() const;
01014 
01020   ESCRIPT_DLL_API
01021   Data
01022   minval() const;
01023 
01031   ESCRIPT_DLL_API
01032   const boost::python::tuple
01033   minGlobalDataPoint() const;
01034 
01042   ESCRIPT_DLL_API
01043   const boost::python::tuple
01044   maxGlobalDataPoint() const;
01045 
01046 
01047 
01054   ESCRIPT_DLL_API
01055   Data
01056   sign() const;
01057 
01063   ESCRIPT_DLL_API
01064   Data
01065   symmetric() const;
01066 
01072   ESCRIPT_DLL_API
01073   Data
01074   nonsymmetric() const;
01075 
01081   ESCRIPT_DLL_API
01082   Data
01083   trace(int axis_offset) const;
01084 
01090   ESCRIPT_DLL_API
01091   Data
01092   transpose(int axis_offset) const;
01093 
01100   ESCRIPT_DLL_API
01101   Data
01102   eigenvalues() const;
01103 
01113   ESCRIPT_DLL_API
01114   const boost::python::tuple
01115   eigenvalues_and_eigenvectors(const double tol=1.e-12) const;
01116 
01122   ESCRIPT_DLL_API
01123   Data
01124   swapaxes(const int axis0, const int axis1) const;
01125 
01131   ESCRIPT_DLL_API
01132   Data
01133   erf() const;
01134 
01140   ESCRIPT_DLL_API
01141   Data
01142   sin() const;
01143 
01149   ESCRIPT_DLL_API
01150   Data
01151   cos() const;
01152 
01158   ESCRIPT_DLL_API
01159   Data
01160   tan() const;
01161 
01167   ESCRIPT_DLL_API
01168   Data
01169   asin() const;
01170 
01176   ESCRIPT_DLL_API
01177   Data
01178   acos() const;
01179 
01185   ESCRIPT_DLL_API
01186   Data
01187   atan() const;
01188 
01194   ESCRIPT_DLL_API
01195   Data
01196   sinh() const;
01197 
01203   ESCRIPT_DLL_API
01204   Data
01205   cosh() const;
01206 
01212   ESCRIPT_DLL_API
01213   Data
01214   tanh() const;
01215 
01221   ESCRIPT_DLL_API
01222   Data
01223   asinh() const;
01224 
01230   ESCRIPT_DLL_API
01231   Data
01232   acosh() const;
01233 
01239   ESCRIPT_DLL_API
01240   Data
01241   atanh() const;
01242 
01248   ESCRIPT_DLL_API
01249   Data
01250   log10() const;
01251 
01257   ESCRIPT_DLL_API
01258   Data
01259   log() const;
01260 
01266   ESCRIPT_DLL_API
01267   Data
01268   exp() const;
01269 
01275   ESCRIPT_DLL_API
01276   Data
01277   sqrt() const;
01278 
01284   ESCRIPT_DLL_API
01285   Data
01286   neg() const;
01287 
01294   ESCRIPT_DLL_API
01295   Data
01296   pos() const;
01297 
01305   ESCRIPT_DLL_API
01306   Data
01307   powD(const Data& right) const;
01308 
01316   ESCRIPT_DLL_API
01317   Data
01318   powO(const boost::python::object& right) const;
01319 
01328   ESCRIPT_DLL_API
01329   Data
01330   rpowO(const boost::python::object& left) const;
01331 
01338   ESCRIPT_DLL_API
01339   Data& operator+=(const Data& right);
01340   ESCRIPT_DLL_API
01341   Data& operator+=(const boost::python::object& right);
01342 
01343   ESCRIPT_DLL_API
01344   Data& operator=(const Data& other);
01345 
01352   ESCRIPT_DLL_API
01353   Data& operator-=(const Data& right);
01354   ESCRIPT_DLL_API
01355   Data& operator-=(const boost::python::object& right);
01356 
01363   ESCRIPT_DLL_API
01364   Data& operator*=(const Data& right);
01365   ESCRIPT_DLL_API
01366   Data& operator*=(const boost::python::object& right);
01367 
01374   ESCRIPT_DLL_API
01375   Data& operator/=(const Data& right);
01376   ESCRIPT_DLL_API
01377   Data& operator/=(const boost::python::object& right);
01378 
01383   ESCRIPT_DLL_API
01384   Data truedivD(const Data& right);
01385 
01390   ESCRIPT_DLL_API
01391   Data truedivO(const boost::python::object& right);
01392 
01397   ESCRIPT_DLL_API
01398   Data rtruedivO(const boost::python::object& left);
01399 
01404   ESCRIPT_DLL_API
01405   boost::python::object __add__(const boost::python::object& right);
01406   
01407 
01412   ESCRIPT_DLL_API
01413   boost::python::object __sub__(const boost::python::object& right);
01414   
01419   ESCRIPT_DLL_API
01420   boost::python::object __rsub__(const boost::python::object& right);  
01421 
01426   ESCRIPT_DLL_API
01427   boost::python::object __mul__(const boost::python::object& right);
01428     
01433   ESCRIPT_DLL_API
01434   boost::python::object __div__(const boost::python::object& right);
01435   
01440   ESCRIPT_DLL_API
01441   boost::python::object __rdiv__(const boost::python::object& right);    
01442   
01446   ESCRIPT_DLL_API
01447   Data
01448   matrixInverse() const;
01449 
01454   ESCRIPT_DLL_API
01455   bool
01456   probeInterpolation(const FunctionSpace& functionspace) const;
01457 
01473   ESCRIPT_DLL_API
01474   Data
01475   getItem(const boost::python::object& key) const;
01476 
01488   ESCRIPT_DLL_API
01489   void
01490   setItemD(const boost::python::object& key,
01491            const Data& value);
01492 
01493   ESCRIPT_DLL_API
01494   void
01495   setItemO(const boost::python::object& key,
01496            const boost::python::object& value);
01497 
01498   // These following public methods should be treated as private.
01499 
01505   template <class UnaryFunction>
01506   ESCRIPT_DLL_API
01507   inline
01508   void
01509   unaryOp2(UnaryFunction operation);
01510 
01518   ESCRIPT_DLL_API
01519   Data
01520   getSlice(const DataTypes::RegionType& region) const;
01521 
01530   ESCRIPT_DLL_API
01531   void
01532   setSlice(const Data& value,
01533            const DataTypes::RegionType& region);
01534 
01539   ESCRIPT_DLL_API
01540   void
01541         print(void);
01542 
01549   ESCRIPT_DLL_API
01550         int
01551         get_MPIRank(void) const;
01552 
01559   ESCRIPT_DLL_API
01560         int
01561         get_MPISize(void) const;
01562 
01568   ESCRIPT_DLL_API
01569         MPI_Comm
01570         get_MPIComm(void) const;
01571 
01577   ESCRIPT_DLL_API
01578         DataAbstract*
01579         borrowData(void) const;
01580 
01581   ESCRIPT_DLL_API
01582         DataAbstract_ptr
01583         borrowDataPtr(void) const;
01584 
01585   ESCRIPT_DLL_API
01586         DataReady_ptr
01587         borrowReadyPtr(void) const;
01588 
01589 
01590 
01598   ESCRIPT_DLL_API
01599         DataTypes::ValueType::const_reference
01600         getDataAtOffsetRO(DataTypes::ValueType::size_type i);
01601 
01602 
01603   ESCRIPT_DLL_API
01604         DataTypes::ValueType::reference
01605         getDataAtOffsetRW(DataTypes::ValueType::size_type i);
01606 
01616   ESCRIPT_DLL_API
01617   DataTypes::ValueType&
01618   getExpandedVectorReference();
01619  
01620  protected:
01621 
01622  private:
01623 
01624 template <class BinaryOp>
01625   double 
01626 #ifdef ESYS_MPI
01627   lazyAlgWorker(double init, MPI_Op mpiop_type);
01628 #else
01629   lazyAlgWorker(double init);
01630 #endif
01631 
01632   double
01633   LsupWorker() const;
01634 
01635   double
01636   supWorker() const;
01637 
01638   double
01639   infWorker() const;
01640 
01641   boost::python::object
01642   integrateWorker() const;
01643 
01644   void
01645   calc_minGlobalDataPoint(int& ProcNo,  int& DataPointNo) const;
01646 
01647   void
01648   calc_maxGlobalDataPoint(int& ProcNo,  int& DataPointNo) const;
01649 
01650   // For internal use in Data.cpp only!
01651   // other uses should call the main entry points and allow laziness
01652   Data
01653   minval_nonlazy() const;
01654 
01655   // For internal use in Data.cpp only!
01656   Data
01657   maxval_nonlazy() const;
01658 
01659 
  // Check that `right` is a compatible operand for this object by delegating
  // to the underlying DataAbstract (presumably throws on mismatch -- see
  // DataAbstract::operandCheck).
  inline
  void
  operandCheck(const Data& right) const
  {
    // `return` on a void-returning call is legal; it simply forwards.
    return m_data->operandCheck(*(right.m_data.get()));
  }
01672 
01678   template <class BinaryFunction>
01679   inline
01680   double
01681   algorithm(BinaryFunction operation,
01682             double initial_value) const;
01683 
01691   template <class BinaryFunction>
01692   inline
01693   Data
01694   dp_algorithm(BinaryFunction operation,
01695                double initial_value) const;
01696 
01705   template <class BinaryFunction>
01706   inline
01707   void
01708   binaryOp(const Data& right,
01709            BinaryFunction operation);
01710 
01716   void
01717   typeMatchLeft(Data& right) const;
01718 
01724   void
01725   typeMatchRight(const Data& right);
01726 
01732   void
01733   initialise(const DataTypes::ValueType& value,
01734          const DataTypes::ShapeType& shape,
01735              const FunctionSpace& what,
01736              bool expanded);
01737 
01738   void
01739   initialise(const WrappedArray& value,
01740                  const FunctionSpace& what,
01741                  bool expanded);
01742 
01743   void
01744   initialise(const double value,
01745          const DataTypes::ShapeType& shape,
01746              const FunctionSpace& what,
01747              bool expanded);
01748 
01749   //
01750   // flag to protect the data object against any update
01751   bool m_protected;
01752   mutable bool m_shared;
01753   bool m_lazy;
01754 
01755   //
01756   // pointer to the actual data object
01757 //   boost::shared_ptr<DataAbstract> m_data;
01758   DataAbstract_ptr m_data;
01759 
01760 // If possible please use getReadyPtr instead.
01761 // But see warning below.
01762   const DataReady*
01763   getReady() const;
01764 
01765   DataReady*
01766   getReady();
01767 
01768 
01769 // Be wary of using this for local operations since it (temporarily) increases reference count.
01770 // If you are just using this to call a method on DataReady instead of DataAbstract consider using 
01771 // getReady() instead
01772   DataReady_ptr
01773   getReadyPtr();
01774 
01775   const_DataReady_ptr
01776   getReadyPtr() const;
01777 
01778 
  // Record whether the underlying data is currently shared with another Data
  // object. Declared const because only the mutable m_shared flag is written.
  void updateShareStatus(bool nowshared) const
  {
    m_shared=nowshared;     // m_shared is mutable
  }
01788 
01789   // In the isShared() method below:
01790   // A problem would occur if m_data (the address pointed to) were being modified 
01791   // while the call m_data->is_shared is being executed.
01792   // 
01793   // Q: So why do I think this code can be thread safe/correct?
01794   // A: We need to make some assumptions.
01795   // 1. We assume it is acceptable to return true under some conditions when we aren't shared.
01796   // 2. We assume that no constructions or assignments which will share previously unshared
01797   //    will occur while this call is executing. This is consistent with the way Data:: and C are written.
01798   //
01799   // This means that the only transition we need to consider, is when a previously shared object is
01800   // not shared anymore. ie. the other objects have been destroyed or a deep copy has been made.
01801   // In those cases the m_shared flag changes to false after m_data has completed changing.
01802   // For any threads executing before the flag switches they will assume the object is still shared.
  // Cheap, possibly conservative answer: relies on the m_shared flag alone
  // (see the thread-safety discussion above). The commented-out variant also
  // re-queried m_data and cached the result.
  bool isShared() const
  {
    return m_shared;
/*  if (m_shared) return true;
    if (m_data->isShared())         
    {                   
        updateShareStatus(true);
        return true;
    }
    return false;*/
  }
01814 
  // Resolve any lazy expression held by this object immediately.
  // Must not be called from inside an OpenMP parallel region; when OpenMP is
  // enabled this is detected and reported via DataException.
  void forceResolve()
  {
    if (isLazy())
    {
        #ifdef _OPENMP
        if (omp_in_parallel())
        {   // Yes this is throwing an exception out of an omp thread which is forbidden.
        throw DataException("Please do not call forceResolve() in a parallel region.");
        }
        #endif
        resolve();
    }
  }
01828 
  // Ensure this object holds the only reference to its (resolved) data so it
  // can be written safely: resolve laziness first, then deep-copy if shared.
  // Must not be called from a multi-threaded section (checked under OpenMP).
  void exclusiveWrite()
  {
#ifdef _OPENMP
  if (omp_in_parallel())
  {
// *((int*)0)=17;
    throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
  }
#endif
    forceResolve();
    if (isShared())
    {
        // Replace the shared payload with a private deep copy.
        DataAbstract* t=m_data->deepCopy();
        set_m_data(DataAbstract_ptr(t));
    }
  }
01849 
01853   void checkExclusiveWrite()
01854   {
01855     if  (isLazy() || isShared())
01856     {
01857         throw DataException("Programming error. ExclusiveWrite required - please call requireWrite()");
01858     }
01859   }
01860 
01867   void set_m_data(DataAbstract_ptr p);
01868 
01869   friend class DataAbstract;        // To allow calls to updateShareStatus
01870   friend class TestDomain;      // so its getX will work quickly
01871 #ifdef IKNOWWHATIMDOING
01872   friend ESCRIPT_DLL_API Data applyBinaryCFunction(boost::python::object cfunc, boost::python::tuple shape, escript::Data& d, escript::Data& e);
01873 #endif
01874   friend ESCRIPT_DLL_API Data condEval(escript::Data& mask, escript::Data& trueval, escript::Data& falseval);
01875   friend ESCRIPT_DLL_API Data randomData(const boost::python::tuple& shape, const FunctionSpace& what, long seed, const boost::python::tuple& filter);
01876 
01877 };
01878 
01879 
01880 #ifdef IKNOWWHATIMDOING
01881 ESCRIPT_DLL_API
01882 Data
01883 applyBinaryCFunction(boost::python::object func, boost::python::tuple shape, escript::Data& d, escript::Data& e);
01884 #endif
01885 
01886 
01887 ESCRIPT_DLL_API
01888 Data
01889 condEval(escript::Data& mask, escript::Data& trueval, escript::Data& falseval);
01890 
01891 
01892 
01896 ESCRIPT_DLL_API
01897 Data randomData(const boost::python::tuple& shape,
01898        const FunctionSpace& what,
01899        long seed, const boost::python::tuple& filter);
01900 
01901 
01902 }   // end namespace escript
01903 
01904 
01905 // No, this is not supposed to be at the top of the file
01906 // DataAbstact needs to be declared first, then DataReady needs to be fully declared
01907 // so that I can dynamic cast between them below.
01908 #include "DataReady.h"
01909 #include "DataLazy.h"
01910 
01911 namespace escript
01912 {
01913 
01914 inline
01915 const DataReady*
01916 Data::getReady() const
01917 {
01918    const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
01919    EsysAssert((dr!=0), "Error - casting to DataReady.");
01920    return dr;
01921 }
01922 
01923 inline
01924 DataReady*
01925 Data::getReady()
01926 {
01927    DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
01928    EsysAssert((dr!=0), "Error - casting to DataReady.");
01929    return dr;
01930 }
01931 
01932 // Be wary of using this for local operations since it (temporarily) increases reference count.
01933 // If you are just using this to call a method on DataReady instead of DataAbstract consider using 
01934 // getReady() instead
01935 inline
01936 DataReady_ptr
01937 Data::getReadyPtr()
01938 {
01939    DataReady_ptr dr=boost::dynamic_pointer_cast<DataReady>(m_data);
01940    EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
01941    return dr;
01942 }
01943 
01944 
01945 inline
01946 const_DataReady_ptr
01947 Data::getReadyPtr() const
01948 {
01949    const_DataReady_ptr dr=boost::dynamic_pointer_cast<const DataReady>(m_data);
01950    EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
01951    return dr;
01952 }
01953 
01954 inline
01955 DataAbstract::ValueType::value_type*
01956 Data::getSampleDataRW(DataAbstract::ValueType::size_type sampleNo)
01957 {
01958    if (isLazy())
01959    {
01960     throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
01961    }
01962    return getReady()->getSampleDataRW(sampleNo);
01963 }
01964 
01965 inline
01966 const DataAbstract::ValueType::value_type*
01967 Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo) const
01968 {
01969    DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
01970    if (l!=0)
01971    {
01972     size_t offset=0;
01973     const DataTypes::ValueType* res=l->resolveSample(sampleNo,offset);
01974     return &((*res)[offset]);
01975    }
01976    return getReady()->getSampleDataRO(sampleNo);
01977 }
01978 
/**
   \brief Reflected power: computes y raised to the power x.

   The argument order is deliberately swapped relative to std::pow because
   this implements python's reflected __rpow__, where this Data object is the
   exponent and the other operand is the base (see rpowO).

   \param x the exponent
   \param y the base
   \return y**x
*/
inline double rpow(double x,double y)
{
    // std::pow is qualified explicitly; the unqualified call relied on a
    // transitive math-header include, which is fragile.
    return std::pow(y,x);
}
01986 
01992 ESCRIPT_DLL_API Data operator+(const Data& left, const Data& right);
01993 
01999 ESCRIPT_DLL_API Data operator-(const Data& left, const Data& right);
02000 
02006 ESCRIPT_DLL_API Data operator*(const Data& left, const Data& right);
02007 
02013 ESCRIPT_DLL_API Data operator/(const Data& left, const Data& right);
02014 
02021 ESCRIPT_DLL_API Data operator+(const Data& left, const boost::python::object& right);
02022 
02029 ESCRIPT_DLL_API Data operator-(const Data& left, const boost::python::object& right);
02030 
02037 ESCRIPT_DLL_API Data operator*(const Data& left, const boost::python::object& right);
02038 
02045 ESCRIPT_DLL_API Data operator/(const Data& left, const boost::python::object& right);
02046 
02053 ESCRIPT_DLL_API Data operator+(const boost::python::object& left, const Data& right);
02054 
02061 ESCRIPT_DLL_API Data operator-(const boost::python::object& left, const Data& right);
02062 
02069 ESCRIPT_DLL_API Data operator*(const boost::python::object& left, const Data& right);
02070 
02077 ESCRIPT_DLL_API Data operator/(const boost::python::object& left, const Data& right);
02078 
02079 
02080 
02085 ESCRIPT_DLL_API std::ostream& operator<<(std::ostream& o, const Data& data);
02086 
02095 ESCRIPT_DLL_API
02096 Data
02097 C_GeneralTensorProduct(Data& arg_0,
02098                      Data& arg_1,
02099                      int axis_offset=0,
02100                      int transpose=0);
02101 
02107 inline
02108 Data
02109 Data::truedivD(const Data& right)
02110 {
02111     return *this / right;
02112 }
02113 
02119 inline
02120 Data
02121 Data::truedivO(const boost::python::object& right)
02122 {
02123     Data tmp(right, getFunctionSpace(), false);
02124     return truedivD(tmp);
02125 }
02126 
02132 inline
02133 Data
02134 Data::rtruedivO(const boost::python::object& left)
02135 {
02136     Data tmp(left, getFunctionSpace(), false);
02137     return tmp.truedivD(*this);
02138 }
02139 
/**
  \brief
  Apply the given binary operation in place: *this = operation(*this, right),
  element-wise over all data points.

  If the two operands live on different FunctionSpaces the domain is asked
  which interpolation direction it prefers; either right is interpolated onto
  this object's FunctionSpace, or *this is re-seated (via set_m_data) onto an
  interpolation of itself on right's FunctionSpace.

  \param right - the second operand
  \param operation - binary functor taking two doubles, returning a double
  \throws DataException if the LHS has rank 0 while the RHS does not, if the
          FunctionSpaces cannot be combined, or if either operand is lazy.
*/
template <class BinaryFunction>
inline
void
Data::binaryOp(const Data& right,
               BinaryFunction operation)
{
   //
   // A rank-0 LHS cannot be updated in place from a higher-rank RHS;
   // no promotion is performed here — this case is rejected outright.
   if (getDataPointRank()==0 && right.getDataPointRank()!=0) {
     throw DataException("Error - attempt to update rank zero object with object with rank bigger than zero.");
   }

   // Lazy operands must be resolved by the caller before reaching here.
   if (isLazy() || right.isLazy())
   {
     throw DataException("Programmer error - attempt to call binaryOp with Lazy Data.");
   }
   //
   // initially make the temporary a shallow copy
   Data tempRight(right);
   FunctionSpace fsl=getFunctionSpace();
   FunctionSpace fsr=right.getFunctionSpace();
   if (fsl!=fsr) {
     // Ask the domain which way to interpolate:
     //   0 = incompatible, 1 = bring RHS to LHS space, otherwise the reverse.
     signed char intres=fsl.getDomain()->preferredInterpolationOnDomain(fsr.getTypeCode(), fsl.getTypeCode());
     if (intres==0)
     {
         std::string msg="Error - attempt to combine incompatible FunctionSpaces.";
     msg+=fsl.toString();
     msg+="  ";
     msg+=fsr.toString();
         throw DataException(msg.c_str());
     } 
     else if (intres==1)
     {
       // an interpolation is required so create a new Data
       tempRight=Data(right,fsl);
     }
     else   // reverse interpolation preferred
     {
        // interpolate *this onto the RHS function space and re-seat our
        // underlying representation on the interpolated copy
       Data tempLeft(*this,fsr);
       set_m_data(tempLeft.m_data);
     }
   }
   operandCheck(tempRight);
   //
   // ensure this has the right type for the RHS
   typeMatchRight(tempRight);
   //
   // Need to cast to the concrete types so that the correct binaryOp
   // is called.
   if (isExpanded()) {
     //
     // Expanded data will be done in parallel, the right hand side can be
     // of any data type
     DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
     EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
     escript::binaryOp(*leftC,*(tempRight.getReady()),operation);
   } else if (isTagged()) {
     //
     // Tagged data is operated on serially, the right hand side can be
     // either DataConstant or DataTagged
     DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
     EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
     if (right.isTagged()) {
       DataTagged* rightC=dynamic_cast<DataTagged*>(tempRight.m_data.get());
       EsysAssert((rightC!=0), "Programming error - casting to DataTagged.");
       escript::binaryOp(*leftC,*rightC,operation);
     } else {
       DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
       EsysAssert((rightC!=0), "Programming error - casting to DataConstant.");
       escript::binaryOp(*leftC,*rightC,operation);
     }
   } else if (isConstant()) {
     DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
     DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
     EsysAssert((leftC!=0 && rightC!=0), "Programming error - casting to DataConstant.");
     escript::binaryOp(*leftC,*rightC,operation);
   }
}
02224 
02232 template <class BinaryFunction>
02233 inline
02234 double
02235 Data::algorithm(BinaryFunction operation, double initial_value) const
02236 {
02237   if (isExpanded()) {
02238     DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
02239     EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
02240     return escript::algorithm(*leftC,operation,initial_value);
02241   } else if (isTagged()) {
02242     DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
02243     EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
02244     return escript::algorithm(*leftC,operation,initial_value);
02245   } else if (isConstant()) {
02246     DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
02247     EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
02248     return escript::algorithm(*leftC,operation,initial_value);
02249   } else if (isEmpty()) {
02250     throw DataException("Error - Operations not permitted on instances of DataEmpty.");
02251   } else if (isLazy()) {
02252     throw DataException("Error - Operations not permitted on instances of DataLazy.");
02253   } else {
02254     throw DataException("Error - Data encapsulates an unknown type.");
02255   }
02256 }
02257 
/**
  \brief
  Reduce each data point of this Data object to a single scalar with the
  given binary operation, producing a new scalar-valued Data object of the
  same kind (expanded/tagged/constant) on the same FunctionSpace.

  \param operation - binary functor taking two doubles, returning a double
  \param initial_value - seed value for each per-point reduction
  \return a scalar Data object holding one reduced value per data point
  \throws DataException for empty, lazy, or unrecognised representations.
*/
template <class BinaryFunction>
inline
Data
Data::dp_algorithm(BinaryFunction operation, double initial_value) const
{
  if (isEmpty()) {
    throw DataException("Error - Operations not permitted on instances of DataEmpty.");
  } 
  else if (isExpanded()) {
    // Scalar expanded result on the same FunctionSpace (isExpanded() is
    // true here, so the fourth constructor argument requests expansion).
    Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
    DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
    DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
    EsysAssert((dataE!=0), "Programming error - casting data to DataExpanded.");
    EsysAssert((resultE!=0), "Programming error - casting result to DataExpanded.");
    escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
    return result;
  }
  else if (isTagged()) {
    DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
    EsysAssert((dataT!=0), "Programming error - casting data to DataTagged.");
    // Build a scalar tagged result whose default value is 0 and whose tag
    // set mirrors dataT's (the DataTagged constructor takes dataT as model).
    DataTypes::ValueType defval(1);
    defval[0]=0;
    DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
    escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
    return Data(resultT);   // note: the Data object now owns the resultT pointer
  } 
  else if (isConstant()) {
    // isExpanded() is false in this branch, so the result is constant too.
    Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
    DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
    DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
    EsysAssert((dataC!=0), "Programming error - casting data to DataConstant.");
    EsysAssert((resultC!=0), "Programming error - casting result to DataConstant.");
    escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
    return result;
  } else if (isLazy()) {
    throw DataException("Error - Operations not permitted on instances of DataLazy.");
  } else {
    throw DataException("Error - Data encapsulates an unknown type.");
  }
}
02306 
02314 template <typename BinaryFunction>
02315 inline
02316 Data
02317 C_TensorBinaryOperation(Data const &arg_0,
02318                         Data const &arg_1,
02319                         BinaryFunction operation)
02320 {
02321   if (arg_0.isEmpty() || arg_1.isEmpty())
02322   {
02323      throw DataException("Error - Operations not permitted on instances of DataEmpty.");
02324   }
02325   if (arg_0.isLazy() || arg_1.isLazy())
02326   {
02327      throw DataException("Error - Operations not permitted on lazy data.");
02328   }
02329   // Interpolate if necessary and find an appropriate function space
02330   Data arg_0_Z, arg_1_Z;
02331   FunctionSpace fsl=arg_0.getFunctionSpace();
02332   FunctionSpace fsr=arg_1.getFunctionSpace();
02333   if (fsl!=fsr) {
02334      signed char intres=fsl.getDomain()->preferredInterpolationOnDomain(fsr.getTypeCode(), fsl.getTypeCode());
02335      if (intres==0)
02336      {
02337          std::string msg="Error - C_TensorBinaryOperation: arguments have incompatible function spaces.";
02338          msg+=fsl.toString();
02339          msg+=" ";
02340          msg+=fsr.toString();
02341          throw DataException(msg.c_str());
02342      } 
02343      else if (intres==1)
02344      {
02345       arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
02346       arg_0_Z =Data(arg_0);      
02347      }
02348      else   // reverse interpolation preferred
02349      {
02350       arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
02351       arg_1_Z = Data(arg_1);
02352      }    
02353   } else {
02354       arg_0_Z = Data(arg_0);
02355       arg_1_Z = Data(arg_1);
02356   }
02357   // Get rank and shape of inputs
02358   int rank0 = arg_0_Z.getDataPointRank();
02359   int rank1 = arg_1_Z.getDataPointRank();
02360   DataTypes::ShapeType shape0 = arg_0_Z.getDataPointShape();
02361   DataTypes::ShapeType shape1 = arg_1_Z.getDataPointShape();
02362   int size0 = arg_0_Z.getDataPointSize();
02363   int size1 = arg_1_Z.getDataPointSize();
02364   // Declare output Data object
02365   Data res;
02366 
02367   if (shape0 == shape1) {
02368     if (arg_0_Z.isConstant()   && arg_1_Z.isConstant()) {
02369       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());      // DataConstant output
02370       const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
02371       const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
02372       double *ptr_2 = &(res.getDataAtOffsetRW(0));
02373 
02374       tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02375     }
02376     else if (arg_0_Z.isConstant()   && arg_1_Z.isTagged()) {
02377 
02378       // Prepare the DataConstant input
02379       DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
02380 
02381       // Borrow DataTagged input from Data object
02382       DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02383 
02384       // Prepare a DataTagged output 2
02385       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());      // DataTagged output
02386       res.tag();
02387       DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02388 
02389       // Prepare offset into DataConstant
02390       int offset_0 = tmp_0->getPointOffset(0,0);
02391       const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02392 
02393       // Get the pointers to the actual data
02394       const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
02395       double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02396 
02397       // Compute a result for the default
02398       tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02399       // Compute a result for each tag
02400       const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
02401       DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
02402       for (i=lookup_1.begin();i!=lookup_1.end();i++) {
02403         tmp_2->addTag(i->first);
02404         const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
02405         double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02406 
02407         tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02408       }
02409 
02410     }
02411     else if (arg_0_Z.isConstant()   && arg_1_Z.isExpanded()) {
02412       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
02413       DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
02414       DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02415       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02416 
02417       int sampleNo_1,dataPointNo_1;
02418       int numSamples_1 = arg_1_Z.getNumSamples();
02419       int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
02420       int offset_0 = tmp_0->getPointOffset(0,0);
02421       res.requireWrite();
02422       #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
02423       for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
02424         for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
02425           int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
02426           int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
02427           const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02428           const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1)); 
02429           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2)); 
02430           tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02431         }
02432       }
02433 
02434     }
02435     else if (arg_0_Z.isTagged()     && arg_1_Z.isConstant()) {
02436       // Borrow DataTagged input from Data object
02437       DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02438 
02439       // Prepare the DataConstant input
02440       DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
02441 
02442       // Prepare a DataTagged output 2
02443       res = Data(0.0, shape0, arg_0_Z.getFunctionSpace());      // DataTagged output
02444       res.tag();
02445       DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02446 
02447       // Prepare offset into DataConstant
02448       int offset_1 = tmp_1->getPointOffset(0,0);
02449 
02450       const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02451       // Get the pointers to the actual data
02452       const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
02453       double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02454       // Compute a result for the default
02455       tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02456       // Compute a result for each tag
02457       const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
02458       DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
02459       for (i=lookup_0.begin();i!=lookup_0.end();i++) {
02460         tmp_2->addTag(i->first);
02461         const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
02462         double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02463         tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02464       }
02465 
02466     }
02467     else if (arg_0_Z.isTagged()     && arg_1_Z.isTagged()) {
02468       // Borrow DataTagged input from Data object
02469       DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02470 
02471       // Borrow DataTagged input from Data object
02472       DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02473 
02474       // Prepare a DataTagged output 2
02475       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
02476       res.tag();        // DataTagged output
02477       DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02478 
02479       // Get the pointers to the actual data
02480       const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
02481       const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
02482       double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02483 
02484       // Compute a result for the default
02485       tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02486       // Merge the tags
02487       DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
02488       const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
02489       const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
02490       for (i=lookup_0.begin();i!=lookup_0.end();i++) {
02491         tmp_2->addTag(i->first); // use tmp_2 to get correct shape
02492       }
02493       for (i=lookup_1.begin();i!=lookup_1.end();i++) {
02494         tmp_2->addTag(i->first);
02495       }
02496       // Compute a result for each tag
02497       const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
02498       for (i=lookup_2.begin();i!=lookup_2.end();i++) {
02499 
02500         const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
02501         const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
02502         double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02503 
02504         tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02505       }
02506 
02507     }
02508     else if (arg_0_Z.isTagged()     && arg_1_Z.isExpanded()) {
02509       // After finding a common function space above the two inputs have the same numSamples and num DPPS
02510       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
02511       DataTagged*   tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02512       DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02513       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02514 
02515       int sampleNo_0,dataPointNo_0;
02516       int numSamples_0 = arg_0_Z.getNumSamples();
02517       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02518       res.requireWrite();
02519       #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02520       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02521         int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
02522         const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02523         for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02524           int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
02525           int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02526           const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02527           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02528           tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02529         }
02530       }
02531 
02532     }
02533     else if (arg_0_Z.isExpanded()   && arg_1_Z.isConstant()) {
02534       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
02535       DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
02536       DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
02537       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02538 
02539       int sampleNo_0,dataPointNo_0;
02540       int numSamples_0 = arg_0_Z.getNumSamples();
02541       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02542       int offset_1 = tmp_1->getPointOffset(0,0);
02543       res.requireWrite();
02544       #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02545       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02546         for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02547           int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
02548           int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02549 
02550           const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02551           const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02552           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02553 
02554 
02555           tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02556         }
02557       }
02558 
02559     }
02560     else if (arg_0_Z.isExpanded()   && arg_1_Z.isTagged()) {
02561       // After finding a common function space above the two inputs have the same numSamples and num DPPS
02562       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
02563       DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
02564       DataTagged*   tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02565       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02566 
02567       int sampleNo_0,dataPointNo_0;
02568       int numSamples_0 = arg_0_Z.getNumSamples();
02569       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02570       res.requireWrite();
02571       #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02572       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02573         int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
02574         const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02575         for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02576           int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
02577           int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02578           const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02579           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02580           tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02581         }
02582       }
02583 
02584     }
02585     else if (arg_0_Z.isExpanded()   && arg_1_Z.isExpanded()) {
02586       // After finding a common function space above the two inputs have the same numSamples and num DPPS
02587       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
02588       DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
02589       DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02590       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02591 
02592       int sampleNo_0,dataPointNo_0;
02593       int numSamples_0 = arg_0_Z.getNumSamples();
02594       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02595       res.requireWrite();
02596       #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02597       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02598       dataPointNo_0=0;
02599 //        for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02600           int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
02601           int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
02602           int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02603           const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02604           const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02605           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02606           tensor_binary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_1, ptr_2, operation);
02607 //       }
02608       }
02609 
02610     }
02611     else {
02612       throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
02613     }
02614 
02615   } else if (0 == rank0) {
02616     if (arg_0_Z.isConstant()   && arg_1_Z.isConstant()) {
02617       res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());      // DataConstant output
02618       const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
02619       const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
02620       double *ptr_2 = &(res.getDataAtOffsetRW(0));
02621       tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02622     }
02623     else if (arg_0_Z.isConstant()   && arg_1_Z.isTagged()) {
02624 
02625       // Prepare the DataConstant input
02626       DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
02627 
02628       // Borrow DataTagged input from Data object
02629       DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02630 
02631       // Prepare a DataTagged output 2
02632       res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());      // DataTagged output
02633       res.tag();
02634       DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02635 
02636       // Prepare offset into DataConstant
02637       int offset_0 = tmp_0->getPointOffset(0,0);
02638       const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02639 
02640       const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
02641       double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02642 
02643       // Compute a result for the default
02644       tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02645       // Compute a result for each tag
02646       const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
02647       DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
02648       for (i=lookup_1.begin();i!=lookup_1.end();i++) {
02649         tmp_2->addTag(i->first);
02650         const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
02651         double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02652         tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02653       }
02654 
02655     }
02656     else if (arg_0_Z.isConstant()   && arg_1_Z.isExpanded()) {
02657 
02658       res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
02659       DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
02660       DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02661       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02662 
02663       int sampleNo_1;
02664       int numSamples_1 = arg_1_Z.getNumSamples();
02665       int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
02666       int offset_0 = tmp_0->getPointOffset(0,0);
02667       const double *ptr_src = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02668       double ptr_0 = ptr_src[0];
02669       int size = size1*numDataPointsPerSample_1;
02670       res.requireWrite();
02671       #pragma omp parallel for private(sampleNo_1) schedule(static)
02672       for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
02673 //        for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
02674           int offset_1 = tmp_1->getPointOffset(sampleNo_1,0);
02675           int offset_2 = tmp_2->getPointOffset(sampleNo_1,0);
02676 //          const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02677           const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02678           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02679           tensor_binary_operation(size, ptr_0, ptr_1, ptr_2, operation);
02680 
02681 //        }
02682       }
02683 
02684     }
02685     else if (arg_0_Z.isTagged()     && arg_1_Z.isConstant()) {
02686 
02687       // Borrow DataTagged input from Data object
02688       DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02689 
02690       // Prepare the DataConstant input
02691       DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
02692 
02693       // Prepare a DataTagged output 2
02694       res = Data(0.0, shape1, arg_0_Z.getFunctionSpace());      // DataTagged output
02695       res.tag();
02696       DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02697 
02698       // Prepare offset into DataConstant
02699       int offset_1 = tmp_1->getPointOffset(0,0);
02700       const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02701 
02702       // Get the pointers to the actual data
02703       const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
02704       double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02705 
02706 
02707       // Compute a result for the default
02708       tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02709       // Compute a result for each tag
02710       const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
02711       DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
02712       for (i=lookup_0.begin();i!=lookup_0.end();i++) {
02713         tmp_2->addTag(i->first);
02714         const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
02715         double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02716 
02717         tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02718       }
02719 
02720     }
02721     else if (arg_0_Z.isTagged()     && arg_1_Z.isTagged()) {
02722 
02723       // Borrow DataTagged input from Data object
02724       DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02725 
02726       // Borrow DataTagged input from Data object
02727       DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02728 
02729       // Prepare a DataTagged output 2
02730       res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
02731       res.tag();        // DataTagged output
02732       DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02733 
02734       // Get the pointers to the actual data
02735       const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
02736       const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
02737       double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02738 
02739       // Compute a result for the default
02740       tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02741       // Merge the tags
02742       DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
02743       const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
02744       const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
02745       for (i=lookup_0.begin();i!=lookup_0.end();i++) {
02746         tmp_2->addTag(i->first); // use tmp_2 to get correct shape
02747       }
02748       for (i=lookup_1.begin();i!=lookup_1.end();i++) {
02749         tmp_2->addTag(i->first);
02750       }
02751       // Compute a result for each tag
02752       const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
02753       for (i=lookup_2.begin();i!=lookup_2.end();i++) {
02754         const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
02755         const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
02756         double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02757 
02758         tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02759       }
02760 
02761     }
02762     else if (arg_0_Z.isTagged()     && arg_1_Z.isExpanded()) {
02763 
02764       // After finding a common function space above the two inputs have the same numSamples and num DPPS
02765       res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
02766       DataTagged*   tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02767       DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02768       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02769 
02770       int sampleNo_0,dataPointNo_0;
02771       int numSamples_0 = arg_0_Z.getNumSamples();
02772       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02773       res.requireWrite();
02774       #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02775       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02776         int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
02777         const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02778         for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02779           int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
02780           int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02781           const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02782           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02783           tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02784         }
02785       }
02786 
02787     }
02788     else if (arg_0_Z.isExpanded()   && arg_1_Z.isConstant()) {
02789       res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
02790       DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
02791       DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
02792       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02793 
02794       int sampleNo_0,dataPointNo_0;
02795       int numSamples_0 = arg_0_Z.getNumSamples();
02796       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02797       int offset_1 = tmp_1->getPointOffset(0,0);
02798       res.requireWrite();
02799       #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02800       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02801         for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02802           int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
02803           int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02804           const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02805           const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02806           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02807           tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02808         }
02809       }
02810 
02811 
02812     }
02813     else if (arg_0_Z.isExpanded()   && arg_1_Z.isTagged()) {
02814 
02815       // After finding a common function space above the two inputs have the same numSamples and num DPPS
02816       res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
02817       DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
02818       DataTagged*   tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02819       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02820 
02821       int sampleNo_0,dataPointNo_0;
02822       int numSamples_0 = arg_0_Z.getNumSamples();
02823       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02824       res.requireWrite();
02825       #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02826       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02827         int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
02828         const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02829         for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02830           int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
02831           int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02832           const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02833           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02834           tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02835         }
02836       }
02837 
02838     }
02839     else if (arg_0_Z.isExpanded()   && arg_1_Z.isExpanded()) {
02840 
02841       // After finding a common function space above the two inputs have the same numSamples and num DPPS
02842       res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
02843       DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
02844       DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02845       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02846 
02847       int sampleNo_0,dataPointNo_0;
02848       int numSamples_0 = arg_0_Z.getNumSamples();
02849       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02850       res.requireWrite();
02851       #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02852       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02853         for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02854           int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
02855           int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
02856           int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02857           const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02858           const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02859           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02860           tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02861         }
02862       }
02863 
02864     }
02865     else {
02866       throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
02867     }
02868 
02869   } else if (0 == rank1) {
02870     if (arg_0_Z.isConstant()   && arg_1_Z.isConstant()) {
02871       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());      // DataConstant output
02872       const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
02873       const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
02874       double *ptr_2 = &(res.getDataAtOffsetRW(0));
02875       tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02876     }
02877     else if (arg_0_Z.isConstant()   && arg_1_Z.isTagged()) {
02878 
02879       // Prepare the DataConstant input
02880       DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
02881 
02882       // Borrow DataTagged input from Data object
02883       DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02884 
02885       // Prepare a DataTagged output 2
02886       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());      // DataTagged output
02887       res.tag();
02888       DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02889 
02890       // Prepare offset into DataConstant
02891       int offset_0 = tmp_0->getPointOffset(0,0);
02892       const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02893 
02894       //Get the pointers to the actual data
02895       const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
02896       double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02897 
02898       // Compute a result for the default
02899       tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02900       // Compute a result for each tag
02901       const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
02902       DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
02903       for (i=lookup_1.begin();i!=lookup_1.end();i++) {
02904         tmp_2->addTag(i->first);
02905         const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
02906         double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02907         tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02908       }
02909     }
02910     else if (arg_0_Z.isConstant()   && arg_1_Z.isExpanded()) {
02911 
02912       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
02913       DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
02914       DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02915       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02916 
02917       int sampleNo_1,dataPointNo_1;
02918       int numSamples_1 = arg_1_Z.getNumSamples();
02919       int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
02920       int offset_0 = tmp_0->getPointOffset(0,0);
02921       res.requireWrite();
02922       #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
02923       for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
02924         for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
02925           int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
02926           int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
02927           const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02928           const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02929           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02930           tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02931         }
02932       }
02933 
02934     }
02935     else if (arg_0_Z.isTagged()     && arg_1_Z.isConstant()) {
02936 
02937       // Borrow DataTagged input from Data object
02938       DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02939 
02940       // Prepare the DataConstant input
02941       DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
02942 
02943       // Prepare a DataTagged output 2
02944       res = Data(0.0, shape0, arg_0_Z.getFunctionSpace());      // DataTagged output
02945       res.tag();
02946       DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02947 
02948       // Prepare offset into DataConstant
02949       int offset_1 = tmp_1->getPointOffset(0,0);
02950       const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02951       // Get the pointers to the actual data
02952       const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
02953       double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02954       // Compute a result for the default
02955       tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02956       // Compute a result for each tag
02957       const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
02958       DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
02959       for (i=lookup_0.begin();i!=lookup_0.end();i++) {
02960         tmp_2->addTag(i->first);
02961         const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
02962         double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02963         tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02964       }
02965 
02966     }
02967     else if (arg_0_Z.isTagged()     && arg_1_Z.isTagged()) {
02968 
02969       // Borrow DataTagged input from Data object
02970       DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02971 
02972       // Borrow DataTagged input from Data object
02973       DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02974 
02975       // Prepare a DataTagged output 2
02976       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
02977       res.tag();        // DataTagged output
02978       DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02979 
02980       // Get the pointers to the actual data
02981       const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
02982       const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
02983       double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02984 
02985       // Compute a result for the default
02986       tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02987       // Merge the tags
02988       DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
02989       const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
02990       const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
02991       for (i=lookup_0.begin();i!=lookup_0.end();i++) {
02992         tmp_2->addTag(i->first); // use tmp_2 to get correct shape
02993       }
02994       for (i=lookup_1.begin();i!=lookup_1.end();i++) {
02995         tmp_2->addTag(i->first);
02996       }
02997       // Compute a result for each tag
02998       const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
02999       for (i=lookup_2.begin();i!=lookup_2.end();i++) {
03000         const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
03001         const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
03002         double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
03003         tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
03004       }
03005 
03006     }
03007     else if (arg_0_Z.isTagged()     && arg_1_Z.isExpanded()) {
03008 
03009       // After finding a common function space above the two inputs have the same numSamples and num DPPS
03010       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
03011       DataTagged*   tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
03012       DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
03013       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
03014 
03015       int sampleNo_0,dataPointNo_0;
03016       int numSamples_0 = arg_0_Z.getNumSamples();
03017       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
03018       res.requireWrite();
03019       #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
03020       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
03021         int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
03022         const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
03023         for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
03024           int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
03025           int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
03026           const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
03027           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
03028           tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
03029         }
03030       }
03031 
03032     }
03033     else if (arg_0_Z.isExpanded()   && arg_1_Z.isConstant()) {
03034       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
03035       DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
03036       DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
03037       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
03038 
03039       int sampleNo_0;
03040       int numSamples_0 = arg_0_Z.getNumSamples();
03041       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
03042       int offset_1 = tmp_1->getPointOffset(0,0);
03043       const double *ptr_src = &(arg_1_Z.getDataAtOffsetRO(offset_1));
03044       double ptr_1 = ptr_src[0];
03045       int size = size0 * numDataPointsPerSample_0;
03046       res.requireWrite();
03047       #pragma omp parallel for private(sampleNo_0) schedule(static)
03048       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
03049 //        for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
03050           int offset_0 = tmp_0->getPointOffset(sampleNo_0,0);
03051           int offset_2 = tmp_2->getPointOffset(sampleNo_0,0);
03052           const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
03053 //          const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
03054           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
03055           tensor_binary_operation(size, ptr_0, ptr_1, ptr_2, operation);
03056 //        }
03057       }
03058 
03059 
03060     }
03061     else if (arg_0_Z.isExpanded()   && arg_1_Z.isTagged()) {
03062 
03063       // After finding a common function space above the two inputs have the same numSamples and num DPPS
03064       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
03065       DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
03066       DataTagged*   tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
03067       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
03068 
03069       int sampleNo_0,dataPointNo_0;
03070       int numSamples_0 = arg_0_Z.getNumSamples();
03071       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
03072       res.requireWrite();
03073       #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
03074       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
03075         int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
03076         const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
03077         for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
03078           int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
03079           int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
03080           const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
03081           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
03082           tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
03083         }
03084       }
03085 
03086     }
03087     else if (arg_0_Z.isExpanded()   && arg_1_Z.isExpanded()) {
03088 
03089       // After finding a common function space above the two inputs have the same numSamples and num DPPS
03090       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
03091       DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
03092       DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
03093       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
03094 
03095       int sampleNo_0,dataPointNo_0;
03096       int numSamples_0 = arg_0_Z.getNumSamples();
03097       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
03098       res.requireWrite();
03099       #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
03100       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
03101         for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
03102           int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
03103           int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
03104           int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
03105           const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
03106           const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
03107           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
03108           tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
03109         }
03110       }
03111 
03112     }
03113     else {
03114       throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
03115     }
03116 
03117   } else {
03118     throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible shapes");
03119   }
03120 
03121   return res;
03122 }
03123 
03124 template <typename UnaryFunction>
03125 Data
03126 C_TensorUnaryOperation(Data const &arg_0,
03127                        UnaryFunction operation)
03128 {
03129   if (arg_0.isEmpty())  // do this before we attempt to interpolate
03130   {
03131      throw DataException("Error - Operations not permitted on instances of DataEmpty.");
03132   }
03133   if (arg_0.isLazy())
03134   {
03135      throw DataException("Error - Operations not permitted on lazy data.");
03136   }
03137   // Interpolate if necessary and find an appropriate function space
03138   Data arg_0_Z = Data(arg_0);
03139 
03140   // Get rank and shape of inputs
03141   const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
03142   int size0 = arg_0_Z.getDataPointSize();
03143 
03144   // Declare output Data object
03145   Data res;
03146 
03147   if (arg_0_Z.isConstant()) {
03148     res = Data(0.0, shape0, arg_0_Z.getFunctionSpace());      // DataConstant output
03149     const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
03150     double *ptr_2 = &(res.getDataAtOffsetRW(0));
03151     tensor_unary_operation(size0, ptr_0, ptr_2, operation);
03152   }
03153   else if (arg_0_Z.isTagged()) {
03154 
03155     // Borrow DataTagged input from Data object
03156     DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
03157 
03158     // Prepare a DataTagged output 2
03159     res = Data(0.0, shape0, arg_0_Z.getFunctionSpace());   // DataTagged output
03160     res.tag();
03161     DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
03162 
03163     // Get the pointers to the actual data
03164     const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
03165     double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
03166     // Compute a result for the default
03167     tensor_unary_operation(size0, ptr_0, ptr_2, operation);
03168     // Compute a result for each tag
03169     const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
03170     DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
03171     for (i=lookup_0.begin();i!=lookup_0.end();i++) {
03172       tmp_2->addTag(i->first);
03173       const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
03174       double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
03175       tensor_unary_operation(size0, ptr_0, ptr_2, operation);
03176     }
03177 
03178   }
03179   else if (arg_0_Z.isExpanded()) {
03180 
03181     res = Data(0.0, shape0, arg_0_Z.getFunctionSpace(),true); // DataExpanded output
03182     DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
03183     DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
03184 
03185     int sampleNo_0,dataPointNo_0;
03186     int numSamples_0 = arg_0_Z.getNumSamples();
03187     int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
03188     #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
03189     for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
03190     dataPointNo_0=0;
03191 //      for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
03192         int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
03193         int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
03194         const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
03195         double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
03196         tensor_unary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_2, operation);
03197 //      }
03198     }
03199   }
03200   else {
03201     throw DataException("Error - C_TensorUnaryOperation: unknown combination of inputs");
03202   }
03203 
03204   return res;
03205 }
03206 
03207 }
03208 #endif