solve_callback.hpp
00001 /* $Id$ */
00002 # ifndef CPPAD_SOLVE_FULL_INCLUDED
00003 # define CPPAD_SOLVE_FULL_INCLUDED
00004 /* --------------------------------------------------------------------------
00005 CppAD: C++ Algorithmic Differentiation: Copyright (C) 2003-14 Bradley M. Bell
00006 
00007 CppAD is distributed under multiple licenses. This distribution is under
00008 the terms of the 
00009                     Eclipse Public License Version 1.0.
00010 
00011 A copy of this license is included in the COPYING file of this distribution.
00012 Please visit http://www.coin-or.org/CppAD/ for information on other licenses.
00013 -------------------------------------------------------------------------- */
00014 
00015 # include <cppad/cppad.hpp>
00016 # include <coin/IpIpoptApplication.hpp>
00017 # include <coin/IpTNLP.hpp>
00018 # include <cppad/ipopt/solve_result.hpp>
00019 
00020 namespace CppAD { // BEGIN_CPPAD_NAMESPACE
00021 namespace ipopt {
00022 /*!
00023 \file solve_callback.hpp
00024 \brief Class that connects ipopt::solve to Ipopt
00025 */
00026 
00027 /*!
00028 Class that Ipopt uses for obtaining information about this problem.
00029 
00030 
00031 \section Evaluation_Methods Evaluation Methods
00032 The set of evaluation methods for this class is
00033 \verbatim
00034      { eval_f, eval_grad_f, eval_g, eval_jac_g, eval_h }
00035 \endverbatim
00036 Note that the bool return flag for the evaluation methods
00037 does not appear in the Ipopt documentation.
00038 Looking at the code, it seems to be a flag telling Ipopt to abort
00039 when the flag is false.
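
\par Typical Use
The following is a hedged sketch (not code from this file) of how ipopt::solve
passes an instance of this class to Ipopt; the constructor arguments are
assumed to be set up as documented for the constructor below:
\code
    Ipopt::SmartPtr<Ipopt::TNLP> cppad_nlp = new CppAD::ipopt::solve_callback<
        Dvector, ADvector, FG_eval
    >(nf, nx, ng, xi, xl, xu, gl, gu, fg_eval,
      retape, sparse_forward, sparse_reverse, solution);
    Ipopt::SmartPtr<Ipopt::IpoptApplication> app = new Ipopt::IpoptApplication();
    app->Initialize();
    app->OptimizeTNLP(cppad_nlp);
\endcode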
00040 */
00041 template <class Dvector, class ADvector, class FG_eval>
00042 class solve_callback : public Ipopt::TNLP
00043 {
00044 private:
00045      // ------------------------------------------------------------------
00046      // Types used by this class
00047      // ------------------------------------------------------------------
00048      /// A Scalar value used by Ipopt
00049      typedef Ipopt::Number                         Number;
00050      /// An index value used by Ipopt 
00051      typedef Ipopt::Index                          Index;
00052      /// Indexing style used in Ipopt sparsity structure 
00053      typedef Ipopt::TNLP::IndexStyleEnum           IndexStyleEnum;
00054      // ------------------------------------------------------------------
00055      // Values directly passed in to the constructor
00056      // ------------------------------------------------------------------
00057      /// dimension of the range space for f(x).
00058      /// The objective is sum_i f_i (x). 
00059      /// Note that, at this point, there is no advantage to having nf_ > 1.
00060      const size_t                    nf_;
00061      /// dimension of the domain space for f(x) and g(x)
00062      const size_t                    nx_;
00063      /// dimension of the range space for g(x)
00064      const size_t                    ng_;
00065      /// initial value for x
00066      const Dvector&                  xi_;
00067      /// lower limit for x 
00068      const Dvector&                  xl_;
00069      /// upper limit for x 
00070      const Dvector&                  xu_;
00071      /// lower limit for g(x)
00072      const Dvector&                  gl_;
00073      /// upper limit for g(x)
00074      const Dvector&                  gu_;
00075      /// object that evaluates f(x) and g(x)
00076      FG_eval&                        fg_eval_;
00077      /// should the operation sequence be retaped for each new x.
00078      bool                            retape_;
00079      /// Should sparse methods be used to compute Jacobians and Hessians
00080      /// with forward mode used for Jacobian. 
00081      bool                            sparse_forward_;
00082      /// Should sparse methods be used to compute Jacobians and Hessians
00083      /// with reverse mode used for Jacobian. 
00084      bool                            sparse_reverse_;
00085      /// final results are returned to this structure
00086      solve_result<Dvector>&          solution_;
00087      // ------------------------------------------------------------------
00088      // Values that are initialized by the constructor
00089      // ------------------------------------------------------------------
00090      /// AD function object that evaluates x -> [ f(x) , g(x) ]
00091      /// If retape is false, this object is initialized by the constructor;
00092      /// otherwise it is set by cache_new_x each time it is called.
00093      CppAD::ADFun<double>            adfun_;
00094      /// value of x corresponding to previous new_x
00095      Dvector                         x0_;
00096      /// value of fg corresponding to previous new_x
00097      Dvector                         fg0_;
00098      // ----------------------------------------------------------------------
00099      // Jacobian information
00100      // ----------------------------------------------------------------------
00101      /// Sparsity pattern for Jacobian of [f(x), g(x) ].
00102      /// If sparse is true, this pattern is set by the constructor and does not change.
00103      /// Otherwise this vector has size zero.
00104      CppAD::vector< std::set<size_t> > pattern_jac_;
00105      /// Row indices in the Jacobian of [f(x), g(x)] used for the Jacobian of g(x), in row major order.
00106      /// (Set by constructor and not changed.)
00107      CppAD::vector<size_t>           row_jac_;
00108      /// Column indices for Jacobian of g(x), same order as row_jac_.
00109      /// (Set by constructor and not changed.)
00110      CppAD::vector<size_t>           col_jac_;
00111      /// col_order_jac_ sorts row_jac_ and col_jac_ in column order.
00112      /// (Set by constructor and not changed.)
00113      CppAD::vector<size_t>           col_order_jac_;
00114      /// Work vector used by SparseJacobian, stored here to avoid recalculation.
00115      CppAD::sparse_jacobian_work     work_jac_;
00116      // ----------------------------------------------------------------------
00117      // Hessian information
00118      // ----------------------------------------------------------------------
00119      /// Sparsity pattern for the Hessian of the Lagrangian
00120      /// \f[ L(x) = \sigma \sum_i f_i (x) + \sum_i \lambda_i  g_i (x) \f]
00121      /// If sparse is true, this pattern is set by the constructor and does not change.
00122      /// Otherwise this vector has size zero.
00123      CppAD::vector< std::set<size_t> > pattern_hes_;
00124      /// Row indices of the Hessian lower triangle in row major order.
00125      /// (Set by constructor and not changed.)
00126      CppAD::vector<size_t>           row_hes_;
00127      /// Column indices of the Hessian lower triangle in the same order as row_hes_.
00128      /// (Set by constructor and not changed.)
00129      CppAD::vector<size_t>           col_hes_;
00130      /// Work vector used by SparseHessian, stored here to avoid recalculation.
00131      CppAD::sparse_hessian_work      work_hes_;
00132      // ------------------------------------------------------------------
00133      // Private member functions
00134      // ------------------------------------------------------------------
00135      /*!
00136      Cache information for a new value of x.
00137 
00138      \param x
00139      is the new value for x.
00140 
00141      \par x0_
00142      the elements of this vector are set to the new value for x.
00143 
00144      \par fg0_
00145      the elements of this vector are set to the new value for [f(x), g(x)]
00146 
00147      \par adfun_
00148      If retape is true, the operation sequence for this function
00149      is changed to correspond to the argument x.
00150      If retape is false, the operation sequence is not changed.
00151      The zero order Taylor coefficients for this function are set
00152      so they correspond to the argument x.  
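
     \par fg0_ layout
     After this call returns (this restates the code below, it is not new behavior):
     \verbatim
          fg0_[i]       = f_i (x)    for i = 0 , ... , nf_ - 1
          fg0_[nf_ + i] = g_i (x)    for i = 0 , ... , ng_ - 1
     \endverbatim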
00153      */
00154      void cache_new_x(const Number* x)
00155      {    size_t i;
00156           if( retape_ )
00157           {    // make adfun_, as well as x0_ and fg0_ correspond to this x
00158                ADvector a_x(nx_), a_fg(nf_ + ng_);
00159                for(i = 0; i < nx_; i++)
00160                {    x0_[i] = x[i];
00161                     a_x[i] = x[i];
00162                }
00163                CppAD::Independent(a_x);
00164                fg_eval_(a_fg, a_x);
00165                adfun_.Dependent(a_x, a_fg);
00166           }
00167           else
00168           {    // make x0_ and fg0_ correspond to this x
00169                for(i = 0; i < nx_; i++)
00170                     x0_[i] = x[i];
00171           }
00172           fg0_ = adfun_.Forward(0, x0_);
00173      }
00174 public:
00175      // ----------------------------------------------------------------------
00176      /*! 
00177      Constructor for the interface between ipopt::solve and Ipopt
00178      
00179      \param nf
00180      dimension of the range space for f(x)
00181      
00182      \param nx
00183      dimension of the domain space for f(x) and g(x).
00184      
00185      \param ng
00186      dimension of the range space for g(x)
00187      
00188      \param xi
00189      initial value of x during the optimization procedure (size nx).
00190      
00191      \param xl
00192      lower limit for x (size nx).
00193      
00194      \param xu
00195      upper limit for x (size nx).
00196      
00197      \param gl
00198      lower limit for g(x) (size ng).
00199      
00200      \param gu
00201      upper limit for g(x) (size ng).
00202 
00203      \param fg_eval
00204      function object that evaluates f(x) and g(x) using the syntax fg_eval(fg, x)
00205 
00206      \param retape
00207      should the operation sequence be retaped for each argument value.
00208 
00209      \param sparse_forward
00210      should sparse matrix computations be used for Jacobians and Hessians
00211      with forward mode for Jacobian.
00212 
00213      \param sparse_reverse
00214      should sparse matrix computations be used for Jacobians and Hessians
00215      with reverse mode for Jacobian.
00216      (sparse_forward and sparse_reverse cannot both be true).
00217      
00218      \param solution
00219      object where final results are stored.
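
     \par Example
     A hedged construction sketch (this is normally done inside ipopt::solve,
     not by users); the concrete types, problem, and values below are hypothetical:
     \code
         typedef std::vector<double>               Dvector;
         typedef std::vector< CppAD::AD<double> >  ADvector;
         // minimize x0^2 + x1^2 subject to x0 + x1 == 1
         struct FG_eval {
              void operator()(ADvector& fg, const ADvector& x)
              {    fg[0] = x[0] * x[0] + x[1] * x[1];   // f(x)
                   fg[1] = x[0] + x[1];                 // g(x)
              }
         };
         size_t  nf = 1, nx = 2, ng = 1;
         Dvector xi(nx), xl(nx), xu(nx), gl(ng), gu(ng);
         // ... set the initial point and the bounds ...
         FG_eval fg_eval;
         CppAD::ipopt::solve_result<Dvector> solution;
         CppAD::ipopt::solve_callback<Dvector, ADvector, FG_eval> callback(
              nf, nx, ng, xi, xl, xu, gl, gu, fg_eval,
              false,   // retape
              true,    // sparse_forward
              false,   // sparse_reverse
              solution
         );
     \endcode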
00220      */
00221      solve_callback(
00222           size_t                 nf              ,
00223           size_t                 nx              , 
00224           size_t                 ng              ,
00225           const Dvector&         xi              ,
00226           const Dvector&         xl              ,
00227           const Dvector&         xu              ,
00228           const Dvector&         gl              ,
00229           const Dvector&         gu              ,
00230           FG_eval&               fg_eval         ,
00231           bool                   retape          ,
00232           bool                   sparse_forward  ,
00233           bool                   sparse_reverse  ,
00234           solve_result<Dvector>& solution ) : 
00235      nf_ ( nf ),
00236      nx_ ( nx ),
00237      ng_ ( ng ),
00238      xi_ ( xi ),
00239      xl_ ( xl ),
00240      xu_ ( xu ),
00241      gl_ ( gl ),
00242      gu_ ( gu ),
00243      fg_eval_ ( fg_eval ),
00244      retape_ ( retape ),
00245      sparse_forward_ ( sparse_forward ),
00246      sparse_reverse_ ( sparse_reverse ),
00247      solution_ ( solution )
00248      {    CPPAD_ASSERT_UNKNOWN( ! ( sparse_forward_ & sparse_reverse_ ) );
00249 
00250           size_t i, j;
00251           size_t nfg = nf_ + ng_;
00252 
00253           // initialize x0_ and fg0_ with proper dimensions and value nan
00254           x0_.resize(nx);
00255           fg0_.resize(nfg);
00256           for(i = 0; i < nx_; i++)
00257                x0_[i] = CppAD::nan(0.0);
00258           for(i = 0; i < nfg; i++)
00259                fg0_[i] = CppAD::nan(0.0);
00260 
00261           if( ! retape_ )
00262           {    // make adfun_ correspond to x -> [ f(x), g(x) ]
00263                ADvector a_x(nx_), a_fg(nfg);
00264                for(i = 0; i < nx_; i++)
00265                     a_x[i] = xi_[i];
00266                CppAD::Independent(a_x);
00267                fg_eval_(a_fg, a_x);
00268                adfun_.Dependent(a_x, a_fg);
00269                // optimize because we will make repeated use of this tape
00270                adfun_.optimize();
00271           }
00272           if( sparse_forward_ | sparse_reverse_ ) 
00273           {    CPPAD_ASSERT_UNKNOWN( ! retape );  
00274                // -----------------------------------------------------------
00275                // Jacobian
00276                pattern_jac_.resize(nf_ + ng_);
00277                if( nx_ <= nf_ + ng_ )
00278                {    // use forward mode to compute sparsity
00279                     CppAD::vector< std::set<size_t> > r(nx_);
00280                     for(i = 0; i < nx_; i++)
00281                     {    CPPAD_ASSERT_UNKNOWN( r[i].empty() );
00282                          r[i].insert(i);
00283                     }
00284                     pattern_jac_ = adfun_.ForSparseJac(nx_, r);
00285                }
00286                else
00287                {    // use reverse mode to compute sparsity
00288                     size_t m = nf_ + ng_;
00289                     CppAD::vector< std::set<size_t> > s(m);
00290                     for(i = 0; i < m; i++)
00291                     {    CPPAD_ASSERT_UNKNOWN( s[i].empty() );
00292                          s[i].insert(i);
00293                     }
00294                     pattern_jac_ = adfun_.RevSparseJac(m, s);
00295                }
00296                // Set row and column indices in the Jacobian of [f(x), g(x)]
00297                // for Jacobian of g(x). These indices are in row major order.
00298                std::set<size_t>:: const_iterator itr, end;
00299                for(i = nf_; i < nfg; i++)
00300                {    itr = pattern_jac_[i].begin();
00301                     end = pattern_jac_[i].end();
00302                     while( itr != end )
00303                     {    j = *itr++;
00304                          row_jac_.push_back(i);
00305                          col_jac_.push_back(j);
00306                     }
00307                }
00308                // -----------------------------------------------------------
00309                // Hessian
00310                pattern_hes_.resize(nx_);
00311                CppAD::vector< std::set<size_t> > r(nx_);
00312                for(i = 0; i < nx_; i++)
00313                {    CPPAD_ASSERT_UNKNOWN( r[i].empty() );
00314                     r[i].insert(i);
00315                }
00316                // forward Jacobian sparsity for fg
00317                adfun_.ForSparseJac(nx_, r);
00318 
00319                // The Lagrangian can use any of the components of f(x), g(x)
00320                CppAD::vector< std::set<size_t> > s(1);
00321                CPPAD_ASSERT_UNKNOWN( s[0].empty() );
00322                for(i = 0; i < nf_ + ng_; i++)
00323                     s[0].insert(i);
00324                pattern_hes_ = adfun_.RevSparseHes(nx_, s);
00325 
00326                // Set row and column indices for lower triangle of Hessian
00327                // of the Lagrangian.  These indices are in row major order.
00328                for(i = 0; i < nx_; i++)
00329                {    itr = pattern_hes_[i].begin();
00330                     end = pattern_hes_[i].end();
00331                     while( itr != end )
00332                     {    j = *itr++;
00333                          if( j <= i )
00334                          {    row_hes_.push_back(i);
00335                               col_hes_.push_back(j);
00336                          }
00337                     }
00338                }
00339           }
00340           else
00341           {    // Set row and column indices in the Jacobian of [f(x), g(x)]
00342                // for Jacobian of g(x). These indices are in row major order.
00343                for(i = nf_; i < nfg; i++)
00344                {    for(j = 0; j < nx_; j++)
00345                     {    row_jac_.push_back(i);
00346                          col_jac_.push_back(j);   
00347                     }
00348                }
00349                // Set row and column indices for lower triangle of Hessian.
00350                // These indices are in row major order.
00351                for(i = 0; i < nx_; i++)
00352                {    for(j = 0; j <= i; j++)
00353                     {    row_hes_.push_back(i);
00354                          col_hes_.push_back(j);
00355                     }
00356                }
00357           }
00358 
00359           // Column order indirect sort of the Jacobian indices
00360           col_order_jac_.resize( col_jac_.size() );
00361           index_sort( col_jac_, col_order_jac_ );
00362      }
00363      // -----------------------------------------------------------------------
00364      /*!
00365      Return dimension information about the optimization problem.
00366      
00367      \param[out] n
00368      is set to the value nx_.
00369      
00370      \param[out] m
00371      is set to the value ng_.
00372      
00373      \param[out] nnz_jac_g
00374      is set to the number of possibly non-zero entries in the Jacobian of g(x); i.e., row_jac_.size().
00375      
00376      \param[out] nnz_h_lag
00377      is set to the number of possibly non-zero entries in the Hessian lower triangle; i.e., row_hes_.size().
00378      
00379      \param[out] index_style
00380      is set to C_STYLE; i.e., zero based indexing is used in the
00381      information passed to Ipopt.
00382      */
00383      virtual bool get_nlp_info(
00384           Index&          n            , 
00385           Index&          m            , 
00386           Index&          nnz_jac_g    ,
00387           Index&          nnz_h_lag    , 
00388           IndexStyleEnum& index_style  )
00389      {
00390           n         = static_cast<Index>(nx_);
00391           m         = static_cast<Index>(ng_);
00392           nnz_jac_g = static_cast<Index>(row_jac_.size());
00393           nnz_h_lag = static_cast<Index>(row_hes_.size());
00394      
00395 # ifndef NDEBUG
00396           if( ! (sparse_forward_ | sparse_reverse_) )
00397           {    size_t nnz = static_cast<size_t>(nnz_jac_g);
00398                CPPAD_ASSERT_UNKNOWN( nnz == ng_ * nx_);
00399                //
00400                nnz = static_cast<size_t>(nnz_h_lag);
00401                CPPAD_ASSERT_UNKNOWN( nnz == (nx_ * (nx_ + 1)) / 2 );
00402           }
00403 # endif
00404 
00405           // use the C index style (zero based) for row/col entries
00406           index_style = C_STYLE;
00407      
00408           return true;
00409      }
00410      // -----------------------------------------------------------------------
00411      /*!
00412      Return bound information about the optimization problem.
00413      
00414      \param[in] n
00415      is the dimension of the domain space for f(x) and g(x); i.e.,
00416      it must be equal to nx_.
00417      
00418      \param[out] x_l
00419      is a vector of size nx_.
00420      The input value of its elements does not matter.
00421      On output, it is a copy of the lower bound for \f$ x \f$; i.e.,
00422      xl_.
00423      
00424      \param[out] x_u
00425      is a vector of size nx_.
00426      The input value of its elements does not matter.
00427      On output, it is a copy of the upper bound for \f$ x \f$; i.e.,
00428      xu_.
00429      
00430      \param[in] m
00431      is the dimension of the range space for g(x); i.e.,
00432      it must be equal to ng_.
00433      
00434      \param[out] g_l
00435      is a vector of size ng_.
00436      The input value of its elements does not matter.
00437      On output, it is a copy of the lower bound for \f$ g(x) \f$; i.e., gl_.
00438      
00439      \param[out] g_u
00440      is a vector of size ng_.
00441      The input value of its elements does not matter.
00442      On output, it is a copy of the upper bound for \f$ g(x) \f$; i.e., gu_.
00443      */
00444      virtual bool get_bounds_info(
00445           Index       n        , 
00446           Number*     x_l      , 
00447           Number*     x_u      ,
00448           Index       m        , 
00449           Number*     g_l      , 
00450           Number*     g_u      )
00451      {    size_t i;
00452           // here, the n and m we gave IPOPT in get_nlp_info are passed back 
00453           CPPAD_ASSERT_UNKNOWN(static_cast<size_t>(n) == nx_);
00454           CPPAD_ASSERT_UNKNOWN(static_cast<size_t>(m) == ng_);
00455      
00456           // pass back bounds
00457           for(i = 0; i < nx_; i++)
00458           {    x_l[i] = xl_[i];
00459                x_u[i] = xu_[i];
00460           }
00461           for(i = 0; i < ng_; i++)
00462           {    g_l[i] = gl_[i];
00463                g_u[i] = gu_[i];
00464           }
00465           
00466           return true;
00467      }
00468      // -----------------------------------------------------------------------
00469      /*!
00470      Return the initial x value at which the optimization is started.
00471      
00472      \param[in] n
00473      must be equal to the domain dimension for f(x) and g(x); i.e.,
00474      it must be equal to nx_.
00475      
00476      \param[in] init_x
00477      must be equal to true.
00478      
00479      \param[out] x
00480      is a vector of size nx_.
00481      The input value of its elements does not matter.
00482      On output, it is a copy of the initial value for \f$ x \f$; i.e. xi_.
00483      
00484      \param[in] init_z
00485      must be equal to false.
00486      
00487      \param z_L
00488      is not used.
00489      
00490      \param z_U
00491      is not used.
00492      
00493      \param[in] m
00494      must be equal to the dimension of the range space for g(x); i.e.,
00495      it must be equal to ng_.
00496      
00497      \param init_lambda
00498      must be equal to false.
00499      
00500      \param lambda
00501      is not used.
00502      */
00503      virtual bool get_starting_point(
00504           Index           n            , 
00505           bool            init_x       , 
00506           Number*         x            ,
00507           bool            init_z       , 
00508           Number*         z_L          , 
00509           Number*         z_U          ,
00510           Index           m            , 
00511           bool            init_lambda  ,
00512           Number*         lambda       )
00513      {    size_t j;
00514      
00515           CPPAD_ASSERT_UNKNOWN(static_cast<size_t>(n) == nx_ );
00516           CPPAD_ASSERT_UNKNOWN(static_cast<size_t>(m) == ng_ );
00517           CPPAD_ASSERT_UNKNOWN(init_x == true);
00518           CPPAD_ASSERT_UNKNOWN(init_z == false);
00519           CPPAD_ASSERT_UNKNOWN(init_lambda == false);
00520      
00521           for(j = 0; j < nx_; j++)
00522                x[j] = xi_[j];
00523      
00524           return true;
00525      }
00526      // -----------------------------------------------------------------------
00527      /*!
00528      Evaluate the objective function f(x).
00529      
00530      \param[in] n
00531      is the dimension of the argument space for f(x); i.e., must be equal to nx_.
00532      
00533      \param[in] x
00534      is a vector of size nx_ containing the point at which to evaluate
00535      the function sum_i f_i (x).
00536      
00537      \param[in] new_x
00538      is false if the previous call to any one of the 
00539      \ref Evaluation_Methods used the same value for x.
00540      
00541      \param[out] obj_value
00542      is the value of the objective sum_i f_i (x) at this value of x.
00543      
00544      \return
00545      The return value is always true; see \ref Evaluation_Methods.
00546      */
00547      virtual bool eval_f(
00548           Index          n           , 
00549           const Number*  x           , 
00550           bool           new_x       , 
00551           Number&        obj_value   )
00552      {    size_t i;
00553           if( new_x )
00554                cache_new_x(x);
00555           //
00556           double sum = 0.0;
00557           for(i = 0; i < nf_; i++)
00558                sum += fg0_[i];
00559           obj_value = static_cast<Number>(sum);
00560           return true;
00561      }
00562      // -----------------------------------------------------------------------
00563      /*!
00564      Evaluate the gradient of f(x).
00565      
00566      \param[in] n
00567      is the dimension of the argument space for f(x); i.e., must be equal to nx_.
00568      
00569      \param[in] x
00570      is a vector of size nx_ containing the point at which to evaluate
00571      the gradient of f(x).
00572      
00573      \param[in] new_x
00574      is false if the previous call to any one of the 
00575      \ref Evaluation_Methods used the same value for x.
00576      
00577      \param[out] grad_f
00578      is a vector of size nx_.
00579      The input value of its elements does not matter.
00580      The output value of its elements is the gradient of f(x) 
00581      at this value of x.
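
     \par Method
     Restating the computation in the code below: the gradient is computed with
     one first order reverse sweep using the weight vector \f$ w \f$ defined by
     \f$ w_i = 1 \f$ for \f$ i < nf\_ \f$ and \f$ w_i = 0 \f$ otherwise; i.e.,
     \f[
          \nabla \left( \sum_i f_i (x) \right) = w^{\rm T} F^{(1)} (x)
     \f]
     where \f$ F(x) = [ f(x) , g(x) ] \f$.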
00582      
00583      \return
00584      The return value is always true; see \ref Evaluation_Methods.
00585      */
00586      virtual bool eval_grad_f(
00587           Index           n        , 
00588           const Number*   x        , 
00589           bool            new_x    , 
00590           Number*         grad_f   )
00591      {    size_t i;
00592           if( new_x )
00593                cache_new_x(x);
00594           //
00595           Dvector w(nf_ + ng_), dw(nx_);
00596           for(i = 0; i < nf_; i++)
00597                w[i] = 1.0;
00598           for(i = 0; i < ng_; i++)
00599                w[nf_ + i] = 0.0;
00600           dw = adfun_.Reverse(1, w);
00601           for(i = 0; i < nx_; i++)
00602                grad_f[i] = dw[i];
00603           return true;
00604      }
00605      // -----------------------------------------------------------------------
00606      /*!
00607      Evaluate the function g(x).
00608      
00609      \param[in] n
00610      is the dimension of the argument space for g(x); i.e., must be equal to nx_.
00611      
00612      \param[in] x
00613      is a vector of size n containing the point at which to evaluate
00614      the function g(x).
00615      
00616      \param[in] new_x
00617      is false if the previous call to any one of the 
00618      \ref Evaluation_Methods used the same value for x.
00619      
00620      \param[in] m
00621      is the dimension of the range space for g(x); i.e., must be equal to ng_.
00622      
00623      \param[out] g
00624      is a vector of size ng_.
00625      The input value of its elements does not matter.
00626      The output value of its elements is 
00627      the value of the function g(x) at this value of x.
00628      
00629      \return
00630      The return value is always true; see \ref Evaluation_Methods.
00631      */
00632      virtual bool eval_g(
00633           Index   n            , 
00634           const   Number* x    , 
00635           bool    new_x        , 
00636           Index   m            , 
00637           Number* g            )
00638      {    size_t i;
00639           if( new_x )
00640                cache_new_x(x);
00641           //
00642           for(i = 0; i < ng_; i++)
00643                g[i] = fg0_[nf_ + i];
00644           return true;
00645      }
00646      // -----------------------------------------------------------------------
00647      /*!
00648      Evaluate the Jacobian of g(x).
00649      
00650      \param[in] n
00651      is the dimension of the argument space for g(x); 
00652      i.e., must be equal to nx_.
00653      
00654      \param x
00655      If values is not NULL,
00656      x is a vector of size nx_ containing the point at which to evaluate
00657      the Jacobian of g(x).
00658      
00659      \param[in] new_x
00660      is false if the previous call to any one of the 
00661      \ref Evaluation_Methods used the same value for  x.
00662      
00663      \param[in] m
00664      is the dimension of the range space for g(x); 
00665      i.e., must be equal to ng_.
00666      
00667      \param[in] nele_jac
00668      is the number of possibly non-zero elements in the Jacobian of g(x);
00669      i.e., must be equal to the value of nnz_jac_g returned by get_nlp_info (row_jac_.size()).
00670      
00671      \param iRow
00672      if values is not NULL, iRow is not defined.
00673      if values is NULL, iRow
00674      is a vector with size nele_jac.
00675      The input value of its elements does not matter.
00676      On output, 
00677      For <tt>k = 0 , ... , nele_jac-1, iRow[k]</tt> is the 
00678      base zero row index for the 
00679      k-th possibly non-zero entry in the Jacobian of g(x).
00680      
00681      \param jCol
00682      if values is not NULL, jCol is not defined.
00683      if values is NULL, jCol
00684      is a vector with size nele_jac.
00685      The input value of its elements does not matter.
00686      On output, 
00687      For <tt>k = 0 , ... , nele_jac-1, jCol[k]</tt> is the 
00688      base zero column index for the 
00689      k-th possibly non-zero entry in the Jacobian of g(x).
00690      
00691      \param values
00692      if \c values is not \c NULL, \c values
00693      is a vector with size \c nele_jac.
00694      The input value of its elements does not matter.
00695      On output, 
00696      For <tt>k = 0 , ... , nele_jac-1, values[k]</tt> is the 
00697      value for the 
00698      k-th possibly non-zero entry in the Jacobian of g(x).
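
     \par
     In terms of the indices above, for <tt>k = 0 , ... , nele_jac-1</tt>,
     \f[
          values[k] = \partial g_{iRow[k]} (x) / \partial x_{jCol[k]} .
     \f]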
00699      
00700      \return
00701      The return value is always true; see \ref Evaluation_Methods.
00702      */
00703      virtual bool eval_jac_g(
00704           Index n, 
00705           const Number* x, 
00706           bool new_x,
00707                        
00708           Index m, 
00709           Index nele_jac, 
00710           Index* iRow, 
00711           Index *jCol,
00712                        
00713           Number* values)
00714      {    size_t i, j, k, ell;
00715           CPPAD_ASSERT_UNKNOWN(static_cast<size_t>(m)         == ng_ );
00716           CPPAD_ASSERT_UNKNOWN(static_cast<size_t>(n)         == nx_ );
00717           //
00718           size_t nk = row_jac_.size();
00719           CPPAD_ASSERT_UNKNOWN( static_cast<size_t>(nele_jac) == nk );
00720           //
00721           if( new_x )
00722                cache_new_x(x);
00723 
00724           if( values == NULL )
00725           {    for(k = 0; k < nk; k++)
00726                {    i = row_jac_[k];
00727                     j = col_jac_[k];
00728                     CPPAD_ASSERT_UNKNOWN( i >= nf_ );
00729                     iRow[k] = static_cast<Index>(i - nf_);
00730                     jCol[k] = static_cast<Index>(j);
00731                }
00732                return true;
00733           }
00734           //
00735           if( nk == 0 )
00736                return true;
00737           //
00738           if( sparse_forward_ )
00739           {    Dvector jac(nk);
00740                adfun_.SparseJacobianForward(
00741                     x0_ , pattern_jac_, row_jac_, col_jac_, jac, work_jac_
00742                );
00743                for(k = 0; k < nk; k++)
00744                     values[k] = jac[k];
00745           }
00746           else if( sparse_reverse_ )
00747           {    Dvector jac(nk);
00748                adfun_.SparseJacobianReverse(
00749                     x0_ , pattern_jac_, row_jac_, col_jac_, jac, work_jac_
00750                );
00751                for(k = 0; k < nk; k++)
00752                     values[k] = jac[k];
00753           }
00754           else if( nx_ < ng_ )
00755           {    // use forward mode
00756                Dvector x1(nx_), fg1(nf_ + ng_);
00757                for(j = 0; j < nx_; j++)
00758                     x1[j] = 0.0;
00759                // index in col_order_jac_ of next entry
00760                ell = 0;
00761                k   = col_order_jac_[ell];
00762                for(j = 0; j < nx_; j++)
00763                {    // compute j-th column of Jacobian of g(x)
00764                     x1[j] = 1.0;
00765                     fg1 = adfun_.Forward(1, x1);
00766                     while( ell < nk && col_jac_[k] <= j )
00767                     {    CPPAD_ASSERT_UNKNOWN( col_jac_[k] == j );
00768                          i = row_jac_[k];
00769                          CPPAD_ASSERT_UNKNOWN( i >= nf_ )
00770                          values[k] = fg1[i];
00771                          ell++;
00772                          if( ell < nk )
00773                               k = col_order_jac_[ell];
00774                     }
00775                     x1[j] = 0.0;
00776                }
00777           }
00778           else
00779           {    // use reverse mode
00780                size_t nfg = nf_ + ng_;
00781                // weight vector used to compute rows of the Jacobian by reverse mode
00782                Dvector w(nfg), dw(nx_);
00783                for(i = 0; i < nfg; i++)
00784                     w[i] = 0.0;
00785                // index in row_jac_ of next entry
00786                k = 0;
00787                for(i = nf_; i < nfg; i++)
00788                {    // compute i-th row of Jacobian of g(x)
00789                     w[i] = 1.0;
00790                     dw = adfun_.Reverse(1, w);
00791                     while( k < nk && row_jac_[k] <= i )
00792                     {    CPPAD_ASSERT_UNKNOWN( row_jac_[k] == i );
00793                          j = col_jac_[k];
00794                          values[k] = dw[j];
00795                          k++;
00796                     }
00797                     w[i] = 0.0;
00798                }
00799           }
00800           return true;
00801      }
00802      // -----------------------------------------------------------------------
00803      /*!
00804      Evaluate the Hessian of the Lagrangian
00805      
00806      \section The_Hessian_of_the_Lagrangian The Hessian of the Lagrangian
00807      The Hessian of the Lagrangian is defined as
00808      \f[
00809      H(x, \sigma, \lambda ) 
00810      =
00811      \sigma \nabla^2 f(x) + \sum_{i=0}^{m-1} \lambda_i \nabla^2 g(x)_i
00812      \f]
00813      
00814      \param[in] n
00815      is the dimension of the argument space for g(x); 
00816      i.e., must be equal to nx_.
00817      
00818      \param x
00819      if values is not NULL, x
00820      is a vector of size nx_ containing the point at which to evaluate
00821      the Hessian of the Lagrangian.
00822      
00823      \param[in] new_x
00824      is false if the previous call to any one of the 
00825      \ref Evaluation_Methods used the same value for x.
00826      
00827      \param[in] obj_factor
00828      the value \f$ \sigma \f$ multiplying the Hessian of
00829      f(x) in the expression for \ref The_Hessian_of_the_Lagrangian.
00830      
00831      \param[in] m
00832      is the dimension of the range space for g(x); 
00833      i.e., must be equal to ng_.
00834      
00835      \param[in] lambda
00836      if values is not NULL, lambda
00837      is a vector of size ng_ specifying the value of \f$ \lambda \f$
00838      in the expression for \ref The_Hessian_of_the_Lagrangian.
00839      
00840      \param[in] new_lambda
00841      is true if the previous call to eval_h had the same value for
00842      lambda and false otherwise.
00843      (Not currently used.)
00844      
00845      \param[in] nele_hess
00846      is the number of possibly non-zero elements in the 
00847      Hessian of the Lagrangian;
00848      i.e., must be equal to the value of nnz_h_lag returned by get_nlp_info (row_hes_.size()).
00849      
00850      \param iRow
00851      if values is not NULL, iRow is not defined.
00852      if values is NULL, iRow
00853      is a vector with size nele_hess.
00854      The input value of its elements does not matter.
00855      On output, 
00856      For <tt>k = 0 , ... , nele_hess-1, iRow[k]</tt> is the 
00857      base zero row index for the 
00858      k-th possibly non-zero entry in the Hessian of the Lagrangian.
00859      
00860      \param jCol
00861      if values is not NULL, jCol is not defined.
00862      if values is NULL, jCol
00863      is a vector with size nele_hess.
00864      The input value of its elements does not matter.
00865      On output, 
00866      For <tt>k = 0 , ... , nele_hess-1, jCol[k]</tt> is the 
00867      base zero column index for the 
00868      k-th possibly non-zero entry in the Hessian of the Lagrangian.
00869      
00870      \param values
00871      if values is not NULL, it
00872      is a vector with size nele_hess.
00873      The input value of its elements does not matter.
00874      On output, 
00875      For <tt>k = 0 , ... , nele_hess-1, values[k]</tt> is the 
00876      value for the 
00877      k-th possibly non-zero entry in the Hessian of the Lagrangian.
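
     \par
     In terms of the indices above, for <tt>k = 0 , ... , nele_hess-1</tt>,
     \f[
          values[k] = H(x, \sigma, \lambda)_{iRow[k] , jCol[k]}
     \f]
     where only lower triangle entries (<tt>jCol[k] <= iRow[k]</tt>) are included.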
00878      
00879      \return
00880      The return value is always true; see \ref Evaluation_Methods.
00881      */
00882      virtual bool eval_h(
00883           Index         n              ,
00884           const Number* x              , 
00885           bool          new_x          ,
00886           Number        obj_factor     , 
00887           Index         m              ,
00888           const Number* lambda         ,
00889           bool          new_lambda     , 
00890           Index         nele_hess      , 
00891           Index*        iRow           ,
00892           Index*        jCol           , 
00893           Number*       values         )
00894      {    size_t i, j, k;
00895           CPPAD_ASSERT_UNKNOWN(static_cast<size_t>(m) == ng_ );
00896           CPPAD_ASSERT_UNKNOWN(static_cast<size_t>(n) == nx_ );
00897           //
00898           size_t nk = row_hes_.size();
00899           CPPAD_ASSERT_UNKNOWN( static_cast<size_t>(nele_hess) == nk ); 
00900           //
00901           if( new_x )
00902                cache_new_x(x);
00903           //
00904           if( values == NULL )
00905           {    for(k = 0; k < nk; k++)
00906                {    i = row_hes_[k];
00907                     j = col_hes_[k];
00908                     iRow[k] = static_cast<Index>(i);
00909                     jCol[k] = static_cast<Index>(j);
00910                }
00911                return true;
00912           }
00913           //
00914           if( nk == 0 )
00915                return true;
00916 
00917           // weighting vector for the Lagrangian
00918           Dvector w(nf_ + ng_);
00919           for(i = 0; i < nf_; i++)
00920                w[i] = obj_factor;
00921           for(i = 0; i < ng_; i++)
00922                w[i + nf_] = lambda[i];
00923           //
00924           if( sparse_forward_ | sparse_reverse_ )
00925           {    Dvector hes(nk);
00926                adfun_.SparseHessian(
00927                     x0_, w, pattern_hes_, row_hes_, col_hes_, hes, work_hes_
00928                );
00929                for(k = 0; k < nk; k++)
00930                     values[k] = hes[k];
00931           }
00932           else
00933           {    Dvector hes(nx_ * nx_);
00934                hes = adfun_.Hessian(x0_, w);
00935                for(k = 0; k < nk; k++)
00936                {    i = row_hes_[k];
00937                     j = col_hes_[k];
00938                     values[k] = hes[i * nx_ + j];
00939                }
00940           }
00941           return true;
00942      }
00943      // ----------------------------------------------------------------------
00944      /*!
00945      Pass solution information from Ipopt to the user's solution structure.
00946      
00947      \param[in] status
00948      is the Ipopt solution status,
00949      which gets mapped to a corresponding value for
00950      \n
00951      <tt>solution_.status</tt>
00952      
00953      \param[in] n
00954      is the dimension of the domain space for f(x) and g(x); i.e.,
00955      it must be equal to nx_.
00956      
00957      \param[in] x
00958      is a vector with size nx_ specifying the final solution.
00959      This is the output value for
00960      \n
00961      <tt>solution_.x</tt> 
00962      
00963      \param[in] z_L
00964      is a vector with size nx_ specifying the Lagrange multipliers for the
00965      constraint \f$ x^l \leq x \f$.
00966      This is the output value for
00967      \n
00968      <tt>solution_.zl</tt>
00969      
00970      \param[in] z_U
00971      is a vector with size nx_ specifying the Lagrange multipliers for the
00972      constraint \f$ x \leq x^u \f$.
00973      This is the output value for
00974      \n
00975      <tt>solution_.zu</tt> 
00976      
00977      \param[in] m
00978      is the dimension of the range space for g(x); i.e.,
00979      it must be equal to ng_.
00980      
00981      \param[in] g
00982      is a vector with size ng_ containing the value of the constraint function
00983      g(x) at the final solution x.
00984      This is the output value for
00985      \n
00986      <tt>solution_.g</tt> 
00987      
00988      \param[in] lambda
00989      is a vector with size ng_ specifying the Lagrange multipliers for the
00990      constraints \f$ g^l \leq g(x) \leq g^u \f$.
00991      This is the output value for
00992      \n
00993      <tt>solution_.lambda</tt> 
00994      
00995      \param[in] obj_value
00996      is the value of the objective function f(x) at the final solution x.
00997      This is the output value for
00998      \n
00999      <tt>solution_.obj_value</tt>
01000      
01001      \param[in] ip_data
01002      is unspecified (by Ipopt) and hence not used.
01003      
01004      \param[in] ip_cq
01005      is unspecified (by Ipopt) and hence not used.
01006      
01007      \par solution_[out]
01008      this is a reference to the solution argument
01009      in the constructor for solve_callback.
01010      The results are stored here
01011      (see documentation above).
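
     \par
     A hypothetical usage sketch for reading the stored results after Ipopt
     returns (assuming Dvector = std::vector<double>):
     \code
         if( solution.status == CppAD::ipopt::solve_result<Dvector>::success )
         {    std::cout << "objective = " << solution.obj_value << "\n";
              for(size_t j = 0; j < solution.x.size(); j++)
                   std::cout << "x[" << j << "] = " << solution.x[j] << "\n";
         }
     \endcode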
01012      */
01013      virtual void finalize_solution(
01014           Ipopt::SolverReturn               status    ,
01015           Index                             n         , 
01016           const Number*                     x         , 
01017           const Number*                     z_L       , 
01018           const Number*                     z_U       ,
01019           Index                             m         , 
01020           const Number*                     g         , 
01021           const Number*                     lambda    ,
01022           Number                            obj_value ,
01023           const Ipopt::IpoptData*           ip_data   ,
01024           Ipopt::IpoptCalculatedQuantities* ip_cq
01025      )
01026      {    size_t i, j;
01027      
01028           CPPAD_ASSERT_UNKNOWN(static_cast<size_t>(n) == nx_ );
01029           CPPAD_ASSERT_UNKNOWN(static_cast<size_t>(m) == ng_ );
01030      
01031           switch(status)
01032           {    // convert status from Ipopt enum to solve_result<Dvector> enum
01033                case Ipopt::SUCCESS:
01034                solution_.status = solve_result<Dvector>::success;
01035                break;
01036      
01037                case Ipopt::MAXITER_EXCEEDED:
01038                solution_.status = 
01039                     solve_result<Dvector>::maxiter_exceeded;
01040                break;
01041      
01042                case Ipopt::STOP_AT_TINY_STEP:
01043                solution_.status = 
01044                     solve_result<Dvector>::stop_at_tiny_step;
01045                break;
01046      
01047                case Ipopt::STOP_AT_ACCEPTABLE_POINT:
01048                solution_.status = 
01049                     solve_result<Dvector>::stop_at_acceptable_point;
01050                break;
01051      
01052                case Ipopt::LOCAL_INFEASIBILITY:
01053                solution_.status = 
01054                     solve_result<Dvector>::local_infeasibility;
01055                break;
01056      
01057                case Ipopt::USER_REQUESTED_STOP:
01058                solution_.status = 
01059                     solve_result<Dvector>::user_requested_stop;
01060                break;
01061      
01062                case Ipopt::DIVERGING_ITERATES:
01063                solution_.status = 
01064                     solve_result<Dvector>::diverging_iterates;
01065                break;
01066      
01067                case Ipopt::RESTORATION_FAILURE:
01068                solution_.status = 
01069                     solve_result<Dvector>::restoration_failure;
01070                break;
01071      
01072                case Ipopt::ERROR_IN_STEP_COMPUTATION:
01073                solution_.status = 
01074                     solve_result<Dvector>::error_in_step_computation;
01075                break;
01076      
01077                case Ipopt::INVALID_NUMBER_DETECTED:
01078                solution_.status = 
01079                     solve_result<Dvector>::invalid_number_detected;
01080                break;
01081      
01082                case Ipopt::INTERNAL_ERROR:
01083                solution_.status = 
01084                     solve_result<Dvector>::internal_error;
01085                break;
01086      
01087                default:
01088                solution_.status = 
01089                     solve_result<Dvector>::unknown;
01090           }
01091      
01092           solution_.x.resize(nx_);
01093           solution_.zl.resize(nx_);
01094           solution_.zu.resize(nx_);
01095           for(j = 0; j < nx_; j++)
01096           {    solution_.x[j]   = x[j];
01097                solution_.zl[j]  = z_L[j];
01098                solution_.zu[j]  = z_U[j];
01099           }
01100           solution_.g.resize(ng_);
01101           solution_.lambda.resize(ng_);
01102           for(i = 0; i < ng_; i++)
01103           {    solution_.g[i]      = g[i];
01104                solution_.lambda[i] = lambda[i];
01105           }
01106           solution_.obj_value = obj_value;
01107           return;
01108      }
01109 };
01110 
01111 } // end namespace ipopt
01112 } // END_CPPAD_NAMESPACE
01113 
01114 # endif