Bonmin  1.8.7
BonTNLP2FPNLP.hpp
// Copyright (C) 2004, International Business Machines and others.
// All Rights Reserved.
// This code is published under the Eclipse Public License.
//
//
// Authors: Pierre Bonami 06/10/2005

#ifndef _TNLP2FPNLP_HPP_
#define _TNLP2FPNLP_HPP_

#include "IpTNLP.hpp"
#include "BonTMINLP.hpp"
#include "IpSmartPtr.hpp"
#include "BonTypes.hpp"
namespace Bonmin
{
  /** This is an adapter class to convert an NLP to a Feasibility Pump NLP
      by changing the objective function to the distance to a point. */
  class TNLP2FPNLP : public Ipopt::TNLP
  {
  public:
    /** Build using tnlp as the source problem. */
    TNLP2FPNLP(const Ipopt::SmartPtr<Ipopt::TNLP> tnlp, double objectiveScalingFactor = 100);

    /** Default destructor. */
    virtual ~TNLP2FPNLP();

    /** Set the TNLP to use. */
    void use(Ipopt::SmartPtr<TNLP> tnlp){
      tnlp_ = GetRawPtr(tnlp);}
    /** Flag to indicate that we want to use the feasibility pump objective. */
    void set_use_feasibility_pump_objective(bool use_feasibility_pump_objective)
    { use_feasibility_pump_objective_ = use_feasibility_pump_objective; }

    /** Flag to indicate that we want to use a cutoff constraint. */
    void set_use_cutoff_constraint(bool use_cutoff_constraint)
    { use_cutoff_constraint_ = use_cutoff_constraint; }

    /** Flag to indicate that we want to use a local branching constraint. */
    void set_use_local_branching_constraint(bool use_local_branching_constraint)
    { use_local_branching_constraint_ = use_local_branching_constraint; }

    /** Set the cutoff value to use in the cutoff constraint. */
    void set_cutoff(Ipopt::Number cutoff);

    /** Set the rhs of the local branching constraint. */
    void set_rhs_local_branching_constraint(double rhs_local_branching_constraint)
    { assert(rhs_local_branching_constraint >= 0);
      rhs_local_branching_constraint_ = rhs_local_branching_constraint; }

    /** Set the point to which distance is minimized. */
    void set_dist_to_point_obj(size_t n, const Ipopt::Number * vals, const Ipopt::Index * inds);

    /** Set the value for sigma. */
    void setSigma(double sigma){
      assert(sigma >= 0.);
      sigma_ = sigma;}

    /** Set the value for lambda. */
    void setLambda(double lambda){
      assert(lambda >= 0. && lambda <= 1.);
      lambda_ = lambda;}

    /** Set the norm used for the distance (1 or 2). */
    void setNorm(int norm){
      assert(norm > 0 && norm < 3);
      norm_ = norm;}
    /** Get info from tnlp_ and add Hessian information. */
    virtual bool get_nlp_info(Ipopt::Index& n, Ipopt::Index& m, Ipopt::Index& nnz_jac_g,
                              Ipopt::Index& nnz_h_lag, Ipopt::TNLP::IndexStyleEnum& index_style);

    /** This call is just passed onto tnlp_. */
    virtual bool get_bounds_info(Ipopt::Index n, Ipopt::Number* x_l, Ipopt::Number* x_u,
                                 Ipopt::Index m, Ipopt::Number* g_l, Ipopt::Number* g_u);
    /** Passed onto tnlp_, after setting multipliers for the extra constraints. */
    virtual bool get_starting_point(Ipopt::Index n, bool init_x, Ipopt::Number* x,
                                    bool init_z, Ipopt::Number* z_L, Ipopt::Number* z_U,
                                    Ipopt::Index m, bool init_lambda,
                                    Ipopt::Number* lambda)
    {
      int m2 = m;
      if(use_cutoff_constraint_) {
        m2--;
        if(lambda != NULL) lambda[m2] = 0;
      }
      if(use_local_branching_constraint_) {
        m2--;
        if(lambda != NULL) lambda[m2] = 0;
      }
      bool ret_code = tnlp_->get_starting_point(n, init_x, x,
                                                init_z, z_L, z_U, m2, init_lambda, lambda);
      return ret_code;
    }
    /** Overloaded to return the value of the objective function. */
    virtual bool eval_f(Ipopt::Index n, const Ipopt::Number* x, bool new_x,
                        Ipopt::Number& obj_value);

    /** Overloaded to return the gradient of the objective w.r.t. x. */
    virtual bool eval_grad_f(Ipopt::Index n, const Ipopt::Number* x, bool new_x,
                             Ipopt::Number* grad_f);

    /** Overloaded to return the values of the left-hand side of the constraints. */
    virtual bool eval_g(Ipopt::Index n, const Ipopt::Number* x, bool new_x,
                        Ipopt::Index m, Ipopt::Number* g);

    /** Overloaded to return the jacobian of g. */
    virtual bool eval_jac_g(Ipopt::Index n, const Ipopt::Number* x, bool new_x,
                            Ipopt::Index m, Ipopt::Index nele_jac, Ipopt::Index* iRow,
                            Ipopt::Index *jCol, Ipopt::Number* values);

    /** Evaluate the modified Hessian of the Lagrangian. */
    virtual bool eval_h(Ipopt::Index n, const Ipopt::Number* x, bool new_x,
                        Ipopt::Number obj_factor, Ipopt::Index m, const Ipopt::Number* lambda,
                        bool new_lambda, Ipopt::Index nele_hess,
                        Ipopt::Index* iRow, Ipopt::Index* jCol, Ipopt::Number* values);
    /** This method is called when the algorithm is complete so the TNLP can store/write the solution. */
    virtual void finalize_solution(Ipopt::SolverReturn status,
                                   Ipopt::Index n, const Ipopt::Number* x, const Ipopt::Number* z_L, const Ipopt::Number* z_U,
                                   Ipopt::Index m, const Ipopt::Number* g, const Ipopt::Number* lambda,
                                   Ipopt::Number obj_value,
                                   const Ipopt::IpoptData* ip_data,
                                   Ipopt::IpoptCalculatedQuantities* ip_cq);
    /** Passed onto tnlp_. */
    virtual bool get_variables_linearity(Ipopt::Index n, LinearityType* var_types)
    {
      return tnlp_->get_variables_linearity(n, var_types);
    }

    /** Overload this method to return the constraint linearity. */
    virtual bool get_constraints_linearity(Ipopt::Index m, LinearityType* const_types)
    {
      int m2 = m;
      if(use_cutoff_constraint_) {
        m2--;
        const_types[m2] = Ipopt::TNLP::NON_LINEAR;
      }
      if(use_local_branching_constraint_) {
        m2--;
        const_types[m2] = Ipopt::TNLP::LINEAR;
      }
      return tnlp_->get_constraints_linearity(m2, const_types);
    }
    /** Set the scaling factor applied to the objective. */
    void setObjectiveScaling(double value)
    {
      objectiveScalingFactor_ = value;
    }
    /** Get the scaling factor applied to the objective. */
    double getObjectiveScaling() const
    {
      return objectiveScalingFactor_;
    }
  private:
    /** Compute the distance from x to the stored point. */
    double dist_to_point(const Ipopt::Number *x);

    /** Default constructor (disabled). */
    TNLP2FPNLP();

    /** Copy constructor (disabled). */
    TNLP2FPNLP(const TNLP2FPNLP&);

    /** Assignment operator (disabled). */
    void operator=(const TNLP2FPNLP&);

    /** Pointer to the original TNLP. */
    Ipopt::SmartPtr<TNLP> tnlp_;

    /** Indices of the variables entering the distance objective. */
    vector<Ipopt::Index> inds_;
    /** Values of the point to which distance is minimized. */
    vector<Ipopt::Number> vals_;
    /** Parameter lambda of the feasibility pump objective (in [0,1]). */
    double lambda_;
    /** Parameter sigma of the feasibility pump objective (>= 0). */
    double sigma_;
    /** Norm used for the distance (1 or 2). */
    int norm_;

    /** Scaling factor applied to the objective. */
    double objectiveScalingFactor_;

    /** Flag to indicate that we want to use the feasibility pump objective. */
    bool use_feasibility_pump_objective_;

    /** Flag to indicate that we want to use a cutoff constraint. */
    bool use_cutoff_constraint_;

    /** Flag to indicate that we want to use a local branching constraint. */
    bool use_local_branching_constraint_;

    /** Cutoff value used in the cutoff constraint. */
    double cutoff_;

    /** Right-hand side of the local branching constraint. */
    double rhs_local_branching_constraint_;

    /** Index style of the original TNLP. */
    Ipopt::TNLP::IndexStyleEnum index_style_;
  };

} // namespace Bonmin

#endif /*_TNLP2FPNLP_HPP_*/
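For context, here is a minimal, hypothetical sketch of how this adapter might be driven from an Ipopt application. `MyTNLP`, the chosen variable indices, and the target point values are illustrative assumptions, not part of this header; how lambda and sigma enter the objective is defined in the implementation file, not shown here.

```cpp
#include "BonTNLP2FPNLP.hpp"
#include "IpIpoptApplication.hpp"

// Sketch only: MyTNLP is assumed to be a user-defined Ipopt::TNLP describing
// the continuous relaxation of the problem at hand.
int main()
{
  using namespace Ipopt;

  SmartPtr<TNLP> nlp = new MyTNLP();   // hypothetical user problem
  SmartPtr<Bonmin::TNLP2FPNLP> fp_nlp =
      new Bonmin::TNLP2FPNLP(nlp /*, objectiveScalingFactor = 100 by default */);

  // Point to which the distance is minimized, e.g. a rounding of the integer
  // variables; indices and values below are placeholders for illustration.
  const Index  inds[] = {0, 2, 5};
  const Number vals[] = {1., 0., 3.};
  fp_nlp->set_dist_to_point_obj(3, vals, inds);

  fp_nlp->set_use_feasibility_pump_objective(true);
  fp_nlp->setNorm(2);      // 2-norm distance (assert allows 1 or 2)
  fp_nlp->setLambda(0.);   // must lie in [0,1]
  fp_nlp->setSigma(0.);    // must be >= 0

  // Solve the resulting feasibility pump NLP with Ipopt.
  SmartPtr<IpoptApplication> app = IpoptApplicationFactory();
  app->Initialize();
  SmartPtr<TNLP> to_solve = GetRawPtr(fp_nlp);
  app->OptimizeTNLP(to_solve);
  return 0;
}
```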