
Source Code for Module PyDSTool.Toolbox.optimizers.line_search.strong_wolfe_powell_rule

# Matthieu Brucher
# Last Change : 2007-08-22 14:05

import numpy


class StrongWolfePowellRule(object):
    """
    The strong Wolfe-Powell rule for an inexact line search
    """
    def __init__(self, alpha=1., rho=0.1, sigma=0.4, alpha_min=0., alpha_max=1., alpha_limit=0.1, **kwargs):
        """
        Initializes the search
        Parameters:
          - alpha is the first step size that will be tried (1.)
          - rho is the acceptance factor for the sufficient-decrease test (0.1)
          - sigma is the factor for the Wolfe-Powell curvature test (0.4)
          - alpha_min is the lower limit of the search interval (0.)
          - alpha_max is the upper limit of the search interval (1.)
          - alpha_limit is a factor keeping the estimated alpha away from the limits of the tested bracket, which would otherwise lead to a divergence in the algorithm (0.1)
        Those parameters should be tweaked depending on the function to optimize
        """
        self.alpha = alpha
        self.rho = rho
        self.sigma = sigma
        self.alpha_min = alpha_min
        self.alpha_max = alpha_max
        self.alpha_limit = alpha_limit

    def __call__(self, origin, function, state, **kwargs):
        """
        Tries to find an acceptable candidate
        """
        direction = state['direction']
        ak = self.alpha_min
        bk = self.alpha_max
        if 'initial_alpha_step' in state:
            alpha = state['initial_alpha_step']
        else:
            alpha = self.alpha
        gradient = state['gradient']
        f1temp = function(origin)
        f1ptemp = numpy.dot(gradient, direction)
        while True:
            if numpy.isnan(alpha):
                state['alpha_step'] = 0
                return origin

            ftemp = function(origin + alpha * direction)
            # First test: sufficient decrease with factor rho
            if ftemp <= function(origin) + self.rho * alpha * numpy.dot(gradient, direction):
                fptemp = numpy.dot(function.gradient(origin + alpha * direction).T, direction)
                # Second test: strong Wolfe-Powell curvature condition with factor sigma
                if abs(fptemp) <= abs(self.sigma * numpy.dot(gradient, direction)):
                    state['alpha_step'] = alpha
                    return origin + alpha * direction
                else:
                    # Extrapolate a new step from the two directional derivatives
                    alphap = alpha + (alpha - ak) * fptemp / (f1ptemp - fptemp)
                    ak = alpha
                    alpha = alphap
                    f1temp = ftemp
                    f1ptemp = fptemp
            else:
                # Interpolate inside the bracket, keeping the new step away from its edges
                bracketSize = abs(bk - ak)
                alphap = ak + (alpha - ak) / (2. * (1. + (f1temp - ftemp) / ((alpha - ak) * f1ptemp)))
                bk = alpha
                if abs(alphap - ak) < self.alpha_limit * bracketSize:
                    alpha = ak + self.alpha_limit * bracketSize
                else:
                    alpha = alphap
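For reference, the two tests in __call__ implement the strong Wolfe-Powell conditions: the sufficient-decrease test f(x + alpha*d) <= f(x) + rho * alpha * f'(x)^T d and the curvature test |f'(x + alpha*d)^T d| <= sigma * |f'(x)^T d|. The sketch below is a minimal, hypothetical usage example, not part of the module: it assumes a small QuadraticFunction helper exposing the __call__/gradient interface the rule expects, and a hand-built state dictionary with the 'direction' and 'gradient' entries read by __call__.

import numpy

from PyDSTool.Toolbox.optimizers.line_search.strong_wolfe_powell_rule import StrongWolfePowellRule


class QuadraticFunction(object):
    """Hypothetical test function: f(x) = 0.5 * x.T A x - b.T x."""
    def __init__(self, A, b):
        self.A = A
        self.b = b

    def __call__(self, x):
        return 0.5 * numpy.dot(x, numpy.dot(self.A, x)) - numpy.dot(self.b, x)

    def gradient(self, x):
        return numpy.dot(self.A, x) - self.b


function = QuadraticFunction(A=numpy.array([[3., 0.], [0., 1.]]),
                             b=numpy.array([1., 1.]))
origin = numpy.zeros(2)
gradient = function.gradient(origin)
# The rule reads the search direction and the gradient at origin from the state dict
state = {'direction': -gradient, 'gradient': gradient}

rule = StrongWolfePowellRule(alpha=1., rho=0.1, sigma=0.4)
new_point = rule(origin, function, state)
print(state['alpha_step'], new_point)   # step 0.5, point [0.5, 0.5]

With these defaults the first trial step alpha = 1 fails the sufficient-decrease test, the bracket is interpolated to alpha = 0.5, and that step satisfies both conditions, so the rule returns the new point and stores the accepted step in state['alpha_step'].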