4 changes: 4 additions & 0 deletions popt/misc_tools/optim_tools.py
@@ -346,6 +346,10 @@ def get_optimize_result(obj):
savedata = obj.options['savedata']
else:
savedata = [ obj.options['savedata']]

if 'args' in savedata:
for a, arg in enumerate(obj.args):
results[f'args[{a}]'] = arg

# Loop over variables to store in save list
for save_typ in savedata:
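The block above lets the saved results include the extra arguments held in obj.args. A minimal sketch of what the new lines do, with illustrative values for args and savedata (neither is prescribed by the PR):

```python
# Sketch only: each extra argument is stored under an indexed key in the results dict.
args = (1.0, 'well_setup.yaml')        # illustrative stand-in for obj.args
savedata = ['fun', 'x', 'args']        # user-requested quantities; 'args' triggers the new branch

results = {}
if 'args' in savedata:
    for a, arg in enumerate(args):
        results[f'args[{a}]'] = arg    # -> {'args[0]': 1.0, 'args[1]': 'well_setup.yaml'}
```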
12 changes: 7 additions & 5 deletions popt/update_schemes/enopt.py
@@ -67,7 +67,7 @@ def __init__(self, fun, x, args, jac, hess, bounds=None, **options):
- beta: momentum coefficient for running accelerated optimization (default 0.0)
- alpha_maxiter: maximum number of backtracking trials (default 5)
- resample: number indicating how many times resampling is tried if no improvement is found
- optimizer: 'GA' (gradient accent) or Adam (default 'GA')
- optimizer: 'GD' (gradient descent) or Adam (default 'GD')
- nesterov: use Nesterov acceleration if true (default false)
- hessian: use Hessian approximation (if the algorithm permits use of Hessian) (default false)
- normalize: normalize the gradient if true (default true)
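A hedged sketch of how the options documented above might be passed in; the objective, gradient, and option values are illustrative, and the EnOpt call itself is left commented since only the options dict is the point here:

```python
import numpy as np

def fun(x, **kwargs):      # illustrative ensemble objective
    return np.sum(x**2)

def jac(x, **kwargs):      # illustrative gradient estimate
    return 2 * x

options = {
    'optimizer': 'GD',     # 'GD' (gradient descent, default) or 'Adam'
    'alpha_maxiter': 5,    # maximum number of backtracking trials
    'nesterov': False,     # Nesterov acceleration
    'normalize': True,     # normalize the gradient
}

# EnOpt(fun, x0, args, jac, hess, bounds=None, **options)   # sketch of the constructor call
```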
@@ -114,7 +114,7 @@ def __set__variable(var_name=None, defalut=None):
# Calculate objective function of startpoint
if not self.restart:
self.start_time = time.perf_counter()
self.obj_func_values = self._fun(self.mean_state, epf=self.epf)
self.obj_func_values = self.fun(self.mean_state, epf=self.epf)
self.nfev += 1
self.optimize_result = ot.get_optimize_result(self)
ot.save_optimize_results(self.optimize_result)
@@ -128,8 +128,8 @@ def __set__variable(var_name=None, defalut=None):
self.logger.info(' {:<21} {:<15.4e}'.format(self.iteration, np.mean(self.obj_func_values)))

# Initialize optimizer
optimizer = __set__variable('optimizer', 'GA')
if optimizer == 'GA':
optimizer = __set__variable('optimizer', 'GD')
if optimizer == 'GD':
self.optimizer = opt.GradientDescent(self.alpha, self.beta)
elif optimizer == 'Adam':
self.optimizer = opt.Adam(self.alpha, self.beta)
@@ -138,6 +138,8 @@ def __set__variable(var_name=None, defalut=None):
self.optimizer = opt.AdaMax(self.alpha, self.beta)
elif optimizer == 'Steihaug':
self.optimizer = opt.Steihaug(delta0=3.0)
else:
raise ValueError(f'Optimizer {optimizer} not recognized for EnOpt!')

# The EnOpt class self-ignites, and it is possible to send the EnOpt class as a callable method to scipy.minimize
self.run_loop() # run_loop resides in the Optimization class (super)
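The dispatch above now raises instead of leaving self.optimizer unset when the name is unrecognized. A stand-alone illustration of that pattern (the tiny classes below are placeholders, not the real opt module):

```python
class GradientDescent:                       # placeholder for opt.GradientDescent
    def __init__(self, alpha, beta):
        self.alpha, self.beta = alpha, beta

class Adam:                                  # placeholder for opt.Adam
    def __init__(self, alpha, beta):
        self.alpha, self.beta = alpha, beta

def make_optimizer(name, alpha=0.1, beta=0.0):
    if name == 'GD':
        return GradientDescent(alpha, beta)
    elif name == 'Adam':
        return Adam(alpha, beta)
    else:
        raise ValueError(f'Optimizer {name} not recognized for EnOpt!')

make_optimizer('GD')       # works
# make_optimizer('GA')     # the old name now fails loudly instead of silently doing nothing
```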
@@ -205,7 +207,7 @@ def calc_update(self):
new_state = ot.clip_state(new_state, self.bounds)

# Calculate new objective function
new_func_values = self._fun(new_state, epf=self.epf)
new_func_values = self.fun(new_state, epf=self.epf)
self.nfev += 1

if np.mean(self.obj_func_values) - np.mean(new_func_values) > self.obj_func_tol:
4 changes: 2 additions & 2 deletions popt/update_schemes/genopt.py
@@ -101,8 +101,8 @@ def __set__variable(var_name=None, defalut=None):
round(np.mean(self.obj_func_values),4)))

# Initialize optimizer
optimizer = __set__variable('optimizer', 'GA')
if optimizer == 'GA':
optimizer = __set__variable('optimizer', 'GD')
if optimizer == 'GD':
self.optimizer = opt.GradientDescent(self.alpha, self.beta)
elif optimizer == 'Adam':
self.optimizer = opt.Adam(self.alpha, self.beta)
97 changes: 47 additions & 50 deletions popt/update_schemes/linesearch.py
@@ -228,9 +228,9 @@ def __init__(self, fun, x, jac, method='GD', hess=None, args=(), bounds=None, c
self.saveit = options.get('saveit', True)

# set tolerance for convergence
self.xtol = options.get('xtol', 1e-8) # tolerance for control vector
self._xtol = options.get('xtol', 1e-8) # tolerance for control vector
self._ftol = options.get('ftol', 1e-4) # relative tolerance for function value
self.gtol = options.get('gtol', 1e-5) # tolerance for inf-norm of jacobian
self._gtol = options.get('gtol', 1e-5) # tolerance for inf-norm of jacobian

# Check method
valid_methods = ['GD', 'BFGS', 'Newton-CG']
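Renaming the tolerances to private attributes matches the property pattern already used for ftol further down in this file. A minimal sketch of that pattern, assuming the same attribute names:

```python
class ToleranceSketch:
    """Illustration only: private tolerances exposed through properties."""

    def __init__(self, **options):
        self._xtol = options.get('xtol', 1e-8)   # tolerance for control vector
        self._ftol = options.get('ftol', 1e-4)   # relative tolerance for function value
        self._gtol = options.get('gtol', 1e-5)   # tolerance for inf-norm of jacobian

    @property
    def ftol(self):
        return self._ftol

    @ftol.setter
    def ftol(self, value):
        self._ftol = value
```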
@@ -246,12 +246,12 @@ def __init__(self, fun, x, jac, method='GD', hess=None, args=(), bounds=None, c

# Check for initial callable values
self._fk = options.get('fun0', None)
self.jk = options.get('jac0', None)
self.Hk = options.get('hess0', None)
self._jk = options.get('jac0', None)
self._Hk = options.get('hess0', None)

if self._fk is None: self._fk = self._fun(self._xk)
if self.jk is None: self.jk = self._jac(self._xk)
if self.Hk is None: self.Hk = self._hess(self._xk)
if self._fk is None: self._fk = self.fun(self._xk)
if self._jk is None: self._jk = self.jac(self._xk)
if self._Hk is None: self._Hk = self.hess(self._xk)

# Check for initial inverse hessian for the BFGS method
if self.method == 'BFGS':
@@ -265,21 +265,27 @@ def __init__(self, fun, x, jac, method='GD', hess=None, args=(), bounds=None, c
self.p_old = None

# Initial results
self.optimize_result = self.get_intermediate_results()
self.optimize_result = ot.get_optimize_result(self)
if self.saveit:
ot.save_optimize_results(self.optimize_result)
if self.logger is not None:
self.logger.info(f' ====== Running optimization - Line search ({method}) ======')
self.logger.info('\nSPECIFIED OPTIONS:\n'+pprint.pformat(OptimizeResult(self.options)))
self.logger.info('')
self.logger.info(f' {"iter.":<10} {fun_xk_symbol:<15} {jac_inf_symbol:<15} {"step-size":<15}')
self.logger.info(f' {self.iteration:<10} {self._fk:<15.4e} {la.norm(self.jk, np.inf):<15.4e} {0:<15.4e}')
self.logger.info(f' {self.iteration:<10} {self._fk:<15.4e} {la.norm(self._jk, np.inf):<15.4e} {0:<15.4e}')
self.logger.info('')

self.run_loop()

def fun(self, x, *args, **kwargs):
return self.function(x, *args, **kwargs)
def fun(self, x, *args, **kwargs):
self.nfev += 1
x = ot.clip_state(x, self.bounds) # ensure bounds are respected
if self.args is None:
f = np.mean(self.function(x, epf=self.epf))
else:
f = np.mean(self.function(x, *self.args, epf=self.epf))
return f

@property
def xk(self):
@@ -297,16 +303,7 @@ def ftol(self):
def ftol(self, value):
self._ftol = value

def _fun(self, x):
self.nfev += 1
x = ot.clip_state(x, self.bounds) # ensure bounds are respected
if self.args is None:
f = np.mean(self.function(x, epf = self.epf))
else:
f = np.mean(self.function(x, *self.args, epf = self.epf))
return f

def _jac(self, x):
def jac(self, x):
self.njev += 1
x = ot.clip_state(x, self.bounds) # ensure bounds are respected
if self.args is None:
@@ -320,7 +317,7 @@ def _jac(self, x):

return g

def _hess(self, x):
def hess(self, x):
if self.hessian is None:
return None
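The public fun, jac, and hess methods above replace the former _fun, _jac, and _hess, wrapping the user callables with bound clipping, argument handling, ensemble averaging, and evaluation counting. A stand-alone sketch of the objective wrapper, with a stand-in for ot.clip_state:

```python
import numpy as np

def clip_state(x, bounds):
    """Stand-in for ot.clip_state: project x onto box bounds."""
    if bounds is None:
        return x
    lower, upper = np.asarray(bounds).T
    return np.clip(x, lower, upper)

def make_fun(function, bounds=None, args=None):
    """Wrap a user objective the way LineSearch.fun does (sketch only)."""
    def fun(x, **kwargs):
        x = clip_state(x, bounds)                      # ensure bounds are respected
        if args is None:
            return np.mean(function(x, **kwargs))      # ensemble mean of objective values
        return np.mean(function(x, *args, **kwargs))
    return fun
```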

@@ -339,26 +336,26 @@ def calc_update(self, iter_resamp=0):

# If in resampling mode, compute jacobian
# Else, jacobian from __init__ or from latest line_search is used
if self.jk is None:
self.jk = self._jac(self._xk)
if self._jk is None:
self._jk = self.jac(self._xk)

# Compute hessian
if (self.iteration != 1) or (iter_resamp > 0):
self.Hk = self._hess(self._xk)
self._Hk = self.hess(self._xk)

# Check normalization
if self.normalize:
self.jk = self.jk/la.norm(self.jk, np.inf)
if not self.Hk is None:
self.Hk = self.Hk/np.maximum(la.norm(self.Hk, np.inf), 1e-12)
self._jk = self._jk/la.norm(self._jk, np.inf)
if not self._Hk is None:
self._Hk = self._Hk/np.maximum(la.norm(self._Hk, np.inf), 1e-12)

# Calculate search direction (pk)
if self.method == 'GD':
pk = - self.jk
pk = - self._jk
if self.method == 'BFGS':
pk = - np.matmul(self.Hk_inv, self.jk)
pk = - np.matmul(self.Hk_inv, self._jk)
if self.method == 'Newton-CG':
pk = newton_cg(self.jk, Hk=self.Hk, xk=self._xk, jac=self._jac, logger=self.logger.info)
pk = newton_cg(self._jk, Hk=self._Hk, xk=self._xk, jac=self.jac, logger=self.logger.info)

# project search direction onto the feasible set
if self.bounds is not None:
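For reference, the GD and BFGS branches above amount to the following; Newton-CG is omitted because it goes through the separate newton_cg routine:

```python
import numpy as np
import numpy.linalg as la

def search_direction(method, jk, Hk_inv=None, normalize=True):
    """Sketch of the search-direction computation (GD and BFGS only)."""
    if normalize:
        jk = jk / la.norm(jk, np.inf)        # inf-norm normalization, as in the diff
    if method == 'GD':
        return -jk                           # steepest descent
    if method == 'BFGS':
        return -Hk_inv @ jk                  # quasi-Newton direction
    raise ValueError(f'{method} not covered by this sketch')
```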
@@ -377,21 +374,21 @@ def calc_update(self, iter_resamp=0):
step_size=step_size,
xk=self._xk,
pk=pk,
fun=self._fun,
jac=self._jac,
fun=self.fun,
jac=self.jac,
fk=self._fk,
jk=self.jk,
jk=self._jk,
**self.lskwargs
)
else:
ls_res = line_search(
step_size=step_size,
xk=self._xk,
pk=pk,
fun=self._fun,
jac=self._jac,
fun=self.fun,
jac=self.jac,
fk=self._fk,
jk=self.jk,
jk=self._jk,
**self.lskwargs
)
step_size, f_new, j_new, _, _ = ls_res
@@ -400,7 +397,7 @@

# Save old values
x_old = self._xk
j_old = self.jk
j_old = self._jk
f_old = self._fk

# Update control
@@ -409,7 +406,7 @@
# Update state
self._xk = x_new
self._fk = f_new
self.jk = j_new
self._jk = j_new

# Update old fun, jac and pk values
self.j_old = j_old
@@ -424,26 +421,26 @@
# Update BFGS
if self.method == 'BFGS':
yk = j_new - j_old
if self.iteration == 1: self.Hk_inv = np.dot(yk,sk)/np.dot(yk,yk) * np.eye(sk.size)
if self.iteration == 1: self._Hk_inv = np.dot(yk,sk)/np.dot(yk,yk) * np.eye(sk.size)
self.Hk_inv = bfgs_update(self.Hk_inv, sk, yk)

# Update status
success = True

# Save Results
self.optimize_result = self.get_intermediate_results()
self.optimize_result = ot.get_optimize_result(self)
if self.saveit:
ot.save_optimize_results(self.optimize_result)

# Write logging info
if self.logger is not None:
self.logger.info('')
self.logger.info(f' {"iter.":<10} {fun_xk_symbol:<15} {jac_inf_symbol:<15} {"step-size":<15}')
self.logger.info(f' {self.iteration:<10} {self._fk:<15.4e} {la.norm(self.jk, np.inf):<15.4e} {step_size:<15.4e}')
self.logger.info(f' {self.iteration:<10} {self._fk:<15.4e} {la.norm(self._jk, np.inf):<15.4e} {step_size:<15.4e}')
self.logger.info('')

# Check for convergence
if (la.norm(sk, np.inf) < self.xtol):
if (la.norm(sk, np.inf) < self._xtol):
self.msg = 'Convergence criteria met: |dx| < xtol'
self.logger.info(self.msg)
success = False
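The bfgs_update call in the BFGS branch earlier in this hunk presumably applies the standard inverse-Hessian BFGS update; a minimal sketch, assuming that formula:

```python
import numpy as np

def bfgs_update_sketch(H_inv, sk, yk):
    """Standard inverse-Hessian BFGS update (illustration of what bfgs_update likely does).

    H+ = (I - rho*s*y^T) H (I - rho*y*s^T) + rho*s*s^T, with rho = 1/(y^T s).
    """
    rho = 1.0 / np.dot(yk, sk)
    I = np.eye(sk.size)
    V = I - rho * np.outer(sk, yk)
    return V @ H_inv @ V.T + rho * np.outer(sk, sk)
```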
@@ -453,7 +450,7 @@ def calc_update(self, iter_resamp=0):
self.logger.info(self.msg)
success = False
return success
if (la.norm(self.jk, np.inf) < self.gtol):
if (la.norm(self._jk, np.inf) < self._gtol):
self.msg = f'Convergence criteria met: {jac_inf_symbol} < gtol'
self.logger.info(self.msg)
success = False
@@ -477,7 +474,7 @@

self.logger.info('Resampling Gradient')
iter_resamp += 1
self.jk = None
self._jk = None

# Recursively call function
success = self.calc_update(iter_resamp=iter_resamp)
@@ -493,7 +490,7 @@ def get_intermediate_results(self):
results = {
'fun': self._fk,
'x': self._xk,
'jac': self.jk,
'jac': self._jk,
'nfev': self.nfev,
'njev': self.njev,
'nit': self.iteration,
@@ -535,11 +532,11 @@ def _set_step_size(self, pk, amax):
alpha = self.step_size

else:
if (self.step_size_adapt == 1) and (np.dot(pk, self.jk) != 0):
alpha = 2*(self._fk - self.f_old)/np.dot(pk, self.jk)
elif (self.step_size_adapt == 2) and (np.dot(pk, self.jk) == 0):
if (self.step_size_adapt == 1) and (np.dot(pk, self._jk) != 0):
alpha = 2*(self._fk - self.f_old)/np.dot(pk, self._jk)
elif (self.step_size_adapt == 2) and (np.dot(pk, self._jk) == 0):
slope_old = np.dot(self.p_old, self.j_old)
slope_new = np.dot(pk, self.jk)
slope_new = np.dot(pk, self._jk)
alpha = self.step_size*slope_old/slope_new
else:
alpha = self.step_size
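A hedged sketch of the two step-size adaptation rules encoded in _set_step_size above; the guard conditions in the diff differ slightly, so treat this as an illustration of the formulas only:

```python
import numpy as np

def adapt_step_size(mode, step_size, fk, f_old, pk, jk, p_old=None, j_old=None):
    """Illustration of the two adaptation formulas (not the exact branch logic)."""
    slope_new = np.dot(pk, jk)               # slope of the objective along pk
    if mode == 1 and slope_new != 0:
        # pick alpha so a quadratic model reproduces the last observed decrease
        return 2 * (fk - f_old) / slope_new
    if mode == 2 and p_old is not None and slope_new != 0:
        # rescale the previous step by the ratio of old to new slopes
        slope_old = np.dot(p_old, j_old)
        return step_size * slope_old / slope_new
    return step_size
```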