Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion docs/tutorials/popt/tutorial_popt.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -404,7 +404,7 @@
" - restart: restart optimization from a restart file (default false)\n",
" - restartsave: save a restart file after each successful iteration (defalut false)\n",
" - tol: convergence tolerance for the objective function (default 1e-6)\n",
" - alpha: step size for the steepest decent method (default 0.1)\n",
" - alpha: step size for the steepest descent method (default 0.1)\n",
" - beta: momentum coefficient for running accelerated optimization (default 0.0)\n",
" - alpha_maxiter: maximum number of backtracing trials (default 5)\n",
" - resample: number indicating how many times resampling is tried if no improvement is found\n",
Expand Down
2 changes: 1 addition & 1 deletion pipt/loop/ensemble.py
Original file line number Diff line number Diff line change
Expand Up @@ -174,7 +174,7 @@ def check_assimindex_simultaneous(self):
def _org_obs_data(self):
"""
Organize the input true observed data. The obs_data will be a list of length equal length of "TRUEDATAINDEX",
and each entery in the list will be a dictionary with keys equal to the "DATATYPE".
and each entry in the list will be a dictionary with keys equal to the "DATATYPE".
Also, the pred_data variable (predicted data or forward simulation) will be initialized here with the same
structure as the obs_data variable.

Expand Down
4 changes: 4 additions & 0 deletions popt/loop/optimize.py
Original file line number Diff line number Diff line change
Expand Up @@ -205,6 +205,10 @@ def run_loop(self):
self.ftol *= self.epf['tol_factor'] # decrease tolerance
self.obj_func_values = self.fun(self.xk, epf = self.epf)
self.iteration = 0
info_str = ' {:<10} {:<10} {:<15} {:<15} {:<15} '.format('iter', 'alpha_iter',
'obj_func', 'step-size', 'cov[0,0]')
self.logger.info(info_str)
self.logger.info(' {:<21} {:<15.4e}'.format(self.iteration, np.mean(self.obj_func_values)))
self.epf_iteration += 1
optimize_result = ot.get_optimize_result(self)
ot.save_optimize_results(optimize_result)
Expand Down
4 changes: 2 additions & 2 deletions popt/update_schemes/enopt.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ def __init__(self, fun, x, args, jac, hess, bounds=None, **options):
- restart: restart optimization from a restart file (default false)
- restartsave: save a restart file after each successful iteration (default false)
- tol: convergence tolerance for the objective function (default 1e-6)
- alpha: step size for the steepest decent method (default 0.1)
- alpha: step size for the steepest descent method (default 0.1)
- beta: momentum coefficient for running accelerated optimization (default 0.0)
- alpha_maxiter: maximum number of backtracking trials (default 5)
- resample: number indicating how many times resampling is tried if no improvement is found
Expand Down Expand Up @@ -130,7 +130,7 @@ def __set__variable(var_name=None, defalut=None):
# Initialize optimizer
optimizer = __set__variable('optimizer', 'GA')
if optimizer == 'GA':
self.optimizer = opt.GradientAscent(self.alpha, self.beta)
self.optimizer = opt.GradientDescent(self.alpha, self.beta)
elif optimizer == 'Adam':
self.optimizer = opt.Adam(self.alpha, self.beta)
elif optimizer == 'AdaMax':
Expand Down
2 changes: 1 addition & 1 deletion popt/update_schemes/genopt.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ def __set__variable(var_name=None, defalut=None):
# Initialize optimizer
optimizer = __set__variable('optimizer', 'GA')
if optimizer == 'GA':
self.optimizer = opt.GradientAscent(self.alpha, self.beta)
self.optimizer = opt.GradientDescent(self.alpha, self.beta)
elif optimizer == 'Adam':
self.optimizer = opt.Adam(self.alpha, self.beta)

Expand Down
2 changes: 1 addition & 1 deletion popt/update_schemes/smcopt.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ def __set__variable(var_name=None, defalut=None):
self.logger.info(info_str)
self.logger.info(' {:<21} {:<15.4e}'.format(self.iteration, np.mean(self.obj_func_values)))

self.optimizer = opt.GradientAscent(self.alpha, 0)
self.optimizer = opt.GradientDescent(self.alpha, 0)

# The SmcOpt class self-ignites
self.run_loop() # run_loop resides in the Optimization class (super)
Expand Down
14 changes: 7 additions & 7 deletions popt/update_schemes/subroutines/optimizers.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
"""Gradient acceleration."""
import numpy as np

__all__ = ['GradientAscent', 'Adam', 'AdaMax', 'Steihaug', ]
__all__ = ['GradientDescent', 'Adam', 'AdaMax', 'Steihaug', ]


class GradientAscent:
class GradientDescent:
r"""
A class for performing gradient ascent optimization with momentum and backtracking.
A class for performing gradient descent optimization with momentum and backtracking.
The gradient descent update equation with momentum is given by:

$$ \begin{align}
Expand Down Expand Up @@ -52,7 +52,7 @@ def __init__(self, step_size, momentum):
Parameters
----------
step_size : float
The step size (learning rate) for the gradient ascent.
The step size (learning rate) for the gradient descent.

momentum : float
The momentum factor to apply during updates.
Expand All @@ -72,7 +72,7 @@ def apply_update(self, control, gradient, **kwargs):
Apply a gradient update to the control parameter.

!!! note
This is the steepest decent update: x_new = x_old - x_step.
This is the steepest descent update: x_new = x_old - x_step.

Parameters
-------------------------------------------------------------------------------------
Expand Down Expand Up @@ -240,7 +240,7 @@ def apply_update(self, control, gradient, **kwargs):
Apply a gradient update to the control parameter.

!!! note
This is the steepest decent update: x_new = x_old - x_step.
This is the steepest descent update: x_new = x_old - x_step.

Parameters
-------------------------------------------------------------------------------------
Expand Down Expand Up @@ -269,7 +269,7 @@ def apply_update(self, control, gradient, **kwargs):
vel2_hat = self.temp_vel2/(1-beta2**iter)

step = alpha*vel1_hat/(np.sqrt(vel2_hat)+self.eps)
new_control = control - step # steepest decent
new_control = control - step # steepest descent
return new_control, step

def apply_backtracking(self):
Expand Down