1 """ @package forcebalance.optimizer Optimization algorithms. 3 My current implementation is to have a single optimizer class with several methods 10 from __future__
import division
11 from __future__
import print_function
13 from builtins
import str
14 from builtins
import range
15 from builtins
import object
16 import os, pickle, re, sys
19 from numpy.linalg
import multi_dot
20 from copy
import deepcopy
23 from forcebalance.nifty import col, flat, row, printcool, printcool_dictionary, pvec1d, pmat2d, warn_press_key, invert_svd, wopen, bak, est124
25 from collections
import OrderedDict
28 from forcebalance.output
import getLogger, DEBUG, CleanStreamHandler
29 logger = getLogger(__name__)
39 """ Optimizer class. Contains several methods for numerical optimization. 41 For various reasons, the optimizer depends on the force field and fitting 42 targets (i.e. we cannot treat it as a fully independent numerical optimizer). 43 The dependency is rather weak which suggests that I can remove it someday. 47 """ Create an Optimizer object. 49 The optimizer depends on both the FF and the fitting targets so there 50 is a chain of dependencies: FF --> FitSim --> Optimizer, and FF --> Optimizer 53 - Take options from the parser 54 - Pass in the objective function, force field, all fitting targets 57 super(Optimizer, self).
__init__(options)

        self.set_option(options, 'root', 'root')
        self.set_option(options, 'jobtype', 'jobtype')
        self.set_option(options, 'trust0', 'trust0')
        self.set_option(options, 'mintrust', 'mintrust')
        self.set_option(options, 'eig_lowerbound', 'eps')
        self.set_option(options, 'step_lowerbound')
        self.set_option(options, 'lm_guess', 'lmg')
        self.set_option(options, 'finite_difference_h', 'h')
        self.set_option(options, 'finite_difference_h', 'h0')
        self.set_option(options, 'finite_difference_factor', 'fdf')
        self.set_option(options, 'objective_history', 'hist')
        self.set_option(options, 'convergence_objective')
        self.set_option(options, 'convergence_step')
        self.set_option(options, 'convergence_gradient')
        self.set_option(options, 'converge_lowq')
        self.set_option(options, 'maxstep', 'maxstep')
        self.set_option(options, 'scanindex_num', 'idxnum')
        self.set_option(options, 'scanindex_name', 'idxname')
        self.set_option(options, 'scan_vals', 'scan_vals')
        self.set_option(options, 'readchk', 'rchk_fnm')
        self.set_option(options, 'writechk', 'wchk_fnm')
        self.set_option(options, 'writechk_step', 'wchk_step')
        self.set_option(options, 'adaptive_factor', 'adapt_fac')
        self.set_option(options, 'adaptive_damping', 'adapt_damp')
        self.set_option(options, 'print_gradient', 'print_grad')
        self.set_option(options, 'print_hessian', 'print_hess')
        self.set_option(options, 'print_parameters', 'print_vals')
        self.set_option(options, 'error_tolerance', 'err_tol')
        self.set_option(options, 'search_tolerance', 'search_tol')
        self.set_option(options, 'read_mvals')
        self.set_option(options, 'read_pvals')
        self.set_option(options, 'backup')
        self.set_option(options, 'input_file')
        self.set_option(options, 'criteria')

        ## The objective function
        self.Objective = Objective
        ## The force field itself
        self.FF = FF
        ## Whether the penalty function is hyperbolic
        self.bhyp = Objective.Penalty.ptyp not in (2, 3)
        ## Target types which introduce uncertainty into the objective function
        self.uncert = any([any([i in tgt.type.lower() for i in ['liquid', 'lipid', 'thermo']]) for tgt in self.Objective.Targets])
        self.bakdir = os.path.join(os.path.splitext(options['input_file'])[0]+'.bak')
        self.resdir = os.path.join('result', os.path.splitext(options['input_file'])[0])

        ## The original parameter values
        if options['read_mvals'] is not None:
            self.mvals0 = np.array(options['read_mvals'])
        elif options['read_pvals'] is not None:
            self.mvals0 = FF.create_mvals(options['read_pvals'])
        else:
            self.mvals0 = np.zeros(self.FF.np)

        ## Continue an aborted run from where it left off
        if options['continue']: self.recover()

    def recover(self):
        ## Determine the save file name corresponding to this input file.
        base, ext = os.path.splitext(self.input_file)
        if not base.endswith(".sav"):
            savfnm = base + ".sav"
        ## soptions below holds the options read back from the save file.
        if os.path.exists(savfnm):
            if soptions['read_mvals'] and np.max(np.abs(self.mvals0)) != 0.0:
                warn_press_key("Save file read_mvals will overwrite input file.\nInput file: %s\nSave file : %s\n" % (soptions['read_mvals'], self.mvals0))
            self.mvals0 = np.array(soptions['read_mvals'])
            self.read_mvals = np.array(soptions['read_mvals'])

        maxrd = max([T.maxrd() for T in self.Objective.Targets])
        if len(set([T.maxrd() for T in self.Objective.Targets])) == 1 and any([((T.maxid() - T.maxrd()) > 0) for T in self.Objective.Targets]):
            printcool("Continuing optimization from iteration %i\nThese targets will load data from disk:\n%s" % \
                          (maxrd, '\n'.join([T.name for T in self.Objective.Targets if T.maxrd() == maxrd])), color=4)
        for T in self.Objective.Targets:
            if T.maxrd() == maxrd:
                if os.path.exists(os.path.join(T.absrd(), 'mvals.txt')):
                    tmvals = np.loadtxt(os.path.join(T.absrd(), 'mvals.txt'))
                    if len(tmvals) > 0 and np.max(np.abs(tmvals - self.mvals0) > 1e-4):
                        warn_press_key("mvals.txt in %s does not match loaded parameters.\nSave file : %s\nParameters : %s\n" % (T.absrd(), tmvals, self.mvals0))
243 """ Mark in each target that the previous optimization step was good or bad. """ 249 """ Write a new input file (%s_save.in) containing the current mathematical parameters. """ 250 if self.
FF.use_pvals:
251 mvals = self.
FF.create_mvals(vals)
        base, ext = os.path.splitext(self.input_file)
        if not base.endswith(".sav"):
            outfnm = base + "_save.in"
        if self.input_file is not None and os.path.exists(self.input_file):
            fin = open(self.input_file).readlines()
        if os.path.exists(outfnm) and self.mvals_bak:
            bak(outfnm)
        fout = open(outfnm, 'w')
        for line in fin:
            line1 = line.split("#")[0].strip().lower()
            if line1.startswith("$options"):
                in_options = True
            if in_options and line1.startswith("$end"):
                if not have_mvals:
                    print("read_mvals", file=fout)
                    print(self.FF.sprint_map(mvals, precision=8), file=fout)
                    print("/read_mvals", file=fout)
                if not have_priors and priors is not None:
                    print("priors", file=fout)
                    print('\n'.join([" %-35s : %.1e" % (k, priors[k]) for k in list(priors.keys())]), file=fout)
                    print("/priors", file=fout)
            elif in_options and line1.startswith('jobtype') and jobtype is not None:
                print("jobtype %s" % jobtype, file=fout)
            if line1.startswith("/read_mvals"):
                in_mvals = False
            if line1.startswith("/priors"):
                in_priors = False
            if in_mvals: continue
            if in_priors: continue
            print(line, end='', file=fout)
            if line1.startswith("read_mvals"):
                in_mvals = True
                if have_mvals:
                    logger.error("Encountered more than one read_mvals section\n")
                print(self.FF.sprint_map(mvals, precision=8), file=fout)
            if line1.startswith("priors") and priors is not None:
                in_priors = True
                if have_priors:
                    logger.error("Encountered more than one priors section\n")
                print('\n'.join([" %-35s : %.1e" % (k, priors[k]) for k in list(priors.keys())]), file=fout)
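
# The read_mvals / priors sections written above follow the input-file syntax parsed
# by forcebalance.parser.  A minimal standalone sketch of the priors block emitted by
# the writer, using the same format string as the code above; the parameter names and
# values here are made up purely for illustration.
def _sketch_priors_block(priors):
    """Illustrative only: render a priors section the way save_mvals_to_input does."""
    lines = ["priors"]
    lines += [" %-35s : %.1e" % (k, priors[k]) for k in priors]
    lines += ["/priors"]
    return "\n".join(lines)

# Example (hypothetical parameter-type names):
#   print(_sketch_priors_block({"BONDSB": 0.05, "VDWS": 0.1}))
# produces a block of the form
#   priors
#    BONDSB ... : 5.0e-02
#    VDWS   ... : 1.0e-01
#   /priors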
315 """ Call the appropriate optimizer. This is the method we might want to call from an executable. """ 316 now = datetime.datetime.now().strftime(
"%Y-%m-%d %I:%M %p")
317 logger.info(
"Calculation started at %s\n" % now)
319 xk = self.
OptTab[self.jobtype]()
322 print_parameters =
True 324 if self.jobtype.lower() ==
'precondition': print_parameters=
False 325 if xk
is None and (self.
mvals0 == np.zeros(self.
FF.np)).
all():
326 logger.info(
"Parameter file same as original; will not be printed to results folder.\n")
327 print_parameters =
False 334 self.
mvals0 = self.
FF.create_pvals(xk)
if self.
FF.use_pvals
else xk.copy()
339 if self.
FF.use_pvals:
340 bar =
printcool(
"Final optimization parameters:",color=4)
341 self.
FF.print_map(self.
FF.create_mvals(xk))
342 bar =
printcool(
"Final physical parameters:",color=4)
343 self.
FF.print_map(xk)
345 bar =
printcool(
"Final optimization parameters:",color=4)
346 self.
FF.print_map(xk)
347 bar =
printcool(
"Final physical parameters:",color=4)
348 self.
FF.print_map(self.
FF.create_pvals(xk))
351 for fnm
in self.
FF.fnms:
352 if os.path.exists(os.path.join(self.
resdir, fnm)):
354 self.
FF.make(xk,printdir=self.
resdir)
358 printcool(
"The force field has been written to the %s directory.\n" 359 "Input file with optimization parameters saved to %s." % (self.
resdir, outfnm), color=0)
368 logger.info(
"Wall time since calculation start: %.1f seconds\n" % (time.time() - t0))
370 bar =
printcool(
"It is possible to commit no errors and still lose.\nThat is not a weakness. That is life.",ansi=
"40;93")
372 bar =
printcool(
"Calculation Finished.\n---==( May the Force be with you! )==---",ansi=
"1;44;93")

    def adjh(self, trust):
        # Adjust the finite difference step size based on the current trust radius.
        h = min(self.fdf * trust, self.h0)
        if h != self.h:
            logger.info("Setting finite difference step to %.4e\n" % h)
            self.h = h

    def MainOptimizer(self, b_BFGS=0):
        """
        The main ForceBalance adaptive trust-radius pseudo-Newton
        optimizer.  Tried and true in many situations. :)

        Usually this function is called with the BFGS or NewtonRaphson
        method.  The NewtonRaphson method is consistently the best
        method I have, because I always provide at least an
        approximate Hessian to the objective function.  The BFGS
        method works well, but if gradients are cheap the SciPy_BFGS
        method also works nicely.

        The method adaptively changes the step size.  If the step is
        sufficiently good (i.e. the objective function goes down by a
        large fraction of the predicted decrease), then the step size
        is increased; if the step is bad, then it rejects the step and
        tries again.

        The optimization is terminated after either a function value or
        step size tolerance is reached.

        @param[in] b_BFGS Switch to use BFGS (True) or Newton-Raphson (False)
        """
        if self.trust0 < 0.0:
            detail = "(Hessian Diagonal Search)"
        elif self.adapt_fac != 0.0:
            detail = "(Adaptive Trust Radius)"
        else:
            detail = "(Trust Radius)"
        printcool("Main Optimizer\n%s Method %s\n\n"
                  "\x1b[0mConvergence criteria (%i of 3 needed):\n"
                  "\x1b[0mObjective Function  : %.3e\n"
                  "\x1b[0mNorm of Gradient    : %.3e\n"
                  "\x1b[0mParameter step size : %.3e" %
                  ("BFGS" if b_BFGS else "Newton-Raphson", detail, self.criteria,
                   self.convergence_objective, self.convergence_gradient,
                   self.convergence_step), ansi=1, bold=1)

        if self.uncert and self.convergence_objective < 1e-3:
            warn_press_key("Condensed phase targets detected - may not converge with current choice of"
                           " convergence_objective (%.e)\nRecommended range is 1e-2 - 1e-1 for this option." % self.convergence_objective)

        Ord = 1 if b_BFGS else 2
        X_hist = np.array([])
        trust = abs(self.trust0)
        dx = np.zeros(self.FF.np)

        printcool("Color Key for Objective Function -=X2=-\n\x1b[1mBold\x1b[0m = Initial step\n"
                  "\x1b[92mGreen = Current lowest value of objective function%s\x1b[0m\n"
                  "\x1b[91mRed = Objective function rises, step rejected\x1b[0m\n"
                  "\x1b[0mNo color = Not at the lowest value" % (" (best estimate)" if self.uncert else ""),
                  bold=0, color=0, center=[True, False, False, False, False])

        criteria_satisfied = {'step': False, 'grad': False, 'obj': False}

        def print_progress(itn, nx, nd, ng, clr, x, std, qual):
            logger.info("%6s%12s%12s%12s%14s%12s%12s\n" % ("Step", " |k| ", " |dk| ", " |grad| ", " -=X2=- ", "Delta(X2)", "StepQual"))
            logger.info("%6i%12.3e%12.3e%12.3e%s%14.5e\x1b[0m%12.3e% 11.3f\n\n" % (itn, nx, nd, ng, clr, x, std, qual))

        if len(self.chk.keys()) > 0 and ITERATION == self.iterinit:
            printcool("Iteration %i: Reading initial objective, gradient, Hessian from checkpoint file" % (ITERATION), color=4, bold=0)
            logger.info("Reading initial objective, gradient, Hessian from checkpoint file\n")
            xk, X, G, H = self.chk['xk'], self.chk['X'], self.chk['G'], self.chk['H']
            X_hist, trust = self.chk['X_hist'], self.chk['trust']
        else:
            printcool("Iteration %i: Evaluating objective function\nand derivatives through %s order" % (ITERATION, "first" if Ord == 1 else "second"), color=4, bold=0)
            data = self.Objective.Full(xk, Ord, verbose=True)
            X, G, H = data['X'], data['G'], data['H']

        # Measure the quality of the step by comparing the actual change in the
        # objective function to the change predicted by the quadratic model.
        dX_actual = X - X_prev
        Best_Step = X < np.min(X_hist[Best_Start:])
        Quality = dX_actual / dX_expect
        logger.warning("Warning: Step size of zero detected (i.e. wrong direction). "
                       "Try reducing the finite_difference_h parameter\n")
        if X > (X_prev + max(self.err_tol, self.convergence_objective)):
            # The objective function rose by more than the error tolerance; reject the step.
            print_progress(ITERATION, nxk, ndx, ngd, "\x1b[91m", X, X-X_prev, Quality)
            trust = max(ndx*(1./(1+self.adapt_fac)), self.mintrust)
            trustprint = "Reducing trust radius to % .4e\n" % trust
            printcool("Objective function rises!\nRe-evaluating at the previous point..", color=1)
        Best_Start = ITERATION - self.iterinit
        X_hist = np.append(X_hist, X)

        self.chk = {'xk': xk, 'X': X, 'G': G, 'H': H, 'X_hist': X_hist, 'trust': trust}
        outfnm = self.save_mvals_to_input(xk)
        logger.info("Input file with saved parameters: %s\n" % outfnm)
        if ITERATION == self.maxstep:
            logger.info("Maximum number of optimization steps reached (%i)\n" % ITERATION)
            break
        data = self.Objective.Full(xk, Ord, verbose=True)
        X, G, H = data['X'], data['G'], data['H']
        nxk = np.linalg.norm(xk)
        ngd = np.linalg.norm(G)
        printcool("Objective function rises!\nTaking another step from previous point..", color=1)

        # Restore the stored data from the previous point and adjust the trust radius
        # based on the quality of the step just taken.
        data = deepcopy(datastor)
        if Quality <= ThreLQ and self.trust0 > 0:
            # Low quality step: shrink the trust radius.
            trust = max(ndx*(1./(1+self.adapt_fac)), self.mintrust)
            trustprint = "Low quality step, reducing trust radius to % .4e\n" % trust
        elif Quality >= ThreHQ and bump and self.trust0 > 0:
            # High quality step that hit the trust boundary: grow the trust radius,
            # with damped growth once the radius exceeds its original value.
            curr_trust = trust
            trust += self.adapt_fac*trust*np.exp(-1*self.adapt_damp*(trust/self.trust0 - 1))
            if trust > curr_trust:
                trustprint = "Increasing trust radius to % .4e\n" % trust
        color = "\x1b[92m" if Best_Step else "\x1b[0m"
        for fnm in self.FF.fnms:
            if os.path.exists(os.path.join(self.resdir, fnm)):
                bak(os.path.join(self.resdir, fnm))
        self.FF.make(xk, printdir=self.resdir)
        outfnm = self.save_mvals_to_input(xk)
        printcool("The force field has been written to the %s directory.\n"
                  "Input file with optimization parameters saved to %s." % (self.resdir, outfnm), color=0)
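
# The adaptive trust-radius rules above grow the radius by a factor that is damped
# once the radius exceeds its starting value trust0, and shrink it toward the actual
# step length after low-quality steps.  A standalone numeric sketch of the same two
# formulas; the threshold and parameter values below are examples only, and in the
# real code the increase additionally requires the previous step to have hit the
# trust boundary ('bump').
import numpy as np

def _sketch_trust_update(trust, quality, ndx, trust0=0.1, adapt_fac=0.25,
                         adapt_damp=0.5, mintrust=0.0, ThreLQ=0.25, ThreHQ=0.75):
    """Illustrative only: grow/shrink rules of the adaptive trust radius."""
    if quality <= ThreLQ:
        # Low-quality step: shrink toward the actual step length.
        return max(ndx * (1.0 / (1 + adapt_fac)), mintrust)
    if quality >= ThreHQ:
        # High-quality step: grow, with exponential damping past trust0.
        return trust + adapt_fac * trust * np.exp(-1 * adapt_damp * (trust / trust0 - 1))
    return trust

# e.g. starting at trust = trust0 = 0.1, a high-quality step gives 0.125;
# at trust = 0.5 the damped growth increment is only about 0.017.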

        # BFGS update terms for the approximate Hessian (used when b_BFGS is set):
        # Dx is the parameter step and Dy is the change in the gradient.
        Dx = col(xk - xk_prev)
        Dy = col(G - G_prev)
        Mat1 = (np.dot(Dy,Dy.T))/(np.dot(Dy.T,Dx))[0,0]
        Mat2 = (np.dot(np.dot(Hnew,Dx),np.dot(Hnew,Dx).T))/(multi_dot([Dx.T,Hnew,Dx]))[0,0]
        if len(self.FF.prmdestroy_this) > 0:
            self.FF.prmdestroy_save.append(self.FF.prmdestroy_this)
            self.FF.linedestroy_save.append(self.FF.linedestroy_this)
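
# Mat1 and Mat2 above are the two rank-one terms of the standard BFGS Hessian update,
# H_new = H + (dy dy^T)/(dy^T dx) - (H dx)(H dx)^T/(dx^T H dx); the surrounding update
# of Hnew is not shown in this listing.  A self-contained sketch of the full update:
import numpy as np

def _sketch_bfgs_update(H, dx, dy):
    """Illustrative only: standard BFGS update of an approximate Hessian H.
    dx = xk - xk_prev (parameter step), dy = G - G_prev (gradient change)."""
    dx = dx.reshape(-1, 1)   # column vectors, like col() in forcebalance.nifty
    dy = dy.reshape(-1, 1)
    Mat1 = np.dot(dy, dy.T) / np.dot(dy.T, dx)[0, 0]
    Hdx = np.dot(H, dx)
    Mat2 = np.dot(Hdx, Hdx.T) / np.linalg.multi_dot([dx.T, H, dx])[0, 0]
    return H + Mat1 - Mat2

# For a quadratic objective with true Hessian A, repeated updates drive H toward A.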

        X_hist = np.append(X_hist, X)
        stdfront = np.std(X_hist[-self.hist:]) if len(X_hist) > self.hist else np.std(X_hist)
        dX2_sign = -1 if (len(X_hist) > 1 and X_hist[-1] < X_hist[-2]) else 1
        nxk = np.linalg.norm(xk)
        ngd = np.linalg.norm(G)
        print_progress(ITERATION, nxk, ndx, ngd, color, X, dX2_sign*stdfront, Quality)

        bar = printcool("Total Gradient", color=4)
        self.FF.print_map(vals=G, precision=8)
        for key, val in self.Objective.ObjDict.items():

        # Check the convergence criteria; convergence requires self.criteria of the
        # three tests (gradient norm, step size, objective history) to be satisfied.
        if self.uncert or (self.converge_lowq or Quality > ThreLQ) or (ITERATION == self.iterinit and self.iterinit > 0):
            if ngd < self.convergence_gradient:
                logger.info("Convergence criterion reached for gradient norm (%.2e)\n" % self.convergence_gradient)
                criteria_satisfied['grad'] = True
            else: criteria_satisfied['grad'] = False
            if ndx < self.convergence_step and ndx >= 0.0 and ITERATION > self.iterinit:
                logger.info("Convergence criterion reached in step size (%.2e)\n" % self.convergence_step)
                criteria_satisfied['step'] = True
            else: criteria_satisfied['step'] = False
            if stdfront < self.convergence_objective and len(X_hist) >= self.hist:
                logger.info("Convergence criterion reached for objective function (%.2e)\n" % self.convergence_objective)
                criteria_satisfied['obj'] = True
            else: criteria_satisfied['obj'] = False
        if sum(criteria_satisfied.values()) >= self.criteria: break

        datastor = deepcopy(data)
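
# Convergence is declared once at least self.criteria of the three tests above hold
# at the same time.  A compact standalone sketch of that counting logic; the default
# thresholds below are examples, not taken from this listing.
def _sketch_converged(stdfront, ngd, ndx, criteria=1,
                      conv_obj=1e-4, conv_grad=1e-3, conv_step=1e-4):
    """Illustrative only: the 'criteria of 3 needed' check used by MainOptimizer."""
    satisfied = {'obj': stdfront < conv_obj,
                 'grad': ngd < conv_grad,
                 'step': ndx < conv_step}
    return sum(satisfied.values()) >= criteria

# e.g. _sketch_converged(stdfront=5e-5, ngd=0.1, ndx=0.1, criteria=1) -> True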

        xk_prev = xk.copy()
        pk_prev = self.FF.create_pvals(xk)
        logger.info(trustprint)
        logger.info("Calculating nonlinear optimization step\n")
        dx, dX_expect, bump = self.step(xk, data, trust)
        ndx = np.linalg.norm(dx)

        # Re-check the step-size criterion for the newly computed step, then test
        # the termination conditions before actually taking the step.
        if self.uncert or (self.converge_lowq or Quality > ThreLQ) or (ITERATION == self.iterinit and self.iterinit > 0):
            if ndx < self.convergence_step and ndx >= 0.0 and ITERATION > self.iterinit:
                logger.info("Convergence criterion reached in step size (%.2e)\n" % self.convergence_step)
                criteria_satisfied['step'] = True
            else: criteria_satisfied['step'] = False
        if sum(criteria_satisfied.values()) >= self.criteria: break
        if ITERATION == self.maxstep:
            logger.info("Maximum number of optimization steps reached (%i)\n" % ITERATION)
            break
        if ITERATION > self.iterinit and ndx < self.step_lowerbound:
            logger.info("Step size is too small to continue (%.3e < %.3e)\n" % (ndx, self.step_lowerbound))
            break

        # Take the step and print the old parameters, the step, and the new parameters.
        xk += dx
        pk = self.FF.create_pvals(xk)
        dp = pk - pk_prev
        bar = printcool("Mathematical Parameters (Current + Step = Next)", color=5)
        self.FF.print_map(vals=["% .4e %s %.4e = % .4e" % (xk_prev[i], '+' if dx[i] >= 0 else '-', abs(dx[i]), xk[i]) for i in range(len(xk))])
        bar = printcool("Physical Parameters (Current + Step = Next)", color=5)
        self.FF.print_map(vals=["% .4e %s %.4e = % .4e" % (pk_prev[i], '+' if dp[i] >= 0 else '-', abs(dp[i]), pk[i]) for i in range(len(pk))])
        self.chk = {'xk': xk, 'X': X, 'G': G, 'H': H, 'X_hist': X_hist, 'trust': trust}

        outfnm = self.save_mvals_to_input(xk)
        logger.info("Input file with saved parameters: %s\n" % outfnm)

        cnvgd = sum(criteria_satisfied.values()) >= self.criteria
        bar = printcool("\x1b[0m%s\x1b[0m\nFinal objective function value\nFull: % .6e  Un-penalized: % .6e" %
                        ("\x1b[1mOptimization Converged" if cnvgd else "\x1b[1;91mConvergence Failure",
                         data['X'], data['X0']), color=2)
        return xk

    def step(self, xk, data, trust):
        """ Computes the next step in the parameter space.  There are lots of tricks here that I will document later.

        @param[in] G The gradient
        @param[in] H The Hessian
        @param[in] trust The trust radius
        """
        from scipy import optimize

        X, G, H = (data['X0'], data['G0'], data['H0']) if self.bhyp else (data['X'], data['G'], data['H'])
        H1 = H.copy()
        # Make sure the Hessian is reasonably conditioned before inverting it: if the
        # lowest eigenvalue falls below self.eps, shift the diagonal (mix in steepest descent).
        H1 = np.delete(H1, self.excision, axis=0)
        H1 = np.delete(H1, self.excision, axis=1)
        Eig = np.linalg.eigh(H1)[0]
        Emin = min(Eig)
        if Emin < self.eps:
            Adj = max(self.eps, 0.01*abs(Emin)) - Emin
            logger.info("Hessian has a small or negative eigenvalue (%.1e), mixing in some steepest descent (%.1e) to correct this.\n" % (Emin, Adj))
            logger.info("Eigenvalues are:\n")
            H += Adj*np.eye(H.shape[0])

        if self.bhyp:
            # Hyperbolic penalty: the model objective is not quadratic, so the step is
            # found by numerically minimizing the model with L-BFGS (below).
            G = np.delete(G, self.excision)
            H = np.delete(H, self.excision, axis=0)
            H = np.delete(H, self.excision, axis=1)
            xkd = np.delete(xk, self.excision)
            warn_press_key("Using the multiplicative hyperbolic penalty is discouraged!")

            # The model objective, gradient and Hessian at a trial step dx are evaluated
            # through a small helper object (instantiated as HYP below); it recomputes
            # only when dx has changed by more than 1e-16.
            Obj0 = {'X': X, 'G': G, 'H': H}

                    self.dx = 1e10 * np.ones(len(HL))
                    self.Grad = np.zeros(len(HL))
                    self.Hess = np.zeros((len(HL), len(HL)))

                def _compute(self, dx):
                    Tmp = np.dot(self.H, dx)
                    self.Val = (X + np.dot(dx, G) + 0.5*np.dot(dx, Tmp) + Reg_Term[0] - data['X'])
                    self.Grad = G + Tmp + Reg_Term[1]

                def compute_val(self, dx):
                    ddx = dx - self.dx
                    if np.dot(ddx, ddx) > 1e-16:
                        self._compute(dx)
                    return self.Val

                def compute_grad(self, dx):
                    ddx = dx - self.dx
                    if np.dot(ddx, ddx) > 1e-16:
                        self._compute(dx)
                    return self.Grad

                def compute_hess(self, dx):
                    ddx = dx - self.dx
                    if np.dot(ddx, ddx) > 1e-16:
                        self._compute(dx)
                    return self.Hess

            def hyper_solver(L):
                dx0 = np.zeros(len(xkd))
                HL = H + (L-1)**2*np.eye(len(H))

                t0 = time.time()
                Opt1 = optimize.fmin_l_bfgs_b(HYP.compute_val, dx0, fprime=HYP.compute_grad, m=30, factr=1e7, pgtol=1e-4, iprint=-1, disp=0, maxfun=1e5, maxiter=1e5)
                logger.info("%.3f s (L-BFGS 1) ", time.time() - t0)
                t0 = time.time()
                Opt2 = optimize.fmin_l_bfgs_b(HYP.compute_val, -xkd, fprime=HYP.compute_grad, m=30, factr=1e7, pgtol=1e-4, iprint=-1, disp=0, maxfun=1e5, maxiter=1e5)
                logger.info("%.3f s (L-BFGS 2) ", time.time() - t0)
                dx1, sol1 = Opt1[0], Opt1[1]
                dx2, sol2 = Opt2[0], Opt2[1]
                dxb, sol = (dx1, sol1) if sol1 <= sol2 else (dx2, sol2)
                for i in self.excision:
                    dxb = np.insert(dxb, i, 0)
                return dxb, sol
        else:
            # Standard (parabolic penalty) case: excise the fixed parameters, then
            # compute steps by inverting the (possibly scaled) Hessian.
            G = np.delete(G, self.excision)
            H = np.delete(H, self.excision, axis=0)
            H = np.delete(H, self.excision, axis=1)

            logger.debug("Inverting Hessian:\n")
            logger.debug(" G:\n")
            pvec1d(G, precision=5, loglevel=DEBUG)
            logger.debug(" H:\n")
            pmat2d(H, precision=5, loglevel=DEBUG)
            Hi = invert_svd(H)
            dx = flat(-1 * np.dot(Hi, col(G)))
            logger.debug(" dx:\n")
            pvec1d(dx, precision=5, loglevel=DEBUG)
            for i in self.excision:
                dx = np.insert(dx, i, 0)

            def para_solver(L):
                # Levenberg-Marquardt style: adding (L-1)^2 to the Hessian diagonal
                # shortens the step and rotates it toward steepest descent.
                HT = H + (L-1)**2*np.eye(len(H))
                logger.debug("Inverting Scaled Hessian:\n")
                logger.debug(" G:\n")
                pvec1d(G, precision=5, loglevel=DEBUG)
                logger.debug(" HT: (Scal = %.4f)\n" % (1+(L-1)**2))
                pmat2d(HT, precision=5, loglevel=DEBUG)
                Hi = invert_svd(HT)
                dx = flat(-1 * np.dot(Hi, col(G)))
                logger.debug(" dx:\n")
                pvec1d(dx, precision=5, loglevel=DEBUG)
                sol = flat(0.5*multi_dot([row(dx), H, col(dx)]))[0] + np.dot(dx, G)
                for i in self.excision:
                    dx = np.insert(dx, i, 0)
                return dx, sol

        def solver(L):
            return hyper_solver(L) if self.bhyp else para_solver(L)
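
# para_solver above takes a Levenberg-style step: it adds (L-1)^2 to the Hessian
# diagonal, solves for dx = -(H + (L-1)^2 I)^{-1} G, and reports the predicted change
# sol = 0.5 dx^T H dx + dx.G.  A standalone sketch using a plain linear solve in place
# of the SVD-based inverse that the module uses:
import numpy as np

def _sketch_para_solver(G, H, L):
    """Illustrative only: Levenberg-style scaled-Hessian step.
    Larger L shortens the step and rotates it toward steepest descent."""
    HT = H + (L - 1) ** 2 * np.eye(len(H))
    dx = -np.linalg.solve(HT, G)
    # Predicted change of the quadratic model of the objective for this step.
    expect = 0.5 * np.linalg.multi_dot([dx, H, dx]) + np.dot(dx, G)
    return dx, expect

# L = 1 recovers the plain Newton-Raphson step dx = -H^{-1} G.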

        def trust_fun(L):
            N = np.linalg.norm(solver(L)[0])
            logger.info("Finding trust radius: H%+.4f*I, length %.4e (target %.4e)\n" % ((L-1)**2, N, trust))
            return (N - trust)**2

        def h_fun(L):
            # Seek a step whose length equals the finite difference step size self.h.
            N = np.linalg.norm(solver(L)[0])
            logger.debug("\rL = %.4e, Hessian diagonal addition = %.4e: found length %.4e, objective is %.4e\n" % (L, (L-1)**2, N, (N - trust)**2))
            return (N - self.h)**2

        def search_fun(L):
            # Hessian diagonal search: evaluate the actual objective function at the trial step.
            dx, sol = solver(L)
            xk_ = xk + dx
            Result = self.Objective.Full(xk_, 0, verbose=False, customdir="micro_%02i" % search_fun.micro)['X'] - data['X']
            logger.info("Hessian diagonal search: H%+.4f*I, length %.4e, result % .4e\n" % ((L-1)**2, np.linalg.norm(dx), Result))
            search_fun.micro += 1
            return Result
        search_fun.micro = 0

        if self.trust0 > 0:
            # Trust radius mode: take the Newton-Raphson step if it fits inside the
            # trust radius, otherwise find the L whose step length matches the radius.
            bump = False
            dx, expect = solver(1)
            dxnorm = np.linalg.norm(dx)
            if dxnorm > trust:
                bump = True
                LOpt = optimize.brent(trust_fun, brack=(self.lmg, self.lmg*4), tol=1e-6)
                dx, expect = solver(LOpt)
                dxnorm = np.linalg.norm(dx)
                logger.info("Trust-radius step found (length %.4e), % .4e added to Hessian diagonal\n" % (dxnorm, (LOpt-1)**2))
            else:
                logger.info("Newton-Raphson step found (length %.4e)\n" % (dxnorm))
        else:
            # Hessian diagonal search: line search over the actual objective function.
            dx, expect = solver(1)
            dxnorm = np.linalg.norm(dx)
            LOpt = optimize.brent(trust_fun, brack=(self.lmg, self.lmg*4), tol=1e-4)
            dx, expect = solver(LOpt)
            dxnorm = np.linalg.norm(dx)
            logger.info("Starting Hessian diagonal search with step size %.4e\n" % dxnorm)
            Result = optimize.brent(search_fun, brack=(LOpt, LOpt*4), tol=self.search_tol, full_output=1)
            if Result[1] > 0:
                LOpt = optimize.brent(h_fun, brack=(self.lmg, self.lmg*4), tol=1e-6)
                dx, expect = solver(LOpt)
                dxnorm = np.linalg.norm(dx)
                logger.info("Restarting search with step size %.4e\n" % dxnorm)
                Result = optimize.brent(search_fun, brack=(LOpt, LOpt*4), tol=self.search_tol, full_output=1)
            dx, _ = solver(Result[0])
            logger.info("Optimization step found (length %.4e)\n" % np.linalg.norm(dx))

        # For penalty functions that redirect parameters toward zero, update the
        # parameter redirection based on the new step.
        if self.Objective.Penalty.ptyp in [4, 5, 6]:
            self.FF.make_redirect(dx+xk)

        return dx, expect, bump
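
# trust_fun above turns the trust-region condition into a one-dimensional problem:
# pick L so that the scaled-Hessian step has length equal to the trust radius, by
# Brent-minimizing (|dx(L)| - trust)**2.  A standalone sketch of that search (the
# bracket value lmg=1.0 is an example, mirroring the lm_guess option):
import numpy as np
from scipy import optimize

def _sketch_trust_search(G, H, trust, lmg=1.0):
    """Illustrative only: find L so the scaled-Hessian step length matches trust."""
    def steplen(L):
        dx = -np.linalg.solve(H + (L - 1) ** 2 * np.eye(len(H)), G)
        return np.linalg.norm(dx)
    def trust_fun(L):
        return (steplen(L) - trust) ** 2
    if steplen(1.0) <= trust:
        return 1.0                     # the Newton step already fits inside the trust region
    return optimize.brent(trust_fun, brack=(lmg, lmg * 4), tol=1e-6)

# Example: for H = diag(4, 1) and G = (2, 2) the Newton step has length ~2.1, so
# _sketch_trust_search(G, H, trust=0.5) returns L > 1, shortening the step to ~0.5.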
942 """ Optimize the force field parameters using the Newton-Raphson method (@see MainOptimizer) """ 946 """ Optimize the force field parameters using the BFGS method; currently the recommended choice (@see MainOptimizer) """ 950 """ Driver for SciPy optimizations. 952 Using any of the SciPy optimizers requires that SciPy is installed. 953 This method first defines several wrappers around the objective function that the SciPy 954 optimizers can use. Then it calls the algorith mitself. 956 @param[in] Algorithm The optimization algorithm to use, for example 'powell', 'simplex' or 'anneal' 959 from scipy
import optimize

        def wrap(fin, Order=0, callback=True, fg=False):
            # Wrap the objective function into a plain callable (fout) that the SciPy
            # optimizers can use, printing progress as it is evaluated.
            def print_heading():
                if Order == 2: logger.info("%9s%9s%12s%12s%14s%12s\n" % ("Eval(H)", " |k| ", " |dk| ", " |grad| ", " -=X2=- ", "Delta(X2)"))
                elif Order == 1: logger.info("%9s%9s%12s%12s%14s%12s\n" % ("Eval(G)", " |k| ", " |dk| ", " |grad| ", " -=X2=- ", "Delta(X2)"))
                else: logger.info("%9s%9s%12s%14s%12s\n" % ("Eval(X)", " |k| ", " |dk| ", " -=X2=- ", "Delta(X2)"))

            def print_results(color, newline=True):
                if newline: head = '' ; foot = '\n'
                else: head = '\r' ; foot = '\r'
                if Order: logger.info(head + "%6i%12.3e%12.3e%12.3e%s%14.5e\x1b[0m%12.3e" % \
                                          (fout.evals, np.linalg.norm(mvals), np.linalg.norm(mvals-self.xk_prev), np.linalg.norm(G), color, X, X - self.x_prev) + foot)
                else: logger.info(head + "%6i%12.3e%12.3e%s%14.5e\x1b[0m%12.3e" % \
                                      (fout.evals, np.linalg.norm(mvals), np.linalg.norm(mvals-self.xk_prev), color, X, X - self.x_prev) + foot)

            def fout(mvals):
                Result = fin(mvals, Order=Order, verbose=False)
                X, G, H = [Result[i] for i in ['X', 'G', 'H']]
                bar = printcool("Current Mathematical Parameters:", color=5)
                self.FF.print_map(vals=["% .4e" % i for i in mvals])
                print_results(color)
                if not self.prev_bad: print_heading()
                print_results(color, newline=False)
                if np.linalg.norm(self.xk_prev - mvals) > 0.0:
            return fout

        def xwrap(func, callback=True):
            return wrap(func, Order=0, callback=callback)

        def fgwrap(func, callback=True):
            return wrap(func, Order=1, callback=callback, fg=True)

        def gwrap(func, callback=True):
            return wrap(func, Order=1, callback=callback)

        def hwrap(func, callback=True):
            return wrap(func, Order=2, callback=callback)

        if Algorithm == "powell":
            printcool("Minimizing Objective Function using\nPowell's Conjugate Direction Method", ansi=1, bold=1)
            return optimize.fmin_powell(xwrap(self.Objective.Full), self.mvals0, ftol=self.convergence_objective, xtol=self.convergence_step, maxiter=self.maxstep)
        elif Algorithm == "simplex":
            printcool("Minimizing Objective Function using\nNelder-Mead Simplex Method", ansi=1, bold=1)
            return optimize.fmin(xwrap(self.Objective.Full), self.mvals0, ftol=self.convergence_objective, xtol=self.convergence_step, maxiter=self.maxstep, maxfun=self.maxstep*10)
        elif Algorithm == "anneal":
            printcool("Minimizing Objective Function using Simulated Annealing", ansi=1, bold=1)
            xmin, Jmin, T, feval, iters, accept, status = optimize.anneal(xwrap(self.Objective.Full), self.mvals0, lower=self.mvals0-1*self.trust0*np.ones(self.np),
                                                                          upper=self.mvals0+self.trust0*np.ones(self.np), schedule='boltzmann', full_output=True)
            scodes = {0 : "Points no longer changing.",
                      1 : "Cooled to final temperature.",
                      2 : "Maximum function evaluations reached.",
                      3 : "Maximum cooling iterations reached.",
                      4 : "Maximum accepted query locations reached.",
                      5 : "Final point not the minimum amongst encountered points."}
            logger.info("Simulated annealing info:\n")
            logger.info("Status: %s \n" % scodes[status])
            logger.info("Function evaluations: %i" % feval)
            logger.info("Cooling iterations: %i" % iters)
            logger.info("Tests accepted: %i" % accept)
        elif Algorithm == "basinhopping":
            printcool("Minimizing Objective Function using Basin Hopping Method", ansi=1, bold=1)
            Result = optimize.basinhopping(xwrap(self.Objective.Full), self.mvals0, niter=self.maxstep, T=T, stepsize=self.trust0, interval=20,
                                           minimizer_kwargs={'method': 'nelder-mead', 'options': {'xtol': self.convergence_step, 'ftol': self.convergence_objective}}, disp=True)
            logger.info(Result.message + "\n")
        elif Algorithm == "cg":
            printcool("Minimizing Objective Function using\nPolak-Ribiere Conjugate Gradient Method", ansi=1, bold=1)
            return optimize.fmin_cg(xwrap(self.Objective.Full, callback=False), self.mvals0, fprime=gwrap(self.Objective.Full), gtol=self.convergence_gradient)
        elif Algorithm == "tnc":
            printcool("Minimizing Objective Function using\nTruncated Newton Algorithm (Unconfirmed)", ansi=1, bold=1)
            Result = optimize.fmin_tnc(fgwrap(self.Objective.Full, callback=False), self.mvals0,
                                       maxfun=self.maxstep, ftol=self.convergence_objective, pgtol=self.convergence_gradient, xtol=self.convergence_objective)
        elif Algorithm == "ncg":
            printcool("Minimizing Objective Function using\nNewton-CG Algorithm", ansi=1, bold=1)
            Result = optimize.fmin_ncg(xwrap(self.Objective.Full, callback=False), self.mvals0, fprime=gwrap(self.Objective.Full, callback=False),
                                       fhess=hwrap(self.Objective.Full), avextol=self.convergence_objective, maxiter=self.maxstep, disp=True)
        elif Algorithm == "bfgs":
            printcool("Minimizing Objective Function using\nBFGS Quasi-Newton Method", ansi=1, bold=1)
            return optimize.fmin_bfgs(xwrap(self.Objective.Full, callback=False), self.mvals0, fprime=gwrap(self.Objective.Full), gtol=self.convergence_gradient)

    def GeneticAlgorithm(self):
        """
        Genetic algorithm, under development.  It currently works but a
        genetic algorithm is more like a concept; i.e. there is no
        single way to implement it.

        @todo Massive parallelization hasn't been implemented yet
        """
        def generate_fresh(rows, cols):
            new_guys = np.zeros((rows, cols))
            for i in range(rows):
                new_guys[i, int(cols*np.random.random())] = self.trust0 * np.random.randn()
            return new_guys

        def cross_over(chrom1, chrom2):
            crosspt = 1 + int((len(chrom1) - 1) * np.random.random())
            Ans1 = np.hstack((chrom1[:crosspt], chrom2[crosspt:]))
            Ans2 = np.hstack((chrom2[:crosspt], chrom1[crosspt:]))
            return Ans1, Ans2

        def mutate(chrom):
            mutpt = int(len(chrom) * np.random.random())
            chrom[mutpt] += self.trust0 * np.random.randn()
            return chrom
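
# cross_over splices two parameter vectors at a random point, and mutate perturbs one
# randomly chosen entry by a trust0-scaled Gaussian.  A standalone sketch of the same
# two operations; trust0 here is just an example value.
import numpy as np

def _sketch_cross_over(chrom1, chrom2, rng=np.random):
    """Illustrative only: single-point crossover as in cross_over() above."""
    crosspt = 1 + int((len(chrom1) - 1) * rng.random())
    return (np.hstack((chrom1[:crosspt], chrom2[crosspt:])),
            np.hstack((chrom2[:crosspt], chrom1[crosspt:])))

def _sketch_mutate(chrom, trust0=0.1, rng=np.random):
    """Illustrative only: perturb one randomly chosen gene, as in mutate() above."""
    chrom = chrom.copy()
    chrom[int(len(chrom) * rng.random())] += trust0 * rng.randn()
    return chrom

# a, b = np.zeros(4), np.ones(4)
# _sketch_cross_over(a, b) -> e.g. ([0,0,1,1], [1,1,0,0]) depending on the crossover point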

        def initial_generation():
            return np.vstack((self.mvals0.copy(), np.random.randn(PopSize, self.FF.np)*self.trust0)) / (self.FF.np ** 0.5)

        def calculate_fitness(pop):
            return [self.Objective.Full(i, Order=0, verbose=False)['X'] for i in pop]

        def sort_by_fitness(fits):
            return np.sort(fits), np.argsort(fits)

        def generate_new_population(sorted, pop):
            newpop = pop[sorted[1]]
            # The fittest individuals are kept and reproduce by crossover.
            a = list(range(KeepNum))
            logger.info("Keeping: " + str(a) + '\n')
            for i in range(0, KeepNum, 2):
                logger.info("%i and %i reproducing to replace %i and %i\n" % (a[i], a[i+1], len(newpop)-i-2, len(newpop)-i-1))
                newpop[-i-1], newpop[-i-2] = cross_over(newpop[a[i]], newpop[a[i+1]])
            # The remaining individuals are subject to random mutation.
            b = list(range(KeepNum, len(newpop)))
            for i in b[:MutNum]:
                logger.info("Randomly mutating %i\n" % i)
                newpop[i] = mutate(newpop[i])
            return newpop

        def xwrap(func, verbose=True):
            def my_func(mvals):
                if verbose: logger.info('\n')
                Answer = func(mvals, Order=0, verbose=verbose)['X']
                dx = (my_func.x_best - Answer) if my_func.x_best is not None else 0.0
                if my_func.x_best is None or Answer < my_func.x_best:
                    color = "\x1b[92m"
                    my_func.x_best = Answer
                else:
                    color = "\x1b[91m"
                logger.info("k=" + ' '.join(["% .4f" % i for i in mvals]) + '\n')
                logger.info("X2= %s%12.3e\x1b[0m d(X2)= %12.3e\n" % (color, Answer, dx))
                return Answer
            my_func.x_best = None
            return my_func

        KeepNum = int(KeepThresh * PopSize)
        MutNum = int(MutProb * PopSize)
        CrosNum = int(CrosProb/2 * PopSize) * 2
        Population = initial_generation()

        Gen = 0
        while True:
            Fits = calculate_fitness(Population)
            Sorted = sort_by_fitness(Fits)
            logger.info(str(Sorted))
            Best[0].append(Sorted[0][0])
            Best[1].append(Population[Sorted[1][0]])
            logger.info(str(Best))
            if Gen == self.maxstep: break
            Population = generate_new_population(Sorted, Population)
            Gen += 1

        logger.info(str(Best))
        return Population[Sorted[1][0]]
1185 """ Use SciPy's built-in simplex algorithm to optimize the parameters. @see Optimizer::ScipyOptimizer """ 1189 """ Use SciPy's built-in Powell direction-set algorithm to optimize the parameters. @see Optimizer::ScipyOptimizer """ 1193 """ Use SciPy's built-in simulated annealing algorithm to optimize the parameters. @see Optimizer::ScipyOptimizer """ 1197 """ Use SciPy's built-in conjugate gradient algorithm to optimize the parameters. @see Optimizer::ScipyOptimizer """ 1201 """ Use SciPy's built-in BFGS algorithm to optimize the parameters. @see Optimizer::ScipyOptimizer """ 1205 """ Use SciPy's built-in basin hopping algorithm to optimize the parameters. @see Optimizer::ScipyOptimizer """ 1209 """ Use SciPy's built-in truncated Newton (fmin_tnc) algorithm to optimize the parameters. @see Optimizer::ScipyOptimizer """ 1213 """ Use SciPy's built-in Newton-CG (fmin_ncg) algorithm to optimize the parameters. @see Optimizer::ScipyOptimizer """ 1217 """ Scan through parameter values. 1219 This option is activated using the inputs: 1223 scan_vals low:hi:nsteps 1224 scan_idxnum (number) -or- 1228 This method goes to the specified parameter indices and scans through 1229 the supplied values, evaluating the objective function at every step. 1231 I hope this method will be useful for people who just want to look at 1232 changing one or two parameters and seeing how it affects the force 1235 @todo Maybe a multidimensional grid can be done. 1236 @param[in] MathPhys Switch to use mathematical (True) or physical (False) parameters. 1244 vals_in = [float(i)
for i
in self.scan_vals.split(
":")]
1246 logger.error(
"Syntax error: in the input file please use scan_vals low:hi:nsteps\n")
1248 if len(vals_in) != 3:
1249 logger.error(
"Syntax error: in the input file please use scan_vals low:hi:nsteps\n")
1251 idx = [int(i)
for i
in self.idxnum]
1252 for j
in self.idxname:
1253 idx += [self.
FF.map[i]
for i
in self.
FF.map
if j
in i]
1255 scanvals = np.linspace(vals_in[0],vals_in[1],vals_in[2])
1256 logger.info(
'User input for %s parameter values to scan over:\n' % (
"mathematical" if MathPhys
else "physical"))
1257 logger.info(str(vals_in) +
'\n')
1258 logger.info(
'These parameter values will be used:\n')
1259 logger.info(str(scanvals) +
'\n')
1264 logger.info(
"Scanning parameter %i (%s) in the mathematical space\n" % (pidx,self.
FF.plist[pidx]))
1265 vals = self.
mvals0.copy()
1267 logger.info(
"Scanning parameter %i (%s) in the physical space\n" % (pidx,self.
FF.plist[pidx]))
1268 self.
FF.use_pvals =
True 1269 vals = self.
FF.pvals0.copy()
1272 printcool(
"Parameter %i (%s) Value is now % .4e ; Step %i/%i" % (pidx, self.
FF.plist[pidx],i,counter,len(scanvals)), color=1,sym=
"@")
1274 data = self.
Objective.Full(vals,Order=0,verbose=
True)
1275 if data[
'X'] < minobj:
1277 minvals = vals.copy()
1278 logger.info(
"Value = % .4e Objective = % .4e\n" % (i, data[
'X']))
1284 """ Scan through the mathematical parameter space. @see Optimizer::ScanValues """ 1288 """ Scan through the physical parameter space. @see Optimizer::ScanValues """ 1292 """ A single-point objective function computation. """ 1294 printcool(
"Objective Function Single Point: %.8f" % data[
'X'])
1297 """ A single-point gradient computation. """ 1299 bar =
printcool(
"Objective function: %.8f\nGradient below" % data[
'X'])
1300 self.
FF.print_map(vals=data[
'G'],precision=8)
1304 """ A single-point Hessian computation. """ 1306 bar =
printcool(
"Objective function: %.8f\nGradient below" % data[
'X'])
1307 self.
FF.print_map(vals=data[
'G'],precision=8)
1310 pmat2d(data[
'H'], precision=8)
1314 """ An experimental method to determine the parameter scale factors 1315 that results in the best conditioned Hessian. """ 1316 from scipy
import optimize
1318 X, G, H = (data[
'X0'], data[
'G0'], data[
'H0'])
1320 bar =
printcool(
"(Un-penalized) objective function: %.8f\nGradient below" % X)
1321 self.
FF.print_map(vals=G,precision=8)
1327 bar =
printcool(
"(Un-penalized) objective function: %.8f" % X)
1328 logger.info(
"More than 30 parameters; gradient and Hessian written to grad.txt and hess.txt\n")
1329 base, ext = os.path.splitext(self.input_file)
1330 np.savetxt(
'%s-grad.txt' % base, G)
1331 np.savetxt(
'%s-hess.txt' % base, H)
1334 H1 = np.delete(H1, self.
excision, axis=0)
1335 H1 = np.delete(H1, self.
excision, axis=1)
1337 Cond = np.linalg.cond(H1)
1345 logger.info(
"Initial condition number = %.3f\n" % Cond)

        def newcond(logrskeys, multiply=True):
            """ Condition number function to be optimized.

            Parameters
            ----------
            logrskeys : np.ndarray
                Logarithms of the rescaling factor of each parameter type.
                The optimization is done in the log space.
            multiply : bool
                If set to True, then the exponentiated logrskeys will
                multiply the existing rescaling factors defined in the force field.

            Returns
            -------
            float
                Condition number of the Hessian matrix.
            """
            new_rsord = OrderedDict([(k, np.exp(logrskeys[i])) for i, k in enumerate(self.FF.rs_ord.keys())])
            answer = self.FF.make_rescale(new_rsord, H=H.copy(), multiply=multiply)
            H_a = answer['H'].copy()
            H_a = np.delete(H_a, self.excision, axis=0)
            H_a = np.delete(H_a, self.excision, axis=1)
            Cond_a = np.linalg.cond(H_a)
            if Cond_a > 1e100: Cond_a = 1e100
            # Regularize the optimization by penalizing large rescaling exponents.
            dlog = logrskeys - newcond.prev_step
            nlog = np.sqrt(np.sum(dlog**2))
            Reg = newcond.regularize * np.sum(logrskeys ** 2) / len(logrskeys)
            Obj = np.log(Cond_a) + Reg
            if newcond.verbose and newcond.step_n % 1000 == 0:
                logger.info("\rEval# %%6i: Step: %%9.3f Along: %%%is Condition: %%10.3e Regularize: %%8.3f Objective: %%8.3f\n" % max([len(k) for k in list(self.FF.rs_ord.keys())]) %
                            (newcond.step_n, nlog, list(new_rsord.keys())[np.argmax(np.abs(dlog))], Cond_a, Reg, np.log(Cond_a) + Reg))
            elif newcond.verbose and Obj < newcond.best:
                logger.info("\rEval# %%6i: Step: %%9.3f Along: %%%is Condition: %%10.3e Regularize: %%8.3f Objective: %%8.3f (new minimum)\n" % max([len(k) for k in list(self.FF.rs_ord.keys())]) %
                            (newcond.step_n, nlog, list(new_rsord.keys())[np.argmax(np.abs(dlog))], Cond_a, Reg, np.log(Cond_a) + Reg))
                newcond.best = Obj
            newcond.step_n += 1
            newcond.prev_step = logrskeys
            return Obj
        newcond.prev_step = np.zeros(len(self.FF.rs_ord.keys()), dtype=float)
        newcond.step_n = 0
        newcond.verbose = True
        newcond.regularize = 0.1
        newcond.best = np.inf
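
# FF.make_rescale is defined elsewhere; the essential effect on the Hessian is that
# rescaling parameter i by a factor s_i maps H -> S H S with S = diag(s).  The
# standalone sketch below reproduces the objective that newcond() minimizes (log of
# the condition number plus the same quadratic penalty on the log factors); the
# diagonal-rescaling model of make_rescale is an assumption made for illustration.
import numpy as np

def _sketch_log_condition(logs, H, regularize=0.1):
    """Illustrative only: objective over log rescaling factors, as in newcond()."""
    S = np.diag(np.exp(logs))
    H_a = np.linalg.multi_dot([S, H, S])     # Hessian after rescaling parameters by exp(logs)
    Cond_a = min(np.linalg.cond(H_a), 1e100)
    Reg = regularize * np.sum(np.asarray(logs) ** 2) / len(logs)
    return np.log(Cond_a) + Reg

# Example: an ill-conditioned diagonal Hessian diag(1e4, 1e-2) becomes perfectly
# conditioned after rescaling with logs = [-log(100), +log(10)], up to the penalty term.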

        logrsmult = np.zeros(len(self.FF.rs_ord.keys()), dtype=float)
        logger.info("Basin-hopping optimization of condition number in the space of log rescaling factors\n")
        if optmethod == 'basin':
            optimized = optimize.basinhopping(newcond, logrsmult, stepsize=1.0, niter=self.maxstep,
                                              minimizer_kwargs={'method': 'Powell', 'tol': 0.1, 'options': {'maxiter': 1000}})
            optresult = optimized.x
        elif optmethod == 'seq':
            optout = optimize.fmin_powell(newcond, logrsmult, xtol=0.01, ftol=0.01, maxfun=1000, disp=False, full_output=True)
            bestsol = optout[0].copy()
            bestval = optout[1]
            logger.info("New multipliers:" + ' '.join(['% .3f' % np.exp(s) for s in bestsol]) + '\n')
            logger.info("Sequential grid-scan + Powell in the space of log rescaling factors\n")
            outersol = bestsol.copy()
            outerval = bestval
            outeriter = 0
            while True:
                for i in range(len(logrsmult)):
                    for j in [np.log(0.1), np.log(10), np.log(0.01), np.log(100)]:
                        logrsmult = outersol.copy()
                        logrsmult[i] += j
                        logger.info("Trying new initial guess with element %i changed by %.3f:\n" % (i, j))
                        optout = optimize.fmin_powell(newcond, logrsmult, xtol=0.01, ftol=0.01, maxfun=1000, disp=False, full_output=True)
                        if optout[1] < bestval:
                            bestval = optout[1]
                            bestsol = optout[0].copy()
                            logger.info("New multipliers:" + ' '.join(['% .3f' % np.exp(s) for s in bestsol]) + '\n')
                outeriter += 1
                logger.info("Done outer iteration %i\n" % outeriter)
                if np.linalg.norm(outersol-bestsol) < 0.1:
                    logger.info("Sequential optimization solution moved by less than 0.1 (%.3f)\n" % np.linalg.norm(outersol-bestsol))
                    break
                if np.abs(outerval-bestval) < 0.1:
                    logger.info("Sequential optimization value improved by less than 0.1 (%.3f)" % (outerval-bestval))
                    break
                outerval = bestval
                outersol = bestsol.copy()
                if outeriter == maxouter:
                    logger.info("Outer iterations reached maximum of %i\n" % maxouter)
                    break
            optresult = outersol.copy()

        # Apply the optimized rescaling factors and report the new condition number.
        new_rsord = OrderedDict([(k, np.exp(optresult[i])) for i, k in enumerate(self.FF.rs_ord.keys())])
        answer = self.FF.make_rescale(new_rsord)
        newcond.regularize = 0.0
        newcond.verbose = False
        optval = np.exp(newcond(optresult))
        logger.info("\nOptimized condition number: %.3f\n" % optval)

        # Keep only the factors that actually affect the condition number; zero out
        # the rest and subtract the mean so the recommended factors are normalized.
        nonzeros = []
        printkeys = []
        for i in range(len(optresult)):
            trial = optresult.copy()
            trial[i] = 0.0
            if np.abs(newcond(trial)-newcond(optresult)) == 0.0:
                optresult[i] = 0.0
            else:
                nonzeros.append(optresult[i])
                printkeys.append(list(self.FF.rs_ord.keys())[i])
        optresult -= np.mean(nonzeros)

        # Round the recommended rescaling factors to the nearest 1, 2 or 4 x 10^n.
        opt_rsord = OrderedDict([(k, est124(np.exp(optresult[i])*self.FF.rs_ord[k])) for i, k in enumerate(self.FF.rs_ord.keys())])
        answer = self.FF.make_rescale(opt_rsord, mvals=self.mvals0, H=H.copy(), multiply=False)
        logger.info("Condition Number after Rounding Factors -> %.3f\n" % (np.exp(newcond(np.log(list(opt_rsord.values())), multiply=False))))
        bar = printcool("Previous values of the rescaling factors / prior widths:")
        logger.info(''.join([" %-35s : %.5e\n" % (i, self.FF.rs_ord[i]) for i in self.FF.rs_ord.keys()]))
        opt_rsord = OrderedDict([(k, opt_rsord[k]) for k in opt_rsord.keys() if k in printkeys])
        bar = printcool("Recommended values (may be slightly stochastic):")
        logger.info(''.join([" %-35s : %.1e\n" % (k, opt_rsord[k]) for k in opt_rsord.keys()]))
        if np.linalg.norm(self.mvals0) != 0.0:
            bar = printcool("Mathematical parameters in the new space:", color=4)
            self.FF.print_map(answer['mvals'])
        outfnm = self.save_mvals_to_input(answer['mvals'], priors=opt_rsord, jobtype='optimize')
        printcool("Input file with new priors/mvals saved to %s (jobtype set to optimize)." % (outfnm), color=0)
1495 """ Finite-difference checker for the objective function gradient. 1497 For each element in the gradient, use a five-point finite difference 1498 stencil to compute a finite-difference derivative, and compare it to 1499 the analytic result. 1504 Fdata = np.zeros(self.
FF.np)
1505 printcool(
"Checking first derivatives by finite difference!\n%-8s%-20s%13s%13s%13s%13s" \
1506 % (
"Index",
"Parameter ID",
"Analytic",
"Numerical",
"Difference",
"Fractional"),bold=1,color=5)
1507 for i
in range(self.
FF.np):
1509 Denom = max(abs(Adata[i]),abs(Fdata[i]))
1510 Denom = Denom > 1e-8
and Denom
or 1e-8
1511 D = Adata[i] - Fdata[i]
1512 Q = (Adata[i] - Fdata[i])/Denom
1513 cD = abs(D) > 0.5
and "\x1b[1;91m" or (abs(D) > 1e-2
and "\x1b[91m" or (abs(D) > 1e-5
and "\x1b[93m" or "\x1b[92m"))
1514 cQ = abs(Q) > 0.5
and "\x1b[1;91m" or (abs(Q) > 1e-2
and "\x1b[91m" or (abs(Q) > 1e-5
and "\x1b[93m" or "\x1b[92m"))
1515 logger.info(
"\r %-8i%-20s% 13.4e% 13.4e%s% 13.4e%s% 13.4e\x1b[0m\n" \
1516 % (i, self.
FF.plist[i][:20], Adata[i], Fdata[i], cD, D, cQ, Q))
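
# The "Numerical" column above comes from a five-point central-difference stencil
# (f1d5p in forcebalance.nifty is described as exactly this kind of stencil).  A
# standalone sketch of the standard five-point formula and the comparison performed
# for each parameter:
import numpy as np

def _sketch_f1d5p(f, x, h=1e-3):
    """Illustrative only: five-point central difference, O(h^4) accurate,
    f'(x) ~ (8*(f(x+h) - f(x-h)) - (f(x+2h) - f(x-2h))) / (12*h)."""
    return (8 * (f(x + h) - f(x - h)) - (f(x + 2 * h) - f(x - 2 * h))) / (12 * h)

# Example check against an analytic derivative, mirroring the Analytic/Numerical
# comparison printed by FDCheckG:
#   abs(_sketch_f1d5p(np.sin, 0.5) - np.cos(0.5)) is on the order of 1e-13 for h = 1e-3.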
1519 """ Finite-difference checker for the objective function Hessian. 1521 For each element in the Hessian, use a five-point stencil in both 1522 parameter indices to compute a finite-difference derivative, and 1523 compare it to the analytic result. 1525 This is meant to be a foolproof checker, so it is pretty slow. We 1526 could write a faster checker if we assumed we had accurate first 1527 derivatives, but it's better to not make that assumption. 1529 The second derivative is computed by double-wrapping the objective 1530 function via the 'wrap2' function. 1534 Fdata = np.zeros((self.
FF.np,self.
FF.np))
1535 printcool(
"Checking second derivatives by finite difference!\n%-8s%-20s%-20s%13s%13s%13s%13s" \
1536 % (
"Index",
"Parameter1 ID",
"Parameter2 ID",
"Analytic",
"Numerical",
"Difference",
"Fractional"),bold=1,color=5)
1539 def wrap2(mvals0,pidxi,pidxj):
1541 mvals = list(mvals0)
1546 for i
in range(self.
FF.np):
1547 for j
in range(i,self.
FF.np):
1549 Denom = max(abs(Adata[i,j]),abs(Fdata[i,j]))
1550 Denom = Denom > 1e-8
and Denom
or 1e-8
1551 D = Adata[i,j] - Fdata[i,j]
1552 Q = (Adata[i,j] - Fdata[i,j])/Denom
1553 cD = abs(D) > 0.5
and "\x1b[1;91m" or (abs(D) > 1e-2
and "\x1b[91m" or (abs(D) > 1e-5
and "\x1b[93m" or "\x1b[92m"))
1554 cQ = abs(Q) > 0.5
and "\x1b[1;91m" or (abs(Q) > 1e-2
and "\x1b[91m" or (abs(Q) > 1e-5
and "\x1b[93m" or "\x1b[92m"))
1555 logger.info(
"\r %-8i%-20s%-20s% 13.4e% 13.4e%s% 13.4e%s% 13.4e\x1b[0m\n" \
1556 % (i, self.
FF.plist[i][:20], self.
FF.plist[j][:20], Adata[i,j], Fdata[i,j], cD, D, cQ, Q))
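
# The "double-wrapping" described in the docstring above amounts to applying a
# one-dimensional stencil twice: an inner finite difference over parameter j defines
# a function of parameter i, which is differentiated again.  A standalone sketch for
# a mixed partial d2f/dxi dxj, using simple central differences rather than the
# module's fdwrap/f1d5p machinery:
import numpy as np

def _sketch_mixed_partial(f, x, i, j, h=1e-4):
    """Illustrative only: d^2 f / dx_i dx_j by nesting two central differences."""
    def df_dj(xv):
        xp, xm = xv.copy(), xv.copy()
        xp[j] += h
        xm[j] -= h
        return (f(xp) - f(xm)) / (2 * h)
    xp, xm = np.array(x, dtype=float), np.array(x, dtype=float)
    xp[i] += h
    xm[i] -= h
    return (df_dj(xp) - df_dj(xm)) / (2 * h)

# Example: f(x) = x0**2 * x1 has d2f/dx0 dx1 = 2*x0; at x = (1.5, 2.0) the sketch
# returns ~3.0, which would be compared against the analytic Hessian entry.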
1559 """ Read the checkpoint file for the main optimizer. """ 1561 if self.rchk_fnm
is not None:
1562 absfnm = os.path.join(self.root,self.rchk_fnm)
1563 if os.path.exists(absfnm):
1564 self.
chk = pickle.load(open(absfnm))
1566 logger.info(
"\x1b[40m\x1b[1;92mWARNING:\x1b[0m read_chk is set to True, but checkpoint file not loaded (wrong filename or doesn't exist?)\n")
1570 """ Write the checkpoint file for the main optimizer. """ 1571 if self.wchk_fnm
is not None:
1572 logger.info(
"Writing the checkpoint file %s\n" % self.wchk_fnm)
1573 with
wopen(os.path.join(self.root,self.wchk_fnm), binary=
True)
as f: pickle.dump(self.
chk, f)
Member summary for class Optimizer

def __init__(self, options, Objective, FF)
    Create an Optimizer object.
def Run(self)
    Call the appropriate optimizer.
def MainOptimizer(self, b_BFGS=0)
    The main ForceBalance adaptive trust-radius pseudo-Newton optimizer.
def step(self, xk, data, trust)
    Computes the next step in the parameter space.
def NewtonRaphson(self)
    Optimize the force field parameters using the Newton-Raphson method.
def BFGS(self)
    Optimize the force field parameters using the BFGS method; currently the recommended choice.
def ScipyOptimizer(self, Algorithm="None")
    Driver for SciPy optimizations.
def Simplex(self)
    Use SciPy's built-in simplex algorithm to optimize the parameters.
def Powell(self)
    Use SciPy's built-in Powell direction-set algorithm to optimize the parameters.
def Anneal(self)
    Use SciPy's built-in simulated annealing algorithm to optimize the parameters.
def ConjugateGradient(self)
    Use SciPy's built-in conjugate gradient algorithm to optimize the parameters.
def Scipy_BFGS(self)
    Use SciPy's built-in BFGS algorithm to optimize the parameters.
def BasinHopping(self)
    Use SciPy's built-in basin hopping algorithm to optimize the parameters.
def TruncatedNewton(self)
    Use SciPy's built-in truncated Newton (fmin_tnc) algorithm to optimize the parameters.
def NewtonCG(self)
    Use SciPy's built-in Newton-CG (fmin_ncg) algorithm to optimize the parameters.
def GeneticAlgorithm(self)
    Genetic algorithm, under development.
def Scan_Values(self, MathPhys=1)
    Scan through parameter values.
def ScanMVals(self)
    Scan through the mathematical parameter space.
def ScanPVals(self)
    Scan through the physical parameter space.
def SinglePoint(self)
    A single-point objective function computation.
def Gradient(self)
    A single-point gradient computation.
def Hessian(self)
    A single-point Hessian computation.
def Precondition(self)
    An experimental method to determine the parameter scale factors that result in the best conditioned Hessian.
def FDCheckG(self)
    Finite-difference checker for the objective function gradient.
def FDCheckH(self)
    Finite-difference checker for the objective function Hessian.
def set_goodstep(self, val)
    Mark in each target that the previous optimization step was good or bad.
def save_mvals_to_input(self, vals, priors=None, jobtype=None)
    Write a new input file (%s_save.in) containing the current mathematical parameters.
def readchk(self)
    Read the checkpoint file for the main optimizer.
def writechk(self)
    Write the checkpoint file for the main optimizer.

Data attributes

OptTab
    A list of all the things we can ask the optimizer to do.
FF
    The force field itself.
Objective
    The objective function.
root
    The root directory.
mvals0
    The original parameter values.
mvals_bak
    Whether to back up an existing saved input file before overwriting it.
bhyp
    Whether the penalty function is hyperbolic.
uncert
    Target types which introduce uncertainty into the objective function.
excision
    The indices to be excluded from the Hessian update.
iteration
    The current iteration number.
iterinit
    The initial iteration number (nonzero if we restart a previous run).
goodstep
    Specify whether the previous optimization step was good or bad.
failmsg
    Print a special message on failure.

Referenced helper functions (from forcebalance.nifty and forcebalance.parser)

def col(vec) -- Given any list, array, or matrix, return a 1-column 2D array.
def row(vec) -- Given any list, array, or matrix, return a 1-row 2D array.
def flat(vec) -- Given any list, array, or matrix, return a single-index array.
def printcool(text, sym="#", bold=False, color=2, ansi=None, bottom='-', minwidth=50, center=True, sym2="=") -- Cool-looking printout for slick formatting of output.
def printcool_dictionary(Dict, title="Dictionary Keys : Values", bold=False, color=2, keywidth=25, topwidth=50, center=True, leftpad=0) -- See documentation for printcool; a nice way to print out keys/values in a dictionary.
def pvec1d(vec1d, precision=1, format="e", loglevel=INFO) -- Printout of a 1-D vector.
def pmat2d(mat2d, precision=1, format="e", loglevel=INFO) -- Printout of a 2-D array.
def warn_press_key(warning, timeout=10) -- Print a warning and wait for a key press (with timeout).
def invert_svd(X, thresh=1e-12) -- Invert a matrix using singular value decomposition.
def wopen(dest, binary=False) -- If trying to write to a symbolic link, remove it first.
def bak(path, dest=None, cwd=None, start=1) -- Back up a file before overwriting it.
def est124(val) -- Given any positive floating point value, return a value [124]e+xx that is closest to it in log space.
def f1d5p(f, h) -- A highly accurate five-point finite difference stencil for computing derivatives of a function.
def f1d7p(f, h) -- A highly accurate seven-point finite difference stencil for computing derivatives of a function.
def fdwrap(func, mvals0, pidx, key=None, **kwargs) -- A function wrapper for finite difference designed for differentiating 'get'-type functions.
def parse_inputs(input_file=None) -- Parse through the input file and read all user-supplied options.