ForceBalance API  1.3
Automated optimization of force fields and empirical potentials
optimizer.py
1 """ @package forcebalance.optimizer Optimization algorithms.
2 
3 My current implementation is to have a single optimizer class with several methods
4 contained inside.
5 
6 @author Lee-Ping Wang
7 @date 12/2011
8 
9 """
10 from __future__ import division
11 from __future__ import print_function
12 
13 from builtins import str
14 from builtins import range
15 from builtins import object
16 import os, pickle, re, sys
17 # import cProfile
18 import numpy as np
19 from numpy.linalg import multi_dot
20 from copy import deepcopy
21 import forcebalance
22 from forcebalance.parser import parse_inputs
23 from forcebalance.nifty import col, flat, row, printcool, printcool_dictionary, pvec1d, pmat2d, warn_press_key, invert_svd, wopen, bak, est124
24 from forcebalance.finite_difference import f1d7p, f1d5p, fdwrap
25 from collections import OrderedDict
26 import random
27 import time, datetime
28 from forcebalance.output import getLogger, DEBUG, CleanStreamHandler
29 logger = getLogger(__name__)
30 
31 # Global variable corresponding to the iteration number.
32 ITERATION = 0
33 
34 def Counter():
35  global ITERATION
36  return ITERATION
37 
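# Illustrative note (an assumption, not shown in this file): other ForceBalance
# modules can query the current iteration through Counter(), e.g. to build a
# per-iteration scratch directory name:
#
#   from forcebalance.optimizer import Counter
#   iter_dir = "iter_%04i" % Counter()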
38 class Optimizer(forcebalance.BaseClass):
39  """ Optimizer class. Contains several methods for numerical optimization.
40 
41  For various reasons, the optimizer depends on the force field and fitting
42  targets (i.e. we cannot treat it as a fully independent numerical optimizer).
43  The dependency is rather weak, which suggests that I could remove it someday.
44  """
45 
46  def __init__(self,options,Objective,FF):
47  """ Create an Optimizer object.
48 
49  The optimizer depends on both the FF and the fitting targets so there
50  is a chain of dependencies: FF --> FitSim --> Optimizer, and FF --> Optimizer
51 
52  Here's what we do:
53  - Take options from the parser
54  - Pass in the objective function, force field, all fitting targets
55 
56  """
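# A minimal construction sketch (illustrative; the module paths for FF and
# Objective are assumptions based on the standard forcebalance layout, and the
# input file name is hypothetical):
#
#   from forcebalance.parser import parse_inputs
#   from forcebalance.forcefield import FF
#   from forcebalance.objective import Objective
#   from forcebalance.optimizer import Optimizer
#
#   options, tgt_opts = parse_inputs('optimize.in')
#   forcefield = FF(options)
#   objective = Objective(options, tgt_opts, forcefield)
#   optimizer = Optimizer(options, objective, forcefield)
#   xk = optimizer.Run()      # returns the optimized mathematical parameters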
57  super(Optimizer, self).__init__(options)
58 
59 
60  self.OptTab = {'NEWTONRAPHSON' : self.NewtonRaphson,
61  'OPTIMIZE' : self.NewtonRaphson,
62  'NEWTON' : self.NewtonRaphson,
63  'NR' : self.NewtonRaphson,
64  'BFGS' : self.BFGS,
65  'SCIPY_BFGS' : self.Scipy_BFGS,
66  'POWELL' : self.Powell,
67  'SIMPLEX' : self.Simplex,
68  'ANNEAL' : self.Anneal,
69  'BASIN' : self.BasinHopping,
70  'BASINHOPPING' : self.BasinHopping,
71  'GENETIC' : self.GeneticAlgorithm,
72  'CG' : self.ConjugateGradient,
73  'CONJUGATEGRADIENT' : self.ConjugateGradient,
74  'TNC' : self.TruncatedNewton,
75  'NCG' : self.NewtonCG,
76  'SCAN_MVALS' : self.ScanMVals,
77  'SCAN_PVALS' : self.ScanPVals,
78  'SINGLE' : self.SinglePoint,
79  'GRADIENT' : self.Gradient,
80  'HESSIAN' : self.Hessian,
81  'PRECONDITION' : self.Precondition,
82  'FDCHECKG' : self.FDCheckG,
83  'FDCHECKH' : self.FDCheckH
84  }
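# Dispatch sketch: Run() selects the optimization method by looking up the
# parsed jobtype keyword (stored upper-case, judging from the keys above):
#
#   method = self.OptTab[self.jobtype]   # e.g. 'NEWTON' -> self.NewtonRaphson
#   xk = method()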
85 
86  #======================================#
87  # Options that are given by the parser #
88  #======================================#
89 
90  self.set_option(options,'root','root')
91 
92  self.set_option(options,'jobtype','jobtype')
93 
94  self.set_option(options,'trust0','trust0')
95 
96  self.set_option(options,'mintrust','mintrust')
97 
98  self.set_option(options,'eig_lowerbound','eps')
99 
100  self.set_option(options,'step_lowerbound')
101 
102  self.set_option(options,'lm_guess','lmg')
103 
104  self.set_option(options,'finite_difference_h','h')
105  self.set_option(options,'finite_difference_h','h0')
106 
107  self.set_option(options,'finite_difference_factor','fdf')
108 
109  self.set_option(options,'objective_history','hist')
110 
111  self.set_option(options,'convergence_objective')
112 
113  self.set_option(options,'convergence_step')
114 
115  self.set_option(options,'convergence_gradient')
116 
117  self.set_option(options,'converge_lowq')
118 
119  self.set_option(options,'maxstep','maxstep')
120 
121  self.set_option(options,'scanindex_num','idxnum')
122 
123  self.set_option(options,'scanindex_name','idxname')
124 
125  self.set_option(options,'scan_vals','scan_vals')
126 
127  self.set_option(options,'readchk','rchk_fnm')
128 
129  self.set_option(options,'writechk','wchk_fnm')
130 
131  self.set_option(options,'writechk_step','wchk_step')
132 
133  self.set_option(options,'adaptive_factor','adapt_fac')
134 
135  self.set_option(options,'adaptive_damping','adapt_damp')
136 
137  self.set_option(options,'print_gradient','print_grad')
138 
139  self.set_option(options,'print_hessian','print_hess')
140 
141  self.set_option(options,'print_parameters','print_vals')
142 
143  self.set_option(options,'error_tolerance','err_tol')
144 
145  self.set_option(options,'search_tolerance','search_tol')
146  self.set_option(options,'read_mvals')
147  self.set_option(options,'read_pvals')
148 
149  self.set_option(options, 'backup')
150 
151  self.set_option(options, 'input_file')
152 
153  self.set_option(options, 'criteria')
154 
155  self.mvals_bak = 1
156 
157  self.failmsg = 0
158 
159  self.goodstep = 0
160 
161  self.iterinit = 0
162 
163  self.iteration = 0
164 
165  global ITERATION
166  ITERATION = 0
167  #======================================#
168  # Variables which are set here #
169  #======================================#
170 
171  self.Objective = Objective
172 
173  self.bhyp = Objective.Penalty.ptyp not in (2, 3)
174 
175  self.FF = FF
176 
178  self.uncert = any([any([i in tgt.type.lower() for i in ['liquid', 'lipid', 'thermo']]) for tgt in self.Objective.Targets])
179  self.bakdir = os.path.join(os.path.splitext(options['input_file'])[0]+'.bak')
180  self.resdir = os.path.join('result',os.path.splitext(options['input_file'])[0])
181 
182  #======================================#
183  # Variables from the force field #
184  #======================================#
185 
186  self.excision = list(FF.excision)
187 
188 
189  self.np = FF.np
190 
191 
192  if options['read_mvals'] is not None:
193  self.mvals0 = np.array(options['read_mvals'])
194  elif options['read_pvals'] is not None:
195  self.mvals0 = FF.create_mvals(options['read_pvals'])
196  else:
197  self.mvals0 = np.zeros(self.FF.np)
198 
199 
200  if options['continue']: self.recover()
201 
202 
203  printcool_dictionary(self.PrintOptionDict, title="Setup for optimizer")
204 
205  self.readchk()
206 
207  def recover(self):
208 
209  base, ext = os.path.splitext(self.input_file)
210  if not base.endswith(".sav"):
211  savfnm = base+".sav"
212  else:
213  savfnm = base+ext
214 
215  if os.path.exists(savfnm):
216  soptions, stgt_opts = parse_inputs(savfnm)
217  if soptions['read_mvals'] and np.max(np.abs(self.mvals0)) != 0.0:
218  warn_press_key("Save file read_mvals will overwrite input file.\nInput file: %s\nSave file : %s\n" % (self.mvals0, soptions['read_mvals']))
219  self.mvals0 = np.array(soptions['read_mvals'])
220  self.read_mvals = np.array(soptions['read_mvals'])
221 
222  maxrd = max([T.maxrd() for T in self.Objective.Targets])
223  if maxrd < 0: return
224 
225  if len(set([T.maxrd() for T in self.Objective.Targets])) == 1 and any([((T.maxid() - T.maxrd()) > 0) for T in self.Objective.Targets]):
226  maxrd += 1
227  printcool("Continuing optimization from iteration %i\nThese targets will load data from disk:\n%s" % \
228  (maxrd, '\n'.join([T.name for T in self.Objective.Targets if T.maxrd() == maxrd])), color=4)
229 
231  for T in self.Objective.Targets:
232  if T.maxrd() == maxrd:
233  T.rd = T.tempdir
234  if os.path.exists(os.path.join(T.absrd(), 'mvals.txt')):
235  tmvals = np.loadtxt(os.path.join(T.absrd(), 'mvals.txt'))
236  if len(tmvals) > 0 and np.max(np.abs(tmvals - self.mvals0)) > 1e-4:
237  warn_press_key("mvals.txt in %s does not match loaded parameters.\nSave file : %s\nParameters : %s\n" % (T.absrd(), tmvals, self.mvals0))
238  else:
239  warn_press_key("mvals.txt does not exist in %s." % (T.absrd()))
240  self.iterinit = maxrd
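# Continuation sketch (hedged): recover() is reached via the 'continue' option
# (see the constructor above). It re-parses <input-basename>.sav for saved
# mvals, asks each target for its highest completed iteration, and sets
# self.iterinit so that the optimizer resumes counting from there, e.g.
#
#   options['continue'] = True   # Optimizer(...) will then read optimize.sav
#                                # for an input file named optimize.in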
241 
242  def set_goodstep(self, val):
243  """ Mark in each target that the previous optimization step was good or bad. """
244  self.goodstep = val
245  for T in self.Objective.Targets:
246  T.goodstep = val
248  def save_mvals_to_input(self, vals, priors=None, jobtype=None):
249  """ Write a new input file (<input_basename>.sav) containing the current mathematical parameters. """
250  if self.FF.use_pvals:
251  mvals = self.FF.create_mvals(vals)
252  else:
253  mvals = vals.copy()
254 
255  base, ext = os.path.splitext(self.input_file)
256  if not base.endswith(".sav"):
257  outfnm = base+".sav"
258  else:
259  outfnm = base+ext
260 
261  if self.input_file is not None and os.path.exists(self.input_file):
262  fin = open(self.input_file).readlines()
263  have_mvals = 0
264  have_priors = 0
265  in_mvals = 0
266  in_priors = 0
267  in_options = 0
268  if os.path.exists(outfnm) and self.mvals_bak:
269  bak(outfnm, dest=self.bakdir)
270  self.mvals_bak = 0
271  fout = open(outfnm, 'w')
272  for line in fin:
273  line1 = line.split("#")[0].strip().lower()
274  if line1.startswith("$options"):
275  in_options = 1
276  if in_options and line1.startswith("$end"):
277  if not have_mvals:
278  print("read_mvals", file=fout)
279  print(self.FF.sprint_map(mvals, precision=8), file=fout)
280  print("/read_mvals", file=fout)
281  have_mvals = 1
282  if not have_priors and priors is not None:
283  print("priors", file=fout)
284  print('\n'.join([" %-35s : %.1e" % (k, priors[k]) for k in list(priors.keys())]), file=fout)
285  print("/priors", file=fout)
286  have_priors = 1
287  in_options = 0
288  elif in_options and line1.startswith('jobtype') and jobtype is not None:
289  print("jobtype %s" % jobtype, file=fout)
290  continue
291  if line1.startswith("/read_mvals"):
292  in_mvals = 0
293  if line1.startswith("/priors"):
294  in_priors = 0
295  if in_mvals: continue
296  if in_priors: continue
297  print(line, end='', file=fout)
298  if line1.startswith("read_mvals"):
299  if have_mvals:
300  logger.error("Encountered more than one read_mvals section\n")
301  raise RuntimeError
302  have_mvals = 1
303  in_mvals = 1
304  print(self.FF.sprint_map(mvals, precision=8), file=fout)
305  if line1.startswith("priors") and priors is not None:
306  if have_priors:
307  logger.error("Encountered more than one priors section\n")
308  raise RuntimeError
309  have_priors = 1
310  in_priors = 1
311  print('\n'.join([" %-35s : %.1e" % (k, priors[k]) for k in list(priors.keys())]), file=fout)
312  return outfnm
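# Output sketch (illustrative; the exact spacing and parameter labels come
# from FF.sprint_map): the saved <base>.sav file carries the current
# mathematical parameters inside the $options block, roughly like
#
#   $options
#   jobtype newton
#   read_mvals
#      0 [  1.52678096e-01 ] : VDWS:OW
#      1 [ -3.12224894e-02 ] : VDWT:OW
#   /read_mvals
#   $end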
313 
314  def Run(self):
315  """ Call the appropriate optimizer. This is the method we might want to call from an executable. """
316  now = datetime.datetime.now().strftime("%Y-%m-%d %I:%M %p")
317  logger.info("Calculation started at %s\n" % now)
318  t0 = time.time()
319  xk = self.OptTab[self.jobtype]()
320 
321 
322  print_parameters = True
323 
324  if self.jobtype.lower() == 'precondition': print_parameters=False
325  if xk is None and (self.mvals0 == np.zeros(self.FF.np)).all():
326  logger.info("Parameter file same as original; will not be printed to results folder.\n")
327  print_parameters = False
328  elif xk is None:
329  xk = self.mvals0
330 
331 
332  check_after = False
333  if check_after:
334  self.mvals0 = self.FF.create_pvals(xk) if self.FF.use_pvals else xk.copy()
335  self.FDCheckG()
336 
337 
338  if print_parameters:
339  if self.FF.use_pvals:
340  bar = printcool("Final optimization parameters:",color=4)
341  self.FF.print_map(self.FF.create_mvals(xk))
342  bar = printcool("Final physical parameters:",color=4)
343  self.FF.print_map(xk)
344  else:
345  bar = printcool("Final optimization parameters:",color=4)
346  self.FF.print_map(xk)
347  bar = printcool("Final physical parameters:",color=4)
348  self.FF.print_map(self.FF.create_pvals(xk))
349  logger.info(bar)
350  if self.backup:
351  for fnm in self.FF.fnms:
352  if os.path.exists(os.path.join(self.resdir, fnm)):
353  bak(os.path.join(self.resdir, fnm))
354  self.FF.make(xk,printdir=self.resdir)
355  # logger.info("The force field has been written to the '%s' directory.\n" % self.resdir)
356  outfnm = self.save_mvals_to_input(xk)
357  # logger.info("Input file with optimization parameters saved to %s.\n" % outfnm)
358  printcool("The force field has been written to the %s directory.\n"
359  "Input file with optimization parameters saved to %s." % (self.resdir, outfnm), color=0)
360  # "To reload these parameters, use %s as the input\n"
361  # "file without changing the '%s' directory." %
362  # (outfnm, outfnm, self.FF.ffdir), color=0, center=False, sym2='-')
363 
364 
365  self.writechk()
366 
367 
368  logger.info("Wall time since calculation start: %.1f seconds\n" % (time.time() - t0))
369  if self.failmsg:
370  bar = printcool("It is possible to commit no errors and still lose.\nThat is not a weakness. That is life.",ansi="40;93")
371  else:
372  bar = printcool("Calculation Finished.\n---==( May the Force be with you! )==---",ansi="1;44;93")
373 
374  return xk
375 
376  def adjh(self, trust):
377  # The finite difference step size should be at most 1% of the trust radius.
378  h = min(self.fdf*trust, self.h0)
379  if h != self.h:
380  logger.info("Setting finite difference step to %.4e\n" % h)
381  self.h = h
382  for tgt in self.Objective.Targets:
383  tgt.h = h
384 
385  def MainOptimizer(self,b_BFGS=0):
386  """
388  The main ForceBalance adaptive trust-radius pseudo-Newton
389  optimizer. Tried and true in many situations. :)
390 
391  Usually this function is called with the BFGS or NewtonRaphson
392  method. The NewtonRaphson method is consistently the best
393  method I have, because I always provide at least an
394  approximate Hessian to the objective function. The BFGS
395  method works well, but if gradients are cheap the SciPy_BFGS
396  method also works nicely.
397 
398  The method adaptively changes the step size. If the step is
399  sufficiently good (i.e. the objective function goes down by a
400  large fraction of the predicted decrease), then the step size
401  is increased; if the step is bad, then it rejects the step and
402  tries again.
403 
404  The optimization is terminated after either a function value or
405  step size tolerance is reached.
406 
407  @param[in] b_BFGS Switch to use BFGS (True) or Newton-Raphson (False)
408 
409  """
410 
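# Sketch of the adaptive rule described above (mirrors the code further down
# in this loop; Quality = actual / expected change in the objective, and the
# adjustment only applies when self.trust0 > 0):
#
#   if Quality <= ThreLQ:                      # low-quality step: shrink
#       trust = max(ndx / (1.0 + self.adapt_fac), self.mintrust)
#   elif Quality >= ThreHQ and bump:           # high-quality step: grow
#       trust += self.adapt_fac * trust * np.exp(
#           -self.adapt_damp * (trust / self.trust0 - 1.0))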
411  if self.trust0 < 0.0:
412  detail = "(Hessian Diagonal Search)"
413  elif self.adapt_fac != 0.0:
414  detail = "(Adaptive Trust Radius)"
415  else:
416  detail = "(Trust Radius)"
417  printcool("Main Optimizer \n%s Method %s\n\n"
418  "\x1b[0mConvergence criteria (%i of 3 needed):\n"
419  "\x1b[0mObjective Function : %.3e\n"
420  "\x1b[0mNorm of Gradient : %.3e\n"
421  "\x1b[0mParameter step size : %.3e" %
422  ("BFGS" if b_BFGS else "Newton-Raphson", detail, self.criteria,
423  self.convergence_objective, self.convergence_gradient,
424  self.convergence_step), ansi=1, bold=1)
425 
426  # Print a warning if optimization is unlikely to converge
427  if self.uncert and self.convergence_objective < 1e-3:
428  warn_press_key("Condensed phase targets detected - may not converge with current choice of"
429  " convergence_objective (%.e)\nRecommended range is 1e-2 to 1e-1 for this option." % self.convergence_objective)
430 
431  #========================#
432  #| Initialize variables |#
433  #========================#
434  # Order of derivatives
435  Ord = 1 if b_BFGS else 2
436  # Iteration number counter.
437  global ITERATION
438  ITERATION = self.iterinit
439  self.iteration = self.iterinit
440  # Indicates if the optimization step was "good" (i.e. not rejected).
441  self.set_goodstep(1)
442  # Indicates if the optimization is currently at the lowest value of the objective function so far.
443  Best_Step = 1
444  # Objective function history.
445  X_hist = np.array([])
446  # Trust radius.
447  trust = abs(self.trust0)
448  # Current value of the parameters.
449  xk = self.mvals0.copy()
450  # The current optimization step.
451  dx = np.zeros(self.FF.np)
452  # Length of the current optimization step.
453  ndx = 0.0
454  # Color indicating the quality of the optimization step.
455  color = "\x1b[1m"
456  # Ratio of actual objective function change to expected change.
457  Quality = 0.0
458  # Threshold for "low quality step" which decreases trust radius.
459  ThreLQ = 0.25
460  # Threshold for "high quality step" which increases trust radius.
461  ThreHQ = 0.75
462  printcool("Color Key for Objective Function -=X2=-\n\x1b[1mBold\x1b[0m = Initial step\n" \
463  "\x1b[92mGreen = Current lowest value of objective function%s\x1b[0m\n" \
464  "\x1b[91mRed = Objective function rises, step rejected\x1b[0m\n" \
465  "\x1b[0mNo color = Not at the lowest value" % (" (best estimate)" if self.uncert else ""), \
466  bold=0, color=0, center=[True, False, False, False, False])
467  # Optimization steps before this one are ineligible for consideration for "best step".
468  Best_Start = 0
469  criteria_satisfied = {'step' : False, 'grad' : False, 'obj' : False}
470 
471  def print_progress(itn, nx, nd, ng, clr, x, std, qual):
472  # Step number, norm of parameter vector / step / gradient, objective function value, change from previous steps, step quality.
473  logger.info("\n")
474  logger.info("%6s%12s%12s%12s%14s%12s%12s\n" % ("Step", " |k| "," |dk| "," |grad| "," -=X2=- ","Delta(X2)", "StepQual"))
475  logger.info("%6i%12.3e%12.3e%12.3e%s%14.5e\x1b[0m%12.3e% 11.3f\n\n" % (itn, nx, nd, ng, clr, x, std, qual))
476 
477  #=====================================#
478  #| Nonlinear Iterations |#
479  #| Loop until convergence is reached |#
480  #=====================================#
481  while 1:
482  #================================#
483  #| Evaluate objective function. |#
484  #================================#
485  if len(self.chk.keys()) > 0 and ITERATION == self.iterinit:
486  printcool("Iteration %i: Reading initial objective, gradient, Hessian from checkpoint file" % (ITERATION), color=4, bold=0)
487  logger.info("Reading initial objective, gradient, Hessian from checkpoint file\n")
488  xk, X, G, H = self.chk['xk'], self.chk['X'], self.chk['G'], self.chk['H']
489  X_hist, trust = self.chk['X_hist'], self.chk['trust']
490  else:
491  self.adjh(trust)
492  printcool("Iteration %i: Evaluating objective function\nand derivatives through %s order" % (ITERATION, "first" if Ord == 1 else "second"), color=4, bold=0)
493  data = self.Objective.Full(xk,Ord,verbose=True)
494  X, G, H = data['X'], data['G'], data['H']
495  trustprint = ''
496  #================================#
497  #| Assess optimization step. |#
498  #================================#
499  if ITERATION > self.iterinit:
500  dX_actual = X - X_prev
501  Best_Step = X < np.min(X_hist[Best_Start:])
502  try:
503  Quality = dX_actual / dX_expect
504  except:
505  # This should only be encountered in the Hessian diagonal search code (i.e. trust0 < 0).
506  logger.warning("Warning: Step size of zero detected (i.e. wrong direction). "
507  "Try reducing the finite_difference_h parameter\n")
508  Quality = 0.0
509  if X > (X_prev + max(self.err_tol, self.convergence_objective)):
510  #================================#
511  #| Reject step if |#
512  #| objective function rises. |#
513  #================================#
514  self.set_goodstep(0)
515  print_progress(ITERATION, nxk, ndx, ngd, "\x1b[91m", X, X-X_prev, Quality)
516  xk = xk_prev.copy()
517  trust = max(ndx*(1./(1+self.adapt_fac)), self.mintrust)
518  trustprint = "Reducing trust radius to % .4e\n" % trust
519  if self.uncert:
520  #================================#
521  #| Re-evaluate the objective |#
522  #| function and gradients at |#
523  #| the previous parameters. |#
524  #================================#
525  printcool("Objective function rises!\nRe-evaluating at the previous point..",color=1)
526  ITERATION += 1
527  self.iteration += 1
528  Best_Start = ITERATION - self.iterinit
529  Best_Step = 1
530  self.adjh(trust)
531  X_hist = np.append(X_hist, X)
532  # Write checkpoint file.
533  # (Lines copied from below for a good step.)
534  self.chk = {'xk': xk, 'X' : X, 'G' : G, 'H': H, 'X_hist': X_hist, 'trust': trust}
535  if self.wchk_step:
536  self.writechk()
537  outfnm = self.save_mvals_to_input(xk)
538  logger.info("Input file with saved parameters: %s\n" % outfnm)
539  # Check for whether the maximum number of optimization cycles is reached.
540  if ITERATION == self.maxstep:
541  logger.info("Maximum number of optimization steps reached (%i)\n" % ITERATION)
542  break
543  data = self.Objective.Full(xk,Ord,verbose=True)
544  self.set_goodstep(1)
545  X, G, H = data['X'], data['G'], data['H']
546  ndx = 0
547  nxk = np.linalg.norm(xk)
548  ngd = np.linalg.norm(G)
549  Quality = 0.0
550  color = "\x1b[92m"
551  else:
552  #================================#
553  #| Go back to the start of loop |#
554  #| and take a reduced step. |#
555  #================================#
556  printcool("Objective function rises!\nTaking another step from previous point..",color=1)
557  X = X_prev
558  G = G_prev.copy()
559  H = H_stor.copy()
560  data = deepcopy(datastor)
561  else:
562  self.set_goodstep(1)
563  #================================#
564  #| Adjust step size based on |#
565  #| step quality. |#
566  #================================#
567  if Quality <= ThreLQ and self.trust0 > 0:
568  trust = max(ndx*(1./(1+self.adapt_fac)), self.mintrust)
569  trustprint = "Low quality step, reducing trust radius to % .4e\n" % trust
570  elif Quality >= ThreHQ and bump and self.trust0 > 0:
571  curr_trust = trust
572  trust += self.adapt_fac*trust*np.exp(-1*self.adapt_damp*(trust/self.trust0 - 1))
573  if trust > curr_trust:
574  trustprint = "Increasing trust radius to % .4e\n" % trust
575  color = "\x1b[92m" if Best_Step else "\x1b[0m"
576  if Best_Step:
577  if self.backup:
578  for fnm in self.FF.fnms:
579  if os.path.exists(os.path.join(self.resdir, fnm)):
580  bak(os.path.join(self.resdir, fnm))
581  self.FF.make(xk,printdir=self.resdir)
582  # logger.info("The force field has been written to the '%s' directory.\n" % self.resdir)
583  outfnm = self.save_mvals_to_input(xk)
584  # logger.info("Input file with optimization parameters saved to %s.\n" % outfnm)
585  printcool("The force field has been written to the %s directory.\n"
586  "Input file with optimization parameters saved to %s." % (self.resdir, outfnm), color=0)
587  #================================#
588  #| Hessian update for BFGS. |#
589  #================================#
590  if b_BFGS:
591  Hnew = H_stor.copy()
592  Dx = col(xk - xk_prev)
593  Dy = col(G - G_prev)
594  Mat1 = (np.dot(Dy,Dy.T))/(np.dot(Dy.T,Dx))[0,0]
595  Mat2 = (np.dot(np.dot(Hnew,Dx),np.dot(Hnew,Dx).T))/(multi_dot([Dx.T,Hnew,Dx]))[0,0]
596  Hnew += Mat1-Mat2
597  H = Hnew.copy()
598  data['H'] = H.copy()
599  # (Experimental): Deleting lines in the parameter file
600  if len(self.FF.prmdestroy_this) > 0:
601  self.FF.prmdestroy_save.append(self.FF.prmdestroy_this)
602  self.FF.linedestroy_save.append(self.FF.linedestroy_this)
603  # Update objective function history.
604  X_hist = np.append(X_hist, X)
605  # Take the stdev over the previous (hist) values.
606  # Multiply by 2, so when hist=2 this is simply the difference.
607  stdfront = np.std(X_hist[-self.hist:]) if len(X_hist) > self.hist else np.std(X_hist)
608  stdfront *= 2
609  dX2_sign = -1 if (len(X_hist) > 1 and X_hist[-1] < X_hist[-2]) else 1
610  #================================#
611  #| Print optimization progress. |#
612  #================================#
613  nxk = np.linalg.norm(xk)
614  ngd = np.linalg.norm(G)
615  if self.goodstep:
616  print_progress(ITERATION, nxk, ndx, ngd, color, X, dX2_sign*stdfront, Quality)
617  #================================#
618  #| Print objective function, |#
619  #| gradient and Hessian. |#
620  #================================#
621  if self.print_grad:
622  bar = printcool("Total Gradient",color=4)
623  self.FF.print_map(vals=G,precision=8)
624  logger.info(bar)
625  if self.print_hess:
626  bar = printcool("Total Hessian",color=4)
627  pmat2d(H,precision=8)
628  logger.info(bar)
629  for key, val in self.Objective.ObjDict.items():
630  if Best_Step:
631  self.Objective.ObjDict_Last[key] = val
632  #================================#
633  #| Check convergence criteria. |#
634  #================================#
635  # Three conditions for checking convergence criteria:
636  # 1) If any of the targets contain statistical uncertainty
637  # 2) If the step quality is above the "low quality" threshold, -or- if we allow convergence on low quality steps
638  # 3) If we continued from a previous run and this is the first objective function evaluation
639  if self.uncert or (self.converge_lowq or Quality > ThreLQ) or (ITERATION == self.iterinit and self.iterinit > 0):
640  if ngd < self.convergence_gradient:
641  logger.info("Convergence criterion reached for gradient norm (%.2e)\n" % self.convergence_gradient)
642  criteria_satisfied['grad'] = True
643  else: criteria_satisfied['grad'] = False
644  if ndx < self.convergence_step and ndx >= 0.0 and ITERATION > self.iterinit:
645  logger.info("Convergence criterion reached in step size (%.2e)\n" % self.convergence_step)
646  criteria_satisfied['step'] = True
647  else: criteria_satisfied['step'] = False
648  if stdfront < self.convergence_objective and len(X_hist) >= self.hist:
649  logger.info("Convergence criterion reached for objective function (%.2e)\n" % self.convergence_objective)
650  criteria_satisfied['obj'] = True
651  else: criteria_satisfied['obj'] = False
652  if sum(criteria_satisfied.values()) >= self.criteria: break
653  #================================#
654  #| Save optimization variables |#
655  #| before taking the next step. |#
656  #================================#
657  # Previous data from objective function call.
658  datastor = deepcopy(data)
659  # Previous objective function and derivatives
660  X_prev = X
661  G_prev = G.copy()
662  H_stor = H.copy()
663  # Previous optimization variables
664  xk_prev = xk.copy()
665  # Previous physical parameters
666  pk_prev = self.FF.create_pvals(xk)
667  #================================#
668  #| Calculate optimization step. |#
669  #================================#
670  logger.info(trustprint)
671  logger.info("Calculating nonlinear optimization step\n")
672  # Calculate the optimization step.
673  dx, dX_expect, bump = self.step(xk, data, trust)
674  # Increment the parameters.
675  xk += dx
676  ndx = np.linalg.norm(dx)
677  #===============================#
678  #| Second convergence check. |#
679  #===============================#
680  # At this point in the optimization cycle, we have G(i) --> dx(i) but not dx(i) --> G(i+1).
681  # Here we check if the step size is below threshold and decide to converge before
682  # evaluating the next gradient.
683  if self.uncert or (self.converge_lowq or Quality > ThreLQ) or (ITERATION == self.iterinit and self.iterinit > 0):
684  if ndx < self.convergence_step and ndx >= 0.0 and ITERATION > self.iterinit:
685  logger.info("Convergence criterion reached in step size (%.2e)\n" % self.convergence_step)
686  criteria_satisfied['step'] = True
687  else: criteria_satisfied['step'] = False
688  if sum(criteria_satisfied.values()) >= self.criteria: break
689  # Either of these conditions could cause a convergence failure:
690  # The maximum number of optimization cycles is reached.
691  if ITERATION == self.maxstep:
692  logger.info("Maximum number of optimization steps reached (%i)\n" % ITERATION)
693  break
694  # The step size is too small to continue. Sometimes happens for very rough objective functions.
695  if ITERATION > self.iterinit and ndx < self.step_lowerbound:
696  logger.info("Step size is too small to continue (%.3e < %.3e)\n" % (ndx, self.step_lowerbound))
697  break
698  #================================#
699  #| Increase iteration number. |#
700  #================================#
701  ITERATION += 1
702  self.iteration += 1
703  # The search code benefits from knowing the step size here.
704  if self.trust0 < 0:
705  trust = ndx
706  # Print parameter values.
707  if self.print_vals:
708  pk = self.FF.create_pvals(xk)
709  dp = pk - pk_prev
710  bar = printcool("Mathematical Parameters (Current + Step = Next)",color=5)
711  self.FF.print_map(vals=["% .4e %s %.4e = % .4e" % (xk_prev[i], '+' if dx[i] >= 0 else '-', abs(dx[i]), xk[i]) for i in range(len(xk))])
712  logger.info(bar)
713  bar = printcool("Physical Parameters (Current + Step = Next)",color=5)
714  self.FF.print_map(vals=["% .4e %s %.4e = % .4e" % (pk_prev[i], '+' if dp[i] >= 0 else '-', abs(dp[i]), pk[i]) for i in range(len(pk))])
715  logger.info(bar)
716  # Write checkpoint file.
717  self.chk = {'xk': xk, 'X' : X, 'G' : G, 'H': H, 'X_hist': X_hist, 'trust': trust}
718  if self.wchk_step:
719  self.writechk()
720  outfnm = self.save_mvals_to_input(xk)
721  logger.info("Input file with saved parameters: %s\n" % outfnm)
722 
723  cnvgd = sum(criteria_satisfied.values()) >= self.criteria
724  bar = printcool("\x1b[0m%s\x1b[0m\nFinal objective function value\nFull: % .6e Un-penalized: % .6e" %
725  ("\x1b[1mOptimization Converged" if cnvgd else "\x1b[1;91mConvergence Failure",
726  data['X'],data['X0']), color=2)
727  self.failmsg = not cnvgd
728  return xk
729 
730  def step(self, xk, data, trust):
731  """ Computes the next step in the parameter space. There are lots of tricks here that I will document later.
732 
733  @param[in] xk Current values of the mathematical parameters
734  @param[in] data Dictionary returned by Objective.Full, containing the objective value, gradient and Hessian
735  @param[in] trust The trust radius
736 
737  """
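# Step sketch (a summary of the code below, in the non-hyperbolic case): the
# basic move is a regularized Newton step, with the Levenberg shift (L-1)**2
# chosen by Brent's method so that the step length matches the trust radius
# whenever the raw Newton step is too long:
#
#   dx(L) = -inv(H + (L-1)**2 * I) . G
#   if |dx(1)| > trust: find L such that |dx(L)| ~= trust
#   dX_expect = 0.5 * dx.H.dx + dx.G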
738  from scipy import optimize
739 
740  X, G, H = (data['X0'], data['G0'], data['H0']) if self.bhyp else (data['X'], data['G'], data['H'])
741  H1 = H.copy()
742  H1 = np.delete(H1, self.excision, axis=0)
743  H1 = np.delete(H1, self.excision, axis=1)
744  Eig = np.linalg.eigh(H1)[0] # Diagonalize Hessian
745  Emin = min(Eig)
746  if Emin < self.eps: # Mix in SD step if Hessian minimum eigenvalue is negative
747  # Experiment.
748  Adj = max(self.eps, 0.01*abs(Emin)) - Emin
749  logger.info("Hessian has a small or negative eigenvalue (%.1e), mixing in some steepest descent (%.1e) to correct this.\n" % (Emin, Adj))
750  logger.info("Eigenvalues are:\n")
751  pvec1d(Eig)
752  H += Adj*np.eye(H.shape[0])
753 
754  if self.bhyp:
755  G = np.delete(G, self.excision)
756  H = np.delete(H, self.excision, axis=0)
757  H = np.delete(H, self.excision, axis=1)
758  xkd = np.delete(xk, self.excision)
759  if self.Objective.Penalty.fmul != 0.0:
760  warn_press_key("Using the multiplicative hyperbolic penalty is discouraged!")
761  # This is the gradient and Hessian without the contributions from the hyperbolic constraint.
762  Obj0 = {'X':X,'G':G,'H':H}
763  class Hyper(object):
764  def __init__(self, HL, Penalty):
765  self.H = HL.copy()
766  self.dx = 1e10 * np.ones(len(HL))
767  self.Val = 0
768  self.Grad = np.zeros(len(HL))
769  self.Hess = np.zeros((len(HL),len(HL)))
770  self.Penalty = Penalty
771  self.iter = 0
772  def _compute(self, dx):
773  self.dx = dx.copy()
774  Tmp = np.dot(self.H, dx)
775  Reg_Term = self.Penalty.compute(xkd+flat(dx), Obj0)
776  self.Val = (X + np.dot(dx, G) + 0.5*np.dot(dx,Tmp) + Reg_Term[0] - data['X'])
777  #self.Val = (X + np.dot(dx, G) + 0.5*row(dx)*Tmp + Reg_Term[0] - data['X'])[0,0]
778  self.Grad = G + Tmp + Reg_Term[1]
779  self.Hess = H + Reg_Term[2]
780  # print "_compute: iter = %i, val = %.6f, |grad| = %.6f" % (self.iter, self.Val, np.sqrt(np.dot(self.Grad, self.Grad)))
781  self.iter += 1
782  def compute_val(self, dx):
783  ddx = dx - self.dx
784  if np.dot(ddx, ddx) > 1e-16:
785  self._compute(dx)
786  return self.Val
787  def compute_grad(self, dx):
788  ddx = dx - self.dx
789  if np.dot(ddx, ddx) > 1e-16:
790  self._compute(dx)
791  return self.Grad
792  def compute_hess(self, dx):
793  ddx = dx - self.dx
794  if np.dot(ddx, ddx) > 1e-16:
795  self._compute(dx)
796  return self.Hess
797  def hyper_solver(L):
798  dx0 = np.zeros(len(xkd))
799  HL = H + (L-1)**2*np.eye(len(H))
800  HYP = Hyper(HL, self.Objective.Penalty)
801  # cProfile.runctx('HYP._compute(dx0)', globals={}, locals={'HYP': HYP, 'dx0': dx0})
802  # sys.exit()
803 
804  t0 = time.time()
805  # Opt1 = optimize.fmin_bfgs(HYP.compute_val,dx0,fprime=HYP.compute_grad,gtol=1e-5*np.sqrt(len(dx0)),full_output=True,disp=1)
806  # Opt1 = optimize.fmin_l_bfgs_b(HYP.compute_val,dx0,fprime=HYP.compute_grad,m=30,factr=1e7,pgtol=1e-4,iprint=0,disp=1,maxfun=1e5,maxiter=1e5)
807  Opt1 = optimize.fmin_l_bfgs_b(HYP.compute_val,dx0,fprime=HYP.compute_grad,m=30,factr=1e7,pgtol=1e-4,iprint=-1,disp=0,maxfun=1e5,maxiter=1e5)
808  logger.info("%.3f s (L-BFGS 1) ", time.time() - t0)
809 
810  t0 = time.time()
811  # Opt2 = optimize.fmin_bfgs(HYP.compute_val,-xkd,fprime=HYP.compute_grad,gtol=1e-5*np.sqrt(len(dx0)),full_output=True,disp=1)
812  # Opt2 = optimize.fmin_l_bfgs_b(HYP.compute_val,-xkd,fprime=HYP.compute_grad,m=30,factr=1e7,pgtol=1e-4,iprint=0,disp=1,maxfun=1e5,maxiter=1e5)
813  Opt2 = optimize.fmin_l_bfgs_b(HYP.compute_val,-xkd,fprime=HYP.compute_grad,m=30,factr=1e7,pgtol=1e-4,iprint=-1,disp=0,maxfun=1e5,maxiter=1e5)
814  logger.info("%.3f s (L-BFGS 2) ", time.time() - t0)
815 
816  dx1, sol1 = Opt1[0], Opt1[1]
817  dx2, sol2 = Opt2[0], Opt2[1]
818  dxb, sol = (dx1, sol1) if sol1 <= sol2 else (dx2, sol2)
819  for i in self.excision: # Reinsert deleted coordinates - don't take a step in those directions
820  dxb = np.insert(dxb, i, 0)
821  return dxb, sol
822  else:
823  # G0 and H0 are used for determining the expected function change.
824  G0 = G.copy()
825  H0 = H.copy()
826  G = np.delete(G, self.excision)
827  H = np.delete(H, self.excision, axis=0)
828  H = np.delete(H, self.excision, axis=1)
829 
830  logger.debug("Inverting Hessian:\n")
831  logger.debug(" G:\n")
832  pvec1d(G,precision=5, loglevel=DEBUG)
833  logger.debug(" H:\n")
834  pmat2d(H,precision=5, loglevel=DEBUG)
835 
836  Hi = invert_svd(H)
837  dx = flat(-1 * np.dot(Hi, col(G)))
838 
839  logger.debug(" dx:\n")
840  pvec1d(dx,precision=5, loglevel=DEBUG)
841 
842  for i in self.excision: # Reinsert deleted coordinates - don't take a step in those directions
843  dx = np.insert(dx, i, 0)
844 
845  def para_solver(L):
846  # Levenberg-Marquardt
847  # HT = H + (L-1)**2*np.diag(np.diag(H))
848  # Attempt to use plain Levenberg
849  HT = H + (L-1)**2*np.eye(len(H))
850  logger.debug("Inverting Scaled Hessian:\n")
851  logger.debug(" G:\n")
852  pvec1d(G,precision=5, loglevel=DEBUG)
853  logger.debug(" HT: (Scal = %.4f)\n" % (1+(L-1)**2))
854  pmat2d(HT,precision=5, loglevel=DEBUG)
855  Hi = invert_svd(HT)
856  dx = flat(-1 * np.dot(Hi, col(G)))
857  logger.debug(" dx:\n")
858  pvec1d(dx,precision=5, loglevel=DEBUG)
859  sol = flat(0.5*multi_dot([row(dx), H, col(dx)]))[0] + np.dot(dx,G)
860  for i in self.excision: # Reinsert deleted coordinates - don't take a step in those directions
861  dx = np.insert(dx, i, 0)
862  return dx, sol
863 
864  def solver(L):
865  return hyper_solver(L) if self.bhyp else para_solver(L)
866 
867  def trust_fun(L):
868  N = np.linalg.norm(solver(L)[0])
869  logger.info("Finding trust radius: H%+.4f*I, length %.4e (target %.4e)\n" % ((L-1)**2,N,trust))
870  # logger.debug("\rL = %.4e, Hessian diagonal addition = %.4e: found length %.4e, objective is %.4e\n" % (L, (L-1)**2, N, (N - trust)**2))
871  return (N - trust)**2
872 
873  def h_fun(L):
874  N = np.linalg.norm(solver(L)[0])
875  logger.debug("\rL = %.4e, Hessian diagonal addition = %.4e: found length %.4e, objective is %.4e\n" % (L, (L-1)**2, N, (N - trust)**2))
876  return (N - self.h)**2
877 
878  def search_fun(L):
879  # Evaluate ONLY the objective function. Most useful when
880  # the objective is cheap, but the derivative is expensive.
881  dx, sol = solver(L) # dx is how much the step changes from the previous step.
882  # This is our trial step.
883  xk_ = dx + xk
884  Result = self.Objective.Full(xk_,0,verbose=False,customdir="micro_%02i" % search_fun.micro)['X'] - data['X']
885  logger.info("Hessian diagonal search: H%+.4f*I, length %.4e, result % .4e\n" % ((L-1)**2,np.linalg.norm(dx),Result))
886  search_fun.micro += 1
887  return Result
888  search_fun.micro = 0
889 
890  if self.trust0 > 0: # This is the trust region code.
891  bump = False
892  dx, expect = solver(1)
893  dxnorm = np.linalg.norm(dx)
894  if dxnorm > trust:
895  bump = True
896  # Tried a few optimizers here, seems like Brent works well.
897  # Okay, the problem with Brent is that the tolerance is fractional.
898  # If the optimized value is zero, then it takes a lot of meaningless steps.
899  LOpt = optimize.brent(trust_fun,brack=(self.lmg,self.lmg*4),tol=1e-6)
900 
902  dx, expect = solver(LOpt)
903  dxnorm = np.linalg.norm(dx)
904  logger.info("Trust-radius step found (length %.4e), % .4e added to Hessian diagonal\n" % (dxnorm, (LOpt-1)**2))
905  else:
906  logger.info("Newton-Raphson step found (length %.4e)\n" % (dxnorm))
907 
908  else: # This is the search code.
909  # First obtain a step that is roughly the same length as the provided trust radius.
910  dx, expect = solver(1)
911  dxnorm = np.linalg.norm(dx)
912  if dxnorm > trust:
913  LOpt = optimize.brent(trust_fun,brack=(self.lmg,self.lmg*4),tol=1e-4)
914  dx, expect = solver(LOpt)
915  dxnorm = np.linalg.norm(dx)
916  else:
917  LOpt = 1
918  logger.info("Starting Hessian diagonal search with step size %.4e\n" % dxnorm)
919  bump = False
920  search_fun.micro = 0
921  Result = optimize.brent(search_fun,brack=(LOpt,LOpt*4),tol=self.search_tol,full_output=1)
922  if Result[1] > 0:
923  LOpt = optimize.brent(h_fun,brack=(self.lmg,self.lmg*4),tol=1e-6)
924  dx, expect = solver(LOpt)
925  dxnorm = np.linalg.norm(dx)
926  logger.info("Restarting search with step size %.4e\n" % dxnorm)
927  Result = optimize.brent(search_fun,brack=(LOpt,LOpt*4),tol=self.search_tol,full_output=1)
928 
930  dx, _ = solver(Result[0])
931  expect = Result[1]
932  logger.info("Optimization step found (length %.4e)\n" % np.linalg.norm(dx))
933 
934 
936  if self.Objective.Penalty.ptyp in [4,5,6]:
937  self.FF.make_redirect(dx+xk)
938 
939  return dx, expect, bump
940 
941  def NewtonRaphson(self):
942  """ Optimize the force field parameters using the Newton-Raphson method (@see MainOptimizer) """
943  return self.MainOptimizer(b_BFGS=0)
944 
945  def BFGS(self):
946  """ Optimize the force field parameters using the BFGS method; currently the recommended choice (@see MainOptimizer) """
947  return self.MainOptimizer(b_BFGS=1)
948 
949  def ScipyOptimizer(self,Algorithm="None"):
950  """ Driver for SciPy optimizations.
952  Using any of the SciPy optimizers requires that SciPy is installed.
953  This method first defines several wrappers around the objective function that the SciPy
954  optimizers can use. Then it calls the algorithm itself.
955 
956  @param[in] Algorithm The optimization algorithm to use, for example 'powell', 'simplex' or 'anneal'
957 
958  """
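# Wrapper sketch (illustrative): the wrappers defined below adapt
# Objective.Full, which returns a dict with 'X', 'G' and 'H', to the plain
# callables that SciPy expects, roughly
#
#   fx = lambda mvals: self.Objective.Full(mvals, Order=0, verbose=False)['X']
#   gx = lambda mvals: self.Objective.Full(mvals, Order=1, verbose=False)['G']
#   hx = lambda mvals: self.Objective.Full(mvals, Order=2, verbose=False)['H']
#
# plus the progress printing and step-size bookkeeping shown in wrap().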
959  from scipy import optimize
960 
961  self.prev_bad = False
962  self.xk_prev = self.mvals0.copy()
963  self.x_prev = 0.0
964  self.x_best = None
965 
966  def wrap(fin, Order=0, callback=True, fg=False):
967  def fout(mvals):
968  def print_heading():
969  logger.info('\n')
970  if Order == 2: logger.info("%9s%9s%12s%12s%14s%12s\n" % ("Eval(H)", " |k| "," |dk| "," |grad| "," -=X2=- ","Delta(X2)"))
971  elif Order == 1: logger.info("%9s%9s%12s%12s%14s%12s\n" % ("Eval(G)", " |k| "," |dk| "," |grad| "," -=X2=- ","Delta(X2)"))
972  else: logger.info("%9s%9s%12s%14s%12s\n" % ("Eval(X)", " |k| "," |dk| "," -=X2=- ","Delta(X2)"))
974  def print_results(color, newline=True):
975  if newline: head = '' ; foot = '\n'
976  else: head = '\r' ; foot = '\r'
977  if Order: logger.info(head + "%6i%12.3e%12.3e%12.3e%s%14.5e\x1b[0m%12.3e" % \
978  (fout.evals, np.linalg.norm(mvals), np.linalg.norm(mvals-self.xk_prev), np.linalg.norm(G), color, X, X - self.x_prev) + foot)
979  else: logger.info(head + "%6i%12.3e%12.3e%s%14.5e\x1b[0m%12.3e" % \
980  (fout.evals, np.linalg.norm(mvals), np.linalg.norm(mvals-self.xk_prev), color, X, X - self.x_prev) + foot)
981  Result = fin(mvals,Order=Order,verbose=False)
982  fout.evals += 1
983  X, G, H = [Result[i] for i in ['X','G','H']]
984  if callback:
985  if self.x_best is None or X <= self.x_best:
986  color = "\x1b[92m"
987  self.x_best = X
988  self.prev_bad = False
989  if self.print_vals:
990  logger.info('\n')
991  bar = printcool("Current Mathematical Parameters:",color=5)
992  self.FF.print_map(vals=["% .4e" % i for i in mvals])
993  for Tgt in self.Objective.Targets:
994  Tgt.meta_indicate()
995  self.Objective.Indicate()
996  print_heading()
997  print_results(color)
998  else:
999  self.prev_bad = True
1000  color = "\x1b[91m"
1001  if Order:
1002  print_heading()
1003  print_results(color)
1004  else:
1005  if not self.prev_bad: print_heading()
1006  print_results(color, newline=False)
1007  if Order:
1008  if np.linalg.norm(self.xk_prev - mvals) > 0.0:
1009  self.adjh(np.linalg.norm(self.xk_prev - mvals))
1010  self.xk_prev = mvals.copy()
1011  self.x_prev = X
1012  if Order == 2:
1013  return H
1014  elif Order == 1:
1015  if fg:
1016  return X, G
1017  else:
1018  return G
1019  else:
1020  if X != X:
1021  return 1e10
1022  else:
1023  return X
1024  fout.evals = 0
1025  return fout
1026 
1027  def xwrap(func,callback=True):
1028  return wrap(func, Order=0, callback=callback)
1029 
1030  def fgwrap(func,callback=True):
1031  return wrap(func, Order=1, callback=callback, fg=True)
1032 
1033  def gwrap(func,callback=True):
1034  return wrap(func, Order=1, callback=callback)
1035 
1036  def hwrap(func,callback=True):
1037  return wrap(func, Order=2, callback=callback)
1038 
1039  if Algorithm == "powell":
1040  printcool("Minimizing Objective Function using\nPowell's Conjugate Direction Method" , ansi=1, bold=1)
1041  return optimize.fmin_powell(xwrap(self.Objective.Full),self.mvals0,ftol=self.convergence_objective,xtol=self.convergence_step,maxiter=self.maxstep)
1042  elif Algorithm == "simplex":
1043  printcool("Minimizing Objective Function using\nNelder-Mead Simplex Method" , ansi=1, bold=1)
1044  return optimize.fmin(xwrap(self.Objective.Full),self.mvals0,ftol=self.convergence_objective,xtol=self.convergence_step,maxiter=self.maxstep,maxfun=self.maxstep*10)
1045  elif Algorithm == "anneal":
1046  printcool("Minimizing Objective Function using Simulated Annealing" , ansi=1, bold=1)
1047  xmin, Jmin, T, feval, iters, accept, status = optimize.anneal(xwrap(self.Objective.Full), self.mvals0, lower=self.mvals0-1*self.trust0*np.ones(self.np),
1048  upper=self.mvals0+self.trust0*np.ones(self.np),schedule='boltzmann', full_output=True)
1049  scodes = {0 : "Points no longer changing.",
1050  1 : "Cooled to final temperature.",
1051  2 : "Maximum function evaluations reached.",
1052  3 : "Maximum cooling iterations reached.",
1053  4 : "Maximum accepted query locations reached.",
1054  5 : "Final point not the minimum amongst encountered points."}
1055  logger.info("Simulated annealing info:\n")
1056  logger.info("Status: %s \n" % scodes[status])
1057  logger.info("Function evaluations: %i\n" % feval)
1058  logger.info("Cooling iterations: %i\n" % iters)
1059  logger.info("Tests accepted: %i\n" % accept)
1060  return xmin
1061  elif Algorithm == "basinhopping":
1062  printcool("Minimizing Objective Function using Basin Hopping Method" , ansi=1, bold=1)
1063  T = xwrap(self.Objective.Full)(self.mvals0)
1064  Result = optimize.basinhopping(xwrap(self.Objective.Full), self.mvals0, niter=self.maxstep, T=T, stepsize=self.trust0, interval=20,
1065  minimizer_kwargs={'method':'nelder-mead','options':{'xtol': self.convergence_step,'ftol':self.convergence_objective}}, disp=True)
1066  logger.info(Result.message + "\n")
1067  return Result.x
1068  elif Algorithm == "cg":
1069  printcool("Minimizing Objective Function using\nPolak-Ribiere Conjugate Gradient Method" , ansi=1, bold=1)
1070  return optimize.fmin_cg(xwrap(self.Objective.Full,callback=False),self.mvals0,fprime=gwrap(self.Objective.Full),gtol=self.convergence_gradient)
1071  elif Algorithm == "tnc":
1072  printcool("Minimizing Objective Function using\nTruncated Newton Algorithm (Unconfirmed)" , ansi=1, bold=1)
1073  Result = optimize.fmin_tnc(fgwrap(self.Objective.Full,callback=False),self.mvals0,
1074  maxfun=self.maxstep,ftol=self.convergence_objective,pgtol=self.convergence_gradient,xtol=self.convergence_objective)
1075  return Result[0] # fmin_tnc returns a (x, nfeval, rc) tuple
1076  elif Algorithm == "ncg":
1077  printcool("Minimizing Objective Function using\nNewton-CG Algorithm" , ansi=1, bold=1)
1078  Result = optimize.fmin_ncg(xwrap(self.Objective.Full,callback=False),self.mvals0,fprime=gwrap(self.Objective.Full,callback=False),
1079  fhess=hwrap(self.Objective.Full),avextol=self.convergence_objective,maxiter=self.maxstep,disp=True)
1080  return Result
1081  elif Algorithm == "bfgs":
1082  printcool("Minimizing Objective Function using\nBFGS Quasi-Newton Method" , ansi=1, bold=1)
1083  return optimize.fmin_bfgs(xwrap(self.Objective.Full,callback=False),self.mvals0,fprime=gwrap(self.Objective.Full),gtol=self.convergence_gradient)
1084 
1085  def GeneticAlgorithm(self):
1086 
1087  """
1088  Genetic algorithm, under development. It currently works, but a
1089  genetic algorithm is more of a concept than a single method; there is
1090  no one standard way to implement it.
1091 
1092  @todo Massive parallelization hasn't been implemented yet
1093 
1094  """
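# Loop summary (matches the helpers and main loop below): build an initial
# population around mvals0, score each member with Objective.Full(Order=0),
# sort by fitness, keep the best KeepThresh fraction, replace the worst
# members by crossing over randomly paired survivors, mutate a MutProb
# fraction of the rest, and repeat for up to maxstep generations.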
1095  def generate_fresh(rows, cols):
1096  new_guys = np.zeros((rows, cols))
1097  for i in range(rows):
1098  new_guys[i, int(cols*np.random.random())] = self.trust0 * np.random.randn()
1099  return new_guys
1100 
1101  def cross_over(chrom1, chrom2):
1102  crosspt = 1 + int((len(chrom1) - 1) * np.random.random())
1103  Ans1 = np.hstack((chrom1[:crosspt],chrom2[crosspt:]))
1104  Ans2 = np.hstack((chrom2[:crosspt],chrom1[crosspt:]))
1105  return Ans1, Ans2
1106 
1107  def mutate(chrom):
1108  mutpt = int(len(chrom) * np.random.random())
1109  chrom[mutpt] += self.trust0 * np.random.randn()
1110  return chrom
1111 
1112  def initial_generation():
1113  return np.vstack((self.mvals0.copy(),np.random.randn(PopSize, self.FF.np)*self.trust0)) / (self.FF.np ** 0.5)
1114  #return np.vstack((self.mvals0.copy(),generate_fresh(PopSize, self.np)))
1115 
1116  def calculate_fitness(pop):
1117  return [self.Objective.Full(i,Order=0,verbose=False)['X'] for i in pop]
1118 
1119  def sort_by_fitness(fits):
1120  return np.sort(fits), np.argsort(fits)
1121 
1122  def generate_new_population(sorted, pop):
1123  newpop = pop[sorted[1]]
1124  # Individuals in this range are kept
1125  a = list(range(KeepNum))
1126  logger.info("Keeping: " + str(a) + '\n')
1127  random.shuffle(a)
1128  for i in range(0, KeepNum, 2):
1129  logger.info("%i and %i reproducing to replace %i and %i\n" % (a[i],a[i+1],len(newpop)-i-2,len(newpop)-i-1))
1130  newpop[-i-1], newpop[-i-2] = cross_over(newpop[a[i]],newpop[a[i+1]])
1131  b = list(range(KeepNum, len(newpop)))
1132  random.shuffle(b)
1133  for i in b[:MutNum]:
1134  logger.info("Randomly mutating %i\n" % i)
1135  newpop[i] = mutate(newpop[i])
1136  return newpop
1137 
1138  def xwrap(func,verbose=True):
1139  def my_func(mvals):
1140  if verbose: logger.info('\n')
1141  Answer = func(mvals,Order=0,verbose=verbose)['X']
1142  dx = (my_func.x_best - Answer) if my_func.x_best is not None else 0.0
1143  if my_func.x_best is None or Answer < my_func.x_best:
1144  color = "\x1b[92m"
1145  my_func.x_best = Answer
1146  else:
1147  color = "\x1b[91m"
1148  if verbose:
1149  logger.info("k=" + ' '.join(["% .4f" % i for i in mvals]) + '\n')
1150  logger.info("X2= %s%12.3e\x1b[0m d(X2)= %12.3e\n" % (color,Answer,dx))
1151  return Answer
1152  my_func.x_best = None
1153  return my_func
1154 
1155  PopSize = 120
1156  KeepThresh = 0.5
1157  MutProb = 0.1
1158  CrosProb = 0.5
1159 
1160  KeepNum = int(KeepThresh * PopSize)
1161  MutNum = int(MutProb * PopSize)
1162  CrosNum = int(CrosProb/2 * PopSize) * 2
1163  Population = initial_generation()
1164  Gen = 0
1165 
1166  Best = [[],[]]
1167 
1168  while True:
1169  #print Population
1170  Fits = calculate_fitness(Population)
1171  Sorted = sort_by_fitness(Fits)
1172  logger.info(str(Sorted))
1173  Best[0].append(Sorted[0][0])
1174  Best[1].append(Population[Sorted[1][0]])
1175  logger.info(str(Best))
1176  if Gen == self.maxstep: break
1177  Population = generate_new_population(Sorted, Population)
1178  Gen += 1
1179 
1180  logger.info(str(Best))
1181  return Population[Sorted[1][0]]
1182 
1183 
1184  def Simplex(self):
1185  """ Use SciPy's built-in simplex algorithm to optimize the parameters. @see Optimizer::ScipyOptimizer """
1186  return self.ScipyOptimizer(Algorithm="simplex")
1187 
1188  def Powell(self):
1189  """ Use SciPy's built-in Powell direction-set algorithm to optimize the parameters. @see Optimizer::ScipyOptimizer """
1190  return self.ScipyOptimizer(Algorithm="powell")
1191 
1192  def Anneal(self):
1193  """ Use SciPy's built-in simulated annealing algorithm to optimize the parameters. @see Optimizer::ScipyOptimizer """
1194  return self.ScipyOptimizer(Algorithm="anneal")
1195 
1196  def ConjugateGradient(self):
1197  """ Use SciPy's built-in conjugate gradient algorithm to optimize the parameters. @see Optimizer::ScipyOptimizer """
1198  return self.ScipyOptimizer(Algorithm="cg")
1199 
1200  def Scipy_BFGS(self):
1201  """ Use SciPy's built-in BFGS algorithm to optimize the parameters. @see Optimizer::ScipyOptimizer """
1202  return self.ScipyOptimizer(Algorithm="bfgs")
1204  def BasinHopping(self):
1205  """ Use SciPy's built-in basin hopping algorithm to optimize the parameters. @see Optimizer::ScipyOptimizer """
1206  return self.ScipyOptimizer(Algorithm="basinhopping")
1207 
1208  def TruncatedNewton(self):
1209  """ Use SciPy's built-in truncated Newton (fmin_tnc) algorithm to optimize the parameters. @see Optimizer::ScipyOptimizer """
1210  return self.ScipyOptimizer(Algorithm="tnc")
1211 
1212  def NewtonCG(self):
1213  """ Use SciPy's built-in Newton-CG (fmin_ncg) algorithm to optimize the parameters. @see Optimizer::ScipyOptimizer """
1214  return self.ScipyOptimizer(Algorithm="ncg")
1215 
1216  def Scan_Values(self,MathPhys=1):
1217  """ Scan through parameter values.
1219  This option is activated using the inputs:
1220 
1221  @code
1222  jobtype scan_mvals (or scan_pvals)
1223  scan_vals low:hi:nsteps
1224  scanindex_num (number) -or-
1225  scanindex_name (name)
1226  @endcode
1227 
1228  This method goes to the specified parameter indices and scans through
1229  the supplied values, evaluating the objective function at every step.
1230 
1231  I hope this method will be useful for people who just want to change
1232  one or two parameters and see how doing so affects the force field
1233  performance.
1234 
1235  @todo Maybe a multidimensional grid can be done.
1236  @param[in] MathPhys Switch to use mathematical (True) or physical (False) parameters.
1237 
1238  """
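# Example input fragment (illustrative; keyword spellings follow the options
# read in __init__ above, and the parameter name is hypothetical):
#
#   $options
#   jobtype scan_mvals
#   scan_vals -0.5:0.5:11       # scan from -0.5 to +0.5 in 11 steps
#   scanindex_name VDWS         # every parameter whose name contains VDWS
#   $end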
1239  # Iteration number counter.
1240  global ITERATION
1241  ITERATION = self.iterinit
1242  # First make sure that the user entered the correct syntax.
1243  try:
1244  vals_in = [float(i) for i in self.scan_vals.split(":")]
1245  except:
1246  logger.error("Syntax error: in the input file please use scan_vals low:hi:nsteps\n")
1247  raise RuntimeError
1248  if len(vals_in) != 3:
1249  logger.error("Syntax error: in the input file please use scan_vals low:hi:nsteps\n")
1250  raise RuntimeError
1251  idx = [int(i) for i in self.idxnum]
1252  for j in self.idxname:
1253  idx += [self.FF.map[i] for i in self.FF.map if j in i]
1254  idx = set(idx)
1255  scanvals = np.linspace(vals_in[0],vals_in[1],int(vals_in[2]))
1256  logger.info('User input for %s parameter values to scan over:\n' % ("mathematical" if MathPhys else "physical"))
1257  logger.info(str(vals_in) + '\n')
1258  logger.info('These parameter values will be used:\n')
1259  logger.info(str(scanvals) + '\n')
1260  minvals = None
1261  minobj = 1e100
1262  for pidx in idx:
1263  if MathPhys:
1264  logger.info("Scanning parameter %i (%s) in the mathematical space\n" % (pidx,self.FF.plist[pidx]))
1265  vals = self.mvals0.copy()
1266  else:
1267  logger.info("Scanning parameter %i (%s) in the physical space\n" % (pidx,self.FF.plist[pidx]))
1268  self.FF.use_pvals = True
1269  vals = self.FF.pvals0.copy()
1270  counter = 1
1271  for i in scanvals:
1272  printcool("Parameter %i (%s) Value is now % .4e ; Step %i/%i" % (pidx, self.FF.plist[pidx],i,counter,len(scanvals)), color=1,sym="@")
1273  vals[pidx] = i
1274  data = self.Objective.Full(vals,Order=0,verbose=True)
1275  if data['X'] < minobj:
1276  minobj = data['X']
1277  minvals = vals.copy()
1278  logger.info("Value = % .4e Objective = % .4e\n" % (i, data['X']))
1279  ITERATION += 1
1280  counter += 1
1281  return minvals
1282 
1283  def ScanMVals(self):
1284  """ Scan through the mathematical parameter space. @see Optimizer::Scan_Values """
1285  return self.Scan_Values(1)
1286 
1287  def ScanPVals(self):
1288  """ Scan through the physical parameter space. @see Optimizer::Scan_Values """
1289  return self.Scan_Values(0)
1290 
1291  def SinglePoint(self):
1292  """ A single-point objective function computation. """
1293  data = self.Objective.Full(self.mvals0,Order=0,verbose=True)
1294  printcool("Objective Function Single Point: %.8f" % data['X'])
1295 
1296  def Gradient(self):
1297  """ A single-point gradient computation. """
1298  data = self.Objective.Full(self.mvals0,Order=1)
1299  bar = printcool("Objective function: %.8f\nGradient below" % data['X'])
1300  self.FF.print_map(vals=data['G'],precision=8)
1301  logger.info(bar)
1302 
1303  def Hessian(self):
1304  """ A single-point Hessian computation. """
1305  data = self.Objective.Full(self.mvals0,Order=2)
1306  bar = printcool("Objective function: %.8f\nGradient below" % data['X'])
1307  self.FF.print_map(vals=data['G'],precision=8)
1308  logger.info(bar)
1309  printcool("Hessian matrix:")
1310  pmat2d(data['H'], precision=8)
1311  logger.info(bar)
1312 
1313  def Precondition(self):
1314  """ An experimental method to determine the parameter scale factors
1315  that result in the best-conditioned Hessian. """
1316  from scipy import optimize
1317  data = self.Objective.Full(self.mvals0,Order=2,verbose=True)
1318  X, G, H = (data['X0'], data['G0'], data['H0'])
1319  if len(G) < 30:
1320  bar = printcool("(Un-penalized) objective function: %.8f\nGradient below" % X)
1321  self.FF.print_map(vals=G,precision=8)
1322  logger.info(bar)
1323  printcool("Hessian matrix:")
1324  pmat2d(H, precision=8)
1325  logger.info(bar)
1326  else:
1327  bar = printcool("(Un-penalized) objective function: %.8f" % X)
1328  logger.info("More than 30 parameters; gradient and Hessian written to grad.txt and hess.txt\n")
1329  base, ext = os.path.splitext(self.input_file)
1330  np.savetxt('%s-grad.txt' % base, G)
1331  np.savetxt('%s-hess.txt' % base, H)
1332 
1333  H1 = H.copy()
1334  H1 = np.delete(H1, self.excision, axis=0)
1335  H1 = np.delete(H1, self.excision, axis=1)
1336  try:
1337  Cond = np.linalg.cond(H1)
1338  except:
1339  Cond = 1e100
1340  # Eig = np.linalg.eig(H1)[0] # Diagonalize Hessian
1341  # Cond = np.abs(np.max(Eig)/np.min(Eig))
1342  # Spectral gap?
1343  # eigsort = np.sort(np.abs(Eig))
1344  # Cond = eigsort[-1]/eigsort[-2]
1345  logger.info("Initial condition number = %.3f\n" % Cond)
1346  def newcond(logrskeys, multiply=True):
1347  """ Condition number function to be optimized.
1348 
1349  Parameters
1350  ----------
1351  logrskeys : np.ndarray
1352  Logarithms of the rescaling factor of each parameter type.
1353  The optimization is done in the log space.
1354  multiply : bool
1355  If set to True, then the exponentiated logrskeys will
1356  multiply the existing rescaling factors defined in the force field.
1357 
1358  Returns
1359  -------
1360  float
1361  Condition number of the Hessian matrix.
1362  """
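# Objective minimized below:
#
#   Obj = log( cond(H') ) + regularize * mean( logrskeys**2 )
#
# where H' is the Hessian rescaled by exp(logrskeys), with the excised rows
# and columns removed.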
1363  new_rsord = OrderedDict([(k, np.exp(logrskeys[i])) for i, k in enumerate(self.FF.rs_ord.keys())])
1364  answer = self.FF.make_rescale(new_rsord, H=H.copy(), multiply=multiply)
1365  H_a = answer['H'].copy()
1366  H_a = np.delete(H_a, self.excision, axis=0)
1367  H_a = np.delete(H_a, self.excision, axis=1)
1368  try:
1369  Cond_a = np.linalg.cond(H_a)
1370  except:
1371  Cond_a = 1e100
1372  if Cond_a > 1e100: Cond_a = 1e100
1373  # Eig_a = np.linalg.eig(H_a)[0] # Diagonalize Hessian
1374  # if np.min(Eig_a) < 1e-10:
1375  # Cond_a = 1e100
1376  # else:
1377  # Cond_a = np.abs(np.max(Eig_a)/np.min(Eig_a)) # Condition number
1378  dlog = logrskeys - newcond.prev_step
1379  nlog = np.sqrt(np.sum(dlog**2))
1380  # "Regularize" using the log deviations
1381  Reg = newcond.regularize * np.sum(logrskeys ** 2) / len(logrskeys)
1382  Obj = np.log(Cond_a) + Reg
1383  if newcond.verbose and newcond.step_n % 1000 == 0:
1384  logger.info("\rEval# %%6i: Step: %%9.3f Along: %%%is Condition: %%10.3e Regularize: %%8.3f Objective: %%8.3f\n" % max([len(k) for k in list(self.FF.rs_ord.keys())]) %
1385  (newcond.step_n, nlog, list(new_rsord.keys())[np.argmax(np.abs(dlog))], Cond_a, Reg, np.log(Cond_a) + Reg))
1386  # printcool_dictionary(answer['rs_ord'])
1387  elif newcond.verbose and Obj < newcond.best:
1388  logger.info("\rEval# %%6i: Step: %%9.3f Along: %%%is Condition: %%10.3e Regularize: %%8.3f Objective: %%8.3f (new minimum)\n" % max([len(k) for k in list(self.FF.rs_ord.keys())]) %
1389  (newcond.step_n, nlog, list(new_rsord.keys())[np.argmax(np.abs(dlog))], Cond_a, Reg, np.log(Cond_a) + Reg))
1390  # logger.info("New multipliers:" + ' '.join(['% .3f' % np.exp(s) for s in logrskeys])+'\n')
1391  newcond.best = Obj
1392  newcond.prev_step = logrskeys
1393  newcond.step_n += 1
1394  return Obj
1395  newcond.prev_step = np.zeros(len(self.FF.rs_ord.keys()),dtype=float)
1396  newcond.step_n = 0
1397  newcond.verbose = True
1398  newcond.regularize = 0.1
1399  newcond.best = np.inf
1400  # printcool_dictionary(self.FF.rs_ord)
1401  logrsmult = np.zeros(len(self.FF.rs_ord.keys()),dtype=float)
1402  # logrsmult[-1] = np.log(0.1)
1403  # Run the optimization algorithm.
1404  # optimized = optimize.fmin(newcond,logrsmult,ftol=0.1,xtol=0.1,maxiter=1000,maxfun=10000)
1405  logger.info("Basin-hopping optimization of condition number in the space of log rescaling factors\n")
1406  optmethod = 'basin'
1407  if optmethod == 'basin':
1408  optimized = optimize.basinhopping(newcond, logrsmult, stepsize=1.0, niter=self.maxstep, #disp=True,
1409  minimizer_kwargs={'method':'Powell','tol':0.1,'options':{'maxiter':1000}})
1410  optresult = optimized.x
1411  elif optmethod == 'seq':
1412  optout = optimize.fmin_powell(newcond, logrsmult, xtol=0.01, ftol=0.01, maxfun=1000, disp=False, full_output=True)
1413  bestval = optout[1]
1414  bestsol = optout[0].copy()
1415  logger.info("New multipliers:" + ' '.join(['% .3f' % np.exp(s) for s in bestsol])+'\n')
1416  logger.info("Sequential grid-scan + Powell in the space of log rescaling factors\n")
1417  outerval = bestval
1418  outersol = bestsol.copy()
1419  outeriter = 0
1420  maxouter = 3
1421  while True:
1422  for i in range(len(logrsmult)):
1423  for j in [np.log(0.1), np.log(10), np.log(0.01), np.log(100)]:
1424  # for j in [np.log(0.1), np.log(10)]:
1425  logrsmult = outersol.copy()
1426  logrsmult[i] += j
1427  logger.info("Trying new initial guess with element %i changed by %.3f:\n" % (i, j))
1428  # logger.info("New guess:" + ' '.join(['% .3f' % np.exp(s) for s in logrsmult])+'\n')
1429  optout = optimize.fmin_powell(newcond, logrsmult, xtol=0.01, ftol=0.01, maxfun=1000, disp=False, full_output=True)
1430  if optout[1] < bestval:
1431  bestval = optout[1]
1432  bestsol = optout[0].copy()
1433  logger.info("New multipliers:" + ' '.join(['% .3f' % np.exp(s) for s in bestsol])+'\n')
1434  logger.info("Done outer iteration %i\n" % outeriter)
1435  outeriter += 1
1436  if np.linalg.norm(outersol-bestsol) < 0.1:
1437  logger.info("Sequential optimization solution moved by less than 0.1 (%.3f)\n" % np.linalg.norm(outersol-bestsol))
1438  break
1439  if np.abs(outerval-bestval) < 0.1:
1440  logger.info("Sequential optimization value improved by less than 0.1 (%.3f)" % (outerval-bestval))
1441  break
1442  outerval = bestval
1443  outersol = bestsol.copy()
1444  if outeriter == maxouter:
1445  logger.info("Outer iterations reached maximum of %i\n" % maxouter)
1446  break
1447  optresult = outersol.copy()
1448  else:
1449  raise RuntimeError
1450  new_rsord = OrderedDict([(k, np.exp(optresult[i])) for i, k in enumerate(self.FF.rs_ord.keys())])
1451  answer = self.FF.make_rescale(new_rsord)
1452  newcond.regularize = 0.0
1453  newcond.verbose = False
1454  optval = np.exp(newcond(optresult))
1455  logger.info("\nOptimized condition number: %.3f\n" % optval)
1456  # The optimization algorithm may have changed some rescaling factors that had no effect.
1457  # Now we change them back.
1458  rezero = []
1459  nonzeros = []
1460  printkeys = []
1461  for i in range(len(optresult)):
1462  trial = optresult.copy()
1463  trial[i] = 1.0
1464  if np.abs(newcond(trial)-newcond(optresult)) == 0.0:
1465  rezero.append(i)
1466  else:
1467  nonzeros.append(optresult[i])
1468  printkeys.append(list(self.FF.rs_ord.keys())[i])
1469  # Shift the log factors so the nonzero ones average to zero (i.e. the scale factors have geometric mean 1.0); otherwise they all drift larger / smaller together.
1470  optresult -= np.mean(nonzeros)
1471  for i in rezero:
1472  optresult[i] = 0.0
1473  # We don't need any more than one significant digit of precision for the priors / scale factors.
1474  # The following values are the new scale factors themselves (i.e. not multiplying the old ones)
1475  opt_rsord = OrderedDict([(k, est124(np.exp(optresult[i])*self.FF.rs_ord[k])) for i, k in enumerate(self.FF.rs_ord.keys())])
1476  # Print the final answer
1477  answer = self.FF.make_rescale(opt_rsord, mvals=self.mvals0, H=H.copy(), multiply=False)
1478  logger.info("Condition Number after Rounding Factors -> %.3f\n" % (np.exp(newcond(np.log(list(opt_rsord.values())), multiply=False))))
1479  bar = printcool("Previous values of the rescaling factors / prior widths:")
1480  logger.info(''.join([" %-35s : %.5e\n" % (i, self.FF.rs_ord[i]) for i in self.FF.rs_ord.keys()]))
1481  logger.info(bar)
1482  opt_rsord = OrderedDict([(k, opt_rsord[k]) for k in opt_rsord.keys() if k in printkeys])
1483  bar = printcool("Recommended values (may be slightly stochastic):")
1484  logger.info(''.join([" %-35s : %.1e\n" % (k, opt_rsord[k]) for k in opt_rsord.keys()]))
1485  logger.info(bar)
1486  if np.linalg.norm(self.mvals0) != 0.0:
1487  bar = printcool("Mathematical parameters in the new space:",color=4)
1488  self.FF.print_map(answer['mvals'])
1489  logger.info(bar)
1490  outfnm = self.save_mvals_to_input(answer['mvals'], priors=opt_rsord, jobtype='optimize')
1491  # logger.info("Input file with optimization parameters saved to %s.\n" % outfnm)
1492  printcool("Input file with new priors/mvals saved to %s (jobtype set to optimize)." % (outfnm), color=0)
1493 
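# Illustrative sketch (not part of optimizer.py): Precondition minimizes, in log
# space, log(cond(H')) + regularize * mean(log(s)**2) over per-parameter-type
# rescaling factors s.  FF.make_rescale is assumed to transform the Hessian by
# the chain rule, H' = S H S with S = diag(s); a standalone example of why such
# a rescaling changes the conditioning:
import numpy as np

H_demo = np.array([[1.0e4, 1.0],
                   [1.0,   1.0e-2]])             # badly scaled 2x2 example Hessian
print(np.linalg.cond(H_demo))                    # ~1e6: poorly conditioned

s_demo = np.array([1.0e-2, 1.0e1])               # hypothetical rescaling factors
S_demo = np.diag(s_demo)
print(np.linalg.cond(S_demo @ H_demo @ S_demo))  # ~1.2: well conditioned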
1494  def FDCheckG(self):
1495  """ Finite-difference checker for the objective function gradient.
1496 
1497  For each element in the gradient, use a seven-point finite difference
1498  stencil (f1d7p) to compute a numerical derivative, and compare it to
1499  the analytic result.
1500 
1501  """
1502 
1503  Adata = self.Objective.Full(self.mvals0,Order=1)['G']
1504  Fdata = np.zeros(self.FF.np)
1505  printcool("Checking first derivatives by finite difference!\n%-8s%-20s%13s%13s%13s%13s" \
1506  % ("Index", "Parameter ID","Analytic","Numerical","Difference","Fractional"),bold=1,color=5)
1507  for i in range(self.FF.np):
1508  Fdata[i] = f1d7p(fdwrap(self.Objective.Full,self.mvals0,i,'X',Order=0),self.h)
1509  Denom = max(abs(Adata[i]),abs(Fdata[i]))
1510  Denom = Denom > 1e-8 and Denom or 1e-8
1511  D = Adata[i] - Fdata[i]
1512  Q = (Adata[i] - Fdata[i])/Denom
1513  cD = abs(D) > 0.5 and "\x1b[1;91m" or (abs(D) > 1e-2 and "\x1b[91m" or (abs(D) > 1e-5 and "\x1b[93m" or "\x1b[92m"))
1514  cQ = abs(Q) > 0.5 and "\x1b[1;91m" or (abs(Q) > 1e-2 and "\x1b[91m" or (abs(Q) > 1e-5 and "\x1b[93m" or "\x1b[92m"))
1515  logger.info("\r %-8i%-20s% 13.4e% 13.4e%s% 13.4e%s% 13.4e\x1b[0m\n" \
1516  % (i, self.FF.plist[i][:20], Adata[i], Fdata[i], cD, D, cQ, Q))
1517 
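# Illustrative sketch (not part of optimizer.py): f1d7p is assumed to implement
# the standard seven-point central-difference formula for the first derivative,
#   f'(x) ~= (-f(x-3h) + 9 f(x-2h) - 45 f(x-h) + 45 f(x+h) - 9 f(x+2h) + f(x+3h)) / (60 h),
# which the checker above compares against the analytic gradient.  A minimal
# standalone version, verified on a function with a known derivative:
import numpy as np

def central_diff_7pt(f, x, h):
    # O(h**6) central-difference estimate of f'(x)
    return (-f(x - 3*h) + 9*f(x - 2*h) - 45*f(x - h)
            + 45*f(x + h) - 9*f(x + 2*h) + f(x + 3*h)) / (60.0*h)

assert abs(central_diff_7pt(np.sin, 1.0, 1e-2) - np.cos(1.0)) < 1e-10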
1518  def FDCheckH(self):
1519  """ Finite-difference checker for the objective function Hessian.
1520 
1521  For each element in the Hessian, use a five-point stencil in both
1522  parameter indices to compute a finite-difference derivative, and
1523  compare it to the analytic result.
1524 
1525  This is meant to be a foolproof checker, so it is rather slow.  We
1526  could write a faster checker by assuming accurate first derivatives,
1527  but it is better not to make that assumption.
1528 
1529  The second derivative is computed by double-wrapping the objective
1530  function via the 'wrap2' function.
1531 
1532  """
1533  Adata = self.Objective.Full(self.mvals0,Order=2)['H']
1534  Fdata = np.zeros((self.FF.np,self.FF.np))
1535  printcool("Checking second derivatives by finite difference!\n%-8s%-20s%-20s%13s%13s%13s%13s" \
1536  % ("Index", "Parameter1 ID", "Parameter2 ID", "Analytic","Numerical","Difference","Fractional"),bold=1,color=5)
1537 
1538  # Whee, our double-wrapped finite difference second derivative!
1539  def wrap2(mvals0,pidxi,pidxj):
1540  def func1(arg):
1541  mvals = list(mvals0)
1542  mvals[pidxj] += arg
1543  return f1d5p(fdwrap(self.Objective.Full,mvals,pidxi,'X',Order=0),self.h)
1544  return func1
1545 
1546  for i in range(self.FF.np):
1547  for j in range(i,self.FF.np):
1548  Fdata[i,j] = f1d5p(wrap2(self.mvals0,i,j),self.h)
1549  Denom = max(abs(Adata[i,j]),abs(Fdata[i,j]))
1550  Denom = Denom > 1e-8 and Denom or 1e-8
1551  D = Adata[i,j] - Fdata[i,j]
1552  Q = (Adata[i,j] - Fdata[i,j])/Denom
1553  cD = abs(D) > 0.5 and "\x1b[1;91m" or (abs(D) > 1e-2 and "\x1b[91m" or (abs(D) > 1e-5 and "\x1b[93m" or "\x1b[92m"))
1554  cQ = abs(Q) > 0.5 and "\x1b[1;91m" or (abs(Q) > 1e-2 and "\x1b[91m" or (abs(Q) > 1e-5 and "\x1b[93m" or "\x1b[92m"))
1555  logger.info("\r %-8i%-20s%-20s% 13.4e% 13.4e%s% 13.4e%s% 13.4e\x1b[0m\n" \
1556  % (i, self.FF.plist[i][:20], self.FF.plist[j][:20], Adata[i,j], Fdata[i,j], cD, D, cQ, Q))
1557 
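# Illustrative sketch (not part of optimizer.py): the 'wrap2' double-wrapping
# above nests two five-point stencils, so each Hessian element is the stencil
# derivative (in parameter j) of the stencil derivative (in parameter i).  A
# standalone version of the same idea, checked on f(x, y) = x**2 * y whose
# mixed second derivative is 2*x:
def central_diff_5pt(g, h):
    # O(h**4) five-point estimate of g'(0) for a one-argument function g
    return (g(-2*h) - 8*g(-h) + 8*g(h) - g(2*h)) / (12.0*h)

def mixed_second(f, x, y, h):
    # d2f/(dx dy) at (x, y): inner stencil along x, outer stencil along y
    dfdx = lambda dy: central_diff_5pt(lambda dx: f(x + dx, y + dy), h)
    return central_diff_5pt(dfdx, h)

assert abs(mixed_second(lambda a, b: a**2 * b, 3.0, 2.0, 1e-3) - 6.0) < 1e-6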
1558  def readchk(self):
1559  """ Read the checkpoint file for the main optimizer. """
1560  self.chk = {}
1561  if self.rchk_fnm is not None:
1562  absfnm = os.path.join(self.root,self.rchk_fnm)
1563  if os.path.exists(absfnm):
1564  self.chk = pickle.load(open(absfnm, 'rb'))  # checkpoint is written in binary mode, so read it back in binary
1565  else:
1566  logger.info("\x1b[40m\x1b[1;92mWARNING:\x1b[0m read_chk is set to True, but checkpoint file not loaded (wrong filename or doesn't exist?)\n")
1567  return self.chk
1568 
1569  def writechk(self):
1570  """ Write the checkpoint file for the main optimizer. """
1571  if self.wchk_fnm is not None:
1572  logger.info("Writing the checkpoint file %s\n" % self.wchk_fnm)
1573  with wopen(os.path.join(self.root,self.wchk_fnm), binary=True) as f: pickle.dump(self.chk, f)
1574 
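None of the methods above are normally called directly; the jobtype option selects one of them through OptTab, and Run dispatches to it. A minimal driver sketch, assuming the standard ForceBalance entry points (forcebalance.forcefield.FF and forcebalance.objective.Objective) and a hypothetical input file name:

from forcebalance.parser import parse_inputs
from forcebalance.forcefield import FF
from forcebalance.objective import Objective
from forcebalance.optimizer import Optimizer

# Parse the input file, then build the force field, objective, and optimizer.
options, tgt_opts = parse_inputs("optimize.in")        # hypothetical input file
forcefield = FF(options)
objective = Objective(options, tgt_opts, forcefield)
optimizer = Optimizer(options, objective, forcefield)
optimizer.Run()                                        # dispatches to the method selected by jobtype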