1 """@package forcebalance.objective 3 ForceBalance objective function.""" 4 from __future__
import division
6 from builtins
import range
7 from builtins
import object
12 from collections
import defaultdict, OrderedDict
15 from forcebalance.nifty import printcool_dictionary, createWorkQueue, getWorkQueue, wq_wait
18 from forcebalance.output
import getLogger
19 logger = getLogger(__name__)
try:
    from forcebalance.gmxio import AbInitio_GMX, BindingEnergy_GMX, Liquid_GMX, Lipid_GMX, Interaction_GMX, Moments_GMX, Vibration_GMX, Thermo_GMX
except:
    logger.warning(traceback.format_exc())
    logger.warning("Gromacs module import failed\n")

try:
    from forcebalance.tinkerio import AbInitio_TINKER, Vibration_TINKER, BindingEnergy_TINKER, Moments_TINKER, Interaction_TINKER, Liquid_TINKER
except:
    logger.warning(traceback.format_exc())
    logger.warning("Tinker module import failed\n")

try:
    from forcebalance.openmmio import AbInitio_OpenMM, Liquid_OpenMM, Interaction_OpenMM, BindingEnergy_OpenMM, Moments_OpenMM, Hydration_OpenMM, Vibration_OpenMM, OptGeoTarget_OpenMM, TorsionProfileTarget_OpenMM
except:
    logger.warning(traceback.format_exc())
    logger.warning("OpenMM module import failed; check OpenMM package\n")

try:
    from forcebalance.smirnoffio import AbInitio_SMIRNOFF, Liquid_SMIRNOFF, Vibration_SMIRNOFF, OptGeoTarget_SMIRNOFF, TorsionProfileTarget_SMIRNOFF, smirnoff_analyze_parameter_coverage
except:
    logger.warning(traceback.format_exc())
    logger.warning("SMIRNOFF module import failed; check SMIRNOFF package\n")

try:
    from forcebalance.evaluator_io import Evaluator_SMIRNOFF
except:
    logger.warning(traceback.format_exc())
    logger.warning("openff.evaluator module import failed\n")

try:
    from forcebalance.abinitio_internal import AbInitio_Internal
except:
    logger.warning(traceback.format_exc())
    logger.warning("Internal energy fitting module import failed\n")

try:
    from forcebalance.counterpoise import Counterpoise
except:
    logger.warning(traceback.format_exc())
    logger.warning("Counterpoise module import failed\n")

try:
    from forcebalance.amberio import AbInitio_AMBER, Interaction_AMBER, Vibration_AMBER, Liquid_AMBER
except:
    logger.warning(traceback.format_exc())
    logger.warning("Amber module import failed\n")

try:
    from forcebalance.psi4io import THCDF_Psi4, RDVR3_Psi4
except:
    logger.warning(traceback.format_exc())
    logger.warning("PSI4 module import failed\n")

try:
    from forcebalance.target import RemoteTarget
except:
    logger.warning(traceback.format_exc())
    logger.warning("Remote Target import failed\n")
Implemented_Targets = {
    'ABINITIO_GMX':AbInitio_GMX,
    'ABINITIO_TINKER':AbInitio_TINKER,
    'ABINITIO_OPENMM':AbInitio_OpenMM,
    'ABINITIO_SMIRNOFF':AbInitio_SMIRNOFF,
    'ABINITIO_AMBER':AbInitio_AMBER,
    'ABINITIO_INTERNAL':AbInitio_Internal,
    'VIBRATION_TINKER':Vibration_TINKER,
    'VIBRATION_GMX':Vibration_GMX,
    'VIBRATION_AMBER':Vibration_AMBER,
    'VIBRATION_OPENMM':Vibration_OpenMM,
    'VIBRATION_SMIRNOFF':Vibration_SMIRNOFF,
    'THERMO_GMX':Thermo_GMX,
    'LIQUID_OPENMM':Liquid_OpenMM,
    'LIQUID_SMIRNOFF':Liquid_SMIRNOFF,
    'LIQUID_TINKER':Liquid_TINKER,
    'LIQUID_GMX':Liquid_GMX,
    'LIQUID_AMBER':Liquid_AMBER,
    'LIPID_GMX':Lipid_GMX,
    'COUNTERPOISE':Counterpoise,
    'THCDF_PSI4':THCDF_Psi4,
    'RDVR3_PSI4':RDVR3_Psi4,
    'INTERACTION_AMBER':Interaction_AMBER,
    'INTERACTION_GMX':Interaction_GMX,
    'INTERACTION_TINKER':Interaction_TINKER,
    'INTERACTION_OPENMM':Interaction_OpenMM,
    'BINDINGENERGY_TINKER':BindingEnergy_TINKER,
    'BINDINGENERGY_GMX':BindingEnergy_GMX,
    'BINDINGENERGY_OPENMM':BindingEnergy_OpenMM,
    'MOMENTS_TINKER':Moments_TINKER,
    'MOMENTS_GMX':Moments_GMX,
    'MOMENTS_OPENMM':Moments_OpenMM,
    'HYDRATION_OPENMM':Hydration_OpenMM,
    'OPTGEO_OPENMM':OptGeoTarget_OpenMM,
    'OPTGEO_SMIRNOFF':OptGeoTarget_SMIRNOFF,
    'OPTGEOTARGET_OPENMM':OptGeoTarget_OpenMM,
    'OPTGEOTARGET_SMIRNOFF':OptGeoTarget_SMIRNOFF,
    'TORSIONPROFILE_OPENMM':TorsionProfileTarget_OpenMM,
    'TORSIONPROFILE_SMIRNOFF':TorsionProfileTarget_SMIRNOFF,
    'EVALUATOR_SMIRNOFF':Evaluator_SMIRNOFF,
    'REMOTE_TARGET':RemoteTarget,
    }

Letters = ['X', 'G', 'H']
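# Note on usage: each key in Implemented_Targets is the uppercase 'type' string
# from a target section of the input file, and its value is the Target subclass
# that gets instantiated with (options, opts, forcefield).  A minimal sketch,
# assuming a hypothetical options/opts/forcefield triple prepared by the parser:
#
#     TargetClass = Implemented_Targets[opts['type']]   # e.g. 'ABINITIO_SMIRNOFF'
#     Tgt = TargetClass(options, opts, forcefield)
#
# Letters gives the canonical order of the three quantities handled throughout
# this module: 'X' (objective value), 'G' (gradient), 'H' (Hessian).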
130 """ Objective function. 132 The objective function is a combination of contributions from the different 133 fitting targets. Basically, it loops through the targets, gets their 134 contributions to the objective function and then sums all of them 135 (although more elaborate schemes are conceivable). The return value is the 136 same data type as calling the target itself: a dictionary containing 137 the objective function, the gradient and the Hessian. 139 The penalty function is also computed here; it keeps the parameters from straying 140 too far from their initial values. 142 @param[in] mvals The mathematical parameters that enter into computing the objective function 143 @param[in] Order The requested order of differentiation 145 def __init__(self, options, tgt_opts, forcefield):
148 self.set_option(options,
'penalty_type')
149 self.set_option(options,
'penalty_additive')
150 self.set_option(options,
'penalty_multiplicative')
151 self.set_option(options,
'penalty_hyperbolic_b')
152 self.set_option(options,
'penalty_alpha')
153 self.set_option(options,
'penalty_power')
154 self.set_option(options,
'normalize_weights')
156 self.set_option(options,
'wq_port')
158 self.set_option(options,
'asynchronous')
162 enable_smirnoff_prints =
False 163 for opts
in tgt_opts:
164 if opts[
'type']
not in Implemented_Targets:
165 logger.error(
'The target type \x1b[1;91m%s\x1b[0m is not implemented!\n' % opts[
'type'])
167 elif opts[
'type'].endswith(
"SMIRNOFF"):
168 enable_smirnoff_prints =
True 173 if opts[
"remote"]
and self.wq_port != 0: Tgt = forcebalance.target.RemoteTarget(options, opts, forcefield)
174 else: Tgt = Implemented_Targets[opts[
'type']](options,opts,forcefield)
177 if len(set([Tgt.name
for Tgt
in self.
Targets])) != len([Tgt.name
for Tgt
in self.
Targets]):
178 logger.error(
"The list of target names is not unique!\n")
180 if enable_smirnoff_prints:
186 self.penalty_multiplicative,self.penalty_hyperbolic_b,
187 self.penalty_alpha,self.penalty_power)
189 if self.normalize_weights:
190 self.
WTot = np.sum([i.weight
for i
in self.
Targets])
197 if self.wq_port != 0:
199 logger.info(
'Work Queue is listening on %d\n' % self.wq_port)
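    # How the pieces combine (a worked note, not new behavior): with normalized
    # weights, each target contributes its weight divided by WTot, so the raw
    # objective before the penalty is
    #
    #     X = sum_T (w_T / WTot) * X_T
    #     G = sum_T (w_T / WTot) * G_T        (length-np vector)
    #     H = sum_T (w_T / WTot) * H_T        (np x np matrix)
    #
    # For example, two targets with weights 1.0 and 3.0 and normalize_weights
    # enabled give WTot = 4.0 and effective weights 0.25 and 0.75.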
    def Target_Terms(self, mvals, Order=0, verbose=False, customdir=None):
        ## The total objective function value, gradient, and Hessian.
        Objective = {'X':0.0, 'G':np.zeros(self.FF.np), 'H':np.zeros((self.FF.np, self.FF.np))}
        # Loop through the targets, stage the directories and submit the Work Queue processes.
        for Tgt in self.Targets:
            Tgt.stage(mvals, AGrad = Order >= 1, AHess = Order >= 2, customdir=customdir)
        if self.asynchronous:
            # Asynchronous evaluation: keep a list of targets that still need to be
            # evaluated and remove them as their Work Queue tasks complete.
            Need2Evaluate = self.Targets[:]
            while len(Need2Evaluate) > 0:
                for Tgt in Need2Evaluate:
                    if Tgt.wq_complete():
                        # The functions that return the objective value, gradient, and Hessian.
                        Funcs = [Tgt.get_X, Tgt.get_G, Tgt.get_H]
                        # Call the function corresponding to the requested derivative order.
                        Ans = Funcs[Order](mvals, customdir=customdir)
                        # Print the qualitative indicators for this target.
                        if verbose:
                            Tgt.meta_indicate(customdir=customdir)
                        self.ObjDict[Tgt.name] = {'w' : Tgt.weight/self.WTot, 'x' : Ans['X']}
                        # Increment the objective function, gradient, and Hessian.
                        for i in range(3):
                            Objective[Letters[i]] += Ans[Letters[i]]*Tgt.weight/self.WTot
                        Need2Evaluate.remove(Tgt)
                        break
        else:
            # Wait for the Work Queue to drain, then evaluate the targets in order.
            wq = getWorkQueue()
            if wq is not None:
                wq_wait(wq)
            for Tgt in self.Targets:
                # The functions that return the objective value, gradient, and Hessian.
                Funcs = [Tgt.get_X, Tgt.get_G, Tgt.get_H]
                # Call the function corresponding to the requested derivative order.
                Ans = Funcs[Order](mvals, customdir=customdir)
                # Print the qualitative indicators for this target.
                if verbose:
                    Tgt.meta_indicate(customdir=customdir)
                self.ObjDict[Tgt.name] = {'w' : Tgt.weight/self.WTot, 'x' : Ans['X']}
                # Increment the objective function, gradient, and Hessian.
                for i in range(3):
                    Objective[Letters[i]] += Ans[Letters[i]]*Tgt.weight/self.WTot
        # Ensure the Hessian diagonal has no exactly-zero entries.
        for i in range(self.FF.np):
            if Objective['H'][i,i] == 0.0:
                Objective['H'][i,i] = 1.0
        return Objective
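    # Worked note on the dispatch above: Funcs[Order] selects get_X, get_G, or
    # get_H, but each is expected to return the same dictionary layout, e.g.
    # (shapes shown for a force field with self.FF.np parameters):
    #
    #     Ans = {'X': 0.0123,                      # scalar residual
    #            'G': np.zeros(self.FF.np),        # zero-filled unless Order >= 1
    #            'H': np.zeros((self.FF.np,)*2)}   # zero-filled unless Order >= 2
    #
    # which is why the same accumulation loop over Letters works for every Order.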
266 """ Print objective function contributions. """ 267 PrintDict = OrderedDict()
271 for key, val
in self.
ObjDict.items():
272 if key ==
'Total' :
continue 281 PrintDict[key] =
"% 12.5f % 10.3f %s% 16.5e%s" % (val[
'x'],val[
'w'],color,val[
'x']*val[
'w'],
"\x1b[0m")
285 PrintDict[key] +=
" ( %+10.3e )" % (xnew - xold)
286 Total += val[
'x']*val[
'w']
294 PrintDict[
'Total'] =
"% 12s % 10s %s% 16.5e%s" % (
"",
"",color,Total,
"\x1b[0m")
298 PrintDict[
'Total'] +=
" ( %+10.3e )" % (xnew - xold)
299 Title =
"Objective Function Breakdown\n %-20s %55s" % (
"Target Name",
"Residual x Weight = Contribution (Current-Prev)")
301 Title =
"Objective Function Breakdown\n %-20s %40s" % (
"Target Name",
"Residual x Weight = Contribution")
    def Full(self, vals, Order=0, verbose=False, customdir=None):
        Objective = self.Target_Terms(vals, Order, verbose, customdir)
        ## Compute the penalty function in terms of the mathematical parameters.
        if self.FF.use_pvals:
            Extra = self.Penalty.compute(self.FF.create_mvals(vals), Objective)
        else:
            Extra = self.Penalty.compute(vals, Objective)
        # Save the unpenalized objective, gradient, and Hessian.
        Objective['X0'] = Objective['X']
        Objective['G0'] = Objective['G'].copy()
        Objective['H0'] = Objective['H'].copy()
        self.ObjDict['Regularization'] = {'w' : 1.0, 'x' : Extra[0]}
        if verbose:
            self.Indicate()
        # Add the penalty contributions to the objective, gradient, and Hessian.
        for i in range(3):
            Objective[Letters[i]] += Extra[i]
        return Objective
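# A minimal usage sketch (hypothetical variable names; the real options and
# tgt_opts dictionaries come from forcebalance.parser.parse_inputs and the
# force field from forcebalance.forcefield.FF):
#
#     objective = Objective(options, tgt_opts, forcefield)
#     result = objective.Full(np.zeros(forcefield.np), Order=1, verbose=True)
#     print(result['X'], result['G'].shape)   # scalar value and gradient vector
#
# result also carries 'X0'/'G0'/'H0', the values before the penalty was added.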
324 """ Penalty functions for regularizing the force field optimizer. 326 The purpose for this module is to improve the behavior of our optimizer; 327 essentially, our problem is fraught with 'linear dependencies', a.k.a. 328 directions in the parameter space that the objective function does not 329 respond to. This would happen if a parameter is just plain useless, or 330 if there are two or more parameters that describe the same thing. 332 To accomplish these objectives, a penalty function is added to the 333 objective function. Generally, the more the parameters change (i.e. 334 the greater the norm of the parameter vector), the greater the 335 penalty. Note that this is added on after all of the other 336 contributions have been computed. This only matters if the penalty 337 'multiplies' the objective function: Obj + Obj*Penalty, but we also 338 have the option of an additive penalty: Obj + Penalty. 340 Statistically, this is called regularization. If the penalty function 341 is the norm squared of the parameter vector, it is called ridge regression. 342 There is also the option of using simply the norm, and this is called lasso, 343 but I think it presents problems for the optimizer that I need to work out. 345 Note that the penalty functions can be considered as part of a 'maximum 346 likelihood' framework in which we assume a PRIOR PROBABILITY of the 347 force field parameters around their initial values. The penalty function 348 is related to the prior by an exponential. Ridge regression corresponds 349 to a Gaussian prior and lasso corresponds to an exponential prior. There 350 is also 'elastic net regression' which interpolates between Gaussian 351 and exponential using a tuning parameter. 353 Our priors are adjustable too - there is one parameter, which is the width 354 of the distribution. We can even use a noninformative prior for the 355 distribution widths (hyperprior!). These are all important things to 358 Importantly, note that here there is no code that treats the distribution 359 width. That is because the distribution width is wrapped up in the 360 rescaling factors, which is essentially a coordinate transformation 361 on the parameter space. More documentation on this will follow, perhaps 362 in the 'rsmake' method. 365 Pen_Names = {
'HYP' : 1,
'HYPER' : 1,
'HYPERBOLIC' : 1,
'L1' : 1,
'HYPERBOLA' : 1,
366 'PARA' : 2,
'PARABOLA' : 2,
'PARABOLIC' : 2,
'L2': 2,
'QUADRATIC' : 2,
367 'BOX' : 3,
'FUSE' : 4,
'FUSE-L0' : 5,
'FUSE-BARRIER' : 6}
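    # Worked note on the prior <-> penalty correspondence described above: if the
    # prior on a (rescaled) parameter vector m is p(m) ~ exp(-P(m)), then minimizing
    # the penalized objective amounts to maximizing the posterior, and
    #
    #     P(m) = sum_i m_i**2                <-> Gaussian prior  (ridge / 'L2', ptyp 2)
    #     P(m) = sum_i |m_i|                 <-> Laplacian prior (lasso / 'L1')
    #     P(m) = sum_i sqrt(m_i**2+b**2)-b       (hyperbolic, ptyp 1) smoothly
    #            approximates |m_i| as b -> 0, which is how 'L1' is implemented here.
    #
    # The distribution width does not appear because it is absorbed into the
    # rescaling factors, as noted in the docstring.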
    def __init__(self, User_Option, ForceField, Factor_Add=0.0, Factor_Mult=0.0, Factor_B=0.1, Alpha=1.0, Power=2.0):
        ## Additive penalty strength.
        self.fadd = Factor_Add
        ## Multiplicative penalty strength.
        self.fmul = Factor_Mult
        ## Alpha parameter (switching / barrier distance for the fusion penalties).
        self.a    = Alpha
        ## The 'b' parameter (tightness of the hyperbolic penalty).
        self.b    = Factor_B
        ## Exponent of the L2 / box-style penalties.
        self.p    = Power
        ## The force field object.
        self.FF   = ForceField
        ## Integer penalty type looked up from the user option.
        self.ptyp = self.Pen_Names[User_Option.upper()]
        ## Table of penalty functions, keyed by penalty type.
        self.Pen_Tab = {1 : self.HYP, 2 : self.L2_norm, 3 : self.BOX, 4 : self.FUSE, 5 : self.FUSE_L0, 6 : self.FUSE_BARRIER}
        if User_Option.upper() == 'L1':
            logger.info("L1 norm uses the hyperbolic penalty, make sure penalty_hyperbolic_b is set sufficiently small\n")
        elif self.ptyp == 1:
            logger.info("Using hyperbolic regularization (Laplacian prior) with strength %.1e (+), %.1e (x) and tightness %.1e\n" % (Factor_Add, Factor_Mult, Factor_B))
        elif self.ptyp == 2:
            if Power == 2.0:
                logger.info("Using parabolic regularization (Gaussian prior) with strength %.1e (+), %.1e (x)\n" % (Factor_Add, Factor_Mult))
            elif Power > 2.0:
                logger.info("Using customized L2-regularization with exponent %.1f, strength %.1e (+), %.1e (x)\n" % (Power, Factor_Add, Factor_Mult))
            else:
                logger.error("In L2-regularization, penalty_power must be >= 2.0 (currently %.1f)\n" % (Power))
                raise RuntimeError
        elif self.ptyp == 3:
            if Power == 2.0:
                logger.info("Using box-style regularization with exponent %.1f, strength %.1e (+), %.1e (x): same as L2\n" % (Power, Factor_Add, Factor_Mult))
            elif Power > 2.0:
                logger.info("Using box-style regularization with exponent %.1f, strength %.1e (+), %.1e (x)\n" % (Power, Factor_Add, Factor_Mult))
            else:
                logger.error("In box-style regularization, penalty_power must be >= 2.0 (currently %.1f)\n" % (Power))
                raise RuntimeError
        elif self.ptyp == 4:
            logger.info("Using L1 Fusion Penalty (only relevant for basis set optimizations at the moment) with strength %.1e\n" % Factor_Add)
        elif self.ptyp == 5:
            logger.info("Using L0-L1 Fusion Penalty (only relevant for basis set optimizations at the moment) with strength %.1e and switching distance %.1e\n" % (Factor_Add, Alpha))
        elif self.ptyp == 6:
            logger.info("Using L1 Fusion Penalty with Log Barrier (only relevant for basis set optimizations at the moment) with strength %.1e and barrier distance %.1e\n" % (Factor_Add, Alpha))
        if self.ptyp not in (2, 3) and Power != 2.0:
            logger.error("Custom power %.2f is only supported with L2 or box-style regularization (penalty_type L2 or box)\n" % Power)
            raise RuntimeError
        ## Find exponential spacings (needed by the fusion penalties).
        if self.ptyp in [4, 5, 6]:
            self.spacings = ForceField.find_spacings()
    def compute(self, mvals, Objective):
        # Evaluate the selected penalty function: value K0, gradient K1, Hessian K2.
        K0, K1, K2 = self.Pen_Tab[self.ptyp](mvals)
        if self.fadd > 0.0:
            XAdd = K0 * self.fadd
            GAdd = K1 * self.fadd
            HAdd = K2 * self.fadd
        else:
            NP = len(mvals)
            XAdd = 0.0
            GAdd = np.zeros(NP)
            HAdd = np.zeros((NP, NP))
        if self.fmul > 0.0:
            X = Objective['X']
            G = Objective['G']
            H = Objective['H']
            XAdd += ( X*K0 ) * self.fmul
            GAdd += np.array( G*K0 + X*K1 ) * self.fmul
            GK1 = np.reshape(G, (1, -1))*np.reshape(K1, (-1, 1))
            K1G = np.reshape(K1, (1, -1))*np.reshape(G, (-1, 1))
            HAdd += np.array( H*K0 + GK1 + K1G + X*K2 ) * self.fmul
        return XAdd, GAdd, HAdd
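    # Worked note on the multiplicative branch above: with F = X + fmul*X*K(m)
    # (plus the additive terms), the product/chain rule gives
    #
    #     dF/dm_i       = G_i + fmul*(G_i*K0 + X*K1_i)
    #     d2F/dm_i dm_j = H_ij + fmul*(H_ij*K0 + G_i*K1_j + K1_i*G_j + X*K2_ij)
    #
    # which is exactly what GAdd and HAdd accumulate; GK1 and K1G are the two
    # outer-product cross terms.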
    def L2_norm(self, mvals):
        """
        Harmonic L2-norm constraints.  These are the ones that I use
        the most often to regularize my optimization.

        @param[in] mvals The parameter vector
        @return DC0 The norm squared of the vector
        @return DC1 The gradient of DC0
        @return DC2 The Hessian (just a constant)

        """
        mvals = np.array(mvals)
        if self.p == 2.0:
            DC0 = np.dot(mvals, mvals)
            DC1 = 2*np.array(mvals)
            DC2 = 2*np.eye(len(mvals))
        else:
            # Generalized penalty (m.m)**(p/2) with its analytic gradient and Hessian.
            m2 = np.dot(mvals, mvals)
            p = float(self.p)
            DC0 = m2**(p/2)
            DC1 = p*(m2**(p/2-1))*mvals
            DC2 = p*(m2**(p/2-1))*np.eye(len(mvals))
            DC2 += p*(p-2)*(m2**(p/2-2))*np.outer(mvals, mvals)
        return DC0, DC1, DC2
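    # Worked check of the custom-power branch: with f(m) = (m.m)**(p/2),
    #
    #     df/dm_i       = p*(m.m)**(p/2-1) * m_i
    #     d2f/dm_i dm_j = p*(m.m)**(p/2-1) * delta_ij
    #                     + p*(p-2)*(m.m)**(p/2-2) * m_i*m_j
    #
    # and setting p = 2 recovers DC0 = m.m, DC1 = 2*m, DC2 = 2*I.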
    def BOX(self, mvals):
        """
        Box-style constraints.  A penalty term of mvals[i]^Power is added for each parameter.

        If Power = 2.0 (default value of penalty_power) then this is the same as L2 regularization.
        If set to a larger number such as 12.0, then this corresponds to adding a flat-bottomed
        restraint to each parameter separately.

        @param[in] mvals The parameter vector
        @return DC0 The sum of each parameter raised to the Power
        @return DC1 The gradient of DC0
        @return DC2 The Hessian (diagonal)

        """
        if self.p == 2.0:
            return self.L2_norm(mvals)
        mvals = np.array(mvals)
        DC0 = np.sum(mvals**self.p)
        DC1 = self.p*(mvals**(self.p-1))
        DC2 = np.diag(self.p*(self.p-1)*(mvals**(self.p-2)))
        return DC0, DC1, DC2
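    # Worked example of the flat-bottom behavior for a large exponent, say p = 12
    # (values rounded): a parameter at 0.5 contributes 0.5**12 ~ 2.4e-4, i.e.
    # essentially nothing, while a parameter at 1.2 contributes 1.2**12 ~ 8.9,
    # so the penalty acts like a wall near |mvals[i]| ~ 1 and is flat inside it.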
    def HYP(self, mvals):
        """
        Hyperbolic constraints.  Depending on the 'b' parameter, the smaller it is,
        the closer we are to an L1-norm constraint.  If we use these, we expect
        a properly-behaving optimizer to make several of the parameters very nearly zero
        (which would be cool).

        @param[in] mvals The parameter vector
        @return DC0 The hyperbolic penalty
        @return DC1 The gradient
        @return DC2 The Hessian

        """
        mvals = np.array(mvals)
        sqt = (mvals**2 + self.b**2)**0.5
        DC0 = np.sum(sqt - self.b)
        DC1 = mvals*(1.0/sqt)
        DC2 = np.diag(self.b**2*(1.0/sqt**3))
        return DC0, DC1, DC2
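    # Worked limits of the hyperbolic term sqrt(m**2 + b**2) - b used above:
    # for |m| >> b it approaches |m| - b (the L1 norm, shifted), and for
    # |m| << b it approaches m**2/(2*b) (a parabola), so b controls how sharply
    # the penalty kinks at zero; DC2 = b**2/sqt**3 is the matching curvature.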
    def FUSE(self, mvals):
        # Group the basis set exponent parameters by element and angular momentum.
        Groups = defaultdict(list)
        for p, pid in enumerate(self.FF.plist):
            if 'Exponent' not in pid or len(pid.split()) != 1:
                warn_press_key("Fusion penalty currently implemented only for basis set optimizations, where parameters are like this: Exponent:Elem=H,AMom=D,Bas=0,Con=0")
            Data = dict([(i.split('=')[0], i.split('=')[1]) for i in pid.split(':')[1].split(',')])
            if 'Con' not in Data or Data['Con'] != '0':
                warn_press_key("More than one contraction coefficient found! You should expect the unexpected")
            key = Data['Elem']+'_'+Data['AMom']
            Groups[key].append(p)
        pvals = self.FF.create_pvals(mvals)
        DC0 = 0.0
        DC1 = np.zeros(self.FF.np)
        DC2 = np.zeros(self.FF.np)
        for gnm, pidx in Groups.items():
            # The group of parameters for a particular element / angular momentum.
            pvals_grp = pvals[pidx]
            # The order in which the parameters occur, from smallest to largest.
            Order = np.argsort(pvals_grp)
            for p in range(len(Order) - 1):
                # Indices of adjacent parameters: pvals[pi] is the smaller, pvals[pj] the larger.
                pi = pidx[Order[p]]
                pj = pidx[Order[p+1]]
                # Log-spacing between adjacent exponents.
                dp = np.log(pvals[pj]) - np.log(pvals[pi])
                # Hyperbolic (L1-like) penalty on the spacing.
                DC0 += (dp**2 + self.b**2)**0.5 - self.b
                DC1[pi] -= dp*(dp**2 + self.b**2)**-0.5
                DC1[pj] += dp*(dp**2 + self.b**2)**-0.5
        return DC0, DC1, np.diag(DC2)
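    # Note on the fusion penalty above: it acts on the log-spacing
    # dp = log(pvals[pj]) - log(pvals[pi]) between adjacent exponents in a group,
    # using the same hyperbola sqrt(dp**2 + b**2) - b as HYP.  Pulling dp toward
    # zero encourages neighboring exponents to fuse into one; DC2 is left at zero
    # here, so this penalty contributes no second-derivative information.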
    def FUSE_BARRIER(self, mvals):
        # Group the basis set exponent parameters by element and angular momentum.
        Groups = defaultdict(list)
        for p, pid in enumerate(self.FF.plist):
            if 'Exponent' not in pid or len(pid.split()) != 1:
                warn_press_key("Fusion penalty currently implemented only for basis set optimizations, where parameters are like this: Exponent:Elem=H,AMom=D,Bas=0,Con=0")
            Data = dict([(i.split('=')[0], i.split('=')[1]) for i in pid.split(':')[1].split(',')])
            if 'Con' not in Data or Data['Con'] != '0':
                warn_press_key("More than one contraction coefficient found! You should expect the unexpected")
            key = Data['Elem']+'_'+Data['AMom']
            Groups[key].append(p)
        pvals = self.FF.create_pvals(mvals)
        DC0 = 0.0
        DC1 = np.zeros(self.FF.np)
        DC2 = np.zeros(self.FF.np)
        for gnm, pidx in Groups.items():
            # The group of parameters for a particular element / angular momentum.
            pvals_grp = pvals[pidx]
            # The order in which the parameters occur, from smallest to largest.
            Order = np.argsort(pvals_grp)
            for p in range(len(Order) - 1):
                # Indices of adjacent parameters: pvals[pi] is the smaller, pvals[pj] the larger.
                pi = pidx[Order[p]]
                pj = pidx[Order[p+1]]
                # Log-spacing between adjacent exponents.
                dp = np.log(pvals[pj]) - np.log(pvals[pi])
                # Hyperbolic penalty plus a logarithmic barrier that keeps dp > 0.
                DC0 += (dp**2 + self.b**2)**0.5 - self.b - self.a*np.log(dp) + self.a*np.log(self.a)
                DC1[pi] -= dp*(dp**2 + self.b**2)**-0.5 - self.a/dp
                DC1[pj] += dp*(dp**2 + self.b**2)**-0.5 - self.a/dp
        return DC0, DC1, np.diag(DC2)
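    # Worked note on the barrier term above: -a*log(dp) diverges to +infinity as
    # dp -> 0+, so unlike FUSE this variant prevents adjacent exponents from
    # collapsing onto each other entirely.  Setting the derivative
    # dp/sqrt(dp**2 + b**2) - a/dp to zero shows the per-pair term is minimized
    # near dp ~ a when a >> b, which is why __init__ reports Alpha as the
    # "barrier distance"; the +a*log(a) piece is just an additive constant.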
    def FUSE_L0(self, mvals):
        # Group the basis set exponent parameters by element and angular momentum.
        Groups = defaultdict(list)
        for p, pid in enumerate(self.FF.plist):
            if 'Exponent' not in pid or len(pid.split()) != 1:
                warn_press_key("Fusion penalty currently implemented only for basis set optimizations, where parameters are like this: Exponent:Elem=H,AMom=D,Bas=0,Con=0")
            Data = dict([(i.split('=')[0], i.split('=')[1]) for i in pid.split(':')[1].split(',')])
            if 'Con' not in Data or Data['Con'] != '0':
                warn_press_key("More than one contraction coefficient found! You should expect the unexpected")
            key = Data['Elem']+'_'+Data['AMom']
            Groups[key].append(p)
        pvals = self.FF.create_pvals(mvals)
        DC0 = 0.0
        DC1 = np.zeros(self.FF.np)
        DC2 = np.zeros((self.FF.np, self.FF.np))
        for gnm, pidx in Groups.items():
            # The group of parameters for a particular element / angular momentum.
            pvals_grp = pvals[pidx]
            # The order in which the parameters occur, from smallest to largest.
            Order = np.argsort(pvals_grp)
            # Per-pair contributions (1 - exp(-h)), which count nearly-fused neighbors.
            Contribs = []
            for p in range(len(Order) - 1):
                # Indices of adjacent parameters: pvals[pi] is the smaller, pvals[pj] the larger.
                pi = pidx[Order[p]]
                pj = pidx[Order[p+1]]
                # Log-spacing between adjacent exponents.
                dp = np.log(pvals[pj]) - np.log(pvals[pi])
                # h(dp) is the hyperbolic penalty; hp and hpp are its first two derivatives.
                dp2b2 = dp**2 + self.b**2
                h   = self.a*((dp2b2)**0.5 - self.b)
                hp  = self.a*(dp*(dp2b2)**-0.5)
                hpp = self.a*(self.b**2*(dp2b2)**-1.5)
                # exp(-h) switches from 1 (fused pair) toward 0 (well-separated pair).
                emh = np.exp(-1*h)
                Contribs.append((1.0 - emh))