## Automatically adapted for scipy Oct 07, 2005 by convertcode.py

# @(#) $Jeannot: tnc.py,v 1.7 2004/04/02 20:40:21 js Exp $

# Copyright (c) 2004, Jean-Sebastien Roy (js@jeannot.org)

# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:

# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
TNC: A python interface to the TNC non-linear optimizer

TNC is a non-linear optimizer. To use it, you must provide a function to
minimize. The function must take one argument: the list of coordinates where
to evaluate the function; and it must return either a tuple, whose first
element is the value of the function, and whose second element is the
gradient of the function (as a list of values); or None, to abort the
minimization.
"""
38
from numpy import asarray
40
# Message levels accepted by the optimizer's `messages` bit mask.
MSG_NONE = 0 # No messages
MSG_ITER = 1 # One line per iteration
MSG_INFO = 2 # Informational messages
MSG_VERS = 4 # Version info
MSG_EXIT = 8 # Exit reasons
MSG_ALL = MSG_ITER + MSG_INFO + MSG_VERS + MSG_EXIT

# Human-readable description of each message level; the fmin_tnc docstring
# refers to this table as optimize.tnc.MSGS.
# NOTE(review): the `MSGS = {` opener and closing `}` were lost in
# extraction and have been restored here.
MSGS = {
        MSG_NONE : "No messages",
        MSG_ITER : "One line per iteration",
        MSG_INFO : "Informational messages",
        MSG_VERS : "Version info",
        MSG_EXIT : "Exit reasons",
        MSG_ALL : "All messages"
}
# Sentinel for "no bound" passed to the C extension. The literal 1e500
# overflows to the IEEE-754 float infinity at parse time.
HUGE_VAL=1e500 # No standard representation of Infinity in Python 2.3.3
# FIXME: can we use inf now that we have numpy and IEEE floats?
# Return codes produced by moduleTNC.minimize.
EINVAL = -2 # Invalid parameters (n<1)
INFEASIBLE = -1 # Infeasible (low > up)
LOCALMINIMUM = 0 # Local minima reach (|pg| ~= 0)
CONVERGED = 1 # Converged (|f_n-f_(n-1)| ~= 0)
MAXFUN = 2 # Max. number of function evaluations reach
LSFAIL = 3 # Linear search failed
CONSTANT = 4 # All lower bounds are equal to the upper bounds
NOPROGRESS = 5 # Unable to progress
USERABORT = 6 # User requested end of minimization

# Human-readable description of each return code; the fmin_tnc docstring
# refers to this table as optimize.tnc.RCSTRINGS.
# NOTE(review): the `RCSTRINGS = {` opener and closing `}` were lost in
# extraction and have been restored here.
RCSTRINGS = {
        EINVAL : "Invalid parameters (n<1)",
        INFEASIBLE : "Infeasible (low > up)",
        LOCALMINIMUM : "Local minima reach (|pg| ~= 0)",
        CONVERGED : "Converged (|f_n-f_(n-1)| ~= 0)",
        MAXFUN : "Max. number of function evaluations reach",
        LSFAIL : "Linear search failed",
        CONSTANT : "All lower bounds are equal to the upper bounds",
        NOPROGRESS : "Unable to progress",
        USERABORT : "User requested end of minimization"
}
# Changes to the interface made by Travis Oliphant, Apr. 2004, for inclusion
# in SciPy.
# NOTE(review): `optimize` is not imported anywhere in the visible source —
# its import line was presumably lost in extraction; confirm the original
# `import optimize` (or equivalent) before relying on this alias.
approx_fprime = optimize.approx_fprime
def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0, bounds=None, epsilon=1e-8,
             scale=None, messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1,
             stepmx=0, accuracy=0, fmin=0, ftol=0, rescale=-1):
    """Minimize a function with variables subject to bounds, using gradient
    information.

    returns (rc, nfeval, x).

    Inputs:

    func -- function to minimize. Called as func(x, *args)

    x0 -- initial guess to minimum

    fprime -- gradient of func. If None, then func returns the function
              value and the gradient ( f, g = func(x, *args) ).
              Called as fprime(x, *args)

    args -- arguments to pass to function

    approx_grad -- if true, approximate the gradient numerically

    bounds -- a list of (min, max) pairs for each element in x, defining
              the bounds on that parameter. Use None for one of min or max
              when there is no bound in that direction

    scale : scaling factors to apply to each variable (a list of floats)
            if None, the factors are up-low for interval bounded variables
            and 1+|x| for the others.

    messages : bit mask used to select messages display during minimization
               values defined in the optimize.tnc.MSGS dict.
               defaults to optimize.tnc.MSG_ALL

    maxCGit : max. number of hessian*vector evaluations per main iteration
              if maxCGit == 0, the direction chosen is -gradient
              if maxCGit < 0, maxCGit is set to max(1,min(50,n/2))

    maxfun : max. number of function evaluations
             if None, maxfun is set to max(1000, 100*len(x0))

    eta : severity of the line search. if < 0 or > 1, set to 0.25

    stepmx : maximum step for the line search. may be increased during call
             if too small, will be set to 10.0

    accuracy : relative precision for finite difference calculations
               if <= machine_precision, set to sqrt(machine_precision)

    fmin : minimum function value estimate

    ftol : precision goal for the value of f in the stopping criterion
           relative to the machine precision and the value of f.
           if ftol < 0.0, ftol is set to 0.0

    rescale : Scaling factor (in log10) used to trigger rescaling
              if 0, rescale at each iteration
              if a large value, never rescale
              if < 0, rescale is set to 1.3

    Outputs:

    x : the solution (a list of floats)

    nfeval : the number of function evaluations

    rc : return code (corresponding message in optimize.tnc.RCSTRINGS)

    See also:

      fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg --
                     multivariate local optimizers
      leastsq -- nonlinear least squares minimizer

      fmin_l_bfgs_b, fmin_tnc, fmin_cobyla --
                     constrained multivariate optimizers

      anneal, brute -- global optimizers

      fminbound, brent, golden, bracket -- local scalar minimizers

      fsolve -- n-dimensional root-finding

      brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding

      fixed_point -- scalar fixed-point finder
    """
    # NOTE(review): several statements of this body were lost in extraction;
    # the missing pieces (gradient dispatch, bounds translation, scale and
    # maxfun defaults) were reconstructed from the surviving fragments and
    # from the contract the docstring states — verify against upstream.
    n = len(x0)

    if bounds is None:
        bounds = [(None,None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')

    # Build the (f, g) callback expected by the C extension; three variants
    # depending on how the gradient is supplied.
    if approx_grad:
        def func_and_grad(x):
            x = asarray(x)
            f = func(x, *args)
            g = approx_fprime(x, func, epsilon, *args)
            return f, list(g)
    elif fprime is None:
        def func_and_grad(x):
            x = asarray(x)
            f, g = func(x, *args)
            return f, list(g)
    else:
        def func_and_grad(x):
            x = asarray(x)
            f = func(x, *args)
            g = fprime(x, *args)
            return f, list(g)

    # Translate None ("unbounded") entries into the +-HUGE_VAL convention
    # used by the C extension.
    low = [0]*n
    up = [0]*n
    for i in range(n):
        l, u = bounds[i]
        if l is None:
            low[i] = -HUGE_VAL
        else:
            low[i] = l
        if u is None:
            up[i] = HUGE_VAL
        else:
            up[i] = u

    # An empty scale list asks the C extension to compute default factors
    # (see the `scale` entry of the docstring).
    if scale is None:
        scale = []

    if maxfun is None:
        maxfun = max(1000, 100*len(x0))

    return moduleTNC.minimize(func_and_grad, x0, low, up, scale, messages,
                              maxCGit, maxfun, eta, stepmx, accuracy,
                              fmin, ftol, rescale)
if __name__ == '__main__':
228
# A function to minimize
230
f = pow(x[0],2.0)+pow(abs(x[1]),3.0)
233
g[1] = 3.0*pow(abs(x[1]),2.0)
239
rc, nf, x = fmin_tnc(function, [-7, 3], bounds=([-10, 10], [1, 10]))
241
print "After", nf, "function evaluations, TNC returned:", RCSTRINGS[rc]
243
print "exact value = [0, 1]"
249
# These tests are taken from Prof. K. Schittkowski test examples for
250
# constrained nonlinear programming.
251
# http://www.uni-bayreuth.de/departments/math/~kschittkowski/home.htm
254
f = 100.0*pow((x[1]-pow(x[0],2)),2)+pow(1.0-x[0],2)
256
dif[1] = 200.0*(x[1]-pow(x[0],2))
257
dif[0] = -2.0*(x[0]*(dif[1]-1.0)+1.0)
259
tests.append ((test1fg, [-2,1], ([-HUGE_VAL,None],[-1.5,None]), [1,1]))
262
f = 100.0*pow((x[1]-pow(x[0],2)),2)+pow(1.0-x[0],2)
264
dif[1] = 200.0*(x[1]-pow(x[0],2))
265
dif[0] = -2.0*(x[0]*(dif[1]-1.0)+1.0)
267
tests.append ((test2fg, [-2,1], [(-HUGE_VAL,None),(1.5,None)], [-1.2210262419616387,1.5]))
270
f = x[1]+pow(x[1]-x[0],2)*1.0e-5
272
dif[0] = -2.0*(x[1]-x[0])*1.0e-5
275
tests.append ((test3fg, [10,1], [(-HUGE_VAL,None),(0.0, None)], [0,0]))
278
f = pow(x[0]+1.0,3)/3.0+x[1]
280
dif[0] = pow(x[0]+1.0,2)
283
tests.append ((test4fg, [1.125,0.125], [(1, None),(0, None)], [1,0]))
288
f = sin(x[0]+x[1])+pow(x[0]-x[1],2)-1.5*x[0]+2.5*x[1]+1.0
291
v2 = 2.0*(x[0]-x[1]);
296
tests.append ((test5fg, [0,0], [(-1.5, 4),(-3,3)], [-0.54719755119659763, -1.5471975511965976]))
299
f = (100.0*pow(x[1]-pow(x[0],2),2)+pow(1.0-x[0],2)+90.0*pow(x[3]-pow(x[2],2),2) \
300
+pow(1.0-x[2],2)+10.1*(pow(x[1]-1.0,2)+pow(x[3]-1.0,2)) \
301
+19.8*(x[1]-1.0)*(x[3]-1.0))*1.0e-5
303
dif[0] = (-400.0*x[0]*(x[1]-pow(x[0],2))-2.0*(1.0-x[0]))*1.0e-5
304
dif[1] = (200.0*(x[1]-pow(x[0],2))+20.2*(x[1]-1.0)+19.8*(x[3]-1.0))*1.0e-5
305
dif[2] = (-360.0*x[2]*(x[3]-pow(x[2],2))-2.0*(1.0-x[2]))*1.0e-5
306
dif[3] = (180.0*(x[3]-pow(x[2],2))+20.2*(x[3]-1.0)+19.8*(x[1]-1.0))*1.0e-5
308
tests.append ((test38fg, [-3,-1,-3,-1], [(-10,10)]*4, [1]*4))
311
f = 2.0-x[0]*x[1]*x[2]*x[3]*x[4]/120.0
313
dif[0] = -x[1]*x[2]*x[3]*x[4]/120.0
314
dif[1] = -x[0]*x[2]*x[3]*x[4]/120.0
315
dif[2] = -x[0]*x[1]*x[3]*x[4]/120.0
316
dif[3] = -x[0]*x[1]*x[2]*x[4]/120.0
317
dif[4] = -x[0]*x[1]*x[2]*x[3]/120.0
319
tests.append ((test45fg, [2]*5, [(0,1),(0,2),(0,3),(0,4),(0,5)], [1,2,3,4,5]))
321
def test(fg, x, bounds, xopt):
322
print "** Test", fg.__name__
323
rc, nf, x = fmin_tnc(fg, x, bounds=bounds, messages = MSG_NONE, maxnfeval = 200)
324
print "After", nf, "function evaluations, TNC returned:", RCSTRINGS[rc]
326
print "exact value =", xopt
329
for y,yo in zip(x, xopt):
330
enorm += (y-yo)*(y-yo)
332
e = pow(enorm/norm, 0.5)
335
raise "Test "+fg.__name__+" failed"
337
for fg, x, bounds, xopt in tests:
338
test(fg, x, bounds, xopt)
341
print "** All TNC tests passed."