1
"""Differantial Evolution Optimization
5
Copyright 2005 by Robert Kern.
9
from scipy import stats
12
# Notes: for future modifications:
13
# Ali, M. M., and A. Toern. Topographical differential evolution using
14
# pre-calculated differentials. _Stochastic and Global Optimization_. 1--17.
17
# F = max(l_min, 1-min(abs(f_min/f_max), abs(f_max/f_min)))
18
# ~ 0.3 <= l_min <= 0.4
19
# ~ f_min and f_max are the minimum and maximum values in the initial
22
# Pre-calculated differentials:
23
# Keep a set of differentials A.
24
# For each x_i of the population S:
25
# Every M steps, randomly select 3 points x_r1, x_r2, x_r3 from S (not x_i).
26
# Compute new x_i using x_r1 + F*(x_r2-x_r3).
27
# Store differential in A.
29
# Randomly select x_r1 from S and a differential vector from A.
32
# Convergence criterion:
36
# Two populations S and Sa (auxiliary).
37
# Phase counter t = 0 and array shift[:] = False.
38
# Stopping condition: e.g. t >= 4.
39
# g << N, number of nearest neighbors to search for graph minima.
40
# Ng << N, number of points for graph.
41
# For each x_i in S, do DEPD as described above to get y_i.
43
# if shift[i] is False:
49
# Find graph minima of f(x) using the Ng best points in S.
50
# Do local search from each minimum.
51
# Replace worst Ng points in S with best Ng points in Sa.
52
# If best from this phase is better than previous best, t=0.
57
class DiffEvolver(object):
58
"""Minimize a function using differential evolution.
62
DiffEvolver(func, pop0, args=(), crossover_rate=0.5, scale=None,
63
strategy=('rand', 2, 'bin'), eps=1e-6)
64
func -- function to minimize
65
pop0 -- sequence of initial vectors
66
args -- additional arguments to apply to func
67
crossover_rate -- crossover probability [0..1] usually 0.5 or so
68
scale -- scaling factor to apply to differences [0..1] usually > 0.5
69
if None, then calculated from pop0 using a heuristic
70
strategy -- tuple specifying the differencing/crossover strategy
71
The first element is one of 'rand', 'best', 'rand-to-best' to specify
72
how to obtain an initial trial vector.
73
The second element is either 1 or 2 (or only 1 for 'rand-to-best') to
74
specify the number of difference vectors to add to the initial trial.
75
The third element is (currently) 'bin' to specify binomial crossover.
76
eps -- if the maximum and minimum function values of a given generation are
77
within eps of each other, convergence has been achieved.
79
DiffEvolver.frombounds(func, lbound, ubound, npop, crossover_rate=0.5,
80
scale=None, strategy=('rand', 2, 'bin'), eps=1e-6)
81
Randomly initialize the population within given rectangular bounds.
82
lbound -- lower bound vector
83
ubound -- upper bound vector
84
npop -- size of population
89
Run the minimizer for newgens more generations. Return the best parameter
90
vector from the whole run.
94
best_value -- lowest function value in the history
95
best_vector -- minimizing vector
96
best_val_history -- list of best_value's for each generation
97
best_vec_history -- list of best_vector's for each generation
98
population -- current population
99
pop_values -- respective function values for each of the current population
100
generations -- number of generations already computed
101
func, args, crossover_rate, scale, strategy, eps -- from constructor
103
def __init__(self, func, pop0, args=(), crossover_rate=0.5, scale=None,
104
strategy=('rand', 2, 'bin'), eps=1e-6):
106
# NOTE(review): this extraction is damaged -- indentation is lost, the
# interleaved bare numbers are original line numbers, and several
# statements (e.g. the self.func/self.args/self.eps assignments and the
# `self.jump_table = {` opener) are missing from view. Comments below
# annotate only what is visible.
# Copy the initial population; each row is one parameter vector.
self.population = sp.array(pop0)
107
# npop members, each of dimension ndim.
self.npop, self.ndim = self.population.shape
109
self.crossover_rate = crossover_rate
110
self.strategy = strategy
113
# Seed function values, one per member.
# NOTE(review): relies on `self.func`, whose assignment is not visible
# in this view -- presumably `self.func = func` on a missing line.
self.pop_values = [self.func(m, *args) for m in self.population]
114
bestidx = sp.argmin(self.pop_values)
115
self.best_vector = self.population[bestidx]
116
self.best_value = self.pop_values[bestidx]
119
# Heuristic differencing scale from the initial spread of values (see
# calculate_scale); presumably guarded by `if scale is None:` on a
# missing line -- TODO confirm.
self.scale = self.calculate_scale()
124
self.best_val_history = []
125
self.best_vec_history = []
128
# Strategy dispatch: key -> (base-vector chooser, differencer, crossover).
# NOTE(review): the `self.jump_table = {` opener is missing from view.
('rand', 1, 'bin'): (self.choose_rand, self.diff1, self.bin_crossover),
129
('rand', 2, 'bin'): (self.choose_rand, self.diff2, self.bin_crossover),
130
('best', 1, 'bin'): (self.choose_best, self.diff1, self.bin_crossover),
131
('best', 2, 'bin'): (self.choose_best, self.diff2, self.bin_crossover),
132
('rand-to-best', 1, 'bin'):
133
(self.choose_rand_to_best, self.diff1, self.bin_crossover),
137
self.best_val_history = []
138
self.best_vec_history = []
140
self.pop_values = [self.func(m, *self.args) for m in self.population]
142
def frombounds(cls, func, lbound, ubound, npop, crossover_rate=0.5,
        scale=None, strategy=('rand', 2, 'bin'), eps=1e-6):
    """Alternate constructor: draw the initial population uniformly at
    random from the axis-aligned box with corners lbound and ubound.

    lbound -- lower bound vector
    ubound -- upper bound vector
    npop -- number of vectors in the population
    Remaining keyword arguments are passed through to the constructor.
    """
    lbound = sp.asarray(lbound)
    ubound = sp.asarray(ubound)
    # Stretch unit-uniform samples over the width of the box, then
    # shift them to start at the lower corner.
    width = ubound - lbound
    pop0 = lbound + stats.rand(npop, len(lbound)) * width
    return cls(func, pop0, crossover_rate=crossover_rate, scale=scale,
        strategy=strategy, eps=eps)
frombounds = classmethod(frombounds)
151
def calculate_scale(self):
    """Heuristic differencing scale, after Ali & Toern:
    F = max(0.3, 1 - min(|f_min/f_max|, |f_max/f_min|)),
    computed from the extreme function values of the current population.
    """
    worst = max(self.pop_values)
    rat = abs(worst / self.best_value)
    rat = min(rat, 1. / rat)
    # Never let the scale drop below 0.3.
    return max(0.3, 1. - rat)
156
def bin_crossover(self, oldgene, newgene):
    """Binomial (uniform) crossover: each coordinate comes from newgene
    with probability self.crossover_rate, otherwise from oldgene.
    """
    take_new = stats.rand(self.ndim) < self.crossover_rate
    return sp.where(take_new, newgene, oldgene)
160
def select_samples(self, candidate, nsamples):
    """Return nsamples distinct population indices chosen uniformly at
    random, never including the candidate's own index.

    candidate -- index of the member being mutated (excluded)
    nsamples -- number of distinct indices to draw
    """
    # list() so that .remove() works whether range() yields a list
    # (Python 2) or a lazy range object (Python 3); behavior on
    # Python 2 is unchanged.
    possibilities = list(range(self.npop))
    possibilities.remove(candidate)
    return stats.permutation(possibilities)[:nsamples]
165
def diff1(self, candidate):
    """One scaled difference vector, built from two distinct population
    members other than candidate.
    """
    a, b = self.select_samples(candidate, 2)
    return self.scale * (self.population[a] - self.population[b])
169
def diff2(self, candidate):
    """Sum of two scaled difference vectors, built from four distinct
    population members other than candidate.
    """
    a, b, c, d = self.select_samples(candidate, 4)
    combined = self.population[a] - self.population[b] \
               + self.population[c] - self.population[d]
    return self.scale * combined
174
def choose_best(self, candidate):
    """Base vector for the 'best' strategies: always the best point
    found so far. The candidate index is ignored.
    """
    return self.best_vector
177
def choose_rand(self, candidate):
    """Base vector for the 'rand' strategies: a randomly chosen member
    other than candidate (indexed with a length-1 array, as the
    original does).
    """
    idx = self.select_samples(candidate, 1)
    return self.population[idx]
181
def choose_rand_to_best(self, candidate):
    """Base vector for the 'rand-to-best' strategy: the candidate's own
    vector pulled toward the best-so-far vector, weighted by self.scale.
    """
    w = self.scale
    return (1 - w) * self.population[candidate] + w * self.best_vector
185
def get_trial(self, candidate):
186
# Build one trial vector for this candidate: look up the configured
# (chooser, differ, crossover) triple, form base + difference, then
# cross it over with the candidate's current vector.
chooser, differ, crosser = self.jump_table[self.strategy]
187
trial = crosser(self.population[candidate],
188
chooser(candidate) + differ(candidate))
# NOTE(review): a trailing `return trial` appears to have been lost in
# this extraction (original lines 189-190 are absent) -- TODO confirm.
192
return max(self.pop_values) - min(self.pop_values) <= self.eps
194
def solve(self, newgens=100):
195
"""Run for newgens more generations.
197
Return best parameter vector from the entire run.
"""
199
# One generation = one greedy improvement attempt per population member.
# (`xrange` marks this as Python 2 code.)
for gen in xrange(self.generations+1, self.generations+newgens+1):
200
for candidate in range(self.npop):
201
trial = self.get_trial(candidate)
202
trial_value = self.func(trial, *self.args)
203
# Greedy selection: the trial replaces the member only if strictly better.
if trial_value < self.pop_values[candidate]:
204
self.population[candidate] = trial
205
self.pop_values[candidate] = trial_value
206
# Track the global best seen so far.
if trial_value < self.best_value:
207
self.best_vector = trial
208
self.best_value = trial_value
209
# Record the per-generation best for post-hoc inspection.
self.best_val_history.append(self.best_value)
210
self.best_vec_history.append(self.best_vector)
# NOTE(review): an early exit on self.converged() (original lines
# 211-212) appears to be missing from this extraction -- TODO confirm.
213
self.generations = gen
214
return self.best_vector