PaGMO 1.1.5
de_1220.cpp
1 /*****************************************************************************
2  * Copyright (C) 2004-2015 The PaGMO development team, *
3  * Advanced Concepts Team (ACT), European Space Agency (ESA) *
4  * *
5  * https://github.com/esa/pagmo *
6  * *
7  * act@esa.int *
8  * *
9  * This program is free software; you can redistribute it and/or modify *
10  * it under the terms of the GNU General Public License as published by *
11  * the Free Software Foundation; either version 2 of the License, or *
12  * (at your option) any later version. *
13  * *
14  * This program is distributed in the hope that it will be useful, *
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17  * GNU General Public License for more details. *
18  * *
19  * You should have received a copy of the GNU General Public License *
20  * along with this program; if not, write to the *
21  * Free Software Foundation, Inc., *
22  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
23  *****************************************************************************/
24 
25 #include <boost/random/uniform_int.hpp>
26 #include <boost/random/uniform_real.hpp>
27 #include <boost/random/normal_distribution.hpp>
28 #include <boost/random/variate_generator.hpp>
29 #include <string>
30 #include <vector>
31 
32 #include "../exceptions.h"
33 #include "../population.h"
34 #include "../types.h"
35 #include "base.h"
36 #include "de_1220.h"
37 
38 
39 
40 namespace pagmo { namespace algorithm {
41 
43 
55 de_1220::de_1220(int gen, int variant_adptv, const std::vector<int> & allowed_variants, bool memory, double ftol, double xtol):base(), m_gen(gen),
56  m_variant_adptv(variant_adptv), m_allowed_variants(allowed_variants), m_memory(memory), m_ftol(ftol), m_xtol(xtol), m_f(0), m_cr(0), m_variants(0) {
57  if (gen < 0) {
58  pagmo_throw(value_error,"number of generations must be nonnegative");
59  }
60  if (variant_adptv < 1 || variant_adptv > 2) {
61  pagmo_throw(value_error,"the index describing the adaptive variant must be either 1 or 2");
62  }
63  for (unsigned int i = 0; i < allowed_variants.size(); ++i) {
64  if ( (allowed_variants[i] <1) || (allowed_variants[i]>18) ) {
65  pagmo_throw(value_error,"allowed variants must all be in [1,18]");
66  }
67  }
68 }
69 
71 base_ptr de_1220::clone() const
72 {
73  return base_ptr(new de_1220(*this));
74 }
75 
77 
84 void de_1220::evolve(population &pop) const
85 {
86  // Let's store some useful variables.
87  const problem::base &prob = pop.problem();
88  const problem::base::size_type D = prob.get_dimension(), prob_i_dimension = prob.get_i_dimension(), prob_c_dimension = prob.get_c_dimension(), prob_f_dimension = prob.get_f_dimension();
89  const decision_vector &lb = prob.get_lb(), &ub = prob.get_ub();
90  const population::size_type NP = pop.size();
91  const problem::base::size_type Dc = D - prob_i_dimension;
92 
93 
94  //We perform some checks to determine whether the problem/population are suitable for DE
95  if ( Dc == 0 ) {
96  pagmo_throw(value_error,"There is no continuous part in the problem decision vector for DE to optimise");
97  }
98 
99  if ( prob_c_dimension != 0 ) {
100  pagmo_throw(value_error,"The problem is not box constrained and DE is not suitable to solve it");
101  }
102 
103  if ( prob_f_dimension != 1 ) {
104  pagmo_throw(value_error,"The problem is not single objective and DE is not suitable to solve it");
105  }
106 
107  if (NP < 8) {
108  pagmo_throw(value_error,"for DE Self-Adaptive at least 8 individuals in the population are needed");
109  }
110 
111  // Get out if there is nothing to do.
112  if (m_gen == 0) {
113  return;
114  }
115  // Some vectors used during evolution are allocated here.
116  decision_vector dummy(D), tmp(D); //dummy is used for initialisation purposes, tmp to contain the mutated candidate
117  std::vector<decision_vector> popold(NP,dummy), popnew(NP,dummy);
118  decision_vector gbX(D),gbIter(D);
119  fitness_vector newfitness(1); //new fitness of the mutated candidate
120  fitness_vector gbfit(1); //global best fitness
121  std::vector<fitness_vector> fit(NP,gbfit);
122 
123  //We extract from pop the chromosomes and the associated fitness values
124  for (std::vector<double>::size_type i = 0; i < NP; ++i) {
125  popold[i] = pop.get_individual(i).cur_x;
126  fit[i] = pop.get_individual(i).cur_f;
127  }
128  popnew = popold;
129 
130  // Initialise the global bests
131  gbX=pop.champion().x;
132  gbfit=pop.champion().f;
133  // container for the best decision vector of the current generation
134  gbIter = gbX;
135 
136  // Initializing the random number generators
137  boost::normal_distribution<double> normal(0.0,1.0);
138  boost::variate_generator<boost::lagged_fibonacci607 &, boost::normal_distribution<double> > n_dist(m_drng,normal);
139  boost::uniform_real<double> uniform(0.0,1.0);
140  boost::variate_generator<boost::lagged_fibonacci607 &, boost::uniform_real<double> > r_dist(m_drng,uniform);
141  boost::uniform_int<int> r_p_idx(0,NP-1);
142  boost::variate_generator<boost::mt19937 &, boost::uniform_int<int> > p_idx(m_urng,r_p_idx);
143  boost::uniform_int<int> r_c_idx(0,Dc-1);
144  boost::variate_generator<boost::mt19937 &, boost::uniform_int<int> > c_idx(m_urng,r_c_idx);
145  boost::uniform_int<int> r_v_idx(0,m_allowed_variants.size()-1);
146  boost::variate_generator<boost::mt19937 &, boost::uniform_int<int> > v_idx(m_urng,r_v_idx);
147 
148 
149  // Initialize the F, CR and variant vectors
150  if ( (m_cr.size() != NP) || (m_f.size() != NP) || (m_variants.size() != NP) || (!m_memory) ) {
151  m_cr.resize(NP); m_f.resize(NP); m_variants.resize(NP);
152  if (m_variant_adptv==1) {
153  for (size_t i = 0; i < NP; ++i) {
154  m_cr[i] = r_dist();
155  m_f[i] = r_dist() * 0.9 + 0.1;
156  }
157  }
158  else if (m_variant_adptv==2) {
159  for (size_t i = 0; i < NP; ++i) {
160  m_cr[i] = n_dist() * 0.15 + 0.5;
161  m_f[i] = n_dist() * 0.15 + 0.5;
162  }
163  }
164  for (size_t i = 0; i < NP; ++i) {
165  int idx = v_idx();
166  m_variants[i] = m_allowed_variants[idx];
167  }
168  }
169  // We initialize the global best for F and CR as the first individual (this will soon be forgotten)
170  double gbIterF = m_f[0];
171  double gbIterCR = m_cr[0];
172 
173  // Main DE iterations
174  size_t r1,r2,r3,r4,r5,r6,r7; //indices of the selected population members
175  for (int gen = 0; gen < m_gen; ++gen) {
176  //Start of the loop through the deme
177  for (size_t i = 0; i < NP; ++i) {
178  do { /* Pick a random population member */
179  /* Endless loop for NP < 2 !!! */
180  r1 = p_idx();
181  } while (r1==i);
182 
183  do { /* Pick a random population member */
184  /* Endless loop for NP < 3 !!! */
185  r2 = p_idx();
186  } while ((r2==i) || (r2==r1));
187 
188  do { /* Pick a random population member */
189  /* Endless loop for NP < 4 !!! */
190  r3 = p_idx();
191  } while ((r3==i) || (r3==r1) || (r3==r2));
192 
193  do { /* Pick a random population member */
194  /* Endless loop for NP < 5 !!! */
195  r4 = p_idx();
196  } while ((r4==i) || (r4==r1) || (r4==r2) || (r4==r3));
197 
198  do { /* Pick a random population member */
199  /* Endless loop for NP < 6 !!! */
200  r5 = p_idx();
201  } while ((r5==i) || (r5==r1) || (r5==r2) || (r5==r3) || (r5==r4));
202  do { /* Pick a random population member */
203  /* Endless loop for NP < 7 !!! */
204  r6 = p_idx();
205  } while ((r6==i) || (r6==r1) || (r6==r2) || (r6==r3) || (r6==r4) || (r6==r5));
206  do { /* Pick a random population member */
207  /* Endless loop for NP < 8 !!! */
208  r7 = p_idx();
209  } while ((r7==i) || (r7==r1) || (r7==r2) || (r7==r3) || (r7==r4) || (r7==r5) || (r7==r6));
210 
211  // Adapt amplification factor and crossover probability
212  double F=0, CR=0;
213  int VARIANT=0;
214  if (m_variant_adptv==1) {
215  F = (r_dist() < 0.9) ? m_f[i] : r_dist() * 0.9 + 0.1;
216  CR = (r_dist() < 0.9) ? m_cr[i] : r_dist();
217  }
218  VARIANT = (r_dist() < 0.9) ? m_variants[i] : m_allowed_variants[v_idx()];
219 
220  /*-------DE/best/1/exp--------------------------------------------------------------------*/
221  /*-------Our oldest strategy but still not bad. However, we have found several------------*/
222  /*-------optimization problems where misconvergence occurs.-------------------------------*/
223  if (VARIANT == 1) { /* strategy DE0 (not in our paper) */
224  if (m_variant_adptv==2) {
225  F = gbIterF + n_dist() * 0.5 * (m_f[r2]-m_f[r3]);
226  CR = gbIterCR + n_dist() * 0.5 * (m_cr[r2]-m_cr[r3]);
227  }
228  tmp = popold[i];
229  size_t n = c_idx(), L = 0;
230  do {
231  tmp[n] = gbIter[n] + F*(popold[r2][n]-popold[r3][n]);
232  n = (n+1)%Dc;
233  ++L;
234  } while ((r_dist() < CR) && (L < Dc));
235  }
236 
237  /*-------DE/rand/1/exp-------------------------------------------------------------------*/
238  /*-------This is one of my favourite strategies. It works especially well when the-------*/
239  /*-------"gbIter[]"-schemes experience misconvergence. Try e.g. m_f=0.7 and m_cr=0.5---------*/
240  /*-------as a first guess.---------------------------------------------------------------*/
241  else if (VARIANT == 2) { /* strategy DE1 in the techreport */
242  if (m_variant_adptv==2) {
243  F = m_f[r1] + n_dist() * 0.5 * (m_f[r2]-m_f[r3]);
244  CR = m_cr[r1] + n_dist() * 0.5 * (m_cr[r2]-m_cr[r3]);
245  }
246  tmp = popold[i];
247  size_t n = c_idx(), L = 0;
248  do {
249  tmp[n] = popold[r1][n] + F*(popold[r2][n]-popold[r3][n]);
250  n = (n+1)%Dc;
251  ++L;
252  } while ((r_dist() < CR) && (L < Dc));
253  }
254 
255  /*-------DE/rand-to-best/1/exp-----------------------------------------------------------*/
256 /*-------This strategy seems to be one of the best strategies. Try m_f=0.85 and m_cr=1.------*/
257  /*-------If you get misconvergence try to increase NP. If this doesn't help you----------*/
258  /*-------should play around with all three control variables.----------------------------*/
259  else if (VARIANT == 3) { /* similar to DE2 but generally better */
260  if (m_variant_adptv==2) {
261  F = m_f[i] + n_dist() * 0.5 * (gbIterF-m_f[i]) + n_dist() * 0.5 * (m_f[r1] - m_f[r2]);
262  CR = m_cr[i] + n_dist() * 0.5 * (gbIterCR-m_cr[i]) + n_dist() * 0.5 * (m_cr[r1] - m_cr[r2]);
263  }
264  tmp = popold[i];
265  size_t n = c_idx(), L = 0;
266  do {
267  tmp[n] = tmp[n] + F*(gbIter[n] - tmp[n]) + F*(popold[r1][n]-popold[r2][n]);
268  n = (n+1)%Dc;
269  ++L;
270  } while ((r_dist() < CR) && (L < Dc));
271  }
272  /*-------DE/best/2/exp is another powerful strategy worth trying--------------------------*/
273  else if (VARIANT == 4) {
274  if (m_variant_adptv==2) {
275  F = gbIterF + n_dist() * 0.5 * (m_f[r1] - m_f[r3]) + n_dist() * 0.5 * (m_f[r2] - m_f[r4]);
276  CR = gbIterCR + n_dist() * 0.5 * (m_cr[r1] - m_cr[r3]) + n_dist() * 0.5 * (m_cr[r2] - m_cr[r4]);
277  }
278  tmp = popold[i];
279  size_t n = c_idx(), L = 0;
280  do {
281  tmp[n] = gbIter[n] +
282  (popold[r1][n]+popold[r2][n]-popold[r3][n]-popold[r4][n])*F;
283  n = (n+1)%Dc;
284  ++L;
285  } while ((r_dist() < CR) && (L < Dc));
286  }
287  /*-------DE/rand/2/exp seems to be a robust optimizer for many functions-------------------*/
288  else if (VARIANT == 5) {
289  if (m_variant_adptv==2) {
290  F = m_f[r5] + n_dist() * 0.5 * (m_f[r1] - m_f[r3]) + n_dist() * 0.5 * (m_f[r2] - m_f[r4]);
291  CR = m_cr[r5] + n_dist() * 0.5 * (m_cr[r1] - m_cr[r3]) + n_dist() * 0.5 * (m_cr[r2] - m_cr[r4]);
292  }
293  tmp = popold[i];
294  size_t n = c_idx(), L = 0;
295  do {
296  tmp[n] = popold[r5][n] +
297  (popold[r1][n]+popold[r2][n]-popold[r3][n]-popold[r4][n])*F;
298  n = (n+1)%Dc;
299  ++L;
300  } while ((r_dist() < CR) && (L < Dc));
301  }
302 
303  /*=======Essentially same strategies but BINOMIAL CROSSOVER===============================*/
304 
305  /*-------DE/best/1/bin--------------------------------------------------------------------*/
306  else if (VARIANT == 6) {
307  if (m_variant_adptv==2) {
308  F = gbIterF + n_dist() * 0.5 * (m_f[r2]-m_f[r3]);
309  CR = gbIterCR + n_dist() * 0.5 * (m_cr[r2]-m_cr[r3]);
310  }
311  tmp = popold[i];
312  size_t n = c_idx();
313  for (size_t L = 0; L < Dc; ++L) { /* perform Dc binomial trials */
314  if ((r_dist() < CR) || L + 1 == Dc) { /* change at least one parameter */
315  tmp[n] = gbIter[n] + F*(popold[r2][n]-popold[r3][n]);
316  }
317  n = (n+1)%Dc;
318  }
319  }
320  /*-------DE/rand/1/bin-------------------------------------------------------------------*/
321  else if (VARIANT == 7) {
322  if (m_variant_adptv==2) {
323  F = m_f[r1] + n_dist() * 0.5 * (m_f[r2]-m_f[r3]);
324  CR = m_cr[r1] + n_dist() * 0.5 * (m_cr[r2]-m_cr[r3]);
325  }
326  tmp = popold[i];
327  size_t n = c_idx();
328  for (size_t L = 0; L < Dc; ++L) { /* perform Dc binomial trials */
329  if ((r_dist() < CR) || L + 1 == Dc) { /* change at least one parameter */
330  tmp[n] = popold[r1][n] + F*(popold[r2][n]-popold[r3][n]);
331  }
332  n = (n+1)%Dc;
333  }
334  }
335  /*-------DE/rand-to-best/1/bin-----------------------------------------------------------*/
336  else if (VARIANT == 8) {
337  if (m_variant_adptv==2) {
338  F = m_f[i] + n_dist() * 0.5 * (gbIterF-m_f[i]) + n_dist() * 0.5 * (m_f[r1] - m_f[r2]);
339  CR = m_cr[i] + n_dist() * 0.5 * (gbIterCR-m_cr[i]) + n_dist() * 0.5 * (m_cr[r1] - m_cr[r2]);
340  }
341  tmp = popold[i];
342  size_t n = c_idx();
343  for (size_t L = 0; L < Dc; ++L) { /* perform Dc binomial trials */
344  if ((r_dist() < CR) || L + 1 == Dc) { /* change at least one parameter */
345  tmp[n] = tmp[n] + F*(gbIter[n] - tmp[n]) + F*(popold[r1][n]-popold[r2][n]);
346  }
347  n = (n+1)%Dc;
348  }
349  }
350  /*-------DE/best/2/bin--------------------------------------------------------------------*/
351  else if (VARIANT == 9) {
352  if (m_variant_adptv==2) {
353  F = gbIterF + n_dist() * 0.5 * (m_f[r1] - m_f[r3]) + n_dist() * 0.5 * (m_f[r2] - m_f[r4]);
354  CR = gbIterCR + n_dist() * 0.5 * (m_cr[r1] - m_cr[r3]) + n_dist() * 0.5 * (m_cr[r2] - m_cr[r4]);
355  }
356  tmp = popold[i];
357  size_t n = c_idx();
358  for (size_t L = 0; L < Dc; ++L) { /* perform Dc binomial trials */
359  if ((r_dist() < CR) || L + 1 == Dc) { /* change at least one parameter */
360  tmp[n] = gbIter[n] +
361  (popold[r1][n]+popold[r2][n]-popold[r3][n]-popold[r4][n])*F;
362  }
363  n = (n+1)%Dc;
364  }
365  }
366  /*-------DE/rand/2/bin--------------------------------------------------------------------*/
367  else if (VARIANT == 10) {
368  if (m_variant_adptv==2) {
369  F = m_f[r5] + n_dist() * 0.5 * (m_f[r1] - m_f[r3]) + n_dist() * 0.5 * (m_f[r2] - m_f[r4]);
370  CR = m_cr[r5] + n_dist() * 0.5 * (m_cr[r1] - m_cr[r3]) + n_dist() * 0.5 * (m_cr[r2] - m_cr[r4]);
371  }
372  tmp = popold[i];
373  size_t n = c_idx();
374  for (size_t L = 0; L < Dc; ++L) { /* perform Dc binomial trials */
375  if ((r_dist() < CR) || L + 1 == Dc) { /* change at least one parameter */
376  tmp[n] = popold[r5][n] +
377  (popold[r1][n]+popold[r2][n]-popold[r3][n]-popold[r4][n])*F;
378  }
379  n = (n+1)%Dc;
380  }
381  }
382 
383  /*-------DE/best/3/exp--------------------------------------------------------------------*/
384  if (VARIANT == 11) {
385  if (m_variant_adptv==2) {
386  F = gbIterF + n_dist() * 0.5 * (m_f[r1] - m_f[r2]) + n_dist() * 0.5 * (m_f[r3] - m_f[r4]) + n_dist() * 0.5 * (m_f[r5] - m_f[r6]);
387  CR = gbIterCR + n_dist() * 0.5 * (m_cr[r1] - m_cr[r2]) + n_dist() * 0.5 * (m_cr[r3] - m_cr[r4]) + n_dist() * 0.5 * (m_cr[r5] - m_cr[r6]);
388  }
389  tmp = popold[i];
390  size_t n = c_idx(), L = 0;
391  do {
392  tmp[n] = gbIter[n] + F*(popold[r1][n]-popold[r2][n]) + F*(popold[r3][n]-popold[r4][n]) + F*(popold[r5][n]-popold[r6][n]);
393  n = (n+1)%Dc;
394  ++L;
395  } while ((r_dist() < CR) && (L < Dc));
396  }
397  /*-------DE/best/3/bin--------------------------------------------------------------------*/
398  else if (VARIANT == 12) {
399  if (m_variant_adptv==2) {
400  F = gbIterF + n_dist() * 0.5 * (m_f[r1] - m_f[r2]) + n_dist() * 0.5 * (m_f[r3] - m_f[r4]) + n_dist() * 0.5 * (m_f[r5] - m_f[r6]);
401  CR = gbIterCR + n_dist() * 0.5 * (m_cr[r1] - m_cr[r2]) + n_dist() * 0.5 * (m_cr[r3] - m_cr[r4]) + n_dist() * 0.5 * (m_cr[r5] - m_cr[r6]);
402  }
403  tmp = popold[i];
404  size_t n = c_idx();
405  for (size_t L = 0; L < Dc; ++L) { /* perform Dc binomial trials */
406  if ((r_dist() < CR) || L + 1 == Dc) { /* change at least one parameter */
407  tmp[n] = gbIter[n] + F*(popold[r1][n]-popold[r2][n]) + F*(popold[r3][n]-popold[r4][n]) + F*(popold[r5][n]-popold[r6][n]);
408  }
409  n = (n+1)%Dc;
410  }
411  }
412  /*-------DE/rand/3/exp--------------------------------------------------------------------*/
413  if (VARIANT == 13) {
414  if (m_variant_adptv==2) {
415  F = m_f[r7] + n_dist() * 0.5 * (m_f[r1] - m_f[r2]) + n_dist() * 0.5 * (m_f[r3] - m_f[r4]) + n_dist() * 0.5 * (m_f[r5] - m_f[r6]);
416  CR = m_cr[r7] + n_dist() * 0.5 * (m_cr[r1] - m_cr[r2]) + n_dist() * 0.5 * (m_cr[r3] - m_cr[r4]) + n_dist() * 0.5 * (m_cr[r5] - m_cr[r6]);
417  }
418  tmp = popold[i];
419  size_t n = c_idx(), L = 0;
420  do {
421  tmp[n] = popold[r7][n] + F*(popold[r1][n]-popold[r2][n]) + F*(popold[r3][n]-popold[r4][n]) + F*(popold[r5][n]-popold[r6][n]);
422  n = (n+1)%Dc;
423  ++L;
424  } while ((r_dist() < CR) && (L < Dc));
425  }
426  /*-------DE/rand/3/bin--------------------------------------------------------------------*/
427  else if (VARIANT == 14) {
428  if (m_variant_adptv==2) {
429  F = m_f[r7] + n_dist() * 0.5 * (m_f[r1] - m_f[r2]) + n_dist() * 0.5 * (m_f[r3] - m_f[r4]) + n_dist() * 0.5 * (m_f[r5] - m_f[r6]);
430  CR = m_cr[r7] + n_dist() * 0.5 * (m_cr[r1] - m_cr[r2]) + n_dist() * 0.5 * (m_cr[r3] - m_cr[r4]) + n_dist() * 0.5 * (m_cr[r5] - m_cr[r6]);
431  }
432  tmp = popold[i];
433  size_t n = c_idx();
434  for (size_t L = 0; L < Dc; ++L) { /* perform Dc binomial trials */
435  if ((r_dist() < CR) || L + 1 == Dc) { /* change at least one parameter */
436  tmp[n] = popold[r7][n] + F*(popold[r1][n]-popold[r2][n]) + F*(popold[r3][n]-popold[r4][n]) + F*(popold[r5][n]-popold[r6][n]);
437  }
438  n = (n+1)%Dc;
439  }
440  }
441  /*-------DE/rand-to-current/2/exp---------------------------------------------------------*/
442  if (VARIANT == 15) {
443  if (m_variant_adptv==2) {
444  F = m_f[r7] + n_dist() * 0.5 * (m_f[r1] - m_f[i]) + n_dist() * 0.5 * (m_f[r3] - m_f[r4]) + n_dist() * 0.5 * (m_f[r5] - m_f[r6]);
445  CR = m_cr[r7] + n_dist() * 0.5 * (m_cr[r1] - m_cr[i]) + n_dist() * 0.5 * (m_cr[r3] - m_cr[r4]) + n_dist() * 0.5 * (m_cr[r5] - m_cr[r6]);
446  }
447  tmp = popold[i];
448  size_t n = c_idx(), L = 0;
449  do {
450  tmp[n] = popold[r7][n] + F*(popold[r1][n]-popold[i][n]) + F*(popold[r3][n]-popold[r4][n]);
451  n = (n+1)%Dc;
452  ++L;
453  } while ((r_dist() < CR) && (L < Dc));
454  }
455  /*-------DE/rand-to-current/2/bin---------------------------------------------------------*/
456  else if (VARIANT == 16) {
457  if (m_variant_adptv==2) {
458  F = m_f[r7] + n_dist() * 0.5 * (m_f[r1] - m_f[i]) + n_dist() * 0.5 * (m_f[r3] - m_f[r4]) + n_dist() * 0.5 * (m_f[r5] - m_f[r6]);
459  CR = m_cr[r7] + n_dist() * 0.5 * (m_cr[r1] - m_cr[i]) + n_dist() * 0.5 * (m_cr[r3] - m_cr[r4]) + n_dist() * 0.5 * (m_cr[r5] - m_cr[r6]);
460  }
461  tmp = popold[i];
462  size_t n = c_idx();
463  for (size_t L = 0; L < Dc; ++L) { /* perform Dc binomial trials */
464  if ((r_dist() < CR) || L + 1 == Dc) { /* change at least one parameter */
465  tmp[n] = popold[r7][n] + F*(popold[r1][n]-popold[i][n]) + F*(popold[r3][n]-popold[r4][n]);
466  }
467  n = (n+1)%Dc;
468  }
469  }
470  /*-------DE/rand-to-best-and-current/2/exp------------------------------------------------*/
471  if (VARIANT == 17) {
472  if (m_variant_adptv==2) {
473  F = m_f[r7] + n_dist() * 0.5 * (m_f[r1] - m_f[i]) + n_dist() * 0.5 * (gbIterF - m_f[r4]);
474  CR = m_cr[r7] + n_dist() * 0.5 * (m_cr[r1] - m_cr[i]) + n_dist() * 0.5 * (gbIterCR - m_cr[r4]);
475  }
476  tmp = popold[i];
477  size_t n = c_idx(), L = 0;
478  do {
479  tmp[n] = popold[r7][n] + F*(popold[r1][n]-popold[i][n]) + F*(gbIter[n]-popold[r4][n]);
480  n = (n+1)%Dc;
481  ++L;
482  } while ((r_dist() < CR) && (L < Dc));
483  }
484  /*-------DE/rand-to-best-and-current/2/bin------------------------------------------------*/
485  else if (VARIANT == 18) {
486  if (m_variant_adptv==2) {
487  F = m_f[r7] + n_dist() * 0.5 * (m_f[r1] - m_f[i]) + n_dist() * 0.5 * (gbIterF - m_f[r4]);
488  CR = m_cr[r7] + n_dist() * 0.5 * (m_cr[r1] - m_cr[i]) + n_dist() * 0.5 * (gbIterCR - m_cr[r4]);
489  }
490  tmp = popold[i];
491  size_t n = c_idx();
492  for (size_t L = 0; L < Dc; ++L) { /* perform Dc binomial trials */
493  if ((r_dist() < CR) || L + 1 == Dc) { /* change at least one parameter */
494  tmp[n] = popold[r7][n] + F*(popold[r1][n]-popold[i][n]) + F*(gbIter[n]-popold[r4][n]);
495  }
496  n = (n+1)%Dc;
497  }
498  }
499 
500  /*=======Trial mutation now in tmp[]. Force feasibility and test how good this choice really was.==================*/
501  // a) feasibility
502  size_t i2 = 0;
503  while (i2<Dc) {
504  if ((tmp[i2] < lb[i2]) || (tmp[i2] > ub[i2]))
505  tmp[i2] = r_dist() * (ub[i2]-lb[i2]) + lb[i2];
506  ++i2;
507  }
508 
509  //b) how good?
510  prob.objfun(newfitness, tmp); /* Evaluate new vector in tmp[] */
511  if ( pop.problem().compare_fitness(newfitness,fit[i]) ) { /* improved objective function value ? */
512  fit[i]=newfitness;
513  popnew[i] = tmp;
514 
515  // Update the adapted parameters
516  m_cr[i] = CR;
517  m_f[i] = F;
518  m_variants[i] = VARIANT;
519 
520  // As a fitness improvement occurred we move the point
521  // and thus can evaluate a new velocity
522  std::transform(tmp.begin(), tmp.end(), pop.get_individual(i).cur_x.begin(), tmp.begin(),std::minus<double>());
523 
524  //update x and v (the cache avoids recomputing the objective function)
525  pop.set_x(i,popnew[i]);
526  pop.set_v(i,tmp);
527  if ( pop.problem().compare_fitness(newfitness,gbfit) ) {
528  /* if so...*/
529  gbfit=newfitness; /* reset gbfit to new low...*/
530  gbX=popnew[i];
531  }
532  } else {
533  popnew[i] = popold[i];
534  }
535 
536  }//End of the loop through the deme
537 
538  /* Save best population member of current iteration */
539  gbIter = gbX;
540 
541  /* swap population arrays. New generation becomes old one */
542  std::swap(popold, popnew);
543 
544 
545  // Check the exit conditions (every 40 generations)
546  if (gen % 40 == 0) {
547  double dx = 0;
548  for (decision_vector::size_type i = 0; i < D; ++i) {
549  tmp[i] = pop.get_individual(pop.get_worst_idx()).best_x[i] - pop.get_individual(pop.get_best_idx()).best_x[i];
550  dx += std::fabs(tmp[i]);
551  }
552 
553  if ( dx < m_xtol ) {
554  if (m_screen_output) {
555  std::cout << "Exit condition -- xtol < " << m_xtol << std::endl;
556  }
557  return;
558  }
559 
560  double mah = std::fabs(pop.get_individual(pop.get_worst_idx()).best_f[0] - pop.get_individual(pop.get_best_idx()).best_f[0]);
561 
562  if (mah < m_ftol) {
563  if (m_screen_output) {
564  std::cout << "Exit condition -- ftol < " << m_ftol << std::endl;
565  }
566  return;
567  }
568  }
569 
570 
571  }//end main DE iterations
572  if (m_screen_output) {
573  std::cout << "Exit condition -- generations > " << m_gen << std::endl;
574  }
575 
576 }
577 
579 std::string de_1220::get_name() const
580 {
581  return "DE - 1220";
582 }
583 
585 
588 std::string de_1220::human_readable_extra() const
589 {
590  std::ostringstream s;
591  s << "gen:" << m_gen << ' ';
592  s << "self_adaptation:" << m_variant_adptv << ' ';
593  s << "variants:" << m_allowed_variants << ' ';
594  s << "memory:" << m_memory << ' ';
595  s << "ftol:" << m_ftol << ' ';
596  s << "xtol:" << m_xtol;
597 
598  return s.str();
599 }
600 
601 }} //namespaces
602 
603 BOOST_CLASS_EXPORT_IMPLEMENT(pagmo::algorithm::de_1220)
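
As a usage reference, here is a minimal sketch of how this algorithm can be driven through the PaGMO 1.x API. The umbrella header path and the choice of pagmo::problem::rosenbrock as a test problem are assumptions; the de_1220 constructor arguments mirror the signature defined at line 55 above, and the population size respects the minimum of 8 individuals enforced in evolve().

#include <iostream>
#include <vector>

#include "pagmo/src/pagmo.h" // assumed umbrella header; adjust the path to your PaGMO 1.1.x installation

int main()
{
    // 10-dimensional Rosenbrock test problem (assumed available as pagmo::problem::rosenbrock).
    pagmo::problem::rosenbrock prob(10);

    // Population of 20 individuals; evolve() above requires at least 8 for self-adaptive DE.
    pagmo::population pop(prob, 20);

    // Restrict self-adaptation to the rand/1 variants (2 = exponential, 7 = binomial crossover),
    // use the Gaussian adaptation scheme (variant_adptv = 2) and keep memory across calls.
    std::vector<int> allowed;
    allowed.push_back(2);
    allowed.push_back(7);
    pagmo::algorithm::de_1220 algo(500, 2, allowed, true, 1e-6, 1e-6);

    // Run up to 500 generations of self-adaptive DE on the population.
    algo.evolve(pop);

    std::cout << algo.get_name() << " best fitness: " << pop.champion().f[0] << std::endl;
    return 0;
}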