PaGMO  1.1.5
jde.cpp
1 /*****************************************************************************
2  * Copyright (C) 2004-2015 The PaGMO development team, *
3  * Advanced Concepts Team (ACT), European Space Agency (ESA) *
4  * *
5  * https://github.com/esa/pagmo *
6  * *
7  * act@esa.int *
8  * *
9  * This program is free software; you can redistribute it and/or modify *
10  * it under the terms of the GNU General Public License as published by *
11  * the Free Software Foundation; either version 2 of the License, or *
12  * (at your option) any later version. *
13  * *
14  * This program is distributed in the hope that it will be useful, *
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17  * GNU General Public License for more details. *
18  * *
19  * You should have received a copy of the GNU General Public License *
20  * along with this program; if not, write to the *
21  * Free Software Foundation, Inc., *
22  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
23  *****************************************************************************/
24 
25 #include <boost/random/uniform_int.hpp>
26 #include <boost/random/uniform_real.hpp>
27 #include <boost/random/normal_distribution.hpp>
28 #include <boost/random/variate_generator.hpp>
29 #include <string>
30 #include <vector>
31 
32 #include "../exceptions.h"
33 #include "../population.h"
34 #include "../types.h"
35 #include "base.h"
36 #include "jde.h"
37 
38 namespace pagmo { namespace algorithm {
39 
41 
/// Constructor.
/**
 * Builds a jDE algorithm instance, validating the user supplied parameters.
 *
 * @param[in] gen number of generations to evolve (must be nonnegative).
 * @param[in] variant mutation/crossover strategy index, in [1,18] (see evolve() for the mapping).
 * @param[in] variant_adptv self-adaptation scheme, in [1,2] (1: uniform re-draw of F/CR, 2: normal perturbation).
 * @param[in] ftol stopping tolerance on the fitness spread.
 * @param[in] xtol stopping tolerance on the decision-vector spread.
 * @param[in] memory if true, the adapted F/CR vectors are kept across successive calls to evolve().
 * @throws value_error if gen is negative, variant is outside [1,18] or variant_adptv is outside [1,2].
 */
jde::jde(int gen, int variant, int variant_adptv, double ftol, double xtol, bool memory):base(), m_gen(gen), m_f(0), m_cr(0),
	m_variant(variant), m_variant_adptv(variant_adptv), m_ftol(ftol), m_xtol(xtol), m_memory(memory) {
	// NOTE: m_f and m_cr start empty; they are sized to the population in evolve().
	if (gen < 0) {
		pagmo_throw(value_error,"number of generations must be nonnegative");
	}
	if (variant < 1 || variant > 18) {
		pagmo_throw(value_error,"variant index must be one of 1 ... 18");
	}
	if (variant_adptv < 1 || variant_adptv > 2) {
		pagmo_throw(value_error,"adaptive variant index must be one of 1 ... 2");
	}
}
64 
67 {
68  return base_ptr(new jde(*this));
69 }
70 
72 
/// Evolve implementation.
/**
 * Runs the jDE (self-adaptive Differential Evolution) algorithm on the input
 * population for m_gen generations, or until the xtol/ftol exit conditions
 * are met (checked every 5 generations).
 *
 * @param[in,out] pop input/output pagmo::population to be evolved.
 * @throws value_error if the problem has no continuous part, is constrained,
 * is multi-objective, or if the population holds fewer than 8 individuals.
 */
void jde::evolve(population &pop) const
{
	// Let's store some useful variables.
	const problem::base &prob = pop.problem();
	const problem::base::size_type D = prob.get_dimension(), prob_i_dimension = prob.get_i_dimension(), prob_c_dimension = prob.get_c_dimension(), prob_f_dimension = prob.get_f_dimension();
	const decision_vector &lb = prob.get_lb(), &ub = prob.get_ub();
	const population::size_type NP = pop.size();
	// Continuous dimension: DE mutates only the continuous part of the chromosome.
	const problem::base::size_type Dc = D - prob_i_dimension;


	// We perform some checks to determine whether the problem/population are suitable for DE
	if ( Dc == 0 ) {
		pagmo_throw(value_error,"There is no continuous part in the problem decision vector for DE to optimise");
	}

	if ( prob_c_dimension != 0 ) {
		pagmo_throw(value_error,"The problem is not box constrained and DE is not suitable to solve it");
	}

	if ( prob_f_dimension != 1 ) {
		pagmo_throw(value_error,"The problem is not single objective and DE is not suitable to solve it");
	}

	// 8 mutually distinct indexes (i and r1..r7) must be drawable below, hence NP >= 8.
	if (NP < 8) {
		pagmo_throw(value_error,"for jDE at least 8 individuals in the population are needed");
	}

	// Get out if there is nothing to do.
	if (m_gen == 0) {
		return;
	}
	// Some vectors used during evolution are allocated here.
	decision_vector dummy(D), tmp(D); // dummy is used for initialisation purposes, tmp to contain the mutated candidate
	std::vector<decision_vector> popold(NP,dummy), popnew(NP,dummy);
	decision_vector gbX(D),gbIter(D);
	fitness_vector newfitness(1); // new fitness of the mutated candidate
	fitness_vector gbfit(1); // global best fitness
	std::vector<fitness_vector> fit(NP,gbfit);

	// We extract from pop the chromosomes and fitness associated
	for (std::vector<double>::size_type i = 0; i < NP; ++i) {
		popold[i] = pop.get_individual(i).cur_x;
		fit[i] = pop.get_individual(i).cur_f;
	}
	popnew = popold;

	// Initialise the global bests
	gbX=pop.champion().x;
	gbfit=pop.champion().f;
	// container for the best decision vector of generation
	gbIter = gbX;

	// Initializing the random number generators
	boost::normal_distribution<double> normal(0.0,1.0);
	boost::variate_generator<boost::lagged_fibonacci607 &, boost::normal_distribution<double> > n_dist(m_drng,normal);
	boost::uniform_real<double> uniform(0.0,1.0);
	boost::variate_generator<boost::lagged_fibonacci607 &, boost::uniform_real<double> > r_dist(m_drng,uniform);
	boost::uniform_int<int> r_p_idx(0,NP-1); // draws a random population index
	boost::variate_generator<boost::mt19937 &, boost::uniform_int<int> > p_idx(m_urng,r_p_idx);
	boost::uniform_int<int> r_c_idx(0,Dc-1); // draws a random continuous-component index
	boost::variate_generator<boost::mt19937 &, boost::uniform_int<int> > c_idx(m_urng,r_c_idx);


	// Initialize the F and CR vectors (skipped only when memory is active and they
	// already have the right size from a previous call)
	if ( (m_cr.size() != NP) || (m_f.size() != NP) || (!m_memory) ) {
		m_cr.resize(NP); m_f.resize(NP);
		if (m_variant_adptv==1) {
			// Scheme 1: CR uniform in [0,1], F uniform in [0.1,1]
			for (size_t i = 0; i < NP; ++i) {
				m_cr[i] = r_dist();
				m_f[i] = r_dist() * 0.9 + 0.1;
			}
		}
		else if (m_variant_adptv==2) {
			// Scheme 2: CR and F drawn from a normal N(0.5, 0.15)
			for (size_t i = 0; i < NP; ++i) {
				m_cr[i] = n_dist() * 0.15 + 0.5;
				m_f[i] = n_dist() * 0.15 + 0.5;
			}
		}
	}
	// We initialize the global best for F and CR as the first individual (this will soon be forgotten)
	double gbIterF = m_f[0];
	double gbIterCR = m_cr[0];

	// Main DE iterations
	size_t r1,r2,r3,r4,r5,r6,r7; // indexes to the selected population members
	for (int gen = 0; gen < m_gen; ++gen) {
		// 0 - Check the exit conditions (every 5 generations)
		if (gen % 5 == 0) {
			// Spread in decision space between the current worst and best individuals
			double dx = 0;
			for (decision_vector::size_type i = 0; i < D; ++i) {
				tmp[i] = pop.get_individual(pop.get_worst_idx()).best_x[i] - pop.get_individual(pop.get_best_idx()).best_x[i];
				dx += std::fabs(tmp[i]);
			}

			if ( dx < m_xtol ) {
				if (m_screen_output) {
					std::cout << "Exit condition -- xtol < " << m_xtol << std::endl;
				}
				return;
			}

			// Spread in fitness between the current worst and best individuals
			double mah = std::fabs(pop.get_individual(pop.get_worst_idx()).best_f[0] - pop.get_individual(pop.get_best_idx()).best_f[0]);

			if (mah < m_ftol) {
				if (m_screen_output) {
					std::cout << "Exit condition -- ftol < " << m_ftol << std::endl;
				}
				return;
			}
		}

		// Start of the loop through the deme
		for (size_t i = 0; i < NP; ++i) {
			// Draw 7 random population indexes, all distinct from each other and from i.
			// NOTE: each do/while below would loop endlessly if NP were too small; the
			// NP >= 8 check above guarantees termination.
			do { /* Pick a random population member */
				/* Endless loop for NP < 2 !!! */
				r1 = p_idx();
			} while (r1==i);

			do { /* Pick a random population member */
				/* Endless loop for NP < 3 !!! */
				r2 = p_idx();
			} while ((r2==i) || (r2==r1));

			do { /* Pick a random population member */
				/* Endless loop for NP < 4 !!! */
				r3 = p_idx();
			} while ((r3==i) || (r3==r1) || (r3==r2));

			do { /* Pick a random population member */
				/* Endless loop for NP < 5 !!! */
				r4 = p_idx();
			} while ((r4==i) || (r4==r1) || (r4==r2) || (r4==r3));

			do { /* Pick a random population member */
				/* Endless loop for NP < 6 !!! */
				r5 = p_idx();
			} while ((r5==i) || (r5==r1) || (r5==r2) || (r5==r3) || (r5==r4));
			do { /* Pick a random population member */
				/* Endless loop for NP < 7 !!! */
				r6 = p_idx();
			} while ((r6==i) || (r6==r1) || (r6==r2) || (r6==r3) || (r6==r4) || (r6==r5));
			do { /* Pick a random population member */
				/* Endless loop for NP < 8 !!! */
				r7 = p_idx();
			} while ((r7==i) || (r7==r1) || (r7==r2) || (r7==r3) || (r7==r4) || (r7==r5) || (r7==r6));

			// Adapt amplification factor and crossover probability.
			// Scheme 1 (jDE): with probability 0.1 re-draw F/CR uniformly, else keep the
			// individual's current values. Scheme 2 is handled per-variant below.
			double F=0, CR=0;
			if (m_variant_adptv==1) {
				F = (r_dist() < 0.9) ? m_f[i] : r_dist() * 0.9 + 0.1;
				CR = (r_dist() < 0.9) ? m_cr[i] : r_dist();
			}

			/*-------DE/best/1/exp--------------------------------------------------------------------*/
			/*-------Our oldest strategy but still not bad. However, we have found several------------*/
			/*-------optimization problems where misconvergence occurs.-------------------------------*/
			if (m_variant == 1) { /* strategy DE0 (not in our paper) */
				if (m_variant_adptv==2) {
					F = gbIterF + n_dist() * 0.5 * (m_f[r2]-m_f[r3]);
					CR = gbIterCR + n_dist() * 0.5 * (m_cr[r2]-m_cr[r3]);
				}
				tmp = popold[i];
				size_t n = c_idx(), L = 0;
				do {
					tmp[n] = gbIter[n] + F*(popold[r2][n]-popold[r3][n]);
					n = (n+1)%Dc;
					++L;
				} while ((r_dist() < CR) && (L < Dc));
			}

			/*-------DE/rand/1/exp-------------------------------------------------------------------*/
			/*-------This is one of my favourite strategies. It works especially well when the-------*/
			/*-------"gbIter[]"-schemes experience misconvergence. Try e.g. m_f=0.7 and m_cr=0.5---------*/
			/*-------as a first guess.---------------------------------------------------------------*/
			else if (m_variant == 2) { /* strategy DE1 in the techreport */
				if (m_variant_adptv==2) {
					F = m_f[r1] + n_dist() * 0.5 * (m_f[r2]-m_f[r3]);
					CR = m_cr[r1] + n_dist() * 0.5 * (m_cr[r2]-m_cr[r3]);
				}
				tmp = popold[i];
				size_t n = c_idx(), L = 0;
				do {
					tmp[n] = popold[r1][n] + F*(popold[r2][n]-popold[r3][n]);
					n = (n+1)%Dc;
					++L;
				} while ((r_dist() < CR) && (L < Dc));
			}

			/*-------DE/rand-to-best/1/exp-----------------------------------------------------------*/
			/*-------This strategy seems to be one of the best strategies. Try m_f=0.85 and c=1.------*/
			/*-------If you get misconvergence try to increase NP. If this doesn't help you----------*/
			/*-------should play around with all three control variables.----------------------------*/
			else if (m_variant == 3) { /* similiar to DE2 but generally better */
				if (m_variant_adptv==2) {
					F = m_f[i] + n_dist() * 0.5 * (gbIterF-m_f[i]) + n_dist() * 0.5 * (m_f[r1] - m_f[r2]);
					CR = m_cr[i] + n_dist() * 0.5 * (gbIterCR-m_cr[i]) + n_dist() * 0.5 * (m_cr[r1] - m_cr[r2]);
				}
				tmp = popold[i];
				size_t n = c_idx(), L = 0;
				do {
					tmp[n] = tmp[n] + F*(gbIter[n] - tmp[n]) + F*(popold[r1][n]-popold[r2][n]);
					n = (n+1)%Dc;
					++L;
				} while ((r_dist() < CR) && (L < Dc));
			}
			/*-------DE/best/2/exp is another powerful strategy worth trying--------------------------*/
			else if (m_variant == 4) {
				if (m_variant_adptv==2) {
					F = gbIterF + n_dist() * 0.5 * (m_f[r1] - m_f[r3]) + n_dist() * 0.5 * (m_f[r2] - m_f[r4]);
					CR = gbIterCR + n_dist() * 0.5 * (m_cr[r1] - m_cr[r3]) + n_dist() * 0.5 * (m_cr[r2] - m_cr[r4]);
				}
				tmp = popold[i];
				size_t n = c_idx(), L = 0;
				do {
					tmp[n] = gbIter[n] +
						(popold[r1][n]+popold[r2][n]-popold[r3][n]-popold[r4][n])*F;
					n = (n+1)%Dc;
					++L;
				} while ((r_dist() < CR) && (L < Dc));
			}
			/*-------DE/rand/2/exp seems to be a robust optimizer for many functions-------------------*/
			else if (m_variant == 5) {
				if (m_variant_adptv==2) {
					F = m_f[r5] + n_dist() * 0.5 * (m_f[r1] - m_f[r3]) + n_dist() * 0.5 * (m_f[r2] - m_f[r4]);
					CR = m_cr[r5] + n_dist() * 0.5 * (m_cr[r1] - m_cr[r3]) + n_dist() * 0.5 * (m_cr[r2] - m_cr[r4]);
				}
				tmp = popold[i];
				size_t n = c_idx(), L = 0;
				do {
					tmp[n] = popold[r5][n] +
						(popold[r1][n]+popold[r2][n]-popold[r3][n]-popold[r4][n])*F;
					n = (n+1)%Dc;
					++L;
				} while ((r_dist() < CR) && (L < Dc));
			}

			/*=======Essentially same strategies but BINOMIAL CROSSOVER===============================*/

			/*-------DE/best/1/bin--------------------------------------------------------------------*/
			else if (m_variant == 6) {
				if (m_variant_adptv==2) {
					F = gbIterF + n_dist() * 0.5 * (m_f[r2]-m_f[r3]);
					CR = gbIterCR + n_dist() * 0.5 * (m_cr[r2]-m_cr[r3]);
				}
				tmp = popold[i];
				size_t n = c_idx();
				for (size_t L = 0; L < Dc; ++L) { /* perform Dc binomial trials */
					if ((r_dist() < CR) || L + 1 == Dc) { /* change at least one parameter */
						tmp[n] = gbIter[n] + F*(popold[r2][n]-popold[r3][n]);
					}
					n = (n+1)%Dc;
				}
			}
			/*-------DE/rand/1/bin-------------------------------------------------------------------*/
			else if (m_variant == 7) {
				if (m_variant_adptv==2) {
					F = m_f[r1] + n_dist() * 0.5 * (m_f[r2]-m_f[r3]);
					CR = m_cr[r1] + n_dist() * 0.5 * (m_cr[r2]-m_cr[r3]);
				}
				tmp = popold[i];
				size_t n = c_idx();
				for (size_t L = 0; L < Dc; ++L) { /* perform Dc binomial trials */
					if ((r_dist() < CR) || L + 1 == Dc) { /* change at least one parameter */
						tmp[n] = popold[r1][n] + F*(popold[r2][n]-popold[r3][n]);
					}
					n = (n+1)%Dc;
				}
			}
			/*-------DE/rand-to-best/1/bin-----------------------------------------------------------*/
			else if (m_variant == 8) {
				if (m_variant_adptv==2) {
					F = m_f[i] + n_dist() * 0.5 * (gbIterF-m_f[i]) + n_dist() * 0.5 * (m_f[r1] - m_f[r2]);
					CR = m_cr[i] + n_dist() * 0.5 * (gbIterCR-m_cr[i]) + n_dist() * 0.5 * (m_cr[r1] - m_cr[r2]);
				}
				tmp = popold[i];
				size_t n = c_idx();
				for (size_t L = 0; L < Dc; ++L) { /* perform Dc binomial trials */
					if ((r_dist() < CR) || L + 1 == Dc) { /* change at least one parameter */
						tmp[n] = tmp[n] + F*(gbIter[n] - tmp[n]) + F*(popold[r1][n]-popold[r2][n]);
					}
					n = (n+1)%Dc;
				}
			}
			/*-------DE/best/2/bin--------------------------------------------------------------------*/
			else if (m_variant == 9) {
				if (m_variant_adptv==2) {
					F = gbIterF + n_dist() * 0.5 * (m_f[r1] - m_f[r3]) + n_dist() * 0.5 * (m_f[r2] - m_f[r4]);
					CR = gbIterCR + n_dist() * 0.5 * (m_cr[r1] - m_cr[r3]) + n_dist() * 0.5 * (m_cr[r2] - m_cr[r4]);
				}
				tmp = popold[i];
				size_t n = c_idx();
				for (size_t L = 0; L < Dc; ++L) { /* perform Dc binomial trials */
					if ((r_dist() < CR) || L + 1 == Dc) { /* change at least one parameter */
						tmp[n] = gbIter[n] +
							(popold[r1][n]+popold[r2][n]-popold[r3][n]-popold[r4][n])*F;
					}
					n = (n+1)%Dc;
				}
			}
			/*-------DE/rand/2/bin--------------------------------------------------------------------*/
			else if (m_variant == 10) {
				if (m_variant_adptv==2) {
					F = m_f[r5] + n_dist() * 0.5 * (m_f[r1] - m_f[r3]) + n_dist() * 0.5 * (m_f[r2] - m_f[r4]);
					CR = m_cr[r5] + n_dist() * 0.5 * (m_cr[r1] - m_cr[r3]) + n_dist() * 0.5 * (m_cr[r2] - m_cr[r4]);
				}
				tmp = popold[i];
				size_t n = c_idx();
				for (size_t L = 0; L < Dc; ++L) { /* perform Dc binomial trials */
					if ((r_dist() < CR) || L + 1 == Dc) { /* change at least one parameter */
						tmp[n] = popold[r5][n] +
							(popold[r1][n]+popold[r2][n]-popold[r3][n]-popold[r4][n])*F;
					}
					n = (n+1)%Dc;
				}
			}

			/*-------DE/best/3/exp--------------------------------------------------------------------*/
			// NOTE: variants 11-18 start new if-chains; this is harmless because at most
			// one variant index can match per individual.
			if (m_variant == 11) {
				if (m_variant_adptv==2) {
					F = gbIterF + n_dist() * 0.5 * (m_f[r1] - m_f[r2]) + n_dist() * 0.5 * (m_f[r3] - m_f[r4]) + n_dist() * 0.5 * (m_f[r5] - m_f[r6]);
					CR = gbIterCR + n_dist() * 0.5 * (m_cr[r1] - m_cr[r2]) + n_dist() * 0.5 * (m_cr[r3] - m_cr[r4]) + n_dist() * 0.5 * (m_cr[r5] - m_cr[r6]);
				}
				tmp = popold[i];
				size_t n = c_idx(), L = 0;
				do {
					tmp[n] = gbIter[n] + F*(popold[r1][n]-popold[r2][n]) + F*(popold[r3][n]-popold[r4][n]) + F*(popold[r5][n]-popold[r6][n]);
					n = (n+1)%Dc;
					++L;
				} while ((r_dist() < CR) && (L < Dc));
			}
			/*-------DE/best/3/bin--------------------------------------------------------------------*/
			else if (m_variant == 12) {
				if (m_variant_adptv==2) {
					F = gbIterF + n_dist() * 0.5 * (m_f[r1] - m_f[r2]) + n_dist() * 0.5 * (m_f[r3] - m_f[r4]) + n_dist() * 0.5 * (m_f[r5] - m_f[r6]);
					CR = gbIterCR + n_dist() * 0.5 * (m_cr[r1] - m_cr[r2]) + n_dist() * 0.5 * (m_cr[r3] - m_cr[r4]) + n_dist() * 0.5 * (m_cr[r5] - m_cr[r6]);
				}
				tmp = popold[i];
				size_t n = c_idx();
				for (size_t L = 0; L < Dc; ++L) { /* perform Dc binomial trials */
					if ((r_dist() < CR) || L + 1 == Dc) { /* change at least one parameter */
						tmp[n] = gbIter[n] + F*(popold[r1][n]-popold[r2][n]) + F*(popold[r3][n]-popold[r4][n]) + F*(popold[r5][n]-popold[r6][n]);
					}
					n = (n+1)%Dc;
				}
			}
			/*-------DE/rand/3/exp--------------------------------------------------------------------*/
			if (m_variant == 13) {
				if (m_variant_adptv==2) {
					F = m_f[r7] + n_dist() * 0.5 * (m_f[r1] - m_f[r2]) + n_dist() * 0.5 * (m_f[r3] - m_f[r4]) + n_dist() * 0.5 * (m_f[r5] - m_f[r6]);
					CR = m_cr[r7] + n_dist() * 0.5 * (m_cr[r1] - m_cr[r2]) + n_dist() * 0.5 * (m_cr[r3] - m_cr[r4]) + n_dist() * 0.5 * (m_cr[r5] - m_cr[r6]);
				}
				tmp = popold[i];
				size_t n = c_idx(), L = 0;
				do {
					tmp[n] = popold[r7][n] + F*(popold[r1][n]-popold[r2][n]) + F*(popold[r3][n]-popold[r4][n]) + F*(popold[r5][n]-popold[r6][n]);
					n = (n+1)%Dc;
					++L;
				} while ((r_dist() < CR) && (L < Dc));
			}
			/*-------DE/rand/3/bin--------------------------------------------------------------------*/
			else if (m_variant == 14) {
				if (m_variant_adptv==2) {
					F = m_f[r7] + n_dist() * 0.5 * (m_f[r1] - m_f[r2]) + n_dist() * 0.5 * (m_f[r3] - m_f[r4]) + n_dist() * 0.5 * (m_f[r5] - m_f[r6]);
					CR = m_cr[r7] + n_dist() * 0.5 * (m_cr[r1] - m_cr[r2]) + n_dist() * 0.5 * (m_cr[r3] - m_cr[r4]) + n_dist() * 0.5 * (m_cr[r5] - m_cr[r6]);
				}
				tmp = popold[i];
				size_t n = c_idx();
				for (size_t L = 0; L < Dc; ++L) { /* perform Dc binomial trials */
					if ((r_dist() < CR) || L + 1 == Dc) { /* change at least one parameter */
						tmp[n] = popold[r7][n] + F*(popold[r1][n]-popold[r2][n]) + F*(popold[r3][n]-popold[r4][n]) + F*(popold[r5][n]-popold[r6][n]);
					}
					n = (n+1)%Dc;
				}
			}
			/*-------DE/rand-to-current/2/exp---------------------------------------------------------*/
			if (m_variant == 15) {
				if (m_variant_adptv==2) {
					F = m_f[r7] + n_dist() * 0.5 * (m_f[r1] - m_f[i]) + n_dist() * 0.5 * (m_f[r3] - m_f[r4]) + n_dist() * 0.5 * (m_f[r5] - m_f[r6]);
					CR = m_cr[r7] + n_dist() * 0.5 * (m_cr[r1] - m_cr[i]) + n_dist() * 0.5 * (m_cr[r3] - m_cr[r4]) + n_dist() * 0.5 * (m_cr[r5] - m_cr[r6]);
				}
				tmp = popold[i];
				size_t n = c_idx(), L = 0;
				do {
					tmp[n] = popold[r7][n] + F*(popold[r1][n]-popold[i][n]) + F*(popold[r3][n]-popold[r4][n]);
					n = (n+1)%Dc;
					++L;
				} while ((r_dist() < CR) && (L < Dc));
			}
			/*-------DE/rand-to-current/2/bin---------------------------------------------------------*/
			else if (m_variant == 16) {
				if (m_variant_adptv==2) {
					F = m_f[r7] + n_dist() * 0.5 * (m_f[r1] - m_f[i]) + n_dist() * 0.5 * (m_f[r3] - m_f[r4]) + n_dist() * 0.5 * (m_f[r5] - m_f[r6]);
					CR = m_cr[r7] + n_dist() * 0.5 * (m_cr[r1] - m_cr[i]) + n_dist() * 0.5 * (m_cr[r3] - m_cr[r4]) + n_dist() * 0.5 * (m_cr[r5] - m_cr[r6]);
				}
				tmp = popold[i];
				size_t n = c_idx();
				for (size_t L = 0; L < Dc; ++L) { /* perform Dc binomial trials */
					if ((r_dist() < CR) || L + 1 == Dc) { /* change at least one parameter */
						tmp[n] = popold[r7][n] + F*(popold[r1][n]-popold[i][n]) + F*(popold[r3][n]-popold[r4][n]);
					}
					n = (n+1)%Dc;
				}
			}
			/*-------DE/rand-to-best-and-current/2/exp------------------------------------------------*/
			if (m_variant == 17) {
				if (m_variant_adptv==2) {
					F = m_f[r7] + n_dist() * 0.5 * (m_f[r1] - m_f[i]) + n_dist() * 0.5 * (gbIterF - m_f[r4]);
					CR = m_cr[r7] + n_dist() * 0.5 * (m_cr[r1] - m_cr[i]) + n_dist() * 0.5 * (gbIterCR - m_cr[r4]);
				}
				tmp = popold[i];
				size_t n = c_idx(), L = 0;
				do {
					tmp[n] = popold[r7][n] + F*(popold[r1][n]-popold[i][n]) + F*(gbIter[n]-popold[r4][n]);
					n = (n+1)%Dc;
					++L;
				} while ((r_dist() < CR) && (L < Dc));
			}
			/*-------DE/rand-to-best-and-current/2/bin------------------------------------------------*/
			else if (m_variant == 18) {
				if (m_variant_adptv==2) {
					F = m_f[r7] + n_dist() * 0.5 * (m_f[r1] - m_f[i]) + n_dist() * 0.5 * (gbIterF - m_f[r4]);
					CR = m_cr[r7] + n_dist() * 0.5 * (m_cr[r1] - m_cr[i]) + n_dist() * 0.5 * (gbIterCR - m_cr[r4]);
				}
				tmp = popold[i];
				size_t n = c_idx();
				for (size_t L = 0; L < Dc; ++L) { /* perform Dc binomial trials */
					if ((r_dist() < CR) || L + 1 == Dc) { /* change at least one parameter */
						tmp[n] = popold[r7][n] + F*(popold[r1][n]-popold[i][n]) + F*(gbIter[n]-popold[r4][n]);
					}
					n = (n+1)%Dc;
				}
			}

			/*=======Trial mutation now in tmp[]. Force feasibility and check how good this choice really was.==================*/
			// a) feasibility: out-of-bounds components are re-sampled uniformly inside the box
			size_t i2 = 0;
			while (i2<Dc) {
				if ((tmp[i2] < lb[i2]) || (tmp[i2] > ub[i2]))
					tmp[i2] = r_dist() * (ub[i2]-lb[i2]) + lb[i2];
				++i2;
			}

			// b) how good?
			prob.objfun(newfitness, tmp); /* Evaluate new vector in tmp[] */
			if ( pop.problem().compare_fitness(newfitness,fit[i]) ) { /* improved objective function value ? */
				fit[i]=newfitness;
				popnew[i] = tmp;

				// Update the adapted parameters: the successful F/CR pair is retained
				m_cr[i] = CR;
				m_f[i] = F;

				// As a fitness improvement occurred we move the point
				// and thus can evaluate a new velocity
				std::transform(tmp.begin(), tmp.end(), pop.get_individual(i).cur_x.begin(), tmp.begin(),std::minus<double>());

				// updates x and v (the cache avoids recomputing the objective function)
				pop.set_x(i,popnew[i]);
				pop.set_v(i,tmp);
				if ( pop.problem().compare_fitness(newfitness,gbfit) ) {
					/* if so...*/
					gbfit=newfitness; /* reset gbfit to new low...*/
					gbX=popnew[i];
				}
			} else {
				popnew[i] = popold[i];
			}

		} // End of the loop through the deme

		/* Save best population member of current iteration */
		gbIter = gbX;

		/* swap population arrays. New generation becomes old one */
		std::swap(popold, popnew);

	} // end main DE iterations
	if (m_screen_output) {
		std::cout << "Exit condition -- generations > " << m_gen << std::endl;
	}
}
560 
562 std::string jde::get_name() const
563 {
564  return "jDE";
565 }
566 
568 
571 std::string jde::human_readable_extra() const
572 {
573  std::ostringstream s;
574  s << "gen:" << m_gen << ' ';
575  s << "variant:" << m_variant << ' ';
576  s << "self_adaptation:" << m_variant_adptv << ' ';
577  s << "memory:" << m_memory << ' ';
578  s << "ftol:" << m_ftol << ' ';
579  s << "xtol:" << m_xtol;
580 
581  return s.str();
582 }
583 
584 }} //namespaces
585 
586 BOOST_CLASS_EXPORT_IMPLEMENT(pagmo::algorithm::jde)
boost::shared_ptr< base > base_ptr
Alias for shared pointer to base algorithm.
Root PaGMO namespace.
std::vector< double > decision_vector
Decision vector type.
Definition: types.h:40
fitness_vector cur_f
Current fitness vector.
Definition: population.h:89
const individual_type & get_individual(const size_type &) const
Get constant reference to individual at position n.
Definition: population.cpp:277
Base algorithm class.
base_ptr clone() const
Clone method.
Definition: jde.cpp:66
Base problem class.
Definition: problem/base.h:148
Population class.
Definition: population.h:70
jde(int=100, int=2, int=1, double=1e-6, double=1e-6, bool=false)
Constructor.
Definition: jde.cpp:52
std::string human_readable_extra() const
Extra human readable algorithm info.
Definition: jde.cpp:571
size_type get_dimension() const
Return global dimension.
decision_vector x
Decision vector.
Definition: population.h:149
void evolve(population &) const
Evolve implementation.
Definition: jde.cpp:79
std::string get_name() const
Algorithm name.
Definition: jde.cpp:562
bool compare_fitness(const fitness_vector &, const fitness_vector &) const
Compare fitness vectors.
fitness_vector f
Fitness vector.
Definition: population.h:153
fitness_vector objfun(const decision_vector &) const
Return fitness of pagmo::decision_vector.
bool m_screen_output
Indicates to the derived class whether to print stuff on screen.
size_type get_i_dimension() const
Return integer dimension.
std::vector< double > fitness_vector
Fitness vector type.
Definition: types.h:42
c_size_type get_c_dimension() const
Return global constraints dimension.
const decision_vector & get_ub() const
Upper bounds getter.
container_type::size_type size_type
Population size type.
Definition: population.h:192
decision_vector cur_x
Current decision vector.
Definition: population.h:83
rng_uint32 m_urng
Random number generator for unsigned integer values.
f_size_type get_f_dimension() const
Return fitness dimension.
const decision_vector & get_lb() const
Lower bounds getter.
rng_double m_drng
Random number generator for double-precision floating point values.
decision_vector::size_type size_type
Problem's size type: the same as pagmo::decision_vector's size type.
Definition: problem/base.h:160
jDE - Differential Evolution Algorithm - Self-Adaptive C and R (2011)
Definition: jde.h:64