PaGMO  1.1.5
pso_generational.cpp
/*****************************************************************************
 *   Copyright (C) 2004-2015 The PaGMO development team,
 *   Advanced Concepts Team (ACT), European Space Agency (ESA)
 *
 *   https://github.com/esa/pagmo
 *
 *   act@esa.int
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 *   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *****************************************************************************/

#include <boost/random/uniform_int.hpp>
#include <boost/random/uniform_real.hpp>
#include <vector>
#include <cmath>
#include <sstream>  // std::ostringstream (used in human_readable_extra())
#include <typeinfo> // std::bad_cast (used when probing for stochastic problems)
#include <iostream>

#include "pso_generational.h"
#include "../problem/base_stochastic.h"


namespace pagmo { namespace algorithm {

pso_generational::pso_generational(int gen, double omega, double eta1, double eta2, double vcoeff, int variant, int neighb_type, int neighb_param):base(),m_gen(gen),m_omega(omega),m_eta1(eta1),m_eta2(eta2),m_vcoeff(vcoeff),m_variant(variant),m_neighb_type(neighb_type),m_neighb_param(neighb_param) {
    if (gen < 0) {
        pagmo_throw(value_error,"number of generations must be nonnegative");
    }

    if (m_omega < 0 || m_omega > 1) {
        if( variant < 5 )
            // variants using an inertia weight
            pagmo_throw(value_error,"the particles' inertia must be in the [0,1] range");
        else
            // variants using a constriction coefficient
            pagmo_throw(value_error,"the particles' constriction coefficient must be in the [0,1] range");
    }

    if (eta1 < 0 || eta2 < 0 || eta1 > 4 || eta2 > 4) {
        pagmo_throw(value_error,"the eta parameters must be in the [0,4] range");
    }

    if (vcoeff <= 0 || vcoeff > 1) {
        pagmo_throw(value_error,"fraction of variables' range in which velocity may vary should be in ]0,1]");
    }

    if (variant < 1 || variant > 6) {
        pagmo_throw(value_error,"algorithm variant must be one of 1 ... 6");
    }

    if (neighb_type < 1 || neighb_type > 4) {
        pagmo_throw(value_error,"swarm topology variant must be one of 1 ... 4");
    }
    // Note: neighb_param is not validated here.
}
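
/* A minimal usage sketch, assuming the standard PaGMO 1.x problem/population API
 * (pagmo::problem::rosenbrock, pagmo::population). The constructor defaults are
 * gen=1, omega=0.7298, eta1=eta2=2.05, vcoeff=0.5, variant=5, neighb_type=2,
 * neighb_param=4:
 *
 *     pagmo::problem::rosenbrock prob(10);            // box-constrained, single-objective problem
 *     pagmo::population pop(prob, 40);                // swarm of 40 particles
 *     pagmo::algorithm::pso_generational algo(100);   // 100 generations, default PSO parameters otherwise
 *     algo.evolve(pop);                               // run the optimisation in-place on pop
 */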

/// Clone method.
base_ptr pso_generational::clone() const
{
    return base_ptr(new pso_generational(*this));
}


/// Evolve implementation.
void pso_generational::evolve(population &pop) const
{
    // Let's store some useful variables.
    const problem::base &prob = pop.problem();
    const problem::base::size_type D = prob.get_dimension(), prob_i_dimension = prob.get_i_dimension(), prob_c_dimension = prob.get_c_dimension(), prob_f_dimension = prob.get_f_dimension();
    const problem::base::size_type Dc = D - prob_i_dimension;
    const decision_vector &lb = prob.get_lb(), &ub = prob.get_ub();
    const population::size_type swarm_size = pop.size();

    // We perform some checks to determine whether the problem/population are suitable for PSO
    if( Dc == 0 ){
        pagmo_throw(value_error,"There is no continuous part in the problem decision vector for PSO to optimise");
    }

    if( prob_c_dimension != 0 ){
        pagmo_throw(value_error,"The problem is not box constrained and PSO is not suitable to solve it");
    }

    if( prob_f_dimension != 1 ){
        pagmo_throw(value_error,"The problem is not single objective and PSO is not suitable to solve it");
    }

    // Get out if there is nothing to do.
    if (swarm_size == 0 || m_gen == 0) {
        return;
    }

    // Some vectors used during evolution are allocated here.
    std::vector<double> dummy(Dc,0);                     // used for initialisation purposes

    std::vector<decision_vector> X(swarm_size,dummy);    // particles' current positions
    std::vector<fitness_vector> fit(swarm_size);         // particles' current fitness values

    std::vector<decision_vector> V(swarm_size,dummy);    // particles' velocities

    std::vector<decision_vector> lbX(swarm_size,dummy);  // particles' previous best positions
    std::vector<fitness_vector> lbfit(swarm_size);       // particles' fitness values at their previous best positions

    std::vector< std::vector<int> > neighb(swarm_size);  // swarm topology (indexes of each particle's neighbours in the swarm)

    decision_vector best_neighb(Dc);                     // search space position of a particle's best neighbour
    fitness_vector best_fit;                             // fitness at the best found search space position (tracked only when using topologies 1 or 4)
    bool best_fit_improved;                              // flag indicating whether the best solution's fitness improved (tracked only when using topologies 1 or 4)

    decision_vector minv(Dc), maxv(Dc);                  // minimum and maximum velocity allowed per component

    double vwidth;                                       // temporary variable
    double new_x;                                        // temporary variable

    population::size_type p;                             // for iterating over particles
    population::size_type n;                             // for iterating over a particle's neighbours
    problem::base::size_type d;                          // for iterating over problem dimensions

    // Initialise the minimum and maximum velocity
    for( d = 0; d < Dc; d++ ){
        vwidth = ( ub[d] - lb[d] ) * m_vcoeff;
        minv[d] = -1.0 * vwidth;
        maxv[d] = vwidth;
    }
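    // With the default m_vcoeff of 0.5, for instance, each velocity component is thus
    // confined to [-0.5*(ub[d]-lb[d]), +0.5*(ub[d]-lb[d])]: a particle can traverse at
    // most half of a variable's range in a single generation.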

    // Copy the particle positions, their velocities and their fitness
    for( p = 0; p < swarm_size; p++ ){
        X[p] = pop.get_individual(p).cur_x;
        V[p] = pop.get_individual(p).cur_v;
        fit[p] = pop.get_individual(p).cur_f;
    }

    // Initialise particles' previous best positions
    for( p = 0; p < swarm_size; p++ ){
        lbX[p] = pop.get_individual(p).best_x;
        lbfit[p] = pop.get_individual(p).best_f;
    }

    // Initialise the swarm's topology
    switch( m_neighb_type ){
        case 1: initialize_topology__gbest( pop, best_neighb, best_fit, neighb ); break;
        case 3: initialize_topology__von( neighb ); break;
        case 4: initialize_topology__adaptive_random( neighb );
            best_fit = pop.champion().f; // need to track improvements in best found fitness, to know when to rewire
            break;
        case 2:
        default: initialize_topology__lbest( neighb );
    }

    // Auxiliary variables specific to the Fully Informed Particle Swarm variant
    double acceleration_coefficient = m_eta1 + m_eta2;
    double sum_forces;

    double r1 = 0.0;
    double r2 = 0.0;

    /* --- Main PSO loop --- */
    // For each generation
    for( int g = 0; g < m_gen; ++g ){

        // Update Velocity
        for( p = 0; p < swarm_size; p++ ){

            // identify the current particle's best neighbour
            // . not needed if m_neighb_type == 1 (gbest): best_neighb directly tracked in this function
            // . not needed if m_variant == 6 (FIPS): all neighbours are considered, no need to identify the best one
            if( m_neighb_type != 1 && m_variant != 6)
                best_neighb = particle__get_best_neighbor( p, neighb, lbX, lbfit, prob );

            /*-------PSO canonical (with inertia weight) ---------------------------------------------*/
            /*-------Original algorithm used in PaGMO paper-------------------------------------------*/
            if( m_variant == 1 ){
                for( d = 0; d < Dc; d++ ){
                    r1 = m_drng();
                    r2 = m_drng();
                    V[p][d] = m_omega * V[p][d] + m_eta1 * r1 * (lbX[p][d] - X[p][d]) + m_eta2 * r2 * (best_neighb[d] - X[p][d]);
                }
            }
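            // Variant 1 thus applies, per component d, the classical inertia-weight rule
            //   v[p][d] <- omega*v[p][d] + eta1*r1*(lbX[p][d] - x[p][d]) + eta2*r2*(best_neighb[d] - x[p][d]),
            // with r1 and r2 drawn independently for every component.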

            /*-------PSO canonical (with inertia weight) ---------------------------------------------*/
            /*-------and with equal random weights of social and cognitive components-----------------*/
            /*-------Check with Rastrigin-------------------------------------------------------------*/
            else if( m_variant == 2 ){
                for( d = 0; d < Dc; d++ ){
                    r1 = m_drng();
                    V[p][d] = m_omega * V[p][d] + m_eta1 * r1 * (lbX[p][d] - X[p][d]) + m_eta2 * r1 * (best_neighb[d] - X[p][d]);
                }
            }

            /*-------PSO variant (commonly mistaken in literature for the canonical)----------------*/
            /*-------Same random number for all components------------------------------------------*/
            else if( m_variant == 3 ){
                r1 = m_drng();
                r2 = m_drng();
                for( d = 0; d < Dc; d++ ){
                    V[p][d] = m_omega * V[p][d] + m_eta1 * r1 * (lbX[p][d] - X[p][d]) + m_eta2 * r2 * (best_neighb[d] - X[p][d]);
                }
            }

            /*-------PSO variant (commonly mistaken in literature for the canonical)----------------*/
            /*-------Same random number for all components------------------------------------------*/
            /*-------and with equal random weights of social and cognitive components---------------*/
            else if( m_variant == 4 ){
                r1 = m_drng();
                for( d = 0; d < Dc; d++ ){
                    V[p][d] = m_omega * V[p][d] + m_eta1 * r1 * (lbX[p][d] - X[p][d]) + m_eta2 * r1 * (best_neighb[d] - X[p][d]);
                }
            }

            /*-------PSO variant with constriction coefficients------------------------------------*/
            /* ''Clerc's analysis of the iterative system led him to propose a strategy for the
             * placement of "constriction coefficients" on the terms of the formulas; these
             * coefficients controlled the convergence of the particle and allowed an elegant and
             * well-explained method for preventing explosion, ensuring convergence, and
             * eliminating the arbitrary Vmax parameter. The analysis also takes the guesswork
             * out of setting the values of phi_1 and phi_2.''
             * ''this is the canonical particle swarm algorithm of today.''
             * [Poli et al., 2007] http://dx.doi.org/10.1007/s11721-007-0002-0
             * [Clerc and Kennedy, 2002] http://dx.doi.org/10.1109/4235.985692
             *
             * This being the canonical PSO of today, this variant is set as the default in PaGMO.
             *-------------------------------------------------------------------------------------*/
            else if( m_variant == 5 ){
                for( d = 0; d < Dc; d++ ){
                    r1 = m_drng();
                    r2 = m_drng();
                    V[p][d] = m_omega * ( V[p][d] + m_eta1 * r1 * (lbX[p][d] - X[p][d]) + m_eta2 * r2 * (best_neighb[d] - X[p][d]) );
                }
            }
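            // In variant 5, m_omega plays the role of Clerc's constriction factor chi. The common
            // parameterisation phi = eta1 + eta2 = 4.1, with chi = 2/|2 - phi - sqrt(phi^2 - 4*phi)|,
            // gives chi ~= 0.7298 and eta1 = eta2 = 2.05, which matches this algorithm's defaults.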

            /*-------Fully Informed Particle Swarm-------------------------------------------------*/
            /* ''Whereas in the traditional algorithm each particle is affected by its own
             * previous performance and the single best success found in its neighborhood, in
             * Mendes' fully informed particle swarm (FIPS), the particle is affected by all its
             * neighbors, sometimes with no influence from its own previous success.''
             * ''With good parameters, FIPS appears to find better solutions in fewer iterations
             * than the canonical algorithm, but it is much more dependent on the population topology.''
             * [Poli et al., 2007] http://dx.doi.org/10.1007/s11721-007-0002-0
             * [Mendes et al., 2004] http://dx.doi.org/10.1109/TEVC.2004.826074
             *-------------------------------------------------------------------------------------*/
            else if( m_variant == 6 ){
                for( d = 0; d < Dc; d++ ){
                    sum_forces = 0.0;
                    for( n = 0; n < neighb[p].size(); n++ )
                        sum_forces += m_drng() * acceleration_coefficient * ( lbX[ neighb[p][n] ][d] - X[p][d] );

                    V[p][d] = m_omega * ( V[p][d] + sum_forces / neighb[p].size() );
                }
            }
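            // FIPS thus averages the attraction towards the previous best positions of all of a
            // particle's informants:
            //   v[p][d] <- omega * ( v[p][d] + (1/|N_p|) * sum_{k in N_p} U(0, eta1+eta2) * (lbX[k][d] - x[p][d]) ),
            // where N_p is neighb[p] and U(0,c) denotes a uniform random draw in [0,c).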
        }

        // Update Position
        for( p = 0; p < swarm_size; p++ ){
            // We now check that the velocity does not exceed the maximum allowed per component
            // and we perform the position update and the feasibility correction
            for( d = 0; d < Dc; d++ ){

                if( V[p][d] > maxv[d] )
                    V[p][d] = maxv[d];

                else if( V[p][d] < minv[d] )
                    V[p][d] = minv[d];

                // update position
                new_x = X[p][d] + V[p][d];

                // feasibility correction
                // (velocity updated to that which would have taken the previous position
                // to the newly corrected feasible position)
                if( new_x < lb[d] ){
                    new_x = lb[d];
                    V[p][d] = 0.0;
//                  new_x = boost::uniform_real<double>(lb[d],ub[d])(m_drng);
//                  V[p][d] = new_x - X[p][d];
//                  V[p][d] = 0;
                }
                else if( new_x > ub[d] ){
                    new_x = ub[d];
                    V[p][d] = 0.0;
//                  new_x = boost::uniform_real<double>(lb[d],ub[d])(m_drng);
//                  V[p][d] = new_x - X[p][d];
//                  V[p][d] = 0;
                }
                X[p][d] = new_x;
            }
        }

        // If the problem is a stochastic optimisation problem, change the seed and re-evaluate,
        // taking care to also update the global best and the particles' local bests
        try
        {
            dynamic_cast<const pagmo::problem::base_stochastic &>(prob).set_seed(m_urng());
            pop.clear(); // Removes memory based on different seeds (champion and best_x, best_f, best_c)

            // Re-evaluate wrt new seed the particle position and memory
            for( p = 0; p < swarm_size; p++ ){
                // We evaluate here the new individual fitness
                prob.objfun( fit[p], X[p] );
                // We re-evaluate the fitness of the particle memory
                prob.objfun( lbfit[p], lbX[p] );
                // push_back re-creates individual p with lbX[p] as its memory (best_x/best_f);
                // set_x/set_v then restore the current position and velocity
                pop.push_back(lbX[p]);
                pop.set_x(p,X[p]);
                pop.set_v(p,V[p]);
            }
            // Update best_fit and best_neighb to account for the new seed
            best_fit = fit[0];
            best_neighb = X[0];
            for( p = 1; p < swarm_size; p++ ){
                if( prob.compare_fitness( fit[p], best_fit ) ){
                    best_fit = fit[p];
                    best_neighb = X[p];
                }
            }

        }
        catch (const std::bad_cast& e)
        {
            // The problem is not stochastic: only evaluate the new positions
            for( p = 0; p < swarm_size; p++ ){
                // We evaluate here the new individual fitness
                prob.objfun( fit[p], X[p] );
                pop.set_x(p,X[p]);
                pop.set_v(p,V[p]);
            }
        }
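        // Note that for stochastic problems the particle memories (lbX/lbfit) are re-evaluated
        // under the new seed as well, so that the memory updates below compare fitness values
        // obtained under the same realisation of the noise.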


        // We update the particles' memory if a better point has been reached
        best_fit_improved = false;
        for( p = 0; p < swarm_size; p++ ){
            if( prob.compare_fitness( fit[p], lbfit[p] ) ){
                // update the particle's previous best position
                lbfit[p] = fit[p];
                lbX[p] = X[p];

                // update the best position observed so far by any particle in the swarm
                // (only performed if swarm topology is gbest or random varying)
                if( ( m_neighb_type == 1 || m_neighb_type == 4 ) && prob.compare_fitness( fit[p], best_fit ) ){
                    best_neighb = X[p];
                    best_fit = fit[p];
                    best_fit_improved = true;
                }
            }
        }

        // reset swarm topology if no improvement was observed in the best found fitness value
        if( m_neighb_type == 4 && !best_fit_improved )
        {
            initialize_topology__adaptive_random( neighb );
        }
    } // end of main PSO loop
}


decision_vector pso_generational::particle__get_best_neighbor( population::size_type pidx, std::vector< std::vector<int> > &neighb, const std::vector<decision_vector> &lbX, const std::vector<fitness_vector> &lbfit, const problem::base &prob ) const
{
    population::size_type nidx, bnidx; // neighbour index; best neighbour index

    switch( m_neighb_type ){
        case 1: // { gbest }
            // ERROR: execution should not reach this point, as the global best position is not tracked using the neighb vector
            pagmo_throw(value_error,"particle__get_best_neighbor() invoked while using a gbest swarm topology");
            break;
        case 2: // { lbest }
        case 3: // { von }
        case 4: // { adaptive random }
        default:
            // iterate over indexes of the particle's neighbours, and identify the best
            bnidx = neighb[pidx][0];
            for( nidx = 1; nidx < neighb[pidx].size(); nidx++ )
                if( prob.compare_fitness( lbfit[ neighb[pidx][nidx] ], lbfit[ bnidx ] ) )
                    bnidx = neighb[pidx][nidx];
            return lbX[bnidx];
    }
}


void pso_generational::initialize_topology__gbest( const population &pop, decision_vector &gbX, fitness_vector &gbfit, std::vector< std::vector<int> > &neighb ) const
{
    // The best position already visited by the swarm will be tracked in pso::evolve() as particles are evaluated.
    // Here we define the initial values of the variables that will do that tracking.
    gbX = pop.champion().x;
    gbfit = pop.champion().f;

    /* The usage of a gbest swarm topology along with a FIPS (fully informed particle swarm) velocity update formula
     * is discouraged. However, because a user might still configure such a setup, we must ensure FIPS has access to
     * the list of indices of particles' neighbours:
     */
    if( m_variant == 6 ){
        unsigned int i;
        for( i = 0; i < neighb.size(); i++ )
            neighb[0].push_back( i );
        for( i = 1; i < neighb.size(); i++ )
            neighb[i] = neighb[0];
    }
}
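
// With a gbest topology and the FIPS variant, every particle's neighbour list therefore contains
// the whole swarm (itself included), so each particle is informed by all previous best positions.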


void pso_generational::initialize_topology__lbest( std::vector< std::vector<int> > &neighb ) const
{
    int swarm_size = neighb.size();
    int pidx;    // for iterating over particles
    int nidx, j; // for iterating over particles' neighbours

    int radius = m_neighb_param / 2;

    for( pidx = 0; pidx < swarm_size; pidx++ ){
        for( j = -radius; j <= radius; j++ ){
            if( j == 0 ) j++;
            nidx = (pidx + j) % swarm_size;
            if( nidx < 0 ) nidx = swarm_size + nidx;
            neighb[pidx].push_back( nidx );
        }
    }
}
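
// For example, with the default m_neighb_param of 4 the radius is 2, so particle i is
// informed by particles (i-2), (i-1), (i+1) and (i+2), taken modulo the swarm size
// (a ring topology; the particle itself is not included in its own list here).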


const int vonNeumann_neighb_diff[4][2] = { {-1,0}, {1,0}, {0,-1}, {0,1} };

void pso_generational::initialize_topology__von( std::vector< std::vector<int> > &neighb ) const
{
    int swarm_size = neighb.size();
    int cols, rows;  // lattice structure
    int pidx, nidx;  // particle and neighbour indices, in the swarm and neighbourhood vectors
    int p_x, p_y;    // particle's coordinates in the lattice
    int n_x, n_y;    // particle neighbour's coordinates in the lattice

    rows = std::sqrt( swarm_size );
    while( swarm_size % rows != 0 )
        rows -= 1;
    cols = swarm_size / rows;

    for( pidx = 0; pidx < swarm_size; pidx++ ){
        p_x = pidx % cols;
        p_y = pidx / cols;

        for( nidx = 0; nidx < 4; nidx++ ){
            // The sign of the remainder (%) when at least one operand is negative is
            // implementation-specific; the 'if' below ensures the same behaviour across compilers.
            n_x = ( p_x + vonNeumann_neighb_diff[nidx][0] ) % cols; if( n_x < 0 ) n_x = cols + n_x;
            n_y = ( p_y + vonNeumann_neighb_diff[nidx][1] ) % rows; if( n_y < 0 ) n_y = rows + n_y;

            neighb[pidx].push_back( n_y * cols + n_x );
        }
    }
}
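
// Example: a swarm of 20 particles is arranged on a 4x5 lattice (rows=4, cols=5).
// Particle 7 sits at (x=2, y=1) and is informed by particles 6, 8, 2 and 12, i.e. its
// left, right, upper and lower lattice neighbours (with wrap-around at the edges).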


void pso_generational::initialize_topology__adaptive_random( std::vector< std::vector<int> > &neighb ) const
{
    int swarm_size = neighb.size();
    int pidx;    // for iterating over particles
    int nidx, j; // for iterating over particles being connected to

    // clear previously defined topology
    for( pidx = 0; pidx < swarm_size; pidx++ )
        neighb[pidx].clear();

    // define new topology
    for( pidx = 0; pidx < swarm_size; pidx++ ){

        // the particle always connects back to itself, thus guaranteeing a minimum indegree of 1
        neighb[pidx].push_back( pidx );

        for( j = 1; j < m_neighb_param; j++ ){
            nidx = m_drng() * swarm_size;
            neighb[nidx].push_back( pidx );
            // No check performed to see whether pidx is already in neighb[nidx],
            // leading to a performance penalty in particle__get_best_neighbor() when it occurs.
        }
    }
}
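
// In this adaptive random topology each particle informs itself and m_neighb_param - 1
// randomly drawn particles (repetitions are possible), so neighb[i] holds the indices of
// the particles informing particle i; the topology is rebuilt in evolve() whenever the
// best found fitness fails to improve over a generation.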


/// Algorithm name.
std::string pso_generational::get_name() const
{
    return "PSO - Generational";
}


/// Extra human readable algorithm info.
std::string pso_generational::human_readable_extra() const
{
    std::ostringstream s;
    s << "gen:" << m_gen << ' ';
    s << "omega:" << m_omega << ' ';
    s << "eta1:" << m_eta1 << ' ';
    s << "eta2:" << m_eta2 << ' ';
    s << "variant:" << m_variant << ' ';
    s << "topology:" << m_neighb_type << ' ';
    if( m_neighb_type == 2 || m_neighb_type == 4 )
        s << "topology param.:" << m_neighb_param << ' ';
    return s.str();
}

}} //namespaces

BOOST_CLASS_EXPORT_IMPLEMENT(pagmo::algorithm::pso_generational)