AI-ParticleSwarmOptimization
lib/AI/ParticleSwarmOptimization.pm
use constant kLogBetter => 1;
use constant kLogStall => 2;
use constant kLogIter => 4;
use constant kLogDetail => 8;
use constant kLogIterDetail => (kLogIter | kLogDetail);
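# Verbosity flags: these bit values are OR'd together into the -verbose mask.
# setParams also accepts an array of names, e.g. -verbose => ['iter', 'detail'],
# which is equivalent to kLogIterDetail.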
sub new {
my ($class, %params) = @_;
my $self = bless {}, $class;
$self->setParams (%params);
return $self;
}
sub setParams {
my ($self, %params) = @_;
if (defined $params{-fitFunc}) {
# Process required parameters - -fitFunc and -dimensions
if ('ARRAY' eq ref $params{-fitFunc}) {
($self->{fitFunc}, @{$self->{fitParams}}) = @{$params{-fitFunc}};
} else {
$self->{fitFunc} = $params{-fitFunc};
}
$self->{fitParams} ||= [];
}
$self->{prtcls} = [] # Need to reinit if num dimensions changed
if defined $params{-dimensions}
and defined $self->{dimensions}
and $params{-dimensions} != $self->{dimensions};
$self->{$_} = $params{"-$_"} for grep {exists $params{"-$_"}} qw/
dimensions
exitFit
exitPlateau
exitPlateauDP
exitPlateauWindow
exitPlateauBurnin
inertia
iterations
meWeight
numNeighbors
numParticles
posMax
posMin
randSeed
randStartVelocity
stallSpeed
themWeight
verbose
/;
die "-dimensions must be greater than 0\n"
if exists $params{-dimensions} && $params{-dimensions} <= 0;
if (defined $self->{verbose} and 'ARRAY' eq ref $self->{verbose}) {
my @log = map {lc} @{$self->{verbose}};
my %logTypes = (
better => kLogBetter,
stall => kLogStall,
iter => kLogIter,
detail => kLogDetail,
);
$self->{verbose} = 0;
exists $logTypes{$_} and $self->{verbose} |= $logTypes{$_} for @log;
}
$self->{numParticles} ||= $self->{dimensions} * 10
if defined $self->{dimensions};
$self->{numNeighbors} ||= int sqrt $self->{numParticles}
if defined $self->{numParticles};
$self->{iterations} ||= 1000;
$self->{exitPlateauDP} ||= 10;
$self->{exitPlateauWindow} ||= $self->{iterations} * 0.1;
$self->{exitPlateauBurnin} ||= $self->{iterations} * 0.5;
$self->{posMax} = 100 unless defined $self->{posMax};
$self->{posMin} = -$self->{posMax} unless defined $self->{posMin};
$self->{meWeight} ||= 0.5;
$self->{themWeight} ||= 0.5;
$self->{inertia} ||= 0.9;
$self->{verbose} ||= 0;
return 1;
}
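# Example (illustrative): parameters may be supplied to new() or changed later, e.g.
#   $pso->setParams (-iterations => 500, -stallSpeed => 1e-3, -verbose => ['better', 'stall']);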
sub init {
my ($self) = @_;
die "-fitFunc must be set before init or optimize is called"
unless $self->{fitFunc} and 'CODE' eq ref $self->{fitFunc};
die
"-dimensions must be set to 1 or greater before init or optimize is called"
unless $self->{dimensions} and $self->{dimensions} >= 1;
my $seed =
int (exists $self->{randSeed} ? $self->{randSeed} : rand (0xffffffff));
$self->{rndGen} = Math::Random::MT->new ($seed);
$self->{usedRandSeed} = $seed;
$self->{prtcls} = [];
$self->{bestBest} = undef;
$self->{bestBestByIter} = undef;
$self->{bestsMean} = 0;
$self->_initParticles ();
$self->{iterCount} = 0;
# Normalise weights.
my $totalWeight =
$self->{inertia} + $self->{themWeight} + $self->{meWeight};
$self->{inertia} /= $totalWeight;
$self->{meWeight} /= $totalWeight;
$self->{themWeight} /= $totalWeight;
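# After normalisation inertia + meWeight + themWeight == 1, so each velocity update
# is a weighted blend of the previous velocity, the pull toward the particle's own
# best position, and the pull toward its neighbourhood best.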
return 1;
}
# Evaluate the fitness function at a position, passing any user supplied parameters first
sub _calcPosFit {
my ($self, $pos) = @_;
return $self->{fitFunc}->(@{$self->{fitParams}}, @$pos);
}
sub _swarm {
my ($self, $iterations) = @_;
for my $iter (1 .. $iterations) {
++$self->{iterCount};
last if defined $self->_moveParticles ($iter);
$self->_updateVelocities ($iter);
next if !$self->{exitPlateau} || !defined $self->{bestBest};
if ($iter >= $self->{exitPlateauBurnin} - $self->{exitPlateauWindow}) {
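# Maintain a rolling mean of bestBest over the last exitPlateauWindow iterations,
# using bestBestByIter as a circular buffer. The window starts filling
# exitPlateauWindow iterations before burn-in ends so the mean is ready when
# plateau checking begins.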
my $i = $iter % $self->{exitPlateauWindow};
$self->{bestsMean} -= $self->{bestBestByIter}[$i]
if defined $self->{bestBestByIter}[$i];
$self->{bestsMean} += $self->{bestBestByIter}[$i] =
$self->{bestBest} / $self->{exitPlateauWindow};
}
next if $iter <= $self->{exitPlateauBurnin};
# Round to the specified number of decimal places (-exitPlateauDP)
my $format = "%.$self->{exitPlateauDP}f";
my $mean = sprintf $format, $self->{bestsMean};
my $current = sprintf $format, $self->{bestBest};
# Stop iterating if the windowed mean of best fits has plateaued at the current best
last if $mean == $current;
}
return $self->{bestBest};
}
sub _moveParticles {
my ($self, $iter) = @_;
print "Iter $iter\n" if $self->{verbose} & kLogIter;
for my $prtcl (@{$self->{prtcls}}) {
@{$prtcl->{currPos}} = @{$prtcl->{nextPos}};
$prtcl->{currFit} = $prtcl->{nextFit};
my $fit = $prtcl->{currFit};
if ($self->_betterFit ($fit, $prtcl->{bestFit})) {
# Save position - best fit for this particle so far
$self->_saveBest ($prtcl, $fit, $iter);
}
return $fit if defined $self->{exitFit} and $fit < $self->{exitFit};
next if !($self->{verbose} & kLogIterDetail);
printf "Part %3d fit %8.2f", $prtcl->{id}, $fit
if $self->{verbose} >= 2;
printf " (%s @ %s)",
join (', ', map {sprintf '%5.3f', $_} @{$prtcl->{velocity}}),
join (', ', map {sprintf '%5.2f', $_} @{$prtcl->{currPos}})
if $self->{verbose} & kLogDetail;
print "\n";
}
return undef;
}
sub _saveBest {
my ($self, $prtcl, $fit, $iter) = @_;
# for each dimension, set the best position as the current position
@{$prtcl->{bestPos}} = @{$prtcl->{currPos}};
$prtcl->{bestFit} = $fit;
return if !$self->_betterFit ($fit, $self->{bestBest});
if ($self->{verbose} & kLogBetter) {
my $velSq;
$velSq += $_**2 for @{$prtcl->{velocity}};
printf "#%05d: Particle $prtcl->{id} best: %.4f (vel: %.3f)\n",
$iter, $fit, sqrt ($velSq);
}
$self->{bestBest} = $fit;
}
sub _betterFit {
my ($self, $new, $old) = @_;
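# Minimisation: any defined fitness beats undef; otherwise lower is better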
return !defined ($old) || ($new < $old);
}
sub _updateVelocities {
my ($self, $iter) = @_;
for my $prtcl (@{$self->{prtcls}}) {
my $bestN = $self->{prtcls}[$self->_getBestNeighbour ($prtcl)];
my $velSq;
for my $d (0 .. $self->{dimensions} - 1) {
my $meFactor =
$self->_randInRange (-$self->{meWeight}, $self->{meWeight});
my $themFactor =
$self->_randInRange (-$self->{themWeight}, $self->{themWeight});
my $meDelta = $prtcl->{bestPos}[$d] - $prtcl->{currPos}[$d];
my $themDelta = $bestN->{bestPos}[$d] - $prtcl->{currPos}[$d];
$prtcl->{velocity}[$d] =
$prtcl->{velocity}[$d] * $self->{inertia} +
$meFactor * $meDelta +
$themFactor * $themDelta;
$velSq += $prtcl->{velocity}[$d]**2;
}
my $vel = sqrt ($velSq);
if (!$vel or $self->{stallSpeed} and $vel <= $self->{stallSpeed}) {
# Particle has stalled: reinitialise it and move on to the next particle
$self->_initParticle ($prtcl);
printf "#%05d: Particle %d stalled\n", $iter, $prtcl->{id}
if $self->{verbose} & kLogStall;
next;
}
for my $d (0 .. $self->{dimensions} - 1) {
$prtcl->{nextPos}[$d] = $prtcl->{currPos}[$d] + $prtcl->{velocity}[$d];
if ($prtcl->{nextPos}[$d] < $self->{posMin}) {
$prtcl->{nextPos}[$d] = $self->{posMin};
$prtcl->{velocity}[$d] = 0;
} elsif ($prtcl->{nextPos}[$d] > $self->{posMax}) {
$prtcl->{nextPos}[$d] = $self->{posMax};
$prtcl->{velocity}[$d] = 0;
}
}
$prtcl->{nextFit} = $self->_calcPosFit ($prtcl->{nextPos});
}
}
sub _randInRange {
my ($self, $min, $max) = @_;
return $min + $self->{rndGen}->rand ($max - $min);
}
sub _getBestNeighbour {
my ($self, $prtcl) = @_;
my $bestNFitness;
my $bestNIndex;
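# Examine numNeighbors consecutive particles (wrapping around the swarm) and
# return the index of the one with the lowest (best) bestFit found so far.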
for my $neighbor (0 .. $self->{numNeighbors} - 1) {
my $prtclNIndex = ($prtcl + $neighbor) % $self->{numParticles};
if (!defined ($bestNFitness)
|| $self->{prtcls}[$prtclNIndex]{bestFit} < $bestNFitness)
{
$bestNFitness = $self->{prtcls}[$prtclNIndex]{bestFit};
$bestNIndex = $prtclNIndex;
}
}
return $bestNIndex;
}
1;
=head1 NAME
AI::ParticleSwarmOptimization - Particle Swarm Optimization (object oriented)
=head1 SYNOPSIS
    use AI::ParticleSwarmOptimization;

    my $pso = AI::ParticleSwarmOptimization->new (
        -fitFunc    => \&calcFit,
        -dimensions => 3,
        );
    my $fitValue       = $pso->optimize ();
    my ($best)         = $pso->getBestParticles (1);
    my ($fit, @values) = $pso->getParticleBestPos ($best);

    printf "Fit %.4f at (%s)\n",
        $fit, join ', ', map {sprintf '%.4f', $_} @values;

    sub calcFit {
        my @values = @_;
        my $offset = int (-@values / 2);
        my $sum;

        $sum += ($_ - $offset++) ** 2 for @values;
        return $sum;
    }
=head1 Description
The Particle Swarm Optimization technique locates the best position on a
hypersurface (where 'best' is the minimum of some fitness function) by moving a
number of particles over the surface and communicating the current best position
found between them. For a Wikipedia discussion of PSO see
http://en.wikipedia.org/wiki/Particle_swarm_optimization.
This pure Perl module is an implementation of the Particle Swarm Optimization
technique for finding minima of hypersurfaces. It presents an object-oriented
interface that facilitates easy configuration of the optimization parameters and
(in principle) allows the creation of derived classes to reimplement all aspects
of the optimization engine (a future version will describe the replaceable
engine components).
This implementation allows communication of a local best point between a
selected number of neighbours. It does not support a single global best position
that is known to all particles in the swarm.
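
In outline, each iteration updates every particle's velocity and then its
position, per dimension, roughly as follows (a sketch of what
C<_updateVelocities> and C<_moveParticles> do; the variable names here are
illustrative, and the three weights are normalised to sum to 1 before the first
iteration):

    $velocity = $inertia * $velocity
              + randInRange (-$meWeight,   $meWeight)   * ($ownBestPos - $currentPos)
              + randInRange (-$themWeight, $themWeight) * ($nbrBestPos - $currentPos);
    $nextPos  = $currentPos + $velocity;   # then clamped to the [posMin, posMax] range
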
=head1 Methods
AI::ParticleSwarmOptimization provides the following public methods. The parameter lists shown
for the methods denote optional parameters by showing them in [].
=over 4
=item new (%parameters)
Create an optimization object. The following parameters may be used:
=over 4
=item I<-dimensions>: positive number, required
The number of dimensions of the hypersurface being searched.
=item I<-exitFit>: number, optional
If provided, I<-exitFit> allows early termination of C<optimize> if the
fitness value becomes equal to or less than I<-exitFit>.
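
For example (illustrative):

    my $pso = AI::ParticleSwarmOptimization->new (
        -fitFunc    => \&calcFit,
        -dimensions => 3,
        -exitFit    => 1e-5,
        );
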
=item I<-fitFunc>: required
I<-fitFunc> is a reference to the fitness function used by the search. If extra
parameters need to be passed to the fitness function, an array ref may be used
with the code ref as the first element and the extra parameters as the
following elements. User-provided parameters are passed as the first parameters
to the fitness function when it is called:
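
    my $fitness = $fitFunc->(@userParams, @particlePosition);

For example, a fitness function taking a user supplied scale factor (C<$scale>
here is purely illustrative) could be set up as:

    my $pso = AI::ParticleSwarmOptimization->new (
        -fitFunc    => [\&calcFit, $scale],
        -dimensions => 3,
        );

    sub calcFit {
        my ($scale, @values) = @_;
        my $sum = 0;
        $sum += $_ ** 2 for @values;
        return $scale * $sum;
    }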