AI-PSO
- added perceptron C++ code that I wrote a long time ago ;)
- added an example (pso_ann.pl) for training a simple feed-forward neural network
- updated POD
0.82 Sat Nov 11 22:20:31 2006
- fixed POD to correctly 'use AI::PSO'
- fixed fitness function in PSO.t
- added research paper to package
- moved into a subversion repository
- removed requirement for perl 5.8.8
- removed printing of solution array in test
0.80 Sat Nov 11 14:22:27 2006
- changed namespace to AI::PSO
- added a pso_get_solution_array function
0.70 Fri Nov 10 23:50:32 2006
- added user callback fitness function
- added POD
- added tests
- fixed typos
examples/NeuralNet/NeuralNet.h
}
}
friend istream & operator>>(istream & in, NeuralNet & ann)
{
    // stream extraction delegates to the class's read() routine
    ann.read(in);
    return in;
}
void print(ostream & out)
{
    // empty stub
}
*/
protected:
///
/// \fn connectionize()
/// \brief builds a fully connected network once the Neurons are constructed
///
void connectionize()
examples/NeuralNet/pso_ann.pl
my $annConfig = "pso.ann";
my $annInputs = "pso.dat";
my $expectedValue = 3.5; # the value we want to train the ANN to produce (just like the example in t/PSO.t)
sub test_fitness_function(@) {
my (@arr) = (@_);
&writeAnnConfig($annConfig, $numInputs, $numHidden, $xferFunc, @arr);
my $netValue = &runANN($annConfig, $annInputs);
print "network value = $netValue\n";
# The closer the network value gets to the desired value,
# the closer the fitness should be to 1.
#
# This mapping is a special case of the sigmoid, and looks an awful lot
# like the hyperbolic tangent ;)
#
my $magnitudeFromBest = abs($expectedValue - $netValue);
return 2 / (1 + exp($magnitudeFromBest));
}
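# Sanity check of the fitness mapping above (plain arithmetic, no assumptions):
#   |expected - net| = 0  ->  2 / (1 + exp(0)) = 1.0000  (perfect match)
#   |expected - net| = 1  ->  2 / (1 + exp(1)) ~ 0.5379
#   |expected - net| = 5  ->  2 / (1 + exp(5)) ~ 0.0134
# so the returned fitness lies in (0, 1] and equals 1 only at a perfect match.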
examples/NeuralNet/pso_ann.pl
##### io #########
sub writeAnnConfig {
my ($configFile, $inputs, $hidden, $func, @weights) = (@_);
open(ANN, ">$configFile") or die "cannot write $configFile: $!";
print ANN "$inputs $hidden\n";
print ANN "$func\n";
foreach my $weight (@weights) {
print ANN "$weight ";
}
print ANN "\n";
close(ANN);
}
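# For illustration (hypothetical values), a call like
#   writeAnnConfig("pso.ann", 2, 3, "tanh", 0.1, -0.2, 0.7)
# produces a config file whose contents look like:
#   2 3
#   tanh
#   0.1 -0.2 0.7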
sub runANN($$) {
my ($configFile, $dataFile) = @_;
# run the external network evaluator and capture its one-line output
my $networkValue = `ann_compute $configFile $dataFile`;
chomp($networkValue);
return $networkValue;
}
lib/AI/PSO.pm
for(my $n = 0; $n < $numNeighbors; $n++) {
$particles[$p]{neighbor}[$n] = $particles[&get_index_of_neighbor($p, $n)];
}
}
}
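# get_index_of_neighbor() is called above but elided from this view; a
# plausible ring-topology implementation (a sketch, not necessarily the
# module's actual code) wraps around the particle array:
#
#   sub get_index_of_neighbor {
#       my ($particleIndex, $neighborNum) = @_;
#       return ($particleIndex + $neighborNum + 1) % $numParticles;
#   }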
sub dump_particle($) {
$| = 1; # enable autoflush so diagnostics appear immediately
my ($index) = @_;
print STDERR "[particle $index]\n";
print STDERR "\t[bestPos] ==> " . &compute_fitness(@{$particles[$index]{bestPos}}) . "\n";
foreach my $pos (@{$particles[$index]{bestPos}}) {
print STDERR "\t\t$pos\n";
}
print STDERR "\t[currPos] ==> " . &compute_fitness(@{$particles[$index]{currPos}}) . "\n";
foreach my $pos (@{$particles[$index]{currPos}}) {
print STDERR "\t\t$pos\n";
}
print STDERR "\t[nextPos] ==> " . &compute_fitness(@{$particles[$index]{nextPos}}) . "\n";
foreach my $pos (@{$particles[$index]{nextPos}}) {
print STDERR "\t\t$pos\n";
}
print STDERR "\t[velocity]\n";
foreach my $pos (@{$particles[$index]{velocity}}) {
print STDERR "\t\t$pos\n";
}
}
#
# swarm
# - runs the particle swarm algorithm
#
sub swarm() {
for(my $iter = 0; $iter < $maxIterations; $iter++) {
for(my $p = 0; $p < $numParticles; $p++) {
lib/AI/PSO.pm
if($fitness > &compute_fitness(@{$particles[$p]{bestPos}})) {
# for each dimension, set the best position as the current position
for(my $d2 = 0; $d2 < $dimensions; $d2++) {
$particles[$p]{bestPos}[$d2] = $particles[$p]{currPos}[$d2];
}
}
## check for exit criteria
if($fitness >= $exitFitness) {
#...write solution
print "Y:$iter:$p:$fitness\n";
&save_solution(@{$particles[$p]{bestPos}});
&dump_particle($p);
return 0;
} else {
if($verbose == 1) {
print "N:$iter:$p:$fitness\n"
}
if($verbose == 2) {
&dump_particle($p);
}
}
}
## at this point we've updated our position, but haven't reached the end of the search
## so we turn to our neighbors for help.
## (we see if they are doing any better than we are,
lib/AI/PSO.pm
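# ($delta is computed in code elided above; a canonical PSO velocity term,
# sketched here as an assumption about that code, combines the 'me' and
# 'them' attractions with random weights:
#   $delta = $meWeight   * rand_in($meMin, $meMax)
#                        * ($particles[$p]{bestPos}[$d] - $particles[$p]{currPos}[$d])
#          + $themWeight * rand_in($themMin, $themMax)
#                        * ($bestNeighborPos[$d]        - $particles[$p]{currPos}[$d]);
# rand_in() and @bestNeighborPos are hypothetical names used only for
# illustration.)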
# do the PSO position and velocity updates
$particles[$p]{velocity}[$d] = &clamp_velocity($delta);
$particles[$p]{nextPos}[$d] = $particles[$p]{currPos}[$d] + $particles[$p]{velocity}[$d];
}
}
}
}
#
# at this point we have reached the maximum number of iterations without meeting the exit criterion, so print the best solution found so far
#
print STDERR "MAX ITERATIONS REACHED WITHOUT MEETING EXIT CRITERION...printing best solution\n";
my $bestFit = -1;
my $bestPartIndex = -1;
for(my $p = 0; $p < $numParticles; $p++) {
my $endFit = &compute_fitness(@{$particles[$p]{bestPos}});
if($endFit >= $bestFit) {
$bestFit = $endFit;
$bestPartIndex = $p;
}
}
lib/AI/PSO.pm
dimensions => 4, # number of parameters you want to optimize
deltaMin => -4.0, # minimum change in velocity during PSO update
deltaMax => 4.0, # maximum change in velocity during PSO update
meWeight => 2.0, # 'individuality' weighting constant (higher means more individuality)
meMin => 0.0, # 'individuality' minimum random weight
meMax => 1.0, # 'individuality' maximum random weight
themWeight => 2.0, # 'social' weighting constant (higher means trust group more)
themMin => 0.0, # 'social' minimum random weight
themMax => 1.0, # 'social' maximum random weight
exitFitness => 0.9, # minimum fitness to achieve before exiting
verbose => 0, # 0 prints solution
# 1 prints (Y|N):particle:fitness at each iteration
# 2 dumps each particle (in addition to the level-1 output)
psoRandomRange => 4.0, # setting this enables the original PSO algorithm,
# which then ignores the me*/them* parameters above
);
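# A typical driver sequence (a sketch: pso_set_params() is assumed from the
# module's API; the other calls are documented below) would then be:
#
#   pso_set_params(\%params);
#   pso_register_fitness_function('custom_fitness_function');
#   pso_optimize();
#   my @solution = pso_get_solution_array();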
sub custom_fitness_function {
my (@input) = @_;
# this is a callback function.
# @input is passed in for you; you do not need to worry about setting it...
# ... do something with @input, which is an array of floats
lib/AI/PSO.pm
where each particle is neighbors with all other particles
(numNeighbors == numParticles - 1) converges more quickly. However,
this will drastically increase the number of calls to your fitness
function. So, if your fitness function is the bottleneck, then you
should tune this value for the appropriate time/accuracy trade-off.
Also, I highly suggest you implement a simple fitness cache so you
don't end up recomputing fitness values (see the sketch after this
list). This can easily be done with a Perl hash keyed on the string
concatenation of the array values passed to your fitness function.
Note that these are floating point values, so decide how many digits
are actually significant and use sprintf to limit the precision of
the particle positions accordingly.
=item 4. Number of particles
Increasing the number of particles improves cooperation and
search-space coverage at the expense of computation time. For
typical applications, 20-40 particles should suffice.
=back
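As a concrete illustration of the fitness cache suggested in item 3
(a sketch only: your_fitness_function() stands in for your registered
fitness function, and the sprintf precision is an arbitrary choice):

    my %fitnessCache;
    sub cached_fitness {
        my (@position) = @_;
        # limit precision so nearly identical positions share a cache entry
        my $key = join(',', map { sprintf('%.4f', $_) } @position);
        $fitnessCache{$key} = your_fitness_function(@position)
            unless exists $fitnessCache{$key};
        return $fitnessCache{$key};
    }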
lib/AI/PSO.pm
make my code nicely formatted and good looking :)).
=item pso_optimize()
Runs the particle swarm optimization algorithm. This consists of
running iterations of search, with many calls to the fitness function
you registered with pso_register_fitness_function().
=item pso_get_solution_array()
By default, pso_optimize() will print to STDERR the first solution
found, or the best solution so far if the maximum number of
iterations was reached. This function simply returns the winning
(or best-so-far) position of the entire swarm system as an array of
floats, to be used however you wish (e.g. as weights in a neural
network!).
=back