AI-PSO
Revision history for Perl extension AI::PSO.
0.86 Tue Nov 21 20:41:23 2006
- updated documentation
- added support for original RE & JK algorithm
- abstracted initialization function
0.85 Wed Nov 15 22:30:47 2006
- corrected the fitness function in the test
- added perceptron c++ code that I wrote a long time ago ;)
- added an example (pso_ann.pl) for training a simple feed-forward neural network
- updated POD
0.82 Sat Nov 11 22:20:31 2006
- fixed POD to correctly 'use AI::PSO'
- fixed fitness function in PSO.t
- added research paper to package
- moved into a subversion repository
- removed requirement for perl 5.8.8
- removed printing of solution array in test
0.80 Sat Nov 11 14:22:27 2006
- changed namespace to AI::PSO
- added a pso_get_solution_array function
0.70 Fri Nov 10 23:50:32 2006
- added user callback fitness function
- added POD
- added tests
- fixed typos
- changed version to 0.70 because I like 0.7
0.01 Fri Nov 10 18:53:56 2006
- initial version
PSO version 0.86
================
INSTALLATION
To install this module type the following:
perl Makefile.PL
make
make test
make install
DEPENDENCIES
This module requires these other modules and libraries:
Math::Random
Callback
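Math::Random supplies the uniform random deviates used to seed and
perturb particle positions. As a rough sketch of the kind of call
involved (the variable names here are illustrative, not AI::PSO's
internals):

use Math::Random qw(random_uniform);

# Illustrative only: draw 4 uniform deviates in [deltaMin, deltaMax]
# to seed one particle's position.
my ($deltaMin, $deltaMax) = (-2.0, 4.0);
my @position = random_uniform(4, $deltaMin, $deltaMax);
print "@position\n";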
examples/NeuralNet/NeuralNet.h
void setInput(int index, double value)
{
if(index >= 0 && index < m_numInputs)
m_inputs[index].setValue(value);
}
///
/// \fn void setWeightsToOne()
/// \brief sets all of the connections weights to unity
/// \note this is really only used for testing/debugging purposes
///
void setWeightsToOne()
{
for(int i = 0; i < m_numHidden; i++)
for(int j = 0; j < m_hidden[i].numConnections(); j++)
m_hidden[i].setWeight(j, 1.0);
for(int k = 0; k < m_output.numConnections(); k++)
m_output.setWeight(k, 1.0);
}
examples/NeuralNet/main.cpp
/// \file main.cpp
/// \brief Source file for testing a simple three-layer feed-forward neural network class
///
/// \author Kyle Schlansker
/// \date August 2004
//////////////////////////////////////////////////////////////
#include <iostream>
#include <fstream>
#include <string>
using namespace std;
examples/NeuralNet/pso_ann.pl
#!/usr/bin/perl -w
use strict;
use AI::PSO;
my %test_params = (
numParticles => 4,
numNeighbors => 3,
maxIterations => 1000,
dimensions => 8, # 8 network weights to optimize for a 3-input, 2-hidden, 1-output feed-forward net: 3*2 + 2*1 = 8
deltaMin => -2.0,
deltaMax => 4.0,
meWeight => 2.0,
meMin => 0.0,
meMax => 1.0,
themWeight => 2.0,
themMin => 0.0,
themMax => 1.0,
exitFitness => 0.99,
verbose => 1,
);
my $numInputs = 3;
my $numHidden = 2;
my $xferFunc = "Logistic";
my $annConfig = "pso.ann";
my $annInputs = "pso.dat";
my $expectedValue = 3.5; # this is the value that we want to train the ANN to produce (just like the example in t/PSO.t)
sub test_fitness_function(@) {
my (@arr) = (@_);
&writeAnnConfig($annConfig, $numInputs, $numHidden, $xferFunc, @arr);
my $netValue = &runANN($annConfig, $annInputs);
print "network value = $netValue\n";
# The closer the network value gets to our desired value,
# the closer the fitness should be to 1.
#
# This is a special case of the sigmoid, and looks an awful lot
# like the hyperbolic tangent ;)
#
my $magnitudeFromBest = abs($expectedValue - $netValue);
return 2 / (1 + exp($magnitudeFromBest));
}
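A quick sanity check of that squashing (not part of pso_ann.pl): at
zero error the fitness is exactly 1, and it decays toward 0 as the
network output drifts from the target.

# fitness = 2 / (1 + e^|diff|):
#   |diff| = 0  =>  1.000
#   |diff| = 1  =>  0.538
#   |diff| = 5  =>  0.013
foreach my $diff (0, 1, 5) {
    printf("|diff| = %d  =>  fitness = %.3f\n", $diff, 2 / (1 + exp($diff)));
}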
pso_set_params(\%test_params);
pso_register_fitness_function('test_fitness_function');
pso_optimize();
#my @solution = pso_get_solution_array();
##### io #########
sub writeAnnConfig() {
my ($configFile, $inputs, $hidden, $func, @weights) = (@_);
lib/AI/PSO.pm
#
sub swarm() {
for(my $iter = 0; $iter < $maxIterations; $iter++) {
for(my $p = 0; $p < $numParticles; $p++) {
## update position
for(my $d = 0; $d < $dimensions; $d++) {
$particles[$p]{currPos}[$d] = $particles[$p]{nextPos}[$d];
}
## test _current_ fitness of position
my $fitness = &compute_fitness(@{$particles[$p]{currPos}});
# if this position in hyperspace is the best so far...
if($fitness > &compute_fitness(@{$particles[$p]{bestPos}})) {
# for each dimension, set the best position as the current position
for(my $d2 = 0; $d2 < $dimensions; $d2++) {
$particles[$p]{bestPos}[$d2] = $particles[$p]{currPos}[$d2];
}
}
## check for exit criteria
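The excerpt cuts off before the velocity step, but the parameters
above map onto the textbook particle update: meWeight plays the
cognitive coefficient (pull toward the particle's own best) and
themWeight the social one (pull toward the neighborhood best). A
generic, runnable sketch of that update for one particle, not the
module's exact code:

# Canonical PSO update (sketch; AI::PSO scales its random factors
# using [meMin, meMax] and [themMin, themMax] from the config).
my $dimensions = 2;
my ($meWeight, $themWeight) = (2.0, 2.0);
my @currPos      = (0.5, -1.0);   # illustrative per-particle state
my @bestPos      = (0.7, -0.8);   # particle's own best position
my @neighborBest = (1.0,  0.2);   # best position among neighbors
my @velocity     = (0.0,  0.0);
my @nextPos;
for my $d (0 .. $dimensions - 1) {
    $velocity[$d] += $meWeight   * rand() * ($bestPos[$d]      - $currPos[$d])
                   + $themWeight * rand() * ($neighborBest[$d] - $currPos[$d]);
    $nextPos[$d]  = $currPos[$d] + $velocity[$d];
}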
lib/AI/PSO.pm
=item pso_set_params()
Sets the particle swarm configuration parameters to use for the search.
=item pso_register_fitness_function()
Sets the user-defined fitness function to call. The fitness function
should return a value between 0 and 1. Users may want to look into
the sigmoid function [1 / (1 + e^(-x))] and its variants to implement
this. Also, you may want to take a look at either t/PSO.t for the
simple test or examples/NeuralNet/pso_ann.pl for an example of how
to train a simple 3-layer feed-forward neural network. (Note
that a real training application would have a real dataset with many
input-output pairs...pso_ann.pl is a _very_ simple example. Also note
that the neural network example requires g++. Type 'make run' in the
examples/NeuralNet directory to run the example. Lastly, the
neural network C++ code is in a very different coding style. I did
indeed write this, but it was many years ago when I was striving to
make my code nicely formatted and good looking :)).
=item pso_optimize()
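Runs the swarm (see swarm() above) until exitFitness is reached or
maxIterations is exhausted. Pulling the whole public API together, a
minimal end-to-end sketch mirroring the test below:

use AI::PSO;
use List::Util qw(sum);

my %params = (
    numParticles => 4,   numNeighbors => 3,  maxIterations => 1000,
    dimensions   => 4,   deltaMin => -2.0,   deltaMax => 4.0,
    meWeight     => 2.0, meMin => 0.0,       meMax => 1.0,
    themWeight   => 2.0, themMin => 0.0,     themMax => 1.0,
    exitFitness  => 0.99, verbose => 0,
);

# fitness peaks at 1 when the position values sum to 3.5 (as in t/PSO.t)
sub my_fitness { return 2 / (1 + exp(abs(3.5 - sum(@_)))); }

pso_set_params(\%params);
pso_register_fitness_function('my_fitness');
pso_optimize();
my @solution = pso_get_solution_array();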
t/PSO.t
use Test::More tests => 9;
BEGIN { use_ok('AI::PSO') };
my %test_params = (
numParticles => 4,
numNeighbors => 3,
maxIterations => 5000,
dimensions => 4,
deltaMin => -2.0,
deltaMax => 4.0,
meWeight => 2.0,
meMin => 0.0,
meMax => 1.0,
themWeight => 2.0,
themMin => 0.0,
themMax => 1.0,
exitFitness => 0.99,
verbose => 1,
);
my %test_params2 = %test_params;
$test_params2{psoRandomRange} = 4.0;
# simple test function: fitness approaches 1 as the position values sum to 3.5
my $testValue = 3.5;
sub test_fitness_function(@) {
my (@arr) = (@_);
my $sum = 0;
foreach my $val (@arr) {
$sum += $val;
}
# sigmoid-like ==> squash the result to [0,1] and get as close to 3.5 as we can
return 2 / (1 + exp(abs($testValue - $sum)));
}
ok( pso_set_params(\%test_params) == 0 );
ok( pso_register_fitness_function('test_fitness_function') == 0 );
ok( pso_optimize() == 0 );
my @solution = pso_get_solution_array();
ok( $#solution == $test_params{numParticles} - 1 );
ok( pso_set_params(\%test_params2) == 0 );
ok( pso_register_fitness_function('test_fitness_function') == 0 );
ok( pso_optimize() == 0 );
my @solution2 = pso_get_solution_array();
ok( $#solution2 == $test_params2{numParticles} - 1 );