return sprintf("%.10f",($diff/$a1s));
}
# Returns the absolute difference between $fa and $fb as a percentage of $fa
sub p {
shift if(substr($_[0],0,4) eq 'AI::');
my ($fa,$fb)=(shift,shift);
sprintf("%.3f",((($fb-$fa)*((($fb-$fa)<0)?-1:1))/$fa)*100);
}
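# Example (illustrative values): p(2630, 2645) returns "0.570", i.e. the
# absolute difference (15) expressed as a percentage of the first argument.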
# This sub will take an array ref of a data set, which it expects in this format:
# my @data_set = ( [ ...inputs... ], [ ...outputs ... ],
# ... rows ...
# );
#
# This sub returns the percentage of 'forgetfulness' when the net learns all the
# data in the set in order. Usage:
#
# learn_set(\@data,[ options ]);
#
# Options are passed in hash form and may be any options that $net->learn takes.
#
# It returns a percentage string.
#
sub learn_set {
my $self = shift if(substr($_[0],0,4) eq 'AI::');
my $data = shift;
my %args = @_;
my $len = $#{$data}/2-1;
my $inc = $args{inc};
my $max = $args{max};
my $error = $args{error};
my $p = (defined $args{flag}) ?$args{flag} :1;
my $row = (defined $args{pattern})?$args{pattern}*2+1:1;
my ($fa,$fb);
$data->[$row] = $self->crunch($data->[$row]) if($data->[$row] == 0);
if ($p) {
$res=pdiff($data->[$row],$self->run($data->[$row-1]));
} else {
$res=$data->[$row]->[0]-$self->run($data->[$row-1])->[0];
}
return $res;
}
# This sub will take an array ref of a data set, which it expects in this format:
# my @data_set = ( [ ...inputs... ], [ ...outputs ... ],
# ... rows ...
# );
#
# This sub returns the percentage of 'forgetfulness' when the net learns all the
# data in the set in RANDOM order. Usage:
#
# learn_set_rand(\@data,[ options ]);
#
# Options are passed in hash form and may be any options that $net->learn takes.
#
# It returns a true value.
#
sub learn_set_rand {
my $self = shift if(substr($_[0],0,4) eq 'AI::');
my $data = shift;
my %args = @_;
my $len = $#{$data}/2-1;
my $inc = $args{inc};
my $max = $args{max};
my $error = $args{error};
my @learned;
while(1) {
_GET_X:
# to the appropriate ends of the neuron grids.
sub initialize_group() {
my $self = shift;
my $size = $self->{SIZE};
my $div = $self->{DIV};
my $out = $self->{OUT};
my $flag = $self->{FLAG};
my $x = 0;
my $y = 0;
# Reset map and run synapse counters.
$self->{RUN}->{REGISTRATION} = $self->{MAP}->{REGISTRATION} = 0;
AI::NeuralNet::BackProp::out2 "There will be $size neurons in this network group, with a divison value of $div.\n";
#print "There will be $size neurons in this network group, with a divison value of $div.\n";
# Create initial neuron packages in one long array for the entire group
for($y=0; $y<$size; $y++) {
#print "Initalizing neuron $y... \r";
$self->{NET}->[$y]=new AI::NeuralNet::BackProp::neuron();
}
# of neurons supposed to be in each layer
for($y=0; $y<$size; $y+=$div) {
if($y+$div>=$size) {
last;
}
# Inner loop connects every neuron in this 'layer' to one input of every neuron in
# the next 'layer'. Remember, layers only exist in terms of where the connections
# are divided. For example, if a person requested 2 layers and 3 neurons per layer,
# then there would be 6 neurons in the {NET}->[] list, and $div would be set to
# 3. So we would loop over and every 3 neurons we would connect each of those 3
# neurons to one input of every neuron in the next set of 3 neurons. Of course, this
# is an example. 3 and 2 are set by the new() constructor.
# Flag values:
# 0 - (default) -
# My feed-forward style: Each neuron in layer X is connected to one input of every
# neuron in layer Y. The best and most proven flag style.
#
# ^ ^ ^
# O\ O\ /O Layer Y
# ^\\/^/\/^
# | //|\/\|
my @map = ();
my $value;
AI::NeuralNet::BackProp::out1 "Num output neurons: $out, Input neurons: $size, Division: $divide\n";
for(0..$out-1) {
$value=0;
for my $a (0..$divide-1) {
$value += $self->{OUTPUT}->[($_*$divide)+$a]->{VALUE};
AI::NeuralNet::BackProp::out1 "\$a is $a, index is ".(($_*$divide)+$a).", value is $self->{OUTPUT}->[($_*$divide)+$a]->{VALUE}\n";
}
$map[$_] = AI::NeuralNet::BackProp->intr($value/$divide);
AI::NeuralNet::BackProp::out1 "Map position $_ is $map[$_] in @{[\@map]} with self set to $self.\n";
$self->{OUTPUT}->[$_]->{FIRED} = 0;
}
my $ret=\@map;
return $self->{PARENT}->_range($ret);
}
1;
# load_pcx() wrapper package
package AI::NeuralNet::BackProp::PCX;
# Add a small amount of randomness to the network
$net->random(0.001);
# Demonstrate a simple learn() call
my @inputs = ( 0,0,1,1,1 );
my @outputs = ( 1,0,1,0,1 );
print $net->learn(\@inputs, \@outputs),"\n";
# Create a data set to learn
my @set = (
[ 2,2,3,4,1 ], [ 1,1,1,1,1 ],
[ 1,1,1,1,1 ], [ 0,0,0,0,0 ],
[ 1,1,1,0,0 ], [ 0,0,0,1,1 ]
);
# Demo learn_set()
my $f = $net->learn_set(\@set);
print "Forgetfulness: $f unit\n";
# Crunch a bunch of strings and return array refs
my $phrase1 = $net->crunch("I love neural networks!");
my $phrase2 = $net->crunch("Jay Lenno is wierd.");
my $phrase3 = $net->crunch("The rain in spain...");
my $phrase4 = $net->crunch("Tired of word crunching yet?");
# Make a data set from the array refs
my @phrases = (
$phrase1, $phrase2,
$phrase3, $phrase4
);
# Learn the data set
$net->learn_set(\@phrases);
# Run a test phrase through the network
my $test_phrase = $net->crunch("I love neural networking!");
my $result = $net->run($test_phrase);
# Get this, it prints "Jay Leno is networking!" ... LOL!
print $net->uncrunch($result),"\n";
my $net = new AI::NeuralNet::BackProp(2,3,1);
The new() constructor accepts two required arguments, $layers and $size, and one
optional argument, $outputs (in this example, $layers is 2, $size is 3, and $outputs is 1).
$layers specifies the number of layers, including the input
and the output layer, to use in each neural grouping. A new
neural grouping is created for each pattern learned. Layers
is typically set to 2. Each layer has $size neurons in it.
Each neuron's output is connected to one input of every neuron
in the layer below it.
This diagram illustrates a simple network, created with a call
to "new AI::NeuralNet::BackProp(2,2,2)" (2 layers, 2 neurons/layer, 2 outputs).
input
/ \
O O
|\ /|
$learning_gradient is an optional value used to adjust the weights of the internal
connections. If $learning_gradient is omitted, it defaults to 0.20.
$maximum_iterations is the maximum number of iterations the loop should do.
It defaults to 1024. Set it to 0 if you never want the loop to quit before
the pattern is perfectly learned.
$maximum_allowable_percentage_of_error is the maximum allowable error to have. If
this is set, then learn() will return when the percentage difference between the
actual results and desired results falls below $maximum_allowable_percentage_of_error.
If you do not include 'error', or $maximum_allowable_percentage_of_error is set to -1,
then learn() will not return until it gets an exact match for the desired result OR it
reaches $maximum_iterations.
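For example, a call using all three options might look like this (the values shown
here are illustrative only):
$net->learn(\@inputs, \@outputs,
    inc   => 0.20,   # learning gradient
    max   => 1024,   # maximum iterations
    error => 5);     # return once the error falls below 5%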
=item $net->learn_set(\@set, [ options ]);
UPDATE: Inputs and outputs in the dataset can now be strings. See information on auto-crunching
in learn()
This takes the same options as learn() and allows you to specify a set to learn, rather
than individual patterns. A dataset is an array reference with at least two elements in the
array, each element being another array reference (or now, a scalar string). For each pattern to
learn, you must specify an input array ref, and an output array ref as the next element. Example:
my @set = (
# inputs outputs
[ 1,2,3,4 ], [ 1,3,5,6 ],
[ 0,2,5,6 ], [ 0,2,1,2 ]
);
See the paragraph on measuring forgetfulness, below. There are
two learn_set()-specific option tags available:
flag => $flag
pattern => $row
If "flag" is set to some TRUE value, as in "flag => 1" in the hash of options, or if the option "flag"
is not set, then it will return a percentage representing the amount of forgetfulness. Otherwise,
learn_set() will return an integer specifying the amount of forgetfulness when all the patterns
are learned.
If "pattern" is set, then learn_set() will use that pattern in the data set to measure forgetfulness by.
If "pattern" is omitted, it defaults to the first pattern in the set. Example:
my @set = (
[ 0,1,0,1 ], [ 0 ],
[ 0,0,1,0 ], [ 1 ],
[ 1,1,0,1 ], [ 2 ], # <---
[ 0,1,1,0 ], [ 3 ]
);
If you wish to measure forgetfulness as indicated by the line with the arrow, then you would
pass 2 as the "pattern" option, as in "pattern => 2".
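For instance, the following call (illustrative) learns the whole set and then reports how
well the third pattern was retained:
my $f = $net->learn_set(\@set, flag => 1, pattern => 2);
print "Forgetfulness: $f%\n";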
Now why the heck would anyone want to measure forgetfulness, you ask? Maybe you wonder how I
even measure that. Well, it is not a vital value that you have to know. I just put in a
"forgetfulness measure" one day because I thought it would be neat to know.
How the module measures forgetfulness is this: First, it learns all the patterns in the set provided,
then it will run the very first pattern (or whatever pattern is specified by the "pattern" option)
in the set after it has finished learning. It will compare the run() output with the desired output
as specified in the dataset. In a perfect world, the two should match exactly. What we measure is
how much that they don't match, thus the amount of forgetfulness the network has.
NOTE: In version 0.77 percentages were disabled because of a bug. Percentages are now enabled.
Example (from examples/ex_dow.pl):
# Data from 1989 (as far as I know..this is taken from example data on BrainMaker)
my @data = (
# Mo CPI CPI-1 CPI-3 Oil Oil-1 Oil-3 Dow Dow-1 Dow-3 Dow Ave (output)
[ 1, 229, 220, 146, 20.0, 21.9, 19.5, 2645, 2652, 2597], [ 2647 ],
[ 2, 235, 226, 155, 19.8, 20.0, 18.3, 2633, 2645, 2585], [ 2637 ],
[ 3, 244, 235, 164, 19.6, 19.8, 18.1, 2627, 2633, 2579], [ 2630 ],
[ 4, 261, 244, 181, 19.6, 19.6, 18.1, 2611, 2627, 2563], [ 2620 ],
[ 5, 276, 261, 196, 19.5, 19.6, 18.0, 2630, 2611, 2582], [ 2638 ],
[ 6, 287, 276, 207, 19.5, 19.5, 18.0, 2637, 2630, 2589], [ 2635 ],
[ 7, 296, 287, 212, 19.3, 19.5, 17.8, 2640, 2637, 2592], [ 2641 ]
);
# Learn the set
my $f = $net->learn_set(\@data,
inc => 0.1,
max => 500,
p => 1
);
# Print it
print "Forgetfullness: $f%";
This is a snippet from the example script examples/ex_dow.pl, which demonstrates DOW average
prediction for the next month. A simpler set definition would be as follows:
my @data = (
[ 0,1 ], [ 1 ],
[ 1,0 ], [ 0 ]
);
$net->learn_set(\@data);
Same effect as above, but not the same data (obviously).
=item $net->learn_set_rand(\@set, [ options ]);
UPDATE: Inputs and outputs in the dataset can now be strings. See information on auto-crunching
in learn()
This takes the same options as learn() and allows you to specify a set to learn, rather
than individual patterns.
learn_set_rand() differs from learn_set() in that it learns the patterns in a random order,
each pattern once, rather than in the order that they are in the array. This returns a true
value (1) instead of a forgetfulness factor.
Example:
my @data = (
[ 0,1 ], [ 1 ],
[ 1,0 ], [ 0 ]
);
$net->learn_set_rand(\@data);
=item $net->run($input_map_ref);
UPDATE: run() will now I<automatically> crunch() a string given as the input.
This method will apply the given array ref at the input layer of the neural network, and
it will return an array ref to the output of the network.
$net->uncrunch($net->run($input_map_ref));
All that run_uc() does is that it automatically calls uncrunch() on the output, regardless
of whether the input was crunch() -ed or not.
=item $net->range();
This allows you to limit the possible outputs to a specific set of values. There are several
ways you can specify the set of values to limit the output to. Each method is shown below.
When called without any arguments, it will disable output range limits. You will need to re-learn
any data previously learned after disabling ranging, as disabling range invalidates the current
weight matrix in the network.
range() automatically scales the network's outputs to fit inside the range you allow, and it
keeps track of the maximum output it can expect to scale. Therefore, you will need to learn()
the whole data set again after calling range() on a network.
Subsequent calls to range() invalidate any previous calls to range()
NOTE: It is recommended that you call range() before you call learn(), or else you will get unexpected
results from any run() call after range() .
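For example (an illustrative sketch; note the re-learning step described above):
$net->range(0..5);        # allow only the values 0 through 5 as outputs
$net->learn_set(\@set);   # re-learn the data set after changing the range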
=item $net->range($bottom..$top);
This is a common form often used in a C<for my $x (0..20)> type of for() constructor. It works
I used level 4 debugging quite often in creating the letters.pl example script and the small_1.pl
example script.
Toggles debugging off when called with no arguments.
=item $net->save($filename);
This will save the complete state of the network to disk, including all weights and any
words crunched with crunch() . Also saves any output ranges set with range() .
This has been modified to use a simple flat-file text storage format and no longer
depends on any external modules.
=item $net->load($filename);
This will load from disk any network saved by save() and completely restore the internal
state to the point at which save() was called.
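A common idiom, used throughout the example scripts, is to load a saved network if one
exists and to learn and save otherwise (the filename here is illustrative):
if(!$net->load('mynet.dat')) {
    $net->learn_set(\@set);
    $net->save('mynet.dat');
}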
=item $net->uncrunch($array_ref);
Uncrunches a map (array ref) into a scalar string of words separated by ' ' and returns the
string. This is meant to be used as a counterpart to the crunch() method, above, possibly to
uncrunch() the output of a run() call. Consider the below code (also in ./examples/ex_crunch.pl):
use AI::NeuralNet::BackProp;
my $net = AI::NeuralNet::BackProp->new(2,3);
for (0..3) { # Note: The four learn() statements below could
# be replaced with learn_set() to do the same thing,
# but use this form here for clarity.
$net->learn($net->crunch("I love chips."), $net->crunch("That's Junk Food!"));
$net->learn($net->crunch("I love apples."), $net->crunch("Good, Healthy Food."));
$net->learn($net->crunch("I love pop."), $net->crunch("That's Junk Food!"));
$net->learn($net->crunch("I love oranges."),$net->crunch("Good, Healthy Food."));
}
my $response = $net->run($net->crunch("I love corn."));
=item $net->crunched($word);
This will return undef if the word is not in the internal crunch list, or it will return the
index of the word if it exists in the crunch list.
=item $net->col_width($width);
This is useful for formatting the debugging output of Level 4 if you are learning simple
bitmaps. This will set the debugger to automatically insert a line break after that many
elements in the map output when dumping the currently run map during a learn loop.
It will return the current width when called with a 0 or undef value.
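For example, when learning the 5x5 bitmaps used in the example scripts:
$net->col_width(5);   # break Level 4 map dumps into rows of 5 elements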
=item $net->random($rand);
This will set the randomness factor of the network. Default is 0.001. When called
with no arguments, or an undef value, it will return current randomness value. When
called with a 0 value, it will disable randomness in the network. See NOTES on learning
a 0 value in the input map with randomness disabled.
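For example:
$net->random(0.001);      # enable slight randomness (the default factor)
$net->random(0);          # disable randomness entirely
my $r = $net->random();   # query the current randomness value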
=item $net->load_pcx($filename);
Oh, here's a treat... this routine will load a PCX-format file (yah, I know ... ancient format ... but
it is the only one I could find specs for to write it in Perl. If anyone can get specs for
=head1 NOTES
=item Learning 0s With Randomness Disabled
You can now use 0 values in any input maps. This is a good improvement over versions 0.40
and 0.42, where no 0s were allowed because the network would never completely finish learning
with a 0 in the input.
However, to learn correctly with 0s present, one of two conditions must be met. Either you
must enable randomness with $net->random(0.0001) (Any values work [other than 0], see random() ),
or you must set an error-minimum with the 'error => 5' option (you can use some other error value
as well).
When randomness is enabled (that is, when you call random() with a value other than 0), it interjects
a bit of randomness into the output of every neuron in the network, except for the input and output
neurons. The randomness is interjected with rand()*$rand, where $rand is the value that was
passed to the random() call. This ensures the network will never have a pure 0 internally. It is
bad to have a pure 0 internally because the weights cannot change a 0 when multiplied by a 0, the
product stays a 0. Yet when a weight is multiplied by 0.00001, eventually with enough weight, it will
be able to learn. With a 0 value instead of 0.00001 or whatever, then it would never be able
to add enough weight to get anything other than a 0.
The second option to allow for 0s is to enable a maximum error with the 'error' option in
learn() , learn_set() , and learn_set_rand() . This allows the network to not worry about
learning an output perfectly.
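In practice, either approach looks like this (illustrative values):
$net->random(0.0001);                           # approach 1: a tiny randomness factor
$net->learn(\@inputs, \@outputs, error => 5);   # approach 2: allow up to 5% error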
For accuracy reasons, it is recommended that you work with 0s using the random() method.
If anyone has any thoughts/arguments/suggestions for using 0s in the network, let me know
at jdb@wcoil.com.
=head1 DOWNLOAD
You can always download the latest copy of AI::NeuralNet::BackProp
from http://www.josiah.countystart.com/modules/AI/cgi-bin/rec.pl
=head1 MAILING LIST
A mailing list has been set up for AI::NeuralNet::BackProp for discussion of AI and
neural net related topics as they pertain to AI::NeuralNet::BackProp. I will also
announce in the group each time a new release of AI::NeuralNet::BackProp is available.
The list address is at: ai-neuralnet-backprop@egroups.com
To subscribe, send a blank email to: ai-neuralnet-backprop-subscribe@egroups.com
=cut
my $net = new AI::NeuralNet::BackProp(1,5,5);
# Add a small amount of randomness to the network
$net->random(0.001);
# Demonstrate a simple learn() call
my @inputs = ( 0,0,1,1,1 );
my @outputs = ( 1,0,1,0,1 );
print $net->learn(\@inputs, \@outputs),"\n";
# Create a data set to learn
my @set = (
[ 2,2,3,4,1 ], [ 1,1,1,1,1 ],
[ 1,1,1,1,1 ], [ 0,0,0,0,0 ],
[ 1,1,1,0,0 ], [ 0,0,0,1,1 ]
);
# Demo learn_set()
my $f = $net->learn_set(\@set);
print "Forgetfulness: $f unit\n";
# Crunch a bunch of strings and return array refs
my $phrase1 = $net->crunch("I love neural networks!");
my $phrase2 = $net->crunch("Jay Lenno is wierd.");
my $phrase3 = $net->crunch("The rain in spain...");
my $phrase4 = $net->crunch("Tired of word crunching yet?");
# Make a data set from the array refs
my @phrases = (
$phrase1, $phrase2,
$phrase3, $phrase4
);
# Learn the data set
$net->learn_set(\@phrases);
# Run a test phrase through the network
my $test_phrase = $net->crunch("I love neural networking!");
my $result = $net->run($test_phrase);
# Get this, it prints "Jay Leno is networking!" ... LOL!
print $net->uncrunch($result),"\n"
</PRE>
</P>
You constuct a new network via the new constructor:
<PRE>
my $net = new AI::NeuralNet::BackProp(2,3,1);</PRE>
<P>The <CODE>new()</CODE> constructor accepts two required arguments, $layers and $size, and one
optional argument, $outputs (in this example, $layers is 2, $size is 3, and $outputs is 1).</P>
<P>$layers specifies the number of layers, including the input
and the output layer, to use in each neural grouping. A new
neural grouping is created for each pattern learned. Layers
is typically set to 2. Each layer has $size neurons in it.
Each neuron's output is connected to one input of every neuron
in the layer below it.
</P>
This diagram illustrates a simple network, created with a call
to "new AI::NeuralNet::BackProp(2,2,2)" (2 layers, 2 neurons/layer, 2 outputs).
<PRE>
input
/ \
max => $maximum_iterations
error => $maximum_allowable_percentage_of_error</PRE>
<P>$learning_gradient is an optional value used to adjust the weights of the internal
connections. If $learning_gradient is omitted, it defaults to 0.20.
</P>
<P>
$maximum_iterations is the maximum number of iterations the loop should do.
It defaults to 1024. Set it to 0 if you never want the loop to quit before
the pattern is perfectly learned.</P>
<P>$maximum_allowable_percentage_of_error is the maximum allowable error to have. If
this is set, then <A HREF="#item_learn"><CODE>learn()</CODE></A> will return when the percentage difference between the
actual results and desired results falls below $maximum_allowable_percentage_of_error.
If you do not include 'error', or $maximum_allowable_percentage_of_error is set to -1,
then <A HREF="#item_learn"><CODE>learn()</CODE></A> will not return until it gets an exact match for the desired result OR it
reaches $maximum_iterations.</P>
<P></P>
<DT><STRONG><A NAME="item_learn_set">$net->learn_set(\@set, [ options ]);</A></STRONG><BR>
<DD>
<B>UPDATED:</B> Inputs and outputs in the dataset can now be strings. See information on auto-crunching
in <A HREF="#item_learn"><CODE>learn()</CODE></A>
<P>This takes the same options as <A HREF="#item_learn"><CODE>learn()</CODE></A> and allows you to specify a set to learn, rather
than individual patterns. A dataset is an array reference with at least two elements in the
array, each element being another array reference (or now, a scalar string). For each pattern to
learn, you must specify an input array ref, and an output array ref as the next element. Example:
</P>
<PRE>
my @set = (
# inputs outputs
[ 1,2,3,4 ], [ 1,3,5,6 ],
[ 0,2,5,6 ], [ 0,2,1,2 ]
);</PRE>
<P>See the paragraph on measuring forgetfulness, below. There are
two learn_set()-specific option tags available:</P>
<PRE>
flag => $flag
pattern => $row</PRE>
<P>If ``flag'' is set to some TRUE value, as in ``flag => 1'' in the hash of options, or if the option ``flag''
is not set, then it will return a percentage representing the amount of forgetfulness. Otherwise,
<A HREF="#item_learn_set"><CODE>learn_set()</CODE></A> will return an integer specifying the amount of forgetfulness when all the patterns
are learned.</P>
<P>If ``pattern'' is set, then <A HREF="#item_learn_set"><CODE>learn_set()</CODE></A> will use that pattern in the data set to measure forgetfulness by.
If ``pattern'' is omitted, it defaults to the first pattern in the set. Example:</P>
<PRE>
my @set = (
[ 0,1,0,1 ], [ 0 ],
[ 0,0,1,0 ], [ 1 ],
[ 1,1,0,1 ], [ 2 ], # <---
[ 0,1,1,0 ], [ 3 ]
);
</PRE>
<P>
If you wish to measure forgetfulness as indicated by the line with the arrow, then you would
pass 2 as the "pattern" option, as in "pattern => 2".</P>
<P>Now why the heck would anyone want to measure forgetfulness, you ask? Maybe you wonder how I
even measure that. Well, it is not a vital value that you have to know. I just put in a
``forgetfulness measure'' one day because I thought it would be neat to know.</P>
<P>How the module measures forgetfulness is this: First, it learns all the patterns in the set provided,
then it will run the very first pattern (or whatever pattern is specified by the ``pattern'' option)
in the set after it has finished learning. It will compare the <A HREF="#item_run"><CODE>run()</CODE></A> output with the desired output
as specified in the dataset. In a perfect world, the two should match exactly. What we measure is
how much that they don't match, thus the amount of forgetfulness the network has.</P>
<P>NOTE: In version 0.77 percentages were disabled because of a bug. Percentages are now enabled.</P>
<P>Example (from examples/ex_dow.pl):</P>
<PRE>
# Data from 1989 (as far as I know..this is taken from example data on BrainMaker)
my @data = (
# Mo CPI CPI-1 CPI-3 Oil Oil-1 Oil-3 Dow Dow-1 Dow-3 Dow Ave (output)
[ 1, 229, 220, 146, 20.0, 21.9, 19.5, 2645, 2652, 2597], [ 2647 ],
[ 2, 235, 226, 155, 19.8, 20.0, 18.3, 2633, 2645, 2585], [ 2637 ],
[ 3, 244, 235, 164, 19.6, 19.8, 18.1, 2627, 2633, 2579], [ 2630 ],
[ 4, 261, 244, 181, 19.6, 19.6, 18.1, 2611, 2627, 2563], [ 2620 ],
[ 5, 276, 261, 196, 19.5, 19.6, 18.0, 2630, 2611, 2582], [ 2638 ],
[ 6, 287, 276, 207, 19.5, 19.5, 18.0, 2637, 2630, 2589], [ 2635 ],
[ 7, 296, 287, 212, 19.3, 19.5, 17.8, 2640, 2637, 2592], [ 2641 ]
);
# Learn the set
my $f = $net->learn_set(\@data,
inc => 0.1,
max => 500,
p => 1
);
# Print it
print "Forgetfullness: $f%";</PRE>
<P></P>
<P>
This is a snippet from the example script examples/ex_dow.pl, which demonstrates DOW average
prediction for the next month. A simpler set definition would be as follows:</P>
<PRE>
my @data = (
[ 0,1 ], [ 1 ],
[ 1,0 ], [ 0 ]
);
$net->learn_set(\@data);
</PRE>
Same effect as above, but not the same data (obviously).
<P></P>
<DT><STRONG><A NAME="item_learn_set_rand">$net->learn_set_rand(\@set, [ options ]);</A></STRONG><BR>
<DD>
<B>UPDATED:</B> Inputs and outputs in the dataset can now be strings. See information on auto-crunching
in <A HREF="#item_learn"><CODE>learn()</CODE></A>
<P>This takes the same options as <A HREF="#item_learn"><CODE>learn()</CODE></A> and allows you to specify a set to learn, rather
than individual patterns.</P>
<P><A HREF="#item_learn_set_rand"><CODE>learn_set_rand()</CODE></A> differs from <A HREF="#item_learn_set"><CODE>learn_set()</CODE></A> in that it learns the patterns in a random order,
each pattern once, rather than in the order that they are in the array. This returns a true
value (1) instead of a forgetfulness factor.</P>
<P>Example:</P>
<PRE>
my @data = (
[ 0,1 ], [ 1 ],
[ 1,0 ], [ 0 ]
);
$net->learn_set_rand(\@data);</PRE>
<P></P>
<DT><STRONG><A NAME="item_run">$net->run($input_map_ref);</A></STRONG><BR>
<DD>
<B>UPDATED:</B> <A HREF="#item_run"><CODE>run()</CODE></A> will now <EM>automatically</EM> <A HREF="#item_crunch"><CODE>crunch()</CODE></A> a string given as the input.
<P>This method will apply the given array ref at the input layer of the neural network, and
it will return an array ref to the output of the network.</P>
<P>Example:
</P>
<PRE>
<DD>
This method does the same thing as this code:
<PRE>
$net->uncrunch($net->run($input_map_ref));</PRE>
<P>All that <A HREF="#item_run_uc"><CODE>run_uc()</CODE></A> does is that it automatically calls <A HREF="#item_uncrunch"><CODE>uncrunch()</CODE></A> on the output, regardless
of whether the input was <A HREF="#item_crunch"><CODE>crunch()</CODE></A> -ed or not.</P>
<P></P>
<DT><STRONG><A NAME="item_range">$net->range();</A></STRONG><BR>
<DD>
This allows you to limit the possible outputs to a specific set of values. There are several
ways you can specify the set of values to limit the output to. Each method is shown below.
When called without any arguments, it will disable output range limits. You will need to re-learn
any data previously learned after disabling ranging, as disabling range invalidates the current
weight matrix in the network.
<P><A HREF="#item_range"><CODE>range()</CODE></A> automatically scales the networks outputs to fit inside the size of range you allow, and, therefore,
it keeps track of the maximum output it can expect to scale. Therefore, you will need to <A HREF="#item_learn"><CODE>learn()</CODE></A>
the whole data set again after calling <A HREF="#item_range"><CODE>range()</CODE></A> on a network.</P>
<P>Subsequent calls to <A HREF="#item_range"><CODE>range()</CODE></A> invalidate any previous calls to <A HREF="#item_range"><CODE>range()</CODE></A></P>
<P>NOTE: It is recommended that you call <A HREF="#item_range"><CODE>range()</CODE></A> before you call <A HREF="#item_learn"><CODE>learn()</CODE></A>, or else you will get unexpected
results from any <A HREF="#item_run"><CODE>run()</CODE></A> call after <A HREF="#item_range"><CODE>range()</CODE></A> .</P>
<P></P>
<DT><STRONG>$net->range($bottom..$top);</STRONG><BR>
<DD>
This is a common form often used in a <CODE>for my $x (0..20)</CODE> type of <CODE>for()</CODE> constructor. It works
the exact same way. It will allow all numbers from $bottom to $top, inclusive, to be given
as outputs of the network. No other values will be possible, other than those between $bottom
and $top, inclusive.
learning as a whole. Also prints the percentage difference for each loop between current network
results and desired results, as well as the learning gradient ('increment').</P>
<P>Level 4 is useful for seeing if you need to give a smaller learning increment to <A HREF="#item_learn"><CODE>learn()</CODE></A> .
I used level 4 debugging quite often in creating the letters.pl example script and the small_1.pl
example script.</P>
<P>Toggles debugging off when called with no arguments.</P>
<P></P>
<DT><STRONG><A NAME="item_save">$net->save($filename);</A></STRONG><BR>
<DD>
This will save the complete state of the network to disk, including all weights and any
words crunched with <A HREF="#item_crunch"><CODE>crunch()</CODE></A> . Also saves any output ranges set with <A HREF="#item_range"><CODE>range()</CODE></A> .
<P>This has been modified to use a simple flat-file text storage format and no longer
depends on any external modules.</P>
<P></P>
<DT><STRONG><A NAME="item_load">$net->load($filename);</A></STRONG><BR>
<DD>
This will load from disk any network saved by <A HREF="#item_save"><CODE>save()</CODE></A> and completely restore the internal
state to the point at which <A HREF="#item_save"><CODE>save()</CODE></A> was called.
<P></P>
<DT><STRONG><A NAME="item_join_cols">$net->join_cols($array_ref,$row_length_in_elements,$high_state_character,$low_state_character);</A></STRONG><BR>
<DD>
Uncrunches a map (array ref) into a scalar string of words separated by ' ' and returns the
string. This is meant to be used as a counterpart to the <A HREF="#item_crunch"><CODE>crunch()</CODE></A> method, above, possibly to
<A HREF="#item_uncrunch"><CODE>uncrunch()</CODE></A> the output of a <A HREF="#item_run"><CODE>run()</CODE></A> call. Consider the below code (also in ./examples/ex_crunch.pl):
<PRE>
use AI::NeuralNet::BackProp;
my $net = AI::NeuralNet::BackProp->new(2,3);
for (0..3) { # Note: The four learn() statements below could
# be replaced with learn_set() to do the same thing,
# but use this form here for clarity.
$net->learn($net->crunch("I love chips."), $net->crunch("That's Junk Food!"));
$net->learn($net->crunch("I love apples."), $net->crunch("Good, Healthy Food."));
$net->learn($net->crunch("I love pop."), $net->crunch("That's Junk Food!"));
$net->learn($net->crunch("I love oranges."),$net->crunch("Good, Healthy Food."));
}
my $response = $net->run($net->crunch("I love corn."));
print $net->uncrunch($response),"\n";</PRE>
for some interesting demos!</P>
<P></P>
<DT><STRONG><A NAME="item_crunched">$net->crunched($word);</A></STRONG><BR>
<DD>
This will return undef if the word is not in the internal crunch list, or it will return the
index of the word if it exists in the crunch list.
<P></P>
<DT><STRONG><A NAME="item_col_width">$net->col_width($width);</A></STRONG><BR>
<DD>
This is useful for formatting the debugging output of Level 4 if you are learning simple
bitmaps. This will set the debugger to automatically insert a line break after that many
elements in the map output when dumping the currently run map during a learn loop.
<P>It will return the current width when called with a 0 or undef value.</P>
<P></P>
<DT><STRONG><A NAME="item_random">$net->random($rand);</A></STRONG><BR>
<DD>
This will set the randomness factor of the network. Default is 0.001. When called
with no arguments, or an undef value, it will return current randomness value. When
called with a 0 value, it will disable randomness in the network. See NOTES on learning
a 0 value in the input map with randomness disabled.
<P></P>
<DT><STRONG><A NAME="item_load_pcx">$net->load_pcx($filename);</A></STRONG><BR>
<DD>
Oh, here's a treat... this routine will load a PCX-format file (yah, I know ... ancient format ... but
it is the only one I could find specs for to write it in Perl. If anyone can get specs for
any other formats, or could write a loader for them, I would be very grateful!) Anyways, a PCX-format
file that is exactly 320x200 with 8 bits per pixel, with pure Perl. It returns a blessed reference to
<HR SIZE=1 COLOR=BLACK>
<H1><A NAME="notes">NOTES</A></H1>
<DL>
<DT><STRONG><A NAME="item_Learning_0s_With_Randomness_Disabled">Learning 0s With Randomness Disabled</A></STRONG><BR>
<DD>
You can now use 0 values in any input maps. This is a good improvement over versions 0.40
and 0.42, where no 0s were allowed because the network would never completely finish learning
with a 0 in the input.
<P>However, to learn correctly with 0s present, one of two conditions must be met. Either you
must enable randomness with $net-><A HREF="#item_random"><CODE>random(0.0001)</CODE></A> (Any values work [other than 0], see <A HREF="#item_random"><CODE>random()</CODE></A> ),
or you must set an error-minimum with the 'error => 5' option (you can use some other error value
as well).</P>
<P>When randomness is enabled (that is, when you call <A HREF="#item_random"><CODE>random()</CODE></A> with a value other than 0), it interjects
a bit of randomness into the output of every neuron in the network, except for the input and output
neurons. The randomness is interjected with rand()*$rand, where $rand is the value that was
passed to the <A HREF="#item_random"><CODE>random()</CODE></A> call. This ensures the network will never have a pure 0 internally. It is
bad to have a pure 0 internally because the weights cannot change a 0 when multiplied by a 0, the
product stays a 0. Yet when a weight is multiplied by 0.00001, eventually with enough weight, it will
be able to learn. With a 0 value instead of 0.00001 or whatever, then it would never be able
to add enough weight to get anything other than a 0.</P>
<P>The second option to allow for 0s is to enable a maximum error with the 'error' option in
<A HREF="#item_learn"><CODE>learn()</CODE></A> , <A HREF="#item_learn_set"><CODE>learn_set()</CODE></A> , and <A HREF="#item_learn_set_rand"><CODE>learn_set_rand()</CODE></A> . This allows the network to not worry about
learning an output perfectly.</P>
<P>For accuracy reasons, it is recommended that you work with 0s using the <A HREF="#item_random"><CODE>random()</CODE></A> method.</P>
<P>If anyone has any thoughts/arguments/suggestions for using 0s in the network, let me know
at <A HREF="mailto:jdb@wcoil.com.">jdb@wcoil.com.</A></P>
<P></P></DL>
<P>
<HR SIZE=1 COLOR=BLACK>
<H1><A NAME="other included packages">OTHER INCLUDED PACKAGES</A></H1>
<DL>
<DT><STRONG><A NAME="item_AI%3A%3ANeuralNet%3A%3ABackProp%3A%3Aneuron">AI::NeuralNet::BackProp::neuron</A></STRONG><BR>
which provides a good test bed for coming up with new ideas for the network. Thank you for all of
your help, everybody.</P>
<P>
<HR SIZE=1 COLOR=BLACK>
<H1><A NAME="download">DOWNLOAD</A></H1>
<P>You can always download the latest copy of AI::NeuralNet::BackProp
from <A HREF="http://www.josiah.countystart.com/modules/AI/cgi-bin/rec.pl">http://www.josiah.countystart.com/modules/AI/cgi-bin/rec.pl</A></P>
<P>
<HR SIZE=1 COLOR=BLACK>
<H1><A NAME="mailing list">MAILING LIST</A></H1>
<P>A mailing list has been set up for AI::NeuralNet::BackProp for discussion of AI and
neural net related topics as they pertain to AI::NeuralNet::BackProp. I will also
announce in the group each time a new release of AI::NeuralNet::BackProp is available.</P>
<P>The list address is at: <A HREF="mailto:ai-neuralnet-backprop@egroups.com">ai-neuralnet-backprop@egroups.com</A></P>
<P>To subscribe, send a blank email to: <A HREF="mailto:ai-neuralnet-backprop-subscribe@egroups.com">ai-neuralnet-backprop-subscribe@egroups.com</A></P>
<P>
<HR SIZE=1 COLOR=BLACK>
<H1><A NAME="what can it do">WHAT CAN IT DO?</A></H1>
<P>Rodin Porrata asked on the ai-neuralnet-backprop mailing list,
"What can they [Neural Networks] do?". In regards to that question,
=begin
File: examples/ex_add.pl
Author: Josiah Bryan, <jdb@wcoil.com>
Desc:
This demonstrates the ability of a neural net to generalize and predict what the correct
result is for inputs that it has never seen before.
This teaches a network to add 7 sets of numbers, then it asks the user for two numbers to
add and it displays the results of the user's input.
=cut
use AI::NeuralNet::BackProp;
my $addition = new AI::NeuralNet::BackProp(2,2,1);
if(!$addition->load('add.dat')) {
$addition->learn_set([
[ 1, 1 ], [ 2 ] ,
[ 1, 2 ], [ 3 ],
[ 2, 2 ], [ 4 ],
[ 20, 20 ], [ 40 ],
[ 100, 100 ], [ 200 ],
[ 150, 150 ], [ 300 ],
[ 500, 500 ], [ 1000 ],
]);
$addition->save('add.dat');
}
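# A minimal sketch (not part of the original excerpt) of the interactive step
# described above; the prompt and variable names are illustrative.
print "Enter two numbers to add, separated by a space: ";
my $line = <STDIN>;
chomp $line;
my ($x, $y) = split /\s+/, $line;
print "Predicted sum: ", $addition->run([ $x, $y ])->[0], "\n";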
=begin
File: examples/ex_add2.pl
Author: Rodin Porrata, <rodin@ursa.llnl.gov>
Desc:
This script runs a complete test of the network's ability to add and remember
data sets, as well as testing the optimum "inc" to learn and the optimum
number of layers for a network.
=cut
use AI::NeuralNet::BackProp;
use Benchmark;
use English;
my $ofile = "addnet_data.txt";
my @data = (
[ 2633, 2665, 2685], [ 2633 + 2665 + 2685 ],
[ 2623, 2645, 2585], [ 2623 + 2645 + 2585 ],
[ 2627, 2633, 2579], [ 2627 + 2633 + 2579 ],
[ 2611, 2627, 2563], [ 2611 + 2627 + 2563 ],
[ 2640, 2637, 2592], [ 2640 + 2637 + 2592 ]
);
print "Learning started, will cycle $top times with inc = $inc\n";
# Make it learn the whole dataset $top times
my @list;
my $t1=new Benchmark;
for my $a (1..$top)
{
print "Outer Loop: $a : ";
$forgetfulness = $net->learn_set( \@data,
inc => $inc,
max => 500,
error => -1);
print "Forgetfulness: $forgetfulness %\n";
}
my $t2=new Benchmark;
$runtime = timediff($t2,$t1);
my @input = ( [ 2222, 3333, 3200 ],
[ 1111, 1222, 3211 ],
[ 2345, 2543, 3000 ],
[ 2654, 2234, 2534 ] );
test_net( $net, @input );
}
#.....................................................................
sub test_net {
my @set;
my $fb;
my $net = shift;
my @data = @_;
undef @percent_diff; #@answers; undef @predictions;
for( $i=0; defined( $data[$i] ); $i++ ){
@set = @{ $data[$i] };
$fb = $net->run(\@set)->[0];
# Print output
print "Test Factors: (",join(',',@set),")\n";
$answer = eval( join( '+',@set ));
push @percent_diff, 100.0 * abs( $answer - $fb )/ $answer;
print "Prediction : $fb answer: $answer\n";
}
}
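#.....................................................................
# A possible follow-up (hypothetical helper, not in the original script):
# summarize the percentage differences collected by test_net() above.
sub print_stats {
	return unless @percent_diff;
	my $sum = 0;
	$sum += $_ for @percent_diff;
	printf "Average percent difference: %.2f%%\n", $sum/scalar(@percent_diff);
}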
# File: examples/ex_alpha.pl
1,2,2,2,1,
1,1,2,1,1,
2,1,2,1,2,
2,1,1,1,2,
2,2,1,2,2
]
];
if(!$net->load("letters.dat")) {
#$net->range(0..29);
$net->learn_set($letters);
$net->save("letters.dat");
}
# Build a test map
my $tmp = [2,1,1,1,2,
1,2,2,2,1,
1,2,2,2,1,
1,1,1,1,1,
1,2,2,2,1,
1,2,2,2,1,
# File: examples/ex_bmp.pl
# Set resolution
my $xres=5;
my $yres=5;
# Create a new net with 2 layers, $xres*$yres inputs, and 1 output
my $net = AI::NeuralNet::BackProp->new(2,$xres*$yres,1);
# Set debugging to level 4
$net->debug(4);
# Create datasets.
my @data = (
[ 2,1,1,2,2,
2,2,1,2,2,
2,2,1,2,2,
2,2,1,2,2,
2,1,1,1,2 ], [ 1 ],
[ 1,1,1,2,2,
2,2,2,1,2,
2,1,1,1,2,
);
$net->range(1,5);
# If we haven't saved the net already, do the learning
if(!$net->load('images.net')) {
print "\nLearning started...\n";
# Make it learn the whole dataset $top times
my @list;
my $top=3;
for my $a (0..$top) {
my $t1=new Benchmark;
print "\n\nOuter Loop: $a\n";
# Test forgetfulness
my $f = $net->learn_set(\@data, inc => 0.1,
max => 500,
error => -1);
# Print it
print "\n\nForgetfullness: $f%\n";
# Save net to disk
$net->save('images.net');
my $t2=new Benchmark;
my $td=timediff($t2,$t1);
print "\nLoop $a took ",timestr($td),"\n";
}
}
my @set=( 2,1,1,1,2,
1,2,2,2,2,
1,1,1,2,2,
1,2,2,2,2,
2,1,1,1,2 );
# Image number
my $fb=$net->run(\@set)->[0];
# Print output
print "\nTest Map: \n";
$net->join_cols(\@set,5);
print "Image number matched: $fb\n";
# File: examples/ex_dow.pl
use AI::NeuralNet::BackProp;
use Benchmark;
# Create a new net with 2 layers, 9 inputs, and 1 output
my $net = AI::NeuralNet::BackProp->new(2,9,1);
# Set debugging to level 4
$net->debug(4);
# Create datasets.
# Note that these are fictitious values shown for illustration purposes
# only. In the example, CPI is a certain month's consumer price
# index, CPI-1 is the index one month before, CPI-3 is the index 3
# months before, etc.
my @data = (
# Mo CPI CPI-1 CPI-3 Oil Oil-1 Oil-3 Dow Dow-1 Dow-3 Dow Ave (output)
[ 1, 229, 220, 146, 20.0, 21.9, 19.5, 2645, 2652, 2597], [ 2647 ],
[ 2, 235, 226, 155, 19.8, 20.0, 18.3, 2633, 2645, 2585], [ 2637 ],
[ 3, 244, 235, 164, 19.6, 19.8, 18.1, 2627, 2633, 2579], [ 2630 ],
[ 5, 276, 261, 196, 19.5, 19.6, 18.0, 2630, 2611, 2582], [ 2638 ],
[ 6, 287, 276, 207, 19.5, 19.5, 18.0, 2637, 2630, 2589], [ 2635 ],
[ 7, 296, 287, 212, 19.3, 19.5, 17.8, 2640, 2637, 2592], [ 2641 ]
);
# If we haven't saved the net already, do the learning
if(!$net->load('dow.dat')) {
print "\nLearning started...\n";
# Make it learn the whole dataset $top times
my @list;
my $top=1;
for my $a (0..$top) {
my $t1=new Benchmark;
print "\n\nOuter Loop: $a\n";
# Test forgetfulness
my $f = $net->learn_set(\@data, inc => 0.2,
max => 2000,
error => -1);
# Print it
print "\n\nForgetfullness: $f%\n";
# Save net to disk
$net->save('dow.dat');
my $t2=new Benchmark;
my $td=timediff($t2,$t1);
print "\nLoop $a took ",timestr($td),"\n";
}
}
# Run a prediction using fake data
# Month CPI CPI-1 CPI-3 Oil Oil-1 Oil-3 Dow Dow-1 Dow-3
my @set=( 10, 352, 309, 203, 18.3, 18.7, 16.1, 2592, 2641, 2651 );
# Dow Ave (output)
my $fb=$net->run(\@set)->[0];
# Print output
print "\nTest Factors: (",join(',',@set),")\n";
print "DOW Prediction for Month #11: $fb\n";
=begin
File: examples/ex_sub.pl
Author: Josiah Bryan, <jdb@wcoil.com>
Desc:
This demonstrates the ability of a neural net to generalize and predict what the correct
result is for inputs that it has never seen before.
This teaches a network to subtract 6 sets of numbers, then it asks the user for
two numbers to subtract and then it displays the results of the user's input.
=cut
use AI::NeuralNet::BackProp;
my $subtract = new AI::NeuralNet::BackProp(2,2,1);
if(!$subtract->load('sub.dat')) {
$subtract->learn_set([
[ 1, 1 ], [ 0 ] ,
[ 2, 1 ], [ 1 ],
[ 10, 5 ], [ 5 ],
[ 20, 10 ], [ 10 ],
[ 100, 50 ], [ 50 ],
[ 500, 200 ], [ 300 ],
]);
$subtract->save('sub.dat');
}
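# A minimal sketch (not part of the original excerpt) of the interactive step
# described above; the prompt and variable names are illustrative.
print "Enter two numbers to subtract, separated by a space: ";
my $line = <STDIN>;
chomp $line;
my ($x, $y) = split /\s+/, $line;
print "Predicted difference: ", $subtract->run([ $x, $y ])->[0], "\n";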
# File: examples/ex_synop.pl
# Add a small amount of randomness to the network
$net->random(0.001);
# Demonstrate a simple learn() call
my @inputs = ( 0,0,1,1,1 );
my @outputs = ( 1,0,1,0,1 );
print $net->learn(\@inputs, \@outputs),"\n";
# Create a data set to learn
my @set = (
[ 2,2,3,4,1 ], [ 1,1,1,1,1 ],
[ 1,1,1,1,1 ], [ 0,0,0,0,0 ],
[ 1,1,1,0,0 ], [ 0,0,0,1,1 ]
);
# Demo learn_set()
my $f = $net->learn_set(\@set);
print "Forgetfulness: $f unit\n";
# Crunch a bunch of strings and return array refs
my $phrase1 = $net->crunch("I love neural networks!");
my $phrase2 = $net->crunch("Jay Lenno is wierd.");
my $phrase3 = $net->crunch("The rain in spain...");
my $phrase4 = $net->crunch("Tired of word crunching yet?");
# Make a data set from the array refs
my @phrases = (
$phrase1, $phrase2,
$phrase3, $phrase4
);
# Learn the data set
$net->learn_set(\@phrases);
# Run a test phrase through the network
my $test_phrase = $net->crunch("I love neural networking!");
my $result = $net->run($test_phrase);
# Get this, it prints "Jay Leno is networking!" ... LOL!
print $net->uncrunch($result),"\n";
END {print "not ok 1\n" unless $loaded;}
sub t { my $f=shift;$t++;my $str=($f)?"ok $t":"not ok $t";print $str,"\n";}
use AI::NeuralNet::BackProp;
$loaded = 1;
t 1;
my $net = new AI::NeuralNet::BackProp(2,2,1);
t $net;
t ($net->intr(0.51) eq 1);
t ($net->intr(0.00001) eq 0);
t ($net->intr(0.50001) eq 1);
t $net->learn_set([
[ 1, 1 ], [ 2 ] ,
[ 1, 2 ], [ 3 ],
[ 2, 2 ], [ 4 ],
[ 20, 20 ], [ 40 ],
[ 100, 100 ], [ 200 ],
[ 150, 150 ], [ 300 ],
[ 500, 500 ], [ 1000 ],
]);
t ($net->run([60,40])->[0] eq 100);
t $net->save("add.dat");