0.20 Tue Aug 22 18:01:04 2000
- Original version; created by Josiah Bryan
- Complete core re-write of AI::NeuralNet::BackProp
- 23% More accuracy in learning
- Increased learning speed
- Better connection topology
0.31 Fri Aug 25 05:10:11 2000
- Second release, by Josiah Bryan
- 3 Major features:
- separate layer sizes
- custom node activations
- increased learning speed
0.43 Wed Sep 14 03:13:01 2000
- Third release, by Josiah Bryan
- Several bug fixes
- fixed 'flag' option on learn_set()
- fixed multiple-output bug
- fixed learning gradient error
# Initialize misc. variables
$self->{col_width} = 5;
$self->{random} = 0;
$self->{const} = 0.0001;
$self->{connector} = $AI::NeuralNet::Mesh::Connector;
# Build mesh
$self->_init();
# Initialize activation, thresholds, etc., if provided
if(ref($layers->[0]) eq "HASH") {
for (0..$self->{total_layers}) {
$self->activation($_,$layers->[$_]->{activation});
$self->threshold($_,$layers->[$_]->{threshold});
$self->mean($_,$layers->[$_]->{mean});
}
}
# Done!
return $self;
}
# Internal usage
sub extend_layer {
my $self = shift;
my $layer = shift || 0;
my $specs = shift;
if(!$specs) {
$self->{error} = "extend_layer(): You must provide specs to extend layer $layer with.\n";
return undef;
}
if(ref($specs) eq "HASH") {
$self->activation($layer,$specs->{activation}) if($specs->{activation});
$self->threshold($layer,$specs->{threshold}) if($specs->{threshold});
$self->mean($layer,$specs->{mean}) if($specs->{mean});
return $self->add_nodes($layer,$specs->{nodes});
} else {
return $self->add_nodes($layer,$specs);
}
return 1;
}
# Pseudo-internal usage
sub add_nodes {
sub learn {
my $self = shift;
my $inputs = shift; # input set
my $outputs = shift; # target outputs
my %args = @_; # get args into hash
my $inc = $args{inc} || 0.002; # learning gradient
my $max = $args{max} || 1024; # max iterations
my $degrade = $args{degrade} || 0; # enable gradient degrading
my $error = ($args{error}>-1 && defined $args{error}) ? $args{error} : -1;
my $dinc = 0.0002; # amount to adjust gradient by
my $diff = 100; # error margin between results
my $start = new Benchmark;
$inputs = $self->crunch($inputs) if($inputs == 0); # crunch plain strings (a non-ref string compares numerically equal to 0)
$outputs = $self->crunch($outputs) if($outputs == 0); # same for the target outputs
my ($flag,$ldiff,$cdiff,$_mi,$loop,$y);
while(!$flag && ($max ? $loop<$max : 1)) {
my $b = new Benchmark;
my $got = $self->run($inputs);
$diff = pdiff($got,$outputs);
$flag = 1;
} else {
return $data->[$row]->[0]-$self->run($data->[$row-1])->[0];
}
}
# See POD for usage
sub run_set {
my $self = shift;
my $data = shift;
my $len = $#{$data}/2;
my (@results,$res);
for my $x (0..$len) {
$res = $self->run($data->[$x*2]);
for(0..$#{$res}){$results[$x]->[$_]=$res->[$_]}
d("Running set $x [$res->[0]]...\r",4);
}
return \@results;
}
#
# Loads a CSV-like dataset from disk
#
# Usage:
# my $set = $net->load_set($file, $column, $seperator);
#
# Returns a data set of the same format as required by the
# learn_set() method. $file is the disk file to load set from.
print FILE "c$a=$self->{_crunched}->{list}->[$a]\n";
}
my $n = 0;
for my $x (0..$self->{total_layers}) {
for my $y (0..$self->{layers}->[$x]-1) {
my $w='';
for my $z (0..$self->{layers}->[$x-1]-1) {
$w.="$self->{mesh}->[$n]->{_inputs}->[$z]->{weight},";
}
print FILE "n$n=$w$self->{mesh}->[$n]->{activation},$self->{mesh}->[$n]->{threshold},$self->{mesh}->[$n]->{mean}\n";
$n++;
}
}
close(FILE);
if(!(-f $file)) {
$self->{error} = "Error writing to \"$file\".";
return undef;
}
my $n = 0;
for my $x (0..$self->{total_layers}) {
for my $y (0..$self->{layers}->[$x]-1) {
my @l = split /\,/, $db{"n$n"};
for my $z (0..$self->{layers}->[$x-1]-1) {
$self->{mesh}->[$n]->{_inputs}->[$z]->{weight} = $l[$z];
}
my $z = $self->{layers}->[$x-1];
$self->{mesh}->[$n]->{activation} = $l[$z];
$self->{mesh}->[$n]->{threshold} = $l[$z+1];
$self->{mesh}->[$n]->{mean} = $l[$z+2];
$n++;
}
}
return $self;
}
# Load entire network state from disk.
sub load_old {
my $self = shift;
my $layer = shift || 0;
my $node = shift || 0;
my $value = shift || 'linear';
my $n = 0;
no strict 'refs';
for(0..$layer-1){$n+=$self->{layers}->[$_]}
$self->{mesh}->[$n+$node]->{activation} = $value;
}
# Set the activation threshold for a specific layer.
# Only applicable if that layer uses "sigmoid" or "sigmoid_2"
# usage: $net->threshold($layer,$threshold);
sub threshold {
my $self = shift;
my $layer = shift || 0;
my $value = shift || 0.5;
my $n = 0;
no strict 'refs';
for(0..$layer-1){$n+=$self->{layers}->[$_]}
for($n..$n+$self->{layers}->[$layer]-1) {
$self->{mesh}->[$_]->{threshold} = $value;
}
}
# Applies a threshold to a specific node
sub node_threshold {
my $self = shift;
my $layer = shift || 0;
my $node = shift || 0;
my $value = shift || 0.5;
my $n = 0;
no strict 'refs';
for(0..$layer-1){$n+=$self->{layers}->[$_]}
$self->{mesh}->[$n+$node]->{threshold} = $value;
}
# Set mean (avg.) flag for a layer.
# usage: $net->mean($layer,$flag);
# If $flag is true, it enables finding the mean for that layer,
# If $flag is false, disables mean.
sub mean {
my $self = shift;
my $layer = shift || 0;
my $value = shift || 0;
# comparative value
sub low {
shift if(substr($_[0],0,4) eq 'AI::');
my $ref1 = shift; my ($el,$len,$tmp); $tmp=0;
foreach $el (@{$ref1}) { $len++ }
for my $x (0..$len-1) { $tmp = $x if($ref1->[$x] < $ref1->[$tmp]) }
return $tmp;
}
# Following is a collection of a few nifty custom activation functions.
# range() is exported by default, the rest you can get with:
# use AI::NeuralNet::Mesh ':acts'
# The ':all' tag also gets these into your namespace.
#
# range() returns a closure limiting the output
# of that node to a specified set of values.
# Good for output layers.
#
# usage example:
# $net->activation(4,range(0..5));
# You can also pass an array containing the range
# values (not array ref), or you can pass a comma-
# separated list of values as parameters:
#
# $net->activation(4,range(@numbers));
# $net->activation(4,range(6,15,26,106,28,3));
#
# Note: when using a range() activator, train the
# net TWICE on the data set, because the first time
# the range() function searches for the top value in
# the inputs, and therefore, results could fluctuate.
# The second learning cycle guarantees more accuracy.
#
sub range {
my @r=@_;
sub{$_[1]->{t}=$_[0]if($_[0]>$_[1]->{t});$r[intr($_[0]/$_[1]->{t}*$#r)]}
}
#
# ramp() performs smooth ramp activation between 0 and 1 if $r is 1,
# or between -1 and 1 if $r is 2. $r defaults to 1, as you can see.
#
# Note: when using a ramp() activator, train the
# net at least TWICE on the data set, because the first
# time the ramp() function searches for the top value in
# the inputs, and therefore, results could fluctuate.
# The second learning cycle guarantees more accuracy.
#
sub ramp {
my $r=shift||1;my $t=($r<2)?0:-1;
sub{$_[1]->{t}=$_[0]if($_[0]>$_[1]->{t});$_[0]/$_[1]->{t}*$r+$t}
}
# Self explanatory, pretty much. $threshold is used to decide if an input
# is true or false (1 or 0). If an input is below $threshold, it is false.
sub and_gate {
my $threshold = shift || 0.5;
sub {
my $sum = shift;
my $self = shift;
for my $x (0..$self->{_inputs_size}-1) { return $self->{_parent}->{const} if($self->{_inputs}->[$x]->{value} < $threshold) } # any input below threshold => gate is false
return $sum/$self->{_inputs_size};
}
}
# Self explanatory; $threshold is used the same as above.
sub or_gate {
my $threshold = shift || 0.5;
sub {
my $sum = shift;
my $self = shift;
for my $x (0..$self->{_inputs_size}-1) { return $sum/$self->{_inputs_size} if($self->{_inputs}->[$x]->{value} >= $threshold) } # any input at/above threshold => gate is true
return $self->{_parent}->{const};
}
}
1;
package AI::NeuralNet::Mesh::node;
use strict;
for my $x (0..$self->{_inputs_size}-1) { $flag = 0 if(!$self->{_inputs}->[$x]->{fired}) }
if ($flag) {
$self->{_parent}->d("all inputs fired for $self.\n",1);
my $output = 0;
# Sum
for my $i (@{$self->{_inputs}}) {
$output += $i->{value};
}
# Handle activations, thresholds, and means
$output /= $self->{_inputs_size} if($self->{flag_mean});
#$output += (rand()*$self->{_parent}->{random});
$output = ($output>=$self->{threshold})?1:0 if(($self->{activation} eq "sigmoid") || ($self->{activation} eq "sigmoid_1"));
if($self->{activation} eq "sigmoid_2") {
$output = 1 if($output >$self->{threshold});
$output = -1 if($output <$self->{threshold});
$output = 0 if($output==$self->{threshold});
}
# Handle CODE refs
$output = &{$self->{activation}}($output,$self) if(ref($self->{activation}) eq "CODE");
# Send output
for my $o (@{$self->{_outputs}}) { $o->{node}->input($output,$o->{from_id}) }
} else {
$self->{_parent}->d("all inputs have NOT fired for $self.\n",1);
}
# Create a mesh with 2 layers, 2 nodes/layer, and one output node.
my $net = new AI::NeuralNet::Mesh(2,2,1);
# Teach the network the AND function
$net->learn([0,0],[0]);
$net->learn([0,1],[0]);
$net->learn([1,0],[0]);
$net->learn([1,1],[1]);
# Present it with two test cases
my $result_bit_1 = $net->run([0,1])->[0];
my $result_bit_2 = $net->run([1,1])->[0];
# Display the results
print "AND test with inputs (0,1): $result_bit_1\n";
print "AND test with inputs (1,1): $result_bit_2\n";
=head1 VERSION & UPDATES
This is version B<0.44>, an update release for version 0.43.
This fixed the usage conflict with perl 5.3.3.
With this version I have gone through and tuned up many areas
of this module, including the descent algorithm in learn(),
as well as four custom activation functions, and several export
tag sets.
=head1 DESCRIPTION
AI::NeuralNet::Mesh is an optimized, accurate neural network Mesh.
It was designed with accuracy and speed in mind.
This network model is very flexible. It will allow for classic binary
operation or any range of integer or floating-point inputs you care
to provide. With this you can change activation types on a per-node or
per-layer basis (you can even include your own anonymous subs as
activation types). You can add sigmoid transfer functions and control
the threshold. You can learn data sets in batch, and load CSV data
set files. You can do almost anything you need to with this module.
This code is designed to be flexible. Any new ideas for this module?
See AUTHOR, below, for contact info.
This module is designed to also be a customizable, extensible
neural network simulation toolkit. Through a combination of setting
the $Connection variable and using custom activation functions, as
well as basic package inheritance, you can simulate many different
types of neural network structures with very little new code written
by you.
In this module I have included a more accurate form of "learning" for the
mesh. This form performs descent toward a local error minimum (0) on a
directional delta, rather than the desired value for that node. This allows
for better, and more accurate, results with larger datasets. This module also
uses a simpler recursion technique which, surprisingly, is more accurate than
the original technique that I've used in other ANNs.
=head1 EXPORTS
This module exports three functions by default:
range
intr
pdiff
See range(), intr(), and pdiff() for descriptions of their respective functions.
Also provided are several export tag sets for usage in the form of:
use AI::NeuralNet::Mesh ':tag';
Tag sets are:
:default
- These functions are always exported.
- Exports:
range()
p()
high()
low()
:acts
- Exports:
ramp()
and_gate()
or_gate()
See the respective methods/functions for information about
each method's or function's usage.
=head1 METHODS
=item AI::NeuralNet::Mesh->new();
There are four ways to construct a new network with new(). Each is detailed below.
P.S. Don't worry, the old C<new($layers, $nodes [, $outputs])> still works like always!
=item AI::NeuralNet::Mesh->new($layers, $nodes [, $outputs]);
The network will have C<$layers> number of layers in it
and C<$nodes> number of nodes per layer.
There is an optional parameter of $outputs, which specifies the number
of output neurons to provide. If $outputs is not specified, $outputs
defaults to equal $nodes.
=item AI::NeuralNet::Mesh->new($file);
This will automatically create a new network from the file C<$file>. It will
return undef if the file was of an incorrect format or non-existant. Otherwise,
it will return a blessed refrence to a network completly restored from C<$file>.
=item AI::NeuralNet::Mesh->new(\@layer_sizes);
This constructor will make a network with the number of layers corresponding to the length
in elements of the array ref passed. Each element in the array ref passed is expected
to contain an integer specifying the number of nodes (neurons) in that layer. The first
layer ($layer_sizes[0]) is to be the input layer, and the last layer in @layer_sizes is to be
the output layer.
Example:
my $net = AI::NeuralNet::Mesh->new([2,3,1]);
Creates a network with 2 input nodes, 3 hidden nodes, and 1 output node.
=item AI::NeuralNet::Mesh->new(\@array_of_hashes);
Another dandy constructor...this is my favorite. It allows you to tailor the number of layers,
the size of the layers, the activation type (you can even add anonymous inline subs with this one),
and even the threshold, all with one array ref-ed constructor.
Example:
my $net = AI::NeuralNet::Mesh->new([
{
nodes => 2,
activation => linear
},
{
nodes => 3,
activation => sub {
my $sum = shift;
return $sum + rand()*1;
}
},
{
nodes => 1,
activation => sigmoid,
threshold => 0.75
}
]);
Interesting, eh? What you are basically passing is this:
my @info = (
{ },
{ },
{ },
...
);
You are passing an array ref in which each element is a hash reference. Each
hash reference, or more precisely, each element in the array reference you are passing
to the constructor, represents a layer in the network. Like the constructor above,
the first element is the input layer, and the last is the output layer. The rest are
hidden layers.
Each hash reference is expected to have AT LEAST the "nodes" key set to the number
of nodes (neurons) in that layer. The other two keys are optional. If "activation" is left
out, it defaults to "linear". If "threshold" is left out, it defaults to 0.50.
The "activation" key can be one of four values:
linear ( simply use sum of inputs as output )
sigmoid [ sigmoid_1 ] ( only positive sigmoid )
sigmoid_2 ( positive / 0 / negative sigmoid )
\&code_ref;
"sigmoid_1" is an alias for "sigmoid".
The code ref is expected to return a value to be used as the output of the node.
The code ref also has access to all the data of that node through the second argument,
a blessed hash reference to that node.
See CUSTOM ACTIVATION FUNCTIONS for information on several included activation functions
other than the ones listed above.
Three of the activation syntaxes are shown in the first constructor above, the "linear",
"sigmoid" and code ref types.
You can also set the activation and threshold values after network creation with the
activation() and threshold() methods.
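For example, a small sketch (the layer index and values here are illustrative, not taken from the original docs):
my $net = AI::NeuralNet::Mesh->new([2,3,1]);
$net->activation(2, "sigmoid"); # layer 2 is the output layer
$net->threshold(2, 0.75);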
=item $net->learn($input_map_ref, $desired_result_ref [, options ]);
NOTE: learn_set() now has increment-degrading turned OFF by default. See note
on the degrade flag, below.
This will 'teach' a network to associate a new input map with a desired
result. It will return a string containing benchmarking information.
You can also specify strings as inputs and outputs to learn, and they will be
crunched automatically. Example:
$net->learn('corn', 'cob');
Note, the old method of calling crunch on the values still works just as well.
The first two arguments may be array refs (or now, strings), and they may be
of different lengths.
Options should be written in hash form:
inc     => $learning_gradient
max     => $maximum_iterations
error   => $maximum_allowable_percentage_of_error
degrade => $degrade_increment_flag
$learning_gradient is an optional value used to adjust the weights of the internal
connections. If $learning_gradient is omitted, it defaults to 0.002.
$maximum_iterations is the maximum number of iterations the loop should do.
It defaults to 1024. Set it to 0 if you never want the loop to quit before
the pattern is perfectly learned.
$maximum_allowable_percentage_of_error is the maximum allowable error to have. If
this is set, then learn() will return when the percentage difference between the
actual results and desired results falls below $maximum_allowable_percentage_of_error.
If you do not include 'error', or $maximum_allowable_percentage_of_error is set to -1,
then learn() will not return until it gets an exact match for the desired result OR it
reaches $maximum_iterations.
$degrade_increment_flag is a simple flag used to allow/disallow increment degrading
during learning based on a product of the error difference with several other factors.
$degrade_increment_flag is off by default. Setting $degrade_increment_flag to a true
value turns increment degrading on.
In previous module releases $degrade_increment_flag was not used, as increment degrading
was always on. In this release I have looked at several other network types as well
as several texts and decided that it would be better to not use increment degrading. The
option is still there for those that feel the inclination to use it.
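As a rough sketch of a call that uses these options (the specific values are illustrative only):
$net->learn([0,1], [1],
inc     => 0.002,  # learning gradient
max     => 1024,   # maximum iterations
error   => 5,      # stop once error falls below 5% (illustrative)
degrade => 1);     # enable increment degrading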
Inputs and outputs in the dataset can also be strings.
See the paragraph on measuring forgetfulness, below. There are
two learn_set()-specific option tags available:
flag => $flag
pattern => $row
If "flag" is set to some TRUE value, as in "flag => 1" in the hash of options, or if the option "flag"
is not set, then it will return a percentage represting the amount of forgetfullness. Otherwise,
learn_set() will return an integer specifying the amount of forgetfulness when all the patterns
are learned.
If "pattern" is set, then learn_set() will use that pattern in the data set to measure forgetfulness by.
If "pattern" is omitted, it defaults to the first pattern in the set. Example:
my @set = (
[ 0,1,0,1 ], [ 0 ],
[ 0,0,1,0 ], [ 1 ],
[ 1,1,0,1 ], [ 2 ], # <---
[ 0,1,1,0 ], [ 3 ]
);
If you wish to measure forgetfulness as indicated by the line with the arrow, then you would
pass 2 as the "pattern" option, as in "pattern => 2".
Now why the heck would anyone want to measure forgetfulness, you ask? Maybe you wonder how I
even measure that. Well, it is not a vital value that you have to know. I just put in a
"forgetfulness measure" one day because I thought it would be neat to know.
How the module measures forgetfulness is this: First, it learns all the patterns
in the set provided, then it will run the very first pattern (or whatever pattern
is specified by the "pattern" option) in the set after it has finished learning. It
will compare the run() output with the desired output as specified in the dataset.
In a perfect world, the two should match exactly. What we measure is how much
they don't match, thus the amount of forgetfulness the network has.
Example (from examples/ex_dow.pl):
# Data from 1989 (as far as I know..this is taken from example data on BrainMaker)
my @data = (
=item $net->run_uc($input_map_ref);
This method does the same thing as this code:
$net->uncrunch($net->run($input_map_ref));
All run_uc() does is automatically call uncrunch() on the output, regardless
of whether the input was crunch() -ed or not.
=item $net->run_set($set);
This takes an array ref of the same structure as the learn_set() method, above. It returns
an array ref. Each element in the returned array ref represents the output for the corresponding
element in the dataset passed. Uses run() internally.
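A minimal sketch of run_set() usage (the dataset values are illustrative):
my @set = (
[0,0], [0],
[1,1], [1]
);
my $out = $net->run_set(\@set);
print "Output for first pattern: $out->[0]->[0]\n";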
=item $net->get_outs($set);
Simple utility function which takes an array ref of the same structure as the learn_set() method,
above. It returns an array ref of the same type as run_set() wherein each element contains an
output value. The output values are the target values specified in the $set passed. Each element
in the returned array ref represents the output value for the corresponding row in the dataset
passed. (A row is two elements of the dataset together, see learn_set() for dataset structure.)
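For example, a small sketch (values illustrative):
my $targets = $net->get_outs([ [0,0],[0], [1,1],[1] ]);
print "Target for first pattern: $targets->[0]->[0]\n";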
=item $net->load_set($file,$column,$seperator);
Loads a CSV-like dataset from disk
Returns a data set of the same structure as required by the
learn_set() method. $file is the disk file to load set from.
$column is an optional variable specifying the column in the
data set to use as the class attribute. $column defaults to 0.
This method will return undef if there was a problem with writing the file. If there is an
error, it will set the internal error message, which you can retrieve with the error() method,
below.
If there were no errors, it will return a reference to $net.
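A sketch of typical usage (the file name, column, and separator shown are assumptions, not from the original docs):
my $set = $net->load_set("wine.dat", 0, ",");
$net->learn_set($set) if($set);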
=item $net->load($filename);
This will load from disk any network saved by save() and completely restore the internal
state at the point at which save() was called.
If the file is of an invalid file type, then load() will
return undef. Use the error() method, below, to print the error message.
If there were no errors, it will return a reference to $net.
UPDATE: $filename can now be a newline-separated set of mesh data. This enables you
to do $net->load(join("\n",<DATA>)) and other fun things. I added this mainly
for a demo I'm writing but am not quite done with yet. So, Cheers!
=item $net->activation($layer,$type);
This sets the activation type for layer C<$layer>. The same activation types
listed for the new() constructor, above, apply here; you may also pass a code
ref. A code ref is called with this syntax:
$output = &$code_ref($sum_of_inputs, $self);
The code ref is expected to return a value to be used as the output of the node.
The code ref also has access to all the data of that node through the second argument,
a blessed hash reference to that node.
See CUSTOM ACTIVATION FUNCTIONS for information on several included activation functions
other than the ones listed above.
The activation type for each layer is preserved across load/save calls.
EXCEPTION: Due to the constraints of Perl, I cannot load/save the actual subs that the code
ref option points to. Therefore, you must re-apply any code ref activation types after a
load() call.
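For example, a sketch of re-applying a code ref activation after a load() (the layer index and sub are illustrative):
my $noisy = sub { my ($sum, $node) = @_; return $sum + rand() * 0.1; };
my $net = AI::NeuralNet::Mesh->new("my.mesh");  # restore the saved network
$net->activation(1, $noisy);                    # re-apply the code ref to layer 1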
=item $net->node_activation($layer,$node,$type);
This sets the activation function for a specific node in a layer. The same notes apply
here as to the activation() method above.
=item $net->threshold($layer,$value);
This sets the activation threshold for a specific layer. The threshold is only used
when activation is set to "sigmoid", "sigmoid_1", or "sigmoid_2".
=item $net->node_threshold($layer,$node,$value);
This sets the activation threshold for a specific node in a layer. The threshold is only used
when activation is set to "sigmoid", "sigmoid_1", or "sigmoid_2".
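For example (the layer, node, and value are illustrative):
$net->node_threshold(1, 0, 0.65); # node 0 of layer 1 fires at 0.65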
=item $net->join_cols($array_ref,$row_length_in_elements,$high_state_character,$low_state_character);
This is more of a utility function than any real necessary function of the package.
Instead of joining all the elements of the array together in one long string, like join(),
it prints the elements of $array_ref to STDOUT, adding a newline (\n) after every $row_length_in_elements
number of elements has passed. Additionally, if you include a $high_state_character and a $low_state_character,
it will print the $high_state_character (can be more than one character) for every element that
has a true value, and the $low_state_character for every element that has a false value.
If you do not supply a $high_state_character, or the $high_state_character is a null or empty or
undefined string, then join_cols() will just print the numerical value of each element separated
by a null character (\0). join_cols() defaults to the latter behaviour.
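A small sketch (the bitmap and characters are illustrative):
my @map = (0,1,1,1,0,
1,0,0,0,1,
1,1,1,1,1);
$net->join_cols(\@map, 5, '#', '.'); # print 5 elements per row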
=item $net->extend(\@array_of_hashes);
This allows you to re-apply any activations and thresholds with the same array ref which
you created a network with. This is useful for re-applying code ref activations after a load()
call without having to type the code ref twice.
You can also specify the extension in a simple array ref like this:
$net->extend([2,3,1]);
Which will simply add more nodes if needed to set the number of nodes in each layer to their
respective elements. This works just like the respective new() constructor, above.
NOTE: Your net will probably require re-training after adding nodes.
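A sketch of re-applying the original constructor specs after a load() (the specs shown are illustrative):
my $custom = sub { my $sum = shift; return $sum + rand()*1; };
$net->extend([
{ nodes => 2, activation => "linear" },
{ nodes => 3, activation => $custom },
{ nodes => 1, activation => "sigmoid", threshold => 0.75 }
]);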
=item $net->extend_layer($layer,\%hash);
With this you can modify only one layer with its specifications in a hash reference. This hash
reference uses the same keys as for the last new() constructor form, above.
You can also specify just the number of nodes for the layer in this form:
$net->extend_layer(1,5);
which will set layer 1 to hold 5 nodes, adding new nodes as needed.
=item $net->add_nodes($layer,$total_nodes);
This method was created mainly to service the extend*() group of functions, but it
can also be called independently. This will add nodes as needed to layer C<$layer> to
make the nodes in layer equal to $total_nodes.
NOTE: Your net will probably require re-training after adding nodes.
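For example (the layer and node count are illustrative):
$net->add_nodes(1, 5); # grow layer 1 until it holds 5 nodes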
=item $net->p($a,$b);
Returns a floating point number which represents $a as a percentage of $b.
=item $net->intr($float);
Rounds a floating-point number to an integer using sprintf() and int(). Provides
better rounding than just calling int() on the float. Also used very heavily internally.
This will dump a simple listing of all the weights of all the connections of every neuron
in the network to STDOUT.
=item $net->crunch($string);
This splits a string passed with /[\s\t]/ into an array ref containing unique indexes
to the words. The words are stored in an internal array and preserved across load() and save()
calls. This is designed to be used to generate unique maps suitable for passing to learn() and
run() directly. It returns an array ref.
The words are not duplicated internally. For example:
$net->crunch("How are you?");
Will probably return an array ref containing 1,2,3. A subsequent call of:
$net->crunch("How is Jane?");
$net->crunch("I love chips."), $net->crunch("That's Junk Food!")),
$net->crunch("I love apples."), $net->crunch("Good, Healthy Food.")),
$net->crunch("I love pop."), $net->crunch("That's Junk Food!")),
$net->crunch("I love oranges."),$net->crunch("Good, Healthy Food."))
]);
}
print $net->run_uc("I love corn.")),"\n";
On my system, this responds with, "Good, Healthy Food." If you try to run crunch() with
"I love pop.", though, you will probably get "Food! apples. apples." (At least it returns
that on my system.) As you can see, the associations are not yet perfect, but it can make
for some interesting demos!
=item $net->crunched($word);
This will return undef if the word is not in the internal crunch list, or it will return the
index of the word if it exists in the crunch list.
If the word is not in the list, it will set the internal error value with a text message
that you can retrieve with the error() method, below.
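A small sketch (the word is illustrative):
my $idx = $net->crunched("How");
print "Index of 'How': $idx\n" if defined $idx;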
=item $net->col_width($width);
This is useful for formatting the debugging output of Level 4 if you are learning simple
bitmaps. This will set the debugger to automatically insert a line break after that many
elements in the map output when dumping the currently run map during a learn loop.
It will return the current width when called with a 0 or undef value.
The column width is preserved across load() and save() calls.
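For example (the width is illustrative):
$net->col_width(5); # insert a line break after every 5 elements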
=item $net->random($rand);
This will set the randomness factor for the network. Default is 0. When called
with no arguments, or an undef value, it will return current randomness value. When
called with a 0 value, it will disable randomness in the network. The randomness factor
is preserved across load() and save() calls.
=item $net->const($const);
This sets the run const. for the network. The run const. is a value that is added
to every input line when a set of inputs are run() or learn() -ed, to prevent the
network from hanging on a 0 value. When called with no arguments, it returns the current
const. value. It defaults to 0.0001 on a newly-created network. The run const. value
is preserved across load() and save() calls.
=item $net->error();
Returns the last error message which occurred in the mesh, or undef if no errors have
occurred.
=item $net->load_pcx($filename);
You can also pass an array containing the range
values (not array ref), or you can pass a comma-
separated list of values as parameters:
$net->activation(4,range(@numbers));
$net->activation(4,range(6,15,26,106,28,3));
Note: when using a range() activator, train the
net TWICE on the data set, because the first time
the range() function searches for the top value in
the inputs, and therefore, results could fluctuate.
The second learning cycle guarantees more accuracy.
The actual code that implements the range closure is
a bit convoluted, so I will expand on it here as a simple
tutorial for custom activation functions.
= line 1 = sub {
= line 2 = my @values = ( 6..10 );
= line 3 = my $sum = shift;
= line 4 = my $self = shift;
or between -1 and 1 if $r is 2. $r defaults to 1.
You can get this into your namespace with the ':acts' export
tag as so:
use AI::NeuralNet::Mesh ':acts';
Note: when using a ramp() activator, train the
net at least TWICE on the data set, because the first
time the ramp() function searches for the top value in
the inputs, and therefore, results could fluctuate.
The second learning cycle guarantees more accuracy.
No code to show here, as it is almost exactly the same as range().
=item and_gate($threshold);
Self explanatory, pretty much. This turns the node into a basic AND gate.
$threshold is used to decide if an input is true or false (1 or 0). If
an input is below $threshold, it is false. $threshold defaults to 0.5.
You can get this into your namespace with the ':acts' export
tag as so:
use AI::NeuralNet::Mesh ':acts';
Let's look at the code real quick, as it shows how to get at the individual
input connections:
= line 1 = sub {
= line 2 = my $sum = shift;
= line 3 = my $self = shift;
= line 4 = my $threshold = 0.50;
= line 5 = for my $x (0..$self->{_inputs_size}-1) {
= line 6 = return 0.000001 if($self->{_inputs}->[$x]->{value}<$threshold)
= line 7 = }
= line 8 = return $sum/$self->{_inputs_size};
= line 9 = }
Lines 2 and 3 pull in our sum and self reference. Line 5 opens a loop to go over
all the input lines into this node. Line 6 looks at each input line's value
and compares it to the threshold. If the value of that line is below threshold, then
we return 0.000001 to signify a 0 value. (We don't return a 0 value so that the network
doesn't get hung trying to multiply a 0 by a huge weight during training [it would just
keep getting a 0 as the product, and it would never learn].) Line 8 returns the mean
value of all the inputs if all inputs were above threshold.
Very simple, eh? :)
=item or_gate($threshold)
Self explanatory. Turns the node into a basic OR gate; $threshold is used the same as above.
You can get this into your namespace with the ':acts' export
tag as so:
use AI::NeuralNet::Mesh ':acts';
=head1 VARIABLES
=item $AI::NeuralNet::Mesh::Connector
Rodin Porrata asked on the ai-neuralnet-backprop mailing list,
"What can they [Neural Networks] do?". In regard to that question,
consider the following:
Neural Nets are formed by simulated neurons connected together much the same
way the brain's neurons are. Neural networks are able to associate and
generalize without rules. They have solved problems in pattern recognition,
robotics, speech processing, financial predicting and signal processing, to
name a few.
One of the first impressive neural networks was NetTalk, which read in ASCII
text and correctly pronounced the words (producing phonemes which drove a
speech chip), even those it had never seen before. Designed by Johns Hopkins
biophysicist Terry Sejnowski and Charles Rosenberg of Princeton in 1986,
this application made the Backpropagation training algorithm famous. Using
the same paradigm, a neural network has been trained to classify sonar
returns from an undersea mine and rock. This classifier, designed by
Sejnowski and R. Paul Gorman, performed better than a nearest-neighbor
classifier.
The kinds of problems best solved by neural networks are those that people
are good at such as association, evaluation and pattern recognition.
Problems that are difficult to compute and do not require perfect answers,
just very good answers, are also best done with neural networks. A quick,
very good response is often more desirable than a more accurate answer which
takes longer to compute. This is especially true in robotics or industrial
controller applications. Predictions of behavior and general analysis of
data are also affairs for neural networks. In the financial arena, consumer
loan analysis and financial forecasting make good applications. New network
designers are working on weather forecasts by neural networks (Myself
included). Currently, doctors are developing medical neural networks as an
aid in diagnosis. Attorneys and insurance companies are also working on
neural networks to help estimate the value of claims.
Neural networks are poor at precise calculations and serial processing. They
are also unable to predict or recognize anything that does not inherently
contain some sort of pattern. For example, they cannot predict the lottery,
since this is a random process. It is unlikely that a neural network could
be built which has the capacity to think as well as a person does for two
reasons. Neural networks are terrible at deduction, or logical thinking, and
the human brain is just too complex to completely simulate. Also, some
problems are too difficult for present technology. Real vision, for
example, is a long way off.
In short, Neural Networks are poor at precise calculations, but good at
association, evaluation, and pattern recognition.
=head1 EXAMPLES
Included are several example files in the "examples" directory from the
distribution ZIP file. Each of the examples includes a short explanation
appreciate it greatly if you could report them to me at F<E<lt>jdb@wcoil.comE<gt>>,
or, even better, try to patch them yourself and figure out why the bug is being buggy, and
send me the patched code, again at F<E<lt>jdb@wcoil.comE<gt>>.
=head1 AUTHOR
Josiah Bryan F<E<lt>jdb@wcoil.comE<gt>>
Copyright (c) 2000 Josiah Bryan. All rights reserved. This program is free software;
you can redistribute it and/or modify it under the same terms as Perl itself.
The C<AI::NeuralNet::Mesh> and related modules are free software. THEY COME WITHOUT WARRANTY OF ANY KIND.
$Id: AI::NeuralNet::Mesh.pm, v0.44 2000/15/09 03:29:08 josiah Exp $
=head1 THANKS
Below is a list of the people that have contributed in some way to this module (in no particular order):
from http://www.josiah.countystart.com/modules/get.pl?mesh:pod
=head1 MAILING LIST
A mailing list has been set up for AI::NeuralNet::Mesh and AI::NeuralNet::BackProp.
The list is for discussion of AI and neural net related topics as they pertain to
AI::NeuralNet::BackProp and AI::NeuralNet::Mesh. I will also announce in the group
each time a new release of AI::NeuralNet::Mesh is available.
The list address is at:
ai-neuralnet-backprop@egroups.com
To subscribe, send a blank email to:
ai-neuralnet-backprop-subscribe@egroups.com
=cut
** What is this?
AI::NeuralNet::Mesh is an optimized, accurate neural network Mesh.
It was designed with accuracy and speed in mind.
This network model is very flexible. It will allow for classic binary
operation or any range of integer or floating-point inputs you care
to provide. With this you can change activation types on a per-node or
per-layer basis (you can even include your own anonymous subs as
activation types). You can add sigmoid transfer functions and control
the threshold. You can learn data sets in batch, and load CSV data
set files. You can do almost anything you need to with this module.
This code is designed to be flexible. Any new ideas for this module?
Contact Josiah Bryan at <jdb@wcoil.com>
This module is designed to also be a customizable, extensible
neural network simulation toolkit. Through a combination of setting
the $Connection variable and using custom activation functions, as
well as basic package inheritance, you can simulate many different
types of neural network structures with very little new code written
by you. (See ex_aln.pl)
As always, included is a cleaned, CSS-ed, HTML-format of the POD docs.
** What's new?
From the POD:
This is version 0.44, a bug fix for the third release of the module.
This fixed a compatibility issue that 0.43 had with Perl 5.3.3.
examples/ex_add.pl
=begin
File: examples/ex_add.pl
Author: Josiah Bryan, <jdb@wcoil.com>
Desc:
This demonstrates the ability of a neural net to generalize
and predict what the correct result is for inputs that it has
never seen before.
This teaches a network to add 11 sets of numbers, then it asks
the user for two numbers to add and it displays the results of
the user's input.
=cut
use AI::NeuralNet::Mesh;
my $addition = new AI::NeuralNet::Mesh(2,2,1);
if(!$addition->load('add.mesh')) {
$addition->learn_set([
examples/ex_aln.pl
This is a simple example of a _basic_ ALN implementation in
under 210 lines of code. In this demo we make use of the
custom node connector as described in the POD. We also
insert our own method over the node's internal adjust_weight()
method to make ALN learning a bit easier. This demo also adds
a temporary method to the network to print the logical type of
each node, called print_aln().
print_aln() prints a simple diagram of the
network similar to this (this is for a $net=Tree(8,1) with
$net->learn([1,1,0,1,0,1,1,1],[0]), and each line represents
a layer):
L R L L L L L L
OR OR OR OR
OR OR
AND
All the standard methods that work on AI::NeuralNet::Mesh work
on the object returned by Tree(). load() and save() will correctly
preserve the gate structure and types of your network. learn_set()
and everything else works pretty much as expected. Only thing
that is useless is the crunch() method, as this only takes binary
inputs. But...for those of you who couldn't live without integers
in your network...I'm going to create a small package in the next
week, AI::NeuralNet::ALNTree, from this code. It will include
an integer-vectorizer (to convert your integers into bit vectors), a bit
vector class to play with, as well as support for concatenating and
learning bit vectors. But, for now, enjoy this!
This file contains just a simple, functional, ALN implementation.
examples/ex_aln.pl
######################################################################
# Build a basic ALN tree network (_very_ basic, only implements
# the node types, and only two learning benefits from ALN theory are
# realized.) Also adds a method to the neural network gates, print_aln().
sub Tree {
# Grab our leaves and roots
my $leaves = shift;
my $roots = shift || $leaves;
# Replace the load function with a new one to preserve the
# load activations. We have to add this up here because next
# thing we do is check if they passed a file name as $leaves,
# and we need to have our new load sub already in place before
# we try to load anything in $leaves.
*{'AI::NeuralNet::Mesh::load'} = sub {
my $self = shift;
my $file = shift;
my $load_flag = shift;
if(!(-f $file)) {
examples/ex_aln.pl
my $n = 0;
for my $x (0..$self->{total_layers}) {
for my $y (0..$self->{layers}->[$x]-1) {
my @l = split /\,/, $db{"n$n"};
for my $z (0..$self->{layers}->[$x-1]-1) {
$self->{mesh}->[$n]->{_inputs}->[$z]->{weight} = $l[$z];
}
my $z = $self->{layers}->[$x-1];
$self->{mesh}->[$n]->{activation} = $l[$z];
$self->{mesh}->[$n]->{threshold} = $l[$z+1];
$self->{mesh}->[$n]->{mean} = $l[$z+2];
$n++;
}
}
$self->extend($self->{_original_specs});
return $self;
};
examples/ex_aln.pl
my $b2 = intr($self->{mesh}->[$_]->{_inputs}->[1]->{weight});
$f=1 if( $b1 && $b2);
$f=2 if(!$b1 && !$b2);
my $lo = $self->{_last_output};
if($lo!=$target) {
# Adjust right lead if $lo, else adjust left lead
($target && $lo)?$self->{_inputs}->[0]->{weight}++:$self->{_inputs}->[0]->{weight}--;
($target && !$lo)?$self->{_inputs}->[1]->{weight}++:$self->{_inputs}->[1]->{weight}--;
}
# Thanks to Rolf Mandersheidd for this set of nested conditions on one line
# This determines heuristic error responsibility on the children
# and recurses the error up the tree.
if($lo!=$target || $f!=($lo?1:2)) {
$self->{_inputs}->[1]->{node}->adjust_weight($inc,$target) if($self->{_inputs}->[1]->{node});
} else {
$self->{_inputs}->[0]->{node}->adjust_weight($inc,$target) if($self->{_inputs}->[1]->{node});
}
};
# Set our custom node connector
$AI::NeuralNet::Mesh::Connector = 'main::_c_tree';
examples/ex_alpha.pl
=begin
File: examples/ex_alpha.pl
Author: Josiah Bryan, <jdb@wcoil.com>
Desc:
This demonstrates the ability of a neural net to
generalize and predict what the correct result is
for inputs that it has never seen before.
This teaches the network to classify some twenty-
nine separate 35-byte bitmaps, and then it inputs
a never-before-seen bitmap and displays the
classification the network gives for the unknown bitmap.
=cut
use AI::NeuralNet::Mesh;
examples/ex_alpha.pl
nodes => 35,
activation => linear
},
{
nodes => 1,
activation => linear,
}
]);
# Debug level of 4 gives JUST learn loop iteration benchmark and comparison data
# as learning progresses.
$net->debug(4);
my $letters = [ # All prototype inputs
[
0,1,1,1,0, # Inputs are
1,0,0,0,1, # 5*7 digitized characters
1,0,0,0,1,
1,1,1,1,1,
1,0,0,0,1, # This is the alphabet of the
1,0,0,0,1, # HP 28S
examples/ex_alpha.pl
1,0,0,0,1,
1,1,1,1,1,
1,0,0,0,1,
1,0,0,0,1,
1,0,0,0,1];
# Display test map
print "\nTest map:\n";
$net->join_cols($tmp,5);
# Display network results
print "Letter index matched: ",$net->run($tmp)->[0],"\n";
examples/ex_and.pl
# Uses 1 layer and 2 nodes per layer, with one output node
my $net = new AI::NeuralNet::Mesh([
{
nodes => 2, # input layer, 2 nodes
activation => linear # linear transfer function
},
{
nodes => 1, # output layer, 1 node
activation => sigmoid, # sigmoid transfer function, (0/1)
threshold => 0.75 # set threshold for sigmoid fn to 0.75
}
]);
if(!$net->load('and.mesh')) {
$net->learn_set([
[1,1], [1],
[1,0], [0],
[0,1], [0],
[0,0], [0],
]);
examples/ex_bmp.pl
Author: Josiah Bryan, <jdb@wcoil.com>
Desc:
This demonstrates simple classification of 6x6 bitmaps.
=cut
use AI::NeuralNet::Mesh;
use Benchmark;
# Set resolution
my $xres=5;
my $yres=5;
# Create a new net with 3 layers, $xres*$yres inputs, and 1 output
my $net = AI::NeuralNet::Mesh->new(1,$xres*$yres,1);
# Enable debugging
$net->debug(4);
# Create datasets.
my @data = (
[ 0,1,1,0,0,
0,0,1,0,0,
0,0,1,0,0,
0,0,1,0,0,
examples/ex_bmp2.pl
=begin
File: examples/ex_bmp2.pl
Author: Josiah Bryan, <jdb@wcoil.com>
Desc:
This demonstrates the ability of a neural net
to generalize and predict what the correct
result is for inputs that it has never seen before.
This teaches a network to recognize a 5x7 bitmap of
the letter "J" then it presents the network with a
corrupted "J" and displays the results of the networks
output.
=cut
use AI::NeuralNet::Mesh;
# Create a new network with 2 layers and 35 neurons in each layer.
my $net = new AI::NeuralNet::Mesh(1,35,1);
# Debug level of 4 gives JUST learn loop iteration benchmark and comparison data
# as learning progresses.
$net->debug(4);
# Create our model input
my @map = (1,1,1,1,1,
0,0,1,0,0,
0,0,1,0,0,
0,0,1,0,0,
1,0,1,0,0,
1,0,1,0,0,
1,1,1,0,0);
examples/ex_crunch.pl
"I love oranges.", $good
];
#$net->debug(4);
for (0..2) {
my $f = $net->learn_set($set);
print "Forgotten: $f%\n";
}
# run() automatically crunches the string (run_uc() uses run() internally) and
# run_uc() automatically uncrunches the results.
print $net->run_uc("I love pop-tarts.");
examples/ex_mult.pl
=begin
File: examples/ex_mult.pl
Author: Josiah Bryan, <jdb@wcoil.com>
Desc:
This demonstrates the ability of a neural net to generalize and predict what the correct
result is for inputs that it has never seen before.
This teaches a network to multiply 6 sets of numbers, then it asks the user for
two numbers to multiply and then it displays the results of the user's input.
=cut
use AI::NeuralNet::Mesh;
my $multiply = new AI::NeuralNet::Mesh(2,2,1);
if(!$multiply->load('mult.mesh')) {
$multiply->learn_set([
[ 1, 1 ], [ 1 ] ,
examples/ex_or.pl
as an OR gate with no learning and only a sigmoid transfer function
on the output node.
=cut
use AI::NeuralNet::Mesh;
# Uses 1 layer and 2 nodes per layer, with one output node
my $net = new AI::NeuralNet::Mesh(1,2,1);
# Example of alternate ways to set activation and thresholds
$net->activation(1,sigmoid);
$net->threshold( 1,0.5);
if(!$net->load('or.mesh')) {
$net->learn_set([
[1,1], [1],
[1,0], [1],
[0,1], [1],
[0,0], [0],
]);
$net->save('or.mesh');
}
examples/ex_pcx.pl
$net->{col_width} = $bx;
print "Done!\n";
print "Loading bitmap...";
my $img = $net->load_pcx("josiah.pcx");
print "Done!\n";
print "Comparing blocks...\n";
my $white = $img->get_block([0,0,$bx,$by]);
my ($x,$y,$tmp,@score,$s,@blocks,$b);
for ($x=0;$x<320;$x+=$bx) {
for ($y=0;$y<200;$y+=$by) {
$blocks[$b++]=$img->get_block([$x,$y,$x+$bx,$y+$by]);
$score[$s++]=$net->pdiff($white,$blocks[$b-1]);
print "Block at [$x,$y], index [$s] scored ".$score[$s-1]."%\n";
}
}
print "Done!";
print "High score:\n";
examples/ex_sub.pl
=begin
File: examples/ex_sub.pl
Author: Josiah Bryan, <jdb@wcoil.com>
Desc:
This demonstrates the ability of a neural net to generalize and predict what the correct
result is for inputs that it has never seen before.
This teaches a network to subtract 6 sets of numbers, then it asks the user for
two numbers to subtract and then it displays the results of the user's input.
=cut
use AI::NeuralNet::Mesh;
my $subtract = new AI::NeuralNet::Mesh(2,2,1);
if(!$subtract->load('sub.mesh')) {
$subtract->learn_set([
[ 1, 1 ], [ 0 ] ,
examples/ex_synop.pl
my @phrases = (
$phrase1, $phrase2,
$phrase3, $phrase4
);
# Learn the data set
$net->learn_set(\@phrases);
# Run a test phrase through the network
my $test_phrase = $net->crunch("I love neural networking!");
my $result = $net->run($test_phrase);
# Get this, it prints "Jay Leno is networking!" ... LOL!
print $net->uncrunch($result),"\n";
examples/ex_wine.pl
=begin
File: examples/ex_wine.pl
Author: Josiah Bryan, <jdb@wcoil.com>
Desc:
This demonstrates wine cultivar prediction using the
AI::NeuralNet::Mesh module.
This script uses data that are the results of a chemical analysis
of wines grown in the same region in Italy but derived from three
different cultivars. The analysis determined the quantities
of 13 constituents found in each of the three types of wines.
The inputs of the net represent 13 separate attributes
of the wine's chemical analysis, as follows:
1) Alcohol
2) Malic acid
3) Ash
4) Alcalinity of ash
5) Magnesium
6) Total phenols
7) Flavanoids
8) Nonflavanoid phenols
examples/ex_wine.pl
# Set activation on output node to constrain values
# to a specific range of values.
$net->activation(2,range(1..3));
my @cnts;
for(0..$#{$sets}) {
next if(!defined $sets->[$_]->[0]);
my $s=$#{$sets->[$_]}/2;
my $cnt=0;
print "Set: $_\n";
my $results=$net->run_set($sets->[$_]);
for my $x (0..$s-1) {
$cnt++ if($results->[$x]->[0]==$_);
}
$cnts[$_]=$cnt;
}
for(0..$#{$sets}) {
next if(!defined $sets->[$_]->[0]);
my $s=$#{$sets->[$_]}/2;
print "Class $_: $cnts[$_] correct out of $s (",$net->p($cnts[$_],$s),"%).\n";
}
# Create a mesh with 2 layers, 2 nodes/layer, and one output node.
my $net = new AI::NeuralNet::Mesh(2,2,1);
# Teach the network the AND function
$net->learn([0,0],[0]);
$net->learn([0,1],[0]);
$net->learn([1,0],[0]);
$net->learn([1,1],[1]);
# Present it with two test cases
my $result_bit_1 = $net->run([0,1])->[0];
my $result_bit_2 = $net->run([1,1])->[0];
# Display the results
print "AND test with inputs (0,1): $result_bit_1\n";
print "AND test with inputs (1,1): $result_bit_2\n";
</PRE>
<P>
<HR>
<H1><A NAME="version & updates">VERSION & UPDATES</A></H1>
<P>This is version <STRONG>0.43</STRONG>, the second release of this module.</P>
<P>With this version I have gone through and tuned up many area
of this module, including the descent algorithim in learn(),
as well as four custom activation functions, and several export
tag sets. With this release, I have also included a few
<P>
<HR>
<H1><A NAME="description">DESCRIPTION</A></H1>
<P>AI::NeuralNet::Mesh is an optimized, accurate neural network Mesh.
It was designed with accruacy and speed in mind.</P>
<P>This network model is very flexable. It will allow for clasic binary
operation or any range of integer or floating-point inputs you care
to provide. With this you can change activation types on a per node or
per layer basis (you can even include your own anonymous subs as
activation types). You can add sigmoid transfer functions and control
the threshold. You can learn data sets in batch, and load CSV data
set files. You can do almost anything you need to with this module.
This code is deigned to be flexable. Any new ideas for this module?
See AUTHOR, below, for contact info.</P>
<P>This module is designed to also be a customizable, extensable
neural network simulation toolkit. Through a combination of setting
the $Connection variable and using custom activation functions, as
well as basic package inheritance, you can simulate many different
types of neural network structures with very little new code written
by you.</P>
<P>In this module I have included a more accurate form of ``learning'' for the
mesh. This form preforms descent toward a local error minimum (0) on a
directional delta, rather than the desired value for that node. This allows
for better, and more accurate results with larger datasets. This module also
uses a simpler recursion technique which, suprisingly, is more accurate than
the original technique that I've used in other ANNs.</P>
<P>
<HR>
<H1><A NAME="exports">EXPORTS</A></H1>
<P>This module exports three functions by default:</P>
<PRE>
range
intr
pdiff
</PRE>
<P>See range() intr() and pdiff() for description of their respective functions.</P>
<P>Also provided are several export tag sets for usage in the form of:</P>
<PRE>
use AI::NeuralNet::Mesh ':tag';
</PRE>
<P>Tag sets are:</P>
<PRE>
:default
- These functions are always exported.
- Exports:
range()
p()
high()
low()
:acts
- Exports:
ramp()
and_gate()
or_gate()
</PRE>
<P>See the respective methods/functions for information about
each method/functions usage.</P>
<P>
<HR>
<H1><A NAME="methods">METHODS</A></H1>
<DL>
<DT><STRONG><A NAME="item_new">AI::NeuralNet::Mesh->new();</A></STRONG><BR>
<DD>
There are four ways to construct a new network with new(). Each is detailed below.
<P>P.S. Don't worry, the old <A HREF="#item_new"><CODE>new($layers, $nodes [, $outputs])</CODE></A> still works like always!</P>
<P></P>
object. The network will have <CODE>$layers</CODE> number of layers in it
and it will have <CODE>$nodes</CODE> number of nodes per layer.
<P>There is an optional parameter of $outputs, which specifies the number
of output neurons to provide. If $outputs is not specified, $outputs
defaults to equal $size.</P>
<P></P>
<DT><STRONG>AI::NeuralNet::Mesh->new($file);</STRONG><BR>
<DD>
This will automatically create a new network from the file <CODE>$file</CODE>. It will
return undef if the file was of an incorrect format or non-existant. Otherwise,
it will return a blessed refrence to a network completly restored from <CODE>$file</CODE>.
<P></P>
<DT><STRONG>AI::NeuralNet::Mesh->new(\@layer_sizes);</STRONG><BR>
<DD>
This constructor will make a network with the number of layers corresponding to the length
in elements of the array ref passed. Each element in the array ref passed is expected
to contain an integer specifying the number of nodes (neurons) in that layer. The first
layer ($layer_sizes[0]) is to be the input layer, and the last layer in @layer_sizes is to be
the output layer.
<P>Example:</P>
<PRE>
my $net = AI::NeuralNet::Mesh->new([2,3,1]);</PRE>
<P>Creates a network with 2 input nodes, 3 hidden nodes, and 1 output node.</P>
<P></P>
<DT><STRONG>AI::NeuralNet::Mesh->new(\@array_of_hashes);</STRONG><BR>
<DD>
Another dandy constructor...this is my favorite. It allows you to tailor the number of layers,
the size of the layers, the activation type (you can even add anonymous inline subs with this one),
and even the threshold, all with one array ref-ed constructor.
<P>Example:</P>
<PRE>
my $net = AI::NeuralNet::Mesh->new([
{
nodes => 2,
activation => linear
},
{
nodes => 3,
activation => sub {
my $sum = shift;
return $sum + rand()*1;
}
},
{
nodes => 1,
activation => sigmoid,
threshold => 0.75
}
]);
</PRE>
<P>Interesting, eh? What you are basically passing is this:</P>
<PRE>
my @info = (
{ },
{ },
{ },
...
);</PRE>
<P>You are passing an array ref who's each element is a hash refrence. Each
hash refrence, or more precisely, each element in the array refrence you are passing
to the constructor, represents a layer in the network. Like the constructor above,
the first element is the input layer, and the last is the output layer. The rest are
hidden layers.</P>
<P>Each hash refrence is expected to have AT LEAST the ``nodes'' key set to the number
of nodes (neurons) in that layer. The other two keys are optional. If ``activation'' is left
out, it defaults to ``linear''. If ``threshold'' is left out, it defaults to 0.50.</P>
<P>The ``activation'' key can be one of four values:</P>
<PRE>
linear ( simply use sum of inputs as output )
sigmoid [ sigmoid_1 ] ( only positive sigmoid )
sigmoid_2 ( positive / 0 /negative sigmoid )
\&code_ref;</PRE>
<P>``sigmoid_1'' is an alias for ``sigmoid''.</P>
<P>The code ref option allows you to have a custom activation function for that layer.
The code ref is called with this syntax:</P>
<PRE>
$output = &$code_ref($sum_of_inputs, $self);
</PRE>
<P>The code ref is expected to return a value to be used as the output of the node.
The code ref also has access to all the data of that node through the second argument,
a blessed hash refrence to that node.</P>
<P>See CUSTOM ACTIVATION FUNCTIONS for information on several included activation functions
other than the ones listed above.</P>
<P>Three of the activation syntaxes are shown in the first constructor above, the ``linear'',
``sigmoid'' and code ref types.</P>
<P>You can also set the activation and threshold values after network creation with the
<A HREF="#item_activation"><CODE>activation()</CODE></A> and <A HREF="#item_threshold"><CODE>threshold()</CODE></A> methods.</P>
<P></P>
<P></P>
<DT><STRONG><A NAME="item_learn">$net->learn($input_map_ref, $desired_result_ref [, options ]);</A></STRONG><BR>
<DD>
NOTE: <A HREF="#item_learn_set"><CODE>learn_set()</CODE></A> now has increment-degrading turned OFF by default. See note
on the degrade flag, below.
<P>This will 'teach' a network to associate an new input map with a desired
result. It will return a string containg benchmarking information.</P>
<P>You can also specify strings as inputs and ouputs to learn, and they will be
crunched automatically. Example:</P>
<PRE>
$net->learn('corn', 'cob');
</PRE>
<P>Note, the old method of calling crunch on the values still works just as well.</P>
<P>The first two arguments may be array refs (or now, strings), and they may be
of different lengths.</P>
<P>Options should be written on hash form. There are three options:
</P>
error => $maximum_allowable_percentage_of_error
degrade => $degrade_increment_flag</PRE>
<P>$learning_gradient is an optional value used to adjust the weights of the internal
connections. If $learning_gradient is ommitted, it defaults to 0.002.
</P>
<P>$maximum_iterations is the maximum numbers of iteration the loop should do.
It defaults to 1024. Set it to 0 if you never want the loop to quit before
the pattern is perfectly learned.</P>
<P>$maximum_allowable_percentage_of_error is the maximum allowable error to have. If
this is set, then <A HREF="#item_learn"><CODE>learn()</CODE></A> will return when the perecentage difference between the
actual results and desired results falls below $maximum_allowable_percentage_of_error.
If you do not include 'error', or $maximum_allowable_percentage_of_error is set to -1,
then <A HREF="#item_learn"><CODE>learn()</CODE></A> will not return until it gets an exact match for the desired result OR it
reaches $maximum_iterations.</P>
<P>$degrade_increment_flag is a simple flag used to allow/dissalow increment degrading
during learning based on a product of the error difference with several other factors.
$degrade_increment_flag is off by default. Setting $degrade_increment_flag to a true
value turns increment degrading on.</P>
<P>In previous module releases $degrade_increment_flag was not used, as increment degrading
was always on. In this release I have looked at several other network types as well
as several texts and decided that it would be better to not use increment degrading. The
option is still there for those that feel the inclination to use it. I have found some areas
that do need the degrade flag to work at a faster speed. See test.pl for an example. If
[ 1,2,3,4 ], [ 1,3,5,6 ],
[ 0,2,5,6 ], [ 0,2,1,2 ]
);</PRE>
<P>Inputs and outputs in the dataset can also be strings.</P>
<P>See the paragraph on measuring forgetfulness, below. There are
two learn_set()-specific option tags available:</P>
<PRE>
flag => $flag
pattern => $row</PRE>
<P>If ``flag'' is set to some TRUE value, as in ``flag => 1'' in the hash of options, or if the option ``flag''
is not set, then it will return a percentage represting the amount of forgetfullness. Otherwise,
<A HREF="#item_learn_set"><CODE>learn_set()</CODE></A> will return an integer specifying the amount of forgetfulness when all the patterns
are learned.</P>
<P>If ``pattern'' is set, then <A HREF="#item_learn_set"><CODE>learn_set()</CODE></A> will use that pattern in the data set to measure forgetfulness by.
If ``pattern'' is omitted, it defaults to the first pattern in the set. Example:</P>
<PRE>
my @set = (
[ 0,1,0,1 ], [ 0 ],
[ 0,0,1,0 ], [ 1 ],
[ 1,1,0,1 ], [ 2 ], # <---
[ 0,1,1,0 ], [ 3 ]
);
</PRE>
<P>If you wish to measure forgetfulness as indicated by the line with the arrow, then you would
pass 2 as the "pattern" option, as in "pattern => 2".</P>
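<P>A short sketch of a call using both option tags:</P>
<PRE>
	my $forgetfulness = $net->learn_set(\@set,
	                                    flag    => 1,
	                                    pattern => 2);</PRE>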
<P>Now why the heck would anyone want to measure forgetfulness, you ask? Maybe you wonder how I
even measure that. Well, it is not a vital value that you have to know. I just put in a
``forgetfulness measure'' one day because I thought it would be neat to know.</P>
<P>How the module measures forgetfulness is this: First, it learns all the patterns
in the set provided, then it will run the very first pattern (or whatever pattern
is specified by the ``row'' option) in the set after it has finished learning. It
will compare the <A HREF="#item_run"><CODE>run()</CODE></A> output with the desired output as specified in the dataset.
In a perfect world, the two should match exactly. What we measure is how much
they don't match, thus the amount of forgetfulness the network has.</P>
<P>Example (from examples/ex_dow.pl):</P>
<PRE>
# Data from 1989 (as far as I know..this is taken from example data on BrainMaker)
my @data = (
# Mo CPI CPI-1 CPI-3 Oil Oil-1 Oil-3 Dow Dow-1 Dow-3 Dow Ave (output)
	# ... (see examples/ex_dow.pl for the full dataset)</PRE>
<P></P>
<DT><STRONG><A NAME="item_run_uc">$net->run_uc($input_map_ref);</A></STRONG><BR>
<DD>
This method does the same thing as this code:
<PRE>
$net->uncrunch($net->run($input_map_ref));</PRE>
<P>All that <A HREF="#item_run_uc"><CODE>run_uc()</CODE></A> does is that it automatically calls <A HREF="#item_uncrunch"><CODE>uncrunch()</CODE></A> on the output, regardless
of whether the input was <A HREF="#item_crunch"><CODE>crunch()</CODE></A> -ed or not.</P>
<P></P>
<DT><STRONG><A NAME="item_run_set">$net->run_set($set);</A></STRONG><BR>
<DD>
<P>This takes an array ref of the same structure as the learn_set() method, above. It returns
an array ref. Each element in the returned array ref represents the output for the corresponding
element in the dataset passed. Uses run() internally.</P>
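<P>A minimal sketch of <CODE>run_set()</CODE> usage (the dataset values are illustrative):</P>
<PRE>
	my @set = (
		[ 0,1 ], [ 1 ],
		[ 1,0 ], [ 1 ]
	);
	my $results = $net->run_set(\@set);
	# $results->[0] is the output map for the first input, [ 0,1 ];
	# $results->[1] is the output map for the second input, [ 1,0 ].</PRE>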
<DT><STRONG><A NAME="item_get_outs">$net->get_outs($set);</A></STRONG><BR>
<DD>
Simple utility function which takes an array ref of the same structure as the <A HREF="#item_learn_set"><CODE>learn_set()</CODE></A> method,
above. It returns an array ref of the same type as <A HREF="#item_run_set"><CODE>run_set()</CODE></A> wherein each element contains an
output value. The output values are the target values specified in the $set passed. Each element
in the returned array ref represents the output value for the corresponding row in the dataset
passed. (A row is two elements of the dataset together, see <A HREF="#item_learn_set"><CODE>learn_set()</CODE></A> for dataset structure.)
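<P>For example, to compare what the network actually produces against the targets stored in the set (a sketch, reusing the @set from the <CODE>run_set()</CODE> sketch above):</P>
<PRE>
	my $got    = $net->run_set(\@set);
	my $wanted = $net->get_outs(\@set);
	for my $row (0..$#{$wanted}) {
		print "row $row: got $got->[$row]->[0], wanted $wanted->[$row]->[0]\n";
	}</PRE>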
<P></P>
<DT><STRONG><A NAME="item_load_set">$net->load_set($file,$column,$seperator);</A></STRONG><BR>
<DD>
Loads a CSV-like dataset from disk
<P>Returns a data set of the same structure as required by the
<A HREF="#item_learn_set"><CODE>learn_set()</CODE></A> method. $file is the disk file to load set from.
$column is an optional variable specifying the column in the
data set to use as the class (output) attribute; it defaults to 0.
$seperator is an optional variable specifying the separator character
between values in the file.</P>
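<P>A minimal usage sketch (the file name is hypothetical):</P>
<PRE>
	# Hypothetical file name; column 0 is used as the output attribute.
	my $set = $net->load_set("dow.csv", 0, ",");
	$net->learn_set($set);</PRE>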
<P></P>
<DT><STRONG><A NAME="item_save">$net->save($filename);</A></STRONG><BR>
<DD>
This will save the complete state of the network to disk, including all connection weights
and any words crunched with <A HREF="#item_crunch"><CODE>crunch()</CODE></A>.
<P>This uses a simple flat-file text storage format, and therefore the network files should
be fairly portable.</P>
<P>This method will return undef if there was a problem with writing the file. If there is an
error, it will set the internal error message, which you can retrieve with the <A HREF="#item_error"><CODE>error()</CODE></A> method,
below.</P>
<P>If there were no errors, it will return a reference to $net.</P>
<P></P>
<DT><STRONG><A NAME="item_load">$net->load($filename);</A></STRONG><BR>
<DD>
This will load from disk any network saved by <A HREF="#item_save"><CODE>save()</CODE></A> and completely restore the internal
state to the point at which <A HREF="#item_save"><CODE>save()</CODE></A> was called.
<P>If the file is of an invalid file type, then <A HREF="#item_load"><CODE>load()</CODE></A> will
return undef. Use the <A HREF="#item_error"><CODE>error()</CODE></A> method, below, to print the error message.</P>
<P>If there were no errors, it will return a reference to $net.</P>
<P>UPDATE: $filename can now be a newline-separated set of mesh data. This enables you
to do $net->load(join(``\n'',<DATA>)) and other fun things. I added this mainly
for a demo I'm writing but am not quite done with yet. So, cheers!</P>
<P></P>
<DT><STRONG><A NAME="item_activation">$net->activation($layer,$type);</A></STRONG><BR>
<DD>
This sets the activation type for layer $layer. $type may be one of the built-in
activation names (for example, the ``sigmoid'' variants mentioned under <A HREF="#item_threshold"><CODE>threshold()</CODE></A>, below)
or a code ref.
<P>The code ref option allows you to have a custom activation function for that layer.
The code ref is called with this syntax:</P>
<PRE>
$output = &$code_ref($sum_of_inputs, $self);
</PRE>
<P>The code ref is expected to return a value to be used as the output of the node.
The code ref also has access to all the data of that node through the second argument,
a blessed hash reference to that node.</P>
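<P>For instance, a simple clipping activation for layer 1 might look like this (purely illustrative):</P>
<PRE>
	$net->activation(1, sub {
		my ($sum, $node) = @_;    # weighted sum of inputs, and the node itself
		return 0 if $sum < 0;     # clip below 0
		return 1 if $sum > 1;     # clip above 1
		return $sum;
	});</PRE>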
<P>See CUSTOM ACTIVATION FUNCTIONS for information on several included activation functions
other than the ones listed above.</P>
<P>The activation type for each layer is preserved across load/save calls.</P>
<P>EXCEPTION: Due to the constraints of Perl, I cannot load/save the actual subs that the code
ref option points to. Therefore, you must re-apply any code ref activation types after a
<A HREF="#item_load"><CODE>load()</CODE></A> call.</P>
<P></P>
<DT><STRONG><A NAME="item_node_activation">$net->node_activation($layer,$node,$type);</A></STRONG><BR>
<DD>
This sets the activation function for a specific node in a layer. The same notes apply
here as to the <A HREF="#item_activation"><CODE>activation()</CODE></A> method above.
<P></P>
<DT><STRONG><A NAME="item_threshold">$net->threshold($layer,$value);</A></STRONG><BR>
<DD>
This sets the activation threshold for a specific layer. The threshold is only used
when activation is set to ``sigmoid'', ``sigmoid_1'', or ``sigmoid_2''.
<P></P>
<DT><STRONG><A NAME="item_node_threshold">$net->node_threshold($layer,$node,$value);</A></STRONG><BR>
<DD>
This sets the activation threshold for a specific node in a layer. The threshold is only used
when activation is set to ``sigmoid'', ``sigmoid_1'', or ``sigmoid_2''.
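<P>A short sketch combining the two (assuming a ``sigmoid'' activation on layer 1):</P>
<PRE>
	$net->activation(1, "sigmoid");      # thresholds only apply to the sigmoid types
	$net->threshold(1, 0.5);             # layer-wide threshold for layer 1
	$net->node_threshold(1, 2, 0.75);    # override for node 2 of layer 1 only</PRE>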
<P></P>
<DT><STRONG><A NAME="item_join_cols">$net->join_cols($array_ref,$row_length_in_elements,$high_state_character,$low_state_character);</A></STRONG><BR>
<DD>
This is more of a utility function than any real necessary function of the package.
Instead of joining all the elements of the array together into one long string, like <CODE>join()</CODE> does,
it prints the elements of $array_ref to STDOUT, adding a newline (\n) after every $row_length_in_elements
elements have been printed. Additionally, if you include a $high_state_character and a $low_state_character,
it will print the $high_state_character (which can be more than one character) for every element that
has a true value, and the $low_state_character for every element that has a false value.
If you do not supply a $high_state_character, or the $high_state_character is a null, empty, or
undefined string, then <A HREF="#item_join_cols"><CODE>join_cols()</CODE></A> will just print the numerical value of each element separated
by a null character (\0). <A HREF="#item_join_cols"><CODE>join_cols()</CODE></A> defaults to the latter behaviour.
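<P>For example, to print a 4-element-wide bitmap of a result, using ``#'' for true elements and ``.'' for false ones (a sketch; $map is whatever input map you have just run):</P>
<PRE>
	$net->join_cols($net->run($map), 4, "#", ".");</PRE>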
<P></P>
<DT><STRONG><A NAME="item_extend">$net->extend(\@array_of_hashes);</A></STRONG><BR>
<DD>
This allows you to re-apply any activations and thresholds with the same array ref which
you created a network with. This is useful for re-applying code ref activations after a <A HREF="#item_load"><CODE>load()</CODE></A>
call without having to type the code ref twice.
<P>You can also specify the extension in a simple array ref like this:</P>
<PRE>
$net->extend([2,3,1]);
</PRE>
<P>This will simply add nodes where needed so that the number of nodes in each layer matches the
respective element of the array ref. This works just like the <A HREF="#item_new"><CODE>new()</CODE></A> constructor form, above.</P>
<P>NOTE: Your net will probably require re-training after adding nodes.</P>
<P></P>
<DT><STRONG><A NAME="item_extend_layer">$net->extend_layer($layer,\%hash);</A></STRONG><BR>
<DD>
With this you can modify only one layer with its specifications in a hash reference. This hash
reference uses the same keys as for the last <A HREF="#item_new"><CODE>new()</CODE></A> constructor form, above.
<P>You can also specify just the number of nodes for the layer in this form:</P>
<PRE>
$net->extend_layer(0,5);</PRE>
<P>Which will set the number of nodes in layer 0 to 5 nodes. This is the same as calling <A HREF="#item_add_nodes"><CODE>add_nodes()</CODE></A> directly, as sketched below.</P>
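<P>A minimal sketch of the equivalent call:</P>
<PRE>
	$net->add_nodes(0,5);</PRE>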
<P></P>
<DT><STRONG><A NAME="item_add_nodes">$net->add_nodes($layer,$total_nodes);</A></STRONG><BR>
<DD>
This method was created mainly to service the extend*() group of functions, but it
can also be called independently. It will add nodes as needed to layer <CODE>$layer</CODE> to
bring the total number of nodes in that layer up to $total_nodes.
<P>NOTE: Your net will probably require re-training after adding nodes.</P>
<P></P>
<DT><STRONG><A NAME="item_p">$net->p($a,$b);</A></STRONG><BR>
<DD>
Returns a floating point number which represents $a as a percentage of $b.
<P></P>
<DT><STRONG><A NAME="item_intr">$net->intr($float);</A></STRONG><BR>
<DD>
Rounds a floating-point number to an integer using <CODE>sprintf()</CODE> and <CODE>int()</CODE>. This provides
better rounding than simply calling <CODE>int()</CODE> on the float. It is also used very heavily internally.
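<P>A couple of illustrative calls, assuming the behaviour described above:</P>
<PRE>
	print $net->p(25, 200), "\n";    # prints 12.5 -- 25 is 12.5% of 200
	print $net->intr(0.85), "\n";    # prints 1 -- rounds rather than truncating to 0</PRE>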
<P></P>
<DT><STRONG><A NAME="item_high">$net->high($array_ref);</A></STRONG><BR>
<DD>
Returns the index of the element with the highest value in the array ref passed.
<P></P>
<DT><STRONG><A NAME="item_pdiff">$net->pdiff($array_ref_A,$array_ref_B);</A></STRONG><BR>
<DD>
Returns the percentage difference between the values of the two array refs passed, as a
percent string.
<P></P>
<DT><STRONG><A NAME="item_show">$net->show();</A></STRONG><BR>
<DD>
This will dump a simple listing of all the weights of all the connections of every neuron
in the network to STDOUT.
<P></P>
<DT><STRONG><A NAME="item_crunch">$net->crunch($string);</A></STRONG><BR>
<DD>
This splits a string passed with /[\s\t]/ into an array ref containing unique indexes
to the words. The words are stored in an internal array and preserved across <A HREF="#item_load"><CODE>load()</CODE></A> and <A HREF="#item_save"><CODE>save()</CODE></A>
calls. This is designed to be used to generate unique maps suitable for passing to <A HREF="#item_learn"><CODE>learn()</CODE></A> and
<A HREF="#item_run"><CODE>run()</CODE></A> directly. It returns an array ref.
<P>The words are not duplicated internally. For example:</P>
<PRE>
$net->crunch("How are you?");</PRE>
<P>Will probably return an array ref containing 1,2,3. A subsequent call of:</P>
<PRE>
$net->crunch("How is Jane?");</PRE>
<P>Will probably return an array ref containing 1,4,5. Notice, the first element stayed
the same. That is because it already stored the word ``How''. So, each word is stored
only once internally, and the same index is reused whenever that word appears again.</P>
<P>Here is an example of using <A HREF="#item_crunch"><CODE>crunch()</CODE></A> with <A HREF="#item_learn_set"><CODE>learn_set()</CODE></A>:</P>
<PRE>
	for (0..3) {
		$net->learn_set([
			$net->crunch("I love chips."),  $net->crunch("That's Junk Food!"),
			$net->crunch("I love apples."), $net->crunch("Good, Healthy Food."),
			$net->crunch("I love pop."),    $net->crunch("That's Junk Food!"),
			$net->crunch("I love oranges."),$net->crunch("Good, Healthy Food.")
		]);
	}
	print $net->run_uc("I love corn."),"\n";</PRE>
<P>On my system, this responds with, ``Good, Healthy Food.'' If you try to run <A HREF="#item_crunch"><CODE>crunch()</CODE></A> with
``I love pop.'', though, you will probably get ``Food! apples. apples.'' (At least it returns
that on my system.) As you can see, the associations are not yet perfect, but it can make
for some interesting demos!</P>
<P></P>
<DT><STRONG><A NAME="item_crunched">$net->crunched($word);</A></STRONG><BR>
<DD>
This will return undef if the word is not in the internal crunch list, or it will return the
index of the word if it exists in the crunch list.
<P>If the word is not in the list, it will set the internal error value with a text message
that you can retrieve with the <A HREF="#item_error"><CODE>error()</CODE></A> method, below.</P>
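<P>A small sketch:</P>
<PRE>
	my $index = $net->crunched("How");
	if(defined $index) {
		print "'How' is stored at crunch index $index\n";
	} else {
		print $net->error();
	}</PRE>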
<P></P>
<DT><STRONG><A NAME="item_word">$net->word($word);</A></STRONG><BR>
<DD>
A function alias for crunched().
<P></P>
<DT><STRONG><A NAME="item_col_width">$net->col_width($width);</A></STRONG><BR>
<DD>
This is useful for formatting the debugging output of Level 4 if you are learning simple
bitmaps. This will set the debugger to automatically insert a line break after that many
elements in the map output when dumping the currently run map during a learn loop.
<P>It will return the current width when called with a 0 or undef value.</P>
<P>The column width is preserved across <A HREF="#item_load"><CODE>load()</CODE></A> and <A HREF="#item_save"><CODE>save()</CODE></A> calls.</P>
<P></P>
<DT><STRONG><A NAME="item_random">$net->random($rand);</A></STRONG><BR>
<DD>
This will set the randomness factor for the network. The default is 0. When called
with no arguments, or an undef value, it will return the current randomness value. When
called with a 0 value, it will disable randomness in the network. The randomness factor
is preserved across <A HREF="#item_load"><CODE>load()</CODE></A> and <A HREF="#item_save"><CODE>save()</CODE></A> calls.
<P></P>
<DT><STRONG><A NAME="item_const">$net->const($const);</A></STRONG><BR>
<DD>
This sets the run const. for the network. The run const. is a value that is added
to every input line when a set of inputs are <A HREF="#item_run"><CODE>run()</CODE></A> or <A HREF="#item_learn"><CODE>learn()</CODE></A> -ed, to prevent the
network from hanging on a 0 value. When called with no arguments, it returns the current
const. value. It defaults to 0.0001 on a newly-created network. The run const. value
is preserved across <A HREF="#item_load"><CODE>load()</CODE></A> and <A HREF="#item_save"><CODE>save()</CODE></A> calls.
<P></P>
<DT><STRONG><A NAME="item_error">$net->error();</A></STRONG><BR>
<DD>
Returns the last error message which occurred in the mesh, or undef if no errors have
occurred.
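<P>A typical usage pattern (the file name is hypothetical):</P>
<PRE>
	my $loaded = $net->load("mesh.net");    # hypothetical file name
	print "Load failed: ", $net->error(), "\n" unless defined $loaded;</PRE>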
<P></P>
<DT><STRONG><A NAME="item_load_pcx">$net->load_pcx($filename);</A></STRONG><BR>
<DD>
NOTE: To use this function, you must have PCX::Loader installed. If you do not have
PCX::Loader installed, it will return undef and store an error for you to retrieve with
the <A HREF="#item_error"><CODE>error()</CODE></A> method, below.
<P></P></DL>
<P>
<HR>
<H1><A NAME="custom activation functions">CUSTOM ACTIVATION FUNCTIONS</A></H1>
<DL>
<DT><STRONG><A NAME="item_range">range(@range_of_values);</A></STRONG><BR>
<DD>
<P><A HREF="#item_range"><CODE>range()</CODE></A> returns an activation closure that constrains a node's output to one of the
values you pass it. You can pass an array containing the range
values (not an array ref), or you can pass a comma-
separated list of values as parameters:</P>
<PRE>
$net->activation(4,range(@numbers));
$net->activation(4,range(6,15,26,106,28,3));</PRE>
<P>Note: when using a <A HREF="#item_range"><CODE>range()</CODE></A> activator, train the
net TWICE on the data set, because the first time
the <A HREF="#item_range"><CODE>range()</CODE></A> function searches for the top value in
the inputs, and therefore, results could fluctuate.
The second learning cycle guarantees more accuracy.</P>
<P>The actual code that implements the range closure is
a bit convoluted, so I will expand on it here as a simple
tutorial for custom activation functions.</P>
<PRE>
= line 1 = sub {
= line 2 = my @values = ( 6..10 );
= line 3 = my $sum = shift;
= line 4 = my $self = shift;
        = line 5 =     $self->{top_value}=$sum if($sum>$self->{top_value});</PRE>
<P></P>
<DT><STRONG><A NAME="item_ramp">ramp($r);</A></STRONG><BR>
<DD>
<A HREF="#item_ramp"><CODE>ramp()</CODE></A> performs smooth ramp activation between 0 and 1 if $r is 1,
or between -1 and 1 if $r is 2. $r defaults to 1.
<P>You can get this into your namespace with the ':acts' export
tag as so:
</P>
<PRE>
use AI::NeuralNet::Mesh ':acts';</PRE>
<P>Note: when using a <A HREF="#item_ramp"><CODE>ramp()</CODE></A> activator, train the
net at least TWICE on the data set, because the first
time the <A HREF="#item_ramp"><CODE>ramp()</CODE></A> function searches for the top value in
the inputs, and therefore, results could fluctuate.
The second learning cycle guarantees more accuracy.</P>
<P>No code to show here, as it is almost exactly the same as range().</P>
<P></P>
<DT><STRONG><A NAME="item_and_gate">and_gate($threshold);</A></STRONG><BR>
<DD>
Pretty much self-explanatory: this turns the node into a basic AND gate.
$threshold is used to decide if an input is true or false (1 or 0). If
an input is below $threshold, it is false. $threshold defaults to 0.5.
<P>You can get this into your namespace with the ':acts' export
tag as so:
</P>
<PRE>
use AI::NeuralNet::Mesh ':acts';</PRE>
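<P>A usage sketch, assuming (as with <CODE>range()</CODE>) that <CODE>and_gate()</CODE> returns an activation code ref you can hand to <A HREF="#item_activation"><CODE>activation()</CODE></A>:</P>
<PRE>
	use AI::NeuralNet::Mesh ':acts';
	$net->activation(1, and_gate(0.6));    # layer 1 now behaves as an AND gate</PRE>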
<P>Let's look at the code real quick, as it shows how to get at the individual
input connections:</P>
<PRE>
= line 1 = sub {
= line 2 = my $sum = shift;
= line 3 = my $self = shift;
= line 4 = my $threshold = 0.50;
= line 5 = for my $x (0..$self->{_inputs_size}-1) {
        = line 6 =         return 0.000001 if($self->{_inputs}->[$x]->{value} < $threshold);
= line 7 = }
= line 8 = return $sum/$self->{_inputs_size};
= line 9 = }</PRE>
<P>Lines 2 and 3 pull in our sum and self reference. Line 5 opens a loop to go over
all the input lines into this node. Line 6 looks at each input line's value
and compares it to the threshold. If the value of that line is below threshold, then
we return 0.000001 to signify a 0 value. (We don't return a 0 value so that the network
doesn't get hung trying to multiply a 0 by a huge weight during training [it would just
keep getting 0 as the product, and it would never learn].) Line 8 returns the mean
value of all the inputs if all inputs were above threshold.</P>
<P>Very simple, eh? :)
</P>
<P></P>
<DT><STRONG><A NAME="item_or_gate">or_gate($threshold);</A></STRONG><BR>
<DD>
<P>Self-explanatory. Turns the node into a basic OR gate; $threshold is used the same as above.</P>
<P>You can get this into your namespace with the ':acts' export
tag as so:
</P>
<PRE>
use AI::NeuralNet::Mesh ':acts';</PRE>
<P></P></DL>
<P>
<HR>
<H1><A NAME="variables">VARIABLES</A></H1>
<P>
<HR>
<H1><A NAME="what can it do">WHAT CAN IT DO?</A></H1>
<P>Rodin Porrata asked on the ai-neuralnet-backprop mailing list,
``What can they [Neural Networks] do?''. In regards to that question,
consider the following:</P>
<P>Neural Nets are formed by simulated neurons connected together much the same
way the brain's neurons are. Because of this, neural networks are able to associate and
generalize without rules. They have solved problems in pattern recognition,
robotics, speech processing, financial predicting and signal processing, to
name a few.</P>
<P>One of the first impressive neural networks was NetTalk, which read in ASCII
text and correctly pronounced the words (producing phonemes which drove a
speech chip), even those it had never seen before. Designed by Johns Hopkins
biophysicist Terry Sejnowski and Charles Rosenberg of Princeton in 1986,
this application made the Backpropagation training algorithm famous. Using
the same paradigm, a neural network has been trained to classify sonar
returns from undersea mines and rocks. This classifier, designed by
Sejnowski and R. Paul Gorman, performed better than a nearest-neighbor
classifier.</P>
<P>The kinds of problems best solved by neural networks are those that people
are good at such as association, evaluation and pattern recognition.
Problems that are difficult to compute and do not require perfect answers,
just very good answers, are also best done with neural networks. A quick,
very good response is often more desirable than a more accurate answer which
takes longer to compute. This is especially true in robotics or industrial
controller applications. Predictions of behavior and general analysis of
data are also affairs for neural networks. In the financial arena, consumer
loan analysis and financial forecasting make good applications. New network
designers are working on weather forecasts by neural networks (Myself
included). Currently, doctors are developing medical neural networks as an
aid in diagnosis. Attorneys and insurance companies are also working on
neural networks to help estimate the value of claims.</P>
<P>Neural networks are poor at precise calculations and serial processing. They
are also unable to predict or recognize anything that does not inherently
contain some sort of pattern. For example, they cannot predict the lottery,
since this is a random process. It is unlikely that a neural network could
be built which has the capacity to think as well as a person does for two
reasons. Neural networks are terrible at deduction, or logical thinking, and
the human brain is just too complex to simulate completely. Also, some
problems are too difficult for present technology. Real vision, for
example, is a long way off.</P>
<P>In short, Neural Networks are poor at precise calculations, but good at
association, evaluation, and pattern recognition.</P>
<P>
<HR>
<H1><A NAME="examples">EXAMPLES</A></H1>
<P>Included are several example files in the ``examples'' directory from the
distribution ZIP file. Each of the examples includes a short explanation
at the top of the file. Each of these is meant to demonstrate simple, yet
practical (for the most part :-) uses of this module.</P>
<P>
<HR>
<H1><A NAME="bugs">BUGS</A></H1>
<P>This is a beta release of <CODE>AI::NeuralNet::Mesh</CODE>, and, that being the case, I am sure
there are probably bugs in here that I just have not found yet. If you find bugs in this module, I would
appreciate it greatly if you could report them to me at <EM><<A HREF="mailto:jdb@wcoil.com">jdb@wcoil.com</A>></EM>,
or, even better, try to patch them yourself and figure out why the bug is being buggy, and
send me the patched code, again at <EM><<A HREF="mailto:jdb@wcoil.com">jdb@wcoil.com</A>></EM>.</P>
<P>
<HR>
<H1><A NAME="author">AUTHOR</A></H1>
<P>Josiah Bryan <EM><<A HREF="mailto:jdb@wcoil.com">jdb@wcoil.com</A>></EM></P>
<P>Copyright (c) 2000 Josiah Bryan. All rights reserved. This program is free software;
you can redistribute it and/or modify it under the same terms as Perl itself.</P>
<P>The <CODE>AI::NeuralNet::Mesh</CODE> and related modules are free software. THEY COME WITHOUT WARRANTY OF ANY KIND.</P>
<P>$Id: AI::NeuralNet::Mesh.pm, v0.43 2000/15/09 03:29:08 josiah Exp $</P>
<P>
<HR>
<H1><A NAME="thanks">THANKS</A></H1>
<P>Below is a list of the people that have contributed in some way to this module (in no particular order):</P>
<PRE>
Rodin Porrata, rodin@ursa.llnl.gov
        Randal L. Schwartz, merlyn@stonehedge.com</PRE>
<P>
<HR>
<H1><A NAME="download">DOWNLOAD</A></H1>
<P>You can always download the latest copy of AI::NeuralNet::Mesh
from <A HREF="http://www.josiah.countystart.com/modules/get.pl?mesh:pod">http://www.josiah.countystart.com/modules/get.pl?mesh:pod</A></P>
<P>
<HR>
<H1><A NAME="mailing list">MAILING LIST</A></H1>
<P>A mailing list has been set up for AI::NeuralNet::Mesh and AI::NeuralNet::BackProp.
The list is for discussion of AI and neural net related topics as they pertain to
AI::NeuralNet::BackProp and AI::NeuralNet::Mesh. I will also announce in the group
each time a new release of AI::NeuralNet::Mesh is available.</P>
The list address is: <A HREF="mailto:ai-neuralnet-backprop@egroups.com">ai-neuralnet-backprop@egroups.com</A> <BR>
To subscribe, send a blank email to: <A HREF="mailto:ai-neuralnet-backprop-subscribe@egroups.com">ai-neuralnet-backprop-subscribe@egroups.com</A>
<BR><BR><BR>
<HR>
<A HREF="http://www.josiah.countystart.com/modules/get.pl?mesh:(c)"><B>AI::NeuralNet::Mesh</B></A> - An optimized, accurate neural network Mesh. By <A HREF="mailto:jdb@wcoil.com"><B>Josiah Bryan</B></A>.
</BODY>
</HTML>