AI-NeuralNet-Mesh


Mesh.pm

#!/usr/bin/perl	

# Copyright (c) 2000  Josiah Bryan  USA
#
# See AUTHOR section in pod text below for usage and distribution rights.   
#

BEGIN {
	 $AI::NeuralNet::Mesh::VERSION = "0.44";
	 $AI::NeuralNet::Mesh::ID = 
'$Id: AI::NeuralNet::Mesh.pm, v'.$AI::NeuralNet::Mesh::VERSION.' 2000/15/09 03:29:08 josiah Exp $';
}

package AI::NeuralNet::Mesh;
                                 
    require Exporter;
	@ISA         = qw(Exporter);
	@EXPORT      = qw(range intr pdiff);
	%EXPORT_TAGS = ( 
		'default'    => [ qw ( range intr pdiff )],
		'all'        => [ qw ( p low high ramp and_gate or_gate range intr pdiff ) ],
		'p'          => [ qw ( p low high intr pdiff ) ],
		'acts'       => [ qw ( ramp and_gate or_gate range ) ],
	);
    @EXPORT_OK   = ( @{ $EXPORT_TAGS{'all'} }, qw( p low high ramp and_gate or_gate ) );
    
    use strict;
    use Benchmark; 

   	# See POD for usage of this variable.
	$AI::NeuralNet::Mesh::Connector = '_c';
	
	# Debugging subs
	$AI::NeuralNet::Mesh::DEBUG  = 0;
	sub whowasi { (caller(1))[3] . '()' }
	sub debug { shift; $AI::NeuralNet::Mesh::DEBUG = shift || 0; } 
	sub d { shift if(substr($_[0],0,4) eq 'AI::'); my ($a,$b,$c)=(shift,shift,$AI::NeuralNet::Mesh::DEBUG); print $a if($c == $b); return $c }
	sub verbose {debug @_};
	sub verbosity {debug @_};
	sub v {debug @_};
	
	
	# Return version of ::ID string passed or current version of this
	# module if no string is passed. Used in load() to detect file versions.
	sub version {
		shift if(substr($_[0],0,4) eq 'AI::');
		substr((split(/\s/,(shift || $AI::NeuralNet::Mesh::ID)))[2],1);
	}                                  
	
	# Rounds a floating-point number to an integer with int() and sprintf();
	# returns 0 if the value cannot be formatted
	sub intr  {
    	shift if(substr($_[0],0,4) eq 'AI::');
    	my $n = shift;
    	my $r = eval { int(sprintf("%.0f",$n)) };
    	return defined $r ? $r : 0;
	}
    
	# Package constructor
	sub new {
		no strict 'refs';
		my $type	=	shift;
		my $self	=	{};
		my $layers	=	shift;
		my $nodes	=	shift;
		my $outputs	=	shift || $nodes;
		my $inputs	=	shift || $nodes;
        
		bless $self, $type;
		                       
		# If $layers is a string, then it will be numerically equal to 0, so 
		# try to load it as a network file.
		if($layers == 0) {  
		    # We use a "1" flag as the second argument to indicate that we 
		    # want load() to call the new constructor to make a network the
		    # same size as in the file and return a reference to the network,
		    # instead of just loading into a pre-existing reference.
			return $self->load($layers,1);
		}
		
		# Looks like we got ourselves a layer specs array
		if(ref($layers) eq "ARRAY") { 
			if(ref($layers->[0]) eq "HASH") {
				$self->{total_nodes}	=	0;
				$self->{inputs}			=	$layers->[0]->{nodes};
				$self->{nodes}			=	$layers->[0]->{nodes};
				$self->{outputs}		=	$layers->[$#{$layers}]->{nodes};
				$self->{total_layers}	=	$#{$layers};
				for (0..$#{$layers}){$self->{layers}->[$_] = $layers->[$_]->{nodes}}
				for (0..$self->{total_layers}){$self->{total_nodes}+=$self->{layers}->[$_]}	
			} else {
				$self->{inputs}			= $layers->[0];
			    $self->{nodes}			= $layers->[0];
				$self->{outputs}		= $layers->[$#{$layers}];
				$self->{layers} 		= $layers;
				$self->{total_layers}	= $#{$self->{layers}};
				$self->{total_nodes}	= 0;
				for (0..$self->{total_layers}) {
					$self->{total_nodes}+=$self->{layers}->[$_];
				}
			}
		} else {
			$self->{total_nodes}	= $layers * $nodes + $outputs;
			$self->{total_layers}	= $layers;
			$self->{nodes}			= $nodes;
			$self->{inputs}			= $inputs;
			$self->{outputs}		= $outputs;
		}	
		
		# Initialize misc. variables
		$self->{col_width}		=	5;
		$self->{random}			=	0;
		$self->{const}			=	0.0001;
		$self->{connector}		=	$AI::NeuralNet::Mesh::Connector;
		
		# Build mesh
		$self->_init();	
		
		# Initialize activations, thresholds, etc., if provided
		if(ref($layers->[0]) eq "HASH") {
			for (0..$self->{total_layers}) {
				$self->activation($_,$layers->[$_]->{activation});
				$self->threshold($_,$layers->[$_]->{threshold});
				$self->mean($_,$layers->[$_]->{mean});
			}
		}
				
		# Done!
		return $self;
	}	
    

    # Internal usage
    # Connects one range of nodes to another range


		# (end of pdiff(), which returns the average percentage
		# difference between the elements of two array refs)
		return sprintf("%.10f",($diff/$a1s));
	}
	
	# Returns $fa as a percentage of $fb
	sub p {
		shift if(substr($_[0],0,4) eq 'AI::'); 
		my ($fa,$fb)=(shift,shift); 
		sprintf("%.3f",$fa/$fb*100); #((($fb-$fa)*((($fb-$fa)<0)?-1:1))/$fa)*100
	}
	
	# Returns the index of the element with the highest 
	# comparative value in the array ref passed
	sub high {
		shift if(substr($_[0],0,4) eq 'AI::'); 
		my $ref1 = shift; my $tmp = 0;
		for my $x (0..$#{$ref1}) { $tmp = $x if($ref1->[$x] > $ref1->[$tmp]) }
		return $tmp;
	}
	
	# Returns the index of the element with the lowest 
	# comparative value in the array ref passed
	sub low {
		shift if(substr($_[0],0,4) eq 'AI::'); 
		my $ref1 = shift; my $tmp = 0;
		for my $x (0..$#{$ref1}) { $tmp = $x if($ref1->[$x] < $ref1->[$tmp]) }
		return $tmp;
	}  
	
	# Following is a collection of a few nifty custom activation functions.
	# range() is exported by default, the rest you can get with:
	#	use AI::NeuralNet::Mesh ':acts'
	# The ':all' tag also gets these into your namespace.
	 
	#
	# range() returns a closure limiting the output 
	# of that node to a specified set of values.
	# Good for output layers.
	#
	# usage example:
	#	$net->activation(4,range(0..5));
	# or:
	#	..
	#	{ 
	#		nodes		=>	1,
	#		activation	=>	range 0..5
	#	}
	#	..
	# You can also pass an array containing the range
	# values (not an array ref), or you can pass a comma-
	# separated list of values as parameters:
	#
	#	$net->activation(4,range(@numbers));
	#	$net->activation(4,range(6,15,26,106,28,3));
	#
	# Note: when using a range() activator, train the
	# net TWICE on the data set, because the first time
	# the range() function searches for the top value in
	# the inputs, and therefore, results could fluctuate.
	# The second learning cycle guarantees more accuracy.
	#	
	sub range {
		my @r=@_;
		sub{$_[1]->{t}=$_[0]if($_[0]>$_[1]->{t});$r[intr($_[0]/$_[1]->{t}*$#r)]}
	}
	
	#
	# ramp() performs smooth ramp activation between 0 and 1 if $r is 1, 
	# or between -1 and 1 if $r is 2. $r defaults to 1, as you can see.	
	#
	# Note: when using a ramp() activator, train the
	# net at least TWICE on the data set, because the first 
	# time the ramp() function searches for the top value in
	# the inputs, and therefore, results could fluctuate.
	# The second learning cycle guarantees more accuracy.
	#
	sub ramp {
		my $r=shift||1;my $t=($r<2)?0:-1;
		sub{$_[1]->{t}=$_[0]if($_[0]>$_[1]->{t});$_[0]/$_[1]->{t}*$r+$t}
	}

	# Self-explanatory, pretty much. $threshold is used to decide if an input 
	# is true or false (1 or 0). If an input is below $threshold, it is false.
	sub and_gate {
		my $threshold = shift || 0.5;
		sub {
			my $sum  = shift;
			my $self = shift;
			for my $x (0..$self->{_inputs_size}-1) { return $self->{_parent}->{const} if($self->{_inputs}->[$x]->{value} < $threshold) }
			return $sum/$self->{_inputs_size};
		}
	}
	
	# Self-explanatory; $threshold is used the same as above.
	sub or_gate {
		my $threshold = shift || 0.5;
		sub {
			my $sum  = shift;
			my $self = shift;
			for my $x (0..$self->{_inputs_size}-1) { return $sum/$self->{_inputs_size} if($self->{_inputs}->[$x]->{value} >= $threshold) }
			return $self->{_parent}->{const};
		}
	}
	
1;

package AI::NeuralNet::Mesh::node;
	
	use strict;

	# Node constructor
	sub new {
		my $type		=	shift;
		my $self		={ 
			_parent		=>	shift,
			_inputs		=>	[],
			_outputs	=>	[]
		};
		bless $self, $type;
	}

	# Receive inputs from other nodes, and also send
	# outputs on.	
	sub input {
		my $self	=	shift;
		my $input	=	shift;
		my $from_id	=	shift;
		
		$self->{_inputs}->[$from_id]->{value} = $input * $self->{_inputs}->[$from_id]->{weight};
		$self->{_inputs}->[$from_id]->{input} = $input;
		$self->{_inputs}->[$from_id]->{fired} = 1;
		
		$self->{_parent}->d("got input $input from id $from_id, weighted to $self->{_inputs}->[$from_id]->{value}.\n",1);
		
		my $flag	=	1;


Another dandy constructor...this is my favorite. It allows you to tailor the number of layers,
the size of the layers, the activation type (you can even add anonymous inline subs with this one),
and even the threshold, all with one array ref-ed constructor.

Example:

	my $net = AI::NeuralNet::Mesh->new([
	    {
		    nodes        => 2,
		    activation   => 'linear'
		},
		{
		    nodes        => 3,
		    activation   => sub {
		        my $sum  =  shift;
		        return $sum + rand()*1;
		    }
		},
		{
		    nodes        => 1,
		    activation   => 'sigmoid',
		    threshold    => 0.75
		}
	]);
	
	
Interesting, eh? What you are basically passing is this:

	my @info = ( 
		{ },
		{ },
		{ },
		...
	);

You are passing an array ref whose elements are each a hash reference. Each
hash reference, or more precisely, each element in the array reference you are passing
to the constructor, represents a layer in the network. Like the constructor above,
the first element is the input layer, and the last is the output layer. The rest are
hidden layers.

Each hash reference is expected to have AT LEAST the "nodes" key set to the number
of nodes (neurons) in that layer. The other two keys are optional. If "activation" is left
out, it defaults to "linear". If "threshold" is left out, it defaults to 0.50.

The "activation" key can be one of four values:

	linear                    ( simply use sum of inputs as output )
	sigmoid    [ sigmoid_1 ]  ( only positive sigmoid )
	sigmoid_2                 ( positive / 0 /negative sigmoid )
	\&code_ref;

"sigmoid_1" is an alias for "sigmoid". 

The code ref option allows you to have a custom activation function for that layer.
The code ref is called with this syntax:

	$output = &$code_ref($sum_of_inputs, $self);
	
The code ref is expected to return a value to be used as the output of the node.
The code ref also has access to all the data of that node through the second argument,
a blessed hash reference to that node.

See CUSTOM ACTIVATION FUNCTIONS for information on several included activation functions
other than the ones listed above.

Three of the activation syntaxes are shown in the constructor example above: the "linear",
"sigmoid", and code ref types.

You can also set the activation and threshold values after network creation with the
activation() and threshold() methods. 

	



=item $net->learn($input_map_ref, $desired_result_ref [, options ]);

NOTE: learn_set() now has increment-degrading turned OFF by default. See note
on the degrade flag, below.

This will 'teach' a network to associate a new input map with a desired 
result. It will return a string containing benchmarking information. 

You can also specify strings as inputs and outputs to learn, and they will be 
crunched automatically. Example:

	$net->learn('corn', 'cob');
	
	
Note, the old method of calling crunch on the values still works just as well.	

The first two arguments may be array refs (or now, strings), and they may be 
of different lengths.

Options should be written in hash form. There are four options:
	 
	 inc      =>    $learning_gradient
	 max      =>    $maximum_iterations
	 error    =>    $maximum_allowable_percentage_of_error
	 degrade  =>    $degrade_increment_flag
	 

$learning_gradient is an optional value used to adjust the weights of the internal
connections. If $learning_gradient is omitted, it defaults to 0.002.
 
$maximum_iterations is the maximum number of iterations the loop should do.
It defaults to 1024.  Set it to 0 if you never want the loop to quit before
the pattern is perfectly learned.

$maximum_allowable_percentage_of_error is the maximum allowable error to have. If 
this is set, then learn() will return when the percentage difference between the
actual results and desired results falls below $maximum_allowable_percentage_of_error.
If you do not include 'error', or $maximum_allowable_percentage_of_error is set to -1,
then learn() will not return until it gets an exact match for the desired result OR it
reaches $maximum_iterations.

$degrade_increment_flag is a simple flag used to allow/disallow increment degrading
during learning based on a product of the error difference with several other factors.
$degrade_increment_flag is off by default. Setting $degrade_increment_flag to a true
value turns increment degrading on. 
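
Putting the options together, a sketch (the maps and option values are
illustrative):

	# Stop at 5% error or 4096 iterations, whichever comes first.
	my $info = $net->learn([0,1], [1],
		inc     => 0.002,
		max     => 4096,
		error   => 5,
		degrade => 1      # turn increment degrading back on
	);
	print $info;          # benchmarking string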


having to display any of the internal variables. I use this in the ex_aln.pl demo,
as well as the ex_agents.pl demo.

Toggles debugging off when called with no arguments. 



=item $net->save($filename);

This will save the complete state of the network to disk, including all weights and any
words crunched with crunch() . Also saves the layer size and activations of the network.

NOTE: The only activation type NOT saved is the CODE ref type, which must be set again
after loading.

This uses a simple flat-file text storage format, and therefore the network files should
be fairly portable.

This method will return undef if there was a problem with writing the file. If there is an
error, it will set the internal error message, which you can retrieve with the error() method,
below.

If there were no errors, it will return a reference to $net.
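
A typical call checks for that undef return ("mesh.net" is an illustrative
filename):

	$net->save("mesh.net")
		or die "save failed: " . $net->error();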


=item $net->load($filename);

This will load from disk any network saved by save() and completely restore the internal
state at the point save() was called.

If the file is of an invalid file type, then load() will
return undef. Use the error() method, below, to print the error message.

If there were no errors, it will return a reference to $net.

UPDATE: $filename can now be a newline-separated set of mesh data. This enables you
to do $net->load(join("\n",<DATA>)) and other fun things. I added this mainly
for a demo I'm writing but am not quite done with yet. So, Cheers!
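
For example (the filename is illustrative):

	# Restore a network saved earlier with save().
	$net->load("mesh.net")
		or die "load failed: " . $net->error();

	# Or load mesh data embedded after __DATA__ in your script.
	$net->load(join("\n",<DATA>))
		or die "load failed: " . $net->error();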



=item $net->activation($layer,$type);

This sets the activation type for layer C<$layer>.

C<$type> can be one of four values:

	linear                    ( simply use sum of inputs as output )
	sigmoid    [ sigmoid_1 ]  ( only positive sigmoid )
	sigmoid_2                 ( positive / 0 /negative sigmoid )
	\&code_ref;

"sigmoid_1" is an alias for "sigmoid". 

The code ref option allows you to have a custom activation function for that layer.
The code ref is called with this syntax:

	$output = &$code_ref($sum_of_inputs, $self);
	
The code ref is expected to return a value to be used as the output of the node.
The code ref also has access to all the data of that node through the second argument,
a blessed hash reference to that node.

See CUSTOM ACTIVATION FUNCTIONS for information on several included activation functions
other than the ones listed above.

The activation type for each layer is preserved across load/save calls. 

EXCEPTION: Due to the constraints of Perl, I cannot load/save the actual subs that the code
ref option points to. Therefore, you must re-apply any code ref activation types after a 
load() call.
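
A sketch of that re-application (the layer number and the sub itself are
illustrative):

	my $scaled = sub {
		my ($sum, $node) = @_;
		return $sum * 0.5;
	};
	$net->activation(1, $scaled);   # set before saving
	$net->save("mesh.net");
	# ...later, in another run...
	$net->load("mesh.net");
	$net->activation(1, $scaled);   # must be re-applied after load()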

=item $net->node_activation($layer,$node,$type);

This sets the activation function for a specific node in a layer. The same notes apply
here as to the activation() method above.


=item $net->threshold($layer,$value);

This sets the activation threshold for a specific layer. The threshold is only used
when activation is set to "sigmoid", "sigmoid_1", or "sigmoid_2". 


=item $net->node_threshold($layer,$node,$value);

This sets the activation threshold for a specific node in a layer. The threshold is only
used when activation is set to "sigmoid", "sigmoid_1", or "sigmoid_2".  
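
For example (indices and values are illustrative):

	# Node 0 of layer 1 gets its own activation and threshold;
	# the rest of the layer is unaffected.
	$net->node_activation(1, 0, 'sigmoid_2');
	$net->node_threshold(1, 0, 0.25);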

=item $net->join_cols($array_ref,$row_length_in_elements,$high_state_character,$low_state_character);

This is more of a utility function than a necessary function of the package.
Instead of joining all the elements of the array together in one long string, like join(),
it prints the elements of $array_ref to STDOUT, adding a newline (\n) after every $row_length_in_elements
number of elements has passed. Additionally, if you include a $high_state_character and a $low_state_character,
it will print the $high_state_character (can be more than one character) for every element that
has a true value, and the $low_state_character for every element that has a false value. 
If you do not supply a $high_state_character, or the $high_state_character is a null or empty or 
undefined string, then join_cols() will just print the numerical value of each element separated
by a null character (\0). join_cols() defaults to the latter behaviour.
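
For example (the map and state characters are illustrative):

	# Print a 6-element map as rows of 3 elements, using '#' for
	# true elements and '.' for false ones.
	$net->join_cols([1,0,1,1,1,0], 3, '#', '.');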



=item $net->extend(\@array_of_hashes);

This allows you to re-apply any activations and thresholds with the same array ref with which
you created the network. This is useful for re-applying code ref activations after a load()
call without having to type the code ref twice.

You can also specify the extension in a simple array ref like this:

	$net->extend([2,3,1]);
	
This will simply add more nodes if needed to bring the number of nodes in each layer up to
the respective element's value. This works just like the respective new() constructor, above.

NOTE: Your net will probably require re-training after adding nodes.
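
A common pattern (sketched here with an illustrative spec) is to keep the
constructor spec around so code ref activations survive a save()/load() cycle:

	my $layers = [
		{ nodes => 2, activation => 'linear'  },
		{ nodes => 3, activation => sub { shift() * 2 } },
		{ nodes => 1, activation => 'sigmoid' },
	];
	my $net = AI::NeuralNet::Mesh->new($layers);
	# ...train, save(), and later load()...
	$net->extend($layers);   # re-applies the code ref activation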


=item $net->extend_layer($layer,\%hash);


=item $net->load_pcx($filename);

NOTE: To use this function, you must have PCX::Loader installed. If you do not have
PCX::Loader installed, it will return undef and store an error for you to retrieve with 
the error() method, below.

This is a treat... this routine will load a PCX-format file (yah, I know ... ancient 
format ... but it is the only one I could find specs for to write it in Perl. If 
anyone can get specs for any other formats, or could write a loader for them, I 
would be very grateful!) Anyways, it loads a PCX-format file that is exactly 320x200 with 8 bits 
per pixel, in pure Perl. It returns a blessed reference to a PCX::Loader object, which 
supports the following routines/members. See example files ex_pcx.pl and ex_pcxl.pl in 
the ./examples/ directory.

See C<perldoc PCX::Loader> for information on the methods of the object returned.

You can download PCX::Loader from 
	http://www.josiah.countystart.com/modules/get.pl?pcx-loader:mpod
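
A minimal sketch ("picture.pcx" is an illustrative filename):

	my $img = $net->load_pcx("picture.pcx")
		or die $net->error();
	# $img is a PCX::Loader object; see perldoc PCX::Loader
	# for its methods.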


=head1 CUSTOM ACTIVATION FUNCTIONS 

Included in this package are four custom activation functions meant to be used
as a guide to create your own, as well as to be useful to you in normal use of the
module. There is only one function exported by default into your namespace, which
is the range() function. These are not meant to be used as methods, but as functions.
These functions return code refs to Perl closures which do the actual work when
the time comes.


=item range(0..X);

=item range(@range);

=item range(A,B,C);

range() returns a closure limiting the output 
of that node to a specified set of values.
Good for use in output layers.

Usage example:
	$net->activation(4,range(0..5));
or (in the new() hash constructor form):
	..
	{ 
		nodes		=>	1,
		activation	=>	range 0..5
	}
	..
You can also pass an array containing the range
values (not an array ref), or you can pass a comma-
separated list of values as parameters:

	$net->activation(4,range(@numbers));
	$net->activation(4,range(6,15,26,106,28,3));

Note: when using a range() activator, train the
net TWICE on the data set, because the first time
the range() function searches for the top value in
the inputs, and therefore, results could fluctuate.
The second learning cycle guarantees more accuracy.

The actual code that implements the range closure is
a bit convoluted, so I will expand on it here as a simple
tutorial for custom activation functions.

	= line 1 = 	sub {
	= line 2 =		my @values = ( 6..10 );
	= line 3 =		my $sum   = shift;
	= line 4 =		my $self  = shift;
	= line 5 =		$self->{top_value}=$sum if($sum>$self->{top_value});
	= line 6 =		my $index = intr($sum/$self->{top_value}*$#values);
	= line 7 =		return $values[$index];
	= line 8 =	}

Now, the actual function fits in one line of code, but I expanded it a bit
here. Line 2 creates our array of allowed output values. Lines 3 and
4 grab our parameters off the stack, which allow us access to the
internals of this node. Line 5 checks to see if the sum output of this
node is higher than any previously encountered, and, if so, it sets
the marker higher. This also shows that you can use the $self reference
to maintain information across activations. This technique is also used
in the ramp() activator. Line 6 computes the index into the allowed
values array by first scaling the $sum to be between 0 and 1 and then
expanding it to fit smoothly inside the number of elements in the array. Then
we simply round to an integer and pluck that index from the array and
use it as the output value for that node. 

See? It's not that hard! Using custom activation functions, you could do
just about anything with the node that you want to, since you have
access to the node just as if you were a blessed member of that node's object.


=item ramp($r);

ramp() performs smooth ramp activation between 0 and 1 if $r is 1, 
or between -1 and 1 if $r is 2. $r defaults to 1.	

You can get this into your namespace with the ':acts' export 
tag as so:
	
	use AI::NeuralNet::Mesh ':acts';

Note: when using a ramp() activator, train the
net at least TWICE on the data set, because the first 
time the ramp() function searches for the top value in
the inputs, and therefore, results could fluctuate.
The second learning cycle guarantees more accuracy.

No code to show here, as it is almost exactly the same as range().
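
Usage is the same as for any other activation function (the layer number and
$r value here are illustrative):

	$net->activation(2, ramp(2));   # ramp between -1 and 1 on layer 2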


=item and_gate($threshold);

Self-explanatory, pretty much. This turns the node into a basic AND gate.
$threshold is used to decide if an input is true or false (1 or 0). If 
an input is below $threshold, it is false. $threshold defaults to 0.5.

You can get this into your namespace with the ':acts' export 
tag as so:
	
	use AI::NeuralNet::Mesh ':acts';

Let's look at the code real quick, as it shows how to get at the individual
input connections:

	= line 1 =	sub {
	= line 2 =		my $sum  = shift;
	= line 3 =		my $self = shift;
	= line 4 =		my $threshold = 0.50;
	= line 5 =		for my $x (0..$self->{_inputs_size}-1) { 
	= line 6 =			return 0.000001 if($self->{_inputs}->[$x]->{value}<$threshold)
	= line 7 =		}
	= line 8 =		return $sum/$self->{_inputs_size};
	= line 9 =	}

Lines 2 and 3 pull in our sum and self reference. Line 5 opens a loop to go over
all the input lines into this node. Line 6 looks at each input line's value 
and compares it to the threshold. If the value of that line is below threshold, then
we return 0.000001 to signify a 0 value. (We don't return a 0 value so that the network
doesn't get hung trying to multiply a 0 by a huge weight during training [it would just
keep getting a 0 as the product, and it would never learn]). Line 8 returns the mean 
value of all the inputs if all inputs were above threshold. 

Very simple, eh? :)
	
=item or_gate($threshold);

Self-explanatory. Turns the node into a basic OR gate; $threshold is used the same as above.

You can get this into your namespace with the ':acts' export 
tag as so:
	
	use AI::NeuralNet::Mesh ':acts';
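
For example (layer indices and thresholds are illustrative):

	$net->activation(1, and_gate(0.5));   # nodes in layer 1 act as AND gates
	$net->activation(2, or_gate(0.75));   # nodes in layer 2 act as OR gates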


=head1 VARIABLES

=item $AI::NeuralNet::Mesh::Connector

Using this variable is a step up from average use of this module. It 
should hold the fully qualified name of the function used to make the actual connections
between the nodes in the network. This contains '_c' by default, but if you use
this variable, be sure to give the fully qualified name of your method. For example,
in the ALN example, I use a connector in the main package called tree() instead of
the default connector. Before I call the new() constructor, I use this line of code:

	$AI::NeuralNet::Mesh::Connector = 'main::tree'
	
The tree() function is called as a blessed method when it is used internally, providing
access to the blessed reference in the first argument. See notes on CUSTOM NETWORK CONNECTORS,
below, for more information on creating your own custom connector.


=item $AI::NeuralNet::Mesh::DEBUG

This variable controls the verbosity level. It will not hurt anything to set this 
directly, yet most people find it easier to set it using the debug() method, or 
any of its aliases.
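
For example (the level value is illustrative):

	$net->debug(4);                      # via the method...
	$AI::NeuralNet::Mesh::DEBUG = 4;     # ...or by setting it directly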


=head1 CUSTOM NETWORK CONNECTORS

Creating custom network connectors is a step up from average use of this module. 
However, it can be very useful in creating other styles of neural networks, other
than the default fully-connected feed-forward network. 

You create a custom connector by setting the variable $AI::NeuralNet::Mesh::Connector
to the fully qualified name of the function used to make the actual connections
between the nodes in the network. This variable contains '_c' by default, but if you use
this variable, be sure to set it to the fully qualified name of your method. For example,
in the ALN example, I use a connector in the main package called tree() instead of
the default connector. Before I call the new() constructor, I use this line of code:

	$AI::NeuralNet::Mesh::Connector = 'main::tree'
	
The tree() function is called as a blessed method when it is used internally, providing
access to the blessed reference in the first argument. 

Example connector:

	sub connect_three {
    	my $self	=	shift;
    	my $r1a		=	shift;
    	my $r1b		=	shift;
    	my $r2a		=	shift;
    	my $r2b		=	shift;
    	my $mesh	=	$self->{mesh};
    	     
	    for my $y (0..($r1b-$r1a)-1) {
			$mesh->[$y+$r1a]->add_output_node($mesh->[$y+$r2a-1]) if($y>0);
			$mesh->[$y+$r1a]->add_output_node($mesh->[$y+$r2a]) if($y<($r2b-$r2a));
			$mesh->[$y+$r1a]->add_output_node($mesh->[$y+$r2a+1]) if($y<($r2b-$r2a));
		}
	}
	
This is a very simple example. It feeds the outputs of every node in the first layer
to the node directly above it, as well as the nodes on either side of the node directly
above it, checking the edges of the range, of course.

The network is stored internally as one long array of node objects. The goal here
is to connect one range of nodes in that array to another range of nodes. The calling
function has already calculated the indices into the array, and it passes them to you
as the four arguments after the $self reference. The first two arguments we will call
$r1a and $r1b. These define the start and end indices of the first range, or "layer." Likewise,
the next two arguments, $r2a and $r2b, define the start and end indices of the second
layer. We also grab a reference to the mesh array so we don't have to type the $self
reference over and over.

The loop that follows the arguments in the above example is very simple. It opens
a for() loop over the range of numbers, calculating the size instead of just going
$r1a..$r1b because we use the loop index with the next layer up as well.

$y + $r1a gives the index into the mesh array of the current node to connect the output FROM.
We need to connect this node's output lines to the next layer's input nodes. We do this
with a simple method of the outputting node (the node at $y+$r1a), called add_output_node().

add_output_node() takes one simple argument: a blessed reference to a node that it is supposed
to output its final value TO. We get this blessed reference with more simple addition.

$y + $r2a gives us the node directly above the first node (supposedly...I'll get to the "supposedly"
part in a minute.) By adding or subtracting from this number we get the neighbor nodes.
In the above example you can see we check the $y index to see that we haven't come close to
any of the edges of the range.

Using $y+$r2a we get the index of the node to pass to add_output_node() on the first node at
$y+B<$r1a>. 

And that's all there is to it!

For the fun of it, we'll take a quick look at the default connector.
Below is the actual default connector code, albeit a bit cleaned up, as well as
line numbers added.

	= line 1  =	sub _c {
	= line 2  =    	my $self	=	shift;
	= line 3  =    	my $r1a		=	shift;
	= line 4  =    	my $r1b		=	shift;
	= line 5  =    	my $r2a		=	shift;
	= line 6  =    	my $r2b		=	shift;
	= line 7  =    	my $mesh	=	$self->{mesh};
	= line 8  =		for my $y ($r1a..$r1b-1) {
	= line 9  =			for my $z ($r2a..$r2b-1) {
	= line 10 =				$mesh->[$y]->add_output_node($mesh->[$z]);
	= line 11 =			}
	= line 12 =		}
	= line 13 =	}
    
It's that easy! The simplest connector (well, almost, anyway). It just connects each
node in the first layer defined by ($r1a..$r1b) to every node in the second layer as
defined by ($r2a..$r2b).

Those of you that are still reading: if you do come up with any new connection functions,
PLEASE SEND THEM TO ME. I would love to see what others are doing, as well as get new
network ideas. I will probably include any connectors you send over in future releases (with
proper credit and permission, of course).

Anyways, happy coding!


=head1 WHAT CAN IT DO?

Rodin Porrata asked on the ai-neuralnet-backprop mailing list,
"What can they [Neural Networks] do?". In regards to that question,
consider the following:

Neural Nets are formed by simulated neurons connected together much the same
way the brain's neurons are. Neural networks are able to associate and
generalize without rules. They have solved problems in pattern recognition,
robotics, speech processing, financial predicting and signal processing, to
name a few.

One of the first impressive neural networks was NetTalk, which read in ASCII
text and correctly pronounced the words (producing phonemes which drove a
speech chip), even those it had never seen before.  Designed by Johns Hopkins
biophysicist Terry Sejnowski and Charles Rosenberg of Princeton in 1986,
this application made the Backpropagation training algorithm famous.  Using
the same paradigm, a neural network has been trained to classify sonar
returns from an undersea mine and rock.  This classifier, designed by
Sejnowski and R.  Paul Gorman, performed better than a nearest-neighbor
classifier.

The kinds of problems best solved by neural networks are those that people
are good at such as association, evaluation and pattern recognition.
Problems that are difficult to compute and do not require perfect answers,
just very good answers, are also best done with neural networks.  A quick,
very good response is often more desirable than a more accurate answer which
takes longer to compute.  This is especially true in robotics or industrial
controller applications.  Predictions of behavior and general analysis of
data are also affairs for neural networks.  In the financial arena, consumer
loan analysis and financial forecasting make good applications.  New network
designers are working on weather forecasts by neural networks (myself
included).  Currently, doctors are developing medical neural networks as an
aid in diagnosis.  Attorneys and insurance companies are also working on
neural networks to help estimate the value of claims.

Neural networks are poor at precise calculations and serial processing. They
are also unable to predict or recognize anything that does not inherently
contain some sort of pattern.  For example, they cannot predict the lottery,
since this is a random process.  It is unlikely that a neural network could
be built which has the capacity to think as well as a person does for two
reasons.  Neural networks are terrible at deduction, or logical thinking,
and the human brain is just too complex to completely simulate.  Also, some
problems are too difficult for present technology.  Real vision, for
example, is a long way off.

In short, Neural Networks are poor at precise calculations, but good at
association, evaluation, and pattern recognition.



