AI-NeuralNet-BackProp


BackProp.pm

	# It returns a percentage string.
	#
	sub learn_set {
		# Shift off $self only when called as a method; the first argument will then
		# stringify to something beginning with 'AI::'.
		my $self	=	shift if(substr($_[0],0,4) eq 'AI::'); 
		my $data	=	shift;
		my %args	=	@_;
		my $len		=	$#{$data}/2;	# loop bound covering every input/output pair in the set
		my $inc		=	$args{inc};
		my $max		=	$args{max};
	    my $error	=	$args{error};
	    my $p		=	(defined $args{flag})	?$args{flag}	   :1;
	    my $row		=	(defined $args{pattern})?$args{pattern}*2+1:1;
	    my ($fa,$fb);
		for my $x (0..$len) {
			print "\nLearning index $x...\n" if($AI::NeuralNet::BackProp::DEBUG);
			my $str = $self->learn( $data->[$x*2],			# The list of data to input to the net
					  		  		$data->[$x*2+1], 		# The output desired
					    			inc=>$inc,				# The starting learning gradient
					    			max=>$max,				# The maximum num of loops allowed
					    			error=>$error);			# The maximum (%) error allowed
			print $str if($AI::NeuralNet::BackProp::DEBUG); 
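			# Annotation (not module code): when the three tags are omitted, learn()
			# falls back to its own defaults, documented further down in this file:
			# inc => 0.20, max => 1024, error => -1 (no error cap). A standalone call
			# would look like:
			#
			#   my $str = $net->learn([ 1,2,3,4 ], [ 1,3,5,6 ],
			#                         inc => 0.20, max => 1024, error => -1);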

BackProp.pm

	# Initializes the base for a new neural network.
	# It is recommended that you call learn() before run()ing a pattern.
	# See documentation above for usage.
	sub new {
    	no strict;
    	my $type	=	shift;
		my $self	=	{};
		my $layers	=	shift;
		my $size	=	shift;
		my $out		=	shift || $size;
		my $flag	=	shift || 0;
		
		bless $self, $type;
		
		# If $layers is a string, then it will be numerically equal to 0, so try to load it
		# as a network file.
		if($layers == 0) {  
		    # We use a "1" flag as the second argument to indicate that we want load()
		    # to call the new constructor to make a network the same size as in the file
		    # and return a reference to the network, instead of just creating the network from
		    # a pre-existing reference.
			return $self->load($layers,1);
		}
				
		
		#print "Creating $size neurons in each layer for $layers layer(s)...\n";
		
		AI::NeuralNet::BackProp::out2 "Creating $size neurons in each layer for $layers layer(s)...\n";

BackProp.pm

		AI::NeuralNet::BackProp::out2 "Creating RUN and MAP systems for network...\n";
		#print "Creating RUN and MAP systems for network...\n";
		
		# Create a new runner and mapper for the network.
		$self->{RUN} = new AI::NeuralNet::BackProp::_run($self);
		$self->{MAP} = new AI::NeuralNet::BackProp::_map($self);
		
		$self->{SIZE}	=	$size;
		$self->{DIV}	=	$div;
		$self->{OUT}	=	$out;
		$self->{FLAG}	=	$flag;
		$self->{col_width}= 5;
		$self->{random} = 0.001;
		
		$self->initialize_group();
		
		return $self;
	}	
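	# Usage sketch (annotation, not module code): new() can be called either with the
	# layer/size arguments, or with a saved-network file name, in which case the
	# network is rebuilt from disk via load() as described in the comments above:
	#
	#   my $net  = new AI::NeuralNet::BackProp(2, 3);        # 2 layers, 3 neurons per layer
	#   my $net2 = new AI::NeuralNet::BackProp(2, 3, 1, 0);  # 1 output neuron, topology flag 0
	#   my $net3 = new AI::NeuralNet::BackProp("my.net");    # "my.net" is a made-up file name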

	# Save entire network state to disk.
	sub save {
		my $self	=	shift;
		my $file	=	shift;
		my $size	=	$self->{SIZE};
		my $div		=	$self->{DIV};
		my $out		=	$self->{OUT};
		my $flag	=	$self->{FLAG};

	    open(FILE,">$file") or return undef;
	    
	    print FILE "size=$size\n";
	    print FILE "div=$div\n";
	    print FILE "out=$out\n";
	    print FILE "flag=$flag\n";
	    print FILE "rand=$self->{random}\n";
	    print FILE "cw=$self->{col_width}\n";
		print FILE "crunch=$self->{_CRUNCHED}->{_LENGTH}\n";
		print FILE "rA=$self->{rA}\n";
		print FILE "rB=$self->{rB}\n";
		print FILE "rS=$self->{rS}\n";
		print FILE "rRef=",(($self->{rRef})?join(',',@{$self->{rRef}}):''),"\n";
			
		for my $a (0..$self->{_CRUNCHED}->{_LENGTH}-1) {
			print FILE "c$a=$self->{_CRUNCHED}->{LIST}->[$a]\n";

BackProp.pm

	
	    close(FILE);
	    
	    return $self;
	}
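	# Usage sketch (annotation, not module code): save() writes a flat "key=value" text
	# file (size, div, out, flag, rand, cw, the crunched-string list, and so on), and
	# load() reads such a file back into an existing network object:
	#
	#   $net->save("backprop.net");     # file name is just an example
	#   $net->load("backprop.net");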
        
	# Load entire network state from disk.
	sub load {
		my $self	=	shift;
		my $file	=	shift;
		my $load_flag = shift || 0;
	    
	    return undef if(!(-f $file));
	    
	    open(FILE,"$file") or return undef;
	    my @lines=<FILE>;
	    close(FILE);
	    
	    my %db;
	    for my $line (@lines) {
	    	chomp($line);
	    	my ($a,$b) = split /=/, $line;
	    	$db{$a}=$b;
	    }
	    
	    return undef if(!$db{"size"});
	    
	    if($load_flag) {
		    undef $self;
	
			# Create new network
		    $self = AI::NeuralNet::BackProp->new(intr($db{"size"}/$db{"div"}),
		    								     $db{"div"},
		    								     $db{"out"},
		    								     $db{"flag"});
		} else {
			$self->{DIV}	=	$db{"div"};
			$self->{SIZE}	=	$db{"size"};
			$self->{OUT}	=	$db{"out"};
			$self->{FLAG}	=	$db{"flag"};
		}
		
	    # Load variables
	    $self->{col_width}	= $db{"cw"};
	    $self->{random}		= $db{"rand"};
        $self->{rA}			= $db{"rA"};
		$self->{rB}			= $db{"rB"};
		$self->{rS}			= $db{"rS"};
		my @tmp				= split /\,/, $db{"rRef"};
		$self->{rRef}		= \@tmp;

BackProp.pm

	# Used internally by new() and learn().
	# This is the sub block that actually creates
	# the connections between the synapse chains and
	# also connects the run packages and the map packages
	# to the appropriate ends of the neuron grids.
	sub initialize_group {
		my $self	=	shift;
		my $size	=	$self->{SIZE};
		my $div		=	$self->{DIV};
		my $out		=	$self->{OUT};
		my $flag	=	$self->{FLAG};
		my $x		=	0; 
		my $y		=	0;
		
		# Reset map and run synapse counters.
		$self->{RUN}->{REGISTRATION} = $self->{MAP}->{REGISTRATION} = 0;
		
		AI::NeuralNet::BackProp::out2 "There will be $size neurons in this network group, with a division value of $div.\n";
		#print "There will be $size neurons in this network group, with a division value of $div.\n";
		
		# Create initial neuron packages in one long array for the entire group

BackProp.pm

			# the next 'layer'. Remember, layers only exist in terms of where the connections
			# are divided. For example, if a person requested 2 layers and 3 neurons per layer,
			# then there would be 6 neurons in the {NET}->[] list, and $div would be set to
			# 3. So we loop over the list, and every 3 neurons we connect each of those 3 
			# neurons to one input of every neuron in the next set of 3 neurons. Of course, this
			# is just an example; the 3 and the 2 are set by the new() constructor.
			
			# Flag values:
			# 0 - (default) - 
			# 	My feed-forward style: Each neuron in layer X is connected to one input of every
			#	neuron in layer Y. The best and most proven flag style.
			#
			#   ^   ^   ^               
			#	O\  O\ /O       Layer Y
			#   ^\\/^/\/^
			#	| //|\/\|
			#   |/ \|/ \|		
			#	O   O   O       Layer X
			#   ^   ^   ^
			#
			# 1	-
			#	In addition to flag 0, each neuron in layer X is connected to every input of 
			#	the neurons ahead of itself in layer X.
			# 2 - ("L-U Style") - 
			#	No, it's not "Learning-Unit" style. It gets its name from this: in a 2-layer,
			#	3-neuron network, the connections form an L-U pair, or a W, however you want to look
			#	at it.   
			#
			#   ^   ^   ^
			#	|	|	|
			#	O-->O-->O
			#   ^   ^   ^
			#	|	|   |
			#   |   |   |
			#	O-->O-->O
			#   ^   ^   ^
			#	|	|	|
			#
			#	As you can see, each neuron is connected to the next one in its layer, as well
			#	as the neuron directly above itself.
			
			for ($z=0; $z<$div; $z++) {
				if((!$flag) || ($flag == 1)) {
					for ($aa=0; $aa<$div; $aa++) {      
						$self->{NET}->[$y+$z]->connect($self->{NET}->[$y+$div+$aa]);
					}
				}
				if($flag == 1) {
					for ($aa=$z+1; $aa<$div; $aa++) {      
						$self->{NET}->[$y+$z]->connect($self->{NET}->[$y+$aa]);
					}
				}
				if($flag == 2) {
					$self->{NET}->[$y+$z]->connect($self->{NET}->[$y+$div+$z]);
					$self->{NET}->[$y+$z]->connect($self->{NET}->[$y+$z+1]) if($z<$div-1);
				}
				AI::NeuralNet::BackProp::out1 "\n";
			}
			AI::NeuralNet::BackProp::out1 "\n";             
		}
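		# Worked example (annotation, not module code): with 2 layers and 3 neurons per
		# layer, $div is 3 and {NET} holds neurons 0..5. Under flag 0 the loop above wires
		# every first-layer neuron to every second-layer neuron:
		#
		#   NET[0] -> NET[3], NET[4], NET[5]
		#   NET[1] -> NET[3], NET[4], NET[5]
		#   NET[2] -> NET[3], NET[4], NET[5]
		#
		# Flag 1 also adds NET[0]->NET[1], NET[0]->NET[2] and NET[1]->NET[2] within the
		# layer; flag 2 instead connects each neuron straight up (NET[0]->NET[3], etc.)
		# plus to its right-hand neighbour in the same layer (NET[0]->NET[1], NET[1]->NET[2]).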
		
		# These next two loops connect the _run and _map packages (the IO interface) to 
		# the start and end 'layers', respectively. These are how we insert data into

BackProp.pm

		my %args    =   @_;
		my $inc 	=	$args{inc} || 0.20;
		my $max     =   $args{max} || 1024;
		my $_mx		=	intr($max/10);
		my $_mi		=	0;
	    my $error   = 	(defined $args{error} && $args{error}>-1) ? $args{error} : -1;
  		my $div		=	$self->{DIV};
		my $size	=	$self->{SIZE};
		my $out		=	$self->{OUT};
		my $divide  =	AI::NeuralNet::BackProp->intr($div/$out);
		my ($a,$b,$y,$flag,$map,$loop,$diff,$pattern,$value);
		my ($t0,$it0);
		no strict 'refs';
		
		# Take care of crunching strings passed
		$omap = $self->crunch($omap) if($omap == 0);
		$res  = $self->crunch($res)  if($res  == 0);
		
		# Fill in empty spaces at end of results matrix with a 0 (note: the fill itself is currently disabled)
		if($#{$res}<$out) {
			for my $x ($#{$res}+1..$out) {
				#$res->[$x] = 0;
			}
		}
		
		# Debug
		AI::NeuralNet::BackProp::out1 "Num output neurons: $out, Input neurons: $size, Division: $divide\n";
		
		# Start benchmark timer and initialize a few variables
		$t0 	=	new Benchmark;
        $flag 	=	0; 
		$loop	=	0;   
		my $ldiff	=	0;
		my $dinc	=	0.0001;
		my $cdiff	=	0;
		$diff		=	100;
		$error 		= 	($error>-1)?$error:-1;
		
		# $flag only goes high when all neurons in output map compare exactly with
		# desired result map or $max loops is reached
		#	
		while(!$flag && ($max ? $loop<$max : 1)) {
			$it0 	=	new Benchmark;
			
			# Run the map
			$self->{RUN}->run($omap);
			
			# Retrieve last mapping  and initialize a few variables.
			$map	=	$self->map();
			$y		=	$size-$div;
			$flag	=	1;
			
			# Compare the result map we just ran with the desired result map.
			$diff 	=	pdiff($map,$res);
			
			# This adjusts the increment multiplier to decrease as the loops increase
			if($_mi > $_mx) {
				$dinc *= 0.1;
				$_mi   = 0;
			} 
			

BackProp.pm

				$inc += ($dinc*$diff)+($dinc*$cdiff*10);
			} else {
				$cdiff=0;
			}
			
			# Save last $diff
			$ldiff = $diff;
			
			# This catches a max error argument and handles it
			if(!($error>-1 ? $diff>$error : 1)) {
				$flag=1;
				last;
			}
			
			# Debugging
			AI::NeuralNet::BackProp::out4 "Difference: $diff\%\t Increment: $inc\tMax Error: $error\%\n";
			AI::NeuralNet::BackProp::out1 "\n\nMapping results from $map:\n";
			
			# This loop compares each element of the output map with the desired result map.
			# If they don't match exactly, we call weight() on the offending output neuron 
			# and tell it what it should be aiming for, and then the offending neuron will

BackProp.pm

				$a = $map->[$i];
				$b = $res->[$i];
				
				AI::NeuralNet::BackProp::out1 "\nmap[$i] is $a\n";
				AI::NeuralNet::BackProp::out1 "res[$i] is $b\n";
					
				for my $j (0..$divide-1) {
					if($a!=$b) {
						AI::NeuralNet::BackProp::out1 "Punishing $self->{NET}->[($i*$divide)+$j] at ",(($i*$divide)+$j)," ($i with $a) by $inc.\n";
						$l->[$y+($i*$divide)+$j]->weight($inc,$b) if($l->[$y+($i*$divide)+$j]);
						$flag	=	0;
					}
				}
			}
			
			# This counter is just used in the benchmarking operations.
			$loop++;
			
			AI::NeuralNet::BackProp::out1 "\n\n";
			
			# Benchmark this loop.

BackProp.pm

	# This acts just like a regular neuron by receiving
	# values from input synapses. Yet, unlike a regular
	# neuron, it doesn't weight the values, just stores
	# them to be retrieved by a call to map().
	sub input  {
		no strict 'refs';             
		my $self	=	shift;
		my $sid		=	shift;
		my $value	=	shift;
		my $size	=	$self->{PARENT}->{DIV};
		my $flag	=	1;
		$self->{OUTPUT}->[$sid]->{VALUE}	=	$self->{PARENT}->intr($value);
		$self->{OUTPUT}->[$sid]->{FIRED}	=	1;
		
		AI::NeuralNet::BackProp::out1 "Received value $self->{OUTPUT}->[$sid]->{VALUE} and sid $sid, self $self.\n";
	}
	
	# Here we simply collect the value of every neuron connected to this
	# one from the layer below us and return an array ref to the final map.
	sub map {
		my $self	=	shift;

BackProp.pm


	$ perl some_script.pl > .out

Then I can simply go and use emacs or any other text editor and read the output at my leisure,
rather than having to wait for it or page through it with 'more' as it scrolls by on the screen.

=head2 METHODS

=over 4

=item new AI::NeuralNet::BackProp($layers, $size [, $outputs, $topology_flag])

Returns a newly created neural network from an C<AI::NeuralNet::BackProp>
object. The network will have C<$layers> layers in it,
and each layer will have C<$size> neurons.

The optional parameter $outputs specifies the number of output neurons
to provide. If $outputs is not specified, it defaults to $size.
$outputs may not exceed $size; if it does, the new() constructor will
return undef.
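
For example, this creates a two-layer network with three neurons per layer and a
single output neuron (an illustrative sketch):

	my $net = new AI::NeuralNet::BackProp(2, 3, 1);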

The optional parameter, $topology_flag, defaults to 0 when not used. There are
three valid topology flag values:

B<0> I<default>
My feed-forward style: Each neuron in layer X is connected to one input of every
neuron in layer Y. The best and most proven flag style.

	^   ^   ^               
	O\  O\ /O       Layer Y
	^\\/^/\/^
	| //|\/\|
	|/ \|/ \|		
	O   O   O       Layer X
	^   ^   ^


(Sorry about the bad art...I am no ASCII artist! :-)


B<1>
In addition to flag 0, each neuron in layer X is connected to every input of 
the neurons ahead of itself in layer X.

B<2> I<("L-U Style")>
No, it's not "Learning-Unit" style. It gets its name from this: in a 2-layer,
3-neuron network, the connections form an L-U pair, or a W, however you want to look
at it.   


	^   ^   ^
	|   |   |

BackProp.pm

	my @set = (
		# inputs        outputs
		[ 1,2,3,4 ],  [ 1,3,5,6 ],
		[ 0,2,5,6 ],  [ 0,2,1,2 ]
	);


See the paragraph on measuring forgetfulness, below. There are 
two learn_set()-specific option tags available:

	flag     =>  $flag
	pattern  =>  $row

If "flag" is set to some TRUE value, as in "flag => 1" in the hash of options, or if the option "flag"
is not set, then it will return a percentage representing the amount of forgetfulness. Otherwise,
learn_set() will return an integer specifying the amount of forgetfulness when all the patterns 
are learned. 

If "pattern" is set, then learn_set() will use that pattern in the data set to measure forgetfulness by.
If "pattern" is omitted, it defaults to the first pattern in the set. Example:

	my @set = (
		[ 0,1,0,1 ],  [ 0 ],
		[ 0,0,1,0 ],  [ 1 ],


