AI-NeuralNet-BackProp


BackProp.pm (excerpt)

		$self->{OUTPUTS}->{SIZE} = ++$oid;
		return $self->{OUTPUTS}->{LIST}->[$oid]->{ID};
	}
1;
			 
package AI::NeuralNet::BackProp;
	
	use Benchmark;          
	use strict;
	
	# Returns the number of elements in an array ref, undef on error
	sub _FETCHSIZE {
		my $a=$_[0];
		return undef if(substr($a,0,5) ne "ARRAY");
		return scalar @{$a};	# 0 for an empty array, not undef
	}

	# Debugging subs
	$AI::NeuralNet::BackProp::DEBUG  = 0;

BackProp.pm (excerpt)

	#
	# It returns a percentage string.
	#
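	# (The percentage is the "forgetfulness" pdiff() between one pattern's desired
	# output and what the net actually returns for it after the whole set is learned.)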
	sub learn_set {
		my $self	=	shift if(substr($_[0],0,4) eq 'AI::'); 
		my $data	=	shift;
		my %args	=	@_;
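		# $data is a flat array ref of input/output pairs:
		# [ $input0, $output0, $input1, $output1, ... ]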
		my $len		=	int($#{$data}/2);	# Highest pattern index in the set
		my $inc		=	$args{inc};
		my $max		=	$args{max};
	    my $error	=	$args{error};
	    my $p		=	(defined $args{flag})	?$args{flag}	   :1;
	    my $row		=	(defined $args{pattern})?$args{pattern}*2+1:1;
	    my ($fa,$fb);
		for my $x (0..$len) {
			print "\nLearning index $x...\n" if($AI::NeuralNet::BackProp::DEBUG);
			my $str = $self->learn( $data->[$x*2],			# The list of data to input to the net
					  		  		$data->[$x*2+1], 		# The output desired
					    			inc=>$inc,				# The starting learning gradient
					    			max=>$max,				# The maximum num of loops allowed
					    			error=>$error);			# The maximum (%) error allowed
			print $str if($AI::NeuralNet::BackProp::DEBUG); 
		}
			
		
		my $res;
		$data->[$row] = $self->crunch($data->[$row]) if($data->[$row] == 0);
		
		if ($p) {
			$res=pdiff($data->[$row],$self->run($data->[$row-1]));
		} else {

BackProp.pm (excerpt)

	#
	# It returns a true value.
	#
	sub learn_set_rand {
		my $self	=	shift if(substr($_[0],0,4) eq 'AI::'); 
		my $data	=	shift;
		my %args	=	@_;
		my $len		=	int($#{$data}/2);	# Highest pattern index in the set
		my $inc		=	$args{inc};
		my $max		=	$args{max};
	    my $error	=	$args{error};
	    my @learned;
	    my $left	=	$len+1;		# Number of pattern indices not yet presented
		while($left>0) {
			_GET_X:
			my $x=$self->intr(rand()*$len);
			goto _GET_X if($learned[$x]);	# Pick again if this index has already been learned
			$learned[$x]=1;
			$left--;
			print "\nLearning index $x...\n" if($AI::NeuralNet::BackProp::DEBUG); 
			my $str =  $self->learn($data->[$x*2],			# The list of data to input to the net
					  		  		$data->[$x*2+1], 		# The output desired
					    			inc=>$inc,				# The starting learning gradient
			 		    			max=>$max,				# The maximum num of loops allowed
					    			error=>$error);			# The maximum (%) error allowed
			print $str if($AI::NeuralNet::BackProp::DEBUG); 
		}
			
		
		return 1; 
	}

	# Returns the index of the element in array REF passed with the highest comparative value
	sub high {
		shift if(substr($_[0],0,4) eq 'AI::'); 

BackProp.pm (excerpt)

	# results. See usage in POD.
	sub learn {
		my $self	=	shift;
		my $omap	=	shift;
		my $res		=	shift;
		my %args    =   @_;
		my $inc 	=	$args{inc} || 0.20;
		my $max     =   $args{max} || 1024;
		my $_mx		=	intr($max/10);
		my $_mi		=	0;
		my $error   = 	(defined $args{error} && $args{error}>-1) ? $args{error} : -1;
  		my $div		=	$self->{DIV};
		my $size	=	$self->{SIZE};
		my $out		=	$self->{OUT};
		my $divide  =	AI::NeuralNet::BackProp->intr($div/$out);
		my ($a,$b,$y,$flag,$map,$loop,$diff,$pattern,$value);
		my ($t0,$it0);
		no strict 'refs';
		
		# Take care of crunching strings passed
		$omap = $self->crunch($omap) if($omap == 0);

BackProp.pm (excerpt)

		AI::NeuralNet::BackProp::out1 "Num output neurons: $out, Input neurons: $size, Division: $divide\n";
		
		# Start benchmark timer and initialize a few variables
		$t0 	=	new Benchmark;
        $flag 	=	0; 
		$loop	=	0;   
		my $ldiff	=	0;
		my $dinc	=	0.0001;
		my $cdiff	=	0;
		$diff		=	100;
		$error 		= 	($error>-1)?$error:-1;
		
		# $flag only goes high when every neuron in the output map compares exactly with
		# the desired result map (or the error threshold is met); the loop also stops
		# once $max iterations are reached ($max of 0 means no iteration limit)
		#	
		while(!$flag && ($max ? $loop<$max : 1)) {
			$it0 	=	new Benchmark;
			
			# Run the map
			$self->{RUN}->run($omap);
			

BackProp.pm (excerpt)

			if($diff eq $ldiff) {
				$cdiff++;
				$inc += ($dinc*$diff)+($dinc*$cdiff*10);
			} else {
				$cdiff=0;
			}
			
			# Save last $diff
			$ldiff = $diff;
			
			# This catches a max error argument and handles it
			if(!($error>-1 ? $diff>$error : 1)) {
				$flag=1;
				last;
			}
			
			# Debugging
			AI::NeuralNet::BackProp::out4 "Difference: $diff\%\t Increment: $inc\tMax Error: $error\%\n";
			AI::NeuralNet::BackProp::out1 "\n\nMapping results from $map:\n";
			
			# This loop compares each element of the output map with the desired result map.
			# If they don't match exactly, we call weight() on the offending output neuron 
			# and tell it what it should be aiming for, and then the offending neuron will
			# try to adjust the weights of its synapses to get closer to the desired output.
			# See comments in the weight() method of AI::NeuralNet::BackProp for how this works.
			my $l=$self->{NET};
			for my $i (0..$out-1) {
				$a = $map->[$i];

BackProp.pm (excerpt)

UPDATED: You can now learn inputs with a 0 value. Beware though, it may not learn() a 0 value 
in the input map if you have randomness disabled. See NOTES on using a 0 value with randomness
disabled.

The first two arguments may be array refs (or now, strings), and they may be of different lengths.
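
For example, both of these forms are accepted (a sketch; the strings are arbitrary and
are auto-crunch()ed internally):

	$net->learn([ 0, 1, 1 ], [ 1, 0, 0 ]);                # array refs
	$net->learn("I love chips.", "That's Junk Food!");    # plain strings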

Options should be passed in hash form. There are three options:
	 
	 inc	=>	$learning_gradient
	 max	=>	$maximum_iterations
	 error	=>	$maximum_allowable_percentage_of_error
	 

$learning_gradient is an optional value used to adjust the weights of the internal
connections. If $learning_gradient is omitted, it defaults to 0.20.
 
$maximum_iterations is the maximum number of iterations the loop should run.
It defaults to 1024.  Set it to 0 if you never want the loop to quit before
the pattern is perfectly learned.

$maximum_allowable_percentage_of_error is the maximum allowable error to have. If 
this is set, then learn() will return when the percentage difference between the
actual results and desired results falls below $maximum_allowable_percentage_of_error.
If you do not include 'error', or $maximum_allowable_percentage_of_error is set to -1,
then learn() will not return until it gets an exact match for the desired result OR it
reaches $maximum_iterations.
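
For example, a learn() call using all three options might look like this (a minimal sketch;
the network dimensions and the data values are purely illustrative):

	use AI::NeuralNet::BackProp;

	# One layer, three input neurons, one output neuron (constructor form as in the SYNOPSIS)
	my $net = new AI::NeuralNet::BackProp(1,3,1);

	my $str = $net->learn([ 1, 0, 1 ],         # input map
	                      [ 1 ],               # desired output map
	                      inc   => 0.15,       # learning gradient
	                      max   => 500,        # give up after 500 loops
	                      error => 5);         # or stop once the error falls below 5%
	print $str;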


=item $net->learn_set(\@set, [ options ]);

UPDATED: Inputs and outputs in the dataset can now be strings. See information on auto-crunching
in learn().

This takes the same options as learn() and allows you to specify a set to learn, rather

BackProp.pm (excerpt)

=head1 NOTES

=item Learning 0s With Randomness Disabled

You can now use 0 values in any input maps. This is a good improvement over versions 0.40
and 0.42, where 0s were not allowed because the network would never completely finish
learning with a 0 in the input. 

However, allowing 0s requires one of two conditions for the network to learn correctly. Either you
must enable randomness with $net->random(0.0001) (any non-zero value works, see random() ), 
or you must set a maximum error with the 'error => 5' option (any other error value works 
as well). 
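
In code, the two approaches look roughly like this (a sketch, assuming a $net with three
inputs and one output; the 0.0001 randomness value and the 5% error ceiling are just
examples of workable settings):

	# Option 1: inject a little internal randomness so no hidden neuron output is a pure 0
	$net->random(0.0001);
	print $net->learn([ 0, 1, 0 ], [ 1 ]);

	# Option 2: leave randomness at 0, but accept up to 5% error instead of an exact match
	$net->random(0);
	print $net->learn([ 0, 1, 0 ], [ 1 ], error => 5);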

When randomness is enabled (that is, when you call random() with a value other than 0), a bit
of randomness is injected into the output of every neuron in the network, except for the input
and output neurons. The randomness is added as rand()*$rand, where $rand is the value that was
passed to the random() call. This ensures that the network never carries a pure 0 internally.
A pure 0 internally is a problem because a weight cannot change a 0: no matter what the weight
is, the product stays 0. With a small value such as 0.00001 instead, a large enough weight can
eventually push the neuron toward the desired output; with a pure 0 it could never produce
anything other than 0. 

The second option to allow for 0s is to enable a maximum error with the 'error' option in
learn() , learn_set() , and learn_set_rand() . This lets the network stop without having to
learn an output perfectly. 

For accuracy reasons, it is recommended that you work with 0s using the random() method.

If anyone has any thoughts/arguments/suggestions for using 0s in the network, let me know
at jdb@wcoil.com. 



README (excerpt)


Now, I know you people are out there using the module...
I can hear the fists hitting the keyboards in frustration. :-) Relieve
some of that frustration by e-mailing me and letting me know what
you think of the module and any suggestions you have. Especially you
guys in HP Labs and at Xerox! :-)

Use it, let me know what you all think. This is just a
ground-up write of a neural network, no code stolen or
anything else. It uses the -IDEA- of back-propagation
for error correction, with the -IDEA- of the delta
rule and Hopfield theory, as I understand them. So, don't expect
a classicist view of neural networking here. I simply wrote
from operating theory, not math theory. Any die-hard neural
networking gurus out there? Let me know how far off I am with
this code! :-)
	
Regards,

        ~ Josiah Bryan, <jdb@wcoil.com>

docs.htm (excerpt)

<P>Note, the old method of calling crunch on the values still works just as well.</P>
<P><B>UPDATED:</B> You can now learn inputs with a 0 value. Beware though, it may not <A HREF="#item_learn"><CODE>learn()</CODE></A> a 0 value 
in the input map if you have randomness disabled. See NOTES on using a 0 value with randomness
disabled.</P>
<P>The first two arguments may be array refs (or now, strings), and they may be of different lengths.</P>
<P>Options should be passed in hash form. There are three options:
</P>
<PRE>
         inc    =&gt;      $learning_gradient
         max    =&gt;      $maximum_iterations
         error  =&gt;      $maximum_allowable_percentage_of_error</PRE>
<P>$learning_gradient is an optional value used to adjust the weights of the internal
connections. If $learning_gradient is omitted, it defaults to 0.20.
</P>
<P>
$maximum_iterations is the maximum number of iterations the loop should run.
It defaults to 1024.  Set it to 0 if you never want the loop to quit before
the pattern is perfectly learned.</P>
<P>$maximum_allowable_percentage_of_error is the maximum allowable error to have. If 
this is set, then <A HREF="#item_learn"><CODE>learn()</CODE></A> will return when the percentage difference between the
actual results and desired results falls below $maximum_allowable_percentage_of_error.
If you do not include 'error', or $maximum_allowable_percentage_of_error is set to -1,
then <A HREF="#item_learn"><CODE>learn()</CODE></A> will not return until it gets an exact match for the desired result OR it
reaches $maximum_iterations.</P>
<P></P>
<DT><STRONG><A NAME="item_learn_set">$net-&gt;learn_set(\@set, [ options ]);</A></STRONG><BR>
<DD>
<B>UPDATED:</B> Inputs and outputs in the dataset can now be strings. See information on auto-crunching
in <A HREF="#item_learn"><CODE>learn()</CODE></A>.
<P>This takes the same options as <A HREF="#item_learn"><CODE>learn()</CODE></A> and allows you to specify a set to learn, rather
than individual patterns. A dataset is an array reference with at least two elements in the
array, each element being another array reference (or now, a scalar string). For each pattern to

docs.htm (excerpt)

<HR SIZE=1 COLOR=BLACK>
<H1><A NAME="notes">NOTES</A></H1>
<DL>
<DT><STRONG><A NAME="item_Learning_0s_With_Randomness_Disabled">Learning 0s With Randomness Disabled</A></STRONG><BR>
<DD>
You can now use 0 values in any input maps. This is a good improvement over versions 0.40
and 0.42, where 0s were not allowed because the network would never completely finish
learning with a 0 in the input.
<P>However, allowing 0s requires one of two conditions for the network to learn correctly. Either you
must enable randomness with $net-&gt;<A HREF="#item_random"><CODE>random(0.0001)</CODE></A> (any non-zero value works, see <A HREF="#item_random"><CODE>random()</CODE></A> ), 
or you must set a maximum error with the 'error =&gt; 5' option (any other error value works 
as well).</P>
<P>When randomness is enabled (that is, when you call <A HREF="#item_random"><CODE>random()</CODE></A> with a value other than 0), a bit
of randomness is injected into the output of every neuron in the network, except for the input and output
neurons. The randomness is added as rand()*$rand, where $rand is the value that was
passed to the <A HREF="#item_random"><CODE>random()</CODE></A> call. This ensures that the network never carries a pure 0 internally. A pure 0
internally is a problem because a weight cannot change a 0: no matter what the weight is, the
product stays 0. With a small value such as 0.00001 instead, a large enough weight can eventually
push the neuron toward the desired output; with a pure 0 it could never produce anything other
than 0.</P>
<P>The second option to allow for 0s is to enable a maximum error with the 'error' option in
<A HREF="#item_learn"><CODE>learn()</CODE></A> , <A HREF="#item_learn_set"><CODE>learn_set()</CODE></A> , and <A HREF="#item_learn_set_rand"><CODE>learn_set_rand()</CODE></A> . This lets the network stop without having to
learn an output perfectly.</P>
<P>For accuracy reasons, it is recommended that you work with 0s using the <A HREF="#item_random"><CODE>random()</CODE></A> method.</P>
<P>If anyone has any thoughts/arguments/suggestions for using 0s in the network, let me know
at <A HREF="mailto:jdb@wcoil.com">jdb@wcoil.com</A>.</P>
<P></P></DL>
<P>
<HR SIZE=1 COLOR=BLACK>
<H1><A NAME="other included packages">OTHER INCLUDED PACKAGES</A></H1>
<DL>

examples/ex_add2.pl (excerpt)

	  my @list;
	
	 my $t1=new Benchmark;
	 for my $a (1..$top)
	 {
	  print "Outer Loop: $a : ";
	
	  $forgetfulness = $net->learn_set( \@data,
	           inc  => $inc,
	           max  => 500,
	           error => -1);
	
	  print "Forgetfulness: $forgetfulness %\n";
	
	 }
	 my $t2=new Benchmark;
	
	 $runtime = timediff($t2,$t1);
	 print "run took ",timestr($runtime),"\n";
	
	

examples/ex_bmp.pl (excerpt)

		# Make it learn the whole dataset $top times
		my @list;
		my $top=3;
		for my $a (0..$top) {
			my $t1=new Benchmark;
			print "\n\nOuter Loop: $a\n";
			
			# Test forgetfulness
			my $f = $net->learn_set(\@data,	inc		=>	0.1,	
											max		=>	500,
											error	=>	-1);
			
			# Print it 
			print "\n\nForgetfullness: $f%\n";

			# Save net to disk				
			$net->save('images.net');

			my $t2=new Benchmark;
			my $td=timediff($t2,$t1);
			print "\nLoop $a took ",timestr($td),"\n";

examples/ex_dow.pl (excerpt)

		# Make it learn the whole dataset $top times
		my @list;
		my $top=1;
		for my $a (0..$top) {
			my $t1=new Benchmark;
			print "\n\nOuter Loop: $a\n";
			
			# Test forgetfulness
			my $f = $net->learn_set(\@data,	inc		=>	0.2,	
											max		=>	2000,
											error	=>	-1);
			
			# Print it 
			print "\n\nForgetfullness: $f%\n";

			# Save net to disk				
			$net->save('dow.dat');
            
			my $t2=new Benchmark;
			my $td=timediff($t2,$t1);
			print "\nLoop $a took ",timestr($td),"\n";


