AI-NeuralNet-BackProp

BackProp.pm

#!/usr/bin/perl	

# $Id: BackProp.pm,v 0.89 2000/08/12 01:05:27 josiah Exp $
#
# Copyright (c) 2000  Josiah Bryan  USA
#
# See AUTHOR section in pod text below for usage and distribution rights.   
# See UPDATES section in pod text below for info on what has changed in this release.
#

BEGIN {
	$AI::NeuralNet::BackProp::VERSION = "0.89";
}

#
# name:   AI::NeuralNet::BackProp
#
# author: Josiah Bryan 
# date:   Tuesday August 15 2000
# desc:   A simple back-propagation, feed-forward neural network with
#		  learning implemented via a generalization of Dobbs rule and
#		  several principles of Hopfield networks. 
# online: http://www.josiah.countystart.com/modules/AI/cgi-bin/rec.pl
#

package AI::NeuralNet::BackProp::neuron;
	
	use strict;
	
	# Dummy constructor
    sub new {
    	bless {}, shift
	}	
	
	# Rounds floats to ints
	sub intr  {
    	shift if(substr($_[0],0,4) eq 'AI::');
    	my $f	=	shift;
      	my $int	=	eval { int(sprintf("%.0f",$f)) };
      	return defined $int ? $int : 0;
	}
    
	
	# Receives input from other neurons. A neuron must
	# be registered as a synapse of this neuron for its
	# input to take effect.
	sub input {
		my $self 	 =	shift;
		my $sid		 =	shift;
		my $value	 =	shift;
		
		# We simply weight the value sent by the neuron. The neuron identifies itself to us
		# using the code we gave it when it registered itself with us. The code is in $sid, 
		# (synapse ID) and we use that to track the weight of the connection.
		# This line simply multiplies the value by its weight and gets the integer from it.
		$self->{SYNAPSES}->{LIST}->[$sid]->{VALUE}	=	intr($value	*	$self->{SYNAPSES}->{LIST}->[$sid]->{WEIGHT});
		$self->{SYNAPSES}->{LIST}->[$sid]->{FIRED}	=	1;                                 
		$self->{SYNAPSES}->{LIST}->[$sid]->{INPUT}	=	$value;
		
		# Debugger
		AI::NeuralNet::BackProp::out1("\nReceived input of $value, weighted to $self->{SYNAPSES}->{LIST}->[$sid]->{VALUE}, synapse weight is $self->{SYNAPSES}->{LIST}->[$sid]->{WEIGHT} (sid is $sid for $self).\n");
		AI::NeuralNet::BackProp::out1((($self->input_complete())?"All synapses have fired":"Not all synapses have fired"));
		AI::NeuralNet::BackProp::out1(" for $self.\n");
		
		# Check and see if all synapses have fired that are connected to this one.
		# If they have, then generate the output value for this synapse.
		$self->output() if($self->input_complete());
	}
	
	# Loops through and outputs to every neuron that this
	# neuron is registered as a synapse of.
	sub output {
		my $self	=	shift;
		my $size	=	$self->{OUTPUTS}->{SIZE} || 0;
		my $value	=	$self->get_output();
		for (0..$size-1) {
			AI::NeuralNet::BackProp::out1("Outputing to $self->{OUTPUTS}->{LIST}->[$_]->{PKG}, index $_, a value of $value with ID $self->{OUTPUTS}->{LIST}->[$_]->{ID}.\n");
			$self->{OUTPUTS}->{LIST}->[$_]->{PKG}->input($self->{OUTPUTS}->{LIST}->[$_]->{ID},$value);
		}
	}
	
	# Used internally by output().
	sub get_output {
		my $self		=	shift;
		my $size		=	$self->{SYNAPSES}->{SIZE} || 0;
		my $value		=	0;
		my $state		= 	0;
		my (@map,@weight);
	
	    # We loop through all the synapses connected to this one and add the weighted
	    # values together, saving them in a debugging list.
		for (0..$size-1) {
			$value	+=	$self->{SYNAPSES}->{LIST}->[$_]->{VALUE};
			$self->{SYNAPSES}->{LIST}->[$_]->{FIRED} = 0;
			
			$map[$_]=$self->{SYNAPSES}->{LIST}->[$_]->{VALUE};
			$weight[$_]=$self->{SYNAPSES}->{LIST}->[$_]->{WEIGHT};
		}
		                                              
		# Debugger
		AI::NeuralNet::BackProp::join_cols(\@map,5) if(($AI::NeuralNet::BackProp::DEBUG eq 3) || ($AI::NeuralNet::BackProp::DEBUG eq 2));
		AI::NeuralNet::BackProp::out2("Weights: ".join(" ",@weight)."\n");
		
		# Simply average the values and get the integer of the average.
		$state	=	intr($value/$size);
		
		# Debugger
		AI::NeuralNet::BackProp::out1("From get_output, value is $value, so state is $state.\n");
		
		# Possible future expansion for self-excitation. Not currently used.
		$self->{LAST_VALUE}	=	$value;
		
		# Just return the $state
		return $state;
	}
	
	# Used by input() to check if all registered synapses have fired.
	sub input_complete {
		my $self		=	shift;
		my $size		=	$self->{SYNAPSES}->{SIZE} || 0;
		my $retvalue	=	1;
		
		# Very simple loop: stays true only if every registered synapse has fired.
		for (0..$size-1) {
			$retvalue = 0 if(!$self->{SYNAPSES}->{LIST}->[$_]->{FIRED});
		}
		return $retvalue;
	}
	
	# Used to recursively adjust the weights of synapse input channels
	# to give a desired value. Designed to be called via 
	# AI::NeuralNet::BackProp::learn().
	sub weight	{                
		my $self		=	shift;
		my $ammount		=	shift;
		my $what		=	shift;
		my $size		=	$self->{SYNAPSES}->{SIZE} || 0;
		my $value;
		AI::NeuralNet::BackProp::out1("Weight: ammount is $ammount, what is $what with size at $size.\n");
		      
		# Now this sub is the main cog in the learning wheel. It is called recursively on 
		# each neuron that has been bad (given incorrect output.)
		for my $i (0..$size-1) {
			$value		=	$self->{SYNAPSES}->{LIST}->[$i]->{VALUE};

if(0) {
       
       		# Formula by Steve Purkis
       		# Converges very fast for low-value inputs. Has trouble converging on high-value
       		# inputs. Feel free to play with it and try to get it to work for high values.
			my $delta	=	$ammount * ($what - $value) * $self->{SYNAPSES}->{LIST}->[$i]->{INPUT};
			$self->{SYNAPSES}->{LIST}->[$i]->{WEIGHT}  +=  $delta;
			$self->{SYNAPSES}->{LIST}->[$i]->{PKG}->weight($ammount,$what);
}
			
			# This formula, in use by default, is original by me (Josiah Bryan) as far as I know.
			
			# If it is equal, then don't adjust
			#
			### Disabled because this sometimes causes 
			### infinite loops when learning with range limits enabled
			#
			#next if($value eq $what);
			
			# Adjust increment by the weight of the synapse of 
			# this neuron & apply direction delta
			my $delta = 
					$ammount * 
						($value<$what?1:-1) * 
							$self->{SYNAPSES}->{LIST}->[$i]->{WEIGHT};
			
			#print "($value,$what) delta:$delta\n";
			
			# Recursively apply
			$self->{SYNAPSES}->{LIST}->[$i]->{WEIGHT}  +=  $delta;
			$self->{SYNAPSES}->{LIST}->[$i]->{PKG}->weight($ammount,$what);
			    
		}
	}
	
	# Registers some neuron as a synapse of this neuron.           
	# This is called exclusively by connect(), except in
	# initialize_group(), which calls it directly to attach the _run package.
	sub register_synapse {
		my $self	=	shift;
		my $synapse	=	shift;
		my $sid		=	$self->{SYNAPSES}->{SIZE} || 0;
		$self->{SYNAPSES}->{LIST}->[$sid]->{PKG}		=	$synapse;
		$self->{SYNAPSES}->{LIST}->[$sid]->{WEIGHT}		=	1.00		if(!$self->{SYNAPSES}->{LIST}->[$sid]->{WEIGHT});
		$self->{SYNAPSES}->{LIST}->[$sid]->{FIRED}		=	0;       
		AI::NeuralNet::BackProp::out1("$self: Registering sid $sid with weight $self->{SYNAPSES}->{LIST}->[$sid]->{WEIGHT}, package $self->{SYNAPSES}->{LIST}->[$sid]->{PKG}.\n");
		$self->{SYNAPSES}->{SIZE} = ++$sid;
		return ($sid-1);
	}
	
	# Called via AI::NeuralNet::BackProp::initialize_group() to 
	# form the neuron grids.
	# This just registers another neuron as one that this neuron outputs to, and
	# then we ask that neuron to let us register as an input connection and we
	# save the sid that the output neuron returns.
	sub connect {
		my $self	=	shift;
		my $to		=	shift;
		my $oid		=	$self->{OUTPUTS}->{SIZE} || 0;
		AI::NeuralNet::BackProp::out1("Connecting $self to $to at $oid...\n");
		$self->{OUTPUTS}->{LIST}->[$oid]->{PKG}	=	$to;
 		$self->{OUTPUTS}->{LIST}->[$oid]->{ID}	=	$to->register_synapse($self);
		$self->{OUTPUTS}->{SIZE} = ++$oid;
		return $self->{OUTPUTS}->{LIST}->[$oid-1]->{ID};
	}
1;
			 
package AI::NeuralNet::BackProp;
	
	use Benchmark;          
	use strict;
	
	# Returns the number of elements in an array ref, undef on error
	sub _FETCHSIZE {
		my $a=$_[0];
		my ($b,$x);
		return undef if(substr($a,0,5) ne "ARRAY");
		foreach $b (@{$a}) { $x++ };
		return $x;
	}

	# Debugging subs
	$AI::NeuralNet::BackProp::DEBUG  = 0;
	sub whowasi { (caller(1))[3] . '()' }
	sub debug { shift; $AI::NeuralNet::BackProp::DEBUG = shift || 0; } 
	sub out1  { print  shift() if ($AI::NeuralNet::BackProp::DEBUG eq 1) }
	sub out2  { print  shift() if (($AI::NeuralNet::BackProp::DEBUG eq 1) || ($AI::NeuralNet::BackProp::DEBUG eq 2)) }
	sub out3  { print  shift() if ($AI::NeuralNet::BackProp::DEBUG) }
	sub out4  { print  shift() if ($AI::NeuralNet::BackProp::DEBUG eq 4) }
	
	# Rounds a floating-point to an integer with int() and sprintf()
	sub intr  {
    	shift if(substr($_[0],0,4) eq 'AI::');
    	my $f	=	shift;
      	my $int	=	eval { int(sprintf("%.0f",$f)) };
      	return defined $int ? $int : 0;
	}
    
	# Used to format array ref into columns
	# Usage: 
	#	join_cols(\@array,$row_length_in_elements,$high_state_character,$low_state_character);
	# Can also be called as method of your neural net.
	# If $high_state_character is null, prints actual numerical values of each element.
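	# For example (illustrative values), a 6-element map printed 3 to a row:
	#
	#	join_cols([1,0,1,1,0,1], 3, "#", ".");
	#
	# prints:
	#
	#	#.#
	#	#.#
	#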
	sub join_cols {
		no strict 'refs';
		shift if(substr($_[0],0,4) eq 'AI::'); 
		my $map		=	shift;
		my $break   =	shift;
		my $a		=	shift;
		my $b		=	shift;
		my $x;
		foreach my $el (@{$map}) { 
			my $str = ((int($el))?$a:$b);
			$str=$el."\0" if(!$a);
			print $str;
			$x++;
			if($x>$break-1) {
				print "\n";
				$x=0;
			}
		}
		print "\n";
	}
	
	# Returns percentage difference between all elements of two
	# array refs of exact same length (in elements).
	# Now calculates actual difference in numerical value.
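	# For example (illustrative values): pdiff([1,2],[1,4]) finds only the second
	# pair differing, by (4-2)/4 = 50%, so it returns "50.0000000000".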
	sub pdiff {
		no strict 'refs';
		shift if(substr($_[0],0,4) eq 'AI::'); 
		my $a1	=	shift;
		my $a2	=	shift;
		my $a1s	=	$#{$a1}; #AI::NeuralNet::BackProp::_FETCHSIZE($a1);
		my $a2s	=	$#{$a2}; #AI::NeuralNet::BackProp::_FETCHSIZE($a2);
		my ($a,$b,$diff,$t);
		$diff=0;
		#return undef if($a1s ne $a2s);	# must be same length
		for my $x (0..$a1s) {
			$a = $a1->[$x];
			$b = $a2->[$x];
			if($a!=$b) {
				if($a<$b){$t=$a;$a=$b;$b=$t;}
				$a=1 if(!$a);
				$diff+=(($a-$b)/$a)*100;
			}
		}
		$a1s = 1 if(!$a1s);
		return sprintf("%.10f",($diff/$a1s));
	}
	
	# Returns the absolute difference between $fa and $fb as a percentage of $fa
	sub p {
		shift if(substr($_[0],0,4) eq 'AI::'); 
		my ($fa,$fb)=(shift,shift);
		sprintf("%.3f",((($fb-$fa)*((($fb-$fa)<0)?-1:1))/$fa)*100);
	}
	
	# This sub will take an array ref of a data set, which it expects in this format:
	#   my @data_set = (	[ ...inputs... ], [ ...outputs ... ],
	#				   				   ... rows ...
	#				   );
	#
	# This sub returns the percentage of 'forgetfulness' when the net learns all the
	# data in the set in order. Usage:
	#
	#	 learn_set(\@data,[ options ]);
	#
	# Options are options in hash form. They can be of any form that $net->learn takes.
	#
	# It returns a percentage string.
	#
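	# Example (illustrative sketch; assumes a $net built with two inputs and one
	# output, and made-up option values):
	#
	#	my @data = (
	#		[ 0, 1 ],	[ 1 ],
	#		[ 1, 0 ],	[ 0 ],
	#	);
	#	my $forgetfulness = $net->learn_set(\@data, inc => 0.2, max => 1024);
	#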
	sub learn_set {
		my $self	=	shift if(substr($_[0],0,4) eq 'AI::'); 
		my $data	=	shift;
		my %args	=	@_;
		my $len		=	int($#{$data}/2);
		my $inc		=	$args{inc};
		my $max		=	$args{max};
	    my $error	=	$args{error};
	    my $p		=	(defined $args{flag})	?$args{flag}	   :1;
	    my $row		=	(defined $args{pattern})?$args{pattern}*2+1:1;
	    my ($fa,$fb);
		for my $x (0..$len) {
			print "\nLearning index $x...\n" if($AI::NeuralNet::BackProp::DEBUG);
			my $str = $self->learn( $data->[$x*2],			# The list of data to input to the net
					  		  		$data->[$x*2+1], 		# The output desired
					    			inc=>$inc,				# The starting learning gradient
					    			max=>$max,				# The maximum num of loops allowed
					    			error=>$error);			# The maximum (%) error allowed
			print $str if($AI::NeuralNet::BackProp::DEBUG); 
		}
			
		
		my $res;
		$data->[$row] = $self->crunch($data->[$row]) if($data->[$row] == 0);
		
		if ($p) {
			$res=pdiff($data->[$row],$self->run($data->[$row-1]));
		} else {
			$res=$data->[$row]->[0]-$self->run($data->[$row-1])->[0];
		}
		return $res;
	}
	
	# This sub will take an array ref of a data set, which it expects in this format:
	#   my @data_set = (	[ ...inputs... ], [ ...outputs ... ],
	#				   				   ... rows ...
	#				   );
	#
	# This sub returns the percentage of 'forgetfulness' when the net learns all the
	# data in the set in RANDOM order. Usage:
	#
	#	 learn_set_rand(\@data,[ options ]);
	#
	# Options are options in hash form. They can be of any form that $net->learn takes.
	#
	# It returns a true value.
	#
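	# Example (illustrative sketch; same data format as learn_set() above):
	#
	#	$net->learn_set_rand(\@data, inc => 0.2, max => 1024);
	#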
	sub learn_set_rand {
		my $self	=	shift if(substr($_[0],0,4) eq 'AI::'); 
		my $data	=	shift;
		my %args	=	@_;
		my $len		=	int($#{$data}/2);
		my $inc		=	$args{inc};
		my $max		=	$args{max};
	    my $error	=	$args{error};
	    my @learned;
	    my $left	=	$len+1;
		while($left>0) {
			my $x=$self->intr(rand()*$len);
			next if($learned[$x]);
			$learned[$x]=1;
			$left--;
			print "\nLearning index $x...\n" if($AI::NeuralNet::BackProp::DEBUG); 
			my $str =  $self->learn($data->[$x*2],			# The list of data to input to the net
					  		  		$data->[$x*2+1], 		# The output desired
					    			inc=>$inc,				# The starting learning gradient
			 		    			max=>$max,				# The maximum num of loops allowed
					    			error=>$error);			# The maximum (%) error allowed
			print $str if($AI::NeuralNet::BackProp::DEBUG); 
		}
			
		
		return 1; 
	}

	# Returns the index of the element in array REF passed with the highest comparative value
	sub high {
		shift if(substr($_[0],0,4) eq 'AI::'); 
		my $ref1	=	shift;
		
		my ($el,$len,$tmp);
		foreach $el (@{$ref1}) {
			$len++;
		}
		$tmp=0;
		for my $x (0..$len-1) {
			$tmp = $x if((@{$ref1})[$x] > (@{$ref1})[$tmp]);
		}
		return $tmp;
	}
	
	# Returns the index of the element in array REF passed with the lowest comparative value
	sub low {
		shift if(substr($_[0],0,4) eq 'AI::'); 
		my $ref1	=	shift;
		
		my ($el,$len,$tmp);
		foreach $el (@{$ref1}) {
			$len++;
		}
		$tmp=0;
		for my $x (0..$len-1) {
			$tmp = $x if((@{$ref1})[$x] < (@{$ref1})[$tmp]);
		}
		return $tmp;
	}  
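	
	# For example (illustrative values): high([2,7,3]) returns 1 (the index of 7),
	# and low([2,7,3]) returns 0 (the index of 2).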
	
	# Returns an AI::NeuralNet::BackProp::PCX object for the image file passed
	sub load_pcx {
		my $self	=	shift;
		return AI::NeuralNet::BackProp::PCX->new($self,shift);
	}	
	
	# Crunch a string of words into a map
	sub crunch {
		my $self	=	shift;
		my (@map,$ic);
		my @ws 		=	split(/[\s\t]/,shift);
		for my $a (0..$#ws) {
			$ic=$self->crunched($ws[$a]);
			if(!defined $ic) {
				$self->{_CRUNCHED}->{LIST}->[$self->{_CRUNCHED}->{_LENGTH}++]=$ws[$a];
				$map[$a]=$self->{_CRUNCHED}->{_LENGTH};
			} else {
				$map[$a]=$ic;
            }
		}
		return \@map;
	}
	
	# Finds if a word has been crunched.
	# Returns undef on failure, word index for success.
	sub crunched {
		my $self	=	shift;
		for my $a (0..$self->{_CRUNCHED}->{_LENGTH}-1) {
			return $a+1 if($self->{_CRUNCHED}->{LIST}->[$a] eq $_[0]);
		}
		return undef;
	}
	
	# Alias for crunched(), above
	sub word { crunched(@_) }
	
	# Uncrunches a map (array ref) back into a string of space-separated words and returns the string
	sub uncrunch {
		my $self	=	shift;
		my $map = shift;
		my ($c,$el,$x);
		foreach $el (@{$map}) {
			$c .= $self->{_CRUNCHED}->{LIST}->[$el-1].' ';
		}
		return $c;
	}
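	
	# Example (illustrative): round-tripping a string through the word map.
	# On a freshly created net:
	#
	#	my $map  = $net->crunch("I love neural nets");   # [1,2,3,4]
	#	my $text = $net->uncrunch($map);                  # "I love neural nets "
	#
	# (uncrunch() appends a space after each word, as coded above.)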
	
	# Sets/gets the randomness factor in the network. Setting a value of 0 disables random factors.
	sub random {
		my $self	=	shift;
		my $rand	=	shift;
		return $self->{random}	if(!(defined $rand));
		$self->{random}	=	$rand;
	}
	
	# Sets/gets column width for printing lists in debug modes 1,3, and 4.
	sub col_width {
		my $self	=	shift;
		my $width	=	shift;
		return $self->{col_width}	if(!$width);
		$self->{col_width}	=	$width;
	}
	
	# Sets/Removes value ranging
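	# Example (illustrative sketch): passing an array ref constrains the network's
	# outputs to values drawn from that list, e.g.
	#
	#	$net->range([0..3]);    # outputs are mapped onto 0, 1, 2, or 3
	#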
	sub range {
		my $self	=	shift;
		my $ref		=	shift;
		my $b		=	shift;
		if(substr($ref,0,5) ne "ARRAY") {
			if(($ref == 0) && (!defined $b)) {
				$ref	= $self->crunch($ref);
				#print "\$ref is a string, crunching to ",join(',',@{$ref}),"\n";
			} else {
    			my $a	= $ref;
    			$a		= $self->crunch($a)->[0] if($a == 0);
				$b		= $self->crunch($b)->[0] if($b == 0);
				$_[++$#_] = $a;
    			$_[++$#_] = $b;
    			$ref	= \@_;
				#print "Found ranged definition, joined to ",join(',',@{$ref}),"\n";
			}
		}
		my $rA		=	0;
		my $rB		=	$#{$ref};
		my $rS		=	0; #shift;
		if(!$rA && !$rB) {
			$self->{rA}=$self->{rB}=-1;
			return undef;
		}
		if($rB<$rA){my $t=$rA;$rA=$rB;$rB=$t};
		$self->{rA}=$rA;
		$self->{rB}=$rB;
		$self->{rS}=$rS if($rS);
		$self->{rRef} = $ref;
		return $ref;
	}
	
	# Used internally to scale outputs to fit range
	sub _range {
		my $self	=	shift;  
		my $in		=	shift;
		my $rA		=	$self->{rA};
		my $rB		=	$self->{rB};
		my $rS		=	$self->{rS};
		my $r		=	$rB;#-$rA+1;
		return $in if(!$rA && !$rB);
		my $l		=	$self->{OUT}-1;
		my $out 	=	[];
		# Adjust for a maximum outside what we have seen so far
		for my $i (0..$l) {
			$rS=$in->[$i] if($in->[$i]>$rS);
		}
		#print "\$l:$l,\$rA:$rA,\$rB:$rB,\$rS:$rS,\$r:$r\n";
		# Loop through, convert values to a percentage of the maximum, then multiply the
		# percentage by the range and add the base of the range to get the final value
		for my $i (0..$l) {
			#print "\$i:$i,\$in:$in->[$i]\n";
			$rS=1 if(!$rS);
			my $t=intr((($rS-$in->[$i])/$rS)*$r+$rA);
			#print "t:$t,$self->{rRef}->[$t],i:$i\n";
			$out->[$i] = $self->{rRef}->[$t];
		}
		$self->{rS}=$rS;
		return $out;
	}
			
		
	# Initializes the base for a new neural network.
	# It is recommended that you call learn() before run()ing a pattern.
	# See documentation above for usage.
	sub new {
    	no strict;
    	my $type	=	shift;
		my $self	=	{};
		my $layers	=	shift;
		my $size	=	shift;
		my $out		=	shift || $size;
		my $flag	=	shift || 0;
		
		bless $self, $type;
		
		# If $layers is a string, it will be numerically equal to 0, so try to load it
		# as a network file.
		if($layers == 0) {  
		    # We use a "1" flag as the second argument to indicate that we want load()
		    # to call the new() constructor to make a network the same size as in the file
		    # and return a reference to the new network, instead of loading into the
		    # pre-existing reference
			return $self->load($layers,1);
		}
				
		
		#print "Creating $size neurons in each layer for $layers layer(s)...\n";
		
		AI::NeuralNet::BackProp::out2 "Creating $size neurons in each layer for $layers layer(s)...\n";
		
		# Error checking
		return undef if($out>$size);
		
		# When this is called, they tell us how many layers and how many neurons per layer.
		# But really what we store is one long line of neurons that is only divided in theory
		# when connecting the outputs and inputs.
		my $div = $size;
		$size = $layers * $size;
		
		AI::NeuralNet::BackProp::out2 "Creating RUN and MAP systems for network...\n";
		#print "Creating RUN and MAP systems for network...\n";
		
		# Create a new runner and mapper for the network.
		$self->{RUN} = new AI::NeuralNet::BackProp::_run($self);
		$self->{MAP} = new AI::NeuralNet::BackProp::_map($self);
		
		$self->{SIZE}	=	$size;
		$self->{DIV}	=	$div;
		$self->{OUT}	=	$out;
		$self->{FLAG}	=	$flag;
		$self->{col_width}= 5;
		$self->{random} = 0.001;
		
		$self->initialize_group();
		
		return $self;
	}	

	# Save entire network state to disk.
	sub save {
		my $self	=	shift;
		my $file	=	shift;
		my $size	=	$self->{SIZE};
		my $div		=	$self->{DIV};
		my $out		=	$self->{OUT};
		my $flag	=	$self->{FLAG};

	    open(FILE,">$file");
	    
	    print FILE "size=$size\n";
	    print FILE "div=$div\n";
	    print FILE "out=$out\n";
	    print FILE "flag=$flag\n";
	    print FILE "rand=$self->{random}\n";
	    print FILE "cw=$self->{col_width}\n";
		print FILE "crunch=$self->{_CRUNCHED}->{_LENGTH}\n";
		print FILE "rA=$self->{rA}\n";
		print FILE "rB=$self->{rB}\n";
		print FILE "rS=$self->{rS}\n";
		print FILE "rRef=",(($self->{rRef})?join(',',@{$self->{rRef}}):''),"\n";
			
		for my $a (0..$self->{_CRUNCHED}->{_LENGTH}-1) {
			print FILE "c$a=$self->{_CRUNCHED}->{LIST}->[$a]\n";
		}
		
		my $w;
		for my $a (0..$self->{SIZE}-1) {
			$w="";
			for my $b (0..$self->{DIV}-1) {
				$w .= "$self->{NET}->[$a]->{SYNAPSES}->{LIST}->[$b]->{WEIGHT},";
			}
			chop($w);
			print FILE "n$a=$w\n";
		}
	
	    close(FILE);
	    
	    return $self;
	}
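	
	# Example (illustrative; the file name is made up):
	#
	#	$net->save("my_net.dat");
	#	my $net2 = new AI::NeuralNet::BackProp("my_net.dat");   # new() re-loads when given a file name
	#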
        
	# Load entire network state from disk.
	sub load {
		my $self	=	shift;
		my $file	=	shift;
		my $load_flag = shift || 0;
	    
	    return undef if(!(-f $file));
	    
	    open(FILE,"$file");
	    my @lines=<FILE>;
	    close(FILE);
	    
	    my %db;
	    for my $line (@lines) {
	    	chomp($line);
	    	my ($a,$b) = split /=/, $line;
	    	$db{$a}=$b;
	    }
	    
	    return undef if(!$db{"size"});
	    
	    if($load_flag) {
		    undef $self;
	
			# Create new network
		    $self = AI::NeuralNet::BackProp->new(intr($db{"size"}/$db{"div"}),
		    								     $db{"div"},
		    								     $db{"out"},
		    								     $db{"flag"});
		} else {
			$self->{DIV}	=	$db{"div"};
			$self->{SIZE}	=	$db{"size"};
			$self->{OUT}	=	$db{"out"};
			$self->{FLAG}	=	$db{"flag"};
		}
		
	    # Load variables
	    $self->{col_width}	= $db{"cw"};
	    $self->{random}		= $db{"rand"};
        $self->{rA}			= $db{"rA"};
		$self->{rB}			= $db{"rB"};
		$self->{rS}			= $db{"rS"};
		my @tmp				= split /\,/, $db{"rRef"};
		$self->{rRef}		= \@tmp;
		
	   	$self->{_CRUNCHED}->{_LENGTH}	=	$db{"crunch"};
		
		for my $a (0..$self->{_CRUNCHED}->{_LENGTH}-1) {
			$self->{_CRUNCHED}->{LIST}->[$a] = $db{"c$a"}; 
		}
		
		$self->initialize_group();
	    
		my ($w,@l);
		for my $a (0..$self->{SIZE}-1) {
			$w=$db{"n$a"};
			@l=split /\,/, $w;
			for my $b (0..$self->{DIV}-1) {
				$self->{NET}->[$a]->{SYNAPSES}->{LIST}->[$b]->{WEIGHT}=$l[$b];
			}
		}
	
	    return $self;
	}

	# Dumps the complete weight matrix of the network to STDOUT
	sub show {
		my $self	=	shift;
		for my $a (0..$self->{SIZE}-1) {
			print "Neuron $a: ";
			for my $b (0..$self->{DIV}-1) {
				print $self->{NET}->[$a]->{SYNAPSES}->{LIST}->[$b]->{WEIGHT},"\t";
			}
			print "\n";
		}
	}
	
	# Used internally by new() and learn().
	# This is the sub block that actually creates
	# the connections between the synapse chains and
	# also connects the run packages and the map packages
	# to the appropriate ends of the neuron grids.
	sub initialize_group() {
		my $self	=	shift;
		my $size	=	$self->{SIZE};
		my $div		=	$self->{DIV};
		my $out		=	$self->{OUT};
		my $flag	=	$self->{FLAG};
		my $x		=	0; 
		my $y		=	0;
		
		# Reset map and run synapse counters.
		$self->{RUN}->{REGISTRATION} = $self->{MAP}->{REGISTRATION} = 0;
		
		AI::NeuralNet::BackProp::out2 "There will be $size neurons in this network group, with a division value of $div.\n";
		#print "There will be $size neurons in this network group, with a division value of $div.\n";
		
		# Create initial neuron packages in one long array for the entire group
		for($y=0; $y<$size; $y++) {
			#print "Initalizing neuron $y...     \r";
			$self->{NET}->[$y]=new AI::NeuralNet::BackProp::neuron();
		}
		
		AI::NeuralNet::BackProp::out2 "Creating synapse grid...\n";
		
		my $z  = 0;    
		my $aa = 0;
		my ($n0,$n1,$n2);
		
		# Outer loop loops over every neuron in group, incrementing by the number
		# of neurons supposed to be in each layer
		
		for($y=0; $y<$size; $y+=$div) {
			if($y+$div>=$size) {
				last;
			}
			
			# Inner loop connects every neuron in this 'layer' to one input of every neuron in
			# the next 'layer'. Remember, layers only exist in terms of where the connections
			# are divided. For example, if a person requested 2 layers and 3 neurons per layer,
			# then there would be 6 neurons in the {NET}->[] list, and $div would be set to
			# 3. So we loop over the list, and every 3 neurons we connect each of those 3 
			# neurons to one input of every neuron in the next set of 3 neurons. Of course, this
			# is just an example; the 3 and the 2 are set by the new() constructor.
			
			# Flag values:
			# 0 - (default) - 
			# 	My feed-forward style: Each neuron in layer X is connected to one input of every
			#	neuron in layer Y. The best and most proven flag style.
			#
			#   ^   ^   ^               
			#	O\  O\ /O       Layer Y
			#   ^\\/^/\/^
			#	| //|\/\|
			#   |/ \|/ \|		
			#	O   O   O       Layer X
			#   ^   ^   ^
			#
			# 1	-
			#	In addition to flag 0, each neuron in layer X is connected to every input of 
			#	the neurons ahead of itself in layer X.
			# 2 - ("L-U Style") - 
			#	No, it's not "Learning-Unit" style. It gets its name from this: In a 2 layer, 3


			#   ^   ^   ^
			#	|	|   |
			#   |   |   |
			#	O-->O-->O
			#   ^   ^   ^
			#	|	|	|
			#
			#	As you can see, each neuron is connected to the next one in its layer, as well
			#	as the neuron directly above itself.
			
			for ($z=0; $z<$div; $z++) {
				if((!$flag) || ($flag == 1)) {
					for ($aa=0; $aa<$div; $aa++) {      
						$self->{NET}->[$y+$z]->connect($self->{NET}->[$y+$div+$aa]);
					}
				}
				if($flag == 1) {
					for ($aa=$z+1; $aa<$div; $aa++) {      
						$self->{NET}->[$y+$z]->connect($self->{NET}->[$y+$aa]);
					}
				}
				if($flag == 2) {
					$self->{NET}->[$y+$z]->connect($self->{NET}->[$y+$div+$z]);
					$self->{NET}->[$y+$z]->connect($self->{NET}->[$y+$z+1]) if($z<$div-1);
				}
				AI::NeuralNet::BackProp::out1 "\n";
			}
			AI::NeuralNet::BackProp::out1 "\n";             
		}
		
		# These next two loops connect the _run and _map packages (the IO interface) to 
		# the start and end 'layers', respectively. These are how we insert data into
		# the network and how we get data from the network. The _run and _map packages 
		# are connected to the neurons so that the neurons think that the IO packages are
		# just another neuron, sending data on. But the IO packages are special packages designed
		# with the same methods as neurons, just meant for specific IO purposes. You will
		# never need to call any of the IO packages directly. Instead, they are called whenever
		# you use the run(), map(), or learn() methods of your network.
        
    	AI::NeuralNet::BackProp::out2 "\nMapping I (_run package) connections to network...\n";
		
	    for($y=0; $y<$div; $y++) {
			$self->{_tmp_synapse} = $y;
			$self->{NET}->[$y]->register_synapse($self->{RUN});
			#$self->{NET}->[$y]->connect($self->{RUN});
		}
		
		AI::NeuralNet::BackProp::out2 "Mapping O (_map package) connections to network...\n\n";
		
		for($y=$size-$div; $y<$size; $y++) {
			$self->{_tmp_synapse} = $y;
			$self->{NET}->[$y]->connect($self->{MAP});
		}
		
		# And the group is done! 
	}
	

	# When called with an array reference to a pattern, returns a reference
	# to an array associated with that pattern. See usage in documentation.
	sub run {
		my $self	 =	  shift;
		my $map		 =	  shift;
		my $t0 		 =	new Benchmark;
        $self->{RUN}->run($map);
		$self->{LAST_TIME}=timestr(timediff(new Benchmark, $t0));
        return $self->map();
	}
    
    # This automatically uncrunches a response after running it
	sub run_uc {
    	$_[0]->uncrunch(run(@_));
    }

	# Returns the benchmark time and number of loops run or learned
	# for the last run() or learn()
	# operation performed.
	#
	sub benchmarked {
		my $self	=	shift;
		return $self->{LAST_TIME};
	}
	    
	# Used to retrieve map from last internal run operation.
	sub map {
		my $self	 =	  shift;
		$self->{MAP}->map();
	}
	
	# Forces network to learn pattern passed and give desired
	# results. See usage in POD.
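	# Example (illustrative sketch; assumes a $net with three inputs and one output;
	# the option values shown are the defaults used below):
	#
	#	$net->learn([0,1,1], [1],
	#		inc   => 0.20,     # learning increment
	#		max   => 1024,     # maximum number of loops
	#		error => -1);      # maximum (%) error allowed; defaults to -1
	#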
	sub learn {
		my $self	=	shift;
		my $omap	=	shift;
		my $res		=	shift;
		my %args    =   @_;
		my $inc 	=	$args{inc} || 0.20;
		my $max     =   $args{max} || 1024;
		my $_mx		=	intr($max/10);
		my $_mi		=	0;
		my $error   = 	(defined $args{error} && $args{error}>-1) ? $args{error} : -1;
  		my $div		=	$self->{DIV};
		my $size	=	$self->{SIZE};
		my $out		=	$self->{OUT};
		my $divide  =	AI::NeuralNet::BackProp->intr($div/$out);
		my ($a,$b,$y,$flag,$map,$loop,$diff,$pattern,$value);
		my ($t0,$it0);
		no strict 'refs';
		
		# Take care of crunching strings passed
		$omap = $self->crunch($omap) if($omap == 0);
		$res  = $self->crunch($res)  if($res  == 0);
		
		# Fill in empty spaces at end of results matrix with a 0
		if($#{$res}<$out) {
			for my $x ($#{$res}+1..$out) {
				#$res->[$x] = 0;
			}
		}
		
		# Debug
		AI::NeuralNet::BackProp::out1 "Num output neurons: $out, Input neurons: $size, Division: $divide\n";
		
		# Start benchmark timer and initialize a few variables
		$t0 	=	new Benchmark;
        $flag 	=	0; 
		$loop	=	0;   
		my $ldiff	=	0;
		my $dinc	=	0.0001;
		my $cdiff	=	0;
		$diff		=	100;
		$error 		= 	($error>-1)?$error:-1;
		
		# $flag only goes high when all neurons in output map compare exactly with
		# desired result map or $max loops is reached
		#	
		while(!$flag && ($max ? $loop<$max : 1)) {
			$it0 	=	new Benchmark;
			
			# Run the map
			$self->{RUN}->run($omap);
			
			# Retrieve last mapping  and initialize a few variables.
			$map	=	$self->map();
			$y		=	$size-$div;
			$flag	=	1;
			
			# Compare the result map we just ran with the desired result map.
			$diff 	=	pdiff($map,$res);
			
			# This adjusts the increment multiplier to decrease as the loops increase
			if($_mi > $_mx) {


				last;
			}
			
			# Debugging
			AI::NeuralNet::BackProp::out4 "Difference: $diff\%\t Increment: $inc\tMax Error: $error\%\n";
			AI::NeuralNet::BackProp::out1 "\n\nMapping results from $map:\n";
			
			# This loop compares each element of the output map with the desired result map.
			# If they don't match exactly, we call weight() on the offending output neuron 
			# and tell it what it should be aiming for, and then the offending neuron will
			# try to adjust the weights of its synapses to get closer to the desired output.
			# See comments in the weight() method of AI::NeuralNet::BackProp::neuron for how this works.
			my $l=$self->{NET};
			for my $i (0..$out-1) {
				$a = $map->[$i];
				$b = $res->[$i];
				
				AI::NeuralNet::BackProp::out1 "\nmap[$i] is $a\n";
				AI::NeuralNet::BackProp::out1 "res[$i] is $b\n";
					
				for my $j (0..$divide-1) {
					if($a!=$b) {
						AI::NeuralNet::BackProp::out1 "Punishing $self->{NET}->[($i*$divide)+$j] at ",(($i*$divide)+$j)," ($i with $a) by $inc.\n";
						$l->[$y+($i*$divide)+$j]->weight($inc,$b) if($l->[$y+($i*$divide)+$j]);
						$flag	=	0;
					}
				}
			}
			
			# This counter is just used in the benchmarking operations.
			$loop++;
			
			AI::NeuralNet::BackProp::out1 "\n\n";
			
			# Benchmark this loop.
			AI::NeuralNet::BackProp::out4 "Learning iteration $loop complete, timed at ".timestr(timediff(new Benchmark, $it0),'noc','5.3f')."\n";
		
			# Map the results from this loop.
			AI::NeuralNet::BackProp::out4 "Map: \n";
			AI::NeuralNet::BackProp::join_cols($map,$self->{col_width}) if ($AI::NeuralNet::BackProp::DEBUG);
			AI::NeuralNet::BackProp::out4 "Res: \n";
			AI::NeuralNet::BackProp::join_cols($res,$self->{col_width}) if ($AI::NeuralNet::BackProp::DEBUG);
		}
		
		# Compile benchmarking info for entire learn() process and return it, save it, and
		# display it.
		$self->{LAST_TIME}="$loop loops and ".timestr(timediff(new Benchmark, $t0));
        my $str = "Learning took $loop loops and ".timestr(timediff(new Benchmark, $t0),'noc','5.3f');
        AI::NeuralNet::BackProp::out2 $str;
		return $str;
	}		
		
1;

# Internal input class. Not to be used directly.
package AI::NeuralNet::BackProp::_run;
	
	use strict;
	
	# Dummy constructor.
	sub new {
		bless { PARENT => $_[1] }, $_[0]
	}
	
	# This is so we comply with the neuron interface.
	sub weight {}
	sub input  {}
	
	# Again, compliance with neuron interface.
	sub register_synapse {
		my $self	=	shift;		
		my $sid		=	$self->{REGISTRATION} || 0;
		$self->{REGISTRATION}	=	++$sid;
		$self->{RMAP}->{$sid-1}	= 	$self->{PARENT}->{_tmp_synapse};
		return $sid-1;
	}
	
	# Here is the real meat of this package.
	# run() does one thing: It fires values
	# into the first layer of the network.
	sub run {
		my $self	=	shift;
		my $map		=	shift;
		my $x		=	0;
		$map = $self->{PARENT}->crunch($map) if($map == 0);
		return undef if(substr($map,0,5) ne "ARRAY");
		foreach my $el (@{$map}) {
			# Catch ourself if we try to run more inputs than neurons
			return $x if($x>$self->{PARENT}->{DIV}-1);
			
			# Here we add a small amount of randomness to the network.
			# This is to keep the network from getting stuck on a 0 value internally.
			$self->{PARENT}->{NET}->[$x]->input(0,$el+(rand()*$self->{PARENT}->{random}));
			$x++;
		};
		# In case we tried to run fewer inputs than neurons, feed a constant 1 into the extra neurons
		if($x<$self->{PARENT}->{DIV}) {
			for my $y ($x..$self->{PARENT}->{DIV}-1) {
				$self->{PARENT}->{NET}->[$y]->input(0,1);
			}
		}
		return $x;
	}
	
	
1;

# Internal output class. Not to be used directly.
package AI::NeuralNet::BackProp::_map;
	
	use strict;
	
	# Dummy constructor.
	sub new {
		bless { PARENT => $_[1] }, $_[0]
	}
	
	# Compliance with neuron interface
	sub weight {}
	
	# Compliance with neuron interface
	sub register_synapse {
		my $self	=	shift;		
		my $sid		=	$self->{REGISTRATION} || 0;
		$self->{REGISTRATION}	=	++$sid;
		$self->{RMAP}->{$sid-1} = 	$self->{PARENT}->{_tmp_synapse};
		return $sid-1;
	}
	
	# This acts just like a regular neuron by receiving
	# values from input synapses. Yet, unlike a regular
	# neuron, it doesn't weight the values, just stores
	# them to be retrieved by a call to map().
	sub input  {
		no strict 'refs';             
		my $self	=	shift;
		my $sid		=	shift;
		my $value	=	shift;
		my $size	=	$self->{PARENT}->{DIV};
		my $flag	=	1;
		$self->{OUTPUT}->[$sid]->{VALUE}	=	$self->{PARENT}->intr($value);
		$self->{OUTPUT}->[$sid]->{FIRED}	=	1;
		
		AI::NeuralNet::BackProp::out1 "Received value $self->{OUTPUT}->[$sid]->{VALUE} and sid $sid, self $self.\n";
	}
	
	# Here we simply collect the value of every neuron connected to this
	# one from the layer below us and return an array ref to the final map.
	sub map {
		my $self	=	shift;
		my $size	=	$self->{PARENT}->{DIV};
		my $out		=	$self->{PARENT}->{OUT};
		my $divide  =	AI::NeuralNet::BackProp->intr($size/$out);
		my @map = ();
		my $value;
		AI::NeuralNet::BackProp::out1 "Num output neurons: $out, Input neurons: $size, Division: $divide\n";
		for(0..$out-1) {
			$value=0;
			for my $a (0..$divide-1) {
				$value += $self->{OUTPUT}->[($_*$divide)+$a]->{VALUE};
				AI::NeuralNet::BackProp::out1 "\$a is $a, index is ".(($_*$divide)+$a).", value is $self->{OUTPUT}->[($_*$divide)+$a]->{VALUE}\n";
			}
			$map[$_]	=	AI::NeuralNet::BackProp->intr($value/$divide);
			AI::NeuralNet::BackProp::out1 "Map position $_ is $map[$_] in @{[\@map]} with self set to $self.\n";
			$self->{OUTPUT}->[$_]->{FIRED}	=	0;
		}
		my $ret=\@map;
		return $self->{PARENT}->_range($ret);
	}
1;
			      
# load_pcx() wrapper package
package AI::NeuralNet::BackProp::PCX;

	# Called by load_pcx in AI::NeuralNet::BackProp;
	sub new {
		my $type	=	shift;
		my $self	=	{ 
			parent  => $_[0],
			file    => $_[1]
		};
		my (@a,@b)=load_pcx($_[1]);
		$self->{image}=\@a;
		$self->{palette}=\@b;
		bless \%{$self}, $type;
	}

	# Returns a rectangular block defined by an array ref in the form of
	# 		[$x1,$y1,$x2,$y2]
	# Return value is an array ref
	sub get_block {
		my $self	=	shift;
		my $ref		=	shift;
		my ($x1,$y1,$x2,$y2)	=	@{$ref};
		my @block	=	();
		my $count	=	0;
		for my $x ($x1..$x2-1) {
			for my $y ($y1..$y2-1) {
				$block[$count++]	=	$self->get($x,$y);
			}
		}
		return \@block;
	}
			
	# Returns pixel at $x,$y
	sub get {
		my $self	=	shift;
		my ($x,$y)  =	(shift,shift);
		return $self->{image}->[$y*320+$x];
	}
	
	# Returns array of (r,g,b) value from palette index passed
	sub rgb {
		my $self	=	shift;
		my $color	=	shift;
		return ($self->{palette}->[$color]->{red},$self->{palette}->[$color]->{green},$self->{palette}->[$color]->{blue});
	}
		
	# Returns mean of (rgb) value of palette index passed
	sub avg {
		my $self	=	shift;
		my $color	=	shift;
		return $self->{parent}->intr(($self->{palette}->[$color]->{red}+$self->{palette}->[$color]->{green}+$self->{palette}->[$color]->{blue})/3);
	}
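	
	# Example (illustrative sketch; the file name is made up):
	#
	#	my $img   = $net->load_pcx("image.pcx");        # 320x200, 8-bit PCX
	#	my $block = $img->get_block([0,0,10,10]);       # 10x10 block of palette indices
	#	my ($r,$g,$b) = $img->rgb($img->get(5,5));      # RGB of the pixel at (5,5)
	#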
	
	# Loads and decompresses a PCX-format 320x200, 8-bit image file and returns 
	# two arrays: the first is a 64000-element array where each element contains a palette
	# index, and the second is a 256-element array where each element is a hash
	# ref with the keys 'red', 'green', and 'blue', each holding the respective color
	# component for that color index in the palette.
	sub load_pcx {
		shift if(substr($_[0],0,4) eq 'AI::'); 
		
		# open the file
		open(FILE, "$_[0]");
		binmode(FILE);
		
		my $tmp;
		my @image;
		my @palette;
		my $data;
		
		# Read header
		read(FILE,$tmp,128);
		
		# load the data and decompress into buffer
		my $count=0;
		
		while($count<320*200) {
		     # get the first piece of data
		     read(FILE,$data,1);
	         $data=ord($data);
	         
		     # is this a rle?
		     if ($data>=192 && $data<=255) {
		        # how many bytes in run?
		        my $num_bytes = $data-192;
		
		        # get the actual $data for the run
		        read(FILE, $data, 1);
				$data=ord($data);
		        # replicate $data in buffer num_bytes times
		        while($num_bytes-->0) {
	            	$image[$count++] = $data;
		        } # end while
		     } else {
		        # actual $data, just copy it into buffer at next location
		        $image[$count++] = $data;
		     } # end else not rle
		}
		
		# move to end of file then back up 768 bytes, i.e. to the beginning of the palette
		seek(FILE,-768,2);
		
		# load the palette data into the palette array
		for my $index (0..255) {
		    # get the red component
		    read(FILE,$tmp,1);
		    $palette[$index]->{red}   = (ord($tmp)>>2);
		
		    # get the green component
		    read(FILE,$tmp,1);
			$palette[$index]->{green} = (ord($tmp)>>2);
		
		    # get the blue component
		    read(FILE,$tmp,1);
			$palette[$index]->{blue}  = (ord($tmp)>>2);
		
		}
		
		close(FILE);


