AI-NeuralNet-BackProp


BackProp.pm

			# If it is equal, then don't adjust
			#
			### Disabled because this sometimes causes 
			### infinite loops when learning with range limits enabled
			#
			#next if($value eq $what);
			
			# Adjust increment by the weight of the synapse of 
			# this neuron & apply direction delta
			my $delta = 
					$ammount * 
						($value<$what?1:-1) * 
							$self->{SYNAPSES}->{LIST}->[$i]->{WEIGHT};
			
			#print "($value,$what) delta:$delta\n";
			
			# Recursively apply
			$self->{SYNAPSES}->{LIST}->[$i]->{WEIGHT}  +=  $delta;
			$self->{SYNAPSES}->{LIST}->[$i]->{PKG}->weight($ammount,$what);
			    
		}
	}
	
	# Registers some neuron as a synapse of this neuron.           
	# This is called exclusively by connect(), except in
	# initialize_group() to connect the _map() package.
	sub register_synapse {
		my $self	=	shift;
		my $synapse	=	shift;
		my $sid		=	$self->{SYNAPSES}->{SIZE} || 0;
		$self->{SYNAPSES}->{LIST}->[$sid]->{PKG}		=	$synapse;
		$self->{SYNAPSES}->{LIST}->[$sid]->{WEIGHT}		=	1.00		if(!$self->{SYNAPSES}->{LIST}->[$sid]->{WEIGHT});
		$self->{SYNAPSES}->{LIST}->[$sid]->{FIRED}		=	0;       
		AI::NeuralNet::BackProp::out1("$self: Registering sid $sid with weight $self->{SYNAPSES}->{LIST}->[$sid]->{WEIGHT}, package $self->{SYNAPSES}->{LIST}->[$sid]->{PKG}.\n");
		$self->{SYNAPSES}->{SIZE} = ++$sid;
		return ($sid-1);
	}
	
	# Called via AI::NeuralNet::BackProp::NeuralNetwork::initialize_group() to 
	# form the neuron grids.
	# This just registers another neuron as a synapse to output to from this one, and
	# then we ask that synapse to let us register as an input connection, and we
	# save the sid that the output synapse returns.
	sub connect {
		my $self	=	shift;
		my $to		=	shift;
		my $oid		=	$self->{OUTPUTS}->{SIZE} || 0;
		AI::NeuralNet::BackProp::out1("Connecting $self to $to at $oid...\n");
		$self->{OUTPUTS}->{LIST}->[$oid]->{PKG}	=	$to;
 		$self->{OUTPUTS}->{LIST}->[$oid]->{ID}	=	$to->register_synapse($self);
		$self->{OUTPUTS}->{SIZE} = ++$oid;
		# $oid was already incremented above, so index the entry we just filled
		return $self->{OUTPUTS}->{LIST}->[$oid-1]->{ID};
	}
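	# Illustrative sketch (hypothetical $n1 and $n2 neuron objects):
	#   my $id = $n1->connect($n2);   # $n1 now outputs to $n2
	#   # internally, $n2->register_synapse($n1) recorded $n1 as an
	#   # input and returned the slot id saved in $id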
1;
			 
package AI::NeuralNet::BackProp;
	
	use Benchmark;          
	use strict;
	
	# Returns the number of elements in an array ref, undef on error
	sub _FETCHSIZE {
		my $a=$_[0];
		return undef if(ref($a) ne "ARRAY");
		return scalar @{$a};
	}

	# Debugging subs
	$AI::NeuralNet::BackProp::DEBUG  = 0;
	sub whowasi { (caller(1))[3] . '()' }
	sub debug { shift; $AI::NeuralNet::BackProp::DEBUG = shift || 0; } 
	sub out1  { print  shift() if ($AI::NeuralNet::BackProp::DEBUG eq 1) }
	sub out2  { print  shift() if (($AI::NeuralNet::BackProp::DEBUG eq 1) || ($AI::NeuralNet::BackProp::DEBUG eq 2)) }
	sub out3  { print  shift() if ($AI::NeuralNet::BackProp::DEBUG) }
	sub out4  { print  shift() if ($AI::NeuralNet::BackProp::DEBUG eq 4) }
	
	# Rounds a floating-point number to an integer with int() and sprintf();
	# returns 0 if the conversion fails. (The original try/catch would not
	# compile without a try module loaded, so plain eval is used instead.)
	sub intr  {
    	shift if(substr($_[0],0,4) eq 'AI::');
      	my $n = shift;
      	my $r = eval { int(sprintf("%.0f",$n)) };
      	return $@ ? 0 : $r;
	}
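	# e.g. intr(2.7) == 3 and intr(-2.7) == -3 (rounds to the nearest integer)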
    
	# Used to format array ref into columns
	# Usage: 
	#	join_cols(\@array,$row_length_in_elements,$high_state_character,$low_state_character);
	# Can also be called as method of your neural net.
	# If $high_state_character is null, prints actual numerical values of each element.
	sub join_cols {
		no strict 'refs';
		shift if(substr($_[0],0,4) eq 'AI::'); 
		my $map		=	shift;
		my $break   =	shift;
		my $a		=	shift;
		my $b		=	shift;
		my $x;
		foreach my $el (@{$map}) { 
			my $str = ((int($el))?$a:$b);
			$str=$el."\0" if(!$a);
			print $str;
			$x++;
			if($x>$break-1) {
				print "\n";
				$x=0;
			}
		}
		print "\n";
	}
	
	# Returns percentage difference between all elements of two
	# array refs of exact same length (in elements).
	# Now calculates actual difference in numerical value.
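	# e.g. pdiff([1,2],[1,4]) == "50.0000000000" (the second elements differ by 50%)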
	sub pdiff {
		no strict 'refs';
		shift if(substr($_[0],0,4) eq 'AI::'); 
		my $a1	=	shift;
		my $a2	=	shift;
		my $a1s	=	$#{$a1}; #AI::NeuralNet::BackProp::_FETCHSIZE($a1);
		my $a2s	=	$#{$a2}; #AI::NeuralNet::BackProp::_FETCHSIZE($a2);
		my ($a,$b,$diff,$t);
		$diff=0;
		#return undef if($a1s ne $a2s);	# must be same length
		for my $x (0..$a1s) {
			$a = $a1->[$x];
			$b = $a2->[$x];
			if($a!=$b) {
				if($a<$b){$t=$a;$a=$b;$b=$t;}
				$a=1 if(!$a);
				$diff+=(($a-$b)/$a)*100;
			}
		}
		$a1s = 1 if(!$a1s);
		return sprintf("%.10f",($diff/$a1s));
	}
	
	# Returns the absolute difference between $fa and $fb as a percentage of $fa
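	# e.g. p(2,3) == "50.000" ($fb differs from $fa by 50% of $fa)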
	sub p {
		shift if(substr($_[0],0,4) eq 'AI::'); 
		my ($fa,$fb)=(shift,shift);
		sprintf("%.3f",((($fb-$fa)*((($fb-$fa)<0)?-1:1))/$fa)*100);
	}
	
	# This sub will take an array ref of a data set, which it expects in this format:
	#   my @data_set = (	[ ...inputs... ], [ ...outputs ... ],
	#				   				   ... rows ...
	#				   );
	#
	# This sub returns the percentage of 'forgetfulness' when the net learns all the
	# data in the set in order. Usage:
	#
	#	 learn_set(\@data,[ options ]);
	#
	# Options are options in hash form. They can be of any form that $net->learn takes.
	#
	# It returns a percentage string.
	#
	sub learn_set {
		my $self	=	shift if(substr($_[0],0,4) eq 'AI::'); 
		my $data	=	shift;
		my %args	=	@_;
		my $len		=	int($#{$data}/2);	# index of the last input/output pair
		my $inc		=	$args{inc};
		my $max		=	$args{max};
	    my $error	=	$args{error};
	    my $p		=	(defined $args{flag})	?$args{flag}	   :1;
	    my $row		=	(defined $args{pattern})?$args{pattern}*2+1:1;
	    my ($fa,$fb);
		for my $x (0..$len) {
			print "\nLearning index $x...\n" if($AI::NeuralNet::BackProp::DEBUG);
			my $str = $self->learn( $data->[$x*2],			# The list of data to input to the net
					  		  		$data->[$x*2+1], 		# The output desired
					    			inc=>$inc,				# The starting learning gradient
					    			max=>$max,				# The maximum num of loops allowed
					    			error=>$error);			# The maximum (%) error allowed
			print $str if($AI::NeuralNet::BackProp::DEBUG); 
		}
			
		
		my $res;
		# Crunch the measurement pattern if it was given as a string
		$data->[$row] = $self->crunch($data->[$row]) if(!ref($data->[$row]));
		
		if ($p) {
			$res=pdiff($data->[$row],$self->run($data->[$row-1]));
		} else {
			$res=$data->[$row]->[0]-$self->run($data->[$row-1])->[0];
		}
		return $res;
	}
	
	# This sub will take an array ref of a data set, which it expects in this format:
	#   my @data_set = (	[ ...inputs... ], [ ...outputs ... ],
	#				   				   ... rows ...
	#				   );
	#
	# This sub makes the net learn all the
	# data in the set in RANDOM order. Usage:
	#
	#	 learn_set_rand(\@data,[ options ]);
	#
	# Options are options in hash form. They can be of any form that $net->learn takes.
	#
	# It returns a true value.
	#
	sub learn_set_rand {
		my $self	=	shift if(substr($_[0],0,4) eq 'AI::'); 
		my $data	=	shift;
		my %args	=	@_;
		my $len		=	int($#{$data}/2);	# index of the last input/output pair
		my $inc		=	$args{inc};
		my $max		=	$args{max};
	    my $error	=	$args{error};
	    my @learned;
	    my $remaining = $len+1;				# patterns left to learn
		while($remaining) {
			# Pick a random index that has not been learned yet
			my $x;
			do { $x = $self->intr(rand()*$len) } while($learned[$x]);
			$learned[$x]=1;
			$remaining--;
			print "\nLearning index $x...\n" if($AI::NeuralNet::BackProp::DEBUG); 
			my $str =  $self->learn($data->[$x*2],			# The list of data to input to the net
					  		  		$data->[$x*2+1], 		# The output desired
					    			inc=>$inc,				# The starting learning gradient
			 		    			max=>$max,				# The maximum num of loops allowed
					    			error=>$error);			# The maximum (%) error allowed
			print $str if($AI::NeuralNet::BackProp::DEBUG); 
		}
			
		
		return 1; 
	}

	# Returns the index of the element in array REF passed with the highest comparative value
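	# e.g. high([2,9,4]) == 1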
	sub high {
		shift if(substr($_[0],0,4) eq 'AI::'); 
		my $ref1	=	shift;
		
		my $len = scalar @{$ref1};
		my $tmp = 0;
		for my $x (0..$len-1) {
			$tmp = $x if((@{$ref1})[$x] > (@{$ref1})[$tmp]);
		}
		return $tmp;
	}
	
	# Returns the index of the element in array REF passed with the lowest comparative value
	sub low {
		shift if(substr($_[0],0,4) eq 'AI::'); 
		my $ref1	=	shift;
		
		my $len = scalar @{$ref1};
		my $tmp = 0;
		for my $x (0..$len-1) {
			$tmp = $x if((@{$ref1})[$x] < (@{$ref1})[$tmp]);
		}
		return $tmp;
	}  
	
	# Returns a pcx object
	sub load_pcx {
		my $self	=	shift;
		return AI::NeuralNet::BackProp::PCX->new($self,shift);
	}	
	
	# Crunch a string of words into a map
	sub crunch {
		my $self	=	shift;
		my (@map,$ic);
		my @ws 		=	split(/\s+/,shift);
		for my $a (0..$#ws) {
			$ic=$self->crunched($ws[$a]);
			if(!defined $ic) {
				$self->{_CRUNCHED}->{LIST}->[$self->{_CRUNCHED}->{_LENGTH}++]=$ws[$a];
				$map[$a]=$self->{_CRUNCHED}->{_LENGTH};
			} else {
				$map[$a]=$ic;
			}
		}
		return \@map;
	}
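	# Illustrative: on a fresh net, crunch("do you like corn") returns
	# [1,2,3,4]; words seen before reuse their existing ids.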

BackProp.pm

		
	    for($y=0; $y<$div; $y++) {
			$self->{_tmp_synapse} = $y;
			$self->{NET}->[$y]->register_synapse($self->{RUN});
			#$self->{NET}->[$y]->connect($self->{RUN});
		}
		
		AI::NeuralNet::BackProp::out2 "Mapping O (_map package) connections to network...\n\n";
		
		for($y=$size-$div; $y<$size; $y++) {
			$self->{_tmp_synapse} = $y;
			$self->{NET}->[$y]->connect($self->{MAP});
		}
		
		# And the group is done! 
	}
	

	# When called with an array reference to a pattern, returns a reference
	# to an array associated with that pattern. See usage in documentation.
	sub run {
		my $self	 =	  shift;
		my $map		 =	  shift;
		my $t0 		 =	new Benchmark;
        $self->{RUN}->run($map);
		$self->{LAST_TIME}=timestr(timediff(new Benchmark, $t0));
        return $self->map();
	}
    
    # This automatically uncrunches a response after running it
	sub run_uc {
    	$_[0]->uncrunch(run(@_));
    }

	# Returns the benchmark time and number of loops run or learned
	# for the last run() or learn()
	# operation performed.
	#
	sub benchmarked {
		my $self	=	shift;
		return $self->{LAST_TIME};
	}
	    
	# Used to retrieve map from last internal run operation.
	sub map {
		my $self	 =	  shift;
		$self->{MAP}->map();
	}
	
	# Forces the network to learn the pattern passed and give the desired
	# result. See usage in POD.
	sub learn {
		my $self	=	shift;
		my $omap	=	shift;
		my $res		=	shift;
		my %args    =   @_;
		my $inc 	=	$args{inc} || 0.20;
		my $max     =   $args{max} || 1024;
		my $_mx		=	intr($max/10);
		my $_mi		=	0;
		my $error   = 	(defined $args{error} && $args{error}>-1) ? $args{error} : -1;
  		my $div		=	$self->{DIV};
		my $size	=	$self->{SIZE};
		my $out		=	$self->{OUT};
		my $divide  =	AI::NeuralNet::BackProp->intr($div/$out);
		my ($a,$b,$y,$flag,$map,$loop,$diff,$pattern,$value);
		my ($t0,$it0);
		no strict 'refs';
		
		# Take care of crunching strings passed
		$omap = $self->crunch($omap) if(!ref($omap));
		$res  = $self->crunch($res)  if(!ref($res));
		
		# Fill in empty spaces at end of results matrix with a 0
		if($#{$res}<$out) {
			for my $x ($#{$res}+1..$out) {
				$res->[$x] = 0;
			}
		}
		
		# Debug
		AI::NeuralNet::BackProp::out1 "Num output neurons: $out, Input neurons: $size, Division: $divide\n";
		
		# Start benchmark timer and initialize a few variables
		$t0 	=	new Benchmark;
        $flag 	=	0; 
		$loop	=	0;   
		my $ldiff	=	0;
		my $dinc	=	0.0001;
		my $cdiff	=	0;
		$diff		=	100;
		$error 		= 	($error>-1)?$error:-1;
		
		# $flag only goes high when all neurons in output map compare exactly with
		# desired result map or $max loops is reached
		#	
		while(!$flag && ($max ? $loop<$max : 1)) {
			$it0 	=	new Benchmark;
			
			# Run the map
			$self->{RUN}->run($omap);
			
			# Retrieve last mapping  and initialize a few variables.
			$map	=	$self->map();
			$y		=	$size-$div;
			$flag	=	1;
			
			# Compare the result map we just ran with the desired result map.
			$diff 	=	pdiff($map,$res);
			
			# This adjusts the increment multiplier to decrease as the loops increase
			if($_mi > $_mx) {
				$dinc *= 0.1;
				$_mi   = 0;
			} 
			
			$_mi++;
				
			
			# We decrement the learning increment to prevent infinite learning loops.
			# In old versions of this module, if you used too high an initial
			# $inc, the network would keep jumping back and forth over your desired
			# results because the increment was too large: it would push close to
			# the desired result, overshoot the other edge, try to come back, and
			# overshoot again.
			# This simply scales the learning gradient down proportionally to the
			# amount of convergence left as the difference decreases.
           	$inc   -= ($dinc*$diff);
			$inc   = 0.0000000001 if($inc < 0.0000000001);
			
			# This prevents it from seeming to get stuck in one location
			# by attempting to boost the values out of the hole they seem to be in.
			if($diff eq $ldiff) {
				$cdiff++;
				$inc += ($dinc*$diff)+($dinc*$cdiff*10);
			} else {
				$cdiff=0;
			}
			
			# Save last $diff
			$ldiff = $diff;
			
			# If a maximum error was given and the difference has dropped
			# below it, we are done.
			if($error>-1 && $diff<=$error) {
				$flag=1;
				last;
			}
			
			# Debugging
			AI::NeuralNet::BackProp::out4 "Difference: $diff\%\t Increment: $inc\tMax Error: $error\%\n";
			AI::NeuralNet::BackProp::out1 "\n\nMapping results from $map:\n";
			
			# This loop compares each element of the output map with the desired result map.
			# If they don't match exactly, we call weight() on the offending output neuron 
			# and tell it what it should be aiming for, and then the offending neuron will
			# try to adjust the weights of its synapses to get closer to the desired output.
			# See comments in the weight() method of AI::NeuralNet::BackProp for how this works.
			my $l=$self->{NET};
			for my $i (0..$out-1) {
				$a = $map->[$i];
				$b = $res->[$i];
				
				AI::NeuralNet::BackProp::out1 "\nmap[$i] is $a\n";
				AI::NeuralNet::BackProp::out1 "res[$i] is $b\n";
					
				for my $j (0..$divide-1) {
					if($a!=$b) {
						AI::NeuralNet::BackProp::out1 "Punishing $self->{NET}->[($i*$divide)+$j] at ",(($i*$divide)+$j)," ($i with $a) by $inc.\n";
						$l->[$y+($i*$divide)+$j]->weight($inc,$b) if($l->[$y+($i*$divide)+$j]);
						$flag	=	0;
					}
				}
			}
			
			# This counter is just used in the benchmarking operations.
			$loop++;
			
			AI::NeuralNet::BackProp::out1 "\n\n";
			
			# Benchmark this loop.
			AI::NeuralNet::BackProp::out4 "Learning itetration $loop complete, timed at".timestr(timediff(new Benchmark, $it0),'noc','5.3f')."\n";
		
			# Map the results from this loop.
			AI::NeuralNet::BackProp::out4 "Map: \n";
			AI::NeuralNet::BackProp::join_cols($map,$self->{col_width}) if ($AI::NeuralNet::BackProp::DEBUG);
			AI::NeuralNet::BackProp::out4 "Res: \n";
			AI::NeuralNet::BackProp::join_cols($res,$self->{col_width}) if ($AI::NeuralNet::BackProp::DEBUG);
		}
		
		# Compile benchmarking info for entire learn() process and return it, save it, and
		# display it.
		$self->{LAST_TIME}="$loop loops and ".timestr(timediff(new Benchmark, $t0));
        my $str = "Learning took $loop loops and ".timestr(timediff(new Benchmark, $t0),'noc','5.3f');
        AI::NeuralNet::BackProp::out2 $str;
		return $str;
	}		
		
1;

# Internal input class. Not to be used directly.
package AI::NeuralNet::BackProp::_run;
	
	use strict;
	
	# Dummy constructor.
	sub new {
		bless { PARENT => $_[1] }, $_[0]
	}
	
	# This is so we comply with the neuron interface.

BackProp.pm



(Sorry about the bad art...I am no ASCII artist! :-)


B<1>
In addition to flag 0, each neuron in layer X is connected to every input of 
the neurons ahead of itself in layer X.

B<2> I<("L-U Style")>
No, it's not "Learning-Unit" style. It gets its name from this: in a 2-layer, 3-neuron
network, the connections form an L-U pair, or a W, however you want to look
at it.   


	^   ^   ^
	|   |   |
	O-->O-->O
	^   ^   ^
	|   |   |
	|   |   |
	O-->O-->O
	^   ^   ^
	|   |   |



As you can see, each neuron is connected to the next one in its layer, as well
as the neuron directly above itself.
	

Before you can really do anything useful with your new neural network
object, you need to teach it some patterns. See the learn() method, below.

=item $net->learn($input_map_ref, $desired_result_ref [, options ]);

This will 'teach' a network to associate a new input map with a desired result.
It will return a string containing benchmarking information. You can retrieve the
pattern index that the network stored the new input map in after learn() is complete
with the pattern() method, below.

UPDATED: You can now specify strings as inputs and outputs to learn, and they will be crunched
automatically. Example:

	$net->learn('corn', 'cob');
	# Before update, you have had to do this:
	# $net->learn($net->crunch('corn'), $net->crunch('cob'));

Note, the old method of calling crunch on the values still works just as well.	

UPDATED: You can now learn inputs with a 0 value. Beware though, it may not learn() a 0 value 
in the input map if you have randomness disabled. See NOTES on using a 0 value with randomness
disabled.

The first two arguments may be array refs (or now, strings), and they may be of different lengths.

Options should be written in hash form. There are three options:
	 
	 inc	=>	$learning_gradient
	 max	=>	$maximum_iterations
	 error	=>	$maximum_allowable_percentage_of_error
	 

$learning_gradient is an optional value used to adjust the weights of the internal
connections. If $learning_gradient is omitted, it defaults to 0.20.
 
$maximum_iterations is the maximum number of iterations the loop should do.
It defaults to 1024.  Set it to 0 if you never want the loop to quit before
the pattern is perfectly learned.

$maximum_allowable_percentage_of_error is the maximum allowable error to have. If 
this is set, then learn() will return when the percentage difference between the
actual results and desired results falls below $maximum_allowable_percentage_of_error.
If you do not include 'error', or $maximum_allowable_percentage_of_error is set to -1,
then learn() will not return until it gets an exact match for the desired result OR it
reaches $maximum_iterations.
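
A short sketch combining all three options (the maps shown are made up for
illustration):

	$net->learn([ 0,1,1,0 ], [ 1 ],
	            inc   => 0.15,   # learning gradient
	            max   => 5000,   # allow up to 5000 iterations
	            error => 5);     # or stop early at 5% error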


=item $net->learn_set(\@set, [ options ]);

UPDATE: Inputs and outputs in the dataset can now be strings. See information on auto-crunching
in learn()

This takes the same options as learn() and allows you to specify a set to learn, rather
than individual patterns. A dataset is an array reference with at least two elements in the
array, each element being another array reference (or now, a scalar string). For each pattern to
learn, you must specify an input array ref, and an output array ref as the next element. Example:
	
	my @set = (
		# inputs        outputs
		[ 1,2,3,4 ],  [ 1,3,5,6 ],
		[ 0,2,5,6 ],  [ 0,2,1,2 ]
	);


See the paragraph on measuring forgetfulness, below. There are 
two learn_set()-specific option tags available:

	flag     =>  $flag
	pattern  =>  $row

If "flag" is set to some TRUE value, as in "flag => 1" in the hash of options, or if the option "flag"
is not set, then it will return a percentage representing the amount of forgetfulness. Otherwise,
learn_set() will return an integer specifying the amount of forgetfulness when all the patterns 
are learned. 

If "pattern" is set, then learn_set() will use that pattern in the data set to measure forgetfulness by.
If "pattern" is omitted, it defaults to the first pattern in the set. Example:

	my @set = (
		[ 0,1,0,1 ],  [ 0 ],
		[ 0,0,1,0 ],  [ 1 ],
		[ 1,1,0,1 ],  [ 2 ],  #  <---
		[ 0,1,1,0 ],  [ 3 ]
	);
	
If you wish to measure forgetfulness as indicated by the line with the arrow, then you would
pass 2 as the "pattern" option, as in "pattern => 2".
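
For instance, a hedged sketch using the set above:

	# learn the whole set, then measure forgetfulness against pattern 2
	my $forgetfulness = $net->learn_set(\@set, pattern => 2);
	print "Forgot $forgetfulness% of pattern 2\n";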

Now why the heck would anyone want to measure forgetfulness, you ask? Maybe you wonder how I 
even measure that. Well, it is not a vital value that you have to know. I just put in a 
"forgetfulness measure" one day because I thought it would be neat to know. 

How the module measures forgetfulness is this: first, it learns all the patterns in the set provided,
then it runs the very first pattern (or whatever pattern is specified by the "pattern" option)
in the set after it has finished learning. It will compare the run() output with the desired output
as specified in the dataset. In a perfect world, the two should match exactly. What we measure is
how much they don't match, thus the amount of forgetfulness the network has.

NOTE: In version 0.77 percentages were disabled because of a bug. Percentages are now enabled.

Example (from examples/ex_dow.pl):

	# Data from 1989 (as far as I know..this is taken from example data on BrainMaker)

BackProp.pm


=item $pcx->{palette}

This is an array ref to an AoH (array of hashes). Each element has the following three keys:
	
	$pcx->{palette}->[0]->{red};
	$pcx->{palette}->[0]->{green};
	$pcx->{palette}->[0]->{blue};

Each is in the range of 0..63, corresponding to its named color component.
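
A minimal sketch dumping the palette (assuming the usual 256-entry palette of
an 8-bit PCX; adjust the range for other depths):

	for my $i (0..255) {
		my $c = $pcx->{palette}->[$i];
		print "$i: $c->{red},$c->{green},$c->{blue}\n";
	}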



=item $pcx->get_block($array_ref);

Returns a rectangular block defined by an array ref in the form of:
	
	[$left,$top,$right,$bottom]

These must be in the range of 0..319 for $left and $right, and the range of 0..199 for
$top and $bottom. The block is returned as an array ref with horizontal lines in sequential order.
I.e. to get a pixel from [2,5] in the block, with a block width ($right-$left) of 20, the element in 
the array ref containing the contents of coordinates [2,5] would be found at [5*20+2] ($y*$width+$x).
    
	print( (@{$pcx->get_block([0,0,20,50])})[5*20+2] );

This would print the contents of the element at block coords [2,5].



=item $pcx->get($x,$y);

Returns the value of pixel at image coordinates $x,$y.
$x must be in the range of 0..319 and $y must be in the range of 0..199.



=item $pcx->rgb($index);

Returns a 3-element array (not array ref) with each element corresponding to the red, green, or
blue color components, respectively.



=item $pcx->avg($index);	

Returns the mean value of the red, green, and blue values at the palette index in $index.
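
A hedged sketch tying rgb() and avg() together (palette index 3 is arbitrary):

	my ($r,$g,$b) = $pcx->rgb(3);
	print "avg of entry 3: ", $pcx->avg(3), " == ", ($r+$g+$b)/3, "\n";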
	


=head1 NOTES

=item Learning 0s With Randomness Disabled

You can now use 0 values in any input maps. This is a good improvement over versions 0.40
and 0.42, where no 0s were allowed because the learning would never finish
with a 0 in the input. 

Yet with the allowance of 0s, one of two factors is required to learn correctly. Either you
must enable randomness with $net->random(0.0001) (any value other than 0 works; see random() ), 
or you must set an error minimum with the 'error => 5' option (you can use some other error value 
as well). 

When randomness is enabled (that is, when you call random() with a value other than 0), it injects
a bit of randomness into the output of every neuron in the network, except for the input and output
neurons. The randomness is injected with rand()*$rand, where $rand is the value that was
passed to the random() call. This ensures the network never has a pure 0 internally. A pure 0
internally is bad because a weight multiplied by 0 stays 0, no matter what the weight is, so
the weights cannot change the value. Yet when a weight is multiplied by 0.00001, eventually,
with enough weight, it will be able to learn. With a 0 value instead of 0.00001 or whatever,
it would never be able to add enough weight to get anything other than a 0. 

The second option to allow for 0s is to enable a maximum error with the 'error' option in
learn() , learn_set() , and learn_set_rand() . This allows the network to not worry about
learning an output perfectly. 

For accuracy reasons, it is recommended that you work with 0s using the random() method.
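
A short sketch of both approaches (values are only illustrative):

	# option 1: a tiny bit of internal randomness
	$net->random(0.0001);
	$net->learn([ 0,1,0,1 ], [ 1 ]);
	
	# option 2: leave randomness off, but accept up to 5% error
	$net->learn([ 0,1,0,1 ], [ 1 ], error => 5);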

If anyone has any thoughts/arguments/suggestions for using 0s in the network, let me know
at jdb@wcoil.com. 




=head1 OTHER INCLUDED PACKAGES

=item AI::NeuralNet::BackProp::neuron

AI::NeuralNet::BackProp::neuron is the worker package for AI::NeuralNet::BackProp.
It implements the actual neurons of the neural network.
AI::NeuralNet::BackProp::neuron is not designed to be created directly, as
it is used internally by AI::NeuralNet::BackProp.

=item AI::NeuralNet::BackProp::_run

=item AI::NeuralNet::BackProp::_map

These two packages, _run and _map, are used to insert data into
the network and to get data from the network. The _run and _map packages 
are connected to the neurons so that the neurons think that the IO packages are
just other neurons, sending data on. But the IO packages are special packages designed
with the same methods as neurons, just meant for specific IO purposes. You will
never need to call any of the IO packages directly. Instead, they are called whenever
you use the run() or learn() methods of your network.
        



=head1 BUGS

This is an alpha release of C<AI::NeuralNet::BackProp>, and that being the case, I am sure 
there are probably bugs in here which I just have not found yet. If you find bugs in this module, I would 
appreciate it greatly if you could report them to me at F<E<lt>jdb@wcoil.comE<gt>>,
or, even better, try to patch them yourself and figure out why the bug is being buggy, and
send me the patched code, again at F<E<lt>jdb@wcoil.comE<gt>>. 



=head1 AUTHOR

Josiah Bryan F<E<lt>jdb@wcoil.comE<gt>>

Copyright (c) 2000 Josiah Bryan. All rights reserved. This program is free software; 
you can redistribute it and/or modify it under the same terms as Perl itself.

The C<AI::NeuralNet::BackProp> and related modules are free software. THEY COME WITHOUT WARRANTY OF ANY KIND.

                                                             




=head1 THANKS


