AI-NeuralNet-BackProp

BackProp.pm

			
			# This keeps the network from getting stuck in one spot by boosting
			# the increment until the values climb out of the hole they appear
			# to be in.
			if($diff == $ldiff) {
				$cdiff++;
				$inc += ($dinc*$diff)+($dinc*$cdiff*10);
			} else {
				$cdiff=0;
			}
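			# For example, with $dinc = 0.05 and $diff stuck at 10%, the boost
			# added to $inc grows on each pass where the error stays flat:
			# +1.0 on the first stuck pass ($cdiff == 1), +1.5 on the second,
			# +2.0 on the third, and so on, until the error finally moves again.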
			
			# Save last $diff
			$ldiff = $diff;
			
			# Handle the maximum-error option: if an error ceiling was given
			# ($error > -1) and $diff has dropped to or below it, stop learning.
			if($error > -1 && $diff <= $error) {
				$flag=1;
				last;
			}
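			# (From the caller's side this corresponds to learn()'s error
			# option, e.g. $net->learn($map, $res, error => 5) to stop once
			# the results come within 5% of the target.)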
			
			# Debugging
			AI::NeuralNet::BackProp::out4 "Difference: $diff\%\t Increment: $inc\tMax Error: $error\%\n";
			AI::NeuralNet::BackProp::out1 "\n\nMapping results from $map:\n";
			
			# This loop compares each element of the output map with the desired result map.
			# If they don't match exactly, we call weight() on the offending output neuron 
			# and tell it what it should be aiming for, and then the offending neuron will
			# try to adjust the weights of its synapses to get closer to the desired output.
			# See comments in the weight() method of AI::NeuralNet::BackProp for how this works.
			my $l=$self->{NET};
			for my $i (0..$out-1) {
				$a = $map->[$i];
				$b = $res->[$i];
				
				AI::NeuralNet::BackProp::out1 "\nmap[$i] is $a\n";
				AI::NeuralNet::BackProp::out1 "res[$i] is $b\n";
					
				for my $j (0..$divide-1) {
					if($a!=$b) {
						AI::NeuralNet::BackProp::out1 "Punishing $self->{NET}->[($i*$divide)+$j] at ",(($i*$divide)+$j)," ($i with $a) by $inc.\n";
						$l->[$y+($i*$divide)+$j]->weight($inc,$b) if($l->[$y+($i*$divide)+$j]);
						$flag	=	0;
					}
				}
			}
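			# A note on the index math above: the network is stored as one flat
			# array in $self->{NET}, and each element of the output map appears
			# to correspond to a block of $divide neurons, with $y (set earlier
			# in learn()) marking the offset of the output layer. Output $i,
			# copy $j therefore lives at $y + ($i * $divide) + $j.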
			
			# This counter is just used in the benchmarking operations.
			$loop++;
			
			AI::NeuralNet::BackProp::out1 "\n\n";
			
			# Benchmark this loop.
			AI::NeuralNet::BackProp::out4 "Learning itetration $loop complete, timed at".timestr(timediff(new Benchmark, $it0),'noc','5.3f')."\n";
		
			# Map the results from this loop.
			AI::NeuralNet::BackProp::out4 "Map: \n";
			AI::NeuralNet::BackProp::join_cols($map,$self->{col_width}) if ($AI::NeuralNet::BackProp::DEBUG);
			AI::NeuralNet::BackProp::out4 "Res: \n";
			AI::NeuralNet::BackProp::join_cols($res,$self->{col_width}) if ($AI::NeuralNet::BackProp::DEBUG);
		}
		
		# Compile benchmarking info for the entire learn() process, then save
		# it, display it, and return it.
		$self->{LAST_TIME}="$loop loops and ".timestr(timediff(Benchmark->new, $t0));
		my $str = "Learning took $loop loops and ".timestr(timediff(Benchmark->new, $t0),'noc','5.3f');
		AI::NeuralNet::BackProp::out2 $str;
		return $str;
	}		
		
1;
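
# A minimal usage sketch of the public API (a modulino-style demo: the block
# below only runs when this file is executed directly, never when it is
# require'd). The constructor arguments and the learn() error option shown
# here follow the module's documented interface; the exact values are
# illustrative only.
unless (caller) {
	my $net = AI::NeuralNet::BackProp->new(2, 3);	# 2 layers of 3 neurons
	$net->learn([1, 1, 0], [0, 1, 1], error => 5);	# stop once error <= 5%
	my $result = $net->run([1, 1, 0]);
	print join(',', @{$result}), "\n";
}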

# Internal input class. Not to be used directly.
package AI::NeuralNet::BackProp::_run;
	
	use strict;
	
	# Dummy constructor.
	sub new {
		bless { PARENT => $_[1] }, $_[0]
	}
	
	# This is so we comply with the neuron interface.
	sub weight {}
	sub input  {}
	
	# Again, compliance with neuron interface.
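	# register_synapse() hands out sequential synapse ids (starting at 0) and
	# records, for each id, whatever connection object the parent network has
	# stashed in {_tmp_synapse} while wiring itself together.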
	sub register_synapse {
		my $self	=	shift;		
		my $sid		=	$self->{REGISTRATION} || 0;
		$self->{REGISTRATION}	=	++$sid;
		$self->{RMAP}->{$sid-1}	= 	$self->{PARENT}->{_tmp_synapse};
		return $sid-1;
	}
	
	# Here is the real meat of this package.
	# run() does one thing: It fires values
	# into the first layer of the network.
	sub run {
		my $self	=	shift;
		my $map		=	shift;
		my $x		=	0;
		# Accept either a pre-built array ref or a plain string; strings are
		# crunch()ed into an array of values first.
		$map = $self->{PARENT}->crunch($map) if(!ref($map));
		return undef if(ref($map) ne "ARRAY");
		foreach my $el (@{$map}) {
			# Stop early if we are given more inputs than there are neurons
			return $x if($x>$self->{PARENT}->{DIV}-1);
			
			# Here we add a small amount of randomness to the input.
			# This keeps the network from getting stuck on a 0 value internally.
			$self->{PARENT}->{NET}->[$x]->input(0,$el+(rand()*$self->{PARENT}->{random}));
			$x++;
		};
		# In case we were given fewer inputs than neurons, feed a constant 1
		# into each of the remaining neurons.
		if($x<$self->{PARENT}->{DIV}) {
			for my $y ($x..$self->{PARENT}->{DIV}-1) {
				$self->{PARENT}->{NET}->[$y]->input(0,1);
			}
		}
		return $x;
	}
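	# For example (assuming the parent network's run() method delegates here):
	# $net->run([0, 1, 1]) fires those three values into the first layer, while
	# $net->run("some string") is first converted into a value map by crunch().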
	
	


