AI-NeuralNet-BackProp
view release on metacpan or search on metacpan
BackProp.pm view on Meta::CPAN
my $t0 = new Benchmark;
$self->{RUN}->run($map);
$self->{LAST_TIME}=timestr(timediff(new Benchmark, $t0));
return $self->map();
}
# This automatically uncrunches a response after running it
# Convenience wrapper: runs the network on the given input and then
# uncrunches (de-tokenizes) the resulting output map back into a string.
# Takes the same arguments as run(); returns the uncrunched result.
sub run_uc {
    my $self = $_[0];
    return $self->uncrunch( run(@_) );
}
# Returns the benchmark time and number of loops run or learned
# for the last run() or learn()
# operation performed.
#
# Accessor for the benchmark string recorded by the most recent
# run() or learn() call (stored in the LAST_TIME slot).
# Returns undef if no run/learn has happened yet.
sub benchmarked {
    my ($self) = @_;
    return $self->{LAST_TIME};
}
# Used to retrieve map from last internal run operation.
# Returns the output map produced by the most recent internal run
# operation, by delegating to the map() method of the stored MAP object.
sub map {
    my ($self) = @_;
    return $self->{MAP}->map();
}
# Forces network to learn pattern passed and give desired
# results. See usage in POD.
sub learn {
my $self = shift;
my $omap = shift;
my $res = shift;
my %args = @_;
my $inc = $args{inc} || 0.20;
my $max = $args{max} || 1024;
my $_mx = intr($max/10);
my $_mi = 0;
my $error = ($args{error}>-1 && defined $args{error}) ? $args{error} : -1;
my $div = $self->{DIV};
my $size = $self->{SIZE};
my $out = $self->{OUT};
my $divide = AI::NeuralNet::BackProp->intr($div/$out);
my ($a,$b,$y,$flag,$map,$loop,$diff,$pattern,$value);
my ($t0,$it0);
no strict 'refs';
# Take care of crunching strings passed
$omap = $self->crunch($omap) if($omap == 0);
$res = $self->crunch($res) if($res == 0);
# Fill in empty spaces at end of results matrix with a 0
if($#{$res}<$out) {
for my $x ($#{$res}+1..$out) {
#$res->[$x] = 0;
}
}
# Debug
AI::NeuralNet::BackProp::out1 "Num output neurons: $out, Input neurons: $size, Division: $divide\n";
# Start benchmark timer and initialize a few variables
$t0 = new Benchmark;
$flag = 0;
$loop = 0;
my $ldiff = 0;
my $dinc = 0.0001;
my $cdiff = 0;
$diff = 100;
$error = ($error>-1)?$error:-1;
# $flag only goes high when all neurons in output map compare exactly with
# desired result map or $max loops is reached
#
while(!$flag && ($max ? $loop<$max : 1)) {
$it0 = new Benchmark;
# Run the map
$self->{RUN}->run($omap);
# Retrieve last mapping and initialize a few variables.
$map = $self->map();
$y = $size-$div;
$flag = 1;
# Compare the result map we just ran with the desired result map.
$diff = pdiff($map,$res);
# This adjusts the increment multiplier to decrease as the loops increase
if($_mi > $_mx) {
$dinc *= 0.1;
$_mi = 0;
}
$_mi++;
# We decrement the learning increment to prevent infinite learning loops.
# In old versions of this module, if you used too high an initial
# $inc, the network would keep jumping back and forth over your desired
# results because the increment was too high: it would try to push close to
# the desired result, only to overshoot the other edge too far, thereby
# trying to come back and overshooting again.
# This simply adjusts the learning gradient proportionally to the amount of
# convergence left as the difference decreases.
$inc -= ($dinc*$diff);
$inc = 0.0000000001 if($inc < 0.0000000001);
# This prevents it from seeming to get stuck in one location
# by attempting to boost the values out of the hole they seem to be in.
if($diff eq $ldiff) {
$cdiff++;
$inc += ($dinc*$diff)+($dinc*$cdiff*10);
} else {
$cdiff=0;
}
# Save last $diff
$ldiff = $diff;
# This catches a max error argument and handles it
if(!($error>-1 ? $diff>$error : 1)) {
( run in 2.410 seconds using v1.01-cache-2.11-cpan-acebb50784d )