AI-NeuralNet-BackProp


BackProp.pm  view on Meta::CPAN

                print "\nLearning index $x...\n" if($AI::NeuralNet::BackProp::DEBUG);
                my $str = $self->learn($data->[$x*2],    # The list of data to input to the net
                                       $data->[$x*2+1],  # The output desired
                                       inc=>$inc,        # The starting learning gradient
                                       max=>$max,        # The maximum num of loops allowed
                                       error=>$error);   # The maximum (%) error allowed
                print $str if($AI::NeuralNet::BackProp::DEBUG);
        }
                 
         
        my $res;
        $data->[$row] = $self->crunch($data->[$row]) if($data->[$row] == 0);
         
        if ($p) {
                $res=pdiff($data->[$row],$self->run($data->[$row-1]));
        } else {
                $res=$data->[$row]->[0]-$self->run($data->[$row-1])->[0];
        }
        return $res;
}
 
# This sub will take an array ref of a data set, which it expects in this format:
#   my @data_set = (    [ ...inputs... ], [ ...outputs ... ],
#                        ... rows ...
#                    );
#
# This sub returns the percentage of 'forgetfulness' when the net learns all the
# data in the set in RANDOM order. Usage:
#
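#   (Illustrative sketch only -- the data and variable names below are
#    assumptions made for the sake of example, not part of the module itself:)
#
#       my @data_set = ( [ 0,1 ], [ 1 ],
#                        [ 1,0 ], [ 0 ] );
#       my $forgetfulness = $net->learn_set_rand(\@data_set);
#       print "Forgot $forgetfulness% of the set.\n";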

BackProp.pm  view on Meta::CPAN

                        if($flag == 2) {
                                $self->{NET}->[$y+$z]->connect($self->{NET}->[$y+$div+$z]);
                                $self->{NET}->[$y+$z]->connect($self->{NET}->[$y+$z+1]) if($z<$div-1);
                        }
                        AI::NeuralNet::BackProp::out1 "\n";
                }
                AI::NeuralNet::BackProp::out1 "\n";            
        }
         
        # These next two loops connect the _run and _map packages (the IO interface) to
        # the start and end 'layers', respectively. These are how we insert data into
        # the network and how we get data from the network. The _run and _map packages
        # are connected to the neurons so that the neurons think that the IO packages are
        # just another neuron, sending data on. But the IO packs. are special packages designed
        # with the same methods as neurons, just meant for specific IO purposes. You will
        # never need to call any of the IO packs. directly. Instead, they are called whenever
        # you use the run(), map(), or learn() methods of your network.
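        # Rough sketch of the idea (not this module's exact code): each IO package
        # simply exposes the same neuron-style interface, e.g.
        #
        #     sub register_synapse { ... }   # hand back a synapse id, as a neuron would
        #     sub input            { ... }   # accept a value on that synapse
        #
        # so a real neuron can connect() to it without knowing the difference.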
 
AI::NeuralNet::BackProp::out2 "\nMapping I (_run package) connections to network...\n";
         
    for($y=0; $y<$div; $y++) {

BackProp.pm  view on Meta::CPAN

    # to an array associated with that pattern. See usage in documentation.
    sub run {
            my $self        =       shift;
            my $map         =       shift;
            my $t0          =       new Benchmark;
            $self->{RUN}->run($map);
            $self->{LAST_TIME}=timestr(timediff(new Benchmark, $t0));
            return $self->map();
    }
 
# This automatically uncrunches a response after running it
    sub run_uc {
            $_[0]->uncrunch(run(@_));
    }
 
    # Returns the benchmark time and number of loops run or learned
    # for the last run() or learn()
    # operation performed.
    #
    sub benchmarked {
            my $self        =       shift;
            return $self->{LAST_TIME};
    }
         
    # Used to retrieve map from last internal run operation.
    sub map {
            my $self         =        shift;
            $self->{MAP}->map();
    }
     
    # Forces network to learn pattern passed and give desired
    # results. See usage in POD.
    sub learn {
            my $self        =       shift;
            my $omap        =       shift;
            my $res         =       shift;
            my %args    =   @_;
            my $inc         =       $args{inc} || 0.20;
            my $max     =   $args{max} || 1024;
            my $_mx         =       intr($max/10);
            my $_mi         =       0;
            my $error   =   (defined $args{error} && $args{error}>-1) ? $args{error} : -1;
            my $div         =       $self->{DIV};
            my $size        =       $self->{SIZE};
            my $out         =       $self->{OUT};
            my $divide  =   AI::NeuralNet::BackProp->intr($div/$out);
            my ($a,$b,$y,$flag,$map,$loop,$diff,$pattern,$value);
            my ($t0,$it0);
            no strict 'refs';
             
            # Take care of crunching strings passed
            $omap = $self->crunch($omap) if($omap == 0);
            $res  = $self->crunch($res) if($res  == 0);
             
            # Fill in empty spaces at end of results matrix with a 0
            if($#{$res}<$out) {
                    for my $x ($#{$res}+1..$out) {
                            #$res->[$x] = 0;
                    }
            }
             
            # Debug
            AI::NeuralNet::BackProp::out1 "Num output neurons: $out, Input neurons: $size, Division: $divide\n";
             
            # Start benchmark timer and initalize a few variables
            $t0     =       new Benchmark;
    $flag   =       0;
            $loop   =       0;  
            my $ldiff       =       0;
            my $dinc        =       0.0001;
            my $cdiff       =       0;
            $diff           =       100;
            $error          =       ($error>-1)?$error:-1;
             
            # $flag only goes high when all neurons in output map compare exactly with
            # desired result map or $max loops is reached
            #      
            while(!$flag && ($max ? $loop<$max : 1)) {
                    $it0    =       new Benchmark;
                     
                    # Run the map
                    $self->{RUN}->run($omap);
                     
                    # Retrieve last mapping  and initialize a few variables.
                    $map    =       $self->map();
                    $y              =       $size-$div;
                    $flag   =       1;
                     
                    # Compare the result map we just ran with the desired result map.
                    $diff   =       pdiff($map,$res);
                     
                    # This adjusts the increment multiplier to decrease as the loops increase
                    if($_mi > $_mx) {
                            $dinc *= 0.1;
                            $_mi   = 0;
                    }
                     
                    $_mi++;
                             
                     
                    # We decrement the learning increment to help prevent infinite learning loops.
                    # In old versions of this module, if you used too high of an initial
                    # $inc, then the network would keep jumping back and forth over your desired
                    # results because the increment was too high...it would try to push close to
                    # the desired result, only to fly over the other edge too far, thereby trying
                    # to come back, overshooting again.
                    # This simply adjusts the learning gradient proportionally to the amount of
                    # convergence left as the difference decreases.
            $inc   -= ($dinc*$diff);
                    $inc   = 0.0000000001 if($inc < 0.0000000001);
                     
                    # This prevents it from seeming to get stuck in one location
                    # by attempting to boost the values out of the hole they seem to be in.
                    if($diff eq $ldiff) {
                            $cdiff++;

BackProp.pm  view on Meta::CPAN

                $ldiff = $diff;
                 
                # This catches a max error argument and handles it
                if(!($error>-1 ? $diff>$error : 1)) {
                        $flag=1;
                        last;
                }
                 
                # Debugging
                AI::NeuralNet::BackProp::out4 "Difference: $diff\%\t Increment: $inc\tMax Error: $error\%\n";
                AI::NeuralNet::BackProp::out1 "\n\nMapping results from $map:\n";
                 
                # This loop compares each element of the output map with the desired result map.
                # If they don't match exactly, we call weight() on the offending output neuron
                # and tell it what it should be aiming for, and then the offending neuron will
                # try to adjust the weights of its synapses to get closer to the desired output.
                # See comments in the weight() method of AI::NeuralNet::BackProp for how this works.
                my $l=$self->{NET};
                for my $i (0..$out-1) {
                        $a = $map->[$i];
                        $b = $res->[$i];
                         
                        AI::NeuralNet::BackProp::out1 "\nmap[$i] is $a\n";
                        AI::NeuralNet::BackProp::out1 "res[$i] is $b\n";
                                 
                        for my $j (0..$divide-1) {
                                if($a!=$b) {
                                        AI::NeuralNet::BackProp::out1 "Punishing $self->{NET}->[($i*$divide)+$j] at ",(($i*$divide)+$j)," ($i with $a) by $inc.\n";
                                        $l->[$y+($i*$divide)+$j]->weight($inc,$b) if($l->[$y+($i*$divide)+$j]);
                                        $flag   =       0;
                                }
                        }
                }
                 
                # This counter is just used in the benchmarking operations.
                $loop++;
                 
                AI::NeuralNet::BackProp::out1 "\n\n";
                 
                # Benchmark this loop.
                AI::NeuralNet::BackProp::out4 "Learning iteration $loop complete, timed at ".timestr(timediff(new Benchmark, $it0),'noc','5.3f')."\n";
         
                # Map the results from this loop.
                AI::NeuralNet::BackProp::out4 "Map: \n";
                AI::NeuralNet::BackProp::join_cols($map,$self->{col_width}) if ($AI::NeuralNet::BackProp::DEBUG);
                AI::NeuralNet::BackProp::out4 "Res: \n";
                AI::NeuralNet::BackProp::join_cols($res,$self->{col_width}) if ($AI::NeuralNet::BackProp::DEBUG);
        }
         
        # Compile benchmarking info for entire learn() process and return it, save it, and
        # display it.
        $self->{LAST_TIME}="$loop loops and ".timestr(timediff(new Benchmark, $t0));
        my $str = "Learning took $loop loops and ".timestr(timediff(new Benchmark, $t0),'noc','5.3f');
        AI::NeuralNet::BackProp::out2 $str;
        return $str;
}              
        

BackProp.pm  view on Meta::CPAN

# Again, compliance with neuron interface.
sub register_synapse {
        my $self        =       shift;         
        my $sid         =       $self->{REGISTRATION} || 0;
        $self->{REGISTRATION}        =       ++$sid;
        $self->{RMAP}->{$sid-1}   =       $self->{PARENT}->{_tmp_synapse};
        return $sid-1;
}
 
# Here is the real meat of this package.
# run() does one thing: It fires values
# into the first layer of the network.
sub run {
        my $self        =       shift;
        my $map         =       shift;
        my $x           =       0;
        $map = $self->{PARENT}->crunch($map) if($map == 0);
        return undef if(substr($map,0,5) ne "ARRAY");
        foreach my $el (@{$map}) {
                # Catch ourself if we try to run more inputs than neurons
                return $x if($x>$self->{PARENT}->{DIV}-1);

BackProp.pm  view on Meta::CPAN

sub register_synapse {
        my $self        =       shift;         
        my $sid         =       $self->{REGISTRATION} || 0;
        $self->{REGISTRATION}        =       ++$sid;
        $self->{RMAP}->{$sid-1} =         $self->{PARENT}->{_tmp_synapse};
        return $sid-1;
}
 
# This acts just like a regular neuron by receiving
# values from input synapses. Yet, unlike a regular
# neuron, it doesn't weight the values, just stores
# them to be retrieved by a call to map().
sub input  {
        no strict 'refs';            
        my $self        =       shift;
        my $sid         =       shift;
        my $value       =       shift;
        my $size        =       $self->{PARENT}->{DIV};
        my $flag        =       1;
        $self->{OUTPUT}->[$sid]->{VALUE}       =       $self->{PARENT}->intr($value);
        $self->{OUTPUT}->[$sid]->{FIRED}       =       1;

BackProp.pm  view on Meta::CPAN

        return ($self->{palette}->[$color]->{red},$self->{palette}->[$color]->{green},$self->{palette}->[$color]->{blue});
}
         
# Returns the mean of the (r,g,b) values of the palette index passed
sub avg {
        my $self        =       shift;
        my $color       =       shift;
        return $self->{parent}->intr(($self->{palette}->[$color]->{red}+$self->{palette}->[$color]->{green}+$self->{palette}->[$color]->{blue})/3);
}
 
# Loads and decompresses a PCX-format 320x200, 8-bit image file and returns
# two arrays: the first is a 64000-element array, each element containing a palette
# index, and the second is a 255-element array, each element a hash
# ref with the keys 'red', 'green', and 'blue', each key containing the respective color
# component for that color index in the palette.
sub load_pcx {
        shift if(substr($_[0],0,4) eq 'AI::');
         
        # open the file
        open(FILE, "$_[0]");
        binmode(FILE);
         
        my $tmp;
        my @image;
        my @palette;
        my $data;
         
        # Read header
        read(FILE,$tmp,128);
         
        # load the data and decompress into buffer
        my $count=0;
         
        while($count<320*200) {
             # get the first piece of data
             read(FILE,$data,1);
         $data=ord($data);
          
             # is this a rle?
             if ($data>=192 && $data<=255) {
                # how many bytes in run?

BackProp.pm  view on Meta::CPAN

        my @phrases = (
                $phrase1, $phrase2,
                $phrase3, $phrase4
        );
 
        # Learn the data set   
        $net->learn_set(\@phrases);
         
        # Run a test phrase through the network
        my $test_phrase = $net->crunch("I love neural networking!");
        my $result = $net->run($test_phrase);
         
        # Get this, it prints "Jay Leno is  networking!" ...  LOL!
        print $net->uncrunch($result),"\n";
 
 
 
=head1 UPDATES
 
This is version 0.89. In this version I have included a new feature, output range limits, as
well as automatic crunching of run() and learn*() inputs. Included in the examples directory
are seven new practical-use example scripts. Also implemented in this version is a much cleaner
learning function for individual neurons which is more accurate than previous versions and is
based on the LMS rule. See range() for information on output range limits. I have also updated

BackProp.pm  view on Meta::CPAN

weighting is taken care of by the receiving neuron.) This is the
method used to connect cells in every network built by this package.
 
Input is fed into the network via a call like this:
 
        use AI::NeuralNet::BackProp;
        my $net = new AI::NeuralNet::BackProp(2,2);
         
        my @map = (0,1);
         
        my $result = $net->run(\@map);
         
 
Now, this call would probably not give what you want, because
the network hasn't "learned" any patterns yet. But this
illustrates the call. Run now allows strings to be used as
input. See run() for more information.
 
 
Run returns a reference with $size elements (Remember $size? $size
is what you passed as the second argument to the network
constructor.) This array contains the results of the mapping. If
you ran the example exactly as shown above, $result would probably
contain (1,1) as its elements.
 
To make the network learn a new pattern, you simply call the learn
method with a sample input and the desired result, both array
references of $size length. Example:
 
        use AI::NeuralNet::BackProp;
        my $net = new AI::NeuralNet::BackProp(2,2);
         
        my @map = (0,1);
        my @res = (1,0);
         
        $net->learn(\@map,\@res);
         
        my $result = $net->run(\@map);
 
Now $result will contain (1,0), effectively flipping the input pattern
around. Obviously, the larger $size is, the longer it will take
to learn a pattern. Learn() returns a string in the form of
 
        Learning took X loops and X wallclock seconds (X.XXX usr + X.XXX sys = X.XXX CPU).
 
With the X's replaced by the time and loop values for that call. So,
to view the learning stats for every learn call, you can just:
         
        print $net->learn(\@map,\@res);
         
 
If you call "$net->debug(4)" with $net being the
reference returned by the new() constructor, you will get benchmarking
information for the learn function, as well as plenty of other information output.
See notes on debug() in the METHODS section, below.
 
If you do call $net->debug(1), it is a good
idea to point STDIO of your script to a file, as a lot of information is output. I often
use this command line:

BackProp.pm  view on Meta::CPAN

 
 
As you can see, each neuron is connected to the next one in its layer, as well
as the neuron directly above itself.
         
 
Before you can really do anything useful with your new neural network
object, you need to teach it some patterns. See the learn() method, below.
 
=item $net->learn($input_map_ref, $desired_result_ref [, options ]);
 
This will 'teach' a network to associate a new input map with a desired result.
It will return a string containing benchmarking information. You can retrieve the
pattern index that the network stored the new input map in after learn() is complete
with the pattern() method, below.
 
UPDATED: You can now specify strings as inputs and outputs to learn(), and they will be crunched
automatically. Example:
 
        $net->learn('corn', 'cob');
        # Before this update, you had to do this:
        # $net->learn($net->crunch('corn'), $net->crunch('cob'));

BackProp.pm  view on Meta::CPAN

$learning_gradient is an optional value used to adjust the weights of the internal
connections. If $learning_gradient is omitted, it defaults to 0.20.
  
$maximum_iterations is the maximum number of iterations the loop should do.
It defaults to 1024.  Set it to 0 if you never want the loop to quit before
the pattern is perfectly learned.
 
$maximum_allowable_percentage_of_error is the maximum allowable error to have. If
this is set, then learn() will return when the percentage difference between the
actual results and desired results falls below $maximum_allowable_percentage_of_error.
If you do not include 'error', or $maximum_allowable_percentage_of_error is set to -1,
then learn() will not return until it gets an exact match for the desired result OR it
reaches $maximum_iterations.
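 
For example, a call using all three options might look like this (the option
names are the ones documented above; the particular values here are just an
arbitrary sketch):
 
        $net->learn($net->crunch('corn'), $net->crunch('cob'),
                    inc   => 0.3,    # starting learning gradient
                    max   => 500,    # give up after 500 loops
                    error => 5);     # or stop when within 5% of the target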
 
 
=item $net->learn_set(\@set, [ options ]);
 
UPDATE: Inputs and outputs in the dataset can now be strings. See information on auto-crunching
in learn()
 
This takes the same options as learn() and allows you to specify a set to learn, rather
than individual patterns. A dataset is an array reference with at least two elements in the

BackProp.pm  view on Meta::CPAN

        );
 
 
See the paragraph on measuring forgetfulness, below. There are
two learn_set()-specific option tags available:
 
        flag     =>  $flag
        pattern  =>  $row
 
If "flag" is set to some TRUE value, as in "flag => 1" in the hash of options, or if the option "flag"
is not set, then it will return a percentage representing the amount of forgetfulness. Otherwise,
learn_set() will return an integer specifying the amount of forgetfulness when all the patterns
are learned.
 
If "pattern" is set, then learn_set() will use that pattern in the data set to measure forgetfulness by.
If "pattern" is omitted, it defaults to the first pattern in the set. Example:
 
        my @set = (
                [ 0,1,0,1 ],  [ 0 ],
                [ 0,0,1,0 ],  [ 1 ],
                [ 1,1,0,1 ],  [ 2 ],  #  <---
                [ 0,1,1,0 ],  [ 3 ]
        );
         
If you wish to measure forgetfulness as indicated by the line with the arrow, then you would
pass 2 as the "pattern" option, as in "pattern => 2".
 
Now why the heck would anyone want to measure forgetfulness, you ask? Maybe you wonder how I
even measure that. Well, it is not a vital value that you have to know. I just put in a
"forgetfulness measure" one day because I thought it would be neat to know.
 
How the module measures forgetfulness is this: First, it learns all the patterns in the set provided,
then it will run the very first pattern (or whatever pattern is specified by the "pattern" option)
in the set after it has finished learning. It will compare the run() output with the desired output
as specified in the dataset. In a perfect world, the two should match exactly. What we measure is
how much that they don't match, thus the amount of forgetfulness the network has.
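 
In code terms, the measurement amounts to something like this (an illustrative
sketch, not the module's internal code; @set is the example data set shown
above):
 
        # learn every pattern in the set, measuring against pattern 2...
        my $forgetfulness = $net->learn_set(\@set, pattern => 2);
         
        # ...which is roughly equivalent to doing this by hand afterwards:
        my $by_hand = $net->pdiff($set[2*2+1], $net->run($set[2*2]));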
 
NOTE: In version 0.77 percentages were disabled because of a bug. Percentages are now enabled.
 
Example (from examples/ex_dow.pl):
 
        # Data from 1989 (as far as I know..this is taken from example data on BrainMaker)

BackProp.pm  view on Meta::CPAN

any data previously learned after disabling ranging, as disabling range invalidates the current
weight matrix in the network.
 
range() automatically scales the network's outputs to fit inside the size of range you allow, and, therefore,
it keeps track of the maximum output it can expect to scale. Therefore, you will need to learn()
the whole data set again after calling range() on a network.
 
Subsequent calls to range() invalidate any previous calls to range().
 
NOTE: It is recommended that you call range() before you call learn(), or else you will get unexpected
results from any run() call after range().
 
 
=item $net->range($bottom..$top);
 
This is a common form often used in a C<for my $x (0..20)> type of for() construct. It works
the exact same way. It will allow all numbers from $bottom to $top, inclusive, to be given
as outputs of the network. No other values will be possible, other than those between $bottom
and $top, inclusive.
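 
For example (an illustrative sketch showing both this form and the array-ref
form of range()):
 
        $net->range(0..5);         # only 0,1,2,3,4,5 are possible outputs
         
        my @values = (2,4,8,16);
        $net->range(\@values);     # only these four values are possible outputs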

BackProp.pm  view on Meta::CPAN

as the network runs. In this mode, it is a good idea to pipe your STDIO to a file, especially
for large programs.
 
Level 2 ($level = 2) : A slightly-less verbose form of debugging, not as many internal
data dumps.
 
Level 3 ($level = 3) : JUST prints weight mapping as weights change.
 
Level 4 ($level = 4) : JUST prints the benchmark info for EACH learn loop iteration, not just
learning as a whole. Also prints the percentage difference for each loop between current network
results and desired results, as well as the learning gradient ('increment').
 
Level 4 is useful for seeing if you need to give a smaller learning increment to learn() .
I used level 4 debugging quite often in creating the letters.pl example script and the small_1.pl
example script.
 
Toggles debugging off when called with no arguments.
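 
For example (a sketch):
 
        $net->debug(4);                   # verbose per-iteration benchmarks
        print $net->learn(\@map, \@res);
        $net->debug();                    # toggle debugging back off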
 
 
 
=item $net->save($filename);

BackProp.pm  view on Meta::CPAN

This will save the complete state of the network to disk, including all weights and any
words crunched with crunch() . Also saves any output ranges set with range() .
 
This has now been modified to use a simple flat-file text storage format, and it does not
depend on any external modules now.
 
 
 
=item $net->load($filename);
 
This will load from disk any network saved by save() and completely restore the internal
state at the point save() was called.
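 
A typical round trip looks like this (the filename is just an example):
 
        $net->save('mynet.net');
         
        # ...later, restore it into a freshly constructed network
        my $net2 = new AI::NeuralNet::BackProp(2,2);
        $net2->load('mynet.net');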
 
 
 
 
=item $net->join_cols($array_ref,$row_length_in_elements,$high_state_character,$low_state_character);
 
This is more of a utility function than any real necessary function of the package.
Instead of joining all the elements of the array together in one long string, like join() ,
it prints the elements of $array_ref to STDIO, adding a newline (\n) after every $row_length_in_elements

BackProp.pm  view on Meta::CPAN

=item $net->pdiff($array_ref_A, $array_ref_B);
 
This function is used VERY heavily internally to calculate the difference in percent
between elements of the two array refs passed. It returns a %.10f (sprintf-format)
percent string.
 
 
=item $net->p($a,$b);
 
Returns a floating point number which represents $a as a percentage of $b.
 
 
 
=item $net->intr($float);
 
Rounds a floating-point number to an integer using sprintf() and int(). Provides
better rounding than just calling int() on the float. Also used very heavily internally.
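 
For example (illustrative values):
 
        print $net->intr(4.7), "\n";                  # rounds to 5
        print $net->p(25, 100), "\n";                 # 25 as a percentage of 100
        print $net->pdiff([1,2,3], [1,2,4]), "\n";    # percent difference of two maps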

BackProp.pm  view on Meta::CPAN

 
 
 
=item $net->crunch($string);
 
UPDATE: Now you can use a variable instead of using qw(). Strings will be split internally.
Do not use qw() to pass strings to crunch.
 
This splits a string passed with /[\s\t]/ into an array ref containing unique indexes
to the words. The words are stored in an internal array and preserved across load() and save()
calls. This is designed to be used to generate unique maps suitable for passing to learn() and
run() directly. It returns an array ref.
 
The words are not duplicated internally. For example:
 
        $net->crunch("How are you?");
 
Will probably return an array ref containing 1,2,3. A subsequent call of:
 
    $net->crunch("How is Jane?");

BackProp.pm  view on Meta::CPAN

        for (0..3) {            # Note: The four learn() statements below could
                                                # be replaced with learn_set() to do the same thing,
                                                # but use this form here for clarity.
 
                $net->learn($net->crunch("I love chips."),  $net->crunch("That's Junk Food!"));
                $net->learn($net->crunch("I love apples."), $net->crunch("Good, Healthy Food."));
                $net->learn($net->crunch("I love pop."),    $net->crunch("That's Junk Food!"));
                $net->learn($net->crunch("I love oranges."),$net->crunch("Good, Healthy Food."));
        }
         
        my $response = $net->run($net->crunch("I love corn."));
         
        print $net->uncrunch($response),"\n";
 
 
On my system, this responds with, "Good, Healthy Food." If you run the network with
"I love pop.", though, you will probably get "Food! apples. apples." (At least it returns
that on my system.) As you can see, the associations are not yet perfect, but it can make
for some interesting demos!
 
 
 
=item $net->crunched($word);
 
This will return undef if the word is not in the internal crunch list, or it will return the
index of the word if it exists in the crunch list.
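 
For example (a sketch):
 
        my $index = $net->crunched('How');
        print defined $index ? "stored at index $index\n" : "not in the crunch list\n";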
 
 
=item $net->col_width($width);

BackProp.pm  view on Meta::CPAN

This will set the randomness factor of the network. Default is 0.001. When called
with no arguments, or an undef value, it will return current randomness value. When
called with a 0 value, it will disable randomness in the network. See NOTES on learning
a 0 value in the input map with randomness disabled.
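 
For example:
 
        $net->random(0.0001);     # enable a small amount of internal noise
        print $net->random();     # query the current randomness factor
        $net->random(0);          # disable randomness entirely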
 
 
 
=item $net->load_pcx($filename);
 
Oh, here's a treat... this routine will load a PCX-format file (yah, I know ... ancient format ... but
it is the only one I could find specs for to write it in Perl. If anyone can get specs for
any other formats, or could write a loader for them, I would be very grateful!) Anyway, it loads a PCX-format
file that is exactly 320x200 with 8 bits per pixel, in pure Perl. It returns a blessed reference to
an AI::NeuralNet::BackProp::PCX object, which supports the following routines/members. See the example
files ex_pcxl.pl and ex_pcx.pl in the ./examples/ directory.
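 
A quick sketch of its use (the filename is only an example):
 
        my $pcx = $net->load_pcx('face.pcx');
        print "Pixel (10,10) is palette index ", $pcx->get(10,10), "\n";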
 
 
 
=item $pcx->{image}
 
This is an array reference to the entire image. The array contains exactly 64000 elements; each
element is a number corresponding to an index into the palette array, detailed below.
 
 
 
=item $pcx->{palette}
 
This is an array ref to an AoH (array of hashes). Each element has the following three keys:
         
        $pcx->{palette}->[0]->{red};
        $pcx->{palette}->[0]->{green};
        $pcx->{palette}->[0]->{blue};
 
Each is in the range of 0..63, corresponding to their named color component.
 
 
 
=item $pcx->get_block($array_ref);
 
Returns a rectangular block defined by an array ref in the form of:
         
        [$left,$top,$right,$bottom]
 
These must be in the range of 0..319 for $left and $right, and the range of 0..199 for

BackProp.pm  view on Meta::CPAN

=item $pcx->get($x,$y);
 
Returns the value of pixel at image coordinates $x,$y.
$x must be in the range of 0..319 and $y must be in the range of 0..199.
 
 
 
=item $pcx->rgb($index);
 
Returns a 3-element array (not array ref) with each element corresponding to the red, green, or
blue color components, respectively.
 
 
 
=item $pcx->avg($index);    
 
Returns the mean value of the red, green, and blue values at the palette index in $index.
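 
Putting the pixel and palette accessors together (coordinates here are only
illustrative):
 
        my $color       = $pcx->get(160, 100);    # palette index at the image center
        my ($r, $g, $b) = $pcx->rgb($color);      # its color components, each 0..63
        my $brightness  = $pcx->avg($color);      # mean of the three components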
         
 
 
=head1 NOTES
 
=item Learning 0s With Randomness Disabled
 
You can now use 0 values in any input maps. This is a good improvement over versions 0.40
and 0.42, where no 0s were allowed because the learning would never completely finish
with a 0 in the input.
 
Yet with the allowance of 0s, it requires one of two factors to learn correctly. Either you
must enable randomness with $net->random(0.0001) (Any values work [other than 0], see random() ),
or you must set an error-minimum with the 'error => 5' option (you can use some other error value
as well).
 
When randomness is enabled (that is, when you call random() with a value other than 0), it interjects
a bit of randomness into the output of every neuron in the network, except for the input and output
neurons. The randomness is interjected with rand()*$rand, where $rand is the value that was
passed to the random() call. This ensures that the network will never have a pure 0 internally. A
pure 0 is bad internally because the weights cannot change it: a 0 multiplied by any weight is
still 0. A value like 0.00001, on the other hand, can eventually be weighted up enough to learn.
With a pure 0 instead of 0.00001 or whatever, the network would never be able
to add enough weight to get anything other than a 0.
 
The second option to allow for 0s is to enable a maximum error with the 'error' option in
learn() , learn_set() , and learn_set_rand() . This allows the network to not worry about
learning an output perfectly.
 
For accuracy reasons, it is recommended that you work with 0s using the random() method.
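 
Either approach looks like this in practice (a sketch; the maps are arbitrary):
 
        # Option 1: inject a little randomness so internal values never sit at 0
        $net->random(0.0001);
        $net->learn([0,1,0], [1,0,1]);
         
        # Option 2: leave randomness off, but accept a small error margin
        $net->random(0);
        $net->learn([0,1,0], [1,0,1], error => 5);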

BackProp.pm  view on Meta::CPAN

appreciate it greatly if you could report them to me at F<E<lt>jdb@wcoil.comE<gt>>,
or, even better, try to patch them yourself and figure out why the bug is being buggy, and
send me the patched code, again at F<E<lt>jdb@wcoil.comE<gt>>.
 
 
 
=head1 AUTHOR
 
Josiah Bryan F<E<lt>jdb@wcoil.comE<gt>>
 
Copyright (c) 2000 Josiah Bryan. All rights reserved. This program is free software;
you can redistribute it and/or modify it under the same terms as Perl itself.
 
The C<AI::NeuralNet::BackProp> and related modules are free software. THEY COME WITHOUT WARRANTY OF ANY KIND.
 
                                                              
 
 
 
 
=head1 THANKS

BackProp.pm  view on Meta::CPAN

You can always download the latest copy of AI::NeuralNet::BackProp
 
 
=head1 MAILING LIST
 
A mailing list has been setup for AI::NeuralNet::BackProp for discussion of AI and
neural net related topics as they pertain to AI::NeuralNet::BackProp. I will also
announce in the group each time a new release of AI::NeuralNet::BackProp is available.
 
The list address is at: ai-neuralnet-backprop@egroups.com
 
To subscribe, send a blank email to: ai-neuralnet-backprop-subscribe@egroups.com 
 
 
=cut

docs.htm  view on Meta::CPAN

                $phrase1, $phrase2,
                $phrase3, $phrase4
        );
 
        # Learn the data set   
        $net-&gt;learn_set(\@phrases);
 
 
        # Run a test phrase through the network
        my $test_phrase = $net-&gt;crunch(&quot;I love neural networking!&quot;);
        my $result = $net-&gt;run($test_phrase);
 
        # Get this, it prints &quot;Jay Leno is  networking!&quot; ...  LOL!
        print $net-&gt;uncrunch($result),&quot;\n&quot;
         
</PRE>       
<P>
<HR SIZE=1 COLOR=BLACK>
<H1><A NAME="updates">UPDATES</A></H1>
<P>This is version 0.89. In this version I have included a new feature, output range limits, as
well as automatic crunching of <A HREF="#item_run"><CODE>run()</CODE></A> and learn*() inputs. Included in the examples directory
are seven new practical-use example scripts. Also implemented in this version is a much cleaner
learning function for individual neurons which is more accurate than previous versions and is
based on the LMS rule. See <A HREF="#item_range"><CODE>range()</CODE></A> for information on output range limits. I have also updated

docs.htm  view on Meta::CPAN

its output has when it sends it, it just sends its output and the
weighting is taken care of by the receiving neuron.) This is the
method used to connect cells in every network built by this package.</P>
<P>Input is fed into the network via a call like this:</P>
<PRE>
        use AI::NeuralNet::BackProp;
        my $net = new AI::NeuralNet::BackProp(2,2);
 
        my @map = (0,1);
 
        my $result = $net-&gt;run(\@map);</PRE>
<P>Now, this call would probably not give what you want, because
the network hasn't ``learned'' any patterns yet. But this
illustrates the call. Run now allows strings to be used as
input. See <A HREF="#item_run"><CODE>run()</CODE></A> for more information.</P>
<P>Run returns a reference with $size elements (Remember $size? $size
is what you passed as the second argument to the network
constructor.) This array contains the results of the mapping. If
you ran the example exactly as shown above, $result would probably
contain (1,1) as its elements.</P>
<P>To make the network learn a new pattern, you simply call the learn
method with a sample input and the desired result, both array
references of $size length. Example:</P>
<PRE>
        use AI::NeuralNet::BackProp;
        my $net = new AI::NeuralNet::BackProp(2,2);
 
        my @map = (0,1);
        my @res = (1,0);
 
        $net-&gt;learn(\@map,\@res);
 
        my $result = $net-&gt;run(\@map);</PRE>
<P>Now $result will contain (1,0), effectively flipping the input pattern
around. Obviously, the larger $size is, the longer it will take
to learn a pattern. <CODE>Learn()</CODE> returns a string in the form of</P>
<PRE>
        Learning took X loops and X wallclock seconds (X.XXX usr + X.XXX sys = X.XXX CPU).</PRE>
<P>With the X's replaced by the time and loop values for that call. So,
to view the learning stats for every learn call, you can just:
</P>
<PRE>
        print $net-&gt;learn(\@map,\@res);</PRE>
<P>If you call ``$net-&gt;debug(4)'' with $net being the
reference returned by the <CODE>new()</CODE> constructor, you will get benchmarking
information for the learn function, as well as plenty of other information output.
See notes on <A HREF="#item_debug"><CODE>debug()</CODE></A> in the METHODS section, below.</P>
<P>If you do call $net-&gt;debug(1), it is a good
idea to point STDIO of your script to a file, as a lot of information is output. I often
use this command line:</P>
<PRE>
        $ perl some_script.pl &gt; .out</PRE>
<P>Then I can simply go and use emacs or any other text editor and read the output at my leisure,

docs.htm  view on Meta::CPAN

        |   |   |
        |   |   |
        O--&gt;O--&gt;O
        ^   ^   ^
        |   |   |</PRE>
<P>As you can see, each neuron is connected to the next one in its layer, as well
as the neuron directly above itself.</P>
<P>Before you can really do anything useful with your new neural network
object, you need to teach it some patterns. See the <A HREF="#item_learn"><CODE>learn()</CODE></A> method, below.</P>
<P></P>
<DT><STRONG><A NAME="item_learn">$net-&gt;learn($input_map_ref, $desired_result_ref [, options ]);</A></STRONG><BR>
<DD>
This will 'teach' a network to associate a new input map with a desired result.
It will return a string containing benchmarking information. You can retrieve the
pattern index that the network stored the new input map in after <A HREF="#item_learn"><CODE>learn()</CODE></A> is complete
with the <CODE>pattern()</CODE> method, below.
<P><B>UPDATED:</B> You can now specify strings as inputs and outputs to learn(), and they will be crunched
automatically. Example:</P>
<PRE>
        $net-&gt;learn('corn', 'cob');
        # Before this update, you had to do this:
        # $net-&gt;learn($net-&gt;crunch('corn'), $net-&gt;crunch('cob'));</PRE>
<P>Note, the old method of calling crunch on the values still works just as well.</P>

docs.htm  view on Meta::CPAN

         error  =&gt;      $maximum_allowable_percentage_of_error</PRE>
<P>$learning_gradient is an optional value used to adjust the weights of the internal
connections. If $learning_gradient is omitted, it defaults to 0.20.
</P>
<P>
$maximum_iterations is the maximum number of iterations the loop should do.
It defaults to 1024.  Set it to 0 if you never want the loop to quit before
the pattern is perfectly learned.</P>
<P>$maximum_allowable_percentage_of_error is the maximum allowable error to have. If
this is set, then <A HREF="#item_learn"><CODE>learn()</CODE></A> will return when the percentage difference between the
actual results and desired results falls below $maximum_allowable_percentage_of_error.
If you do not include 'error', or $maximum_allowable_percentage_of_error is set to -1,
then <A HREF="#item_learn"><CODE>learn()</CODE></A> will not return until it gets an exact match for the desired result OR it
reaches $maximum_iterations.</P>
<P></P>
<DT><STRONG><A NAME="item_learn_set">$net-&gt;learn_set(\@set, [ options ]);</A></STRONG><BR>
<DD>
<B>UPDATED:</B> Inputs and outputs in the dataset can now be strings. See information on auto-crunching
in <A HREF="#item_learn"><CODE>learn()</CODE></A>
<P>This takes the same options as <A HREF="#item_learn"><CODE>learn()</CODE></A> and allows you to specify a set to learn, rather
than individual patterns. A dataset is an array reference with at least two elements in the
array, each element being another array reference (or now, a scalar string). For each pattern to
learn, you must specify an input array ref, and an output array ref as the next element. Example:

docs.htm  view on Meta::CPAN

                # inputs        outputs
                [ 1,2,3,4 ],  [ 1,3,5,6 ],
                [ 0,2,5,6 ],  [ 0,2,1,2 ]
        );</PRE>
<P>See the paragraph on measuring forgetfulness, below. There are
two learn_set()-specific option tags available:</P>
<PRE>
        flag     =&gt;  $flag
        pattern  =&gt;  $row</PRE>
<P>If ``flag'' is set to some TRUE value, as in ``flag =&gt; 1'' in the hash of options, or if the option ``flag''
is not set, then it will return a percentage representing the amount of forgetfulness. Otherwise,
<A HREF="#item_learn_set"><CODE>learn_set()</CODE></A> will return an integer specifying the amount of forgetfulness when all the patterns
are learned.</P>
<P>If ``pattern'' is set, then <A HREF="#item_learn_set"><CODE>learn_set()</CODE></A> will use that pattern in the data set to measure forgetfulness by.
If ``pattern'' is omitted, it defaults to the first pattern in the set. Example:</P>
<PRE>
        my @set = (
                [ 0,1,0,1 ],  [ 0 ],
                [ 0,0,1,0 ],  [ 1 ],
                [ 1,1,0,1 ],  [ 2 ],  #  &lt;---
                [ 0,1,1,0 ],  [ 3 ]
        );
</PRE>
<P>
If you wish to measure forgetfulness as indicated by the line with the arrow, then you would
pass 2 as the &quot;pattern&quot; option, as in &quot;pattern =&gt; 2&quot;.</P>
<P>Now why the heck would anyone want to measure forgetfulness, you ask? Maybe you wonder how I
even measure that. Well, it is not a vital value that you have to know. I just put in a
``forgetfulness measure'' one day because I thought it would be neat to know.</P>
<P>How the module measures forgetfulness is this: First, it learns all the patterns in the set provided,
then it will run the very first pattern (or whatever pattern is specified by the ``pattern'' option)
in the set after it has finished learning. It will compare the <A HREF="#item_run"><CODE>run()</CODE></A> output with the desired output
as specified in the dataset. In a perfect world, the two should match exactly. What we measure is
how much that they don't match, thus the amount of forgetfulness the network has.</P>
<P>NOTE: In version 0.77 percentages were disabled because of a bug. Percentages are now enabled.</P>
<P>Example (from examples/ex_dow.pl):</P>
<PRE>
        # Data from 1989 (as far as I know..this is taken from example data on BrainMaker)
        my @data = (
                #       Mo  CPI  CPI-1 CPI-3    Oil  Oil-1 Oil-3    Dow   Dow-1 Dow-3   Dow Ave (output)

docs.htm  view on Meta::CPAN

This allows you to limit the possible outputs to a specific set of values. There are several
ways you can specify the set of values to limit the output to. Each method is shown below.
When called without any arguements, it will disable output range limits. You will need to re-learn
any data previously learned after disabling ranging, as disabling range invalidates the current
weight matrix in the network.
<P><A HREF="#item_range"><CODE>range()</CODE></A> automatically scales the network's outputs to fit inside the size of range you allow, and, therefore,
it keeps track of the maximum output it can expect to scale. Therefore, you will need to <A HREF="#item_learn"><CODE>learn()</CODE></A>
the whole data set again after calling <A HREF="#item_range"><CODE>range()</CODE></A> on a network.</P>
<P>Subsequent calls to <A HREF="#item_range"><CODE>range()</CODE></A> invalidate any previous calls to <A HREF="#item_range"><CODE>range()</CODE></A></P>
<P>NOTE: It is recommended that you call <A HREF="#item_range"><CODE>range()</CODE></A> before you call <A HREF="#item_learn"><CODE>learn()</CODE></A>, or else you will get unexpected
results from any <A HREF="#item_run"><CODE>run()</CODE></A> call after <A HREF="#item_range"><CODE>range()</CODE></A> .</P>
<P></P>
<DT><STRONG>$net-&gt;range($bottom..$top);</STRONG><BR>
<DD>
This is a common form often used in a <CODE>for my $x (0..20)</CODE> type of <CODE>for()</CODE> construct. It works
the exact same way. It will allow all numbers from $bottom to $top, inclusive, to be given
as outputs of the network. No other values will be possible, other than those between $bottom
and $top, inclusive.
<P></P>
<DT><STRONG>$net-&gt;range(\@values);</STRONG><BR>
<DD>

docs.htm  view on Meta::CPAN

<P>Level 0 ($level = 0) : Default, no debugging information printed. All printing is
left to calling script.</P>
<P>Level 1 ($level = 1) : This causes ALL debugging information for the network to be dumped
as the network runs. In this mode, it is a good idea to pipe your STDIO to a file, especially
for large programs.</P>
<P>Level 2 ($level = 2) : A slightly-less verbose form of debugging, not as many internal
data dumps.</P>
<P>Level 3 ($level = 3) : JUST prints weight mapping as weights change.</P>
<P>Level 4 ($level = 4) : JUST prints the benchmark info for EACH learn loop iteration, not just
learning as a whole. Also prints the percentage difference for each loop between current network
results and desired results, as well as the learning gradient ('increment').</P>
<P>Level 4 is useful for seeing if you need to give a smaller learning increment to <A HREF="#item_learn"><CODE>learn()</CODE></A> .
I used level 4 debugging quite often in creating the letters.pl example script and the small_1.pl
example script.</P>
<P>Toggles debugging off when called with no arguments.</P>
<P></P>
<DT><STRONG><A NAME="item_save">$net-&gt;save($filename);</A></STRONG><BR>
<DD>
This will save the complete state of the network to disk, including all weights and any
words crunched with <A HREF="#item_crunch"><CODE>crunch()</CODE></A> . Also saves any output ranges set with <A HREF="#item_range"><CODE>range()</CODE></A> .
<P>This has now been modified to use a simple flat-file text storage format, and it does not
depend on any external modules now.</P>
<P></P>
<DT><STRONG><A NAME="item_load">$net-&gt;load($filename);</A></STRONG><BR>
<DD>
This will load from disk any network saved by <A HREF="#item_save"><CODE>save()</CODE></A> and completely restore the internal
state at the point <A HREF="#item_save"><CODE>save()</CODE></A> was called.
<P></P>
<DT><STRONG><A NAME="item_join_cols">$net-&gt;join_cols($array_ref,$row_length_in_elements,$high_state_character,$low_state_character);</A></STRONG><BR>
<DD>
This is more of a utility function than any real necessary function of the package.
Instead of joining all the elements of the array together in one long string, like <CODE>join()</CODE> ,
it prints the elements of $array_ref to STDIO, adding a newline (\n) after every $row_length_in_elements
number of elements has passed. Additionally, if you include a $high_state_character and a $low_state_character,
it will print the $high_state_character (can be more than one character) for every element that
has a true value, and the $low_state_character for every element that has a false value.

docs.htm  view on Meta::CPAN

by a null character (\0). <A HREF="#item_join_cols"><CODE>join_cols()</CODE></A> defaults to the latter behaviour.
<P></P>
<DT><STRONG><A NAME="item_pdiff">$net-&gt;pdiff($array_ref_A, $array_ref_B);</A></STRONG><BR>
<DD>
This function is used VERY heavily internally to calculate the difference in percent
between elements of the two array refs passed. It returns a %.10f (sprintf-format)
percent string.
<P></P>
<DT><STRONG><A NAME="item_p">$net-&gt;p($a,$b);</A></STRONG><BR>
<DD>
Returns a floating point number which represents $a as a percentage of $b.
<P></P>
<DT><STRONG><A NAME="item_intr">$net-&gt;intr($float);</A></STRONG><BR>
<DD>
Rounds a floating-point number to an integer using <CODE>sprintf()</CODE> and <CODE>int()</CODE>. Provides
better rounding than just calling <CODE>int()</CODE> on the float. Also used very heavily internally.
<P></P>
<DT><STRONG><A NAME="item_high">$net-&gt;high($array_ref);</A></STRONG><BR>
<DD>
Returns the index of the element in array REF passed with the highest comparative value.
<P></P>

docs.htm  view on Meta::CPAN

<DT><STRONG><A NAME="item_show">$net-&gt;show();</A></STRONG><BR>
<DD>
This will dump a simple listing of all the weights of all the connections of every neuron
in the network to STDIO.
<P></P>
<DT><STRONG><A NAME="item_crunch">$net-&gt;crunch($string);</A></STRONG><BR>
<DD>
<B>UPDATED:</B> Now you can use a variable instead of using qw(). Strings will be split internally.
Do not use <CODE>qw()</CODE> to pass strings to crunch.
<P>This splits a string passed with /[\s\t]/ into an array ref containing unique indexes
to the words. The words are stored in an internal array and preserved across <A HREF="#item_load"><CODE>load()</CODE></A> and <A HREF="#item_save"><CODE>save()</CODE></A>
calls. This is designed to be used to generate unique maps suitable for passing to <A HREF="#item_learn"><CODE>learn()</CODE></A> and
<A HREF="#item_run"><CODE>run()</CODE></A> directly. It returns an array ref.</P>
<P>The words are not duplicated internally. For example:</P>
<PRE>
        $net-&gt;crunch(&quot;How are you?&quot;);</PRE>
<P>Will probably return an array ref containing 1,2,3. A subsequent call of:</P>
<PRE>
    $net-&gt;crunch(&quot;How is Jane?&quot;);</PRE>
<P>Will probably return an array ref containing 1,4,5. Notice, the first element stayed
the same. That is because it already stored the word ``How''. So, each word is stored

docs.htm  view on Meta::CPAN

        for (0..3) {            # Note: The four learn() statements below could
                                                # be replaced with learn_set() to do the same thing,
                                                # but use this form here for clarity.
                $net-&gt;learn($net-&gt;crunch(&quot;I love chips.&quot;),  $net-&gt;crunch(&quot;That's Junk Food!&quot;));
                $net-&gt;learn($net-&gt;crunch(&quot;I love apples.&quot;), $net-&gt;crunch(&quot;Good, Healthy Food.&quot;));
                $net-&gt;learn($net-&gt;crunch(&quot;I love pop.&quot;),    $net-&gt;crunch(&quot;That's Junk Food!&quot;));
                $net-&gt;learn($net-&gt;crunch(&quot;I love oranges.&quot;),$net-&gt;crunch(&quot;Good, Healthy Food.&quot;));
        }
 
        my $response = $net-&gt;run($net-&gt;crunch(&quot;I love corn.&quot;));
 
        print $net-&gt;uncrunch($response),&quot;\n&quot;;</PRE>
<P>On my system, this responds with, ``Good, Healthy Food.'' If you run the network with
``I love pop.'', though, you will probably get ``Food! apples. apples.'' (At least it returns
that on my system.) As you can see, the associations are not yet perfect, but it can make
for some interesting demos!</P>
<P></P>
<DT><STRONG><A NAME="item_crunched">$net-&gt;crunched($word);</A></STRONG><BR>
<DD>
This will return undef if the word is not in the internal crunch list, or it will return the
index of the word if it exists in the crunch list.
<P></P>
<DT><STRONG><A NAME="item_col_width">$net-&gt;col_width($width);</A></STRONG><BR>
<DD>
This is useful for formating the debugging output of Level 4 if you are learning simple
bitmaps. This will set the debugger to automatically insert a line break after that many

docs.htm  view on Meta::CPAN

<P></P>
<DT><STRONG><A NAME="item_random">$net-&gt;random($rand);</A></STRONG><BR>
<DD>
This will set the randomness factor of the network. Default is 0.001. When called
with no arguments, or an undef value, it will return current randomness value. When
called with a 0 value, it will disable randomness in the network. See NOTES on learning
a 0 value in the input map with randomness disabled.
<P></P>
<DT><STRONG><A NAME="item_load_pcx">$net-&gt;load_pcx($filename);</A></STRONG><BR>
<DD>
Oh, here's a treat... this routine will load a PCX-format file (yah, I know ... ancient format ... but
it is the only one I could find specs for to write it in Perl. If anyone can get specs for
any other formats, or could write a loader for them, I would be very grateful!) Anyway, it loads a PCX-format
file that is exactly 320x200 with 8 bits per pixel, in pure Perl. It returns a blessed reference to
an AI::NeuralNet::BackProp::PCX object, which supports the following routines/members. See the example
files ex_pcxl.pl and ex_pcx.pl in the ./examples/ directory.
<P></P>
<DT><STRONG><A NAME="item_%24pcx%2D%3E%7Bimage%7D">$pcx-&gt;{image}</A></STRONG><BR>
<DD>
This is an array reference to the entire image. The array contains exactly 64000 elements; each
element is a number corresponding to an index into the palette array, detailed below.
<P></P>
<DT><STRONG><A NAME="item_%24pcx%2D%3E%7Bpalette%7D">$pcx-&gt;{palette}</A></STRONG><BR>
<DD>
This is an array ref to an AoH (array of hashes). Each element has the following three keys:
 
<PRE>
 
        $pcx-&gt;{palette}-&gt;[0]-&gt;{red};
        $pcx-&gt;{palette}-&gt;[0]-&gt;{green};
        $pcx-&gt;{palette}-&gt;[0]-&gt;{blue};</PRE>
<P>Each is in the range of 0..63, corresponding to their named color component.</P>
<P></P>
<DT><STRONG><A NAME="item_get_block">$pcx-&gt;get_block($array_ref);</A></STRONG><BR>
<DD>
Returns a rectangular block defined by an array ref in the form of:
 
<PRE>
        [$left,$top,$right,$bottom]</PRE>
<P>These must be in the range of 0..319 for $left and $right, and the range of 0..199 for
$top and $bottom. The block is returned as an array ref with horizontal lines in sequential order.
I.e. to get the pixel at [2,5] in the block, when the block is 20 pixels wide, you would look at
element [5*20+2] of the returned array ref, as below:</P>

<PRE>
        print $pcx-&gt;get_block([0,0,20,50])-&gt;[5*20+2];</PRE>
<P>This would print the contents of the element at block coords [2,5].</P>
<P></P>
<DT><STRONG><A NAME="item_get">$pcx-&gt;get($x,$y);</A></STRONG><BR>
<DD>
Returns the value of pixel at image coordinates $x,$y.
$x must be in the range of 0..319 and $y must be in the range of 0..199.
<P></P>
<DT><STRONG><A NAME="item_rgb">$pcx-&gt;rgb($index);</A></STRONG><BR>
<DD>
Returns a 3-element array (not array ref) with each element corresponding to the red, green, or
blue color components, respectively.
<P></P>
<DT><STRONG><A NAME="item_avg">$pcx-&gt;avg($index);</A></STRONG><BR>
<DD>
Returns the mean value of the red, green, and blue values at the palette index in $index.
<P></P></DL>
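<P>Putting the PCX routines above together, a short usage sketch (the filename and the
coordinates are hypothetical):</P>
<PRE>
        my $pcx = $net-&gt;load_pcx(&quot;image.pcx&quot;);
 
        # Palette index of the pixel at image coordinates 10,20
        my $index = $pcx-&gt;get(10,20);
 
        # Its color components and their average brightness
        my ($red,$green,$blue) = $pcx-&gt;rgb($index);
        my $avg                = $pcx-&gt;avg($index);
 
        # A 10x10 block from the top-left corner of the image
        my $block = $pcx-&gt;get_block([0,0,10,10]);</PRE>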
<P>
<HR SIZE=1 COLOR=BLACK>
<H1><A NAME="notes">NOTES</A></H1>
<DL>
<DT><STRONG><A NAME="item_Learning_0s_With_Randomness_Disabled">Learning 0s With Randomness Disabled</A></STRONG><BR>
<DD>
You can now use 0 values in any input map. This is a good improvement over versions 0.40
and 0.42, where no 0s were allowed because the network would never completely finish
learning with a 0 in the input.
<P>Yet with the allowance of 0s, the network requires one of two things to learn correctly. Either you
must enable randomness with $net-&gt;<A HREF="#item_random"><CODE>random(0.0001)</CODE></A> (any non-zero value works, see <A HREF="#item_random"><CODE>random()</CODE></A>),
or you must set an error minimum with the 'error =&gt; 5' option (you can use some other error value
as well).</P>
<P>When randomness is enabled (that is, when you call <A HREF="#item_random"><CODE>random()</CODE></A> with a value other than 0), it interjects
a bit of randomness into the output of every neuron in the network, except for the input and output
neurons. The randomness is interjected with rand()*$rand, where $rand is the value that was
passed to the <A HREF="#item_random"><CODE>random()</CODE></A> call. This assures that the network will never have a pure 0 internally. A pure 0
internally is bad because a weight multiplied by 0 always yields 0, so the weight can never influence
the output. When the value is a small number like 0.00001 instead, the weight can eventually grow
large enough to produce a useful output; with a pure 0 it could never add enough weight to get
anything other than a 0.</P>
<P>The second option to allow for 0s is to enable a maximum error with the 'error' option in
<A HREF="#item_learn"><CODE>learn()</CODE></A> , <A HREF="#item_learn_set"><CODE>learn_set()</CODE></A> , and <A HREF="#item_learn_set_rand"><CODE>learn_set_rand()</CODE></A> . This allows the network to not worry about
learning an output perfectly.</P>
<P>For accuracy reasons, it is recommended that you work with 0s using the <A HREF="#item_random"><CODE>random()</CODE></A> method.</P>
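<P>To make that concrete, a minimal sketch of both options (the net shape, the data, and
the error value are only illustrative):</P>
<PRE>
        # Option 1 (recommended): enable a little randomness
        $net-&gt;random(0.0001);
        $net-&gt;learn([0,1,1,0], [1]);
 
        # Option 2: allow a maximum error instead of an exact match
        $net-&gt;random(0);
        $net-&gt;learn([0,1,1,0], [1], error =&gt; 5);</PRE>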
<P>If anyone has any thoughts/arguments/suggestions for using 0s in the network, let me know
at <A HREF="mailto:jdb@wcoil.com.">jdb@wcoil.com.</A></P>

<H1><A NAME="bugs">BUGS</A></H1>
<P>This is an alpha release of <CODE>AI::NeuralNet::BackProp</CODE>, and, that being the case, there are
probably bugs in here which I just have not found yet. If you find bugs in this module, I would
appreciate it greatly if you could report them to me at <EM>&lt;<A HREF="mailto:jdb@wcoil.com">jdb@wcoil.com</A>&gt;</EM>,
or, even better, try to patch them yourself and figure out why the bug is being buggy, and
send me the patched code, again at <EM>&lt;<A HREF="mailto:jdb@wcoil.com">jdb@wcoil.com</A>&gt;</EM>.</P>
<P>
<HR SIZE=1 COLOR=BLACK>
<H1><A NAME="author">AUTHOR</A></H1>
<P>Josiah Bryan <EM>&lt;<A HREF="mailto:jdb@wcoil.com">jdb@wcoil.com</A>&gt;</EM></P>
<P>Copyright (c) 2000 Josiah Bryan. All rights reserved. This program is free software;
you can redistribute it and/or modify it under the same terms as Perl itself.</P>
<P>The <CODE>AI::NeuralNet::BackProp</CODE> and related modules are free software. THEY COME WITHOUT WARRANTY OF ANY KIND.</P>
<P></P>
<P>
<HR SIZE=1 COLOR=BLACK>
<H1><A NAME="thanks">THANKS</A></H1>
<P>Below is a list of people who have helped, made suggestions, sent patches, etc., in no particular order.</P>
<PRE>
                Tobias Bronx, tobiasb@odin.funcom.com
                Pat Trainor, ptrainor@title14.com</PRE>

<HR SIZE=1 COLOR=BLACK>
<H1><A NAME="download">DOWNLOAD</A></H1>
<P>You can always download the latest copy of AI::NeuralNet::BackProp from CPAN.</P>
<P>
<HR SIZE=1 COLOR=BLACK>
<H1><A NAME="mailing list">MAILING LIST</A></H1>
<P>A mailing list has been set up for AI::NeuralNet::BackProp for discussion of AI and
neural net related topics as they pertain to AI::NeuralNet::BackProp. I will also
announce in the group each time a new release of AI::NeuralNet::BackProp is available.</P>
<P>The list address is at: <A HREF="mailto:ai-neuralnet-backprop@egroups.com">ai-neuralnet-backprop@egroups.com</A></P>
<P>To subscribe, send a blank email to: <A HREF="mailto:ai-neuralnet-backprop-subscribe@egroups.com">ai-neuralnet-backprop-subscribe@egroups.com</A></P>
 
<P>
<HR SIZE=1 COLOR=BLACK>
<H1><A NAME="what can it do">WHAT CAN IT DO?</A></H1>
<P>Rodin Porrata asked on the ai-neuralnet-backprop mailing list,
"What can they [Neural Networks] do?". In regard to that question,
consider the following:</P>
 
<P>Neural Nets are formed by simulated neurons connected together much the same
way the brain's neurons are. Neural networks are able to associate and
generalize without rules.  They have solved problems in pattern recognition,
robotics, speech processing, financial prediction and signal processing, to
name a few.</P>
 
<P>One of the first impressive neural networks was NetTalk, which read in ASCII
text and correctly pronounced the words (producing phonemes which drove a
speech chip), even those it had never seen before.  Designed by Johns Hopkins
biophysicist Terry Sejnowski and Charles Rosenberg of Princeton in 1986,
this application made the Backpropagation training algorithm famous.  Using
the same paradigm, a neural network has been trained to classify sonar
returns from undersea mines and rocks.  This classifier, designed by
Sejnowski and R.  Paul Gorman, performed better than a nearest-neighbor
classifier.</P>
 
<P>The kinds of problems best solved by neural networks are those that people
are good at such as association, evaluation and pattern recognition.
Problems that are difficult to compute and do not require perfect answers,
just very good answers, are also best done with neural networks.  A quick,
very good response is often more desirable than a more accurate answer which
takes longer to compute.  This is especially true in robotics or industrial
controller applications.  Predictions of behavior and general analysis of
data are also affairs for neural networks.  In the financial arena, consumer
loan analysis and financial forecasting make good applications.  Neural network
designers are working on weather forecasting with neural networks (myself
included).  Currently, doctors are developing medical neural networks as an
aid in diagnosis.  Attorneys and insurance companies are also working on
neural networks to help estimate the value of claims.</P>
 
<P>Neural networks are poor at precise calculations and serial processing. They
are also unable to predict or recognize anything that does not inherently
contain some sort of pattern.  For example, they cannot predict the lottery,
since this is a random process.  It is unlikely that a neural network could
be built which has the capacity to think as well as a person does for two
reasons.  Neural networks are terrible at deduction and logical thinking, and
the human brain is just too complex to simulate completely.  Also, some
problems are too difficult for present technology.  Real vision, for
example, is a long way off.</P>
 
<P>In short, Neural Networks are poor at precise calculations, but good at
association, evaluation, and pattern recognition.
</P>
<P>
<HR SIZE=1 COLOR=BLACK>
<A HREF="http://www.josiah.countystart.com/modules/AI/rec.pl?docs.htm">AI::NeuralNet::BackProp</a> - <i>Written by Josiah Bryan, &lt;<A HREF="mailto:jdb@wcoil.com">jdb@wcoil.com</A>&gt;</I>
</BODY>

examples/ex_add.pl  view on Meta::CPAN

=begin
     
    File:       examples/ex_add.pl
        Author: Josiah Bryan, <jdb@wcoil.com>
        Desc:
 
                This demonstrates the ability of a neural net to generalize and predict what the correct
                result is for inputs that it has never seen before.
                 
                This teaches a network to add 7 sets of numbers, then it asks the user for two numbers to
                add and it displays the results of the user's input.
 
=cut
 
        use AI::NeuralNet::BackProp;
         
        my $addition = new AI::NeuralNet::BackProp(2,2,1);
         
        if(!$addition->load('add.dat')) {
                $addition->learn_set([      
                        [ 1,   1   ], [ 2    ] ,

examples/ex_alpha.pl  view on Meta::CPAN

=begin
     
    File:       examples/ex_alpha.pl
        Author: Josiah Bryan, <jdb@wcoil.com>
        Desc:
 
                This demonstrates the ability of a neural net to generalize and predict what the correct
                result is for inputs that it has never seen before.
                 
                This teaches the network to classify some twenty-nine separate 35-byte bitmaps, and
                then it inputs a never-before-seen bitmap and displays the classification the network
                gives for the unknown bitmap.
 
=cut
 
        use AI::NeuralNet::BackProp;
 
        # Create a new network with 2 layers and 35 neurons in each layer, with 1 output neuron
        my $net = new AI::NeuralNet::BackProp(2,35,1);
         
        # Debug level of 4 gives JUST learn loop iteration benchmark and comparison data
        # as learning progresses.
        $net->debug(4);
 
        my $letters = [            # All prototype inputs       
        [
        2,1,1,1,2,             # Inputs are  
        1,2,2,2,1,             #  5*7 digitized characters
        1,2,2,2,1,             
        1,1,1,1,1,
        1,2,2,2,1,             # This is the alphabet of the
        1,2,2,2,1,             # HP 28S                     

                         1,2,2,2,1,
                         1,1,1,1,1,
                         1,2,2,2,1,
                         1,2,2,2,1,
                         1,2,2,2,1];
 
# Display test map
print "\nTest map:\n";
$net->join_cols($tmp,5);
 
# Display network results
print "Letter index matched: ",$net->run($tmp)->[0],"\n";

examples/ex_bmp.pl  view on Meta::CPAN

        Author: Josiah Bryan, <jdb@wcoil.com>
        Desc:
         
                This demonstrates simple classification of 5x5 bitmaps.
 
=cut
 
        use AI::NeuralNet::BackProp;
        use Benchmark;
 
        # Set resolution
        my $xres=5;
        my $yres=5;
         
        # Create a new net with 2 layers, $xres*$yres inputs, and 1 output
        my $net = AI::NeuralNet::BackProp->new(2,$xres*$yres,1);
         
        # Debug level of 4 gives JUST learn loop iteration benchmark data
        $net->debug(4);
         
        # Create datasets.
        my @data = (
                [       2,1,1,2,2,
                        2,2,1,2,2,
                        2,2,1,2,2,
                        2,2,1,2,2,

examples/ex_bmp2.pl  view on Meta::CPAN

=begin
     
    File:       examples/ex_bmp2.pl
        Author: Josiah Bryan, <jdb@wcoil.com>
        Desc:
 
                This demonstrates the ability of a neural net to generalize and predict what the correct
                result is for inputs that it has never seen before.
                 
                This teaches a network to recognize a 5x7 bitmap of the letter "J", then it presents
                the network with a corrupted "J" and displays the results of the network's output.
 
=cut
 
        use AI::NeuralNet::BackProp;
 
        # Create a new network with 2 layers and 35 neurons in each layer.
        my $net = new AI::NeuralNet::BackProp(2,35,1);
         
        # Debug level of 4 gives JUST learn loop iteration benchmark and comparison data
        # as learning progresses.
        $net->debug(4);
         
        # Create our model input
        my @map =       (1,1,1,1,1,
                                 0,0,1,0,0,
                                 0,0,1,0,0,
                                 0,0,1,0,0,
                                 1,0,1,0,0,
                                 1,0,1,0,0,
                                 1,1,1,0,0);

examples/ex_crunch.pl  view on Meta::CPAN

for (0..3) {
        # learn() can use strings in two ways: as an array ref from crunch(), or
        # directly as a string, which it will then crunch internally.
        $net->learn($net->crunch("I love chips."),  $bad);
        $net->learn($net->crunch("I love apples."), $good);
        $net->learn("I love pop.",                                   $bad);
        $net->learn("I love oranges.",                               $good);
}
 
# run() automatically crunches the string (run_uc() uses run() internally) and
# run_uc() automatically uncrunches the results.
print $net->run_uc("I love corn.");

examples/ex_pcx.pl  view on Meta::CPAN

$net->{col_width} = $bx;
print "Done!\n";
 
print "Loading bitmap...";
my $img = $net->load_pcx("josiah.pcx");            
print "Done!\n";
 
print "Comparing blocks...\n";
my $white = $img->get_block([0,0,$bx,$by]);
 
my ($x,$y,$tmp,@score,$s,@blocks,$b);
for ($x=0;$x<320;$x+=$bx) {
        for ($y=0;$y<200;$y+=$by) {
                $blocks[$b++]=$img->get_block([$x,$y,$x+$bx,$y+$by]);
                $score[$s++]=$net->pdiff($white,$blocks[$b-1]);
                print "Block at [$x,$y], index [$s] scored ".$score[$s-1]."%\n";
        }
}
print "Done!";
 
print "High score:\n";

examples/ex_sub.pl  view on Meta::CPAN

=begin
     
    File:       examples/ex_sub.pl
        Author: Josiah Bryan, <jdb@wcoil.com>
        Desc:
 
                This demonstrates the ability of a neural net to generalize and predict what the correct
                result is for inputs that it has never seen before.
                 
                This teaches a network to subtract 6 sets of numbers, then it asks the user for
                two numbers to subtract and then it displays the results of the user's input.
 
=cut
 
        use AI::NeuralNet::BackProp;
         
        my $subtract = new AI::NeuralNet::BackProp(2,2,1);
         
        if(!$subtract->load('sub.dat')) {
                $subtract->learn_set([      
                        [ 1,   1   ], [ 0      ] ,

examples/ex_synop.pl  view on Meta::CPAN

my @phrases = (
        $phrase1, $phrase2,
        $phrase3, $phrase4
);
 
# Learn the data set   
$net->learn_set(\@phrases);
 
# Run a test phrase through the network
my $test_phrase = $net->crunch("I love neural networking!");
my $result = $net->run($test_phrase);
 
# Get this, it prints "Jay Leno is  networking!" ...  LOL!
print $net->uncrunch($result),"\n";


