AI-NeuralNet-BackProp


BackProp.pm

		return sprintf("%.10f",($diff/$a1s));
	}
	
	# Returns the absolute difference between $fa and $fb as a percentage of $fa
	sub p {
		shift if(substr($_[0],0,4) eq 'AI::'); 
		my ($fa,$fb)=(shift,shift);
		sprintf("%.3f",((($fb-$fa)*((($fb-$fa)<0)?-1:1))/$fa)*100);
	}
	
	# This sub will take an array ref of a data set, which it expects in this format:
	#   my @data_set = (	[ ...inputs... ], [ ...outputs ... ],
	#				   				   ... rows ...
	#				   );
	#
	# This sub returns the percentage of 'forgetfulness' when the net learns all the
	# data in the set in order. Usage:
	#
	#	 learn_set(\@data,[ options ]);
	#
	# Options are options in hash form. They can be of any form that $net->learn takes.
	#
	# It returns a percentage string.
	#
	sub learn_set {
		my $self	=	shift if(substr($_[0],0,4) eq 'AI::'); 
		my $data	=	shift;
		my %args	=	@_;
		my $len		=	@{$data}/2-1;		# number of input/output pairs, minus one
		my $inc		=	$args{inc};
		my $max		=	$args{max};
	    my $error	=	$args{error};
	    my $p		=	(defined $args{flag})	?$args{flag}	   :1;
	    my $row		=	(defined $args{pattern})?$args{pattern}*2+1:1;
	    my ($fa,$fb);
		for my $x (0..$len) {
			print "\nLearning index $x...\n" if($AI::NeuralNet::BackProp::DEBUG);
			my $str = $self->learn( $data->[$x*2],			# The list of data to input to the net
					  		  		$data->[$x*2+1], 		# The output desired
					    			inc=>$inc,				# The starting learning gradient
					    			max=>$max,				# The maximum num of loops allowed
					    			error=>$error);			# The maximum (%) error allowed
			print $str if($AI::NeuralNet::BackProp::DEBUG); 
		}
			
		
		my $res;
		$data->[$row] = $self->crunch($data->[$row]) if($data->[$row] == 0);	# a plain string numifies to 0; crunch it into an array ref
		
		if ($p) {
			$res=pdiff($data->[$row],$self->run($data->[$row-1]));
		} else {
			$res=$data->[$row]->[0]-$self->run($data->[$row-1])->[0];
		}
		return $res;
	}
	
	# This sub will take an array ref of a data set, which it expects in this format:
	#   my @data_set = (	[ ...inputs... ], [ ...outputs ... ],
	#				   				   ... rows ...
	#				   );
	#
	# This sub returns the percentage of 'forgetfulness' when the net learns all the
	# data in the set in RANDOM order. Usage:
	#
	#	 learn_set_rand(\@data,[ options ]);
	#
	# Options are options in hash form. They can be of any form that $net->learn takes.
	#
	# It returns a true value.
	#
	sub learn_set_rand {
		my $self	=	shift if(substr($_[0],0,4) eq 'AI::'); 
		my $data	=	shift;
		my %args	=	@_;
		my $len		=	@{$data}/2-1;		# number of input/output pairs, minus one
		my $inc		=	$args{inc};
		my $max		=	$args{max};
	    my $error	=	$args{error};
	    my @learned;
		for (0..$len) {
			# Pick a random index that has not been learned yet
			my $x;
			do { $x = $self->intr(rand()*$len) } while($learned[$x]);
			$learned[$x]=1;
			print "\nLearning index $x...\n" if($AI::NeuralNet::BackProp::DEBUG); 
			my $str =  $self->learn($data->[$x*2],			# The list of data to input to the net
					  		  		$data->[$x*2+1], 		# The output desired
					    			inc=>$inc,				# The starting learning gradient
			 		    			max=>$max,				# The maximum num of loops allowed
					    			error=>$error);			# The maximum (%) error allowed
			print $str if($AI::NeuralNet::BackProp::DEBUG); 
		}
			
		
		return 1; 
	}

BackProp.pm

				if($flag == 2) {
					$self->{NET}->[$y+$z]->connect($self->{NET}->[$y+$div+$z]);
					$self->{NET}->[$y+$z]->connect($self->{NET}->[$y+$z+1]) if($z<$div-1);
				}
				AI::NeuralNet::BackProp::out1 "\n";
			}
			AI::NeuralNet::BackProp::out1 "\n";             
		}
		
		# These next two loops connect the _run and _map packages (the IO interface) to 
		# the start and end 'layers', respectively. This is how we insert data into
		# the network and how we get data back out of it. The _run and _map packages 
		# are connected to the neurons so that the neurons treat the IO packages as
		# just another neuron, sending data on. The IO packages are special packages
		# designed with the same methods as neurons, but meant for specific IO purposes.
		# You will never need to call the IO packages directly. Instead, they are called
		# whenever you use the run(), map(), or learn() methods of your network.
        
    	AI::NeuralNet::BackProp::out2 "\nMapping I (_run package) connections to network...\n";
		
	    for($y=0; $y<$div; $y++) {
			$self->{_tmp_synapse} = $y;
			$self->{NET}->[$y]->register_synapse($self->{RUN});
			#$self->{NET}->[$y]->connect($self->{RUN});

BackProp.pm

	sub load_pcx {
		shift if(substr($_[0],0,4) eq 'AI::'); 
		
		# open the file in binary mode
		open(FILE, $_[0]) or return undef;
		binmode(FILE);
		
		my $tmp;
		my @image;
		my @palette;
		my $data;
		
		# Read header
		read(FILE,$tmp,128);
		
		# load the data and decompress into buffer
		my $count=0;
		
		while($count<320*200) {		# assumes a 320x200 image
		     # get the first piece of data
		     read(FILE,$data,1);
	         $data=ord($data);
	         
		     # is this an RLE (run-length encoded) byte?
		     if ($data>=192 && $data<=255) {
		        # how many bytes in run?
		        my $num_bytes = $data-192;
		
		        # get the actual $data for the run
		        read(FILE, $data, 1);
				$data=ord($data);
		        # replicate $data in buffer num_bytes times
		        while($num_bytes-->0) {
	            	$image[$count++] = $data;
		        } # end while
		     } else {
		        # actual $data, just copy it into buffer at next location
		        $image[$count++] = $data;
		     } # end else not rle
		}
		
		# move to end of file then back up 768 bytes, i.e. to the beginning of the palette
		seek(FILE,-768,2);
		
		# load the 256-entry palette, one RGB triple per entry
		for my $index (0..255) {
		    # get the red component
		    read(FILE,$tmp,1);

BackProp.pm

	
	# Add a small amount of randomness to the network
	$net->random(0.001);

	# Demonstrate a simple learn() call
	my @inputs = ( 0,0,1,1,1 );
	my @outputs = ( 1,0,1,0,1 );
	
	print $net->learn(\@inputs, \@outputs),"\n";

	# Create a data set to learn
	my @set = (
		[ 2,2,3,4,1 ], [ 1,1,1,1,1 ],
		[ 1,1,1,1,1 ], [ 0,0,0,0,0 ],
		[ 1,1,1,0,0 ], [ 0,0,0,1,1 ]	
	);
	
	# Demo learn_set()
	my $f = $net->learn_set(\@set);
	print "Forgetfulness: $f unit\n";
	
	# Crunch a bunch of strings and return array refs
	my $phrase1 = $net->crunch("I love neural networks!");
	my $phrase2 = $net->crunch("Jay Leno is weird.");
	my $phrase3 = $net->crunch("The rain in spain...");
	my $phrase4 = $net->crunch("Tired of word crunching yet?");

	# Make a data set from the array refs
	my @phrases = (
		$phrase1, $phrase2,
		$phrase3, $phrase4
	);

	# Learn the data set	
	$net->learn_set(\@phrases);
	
	# Run a test phrase through the network
	my $test_phrase = $net->crunch("I love neural networking!");
	my $result = $net->run($test_phrase);
	
	# Get this, it prints "Jay Leno is  networking!" ...  LOL!
	print $net->uncrunch($result),"\n";


BackProp.pm

$maximum_allowable_percentage_of_error is the maximum allowable error to have. If 
this is set, then learn() will return when the percentage difference between the
actual results and desired results falls below $maximum_allowable_percentage_of_error.
If you do not include 'error', or $maximum_allowable_percentage_of_error is set to -1,
then learn() will not return until it gets an exact match for the desired result OR it
reaches $maximum_iterations.
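
For example, a call that uses all three options might look like this (the
values here are illustrative only):

	$net->learn(\@inputs, \@outputs,
				inc   => 0.2,	# starting learning gradient
				max   => 1024,	# give up after 1024 loops
				error => 5);	# or return once error falls below 5%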


=item $net->learn_set(\@set, [ options ]);

UPDATE: Inputs and outputs in the dataset can now be strings. See information on auto-crunching
in learn()

This takes the same options as learn() and allows you to specify a set to learn, rather
than individual patterns. A dataset is an array reference with at least two elements in the
array, each element being another array reference (or now, a scalar string). For each pattern to
learn, you must specify an input array ref, and an output array ref as the next element. Example:
	
	my @set = (
		# inputs        outputs
		[ 1,2,3,4 ],  [ 1,3,5,6 ],
		[ 0,2,5,6 ],  [ 0,2,1,2 ]
	);


BackProp.pm

two learn_set()-specific option tags available:

	flag     =>  $flag
	pattern  =>  $row

If "flag" is set to some TRUE value, as in "flag => 1" in the hash of options, or if the option "flag"
is not set, then it will return a percentage represting the amount of forgetfullness. Otherwise,
learn_set() will return an integer specifying the amount of forgetfulness when all the patterns 
are learned. 
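
For example:

	my $f    = $net->learn_set(\@set, flag => 1);	# percentage of forgetfulness
	my $diff = $net->learn_set(\@set, flag => 0);	# the raw difference instead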

If "pattern" is set, then learn_set() will use that pattern in the data set to measure forgetfulness by.
If "pattern" is omitted, it defaults to the first pattern in the set. Example:

	my @set = (
		[ 0,1,0,1 ],  [ 0 ],
		[ 0,0,1,0 ],  [ 1 ],
		[ 1,1,0,1 ],  [ 2 ],  #  <---
		[ 0,1,1,0 ],  [ 3 ]
	);
	
If you wish to measure forgetfulness as indicated by the line with the arrow, then you would
pass 2 as the "pattern" option, as in "pattern => 2".
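
That call would look like this:

	my $f = $net->learn_set(\@set, pattern => 2);
	print "Forgetfulness on pattern 2: $f%\n";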

Now why the heck would anyone want to measure forgetfulness, you ask? Maybe you wonder how I 
even measure that. Well, it is not a vital value that you have to know. I just put in a 
"forgetfulness measure" one day because I thought it would be neat to know. 

How the module measures forgetfulness is this: First, it learns all the patterns in the set provided,
then it will run the very first pattern (or whatever pattern is specified by the "pattern" option)
in the set after it has finished learning. It will compare the run() output with the desired output
as specified in the dataset. In a perfect world, the two should match exactly. What we measure is
how much that they don't match, thus the amount of forgetfulness the network has.
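
In other words, the measurement is roughly equivalent to this sketch, where
pdiff() stands in for the module's internal percentage-difference helper:

	$net->learn_set(\@set);				# learn every pattern, in order
	my $got = $net->run($set[0]);		# re-run the measured input
	my $f   = pdiff($set[1], $got);		# % difference from the desired output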

NOTE: In version 0.77 percentages were disabled because of a bug. Percentages are now enabled.

Example (from examples/ex_dow.pl):

	# Data from 1989 (as far as I know..this is taken from example data on BrainMaker)
	my @data = ( 
		#	Mo  CPI  CPI-1 CPI-3 	Oil  Oil-1 Oil-3    Dow   Dow-1 Dow-3   Dow Ave (output)
		[	1, 	229, 220,  146, 	20.0, 21.9, 19.5, 	2645, 2652, 2597], 	[	2647  ],
		[	2, 	235, 226,  155, 	19.8, 20.0, 18.3, 	2633, 2645, 2585], 	[	2637  ],
		[	3, 	244, 235,  164, 	19.6, 19.8, 18.1, 	2627, 2633, 2579], 	[	2630  ],
		[	4, 	261, 244,  181, 	19.6, 19.6, 18.1, 	2611, 2627, 2563], 	[	2620  ],
		[	5, 	276, 261,  196, 	19.5, 19.6, 18.0, 	2630, 2611, 2582], 	[	2638  ],
		[	6, 	287, 276,  207, 	19.5, 19.5, 18.0, 	2637, 2630, 2589], 	[	2635  ],
		[	7, 	296, 287,  212, 	19.3, 19.5, 17.8, 	2640, 2637, 2592], 	[	2641  ] 		
	);
	
	# Learn the set
	my $f = $net->learn_set(\@data, 
					  inc	=>	0.1,	
					  max	=>	500,
					  flag	=>	1
					 );
			
	# Print it 
	print "Forgetfulness: $f%";

    
This is a snippet from the example script examples/ex_dow.pl, which demonstrates Dow average
prediction for the next month. A simpler set definition would be as such:

	my @data = (
		[ 0,1 ], [ 1 ],
		[ 1,0 ], [ 0 ]
	);
	
	$net->learn_set(\@data);
	
Same effect as above, but not the same data (obviously).

=item $net->learn_set_rand(\@set, [ options ]);

UPDATE: Inputs and outputs in the dataset can now be strings. See information on auto-crunching
in learn()

This takes the same options as learn() and allows you to specify a set to learn, rather
than individual patterns. 

learn_set_rand() differs from learn_set() in that it learns the patterns in a random order,
each pattern once, rather than in the order that they are in the array. This returns a true
value (1) instead of a forgetfulness factor.

Example:

	my @data = (
		[ 0,1 ], [ 1 ],
		[ 1,0 ], [ 0 ]
	);
	
	$net->learn_set_rand(\@data);
	


=item $net->run($input_map_ref);

UPDATE: run() will now I<automatically> crunch() a string given as the input.

This method will apply the given array ref at the input layer of the neural network, and
it will return an array ref to the output of the network.
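
For instance (the inputs here are illustrative only):

	my $out = $net->run([ 0,1 ]);			# pass an array ref, get an array ref back
	print "first output: $out->[0]\n";
	my $map = $net->run("a test string");	# a plain string is crunch()-ed first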

BackProp.pm

All that run_uc() does is automatically call uncrunch() on the output, regardless
of whether the input was crunch()-ed or not.
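
That is, these two calls produce the same output:

	print $net->uncrunch($net->run("do you like my poetry?")), "\n";
	print $net->run_uc("do you like my poetry?"), "\n";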
	


=item $net->range();

This allows you to limit the possible outputs to a specific set of values. There are several 
ways you can specify the set of values to limit the output to. Each method is shown below. 
When called without any arguments, it will disable output range limits. You will need to re-learn
any data previously learned after disabling ranging, as disabling range invalidates the current
weight matrix in the network.

range() automatically scales the network's outputs to fit inside the range you allow, and
it keeps track of the maximum output it can expect to scale. Therefore, you will need to learn() 
the whole data set again after calling range() on a network.

Subsequent calls to range() invalidate any previous calls to range()

NOTE: It is recommended that you call range() before you call learn(), or else you will get unexpected
results from any run() call after range().
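
A minimal sketch of the intended call order:

	$net->range(0..5);		# constrain the outputs first
	$net->learn_set(\@set);	# then learn; run() results will stay in range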


=item $net->range($bottom..$top);

This is a common form often used in a C<for my $x (0..20)> type of for() constructor. It works

BackProp.pm

of debugging.

Level 0 ($level = 0) : Default, no debugging information printed. All printing is 
left to calling script.

Level 1 ($level = 1) : This causes ALL debugging information for the network to be dumped
as the network runs. In this mode, it is a good idea to redirect your STDOUT to a file, especially
for large programs.

Level 2 ($level = 2) : A slightly-less verbose form of debugging, not as many internal 
data dumps.

Level 3 ($level = 3) : JUST prints weight mapping as weights change.

Level 4 ($level = 4) : JUST prints the benchmark info for EACH learn loop iteration, not just
learning as a whole. Also prints the percentage difference for each loop between current network
results and desired results, as well as the learning gradient ('increment').   

Level 4 is useful for seeing if you need to give a smaller learning increment to learn().
I used level 4 debugging quite often in creating the letters.pl example script and the small_1.pl
example script.
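
For example:

	$net->debug(4);		# verbose per-iteration benchmarks
	print $net->learn(\@inputs, \@outputs);
	$net->debug(0);		# back to quiet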

BackProp.pm


AI::NeuralNet::BackProp::neuron is the worker package for AI::NeuralNet::BackProp.
It implements the actual neurons of the neural network.
AI::NeuralNet::BackProp::neuron is not designed to be created directly, as
it is used internally by AI::NeuralNet::BackProp.

=item AI::NeuralNet::BackProp::_run

=item AI::NeuralNet::BackProp::_map

These two packages, _run and _map, are used to insert data into
the network and to get data back out of it. The _run and _map packages 
are connected to the neurons so that the neurons treat the IO packages as
just another neuron, sending data on. The IO packages are special packages designed
with the same methods as neurons, but meant for specific IO purposes. You will
never need to call the IO packages directly. Instead, they are called whenever
you use the run() or learn() methods of your network.
        



=head1 BUGS

This is an alpha release of C<AI::NeuralNet::BackProp>, and that being the case, I am sure 

docs.htm

        my $net = new AI::NeuralNet::BackProp(1,5,5);

        # Add a small amount of randomness to the network
        $net-&gt;random(0.001);
        # Demonstrate a simple learn() call
        my @inputs = ( 0,0,1,1,1 );
        my @outputs = ( 1,0,1,0,1 );

        print $net-&gt;learn(\@inputs, \@outputs),&quot;\n&quot;;

        # Create a data set to learn
        my @set = (
                [ 2,2,3,4,1 ], [ 1,1,1,1,1 ],
                [ 1,1,1,1,1 ], [ 0,0,0,0,0 ],
                [ 1,1,1,0,0 ], [ 0,0,0,1,1 ]    
        );

        # Demo learn_set()
        my $f = $net-&gt;learn_set(\@set);
        print &quot;Forgetfulness: $f%\n&quot;;

        # Crunch a bunch of strings and return array refs
        my $phrase1 = $net-&gt;crunch(&quot;I love neural networks!&quot;);
        my $phrase2 = $net-&gt;crunch(&quot;Jay Leno is weird.&quot;);
        my $phrase3 = $net-&gt;crunch(&quot;The rain in spain...&quot;);
        my $phrase4 = $net-&gt;crunch(&quot;Tired of word crunching yet?&quot;);


        # Make a data set from the array refs
        my @phrases = (
                $phrase1, $phrase2,
                $phrase3, $phrase4
        );

        # Learn the data set    
        $net-&gt;learn_set(\@phrases);


        # Run a test phrase through the network
        my $test_phrase = $net-&gt;crunch(&quot;I love neural networking!&quot;);
        my $result = $net-&gt;run($test_phrase);

        # Get this, it prints &quot;Jay Leno is  networking!&quot; ...  LOL!
        print $net-&gt;uncrunch($result),&quot;\n&quot;;
        

docs.htm

the pattern is perfectly learned.</P>
<P>$maximum_allowable_percentage_of_error is the maximum allowable error to have. If 
this is set, then <A HREF="#item_learn"><CODE>learn()</CODE></A> will return when the percentage difference between the
actual results and desired results falls below $maximum_allowable_percentage_of_error.
If you do not include 'error', or $maximum_allowable_percentage_of_error is set to -1,
then <A HREF="#item_learn"><CODE>learn()</CODE></A> will not return until it gets an exact match for the desired result OR it
reaches $maximum_iterations.</P>
<P></P>
<DT><STRONG><A NAME="item_learn_set">$net-&gt;learn_set(\@set, [ options ]);</A></STRONG><BR>
<DD>
<B>UPDATED:</B> Inputs and outputs in the dataset can now be strings. See information on auto-crunching
in <A HREF="#item_learn"><CODE>learn()</CODE></A>
<P>This takes the same options as <A HREF="#item_learn"><CODE>learn()</CODE></A> and allows you to specify a set to learn, rather
than individual patterns. A dataset is an array reference with at least two elements in the
array, each element being another array reference (or now, a scalar string). For each pattern to
learn, you must specify an input array ref, and an output array ref as the next element. Example:
</P>
<PRE>
        my @set = (
                # inputs        outputs
                [ 1,2,3,4 ],  [ 1,3,5,6 ],
                [ 0,2,5,6 ],  [ 0,2,1,2 ]
        );</PRE>
<P>See the paragraph on measuring forgetfulness, below. There are 
two learn_set()-specific option tags available:</P>
<PRE>
        flag     =&gt;  $flag
        pattern  =&gt;  $row</PRE>
<P>If ``flag'' is set to some TRUE value, as in ``flag =&gt; 1'' in the hash of options, or if the option ``flag''
is not set, then it will return a percentage representing the amount of forgetfulness. Otherwise,
<A HREF="#item_learn_set"><CODE>learn_set()</CODE></A> will return an integer specifying the amount of forgetfulness when all the patterns 
are learned.</P>
<P>If ``pattern'' is set, then <A HREF="#item_learn_set"><CODE>learn_set()</CODE></A> will use that pattern in the data set to measure forgetfulness by.
If ``pattern'' is omitted, it defaults to the first pattern in the set. Example:</P>
<PRE>
        my @set = (
                [ 0,1,0,1 ],  [ 0 ],
                [ 0,0,1,0 ],  [ 1 ],
                [ 1,1,0,1 ],  [ 2 ],  #  &lt;---
                [ 0,1,1,0 ],  [ 3 ]
        );
</PRE>
<P>
If you wish to measure forgetfulness as indicated by the line with the arrow, then you would
pass 2 as the &quot;pattern&quot; option, as in &quot;pattern =&gt; 2&quot;.</P>
<P>Now why the heck would anyone want to measure forgetfulness, you ask? Maybe you wonder how I 
even measure that. Well, it is not a vital value that you have to know. I just put in a 
``forgetfulness measure'' one day because I thought it would be neat to know.</P>
<P>How the module measures forgetfulness is this: First, it learns all the patterns in the set provided,
then it will run the very first pattern (or whatever pattern is specified by the ``pattern'' option)
in the set after it has finished learning. It will compare the <A HREF="#item_run"><CODE>run()</CODE></A> output with the desired output
as specified in the dataset. In a perfect world, the two should match exactly. What we measure is
how much that they don't match, thus the amount of forgetfulness the network has.</P>
<P>NOTE: In version 0.77 percentages were disabled because of a bug. Percentages are now enabled.</P>
<P>Example (from examples/ex_dow.pl):</P>
<PRE>
        # Data from 1989 (as far as I know..this is taken from example data on BrainMaker)
        my @data = ( 
                #  Mo  CPI  CPI-1 CPI-3  Oil   Oil-1 Oil-3  Dow   Dow-1 Dow-3    Dow Ave (output)
                [  1,  229, 220,  146,   20.0, 21.9, 19.5,  2645, 2652, 2597 ],  [ 2647 ],
                [  2,  235, 226,  155,   19.8, 20.0, 18.3,  2633, 2645, 2585 ],  [ 2637 ],
                [  3,  244, 235,  164,   19.6, 19.8, 18.1,  2627, 2633, 2579 ],  [ 2630 ],
                [  4,  261, 244,  181,   19.6, 19.6, 18.1,  2611, 2627, 2563 ],  [ 2620 ],
                [  5,  276, 261,  196,   19.5, 19.6, 18.0,  2630, 2611, 2582 ],  [ 2638 ],
                [  6,  287, 276,  207,   19.5, 19.5, 18.0,  2637, 2630, 2589 ],  [ 2635 ],
                [  7,  296, 287,  212,   19.3, 19.5, 17.8,  2640, 2637, 2592 ],  [ 2641 ]
        );

        # Learn the set
        my $f = $net-&gt;learn_set(\@data, 
                          inc   =&gt; 0.1,    
                          max   =&gt; 500,
                          flag  =&gt; 1
                         );

        # Print it 
        print &quot;Forgetfulness: $f%&quot;;</PRE>
<P></P>
<P>
This is a snippet from the example script examples/ex_dow.pl, which demonstrates Dow average
prediction for the next month. A simpler set definition would be as such:</P>
<PRE>
        my @data = (
                [ 0,1 ], [ 1 ],
                [ 1,0 ], [ 0 ]
        );

        $net-&gt;learn_set(\@data);
</PRE>
Same effect as above, but not the same data (obviously).
<P></P>
<DT><STRONG><A NAME="item_learn_set_rand">$net-&gt;learn_set_rand(\@set, [ options ]);</A></STRONG><BR>
<DD>
<B>UPDATED:</B> Inputs and outputs in the dataset can now be strings. See information on auto-crunching
in <A HREF="#item_learn"><CODE>learn()</CODE></A>
<P>This takes the same options as <A HREF="#item_learn"><CODE>learn()</CODE></A> and allows you to specify a set to learn, rather
than individual patterns.</P>
<P><A HREF="#item_learn_set_rand"><CODE>learn_set_rand()</CODE></A> differs from <A HREF="#item_learn_set"><CODE>learn_set()</CODE></A> in that it learns the patterns in a random order,
each pattern once, rather than in the order that they are in the array. This returns a true
value (1) instead of a forgetfulness factor.</P>
<P>Example:</P>
<PRE>
        my @data = (
                [ 0,1 ], [ 1 ],
                [ 1,0 ], [ 0 ]
        );

        $net-&gt;learn_set_rand(\@data);</PRE>
<P></P>
<DT><STRONG><A NAME="item_run">$net-&gt;run($input_map_ref);</A></STRONG><BR>
<DD>
<B>UPDATED:</B> <A HREF="#item_run"><CODE>run()</CODE></A> will now <EM>automatically</EM> <A HREF="#item_crunch"><CODE>crunch()</CODE></A> a string given as the input.
<P>This method will apply the given array ref at the input layer of the neural network, and
it will return an array ref to the output of the network.</P>
<P>Example:
</P>
<PRE>

docs.htm

<PRE>
        $net-&gt;uncrunch($net-&gt;run($input_map_ref));</PRE>
<P>All that <A HREF="#item_run_uc"><CODE>run_uc()</CODE></A> does is that it automatically calls <A HREF="#item_uncrunch"><CODE>uncrunch()</CODE></A> on the output, regardless
of whether the input was <A HREF="#item_crunch"><CODE>crunch()</CODE></A> -ed or not.</P>
<P></P>
<DT><STRONG><A NAME="item_range">$net-&gt;range();</A></STRONG><BR>
<DD>
This allows you to limit the possible outputs to a specific set of values. There are several 
ways you can specify the set of values to limit the output to. Each method is shown below. 
When called without any arguments, it will disable output range limits. You will need to re-learn
any data previously learned after disabling ranging, as disabling range invalidates the current
weight matrix in the network.
<P><A HREF="#item_range"><CODE>range()</CODE></A> automatically scales the networks outputs to fit inside the size of range you allow, and, therefore,
it keeps track of the maximum output it can expect to scale. Therefore, you will need to <A HREF="#item_learn"><CODE>learn()</CODE></A> 
the whole data set again after calling <A HREF="#item_range"><CODE>range()</CODE></A> on a network.</P>
<P>Subsequent calls to <A HREF="#item_range"><CODE>range()</CODE></A> invalidate any previous calls to <A HREF="#item_range"><CODE>range()</CODE></A></P>
<P>NOTE: It is recomended, you call <A HREF="#item_range"><CODE>range()</CODE></A> before you call <A HREF="#item_learn"><CODE>learn()</CODE></A> or else you will get unexpected
results from any <A HREF="#item_run"><CODE>run()</CODE></A> call after <A HREF="#item_range"><CODE>range()</CODE></A> .</P>
<P></P>
<DT><STRONG>$net-&gt;range($bottom..$top);</STRONG><BR>
<DD>
This is a common form often used in a <CODE>for my $x (0..20)</CODE> type of <CODE>for()</CODE> constructor. It works
the exact same way. It will allow all numbers from $bottom to $top, inclusive, to be given 
as outputs of the network. No other values will be possible, other than those between $bottom
and $top, inclusive.

docs.htm

<DT><STRONG><A NAME="item_debug">$net-&gt;debug($level)</A></STRONG><BR>
<DD>
Toggles debugging off if called with $level = 0 or no arguments. There are four levels
of debugging.
<P>Level 0 ($level = 0) : Default, no debugging information printed. All printing is 
left to calling script.</P>
<P>Level 1 ($level = 1) : This causes ALL debugging information for the network to be dumped
as the network runs. In this mode, it is a good idea to redirect your STDOUT to a file, especially
for large programs.</P>
<P>Level 2 ($level = 2) : A slightly-less verbose form of debugging, not as many internal 
data dumps.</P>
<P>Level 3 ($level = 3) : JUST prints weight mapping as weights change.</P>
<P>Level 4 ($level = 4) : JUST prints the benchmark info for EACH learn loop iteration, not just
learning as a whole. Also prints the percentage difference for each loop between current network
results and desired results, as well as the learning gradient ('increment').</P>
<P>Level 4 is useful for seeing if you need to give a smaller learning increment to <A HREF="#item_learn"><CODE>learn()</CODE></A>.
I used level 4 debugging quite often in creating the letters.pl example script and the small_1.pl
example script.</P>
<P>Toggles debugging off when called with no arguments.</P>
<P></P>
<DT><STRONG><A NAME="item_save">$net-&gt;save($filename);</A></STRONG><BR>

docs.htm

<DD>
AI::NeuralNet::BackProp::neuron is the worker package for AI::NeuralNet::BackProp.
It implements the actual neurons of the neural network.
AI::NeuralNet::BackProp::neuron is not designed to be created directly, as
it is used internally by AI::NeuralNet::BackProp.
<P></P>
<DT><STRONG><A NAME="item_AI%3A%3ANeuralNet%3A%3ABackProp%3A%3A_run">AI::NeuralNet::BackProp::_run</A></STRONG><BR>
<DD>
<DT><STRONG><A NAME="item_AI%3A%3ANeuralNet%3A%3ABackProp%3A%3A_map">AI::NeuralNet::BackProp::_map</A></STRONG><BR>
<DD>
These two packages, _run and _map, are used to insert data into
the network and to get data back out of it. The _run and _map packages 
are connected to the neurons so that the neurons treat the IO packages as
just another neuron, sending data on. The IO packages are special packages designed
with the same methods as neurons, but meant for specific IO purposes. You will
never need to call the IO packages directly. Instead, they are called whenever
you use the <A HREF="#item_run"><CODE>run()</CODE></A> or <A HREF="#item_learn"><CODE>learn()</CODE></A> methods of your network.
<P></P></DL>
<P>
<HR SIZE=1 COLOR=BLACK>
<H1><A NAME="bugs">BUGS</A></H1>
<P>This is an alpha release of <CODE>AI::NeuralNet::BackProp</CODE>, and that being the case, I am sure 
there are probably bugs in here which I just have not found yet. If you find bugs in this module, I would 
appreciate it greatly if you could report them to me at <EM>&lt;<A HREF="mailto:jdb@wcoil.com">jdb@wcoil.com</A>&gt;</EM>,

docs.htm

Sejnowski and R.  Paul Gorman, performed better than a nearest-neighbor
classifier.</P>

<P>The kinds of problems best solved by neural networks are those that people
are good at such as association, evaluation and pattern recognition.
Problems that are difficult to compute and do not require perfect answers,
just very good answers, are also best done with neural networks.  A quick,
very good response is often more desirable than a more accurate answer which
takes longer to compute.  This is especially true in robotics or industrial
controller applications.  Predictions of behavior and general analysis of
data are also affairs for neural networks.  In the financial arena, consumer
loan analysis and financial forecasting make good applications.  New network
designers are working on weather forecasts by neural networks (Myself
included).  Currently, doctors are developing medical neural networks as an
aid in diagnosis.  Attorneys and insurance companies are also working on
neural networks to help estimate the value of claims.</P>

<P>Neural networks are poor at precise calculations and serial processing. They
are also unable to predict or recognize anything that does not inherently
contain some sort of pattern.  For example, they cannot predict the lottery,
since this is a random process.  It is unlikely that a neural network could

examples/ex_add2.pl

=begin
    
    File:	examples/ex_add2.pl
	Author: Rodin Porrata, <rodin@ursa.llnl.gov>
	Desc: 

		This script runs a complete test of the network's ability to add and remember 
		data sets, as well as testing the optimum "inc" to learn and the optimum 
		number of layers for a network.

=cut

	use AI::NeuralNet::BackProp;
	use Benchmark;
	use English;
	
	my $ofile = "addnet_data.txt";
	
	open( OUTFP, ">$ofile" ) or die "Could not open output file\n";
	
	my ( $layers, $inputs, $outputs, $top, $inc, $runtime,
	$forgetfulness );
	my @answers;
	my @predictions;
	my @percent_diff;
	
	$inputs = 3;

examples/ex_add2.pl

	#....................................................
	sub addnet
	{
	 print "\nCreate a new net with $layers layers, 3 inputs, and 1 output\n";
	 my $net = AI::NeuralNet::BackProp->new($layers,3,1);
	
	 # Disable debugging
	 $net->debug(0);
	
	
	 my @data = (
	  [   2633, 2665, 2685],  [ 2633 + 2665 + 2685 ],
	  [   2623, 2645, 2585],  [ 2623 + 2645 + 2585 ],
	  [  2627, 2633, 2579],  [ 2627 + 2633 + 2579 ],
	  [   2611, 2627, 2563],  [ 2611 + 2627 + 2563 ],
	  [  2640, 2637, 2592],  [ 2640 + 2637 + 2592 ]
	 );
	
	 print "Learning started, will cycle $top times with inc = $inc\n";
	
	  # Make it learn the whole dataset $top times
	  my @list;
	
	 my $t1=new Benchmark;
	 for my $a (1..$top)
	 {
	  print "Outer Loop: $a : ";
	
	  $forgetfulness = $net->learn_set( \@data,
	           inc  => $inc,
	           max  => 500,
	           error => -1);
	
	  print "Forgetfulness: $forgetfulness %\n";
	
	 }
	 my $t2=new Benchmark;
	
	 $runtime = timediff($t2,$t1);

examples/ex_add2.pl

	      [ 2345, 2543, 3000 ],
	      [ 2654, 2234, 2534 ] );
	
	    test_net( $net, @input );
	}
	#.....................................................................
	 sub test_net {
	  my @set;
	  my $fb;
	  my $net = shift;
	  my @data = @_;
	  undef @percent_diff;	# reset results from any previous run
	
	  for( my $i=0; defined( $data[$i] ); $i++ ){
	   @set = @{ $data[$i] };
	   $fb = $net->run(\@set)->[0];
	   # Print output
	   print "Test Factors: (",join(',',@set),")\n";
	   my $answer = eval( join( '+',@set ));
	   push @percent_diff, 100.0 * abs( $answer - $fb )/ $answer;
	   print "Prediction : $fb      answer: $answer\n";
	  }
	 }
	
	

examples/ex_alpha.pl

		then it inputs a never-before-seen bitmap and displays the classification the network 
		gives for the unknown bitmap.

=cut

	use AI::NeuralNet::BackProp;

	# Create a new network with 2 layers and 35 neurons in each layer, with 1 output neuron
	my $net = new AI::NeuralNet::BackProp(2,35,1);
	
	# Debug level of 4 gives JUST learn loop iteration benchmark and comparison data 
	# as learning progresses.
	$net->debug(4);

	my $letters = [            # All prototype inputs        
        [
        2,1,1,1,2,             # Inputs are   
        1,2,2,2,1,             #  5*7 digitized characters 
        1,2,2,2,1,              
        1,1,1,1,1,
        1,2,2,2,1,             # This is the alphabet of the

examples/ex_bmp.pl

	# Set resolution
	my $xres=5;
	my $yres=5;
	
	# Create a new net with 2 layers, $xres*$yres inputs, and 1 output
	my $net = AI::NeuralNet::BackProp->new(2,$xres*$yres,1);
	
	# Set debug level 4 (learn-loop benchmark and comparison data)
	$net->debug(4);
	
	# Create datasets.
	my @data = ( 
		[	2,1,1,2,2,
			2,2,1,2,2,
			2,2,1,2,2,
			2,2,1,2,2,
			2,1,1,1,2	],		[	1	],
		
		[	1,1,1,2,2,
			2,2,2,1,2,
			2,1,1,1,2,
			1,2,2,2,2,

examples/ex_bmp.pl

		
	);
    
    
	$net->range(1,5);
	
	# If we haven't saved the net already, do the learning
	if(!$net->load('images.net')) {
		print "\nLearning started...\n";
		
		# Make it learn the whole dataset $top times
		my @list;
		my $top=3;
		for my $a (0..$top) {
			my $t1=new Benchmark;
			print "\n\nOuter Loop: $a\n";
			
			# Test forgetfulness
			my $f = $net->learn_set(\@data,	inc		=>	0.1,	
											max		=>	500,
											error	=>	-1);
			
			# Print it 
			print "\n\nForgetfullness: $f%\n";

			# Save net to disk				
			$net->save('images.net');

			my $t2=new Benchmark;

examples/ex_bmp2.pl

		This teaches a network to recognize a 5x7 bitmap of the letter "J" then it presents
		the network with a corrupted "J" and displays the results of the networks output.

=cut

	use AI::NeuralNet::BackProp;

	# Create a new network with 2 layers and 35 neurons in each layer.
	my $net = new AI::NeuralNet::BackProp(2,35,1);
	
	# Debug level of 4 gives JUST learn loop iteration benchmark and comparison data 
	# as learning progresses.
	$net->debug(4);
	
	# Create our model input
	my @map	=	(1,1,1,1,1,
				 0,0,1,0,0,
				 0,0,1,0,0,
				 0,0,1,0,0,
				 1,0,1,0,0,
				 1,0,1,0,0,

examples/ex_dow.pl


	use AI::NeuralNet::BackProp;
	use Benchmark;

	# Create a new net with 2 layers, 9 inputs, and 1 output
	my $net = AI::NeuralNet::BackProp->new(2,9,1);
	
	# Set debug level 4 (learn-loop benchmark and comparison data)
	$net->debug(4);
	
	# Create datasets.
	#	Note that these are fictitious values shown for illustration purposes
	#	only.  In the example, CPI is a certain month's consumer price
	#	index, CPI-1 is the index one month before, CPI-3 is the index 3
	#	months before, etc.

	my @data = ( 
		#	Mo  CPI  CPI-1 CPI-3 	Oil  Oil-1 Oil-3    Dow   Dow-1 Dow-3   Dow Ave (output)
		[	1, 	229, 220,  146, 	20.0, 21.9, 19.5, 	2645, 2652, 2597], 	[	2647  ],
		[	2, 	235, 226,  155, 	19.8, 20.0, 18.3, 	2633, 2645, 2585], 	[	2637  ],
		[	3, 	244, 235,  164, 	19.6, 19.8, 18.1, 	2627, 2633, 2579], 	[	2630  ],
		[	4, 	261, 244,  181, 	19.6, 19.6, 18.1, 	2611, 2627, 2563], 	[	2620  ],
		[	5, 	276, 261,  196, 	19.5, 19.6, 18.0, 	2630, 2611, 2582], 	[	2638  ],
		[	6, 	287, 276,  207, 	19.5, 19.5, 18.0, 	2637, 2630, 2589], 	[	2635  ],
		[	7, 	296, 287,  212, 	19.3, 19.5, 17.8, 	2640, 2637, 2592], 	[	2641  ] 		
	);
    
    
	# If we haven't saved the net already, do the learning
	if(!$net->load('dow.dat')) {
		print "\nLearning started...\n";
		
		# Make it learn the whole dataset $top times
		my @list;
		my $top=1;
		for my $a (0..$top) {
			my $t1=new Benchmark;
			print "\n\nOuter Loop: $a\n";
			
			# Test forgetfulness
			my $f = $net->learn_set(\@data,	inc		=>	0.2,	
											max		=>	2000,
											error	=>	-1);
			
			# Print it 
			print "\n\nForgetfullness: $f%\n";

			# Save net to disk				
			$net->save('dow.dat');
            
			my $t2=new Benchmark;
			my $td=timediff($t2,$t1);
			print "\nLoop $a took ",timestr($td),"\n";
		}
	}
                                                                          
	# Run a prediction using fake data
	#			Month	CPI  CPI-1 CPI-3 	Oil  Oil-1 Oil-3    Dow   Dow-1 Dow-3    
	my @set=(	10,		352, 309,  203, 	18.3, 18.7, 16.1, 	2592, 2641, 2651	  ); 
	
	# Dow Ave (output)	
	my $fb=$net->run(\@set)->[0];
	
	
	# Print output
	print "\nTest Factors: (",join(',',@set),")\n";
	print "DOW Prediction for Month #11: $fb\n";

examples/ex_synop.pl

	
	# Add a small amount of randomness to the network
	$net->random(0.001);

	# Demonstrate a simple learn() call
	my @inputs = ( 0,0,1,1,1 );
	my @outputs = ( 1,0,1,0,1 );
	
	print $net->learn(\@inputs, \@outputs),"\n";

	# Create a data set to learn
	my @set = (
		[ 2,2,3,4,1 ], [ 1,1,1,1,1 ],
		[ 1,1,1,1,1 ], [ 0,0,0,0,0 ],
		[ 1,1,1,0,0 ], [ 0,0,0,1,1 ]	
	);
	
	# Demo learn_set()
	my $f = $net->learn_set(\@set);
	print "Forgetfulness: $f unit\n";
	
	# Crunch a bunch of strings and return array refs
	my $phrase1 = $net->crunch("I love neural networks!");
	my $phrase2 = $net->crunch("Jay Leno is weird.");
	my $phrase3 = $net->crunch("The rain in spain...");
	my $phrase4 = $net->crunch("Tired of word crunching yet?");

	# Make a data set from the array refs
	my @phrases = (
		$phrase1, $phrase2,
		$phrase3, $phrase4
	);

	# Learn the data set	
	$net->learn_set(\@phrases);
	
	# Run a test phrase through the network
	my $test_phrase = $net->crunch("I love neural networking!");
	my $result = $net->run($test_phrase);
	
	# Get this, it prints "Jay Leno is  networking!" ...  LOL!
	print $net->uncrunch($result),"\n";


