AI-NeuralNet-Mesh


Changes

0.31  Fri Aug 25 05:10:11 2000
        - Second release, by Josiah Bryan
        - 3 Major features:
                - separate layer sizes
                - custom node activations
                - increased learning speed

0.43  Wed Sep 14 03:13:01 2000
        - Third release, by Josiah Bryan
        - Several bug fixes
                - fixed 'flag' option on learn_set()
                - fixed multiple-output bug
                - fixed learning gradient error
        - Improved learning function to not degrade increment automatically
        - Added CSV-style dataset loader
        - Added Export tags
        - Added four custom node activations, including range and ramp
        - Added several misc. extra functions
        - Added ALN example demo


Mesh.pm

		my $layers	=	shift;
		my $nodes	=	shift;
		my $outputs	=	shift || $nodes;
		my $inputs	=	shift || $nodes;
        
		bless $self, $type;
		                       
		# If $layers is a string, then it will be numerically equal to 0, so 
		# try to load it as a network file.
		if($layers == 0) {  
		    # We use a "1" flag as the second argument to indicate that we 
		    # want load() to call the new constructor to make a network the
		    # same size as in the file and return a reference to the network,
		    # instead of just creating the network from a pre-existing reference.
			return $self->load($layers,1);
		}
		
		# Looks like we got ourselves a layer specs array
		if(ref($layers) eq "ARRAY") { 
			if(ref($layers->[0]) eq "HASH") {
				$self->{total_nodes}	=	0;

Mesh.pm

    	my %args	=	@_;						# get args into hash
    	my $inc		=	$args{inc} || 0.002;	# learning gradient
    	my $max     =   $args{max} || 1024;     # max iterations
    	my $degrade =   $args{degrade} || 0;    # enable gradient degrading
		my $error   = 	(defined $args{error} && $args{error} > -1) ? $args{error} : -1;
  		my $dinc	=	0.0002;					# amount to adjust gradient by
		my $diff	=	100;					# error margin between results
		my $start	=	new Benchmark;			
		$inputs		=	$self->crunch($inputs)  if($inputs == 0); 
		$outputs	=	$self->crunch($outputs) if($outputs == 0);
		my ($flag,$ldiff,$cdiff,$_mi,$loop,$y) = (0,0,0,0,0,0);	# init to avoid undef warnings
		while(!$flag && ($max ? $loop<$max : 1)) {
    		my $b	=	new Benchmark;
    		my $got	=	$self->run($inputs);
    		$diff 	=	pdiff($got,$outputs);
		    $flag	=	1;
    		    		
		    if(($error>-1 ? $diff<$error : 0) || !$diff) {
				$flag=1;
				last;
			}
			
			if($degrade) {
				$inc   -= ($dinc*$diff);
				
				if($diff eq $ldiff) {
					$cdiff++;
					$inc += ($dinc*$diff)+($dinc*$cdiff*10);
				} else {
					$cdiff=0;
				}
				$ldiff = $diff;
			}
				
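    		# Compare each output node's actual value against the desired
    		# value; when they differ, nudge that node's weights toward the
    		# target by +/- $inc and keep the training loop going.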
    		for my $x (0..$self->{outputs}-1) { 
    			my $a	=	$got->[$x];
    			my $b	=	$outputs->[$x];
    			d("got: $a, wanted: $b\n",2);
    			if ($a != 	$b) {
    				$flag	=	0;
    				$y 		=	$self->{total_nodes}-$self->{outputs}+$x;
    				$self->{mesh}->[$y]->adjust_weight(($a<$b?1:-1)*$inc,$b);
   				}
   			}
   			
   			$loop++;
   			
   			d("===============================Loop: [$loop]===================================\n",4);
   			d("Current Error: $diff\tCurrent Increment: $inc\n",4);
   			d("Benchmark: ".timestr(timediff(new Benchmark,$b))."\n",4);

Mesh.pm

	# See POD for usage
	sub learn_set {
		my $self	=	shift;
		my $data	=	shift;
		my %args	=	@_;
		my $len		=	$#{$data}/2;
		my $inc		=	$args{inc};
		my $max		=	$args{max};
	    my $error	=	$args{error};
	    my $degrade	=	$args{degrade};
	    my $p		=	(defined $args{flag}) ?$args{flag} :1;
	    my $row		=	(defined $args{row})  ?$args{row}+1:1;
	    my $leave	=	(defined $args{leave})?$args{leave}:0;
		for my $x (0..$len-$leave) {
			d("Learning set $x...\n",4);
			my $str = $self->learn( $data->[$x*2],
					  		  		$data->[$x*2+1],
					    			inc=>$inc,
					    			max=>$max,
					    			error=>$error,
					    			degrade=>$degrade);

Mesh.pm

	    	return undef;
	    }
	    
	    return $self;
	}
        
	# Load entire network state from disk.
	sub load {
		my $self		=	shift;
		my $file		=	shift;  
		my $load_flag   =	shift;
		
	    my @lines;
	    
	    if(-f $file) {
		    open(FILE,"$file");
		    @lines=<FILE>;
	    	close(FILE);
	    } else {
	    	@lines=split /\n/, $file;
	    }

Mesh.pm

	    	$db{$a}=$b;
	    }
	    
	    if(!$db{"header"}) {
	    	$self->{error} = "Invalid format.";
	    	return undef;
	    }
	    
	    return $self->load_old($file) if($self->version($db{"header"})<0.21);
	    
	    if($load_flag) {
		    undef $self;
	        $self = AI::NeuralNet::Mesh->new([split(',',$db{layers})]);
		} else {
			$self->{inputs}			= $db{inputs};
		    $self->{nodes}			= $db{nodes};
			$self->{outputs}		= $db{outputs};
			$self->{layers} 		= [split(',',$db{layers})];
			$self->{total_layers}	= $db{total_layers};
			$self->{total_nodes}	= $db{total_nodes};
		}

Mesh.pm

			}
		}
		
		return $self;
	}
	
	# Load entire network state from disk (old, pre-0.21 file format).
	sub load_old {
		my $self		=	shift;
		my $file		=	shift;  
		my $load_flag   =	shift;
		
	    if(!(-f $file)) {
	    	$self->{error} = "File \"$file\" does not exist.";
	    	return undef;
	    }
	    
	    open(FILE,"$file");
	    my @lines=<FILE>;
	    close(FILE);
	    

Mesh.pm

	    	chomp($line);
	    	my ($a,$b) = split /=/, $line;
	    	$db{$a}=$b;
	    }
	    
	    if(!$db{"header"}) {
	    	$self->{error} = "Invalid format.";
	    	return undef;
	    }
	    
	    if($load_flag) {
		    undef $self;
	
			# Create new network
			$self = AI::NeuralNet::Mesh->new($db{"layers"},
		    			 				 	 $db{"nodes"},
		    						      	 $db{"outputs"});
		} else {
			$self->{total_layers}	=	$db{"layers"};
			$self->{nodes}			=	$db{"nodes"};
			$self->{outputs}		=	$db{"outputs"};

Mesh.pm

		my $self	=	shift;
		my $layer	=	shift || 0;
		my $node	=	shift || 0;
		my $value	=	shift || 0.5; 
		my $n		=	0;
		no strict 'refs';
		for(0..$layer-1){$n+=$self->{layers}->[$_]}
		$self->{mesh}->[$n+$node]->{threshold} = $value;
	}
	
	# Set mean (avg.) flag for a layer.
	# usage: $net->mean($layer,$flag);
	# If $flag is true, it enables finding the mean for that layer,
	# If $flag is false, disables mean.
	sub mean {
		my $self	=	shift;
		my $layer	=	shift || 0;
		my $value	=	shift || 0;
		my $n		=	0;
		no strict 'refs';
		for(0..$layer-1){$n+=$self->{layers}->[$_]}
		for($n..$n+$self->{layers}->[$layer]-1) {
			$self->{mesh}->[$_]->{mean} = $value;
		}

Mesh.pm

		my $self	=	shift;
		my $input	=	shift;
		my $from_id	=	shift;
		
		$self->{_inputs}->[$from_id]->{value} = $input * $self->{_inputs}->[$from_id]->{weight};
		$self->{_inputs}->[$from_id]->{input} = $input;
		$self->{_inputs}->[$from_id]->{fired} = 1;
		
		$self->{_parent}->d("got input $input from id $from_id, weighted to $self->{_inputs}->[$from_id]->{value}.\n",1);
		
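		# Only fire this node once every connected input has delivered a
		# value; until then, wait for the remaining inputs.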
		my $flag	=	1;
		for my $x (0..$self->{_inputs_size}-1) { $flag = 0 if(!$self->{_inputs}->[$x]->{fired}) }
		if ($flag) {
			$self->{_parent}->d("all inputs fired for $self.\n",1);
			my $output	=	0;   
			
			# Sum
			for my $i (@{$self->{_inputs}}) {                        
				$output += $i->{value};
			}
		
			# Handle activations, thresholds, and means
			$output	   /=  $self->{_inputs_size} if($self->{flag_mean});
			#$output    += (rand()*$self->{_parent}->{random});
			$output		= ($output>=$self->{threshold})?1:0 if(($self->{activation} eq "sigmoid") || ($self->{activation} eq "sigmoid_1"));
			if($self->{activation} eq "sigmoid_2") {
				$output =  1 if($output >$self->{threshold});
				$output = -1 if($output <$self->{threshold});
				$output =  0 if($output==$self->{threshold});
			}
			
			# Handle CODE refs
			$output = &{$self->{activation}}($output,$self) if(ref($self->{activation}) eq "CODE");

Mesh.pm

You can also set the activation and threshold values after network creation with the
activation() and threshold() methods. 
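
For example, a minimal sketch (the layer-level setter signatures here are an
assumption based on the method descriptions above):

	$net->activation(1, "sigmoid");   # set activation type for layer 1
	$net->threshold(1, 0.5);          # set firing threshold for layer 1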

	



=item $net->learn($input_map_ref, $desired_result_ref [, options ]);

NOTE: learn_set() now has increment-degrading turned OFF by default. See note
on the degrade flag, below.

This will 'teach' a network to associate a new input map with a desired 
result. It will return a string containing benchmarking information. 

You can also specify strings as inputs and outputs to learn, and they will be 
crunched automatically. Example:

	$net->learn('corn', 'cob');
	
	
Note that the old method of calling crunch() on the values still works just as well.	
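
For comparison, a minimal sketch of the equivalent explicit form, using the
module's own crunch() method:

	$net->learn($net->crunch('corn'), $net->crunch('cob'));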

The first two arguments may be array refs (or now, strings), and they may be 
of different lengths.

Options should be passed in hash form. There are four options:
	 
	 inc      =>    $learning_gradient
	 max      =>    $maximum_iterations
	 error    =>    $maximum_allowable_percentage_of_error
	 degrade  =>    $degrade_increment_flag
	 

$learning_gradient is an optional value used to adjust the weights of the internal
connections. If $learning_gradient is omitted, it defaults to 0.002.
 
$maximum_iterations is the maximum number of iterations the loop should do.
It defaults to 1024.  Set it to 0 if you never want the loop to quit before
the pattern is perfectly learned.

$maximum_allowable_percentage_of_error is the maximum allowable error. If 
this is set, then learn() will return when the percentage difference between the
actual results and desired results falls below $maximum_allowable_percentage_of_error.
If you do not include 'error', or $maximum_allowable_percentage_of_error is set to -1,
then learn() will not return until it gets an exact match for the desired result OR it
reaches $maximum_iterations.

$degrade_increment_flag is a simple flag used to allow/disallow increment degrading
during learning based on a product of the error difference with several other factors.
$degrade_increment_flag is off by default. Setting $degrade_increment_flag to a true
value turns increment degrading on. 
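
Putting the options together, a usage sketch (the option values here are
illustrative only):

	$net->learn(\@inputs, \@outputs,
	            inc     => 0.002,   # learning gradient
	            max     => 2048,    # give up after 2048 iterations
	            error   => 5,       # or stop below 5% error
	            degrade => 1);      # enable increment degrading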

In previous module releases $degrade_increment_flag was not used, as increment degrading
was always on. For this release I reviewed several other network types, as well as
several texts, and decided it would be better not to degrade the increment by default.
The option is still there for those inclined to use it, and some problems do learn
noticeably faster with degrading enabled. See test.pl for an example: without the
degrade flag, test.pl would take a very long time to learn.



=item $net->learn_set(\@set, [ options ]);

This takes the same options as learn() (learn_set() uses learn() internally) 
and allows you to specify a set to learn, rather than individual patterns. 
A dataset is an array reference with at least two elements in the array, 
each element being another array reference (or now, a scalar string). For 
each pattern to learn, you must specify an input array ref, and an output 
array ref as the next element. Example:

	my @set = (
		# inputs        outputs
		[ 1,2,3,4 ],  [ 1,3,5,6 ],
		[ 0,2,5,6 ],  [ 0,2,1,2 ]
	);
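
The set is then passed by reference, with any learn() options after it. A
minimal sketch:

	$net->learn_set(\@set, max => 500);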


Inputs and outputs in the dataset can also be strings.
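
For instance, a sketch of a string-based set (the phrases and replies here
are purely illustrative):

	my @phrases = (
		'I love corn.',  'Me too!',
		'I hate corn.',  'Why not?'
	);
	$net->learn_set(\@phrases);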

See the paragraph on measuring forgetfulness, below. There are 
two learn_set()-specific option tags available:

	flag     =>  $flag
	pattern  =>  $row

If "flag" is set to some TRUE value, as in "flag => 1" in the hash of options, or if the option "flag"
is not set, then it will return a percentage representing the amount of forgetfulness. Otherwise,
learn_set() will return an integer specifying the amount of forgetfulness when all the patterns 
are learned. 

If "pattern" is set, then learn_set() will use that pattern in the data set to measure forgetfulness by.
If "pattern" is omitted, it defaults to the first pattern in the set. Example:

	my @set = (
		[ 0,1,0,1 ],  [ 0 ],
		[ 0,0,1,0 ],  [ 1 ],

examples/ex_aln.pl

		my $roots  = shift || $leaves;
	    
	    # Replace the load function with a new one to preserve the
	    # load activations. We have to add this up here because next
	    # thing we do is check if they passed a file name as $leaves,
	    # and we need to have our new load sub already in place before
	    # we try to load anything in $leaves.
	    *{'AI::NeuralNet::Mesh::load'} = sub {
	        my $self		=	shift;
			my $file		=	shift;  
			my $load_flag   =	shift;
			
		    if(!(-f $file)) {
		    	$self->{error} = "File \"$file\" does not exist.";
		    	return undef;
		    }
		    
		    open(FILE,"$file");
		    my @lines=<FILE>;
		    close(FILE);
		    

examples/ex_aln.pl

		    	$db{$a}=$b;
		    }
		    
		    if(!$db{"header"}) {
		    	$self->{error} = "Invalid format.";
		    	return undef;
		    }
		    
		    return $self->load_old($file) if($self->version($db{"header"})<0.21);
		    
		    if($load_flag) {
			    undef $self;
		        $self = Tree($db{inputs},$db{outputs});
			} else {
				$self->{inputs}			= $db{inputs};
			    $self->{nodes}			= $db{nodes};
				$self->{outputs}		= $db{outputs};
				$self->{layers} 		= [split(',',$db{layers})];
				$self->{total_layers}	= $db{total_layers};
				$self->{total_nodes}	= $db{total_nodes};
			}

examples/ex_aln.pl

			}
			
	    	$self->extend($self->{_original_specs});
	
			return $self;
	    };
	    
		# If $leaves is a string, then it will be numerically equal to 0, so 
		# try to load it as a network file.
		if($leaves == 0) {  
		    # We use a "1" flag as the second argument to indicate that we 
		    # want load() to call the new constructor to make a network the
		    # same size as in the file and return a reference to the network,
		    # instead of just creating the network from a pre-existing reference.
			my $self = AI::NeuralNet::Mesh->new(1,1);
			return $self->load($leaves,1);
		}
		
		# Initialize our counter and our specs ref
		my $specs  = [];
		my $level  = 0;

mesh.htm

other than the ones listed above.</P>
<P>Three of the activation syntaxes are shown in the first constructor above, the ``linear'',
``sigmoid'' and code ref types.</P>
<P>You can also set the activation and threshold values after network creation with the
<A HREF="#item_activation"><CODE>activation()</CODE></A> and <A HREF="#item_threshold"><CODE>threshold()</CODE></A> methods.</P>
<P></P>
<P></P>
<DT><STRONG><A NAME="item_learn">$net-&gt;learn($input_map_ref, $desired_result_ref [, options ]);</A></STRONG><BR>
<DD>
NOTE: <A HREF="#item_learn_set"><CODE>learn_set()</CODE></A> now has increment-degrading turned OFF by default. See note
on the degrade flag, below.
<P>This will 'teach' a network to associate a new input map with a desired 
result. It will return a string containing benchmarking information.</P>
<P>You can also specify strings as inputs and outputs to learn, and they will be 
crunched automatically. Example:</P>
<PRE>
        $net-&gt;learn('corn', 'cob');
</PRE>
<P>Note, the old method of calling crunch on the values still works just as well.</P>
<P>The first two arguments may be array refs (or now, strings), and they may be 
of different lengths.</P>
<P>Options should be passed in hash form. There are four options:
</P>
<PRE>
         inc      =&gt;    $learning_gradient
         max      =&gt;    $maximum_iterations
         error    =&gt;    $maximum_allowable_percentage_of_error
         degrade  =&gt;    $degrade_increment_flag</PRE>
<P>$learning_gradient is an optional value used to adjust the weights of the internal
connections. If $learning_gradient is omitted, it defaults to 0.002.
</P>
<P>$maximum_iterations is the maximum number of iterations the loop should do.
It defaults to 1024.  Set it to 0 if you never want the loop to quit before
the pattern is perfectly learned.</P>
<P>$maximum_allowable_percentage_of_error is the maximum allowable error. If 
this is set, then <A HREF="#item_learn"><CODE>learn()</CODE></A> will return when the percentage difference between the
actual results and desired results falls below $maximum_allowable_percentage_of_error.
If you do not include 'error', or $maximum_allowable_percentage_of_error is set to -1,
then <A HREF="#item_learn"><CODE>learn()</CODE></A> will not return until it gets an exact match for the desired result OR it
reaches $maximum_iterations.</P>
<P>$degrade_increment_flag is a simple flag used to allow/disallow increment degrading
during learning based on a product of the error difference with several other factors.
$degrade_increment_flag is off by default. Setting $degrade_increment_flag to a true
value turns increment degrading on.</P>
<P>In previous module releases $degrade_increment_flag was not used, as increment degrading
was always on. For this release I reviewed several other network types, as well as
several texts, and decided it would be better not to degrade the increment by default.
The option is still there for those inclined to use it, and some problems do learn
noticeably faster with degrading enabled. See test.pl for an example: without the
degrade flag, test.pl would take a very long time to learn.</P>
<P></P>
<DT><STRONG><A NAME="item_learn_set">$net-&gt;learn_set(\@set, [ options ]);</A></STRONG><BR>
<DD>
This takes the same options as <A HREF="#item_learn"><CODE>learn()</CODE></A> (learn_set() uses <A HREF="#item_learn"><CODE>learn()</CODE></A> internally) 
and allows you to specify a set to learn, rather than individual patterns. 
A dataset is an array reference with at least two elements in the array, 
each element being another array reference (or now, a scalar string). For 
each pattern to learn, you must specify an input array ref, and an output 
array ref as the next element. Example:

mesh.htm


        my @set = (
                # inputs        outputs
                [ 1,2,3,4 ],  [ 1,3,5,6 ],
                [ 0,2,5,6 ],  [ 0,2,1,2 ]
        );</PRE>
<P>Inputs and outputs in the dataset can also be strings.</P>
<P>See the paragraph on measuring forgetfulness, below. There are 
two learn_set()-specific option tags available:</P>
<PRE>
        flag     =&gt;  $flag
        pattern  =&gt;  $row</PRE>
<P>If ``flag'' is set to some TRUE value, as in ``flag =&gt; 1'' in the hash of options, or if the option ``flag''
is not set, then it will return a percentage representing the amount of forgetfulness. Otherwise,
<A HREF="#item_learn_set"><CODE>learn_set()</CODE></A> will return an integer specifying the amount of forgetfulness when all the patterns 
are learned.</P>
<P>If ``pattern'' is set, then <A HREF="#item_learn_set"><CODE>learn_set()</CODE></A> will use that pattern in the data set to measure forgetfulness by.
If ``pattern'' is omitted, it defaults to the first pattern in the set. Example:</P>
<PRE>
        my @set = (
                [ 0,1,0,1 ],  [ 0 ],
                [ 0,0,1,0 ],  [ 1 ],
                [ 1,1,0,1 ],  [ 2 ],  #  &lt;---


