AI-NeuralNet-BackProp


BackProp.pm

			$self->{OUTPUTS}->{LIST}->[$_]->{PKG}->input($self->{OUTPUTS}->{LIST}->[$_]->{ID},$value);
		}
	}
	
	# Used internally by output().
	sub get_output {
		my $self		=	shift;
		my $size		=	$self->{SYNAPSES}->{SIZE} || 0;
		my $value		=	0;
		my $state		= 	0;
		my (@map,@weight);
	
	    # We loop through all the synapses connected to this one and add the weighted
	    # values together, saving them in a debugging list.
		for (0..$size-1) {
			$value	+=	$self->{SYNAPSES}->{LIST}->[$_]->{VALUE};
			$self->{SYNAPSES}->{LIST}->[$_]->{FIRED} = 0;
			
			$map[$_]=$self->{SYNAPSES}->{LIST}->[$_]->{VALUE};
			$weight[$_]=$self->{SYNAPSES}->{LIST}->[$_]->{WEIGHT};
		}
		                                              
		# Debugger
		AI::NeuralNet::BackProp::join_cols(\@map,5) if(($AI::NeuralNet::BackProp::DEBUG eq 3) || ($AI::NeuralNet::BackProp::DEBUG eq 2));
		AI::NeuralNet::BackProp::out2("Weights: ".join(" ",@weight)."\n");
		
		# Simply average the values and get the integer of the average.
		$state	=	intr($value/$size);
		
		# Debugger
		AI::NeuralNet::BackProp::out1("From get_output, value is $value, so state is $state.\n");
		
		# Possible future expansion for self excitation. Not currently used.
		$self->{LAST_VALUE}	=	$value;

BackProp.pm

			
			# Recursively apply
			$self->{SYNAPSES}->{LIST}->[$i]->{WEIGHT}  +=  $delta;
			$self->{SYNAPSES}->{LIST}->[$i]->{PKG}->weight($ammount,$what);
			    
		}
	}
	
	# Registers some neuron as a synapse of this neuron.           
	# This is called exclusively by connect(), except
	# in initialize_group() to connect the _map() package.
	sub register_synapse {
		my $self	=	shift;
		my $synapse	=	shift;
		my $sid		=	$self->{SYNAPSES}->{SIZE} || 0;
		$self->{SYNAPSES}->{LIST}->[$sid]->{PKG}		=	$synapse;
		$self->{SYNAPSES}->{LIST}->[$sid]->{WEIGHT}		=	1.00		if(!$self->{SYNAPSES}->{LIST}->[$sid]->{WEIGHT});
		$self->{SYNAPSES}->{LIST}->[$sid]->{FIRED}		=	0;       
		AI::NeuralNet::BackProp::out1("$self: Registering sid $sid with weight $self->{SYNAPSES}->{LIST}->[$sid]->{WEIGHT}, package $self->{SYNAPSES}->{LIST}->[$sid]->{PKG}.\n");
		$self->{SYNAPSES}->{SIZE} = ++$sid;
		return ($sid-1);

BackProp.pm

	}
    
	# Used to format array ref into columns
	# Usage: 
	#	join_cols(\@array,$row_length_in_elements,$high_state_character,$low_state_character);
	# Can also be called as method of your neural net.
	# If $high_state_character is null, prints actual numerical values of each element.
	sub join_cols {
		no strict 'refs';
		shift if(substr($_[0],0,4) eq 'AI::'); 
		my $map		=	shift;
		my $break   =	shift;
		my $a		=	shift;
		my $b		=	shift;
		my $x;
		foreach my $el (@{$map}) { 
			my $str = ((int($el))?$a:$b);
			$str=$el."\0" if(!$a);
			print $str;
			$x++;
			if($x>$break-1) {
				print "\n";
				$x=0;
			}
		}
		print "\n";

BackProp.pm

		}
		return $tmp;
	}  
	
	# Returns a pcx object
	sub load_pcx {
		my $self	=	shift;
		return AI::NeuralNet::BackProp::PCX->new($self,shift);
	}	
	
	# Crunch a string of words into a map
	sub crunch {
		my $self	=	shift;
		my (@map,$ic);
		my @ws 		=	split(/[\s\t]/,shift);
		for my $a (0..$#ws) {
			$ic=$self->crunched($ws[$a]);
			if(!defined $ic) {
				$self->{_CRUNCHED}->{LIST}->[$self->{_CRUNCHED}->{_LENGTH}++]=$ws[$a];
				$map[$a]=$self->{_CRUNCHED}->{_LENGTH};
			} else {
				$map[$a]=$ic;
            }
		}
		return \@map;
	}
	
	# Finds if a word has been crunched.
	# Returns undef on failure, word index for success.
	sub crunched {
		my $self	=	shift;
		for my $a (0..$self->{_CRUNCHED}->{_LENGTH}-1) {
			return $a+1 if($self->{_CRUNCHED}->{LIST}->[$a] eq $_[0]);
		}
		return undef;
	}
	
	# Alias for crunched(), above
	sub word { crunched(@_) }
	
	# Uncrunches a map (array ref) into a string of words separated by spaces and returns the string
	sub uncrunch {
		my $self	=	shift;
		my $map = shift;
		my ($c,$el);
		foreach $el (@{$map}) {
			$c .= $self->{_CRUNCHED}->{LIST}->[$el-1].' ';
		}
		return $c;
	}
	
	# Sets/gets the randomness factor in the network. Setting a value of 0 disables random factors.
	sub random {
		my $self	=	shift;
		my $rand	=	shift;
		return $self->{random}	if(!(defined $rand));

BackProp.pm

		
		# When this is called, the caller tells us how many layers and how many neurons per layer.
		# But really what we store is a long line of neurons that is only divided in theory
		# when connecting the outputs and inputs.
		my $div = $size;
		$size = $layers * $size;
		
		AI::NeuralNet::BackProp::out2 "Creating RUN and MAP systems for network...\n";
		#print "Creating RUN and MAP systems for network...\n";
		
		# Create a new runner and mapper for the network.
		$self->{RUN} = new AI::NeuralNet::BackProp::_run($self);
		$self->{MAP} = new AI::NeuralNet::BackProp::_map($self);
		
		$self->{SIZE}	=	$size;
		$self->{DIV}	=	$div;
		$self->{OUT}	=	$out;
		$self->{FLAG}	=	$flag;
		$self->{col_width}= 5;
		$self->{random} = 0.001;
		
		$self->initialize_group();
		

BackProp.pm

			for my $b (0..$self->{DIV}-1) {
				print $self->{NET}->[$a]->{SYNAPSES}->{LIST}->[$b]->{WEIGHT},"\t";
			}
			print "\n";
		}
	}
	
	# Used internally by new() and learn().
	# This is the sub block that actually creates
	# the connections between the synapse chains and
	# also connects the run packages and the map packages
	# to the appropriate ends of the neuron grids.
	sub initialize_group {
		my $self	=	shift;
		my $size	=	$self->{SIZE};
		my $div		=	$self->{DIV};
		my $out		=	$self->{OUT};
		my $flag	=	$self->{FLAG};
		my $x		=	0; 
		my $y		=	0;
		
		# Reset map and run synapse counters.
		$self->{RUN}->{REGISTRATION} = $self->{MAP}->{REGISTRATION} = 0;
		
		AI::NeuralNet::BackProp::out2 "There will be $size neurons in this network group, with a divison value of $div.\n";
		#print "There will be $size neurons in this network group, with a divison value of $div.\n";
		
		# Create initial neuron packages in one long array for the entire group
		for($y=0; $y<$size; $y++) {
			#print "Initalizing neuron $y...     \r";
			$self->{NET}->[$y]=new AI::NeuralNet::BackProp::neuron();
		}

BackProp.pm

				}
				if($flag == 2) {
					$self->{NET}->[$y+$z]->connect($self->{NET}->[$y+$div+$z]);
					$self->{NET}->[$y+$z]->connect($self->{NET}->[$y+$z+1]) if($z<$div-1);
				}
				AI::NeuralNet::BackProp::out1 "\n";
			}
			AI::NeuralNet::BackProp::out1 "\n";             
		}
		
		# These next two loops connect the _run and _map packages (the IO interface) to 
		# the start and end 'layers', respectively. These are how we insert data into
		# the network and how we get data from the network. The _run and _map packages 
		# are connected to the neurons so that the neurons think that the IO packages are
		# just another neuron, sending data on. But the IO packs. are special packages designed
		# with the same methods as neurons, just meant for specific IO purposes. You will
		# never need to call any of the IO packs. directly. Instead, they are called whenever
		# you use the run(), map(), or learn() methods of your network.
        
    	AI::NeuralNet::BackProp::out2 "\nMapping I (_run package) connections to network...\n";
		
	    for($y=0; $y<$div; $y++) {
			$self->{_tmp_synapse} = $y;
			$self->{NET}->[$y]->register_synapse($self->{RUN});
			#$self->{NET}->[$y]->connect($self->{RUN});
		}
		
		AI::NeuralNet::BackProp::out2 "Mapping O (_map package) connections to network...\n\n";
		
		for($y=$size-$div; $y<$size; $y++) {
			$self->{_tmp_synapse} = $y;
			$self->{NET}->[$y]->connect($self->{MAP});
		}
		
		# And the group is done! 
	}
	

	# When called with an array reference to a pattern, returns a reference
	# to an array associated with that pattern. See usage in documentation.
	sub run {
		my $self	 =	  shift;
		my $map		 =	  shift;
		my $t0 		 =	new Benchmark;
        $self->{RUN}->run($map);
		$self->{LAST_TIME}=timestr(timediff(new Benchmark, $t0));
        return $self->map();
	}
    
    # This automatically uncrunches a response after running it
	sub run_uc {
    	$_[0]->uncrunch(run(@_));
    }

	# Returns the benchmark time and number of loops run or learned
	# for the last run() or learn()
	# operation performed.
	#
	sub benchmarked {
		my $self	=	shift;
		return $self->{LAST_TIME};
	}
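	
	# Example usage (a hypothetical call, mirroring examples/ex_pcx.pl):
	#   print "Benchmark for run: ", $net->benchmarked(), "\n";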
	    
	# Used to retrieve map from last internal run operation.
	sub map {
		my $self	 =	  shift;
		$self->{MAP}->map();
	}
	
	# Forces network to learn pattern passed and give desired
	# results. See usage in POD.
	sub learn {
		my $self	=	shift;
		my $omap	=	shift;
		my $res		=	shift;
		my %args    =   @_;
		my $inc 	=	$args{inc} || 0.20;
		my $max     =   $args{max} || 1024;
		my $_mx		=	intr($max/10);
		my $_mi		=	0;
		my $error   = 	(defined $args{error} && $args{error}>-1) ? $args{error} : -1;
  		my $div		=	$self->{DIV};
		my $size	=	$self->{SIZE};
		my $out		=	$self->{OUT};
		my $divide  =	AI::NeuralNet::BackProp->intr($div/$out);
		my ($a,$b,$y,$flag,$map,$loop,$diff,$pattern,$value);
		my ($t0,$it0);
		no strict 'refs';
		
		# Take care of crunching strings passed
		$omap = $self->crunch($omap) if(!ref($omap));
		$res  = $self->crunch($res)  if(!ref($res));
		
		# Fill in empty spaces at end of results matrix with a 0
		# (the actual fill below is currently disabled).
		if($#{$res}<$out) {
			for my $x ($#{$res}+1..$out) {
				#$res->[$x] = 0;
			}
		}
		
		# Debug

BackProp.pm

		# Start benchmark timer and initialize a few variables
		$t0 	=	new Benchmark;
        $flag 	=	0; 
		$loop	=	0;   
		my $ldiff	=	0;
		my $dinc	=	0.0001;
		my $cdiff	=	0;
		$diff		=	100;
		$error 		= 	($error>-1)?$error:-1;
		
		# $flag only goes high when all neurons in output map compare exactly with
		# desired result map or $max loops is reached
		#	
		while(!$flag && ($max ? $loop<$max : 1)) {
			$it0 	=	new Benchmark;
			
			# Run the map
			$self->{RUN}->run($omap);
			
			# Retrieve last mapping  and initialize a few variables.
			$map	=	$self->map();
			$y		=	$size-$div;
			$flag	=	1;
			
			# Compare the result map we just ran with the desired result map.
			$diff 	=	pdiff($map,$res);
			
			# This adjusts the increment multiplier to decrease as the loops increase
			if($_mi > $_mx) {
				$dinc *= 0.1;
				$_mi   = 0;
			} 
			
			$_mi++;
				
			

BackProp.pm

			$ldiff = $diff;
			
			# This catches a max error argument and handles it
			if(!($error>-1 ? $diff>$error : 1)) {
				$flag=1;
				last;
			}
			
			# Debugging
			AI::NeuralNet::BackProp::out4 "Difference: $diff\%\t Increment: $inc\tMax Error: $error\%\n";
			AI::NeuralNet::BackProp::out1 "\n\nMapping results from $map:\n";
			
			# This loop compares each element of the output map with the desired result map.
			# If they don't match exactly, we call weight() on the offending output neuron 
			# and tell it what it should be aiming for, and then the offending neuron will
			# try to adjust the weights of its synapses to get closer to the desired output.
			# See comments in the weight() method of AI::NeuralNet::BackProp for how this works.
			my $l=$self->{NET};
			for my $i (0..$out-1) {
				$a = $map->[$i];
				$b = $res->[$i];
				
				AI::NeuralNet::BackProp::out1 "\nmap[$i] is $a\n";
				AI::NeuralNet::BackProp::out1 "res[$i] is $b\n";
					
				for my $j (0..$divide-1) {
					if($a!=$b) {
						AI::NeuralNet::BackProp::out1 "Punishing $self->{NET}->[($i*$divide)+$j] at ",(($i*$divide)+$j)," ($i with $a) by $inc.\n";
						$l->[$y+($i*$divide)+$j]->weight($inc,$b) if($l->[$y+($i*$divide)+$j]);
						$flag	=	0;
					}
				}
			}

BackProp.pm

			# This counter is just used in the benchmarking operations.
			$loop++;
			
			AI::NeuralNet::BackProp::out1 "\n\n";
			
			# Benchmark this loop.
			AI::NeuralNet::BackProp::out4 "Learning itetration $loop complete, timed at".timestr(timediff(new Benchmark, $it0),'noc','5.3f')."\n";
		
			# Map the results from this loop.
			AI::NeuralNet::BackProp::out4 "Map: \n";
			AI::NeuralNet::BackProp::join_cols($map,$self->{col_width}) if ($AI::NeuralNet::BackProp::DEBUG);
			AI::NeuralNet::BackProp::out4 "Res: \n";
			AI::NeuralNet::BackProp::join_cols($res,$self->{col_width}) if ($AI::NeuralNet::BackProp::DEBUG);
		}
		
		# Compile benchmarking info for entire learn() process and return it, save it, and
		# display it.
		$self->{LAST_TIME}="$loop loops and ".timestr(timediff(new Benchmark, $t0));
        my $str = "Learning took $loop loops and ".timestr(timediff(new Benchmark, $t0),'noc','5.3f');
        AI::NeuralNet::BackProp::out2 $str;
		return $str;

BackProp.pm

		$self->{REGISTRATION}	=	++$sid;
		$self->{RMAP}->{$sid-1}	= 	$self->{PARENT}->{_tmp_synapse};
		return $sid-1;
	}
	
	# Here is the real meat of this package.
	# run() does one thing: It fires values
	# into the first layer of the network.
	sub run {
		my $self	=	shift;
		my $map		=	shift;
		my $x		=	0;
		$map = $self->{PARENT}->crunch($map) if(!ref($map));
		return undef if(ref($map) ne "ARRAY");
		foreach my $el (@{$map}) {
			# Catch ourself if we try to run more inputs than neurons
			return $x if($x>$self->{PARENT}->{DIV}-1);
			
			# Here we add a small amount of randomness to the network, scaled by the
			# network's randomness factor (see random()).
			# This is to keep the network from getting stuck on a 0 value internally.
			$self->{PARENT}->{NET}->[$x]->input(0,$el+(rand()*$self->{PARENT}->{random}));
			$x++;
		};
		# In case we were given fewer inputs than neurons, feed a constant 1 into the extra neurons
		if($x<$self->{PARENT}->{DIV}) {

BackProp.pm

				$self->{PARENT}->{NET}->[$y]->input(0,1);
			}
		}
		return $x;
	}
	
	
1;

# Internal output class. Not to be used directly.
package AI::NeuralNet::BackProp::_map;
	
	use strict;
	
	# Dummy constructor.
	sub new {
		bless { PARENT => $_[1] }, $_[0]
	}
	
	# Compliance with neuron interface
	sub weight {}

BackProp.pm

		my $self	=	shift;		
		my $sid		=	$self->{REGISTRATION} || 0;
		$self->{REGISTRATION}	=	++$sid;
		$self->{RMAP}->{$sid-1} = 	$self->{PARENT}->{_tmp_synapse};
		return $sid-1;
	}
	
	# This acts just like a regular neuron by receiving
	# values from input synapses. Yet, unlike a regular
	# neuron, it doesn't weight the values; it just stores
	# them to be retrieved by a call to map().
	sub input  {
		no strict 'refs';             
		my $self	=	shift;
		my $sid		=	shift;
		my $value	=	shift;
		my $size	=	$self->{PARENT}->{DIV};
		my $flag	=	1;
		$self->{OUTPUT}->[$sid]->{VALUE}	=	$self->{PARENT}->intr($value);
		$self->{OUTPUT}->[$sid]->{FIRED}	=	1;
		
		AI::NeuralNet::BackProp::out1 "Received value $self->{OUTPUT}->[$sid]->{VALUE} and sid $sid, self $self.\n";
	}
	
	# Here we simply collect the value of every neuron connected to this
	# one from the layer below us and return an array ref to the final map.
	sub map {
		my $self	=	shift;
		my $size	=	$self->{PARENT}->{DIV};
		my $out		=	$self->{PARENT}->{OUT};
		my $divide  =	AI::NeuralNet::BackProp->intr($size/$out);
		my @map = ();
		my $value;
		AI::NeuralNet::BackProp::out1 "Num output neurons: $out, Input neurons: $size, Division: $divide\n";
		for(0..$out-1) {
			$value=0;
			for my $a (0..$divide-1) {
				$value += $self->{OUTPUT}->[($_*$divide)+$a]->{VALUE};
				AI::NeuralNet::BackProp::out1 "\$a is $a, index is ".(($_*$divide)+$a).", value is $self->{OUTPUT}->[($_*$divide)+$a]->{VALUE}\n";
			}
			$map[$_]	=	AI::NeuralNet::BackProp->intr($value/$divide);
			AI::NeuralNet::BackProp::out1 "Map position $_ is $map[$_] in @{[\@map]} with self set to $self.\n";
			$self->{OUTPUT}->[$_]->{FIRED}	=	0;
		}
		my $ret=\@map;
		return $self->{PARENT}->_range($ret);
	}
1;
			      
# load_pcx() wrapper package
package AI::NeuralNet::BackProp::PCX;

	# Called by load_pcx in AI::NeuralNet::BackProp;
	sub new {
		my $type	=	shift;

BackProp.pm

	                             	
     input
     /  \
    O    O
    |\  /|
    | \/ |
    | /\ |
    |/  \|
    O    O
     \  /
    mapper

In this diagram, each neuron is connected to one input of every
neuron in the layer below it, but there are no connections
between neurons in the same layer. The weight of each connection
is controlled by the receiving neuron, not the sending neuron.
(That is, the sending neuron has no idea how much weight its
output carries when it sends it; it just sends its output, and the
weighting is taken care of by the receiving neuron.) This is the 
method used to connect cells in every network built by this package.

Input is fed into the network via a call like this:

	use AI::NeuralNet::BackProp;
	my $net = new AI::NeuralNet::BackProp(2,2);
	
	my @map = (0,1);
	
	my $result = $net->run(\@map);
	

Now, this call would probably not give what you want, because
the network hasn't "learned" any patterns yet. But this
illustrates the call. run() now also accepts strings as
input. See run() for more information.


run() returns an array reference with $size elements. (Remember $size? $size
is what you passed as the second argument to the network
constructor.) This array contains the results of the mapping. If
you ran the example exactly as shown above, $result would probably 
contain (1,1) as its elements. 

To make the network learn a new pattern, you simply call the learn()
method with a sample input and the desired result, both array
references of $size length. Example:

	use AI::NeuralNet::BackProp;
	my $net = new AI::NeuralNet::BackProp(2,2);
	
	my @map = (0,1);
	my @res = (1,0);
	
	$net->learn(\@map,\@res);
	
	my $result = $net->run(\@map);

Now $result will contain (1,0), effectively flipping the input pattern
around. Obviously, the larger $size is, the longer it will take
to learn a pattern. learn() returns a string in the form of

	Learning took X loops and X wallclock seconds (X.XXX usr + X.XXX sys = X.XXX CPU).

with the X's replaced by the time or loop values for that call. So,
to view the learning stats for every learn() call, you can just:
	
	print $net->learn(\@map,\@res);
	

If you call "$net->debug(4)" with $net being the 
reference returned by the new() constructor, you will get benchmarking 
information for the learn function, as well as plenty of other information output. 
See notes on debug() in the METHODS section, below. 

If you do call $net->debug(1), it is a good 
idea to point the STDOUT of your script to a file, as a lot of information is output. I often
use this command line:

BackProp.pm




As you can see, each neuron is connected to the next one in its layer, as well
as the neuron directly above itself.
	

Before you can really do anything useful with your new neural network
object, you need to teach it some patterns. See the learn() method, below.

=item $net->learn($input_map_ref, $desired_result_ref [, options ]);

This will 'teach' a network to associate a new input map with a desired result.
It will return a string containing benchmarking information. You can retrieve the
pattern index that the network stored the new input map in after learn() is complete
with the pattern() method, below.

UPDATED: You can now specify strings as inputs and outputs to learn(), and they will be crunched
automatically. Example:

	$net->learn('corn', 'cob');
	# Before this update, you would have had to do this:
	# $net->learn($net->crunch('corn'), $net->crunch('cob'));

Note: the old method of calling crunch() on the values still works just as well.

UPDATED: You can now learn inputs with a 0 value. Beware though, it may not learn() a 0 value 
in the input map if you have randomness disabled. See NOTES on using a 0 value with randomness
disabled.

The first two arguments may be array refs (or now, strings), and they may be of different lengths.

Options should be written in hash form. There are three options:
	 
	 inc	=>	$learning_gradient
	 max	=>	$maximum_iterations
	 error	=>	$maximum_allowable_percentage_of_error
	 
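For example (a sketch; the values shown for inc and max are just the documented defaults, and error is an arbitrary cap):

	$net->learn('corn', 'cob',
	            inc   => 0.20,   # learning gradient (default)
	            max   => 1024,   # maximum iterations (default)
	            error => 5);     # stop once error drops below 5%
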

BackProp.pm


	my @data = (
		[ 0,1 ], [ 1 ],
		[ 1,0 ], [ 0 ]
	);
	
	$net->learn_set_rand(\@data);
	


=item $net->run($input_map_ref);

UPDATE: run() will now I<automatically> crunch() a string given as the input.

This method will apply the given array ref at the input layer of the neural network, and
it will return an array ref to the output of the network.

Example:
	
	my $inputs  = [ 1,1,0,1 ];
	my $outputs = $net->run($inputs);

With the new update you can do this:

	my $outputs = $net->run('cloudy, wind is 5 MPH NW');
	# Old method:
	# my $outputs = $net->run($net->crunch('cloudy, wind is 5 MPH NW'));

See also run_uc() below.


=item $net->run_uc($input_map_ref);

This method does the same thing as this code:
	
	$net->uncrunch($net->run($input_map_ref));

All run_uc() does is automatically call uncrunch() on the output, regardless
of whether the input was crunch()ed or not.
	


=item $net->range();

This allows you to limit the possible outputs to a specific set of values. There are several 
ways you can specify the set of values to limit the output to. Each method is shown below. 

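One such form, used (commented out) in examples/ex_alpha.pl, passes a list of allowed output values:

	$net->range(0..29);	# limit outputs to the values 0 through 29
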
BackProp.pm

Level 0 ($level = 0) : Default, no debugging information printed. All printing is 
left to calling script.

Level 1 ($level = 1) : This causes ALL debugging information for the network to be dumped
as the network runs. In this mode, it is a good idea to pipe your STDOUT to a file, especially
for large programs.

Level 2 ($level = 2) : A slightly-less verbose form of debugging, not as many internal 
data dumps.

Level 3 ($level = 3) : JUST prints weight mapping as weights change.

Level 4 ($level = 4) : JUST prints the benchmark info for EACH learn loop iteration, not just
learning as a whole. Also prints the percentage difference for each loop between current network
results and desired results, as well as the learning gradient ('increment').   

Level 4 is useful for seeing if you need to give a smaller learning increment to learn().
I used level 4 debugging quite often in creating the letters.pl example script and the small_1.pl
example script.

Toggles debugging off when called with no arguments. 
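
For example, the bitmap demos in ./examples enable the most verbose per-loop reporting with:

	$net->debug(4);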

BackProp.pm




=item $net->crunch($string);

UPDATE: Now you can use a variable instead of using qw(). Strings will be split internally.
Do not use qw() to pass strings to crunch().

This splits a string passed with /[\s\t]/ into an array ref containing unique indexes
to the words. The words are stored in an internal array and preserved across load() and save()
calls. This is designed to be used to generate unique maps suitable for passing to learn() and 
run() directly. It returns an array ref.

The words are not duplicated internally. For example:

	$net->crunch("How are you?");

Will probably return an array ref containing 1,2,3. A subsequent call of:

    $net->crunch("How is Jane?");

Will probably return an array ref containing 1,4,5. Notice, the first element stayed
the same. That is because it already stored the word "How". So, each word is stored
only once internally and the returned array ref reflects that.
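
A short round-trip sketch with its counterpart uncrunch(), described below:

	my $map = $net->crunch("How are you?");	# e.g. [1,2,3]
	print $net->uncrunch($map);            	# prints "How are you? "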


=item $net->uncrunch($array_ref);

Uncrunches a map (array ref) into a scalar string of words separated by ' ' and returns the 
string. This is meant to be used as a counterpart to the crunch() method, above, possibly to 
uncrunch() the output of a run() call. Consider the code below (also in ./examples/ex_crunch.pl):
                           
	use AI::NeuralNet::BackProp;
	my $net = AI::NeuralNet::BackProp->new(2,3);
	
	for (0..3) {		# Note: The four learn() statements below could 
						# be replaced with learn_set() to do the same thing,
						# but use this form here for clarity.

BackProp.pm


=item $net->crunched($word);

This will return undef if the word is not in the internal crunch list, or it will return the
index of the word if it exists in the crunch list. 
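
For example (a minimal sketch):

	my $idx = $net->crunched("How");
	print defined $idx ? "'How' is at index $idx\n" : "'How' not crunched yet\n";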


=item $net->col_width($width);

This is useful for formatting the debugging output of Level 4 if you are learning simple 
bitmaps. This will set the debugger to automatically insert a line break after that many
elements in the map output when dumping the currently run map during a learn loop.

It will return the current width when called with a 0 or undef value.
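
For example, when learning bitmaps five elements wide:

	$net->col_width(5);	# line break after every 5 elements in map dumps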


=item $net->random($rand);

This will set the randomness factor used by the network. Default is 0.001. When called 
with no arguments, or an undef value, it will return the current randomness value. When
called with a 0 value, it will disable randomness in the network. See NOTES on learning 
a 0 value in the input map with randomness disabled.
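
The three calling forms, as a quick sketch:

	$net->random(0.001);   	# set the randomness factor
	my $r = $net->random();	# get the current value
	$net->random(0);       	# disable randomness entirely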



=item $net->load_pcx($filename);

Oh, here's a treat... this routine will load a PCX-format file (yah, I know ... ancient format ... but
it is the only one I could find specs for to write it in Perl. If anyone can get specs for
any other formats, or could write a loader for them, I would be very grateful!) Anyway, it loads a PCX-format
file that is exactly 320x200 with 8 bits per pixel, in pure Perl. It returns a blessed reference to 
an AI::NeuralNet::BackProp::PCX object, which supports the following routines/members. See the example 
files ex_pcxl.pl and ex_pcx.pl in the ./examples/ directory.
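
A short sketch based on examples/ex_pcx.pl:

	my $img   = $net->load_pcx("josiah.pcx");   # 320x200, 8 bits per pixel
	my $white = $img->get_block([0,0,10,10]);   # grab a 10x10 pixel block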

BackProp.pm

=item $pcx->avg($index);	

Returns the mean value of the red, green, and blue values at the palette index in $index.
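
A sketch (assuming $pcx was returned by load_pcx()):

	my $brightness = $pcx->avg(0);	# mean of R, G and B at palette index 0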
	


=head1 NOTES

=item Learning 0s With Randomness Disabled

You can now use 0 values in any input maps. This is a good improvement over versions 0.40
and 0.42, where no 0s were allowed because the learning would never completely finish
with a 0 in the input. 

Yet with the allowance of 0s, one of two conditions must be met for the network to learn correctly. Either you
must enable randomness with $net->random(0.0001) (any value works [other than 0], see random() ), 
or you must set an error minimum with the 'error => 5' option (you can use some other error value 
as well). 
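
In code, either condition looks like this (a sketch using the documented names):

	$net->random(0.0001);                   # option 1: enable randomness
	$net->learn(\@map, \@res, error => 5);  # option 2: set an error minimum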

When randomness is enabled (that is, when you call random() with a value other than 0), it interjects
a bit of randomness into the output of every neuron in the network, except for the input and output

BackProp.pm


=item AI::NeuralNet::BackProp::neuron

AI::NeuralNet::BackProp::neuron is the worker package for AI::NeuralNet::BackProp.
It implements the actual neurons of the neural network.
AI::NeuralNet::BackProp::neuron is not designed to be created directly, as
it is used internally by AI::NeuralNet::BackProp.

=item AI::NeuralNet::BackProp::_run

=item AI::NeuralNet::BackProp::_map

These two packages, _run and _map are used to insert data into
the network and used to get data from the network. The _run and _map packages 
are connected to the neurons so that the neurons think that the IO packages are
just another neuron, sending data on. But the IO packs. are special packages designed
with the same methods as neurons, just meant for specific IO purposes. You will
never need to call any of the IO packs. directly. Instead, they are called whenever
you use the run() or learn() methods of your network.
        



=head1 BUGS

docs.htm


     input
     /  \
    O    O
    |\  /|
    | \/ |
    | /\ |
    |/  \|
    O    O
     \  /
    mapper</PRE>
<P>In this diagram, each neuron is connected to one input of every
neuron in the layer below it, but there are no connections
between neurons in the same layer. The weight of each connection
is controlled by the receiving neuron, not the sending neuron.
(That is, the sending neuron has no idea how much weight its
output carries when it sends it; it just sends its output, and the
weighting is taken care of by the receiving neuron.) This is the 
method used to connect cells in every network built by this package.</P>
<P>Input is fed into the network via a call like this:</P>
<PRE>
        use AI::NeuralNet::BackProp;
        my $net = new AI::NeuralNet::BackProp(2,2);

        my @map = (0,1);

        my $result = $net-&gt;run(\@map);</PRE>
<P>Now, this call would probably not give what you want, because
the network hasn't ``learned'' any patterns yet. But this
illustrates the call. <A HREF="#item_run"><CODE>run()</CODE></A> now also accepts strings as
input. See <A HREF="#item_run"><CODE>run()</CODE></A> for more information.</P>
<P><CODE>run()</CODE> returns an array reference with $size elements. (Remember $size? $size
is what you passed as the second argument to the network
constructor.) This array contains the results of the mapping. If
you ran the example exactly as shown above, $result would probably 
contain (1,1) as its elements.</P>
<P>To make the network learn a new pattern, you simply call the learn
method with a sample input and the desired result, both array
references of $size length. Example:</P>
<PRE>
        use AI::NeuralNet::BackProp;
        my $net = new AI::NeuralNet::BackProp(2,2);

        my @map = (0,1);
        my @res = (1,0);

        $net-&gt;learn(\@map,\@res);

        my $result = $net-&gt;run(\@map);</PRE>
<P>Now $result will contain (1,0), effectively flipping the input pattern
around. Obviously, the larger $size is, the longer it will take
to learn a pattern. <CODE>Learn()</CODE> returns a string in the form of</P>
<PRE>
        Learning took X loops and X wallclock seconds (X.XXX usr + X.XXX sys = X.XXX CPU).</PRE>
<P>With the X's replaced by the time or loop values for that call. So,
to view the learning stats for every learn call, you can just:
</P>
<PRE>
        print $net-&gt;learn(\@map,\@res);</PRE>
<P>If you call ``$net-&gt;debug(4)'' with $net being the 
reference returned by the <CODE>new()</CODE> constructor, you will get benchmarking 
information for the learn function, as well as plenty of other information output. 
See notes on <A HREF="#item_debug"><CODE>debug()</CODE></A> in the METHODS section, below.</P>
<P>If you do call $net-&gt;debug(1), it is a good 
idea to point the STDOUT of your script to a file, as a lot of information is output. I often
use this command line:</P>
<PRE>
        $ perl some_script.pl &gt; .out</PRE>
<P>Then I can simply go and use emacs or any other text editor and read the output at my leisure,

docs.htm

        |   |   |
        |   |   |
        O--&gt;O--&gt;O
        ^   ^   ^
        |   |   |</PRE>
<P>As you can see, each neuron is connected to the next one in its layer, as well
as the neuron directly above itself.</P>
<P>Before you can really do anything useful with your new neural network
object, you need to teach it some patterns. See the <A HREF="#item_learn"><CODE>learn()</CODE></A> method, below.</P>
<P></P>
<DT><STRONG><A NAME="item_learn">$net-&gt;learn($input_map_ref, $desired_result_ref [, options ]);</A></STRONG><BR>
<DD>
This will 'teach' a network to associate a new input map with a desired result.
It will return a string containing benchmarking information. You can retrieve the
pattern index that the network stored the new input map in after <A HREF="#item_learn"><CODE>learn()</CODE></A> is complete
with the <CODE>pattern()</CODE> method, below.
<P><B>UPDATED:</B> You can now specify strings as inputs and outputs to learn(), and they will be crunched
automatically. Example:</P>
<PRE>
        $net-&gt;learn('corn', 'cob');
        # Before this update, you would have had to do this:
        # $net-&gt;learn($net-&gt;crunch('corn'), $net-&gt;crunch('cob'));</PRE>
<P>Note: the old method of calling crunch() on the values still works just as well.</P>
<P><B>UPDATED:</B> You can now learn inputs with a 0 value. Beware though, it may not <A HREF="#item_learn"><CODE>learn()</CODE></A> a 0 value 
in the input map if you have randomness disabled. See NOTES on using a 0 value with randomness
disabled.</P>
<P>The first two arguments may be array refs (or now, strings), and they may be of different lengths.</P>
<P>Options should be written in hash form. There are three options:
</P>
<PRE>
         inc    =&gt;      $learning_gradient
         max    =&gt;      $maximum_iterations
         error  =&gt;      $maximum_allowable_percentage_of_error</PRE>
<P>$learning_gradient is an optional value used to adjust the weights of the internal
connections. If $learning_gradient is omitted, it defaults to 0.20.

docs.htm

value (1) instead of a forgetfulness factor.</P>
<P>Example:</P>
<PRE>
        my @data = (
                [ 0,1 ], [ 1 ],
                [ 1,0 ], [ 0 ]
        );

        $net-&gt;learn_set_rand(\@data);</PRE>
<P></P>
<DT><STRONG><A NAME="item_run">$net-&gt;run($input_map_ref);</A></STRONG><BR>
<DD>
<B>UPDATED:</B> <A HREF="#item_run"><CODE>run()</CODE></A> will now <EM>automatically</EM> <A HREF="#item_crunch"><CODE>crunch()</CODE></A> a string given as the input.
<P>This method will apply the given array ref at the input layer of the neural network, and
it will return an array ref to the output of the network.</P>
<P>Example:
</P>
<PRE>

        my $inputs  = [ 1,1,0,1 ];
        my $outputs = $net-&gt;run($inputs);</PRE>
<P>With the new update you can do this:</P>
<PRE>
        my $outputs = $net-&gt;run('cloudy, wind is 5 MPH NW');
        # Old method:
        # my $outputs = $net-&gt;run($net-&gt;crunch('cloudy, wind is 5 MPH NW'));</PRE>
<P>See also <A HREF="#item_run_uc"><CODE>run_uc()</CODE></A> below.</P>
<P></P>
<DT><STRONG><A NAME="item_run_uc">$net-&gt;run_uc($input_map_ref);</A></STRONG><BR>
<DD>
This method does the same thing as this code:

<PRE>
        $net-&gt;uncrunch($net-&gt;run($input_map_ref));</PRE>
<P>All <A HREF="#item_run_uc"><CODE>run_uc()</CODE></A> does is automatically call <A HREF="#item_uncrunch"><CODE>uncrunch()</CODE></A> on the output, regardless
of whether the input was <A HREF="#item_crunch"><CODE>crunch()</CODE></A> -ed or not.</P>
<P></P>
<DT><STRONG><A NAME="item_range">$net-&gt;range();</A></STRONG><BR>
<DD>
This allows you to limit the possible outputs to a specific set of values. There are several 
ways you can specify the set of values to limit the output to. Each method is shown below. 
When called without any arguments, it will disable output range limits. You will need to re-learn
any data previously learned after disabling ranging, as disabling range invalidates the current
weight matrix in the network.

docs.htm

<DD>
Toggles debugging off if called with $level = 0 or no arguments. There are four levels
of debugging.
<P>Level 0 ($level = 0) : Default, no debugging information printed. All printing is 
left to calling script.</P>
<P>Level 1 ($level = 1) : This causes ALL debugging information for the network to be dumped
as the network runs. In this mode, it is a good idea to pipe your STDOUT to a file, especially
for large programs.</P>
<P>Level 2 ($level = 2) : A slightly-less verbose form of debugging, not as many internal 
data dumps.</P>
<P>Level 3 ($level = 3) : JUST prints weight mapping as weights change.</P>
<P>Level 4 ($level = 4) : JUST prints the benchmark info for EACH learn loop iteration, not just
learning as a whole. Also prints the percentage difference for each loop between current network
results and desired results, as well as the learning gradient ('increment').</P>
<P>Level 4 is useful for seeing if you need to give a smaller learning increment to <A HREF="#item_learn"><CODE>learn()</CODE></A>.
I used level 4 debugging quite often in creating the letters.pl example script and the small_1.pl
example script.</P>
<P>Toggles debugging off when called with no arguments.</P>
<P></P>
<DT><STRONG><A NAME="item_save">$net-&gt;save($filename);</A></STRONG><BR>
<DD>

docs.htm

<DD>
This will dump a simple listing of all the weights of all the connections of every neuron
in the network to STDOUT.
<P></P>
<DT><STRONG><A NAME="item_crunch">$net-&gt;crunch($string);</A></STRONG><BR>
<DD>
<B>UPDATED:</B> Now you can use a variable instead of using qw(). Strings will be split internally.
Do not use <CODE>qw()</CODE> to pass strings to crunch().
<P>This splits a string passed with /[\s\t]/ into an array ref containing unique indexes
to the words. The words are stored in an internal array and preserved across <A HREF="#item_load"><CODE>load()</CODE></A> and <A HREF="#item_save"><CODE>save()</CODE></A>
calls. This is designed to be used to generate unique maps suitable for passing to <A HREF="#item_learn"><CODE>learn()</CODE></A> and 
<A HREF="#item_run"><CODE>run()</CODE></A> directly. It returns an array ref.</P>
<P>The words are not duplicated internally. For example:</P>
<PRE>
        $net-&gt;crunch(&quot;How are you?&quot;);</PRE>
<P>Will probably return an array ref containing 1,2,3. A subsequent call of:</P>
<PRE>
    $net-&gt;crunch(&quot;How is Jane?&quot;);</PRE>
<P>Will probably return an array ref containing 1,4,5. Notice, the first element stayed
the same. That is because it already stored the word ``How''. So, each word is stored
only once internally and the returned array ref reflects that.</P>
<P></P>
<DT><STRONG><A NAME="item_uncrunch">$net-&gt;uncrunch($array_ref);</A></STRONG><BR>
<DD>
Uncrunches a map (array ref) into a scalar string of words separated by ' ' and returns the 
string. This is meant to be used as a counterpart to the <A HREF="#item_crunch"><CODE>crunch()</CODE></A> method, above, possibly to 
<A HREF="#item_uncrunch"><CODE>uncrunch()</CODE></A> the output of a <A HREF="#item_run"><CODE>run()</CODE></A> call. Consider the code below (also in ./examples/ex_crunch.pl):

<PRE>

        use AI::NeuralNet::BackProp;
        my $net = AI::NeuralNet::BackProp-&gt;new(2,3);

        for (0..3) {            # Note: The four learn() statements below could 
                                                # be replaced with learn_set() to do the same thing,

docs.htm

for some interesting demos!</P>
<P></P>
<DT><STRONG><A NAME="item_crunched">$net-&gt;crunched($word);</A></STRONG><BR>
<DD>
This will return undef if the word is not in the internal crunch list, or it will return the
index of the word if it exists in the crunch list.
<P></P>
<DT><STRONG><A NAME="item_col_width">$net-&gt;col_width($width);</A></STRONG><BR>
<DD>
This is useful for formatting the debugging output of Level 4 if you are learning simple 
bitmaps. This will set the debugger to automatically insert a line break after that many
elements in the map output when dumping the currently run map during a learn loop.
<P>It will return the current width when called with a 0 or undef value.</P>
<P></P>
<DT><STRONG><A NAME="item_random">$net-&gt;random($rand);</A></STRONG><BR>
<DD>
This will set the randomness factor used by the network. Default is 0.001. When called 
with no arguments, or an undef value, it will return the current randomness value. When
called with a 0 value, it will disable randomness in the network. See NOTES on learning 
a 0 value in the input map with randomness disabled.
<P></P>
<DT><STRONG><A NAME="item_load_pcx">$net-&gt;load_pcx($filename);</A></STRONG><BR>
<DD>
Oh, here's a treat... this routine will load a PCX-format file (yah, I know ... ancient format ... but
it is the only one I could find specs for to write it in Perl. If anyone can get specs for
any other formats, or could write a loader for them, I would be very grateful!) Anyway, it loads a PCX-format
file that is exactly 320x200 with 8 bits per pixel, in pure Perl. It returns a blessed reference to 
an AI::NeuralNet::BackProp::PCX object, which supports the following routines/members. See the example 
files ex_pcxl.pl and ex_pcx.pl in the ./examples/ directory.
<P></P>

docs.htm

<DT><STRONG><A NAME="item_avg">$pcx-&gt;avg($index);</A></STRONG><BR>
<DD>
Returns the mean value of the red, green, and blue values at the palette index in $index.
<P></P></DL>
<P>
<HR SIZE=1 COLOR=BLACK>
<H1><A NAME="notes">NOTES</A></H1>
<DL>
<DT><STRONG><A NAME="item_Learning_0s_With_Randomness_Disabled">Learning 0s With Randomness Disabled</A></STRONG><BR>
<DD>
You can now use 0 values in any input maps. This is a good improvement over versions 0.40
and 0.42, where no 0s were allowed because the learning would never completely finish
with a 0 in the input.
<P>Yet with the allowance of 0s, one of two conditions must be met for the network to learn correctly. Either you
must enable randomness with $net-&gt;<A HREF="#item_random"><CODE>random(0.0001)</CODE></A> (any value works [other than 0], see <A HREF="#item_random"><CODE>random()</CODE></A> ), 
or you must set an error minimum with the 'error =&gt; 5' option (you can use some other error value 
as well).</P>
<P>When randomness is enabled (that is, when you call <A HREF="#item_random"><CODE>random()</CODE></A> with a value other than 0), it interjects
a bit of randomness into the output of every neuron in the network, except for the input and output
neurons. The randomness is interjected with rand()*$rand, where $rand is the value that was
passed to <A HREF="#item_random"><CODE>random()</CODE></A> call. This assures the network that it will never have a pure 0 internally. It is

docs.htm

<DL>
<DT><STRONG><A NAME="item_AI%3A%3ANeuralNet%3A%3ABackProp%3A%3Aneuron">AI::NeuralNet::BackProp::neuron</A></STRONG><BR>
<DD>
AI::NeuralNet::BackProp::neuron is the worker package for AI::NeuralNet::BackProp.
It implements the actual neurons of the neural network.
AI::NeuralNet::BackProp::neuron is not designed to be created directly, as
it is used internally by AI::NeuralNet::BackProp.
<P></P>
<DT><STRONG><A NAME="item_AI%3A%3ANeuralNet%3A%3ABackProp%3A%3A_run">AI::NeuralNet::BackProp::_run</A></STRONG><BR>
<DD>
<DT><STRONG><A NAME="item_AI%3A%3ANeuralNet%3A%3ABackProp%3A%3A_map">AI::NeuralNet::BackProp::_map</A></STRONG><BR>
<DD>
These two packages, _run and _map are used to insert data into
the network and used to get data from the network. The _run and _map packages 
are connected to the neurons so that the neurons think that the IO packages are
just another neuron, sending data on. But the IO packs. are special packages designed
with the same methods as neurons, just meant for specific IO purposes. You will
never need to call any of the IO packs. directly. Instead, they are called whenever
you use the <A HREF="#item_run"><CODE>run()</CODE></A> or <A HREF="#item_learn"><CODE>learn()</CODE></A> methods of your network.
<P></P></DL>
<P>
<HR SIZE=1 COLOR=BLACK>
<H1><A NAME="bugs">BUGS</A></H1>
<P>This is an alpha release of <CODE>AI::NeuralNet::BackProp</CODE>, and that being true, I am sure 

examples/ex_alpha.pl

=begin
    
    File:	examples/ex_alpha.pl
	Author: Josiah Bryan, <jdb@wcoil.com>
	Desc: 

		This demonstrates the ability of a neural net to generalize and predict what the correct
		result is for inputs that it has never seen before.
		
		This teaches the network to classify some twenty-nine separate 35-byte bitmaps, and
		then it inputs a never-before-seen bitmap and displays the classification the network 
		gives for the unknown bitmap.

=cut

	use AI::NeuralNet::BackProp;

	# Create a new network with 2 layers and 35 neurons in each layer, with 1 output neuron
	my $net = new AI::NeuralNet::BackProp(2,35,1);
	
	# Debug level of 4 gives JUST learn loop iteration benchmark and comparison data 
	# as learning progresses.

examples/ex_alpha.pl

        2,2,1,2,2
        ]
     ];
	
	if(!$net->load("letters.dat")) {
		#$net->range(0..29);
		$net->learn_set($letters);
		$net->save("letters.dat");
	}
			
	# Build a test map 
	my $tmp	=	[2,1,1,1,2,
				 1,2,2,2,1,
				 1,2,2,2,1,
				 1,1,1,1,1,
				 1,2,2,2,1,
				 1,2,2,2,1,
				 1,2,2,2,1];
	
	# Display test map
	print "\nTest map:\n";
	$net->join_cols($tmp,5);
	
	# Display network results
	print "Letter index matched: ",$net->run($tmp)->[0],"\n";
	

examples/ex_bmp.pl

=begin

	File:   examples/ex_bmp.pl
	Author: Josiah Bryan, <jdb@wcoil.com>
	Desc:
	
		This demonstrates simple classification of 5x5 bitmaps.

=cut

	use AI::NeuralNet::BackProp;
	use Benchmark;

	# Set resolution
	my $xres=5;
	my $yres=5;
	

examples/ex_bmp2.pl

=begin
    
    File:	examples/ex_bmp2.pl
	Author: Josiah Bryan, <jdb@wcoil.com>
	Desc: 

		This demonstrates the ability of a neural net to generalize and predict what the correct
		result is for inputs that it has never seen before.
		
		This teaches a network to recognize a 5x7 bitmap of the letter "J", then presents
		the network with a corrupted "J" and displays the network's output.

=cut

	use AI::NeuralNet::BackProp;

	# Create a new network with 2 layers and 35 neurons in each layer, with 1 output neuron.
	my $net = new AI::NeuralNet::BackProp(2,35,1);
	
	# Debug level of 4 gives JUST learn loop iteration benchmark and comparison data 
	# as learning progresses.
	$net->debug(4);
	
	# Create our model input
	my @map	=	(1,1,1,1,1,
				 0,0,1,0,0,
				 0,0,1,0,0,
				 0,0,1,0,0,
				 1,0,1,0,0,
				 1,0,1,0,0,
				 1,1,1,0,0);
				 
	
	print "\nLearning started...\n";
	
	print $net->learn(\@map,'J');
	
	print "Learning done.\n";
		
	# Build a test map 
	my @tmp	=	(0,0,1,1,1,
				 1,1,1,0,0,
				 0,0,0,1,0,
				 0,0,0,1,0,
				 0,0,0,1,0,                                          
				 0,0,0,0,0,
				 0,1,1,0,0);
	
	# Display test map
	print "\nTest map:\n";
	$net->join_cols(\@tmp,5,'');
	
	print "Running test...\n";
		                    
	# Run the actual test and get network output
	print "Result: ",$net->run_uc(\@tmp),"\n";
	
	print "Test run complete.\n";
	
	

examples/ex_pcx.pl

=begin
	
	File:   examples/ex_pcx.pl
	Author: Josiah Bryan, <jdb@wcoil.com>
	Desc:
		This teaches the network to classify 10x10 bitmaps from a PCX file based on their 
		whiteness. (This was taught on a b&w 320x200 PCX of the author at an early age :-)
=cut
	
	use AI::NeuralNet::BackProp;
	
	# Set block sizes
	my ($bx,$by)=(10,10);
	
	print "Creating Neural Net...";
	my $net=AI::NeuralNet::BackProp->new(1,$bx*$by,1);
	$net->{col_width} = $bx;
	print "Done!\n";
	
	print "Loading bitmap...";
	my $img = $net->load_pcx("josiah.pcx");             
	print "Done!\n";
	
	print "Comparing blocks...\n";
	my $white = $img->get_block([0,0,$bx,$by]);
	
	my ($x,$y,$tmp,@scores,$s,@blocks,$b);
	for ($x=0;$x<320;$x+=$bx) {
		for ($y=0;$y<200;$y+=$by) {
			$blocks[$b++]=$img->get_block([$x,$y,$x+$bx,$y+$by]);

examples/ex_pcx.pl

	
	print "Result: ",$net->run($blocks[rand()*$b])->[0],"\n";
	
	print "Bencmark for run: ", $net->benchmarked(), "\n";
	
	$net->save("pcx2.net");
	
	sub print_ref {
		no strict 'refs';
		shift if(substr($_[0],0,4) eq 'AI::'); 
		my $map		=	shift;
		my $break   =	shift;
		my $x;
		my @els = (' ','.',',',':',';','%','#');
		foreach my $el (@{$map}) { 
			my $str = int($el/255*6);	# scale 0..255 to an index into @els
			print $els[$str];
			$x++;
			if($x>$break-1) {
				print "\n";
				$x=0;
			}
		}
		print "\n";
	}


