AI-NeuralNet-BackProp
$map[$_]=$self->{SYNAPSES}->{LIST}->[$_]->{VALUE};
$weight[$_]=$self->{SYNAPSES}->{LIST}->[$_]->{WEIGHT};
}
# Debugger
AI::NeuralNet::BackProp::join_cols(\@map,5) if(($AI::NeuralNet::BackProp::DEBUG eq 3) || ($AI::NeuralNet::BackProp::DEBUG eq 2));
AI::NeuralNet::BackProp::out2("Weights: ".join(" ",@weight)."\n");
# Simply average the values and get the integer of the average.
$state = intr($value/$size);
# Debugger
AI::NeuralNet::BackProp::out1("From get_output, value is $value, so state is $state.\n");
# Possible future expansion for self-excitation. Not currently used.
$self->{LAST_VALUE} = $value;
# Just return the $state
return $state;
}
# Used by input() to check if all registered synapses have fired.
sub input_complete {
my $self = shift;
my $size = $self->{SYNAPSES}->{SIZE} || 0;
my $retvalue = 1;
# Very simple loop. Doesn't need explaining.
for (0..$size-1) {
$retvalue = 0 if(!$self->{SYNAPSES}->{LIST}->[$_]->{FIRED});
}
return $retvalue;
}
# Used to recursively adjust the weights of synapse input channels
# to give a desired value. Designed to be called via
# AI::NeuralNet::BackProp::NeuralNetwork::learn().
sub weight {
my $self = shift;
my $ammount = shift;
my $what = shift;
my $size = $self->{SYNAPSES}->{SIZE} || 0;
my $value;
AI::NeuralNet::BackProp::out1("Weight: ammount is $ammount, what is $what with size at $size.\n");
# Now this sub is the main cog in the learning wheel. It is called recursively on
# each neuron that has been bad (i.e., that produced incorrect output).
for my $i (0..$size-1) {
$value = $self->{SYNAPSES}->{LIST}->[$i]->{VALUE};
if(0) { # Note: this branch is disabled; change 0 to 1 to experiment with it.
# Formula by Steve Purkis
# Converges very fast for low-value inputs. Has trouble converging on high-value
# inputs. Feel free to play and try to get to work for high values.
my $delta = $ammount * ($what - $value) * $self->{SYNAPSES}->{LIST}->[$i]->{INPUT};
$self->{SYNAPSES}->{LIST}->[$i]->{WEIGHT} += $delta;
$self->{SYNAPSES}->{LIST}->[$i]->{PKG}->weight($ammount,$what);
}
# This formula in use by default is original by me (Josiah Bryan) as far as I know.
# If it is equal, then don't adjust
#
### Disabled because this sometimes causes
### infinite loops when learning with range limits enabled
#
#next if($value eq $what);
# Adjust increment by the weight of the synapse of
# this neuron & apply direction delta
my $delta =
$ammount *
($value<$what?1:-1) *
$self->{SYNAPSES}->{LIST}->[$i]->{WEIGHT};
#print "($value,$what) delta:$delta\n";
# Recursively apply
$self->{SYNAPSES}->{LIST}->[$i]->{WEIGHT} += $delta;
$self->{SYNAPSES}->{LIST}->[$i]->{PKG}->weight($ammount,$what);
}
}
# Registers some neuron as a synapse of this neuron.
# This is called exclusively by connect(), except for
# in initialize_group() to connect the _map() package.
sub register_synapse {
my $self = shift;
my $synapse = shift;
my $sid = $self->{SYNAPSES}->{SIZE} || 0;
$self->{SYNAPSES}->{LIST}->[$sid]->{PKG} = $synapse;
$self->{SYNAPSES}->{LIST}->[$sid]->{WEIGHT} = 1.00 if(!$self->{SYNAPSES}->{LIST}->[$sid]->{WEIGHT});
$self->{SYNAPSES}->{LIST}->[$sid]->{FIRED} = 0;
AI::NeuralNet::BackProp::out1("$self: Registering sid $sid with weight $self->{SYNAPSES}->{LIST}->[$sid]->{WEIGHT}, package $self->{SYNAPSES}->{LIST}->[$sid]->{PKG}.\n");
$self->{SYNAPSES}->{SIZE} = ++$sid;
return ($sid-1);
}
# Called via AI::NeuralNet::BackProp::NeuralNetwork::initialize_group() to
# form the neuron grids.
# This just registers another synapse as a synapse to output to from this one, and
# then we ask that synapse to let us register as an input connection, and we
# save the sid that the output synapse returns.
sub connect {
my $self = shift;
my $to = shift;
my $oid = $self->{OUTPUTS}->{SIZE} || 0;
AI::NeuralNet::BackProp::out1("Connecting $self to $to at $oid...\n");
$self->{OUTPUTS}->{LIST}->[$oid]->{PKG} = $to;
$self->{OUTPUTS}->{LIST}->[$oid]->{ID} = $to->register_synapse($self);
$self->{OUTPUTS}->{SIZE} = ++$oid;
# $oid was just incremented above, so index the entry created for this connection.
return $self->{OUTPUTS}->{LIST}->[$oid-1]->{ID};
}
1;
package AI::NeuralNet::BackProp;
use Benchmark;
use strict;
sub show {
my $self = shift;
for my $a (0..$self->{SIZE}-1) {
print "Neuron $a: ";
for my $b (0..$self->{DIV}-1) {
print $self->{NET}->[$a]->{SYNAPSES}->{LIST}->[$b]->{WEIGHT},"\t";
}
print "\n";
}
}
# Used internally by new() and learn().
# This is the sub block that actually creates
# the connections between the synapse chains and
# also connects the run packages and the map packages
# to the appropriate ends of the neuron grids.
sub initialize_group {
my $self = shift;
my $size = $self->{SIZE};
my $div = $self->{DIV};
my $out = $self->{OUT};
my $flag = $self->{FLAG};
my $x = 0;
my $y = 0;
# Reset map and run synapse counters.
$self->{RUN}->{REGISTRATION} = $self->{MAP}->{REGISTRATION} = 0;
AI::NeuralNet::BackProp::out2 "There will be $size neurons in this network group, with a divison value of $div.\n";
#print "There will be $size neurons in this network group, with a divison value of $div.\n";
# Create initial neuron packages in one long array for the entire group
for($y=0; $y<$size; $y++) {
#print "Initalizing neuron $y... \r";
$self->{NET}->[$y]=new AI::NeuralNet::BackProp::neuron();
}
AI::NeuralNet::BackProp::out2 "Creating synapse grid...\n";
my $z = 0;
my $aa = 0;
my ($n0,$n1,$n2);
# Outer loop loops over every neuron in group, incrementing by the number
# of neurons supposed to be in each layer
for($y=0; $y<$size; $y+=$div) {
if($y+$div>=$size) {
last;
}
# Inner loop connects every neuron in this 'layer' to one input of every neuron in
# the next 'layer'. Remember, layers only exist in terms of where the connections
# are divided. For example, if a person requested 2 layers and 3 neurons per layer,
# then there would be 6 neurons in the {NET}->[] list, and $div would be set to
# 3. So we would loop over and every 3 neurons we would connect each of those 3
# neurons to one input of every neuron in the next set of 3 neurons. Of course, this
# is an example; 3 and 2 are set by the new() constructor.
# Flag values:
# 0 - (default) -
# My feed-forward style: Each neuron in layer X is connected to one input of every
# neuron in layer Y. The best and most proven flag style.
#
# ^ ^ ^
# O\ O\ /O Layer Y
# ^\\/^/\/^
# | //|\/\|
# |/ \|/ \|
# O O O Layer X
# ^ ^ ^
#
# 1 -
# In addition to flag 0, each neuron in layer X is connected to every input of
# the neurons ahead of itself in layer X.
# 2 - ("L-U Style") -
# No, its not "Learning-Unit" style. It gets its name from this: In a 2 layer, 3
# neuron network, the connections form a L-U pair, or a W, however you want to look
# at it.
#
# ^ ^ ^
# | | |
# O-->O-->O
# ^ ^ ^
# | | |
# | | |
# O-->O-->O
# ^ ^ ^
# | | |
#
# As you can see, each neuron is connected to the next one in its layer, as well
# as the neuron directly above itself.
for ($z=0; $z<$div; $z++) {
if((!$flag) || ($flag == 1)) {
for ($aa=0; $aa<$div; $aa++) {
$self->{NET}->[$y+$z]->connect($self->{NET}->[$y+$div+$aa]);
}
}
if($flag == 1) {
for ($aa=$z+1; $aa<$div; $aa++) {
$self->{NET}->[$y+$z]->connect($self->{NET}->[$y+$aa]);
}
}
if($flag == 2) {
$self->{NET}->[$y+$z]->connect($self->{NET}->[$y+$div+$z]);
$self->{NET}->[$y+$z]->connect($self->{NET}->[$y+$z+1]) if($z<$div-1);
}
AI::NeuralNet::BackProp::out1 "\n";
}
AI::NeuralNet::BackProp::out1 "\n";
}
# These next two loops connect the _run and _map packages (the IO interface) to
# the start and end 'layers', respectively. These are how we insert data into
# the network and how we get data from the network. The _run and _map packages
# are connected to the neurons so that the neurons think that the IO packages are
# just another neuron, sending data on. But the IO packs. are special packages designed
# with the same methods as neurons, just meant for specific IO purposes. You will
# never need to call any of the IO packs. directly. Instead, they are called whenever
# you use the run(), map(), or learn() methods of your network.
=head1 SYNOPSIS
use AI::NeuralNet::BackProp;
# Create a new network with 1 layer, 5 inputs, and 5 outputs.
my $net = new AI::NeuralNet::BackProp(1,5,5);
# Add a small amount of randomness to the network
$net->random(0.001);
# Demonstrate a simple learn() call
my @inputs = ( 0,0,1,1,1 );
my @outputs = ( 1,0,1,0,1 );
print $net->learn(\@inputs, \@outputs),"\n";
# Create a data set to learn
my @set = (
[ 2,2,3,4,1 ], [ 1,1,1,1,1 ],
[ 1,1,1,1,1 ], [ 0,0,0,0,0 ],
[ 1,1,1,0,0 ], [ 0,0,0,1,1 ]
);
# Demo learn_set()
my $f = $net->learn_set(\@set);
print "Forgetfulness: $f unit\n";
# Crunch a bunch of strings and return array refs
my $phrase1 = $net->crunch("I love neural networks!");
my $phrase2 = $net->crunch("Jay Leno is weird.");
my $phrase3 = $net->crunch("The rain in spain...");
my $phrase4 = $net->crunch("Tired of word crunching yet?");
# Make a data set from the array refs
my @phrases = (
$phrase1, $phrase2,
$phrase3, $phrase4
);
# Learn the data set
$net->learn_set(\@phrases);
# Run a test phrase through the network
my $test_phrase = $net->crunch("I love neural networking!");
my $result = $net->run($test_phrase);
# Get this, it prints "Jay Leno is networking!" ... LOL!
print $net->uncrunch($result),"\n";
=head1 UPDATES
This is version 0.89. In this version I have included a new feature, output range limits, as
well as automatic crunching of run() and learn*() inputs. Included in the examples directory
are seven new practical-use example scripts. Also implemented in this version is a much cleaner
learning function for individual neurons which is more accurate than previous versions and is
based on the LMS rule. See range() for information on output range limits. I have also updated
the load() and save() methods so that they do not depend on Storable anymore. In this version
you also have the choice between three network topologies: two are not as stable, and the
third, the default, has been in use for the previous four versions.
=head1 DESCRIPTION
AI::NeuralNet::BackProp implements a neural network similar to a feed-forward,
back-propagation network, learning via a mix of a generalization
of the Delta rule and a dissection of Hebb's rule. The actual
neurons of the network are implemented via the AI::NeuralNet::BackProp::neuron package.
You construct a new network via the new constructor:
my $net = new AI::NeuralNet::BackProp(2,3,1);
The new() constructor accepts two required arguments and one optional argument: $layers,
$size, and (optionally) $outputs. In this example, $layers is 2, $size is 3, and $outputs is 1.
$layers specifies the number of layers, including the input
and the output layer, to use in each neural grouping. A new
neural grouping is created for each pattern learned. Layers
is typically set to 2. Each layer has $size neurons in it.
Each neuron's output is connected to one input of every neuron
in the layer below it.
This diagram illustrates a simple network, created with a call
to "new AI::NeuralNet::BackProp(2,2,2)" (2 layers, 2 neurons/layer, 2 outputs).
input
/ \
O O
|\ /|
| \/ |
| /\ |
|/ \|
O O
\ /
mapper
In this diagram, each neuron is connected to one input of every
neuron in the layer below it, but there are no connections
between neurons in the same layer. The weight of a connection
is controlled by the receiving neuron, not the connecting
neuron. (I.e., the connecting neuron has no idea how much weight
its output carries when it sends it; it just sends its output and the
weighting is taken care of by the receiving neuron.) This is the
method used to connect cells in every network built by this package.
Input is fed into the network via a call like this:
use AI::NeuralNet::BackProp;
my $net = new AI::NeuralNet::BackProp(2,2);
my @map = (0,1);
my $result = $net->run(\@map);
Now, this call would probably not give what you want, because
the network hasn't "learned" any patterns yet. But this
illustrates the call. run() now also allows strings to be used as inputs; they are
crunched into maps automatically (see crunch(), below).
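For example, a minimal sketch (assuming the network has already learned some crunched phrases, as in the SYNOPSIS above):
my $result = $net->run("I love neural networks!");
print $net->uncrunch($result),"\n";
run() crunches the string into a map for you before running it through the network.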
Run returns a reference with $size elements. (Remember $size? $size
is what you passed as the second argument to the network
constructor.) This array contains the results of the mapping. If
you ran the example exactly as shown above, $result would probably
contain (1,1) as its elements.
To make the network learn a new pattern, you simply call the learn
method with a sample input and the desired result, both array
references of $size length. Example:
use AI::NeuralNet::BackProp;
my $net = new AI::NeuralNet::BackProp(2,2);
my @map = (0,1);
my @res = (1,0);
$net->learn(\@map,\@res);
my $result = $net->run(\@map);
Now $result will contain (1,0), effectively flipping the input pattern
around. Obviously, the larger $size is, the longer it will take
to learn a pattern. Learn() returns a string in the form of
Learning took X loops and X wallclock seconds (X.XXX usr + X.XXX sys = X.XXX CPU).
with the X's replaced by time or loop values for that learn call. So,
to view the learning stats for every learn call, you can just:
print $net->learn(\@map,\@res);
If you call "$net->debug(4)" with $net being the
reference returned by the new() constructor, you will get benchmarking
information for the learn function, as well as plenty of other information output.
See notes on debug() in the METHODS section, below.
If you do call $net->debug(1), it is a good
idea to redirect the STDOUT of your script to a file, as a lot of information is output. I often
use this command line:
$ perl some_script.pl > .out
Then I can simply go and use emacs or any other text editor and read the output at my leisure,
rather than having to wait for it, or feed it through 'more' as it scrolls by on the screen.
=head2 METHODS
=over 4
=item new AI::NeuralNet::BackProp($layers, $size [, $outputs, $topology_flag])
Returns a newly created neural network from an C<AI::NeuralNet::BackProp>
object. The network will have C<$layers> layers in it,
and each layer will have C<$size> neurons in it.
There is an optional parameter of $outputs, which specifies the number
of output neurons to provide. If $outputs is not specified, $outputs
defaults to equal $size. $outputs may not exceed $size. If $outputs
exceeds $size, the new() constructor will return undef.
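Here is a short sketch of the constructor rules above (the geometry values are only illustrative):
# 2 layers, 3 neurons per layer, 1 output neuron:
my $net = new AI::NeuralNet::BackProp(2,3,1);
# $outputs (5) exceeds $size (3), so new() returns undef:
my $bad = new AI::NeuralNet::BackProp(2,3,5);
print "Invalid geometry!\n" if(!defined $bad);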
The optional parameter, $topology_flag, defaults to 0 when not used. There are
three valid topology flag values:
B<0> I<default>
My feed-forward style: Each neuron in layer X is connected to one input of every
neuron in layer Y. The best and most proven flag style.
^ ^ ^
O\ O\ /O Layer Y
^\\/^/\/^
| //|\/\|
|/ \|/ \|
O O O Layer X
^ ^ ^
(Sorry about the bad art...I am no ASCII artist! :-)
B<1>
In addition to flag 0, each neuron in layer X is connected to every input of
the neurons ahead of itself in layer X.
B<2> I<("L-U Style")>
No, it's not "Learning-Unit" style. It gets its name from this: in a 2-layer, 3-
neuron network, the connections form an L-U pair, or a W, however you want to look
at it.
^ ^ ^
| | |
O-->O-->O
^ ^ ^
| | |
| | |
O-->O-->O
^ ^ ^
| | |
As you can see, each neuron is connected to the next one in its layer, as well
as the neuron directly above itself.
Before you can really do anything useful with your new neural network
object, you need to teach it some patterns. See the learn() method, below.
=item $net->learn($input_map_ref, $desired_result_ref [, options ]);
This will 'teach' a network to associate a new input map with a desired result.
It will return a string containing benchmarking information. You can retrieve the
pattern index that the network stored the new input map in after learn() is complete
with the pattern() method, below.
UPDATED: You can now specify strings as inputs and outputs for learn(), and they will be crunched
automatically. Example:
$net->learn('corn', 'cob');
# Before this update, you had to do this:
# $net->learn($net->crunch('corn'), $net->crunch('cob'));
Note, the old method of calling crunch on the values still works just as well.
UPDATED: You can now learn inputs with a 0 value. Beware though, it may not learn() a 0 value
in the input map if you have randomness disabled. See NOTES on using a 0 value with randomness
disabled.
The first two arguments may be array refs (or now, strings), and they may be of different lengths.
Options should be written in hash form. There are three options:
inc => $learning_gradient
max => $maximum_iterations
error => $maximum_allowable_percentage_of_error
$learning_gradient is an optional value used to adjust the weights of the internal
connections. If $learning_gradient is omitted, it defaults to 0.20.
$maximum_iterations is the maximum number of iterations the loop should do.
It defaults to 1024. Set it to 0 if you never want the loop to quit before
the pattern is perfectly learned.
$maximum_allowable_percentage_of_error is the maximum allowable error to have. If
this is set, then learn() will return when the percentage difference between the
actual results and desired results falls below $maximum_allowable_percentage_of_error.
If you do not include 'error', or $maximum_allowable_percentage_of_error is set to -1,
then learn() will not return until it gets an exact match for the desired result OR it
reaches $maximum_iterations.
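Putting the three options together, a sketch of a typical call (the values are only illustrative):
$net->learn(\@inputs, \@outputs,
inc   => 0.05,  # learning gradient
max   => 2048,  # give up after 2048 iterations
error => 5      # or return once within 5% of the desired result
);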
=item $net->learn_set(\@set, [ options ]);
UPDATE: Inputs and outputs in the dataset can now be strings. See information on auto-crunching
in learn()
This takes the same options as learn() and allows you to specify a set to learn, rather
than individual patterns. A dataset is an array reference with at least two elements in the
array, each element being another array reference (or now, a scalar string). For each pattern to
learn, you must specify an input array ref, and an output array ref as the next element. Example:
my @set = (
# inputs outputs
[ 1,2,3,4 ], [ 1,3,5,6 ],
[ 0,2,5,6 ], [ 0,2,1,2 ]
);
See the paragraph on measuring forgetfulness, below. There are
two learn_set()-specific option tags available:
flag => $flag
pattern => $row
If "flag" is set to some TRUE value, as in "flag => 1" in the hash of options, or if the option "flag"
is not set, then it will return a percentage represting the amount of forgetfullness. Otherwise,
learn_set() will return an integer specifying the amount of forgetfulness when all the patterns
are learned.
If "pattern" is set, then learn_set() will use that pattern in the data set to measure forgetfulness by.
If "pattern" is omitted, it defaults to the first pattern in the set. Example:
my @set = (
[ 0,1,0,1 ], [ 0 ],
[ 0,0,1,0 ], [ 1 ],
[ 1,1,0,1 ], [ 2 ], # <---
[ 0,1,1,0 ], [ 3 ]
);
If you wish to measure forgetfulness as indicated by the line with the arrow, then you would
pass 2 as the "pattern" option, as in "pattern => 2".
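A sketch of such a call, using the two option tags described above:
my $f = $net->learn_set(\@set,
flag    => 1,  # return forgetfulness as a percentage
pattern => 2   # measure forgetfulness with the pattern marked by the arrow
);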
Now why the heck would anyone want to measure forgetfulness, you ask? Maybe you wonder how I
even measure that. Well, it is not a vital value that you have to know. I just put in a
"forgetfulness measure" one day because I thought it would be neat to know.
How the module measures forgetfulness is this: First, it learns all the patterns in the set provided,
then it will run the very first pattern (or whatever pattern is specified by the "pattern" option)
in the set after it has finished learning. It will compare the run() output with the desired output
as specified in the dataset. In a perfect world, the two should match exactly. What we measure is
how much that they don't match, thus the amount of forgetfulness the network has.
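You can approximate the same measurement by hand with the documented run() and pdiff() methods; a rough sketch:
# Learn the whole set, then re-run its first input and compare
# the result against the first desired output.
$net->learn_set(\@set);
my $got = $net->run($set[0]);
print "Forgetfulness: ", $net->pdiff($got, $set[1]), "\n";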
NOTE: In version 0.77 percentages were disabled because of a bug. Percentages are now enabled.
Example (from examples/ex_dow.pl):
# Data from 1989 (as far as I know..this is taken from example data on BrainMaker)
my @data = (
# Mo CPI CPI-1 CPI-3 Oil Oil-1 Oil-3 Dow Dow-1 Dow-3 Dow Ave (output)
[ 1, 229, 220, 146, 20.0, 21.9, 19.5, 2645, 2652, 2597], [ 2647 ],
[ 2, 235, 226, 155, 19.8, 20.0, 18.3, 2633, 2645, 2585], [ 2637 ],
[ 3, 244, 235, 164, 19.6, 19.8, 18.1, 2627, 2633, 2579], [ 2630 ],
[ 4, 261, 244, 181, 19.6, 19.6, 18.1, 2611, 2627, 2563], [ 2620 ],
[ 5, 276, 261, 196, 19.5, 19.6, 18.0, 2630, 2611, 2582], [ 2638 ],
[ 6, 287, 276, 207, 19.5, 19.5, 18.0, 2637, 2630, 2589], [ 2635 ],
[ 7, 296, 287, 212, 19.3, 19.5, 17.8, 2640, 2637, 2592], [ 2641 ]
);
# Learn the set
my $f = $net->learn_set(\@data,
inc => 0.1,
max => 500,
pattern => 1
);
# Print it
print "Forgetfullness: $f%";
This is a snippet from the example script examples/ex_dow.pl, which demonstrates Dow average
prediction for the next month. A simpler set definition would be as such:
my @data = (
[ 0,1 ], [ 1 ],
[ 1,0 ], [ 0 ]
);
$net->learn_set(\@data);
Same effect as above, but not the same data (obviously).
=item $net->debug($level)
Toggles debugging off if called with $level = 0 or no arguments. There are four levels
of debugging.
Level 0 ($level = 0) : Default, no debugging information printed. All printing is
left to calling script.
Level 1 ($level = 1) : This causes ALL debugging information for the network to be dumped
as the network runs. In this mode, it is a good idea to pipe your STDOUT to a file, especially
for large programs.
Level 2 ($level = 2) : A slightly-less verbose form of debugging, not as many internal
data dumps.
Level 3 ($level = 3) : JUST prints weight mapping as weights change.
Level 4 ($level = 4) : JUST prints the benchmark info for EACH learn loop iteration, not just
learning as a whole. Also prints the percentage difference for each loop between the current network
results and the desired results, as well as the learning gradient ('increment').
Level 4 is useful for seeing if you need to give a smaller learning increment to learn().
I used level 4 debugging quite often in creating the letters.pl example script and the small_1.pl
example script.
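For example, to benchmark a single learn() call at level 4 and then silence the output again:
$net->debug(4);
print $net->learn(\@map, \@res);  # per-iteration benchmarks are printed as it learns
$net->debug(0);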
=item $net->save($filename);
This will save the complete state of the network to disk, including all weights and any
words crunched with crunch() . Also saves any output ranges set with range() .
This has now been modified to use a simple flat-file text storage format, and it no longer
depends on any external modules.
=item $net->load($filename);
This will load from disk any network saved by save() and completely restore the internal
state to the point at which save() was called.
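A save/load round trip might look like the sketch below. (The filename is arbitrary, and constructing the second network with the same geometry before calling load() is my assumption, not a documented requirement.)
$net->save('mynet.net');
# ... later, or in another script:
my $net2 = new AI::NeuralNet::BackProp(1,5,5);
$net2->load('mynet.net');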
=item $net->join_cols($array_ref,$row_length_in_elements,$high_state_character,$low_state_character);
This is more of a utility function than any real necessary function of the package.
Instead of joining all the elements of the array together in one long string, like join() ,
it prints the elements of $array_ref to STDOUT, adding a newline (\n) after every $row_length_in_elements
number of elements has passed. Additionally, if you include a $high_state_character and a $low_state_character,
it will print the $high_state_character (can be more than one character) for every element that
has a true value, and the $low_state_character for every element that has a false value.
If you do not supply a $high_state_character, or the $high_state_character is a null, empty, or
undefined string, then join_cols() will just print the numerical value of each element separated
by a null character (\0). join_cols() defaults to the latter behaviour.
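For example, to print a 10-element map as two rows of five, using '#' for true elements and '.' for false ones:
$net->join_cols([1,0,1,1,0,0,0,1,0,1], 5, '#', '.');
# Prints something like:
# #.##.
# ..#.#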
=item $net->pdiff($array_ref_A, $array_ref_B);
This function is used VERY heavily internally to calculate the difference in percent
between elements of the two array refs passed. It returns a %.10f (sprintf-format)
percent string.
=item $net->p($a,$b);
Returns a floating point number which represents $a as a percentage of $b.
=item $net->intr($float);
Rounds a floating-point number to an integer using sprintf() and int(). Provides
better rounding than just calling int() on the float. Also used very heavily internally.
=item $net->high($array_ref);
Returns the index of the element in array REF passed with the highest comparative value.
=item $net->low($array_ref);
Returns the index of the element in array REF passed with the lowest comparative value.
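Quick examples of these three utilities, with their results per the descriptions above:
print $net->intr(0.51);     # prints 1 (sprintf-based rounding)
print $net->high([2,7,3]);  # prints 1, the index of the highest value
print $net->low([2,7,3]);   # prints 0, the index of the lowest value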
=item $net->show();
This will dump a simple listing of all the weights of all the connections of every neuron
in the network to STDOUT.
=item $net->crunch($string);
UPDATE: Now you can pass a plain string variable instead of using qw(). Strings will be split internally.
Do not use qw() to pass strings to crunch().
This splits a string passed with /[\s\t]/ into an array ref containing unique indexes
to the words. The words are stored in an internal array and preserved across load() and save()
calls. This is designed to be used to generate unique maps suitable for passing to learn() and
run() directly. It returns an array ref.
The words are not duplicated internally. For example:
$net->crunch("How are you?");
Will probably return an array ref containing 1,2,3. A subsequent call of:
$net->crunch("How is Jane?");
would then likely return an array ref containing 1,4,5, since the existing index for "How" is
reused rather than duplicated.