AI-Nerl
(This alternative is allowed only for noncommercial distribution and only
if you received the program in object code or executable form alone.)
Source code for a work means the preferred form of the work for making
modifications to it. For an executable file, complete source code means
all the source code for all modules it contains; but, as a special
exception, it need not include source code for modules which are standard
libraries that accompany the operating system on which the executable
file runs, or for standard header files or definitions files that
accompany that operating system.
4. You may not copy, modify, sublicense, distribute or transfer the
Program except as expressly provided under this General Public License.
Any attempt otherwise to copy, modify, sublicense, distribute or transfer
the Program is void, and will automatically terminate your rights to use
the Program under this License. However, parties who have received
copies, or rights to use copies, from you under this General Public
License will not have their licenses terminated so long as such parties
remain in full compliance.
5. By copying, distributing or modifying the Program (or any work based
on the Program) you indicate your acceptance of this license to do so,
and all its terms and conditions.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the original
licensor to copy, distribute or modify the Program subject to these
terms and conditions. You may not impose any further restrictions on the
recipients' exercise of the rights granted herein.
7. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of the license which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
the license, you may choose any version ever published by the Free
Software Foundation.

The Artistic License

5. You may charge a reasonable copying fee for any distribution of this
Package. You may charge any fee you choose for support of this Package. You
may not charge a fee for this Package itself. However, you may distribute this
Package in aggregate with other (possibly commercial) programs as part of a
larger (possibly commercial) software distribution provided that you do not
advertise this Package as a product of your own.
6. The scripts and library files supplied as input to or produced as output
from the programs of this Package do not automatically fall under the copyright
of this Package, but belong to whomever generated them, and may be sold
commercially, and may be aggregated with this Package.
7. C or perl subroutines supplied by you and linked into this Package shall not
be considered part of this Package.
8. The name of the Copyright Holder may not be used to endorse or promote
products derived from this software without specific prior written permission.
9. THIS PACKAGE IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
The End
examples/digits/digits.pl view on Meta::CPAN
if ($pass%200==0){
warn $delta(100:104);
warn $out_neurons(100:104);
}
show784($delta(:,0));
show784($delta(:,6));
show784($delta(:,4));
}
#die join (',',$nncost->dims);
use PDL::Graphics2D;
sub show784{
my $w = shift;
$w = $w->squeeze;
my $min = $w->minimum;
$w -= $min;
my $max = $w->maximum;
$w /= $max;
$w = $w->reshape(28,28);
imag2d $w;
}
sub sigmoid{
my $foo = shift;
return 1/(1+E**-$foo);
}
sub logistic{
#compute sigmoid first and pass its output in:
#grad = logistic(sigmoid($foo))
my $foo = shift;
return $foo * (1-$foo);
}
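The logistic() helper above is the sigmoid derivative expressed in terms of the sigmoid's own output, which is why the comment says to compute sigmoid first. A minimal sketch of the intended call pattern, assuming PDL and PDL::Constants (the source of the E used above) are loaded:

use PDL;
use PDL::Constants qw(E);

my $z    = pdl(-2, 0, 2);
my $a    = 1/(1+E**-$z);    # sigmoid($z)
my $grad = $a * (1 - $a);   # logistic($a) == d/dz sigmoid($z)
print $grad, "\n";          # roughly [0.105 0.25 0.105]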
examples/digits/idx_to_fits.pl view on Meta::CPAN
use File::Slurp;
use PDL; # provides pdl(), used below
use PDL::IO::FITS;
use FindBin qw($Bin);
chdir $Bin;
die 'filename' unless $ARGV[0];
my $img_filename = $ARGV[0];
my $img_data = read_file( $img_filename, binmode => ':raw' ) ;
my @header = map {ord} split('', substr($img_data, 0, 4, ''));
my $numdims = $header[3]; # IDX magic: two zero bytes, a type code, then ndims
my @dims = map {ord} split('', substr($img_data, 0, 4*$numdims, ''));
#'IDX' format described here: http://yann.lecun.com/exdb/mnist/
#each dim is a 32-bit big-endian int; MNIST sizes fit in the low two bytes,
#so rebuild dim $_ from bytes 2 and 3 of its 4-byte group:
for (0..$numdims-1){
$dims[$_] = 256*$dims[4*$_+2] + $dims[4*$_+3];
}
@dims = @dims[0..$numdims-1]; # trim the leftover raw bytes
#die join ' ',@dims;
#my @img_data = map{ord}split('',$img_data);
my $img_pdl = pdl(unpack('C*',$img_data));
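The excerpt stops after flattening the pixel bytes into a one-dimensional piddle. A hedged sketch of the step that presumably follows; the reshape order and the .fits output name are assumptions, not from the source:

# IDX lists dims slowest-first, PDL wants fastest-first, so reverse them,
# then write the reshaped data out as FITS.
$img_pdl = $img_pdl->reshape(reverse @dims);
$img_pdl->wfits("$img_filename.fits"); # hypothetical output name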
lib/AI/Nerl.pm view on Meta::CPAN
);
has basis => (
is => 'ro',
isa => 'AI::Nerl',
required => 0,
);
#initialize $self->network, but don't train.
# any parameters AI::Nerl::Network takes are fine here.
sub init_network{
my $self = shift;
my %nn_params = @_;
#input layer size:
unless ($nn_params{l1}){
if ($self->basis){
$nn_params{l1} = $self->basis->network->l1 + $self->basis->network->l2;
} elsif($self->train_x) {
$nn_params{l1} ||= $self->train_x->dim(1);
}
}
lib/AI/Nerl.pm view on Meta::CPAN
}
$nn_params{l2} ||= $self->l2;
$nn_params{scale_input} ||= $self->scale_input;
my $nn = AI::Nerl::Network->new(
%nn_params
);
$self->network($nn);
}
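Because init_network derives l1 from the basis network (its input size plus hidden size, matching append_l2's glued output) or from train_x, only the remaining layer sizes need to be supplied. A hypothetical call, with a made-up output size:

$nerl->init_network(l3 => 10); # l1 inferred; l2 defaults to $nerl->l2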
sub build_network{
my $self = shift;
my $nn = AI::Nerl::Network->new(
l1 => $self->train_x->dim(1),
l2 => $self->l2,
l3 => $self->train_y->dim(1),
scale_input => $self->scale_input,
);
$nn->train($self->train_x, $self->train_y, passes=>$self->passes);
$self->network($nn);
}
sub append_l2{
my ($self,$x) = @_;
if($self->basis){
$x = $self->basis->append_l2($x);
}
return $self->network->append_l2($x);
}
sub run{
my ($self,$x) = @_;
$x->sever;
if($self->basis){
$x = $self->basis->append_l2($x);
}
return $self->network->run($x);
}
sub train{
my ($self,$x,$y) = @_;
$x->sever;
if($self->basis){
$x = $self->basis->append_l2($x);
}
return $self->network->train($x,$y);
}
sub cost{
my ($self,$x,$y) = @_;
$x->sever();
if($self->basis){
$x = $self->basis->append_l2($x);
}
return $self->network->cost($x,$y);
}
'a neural network has your dog.';
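Taken together, the wrapper's API reads as below. A hedged end-to-end sketch; the attribute values and pdl shapes are assumptions, not taken from the source:

my $nerl = AI::Nerl->new(
  train_x     => $train_x,   # (n_examples, 784) pixels
  train_y     => $train_y,   # (n_examples, 10) one-hot labels
  l2          => 30,         # hidden layer size
  passes      => 10,
  scale_input => 1/256,
);
$nerl->build_network;          # sizes l1/l3 from the training pdls, then trains
my $out = $nerl->run($test_x); # forward pass (through basis, if set)
my ($cost, $ncorrect) = $nerl->cost($test_x, $test_y);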
lib/AI/Nerl/Network.pm view on Meta::CPAN
has alpha => ( # learning rate
isa => 'Num',
is => 'rw',
default => .6,
);
has lambda => ( # L2 regularization strength
isa => 'Num',
is => 'rw',
default => .01,
);
sub _mk_theta1{
my $self = shift;
return grandom($self->l1, $self->l2) * .01;
}
sub _mk_theta2{
my $self = shift;
return grandom($self->l2, $self->l3) * .01;
}
sub _mk_b1{
my $self = shift;
return grandom($self->l2) * .01;
}
sub _mk_b2{
my $self = shift;
return grandom($self->l3) * .01;
}
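# note: each theta/bias starts as gaussian noise scaled by .01: small
# enough to keep the tanh units near their linear region at the start, yet
# nonzero so hidden units break symmetry and learn distinct features.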
sub train{
my ($self,$x,$y, %params) = @_;
$x->sever();
my $passes = $params{passes} // 10;
if ($self->scale_input){
$x *= $self->scale_input;
}
my $num_examples = $x->dim(0);
for my $pass (1..$passes){
lib/AI/Nerl/Network.pm view on Meta::CPAN
warn "delta1: $delta1\n";
warn "delta2: $delta2\n";
}
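# gradient step with L2 weight decay:
#   theta <- theta - alpha * (grad/n_examples + lambda * theta)
# (the biases below are updated without the decay term)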
$self->{theta2} -= $self->alpha * ($delta2 / $num_examples + $self->theta2 * $self->lambda);
$self->{theta1} -= $self->alpha * ($delta1 / $num_examples + $self->theta1 * $self->lambda);
$self->{b1} -= $self->alpha * $deltab1 / $num_examples;
$self->{b2} -= $self->alpha * $deltab2 / $num_examples;
}
}
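# run: forward pass. Expects l1 along dim 1 (transposes if needed) and
# returns an (n_examples, l3) pdl, so $out(($i)) is the output vector
# for example $i.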
sub run{
my ($self,$x) = @_;
$x->sever();
if ($self->scale_input){
$x *= $self->scale_input;
}
$x = $x->transpose if $self->l1 != $x->dim(1);
my $y = $self->theta1 x $x;
$y += $self->b1->transpose;
$y->inplace()->tanh;# = tanhx($y);
$y = $self->theta2 x $y;
$y += $self->b2->transpose;
$y->inplace()->tanh();# = tanhx($y);
return $y;
}
sub append_l2{
my ($self,$x) = @_;
$x->sever();
if ($self->scale_input){
$x *= $self->scale_input;
}
$x = $x->transpose if $self->l1 != $x->dim(1);
my $l2 = $self->theta1 x $x;
$l2 += $self->b1->transpose;
$l2->inplace()->tanh;
# warn join ',',$x->dims;
# warn join ',',$l2->dims;
return $x->glue(1,$l2);
}
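# append_l2 glues the raw input to its hidden-layer activations along
# dim 1, so a stacked net (see the basis attribute in AI::Nerl) sees both
# the original features and the learned ones.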
sub cost{
my ($self,$x,$y) = @_;
$x->sever();# = $x->copy();
my $n = $x->dim(0);
if ($self->scale_input){
$x *= $self->scale_input;
}
my $num_correct = 0;
#die join(',',$x->dims) .',,,'. join(',',$y->dims);
my $total_cost = 0;
for my $i (0..$n-1){
lib/AI/Nerl/Network.pm view on Meta::CPAN
$total_cost += ($y(($i))-$a3)->abs()->power(2,0)->sum()/2;
#warn $a3->maximum_ind . ' ' . $y(($i))->maximum_ind;;
$num_correct++ if $a3->maximum_ind == $y(($i))->maximum_ind;
}
$total_cost /= $n;
$total_cost += $self->theta1->flat->power(2,0)->sum * $self->lambda;
$total_cost += $self->theta2->flat->power(2,0)->sum * $self->lambda;
return ($total_cost, $num_correct);
}
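The two return values make quick evaluation a single call. Hypothetical usage ($nn, $test_x, and $test_y are placeholders):

my ($J, $hits) = $nn->cost($test_x, $test_y);
warn sprintf "cost %.4f, %d/%d correct\n", $J, $hits, $test_x->dim(0);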
sub tanhx{ #don't use this: PDL's $pdl->tanh can be used instead.
my $foo = shift;
my $p = E**$foo;
my $n = E**-$foo;
return (($p-$n)/($p+$n));
}
sub tanhxderivative{ #usage: tanhxderivative($pdl->tanh()): pass in tanh(x) to avoid recomputing it.
my $tanhx = shift;
return (1 - $tanhx**2);
}
sub sigmoid{
my $foo = shift;
return 1/(1+E**-$foo);
}
sub logistic{
#compute sigmoid first and pass its output in:
#grad = logistic(sigmoid($foo))
my $foo = shift;
return $foo * (1-$foo);
}
my $g2d_tried = 0;
my $g2d_failed = '';
sub USE_G2D{
return 0 if $g2d_tried and $g2d_failed;
return 1 if $g2d_tried;
eval{
require PDL::Graphics2D;
PDL::Graphics2D->import('imag2d'); # case must match the module name
1;
} or do {
$g2d_failed = $@;
warn "PDL::Graphics2D failed to load: $g2d_failed";
};
$g2d_tried = 1;
return USE_G2D(); # recurse once; $g2d_tried is now set
}
#display 28x28 grayscale pdl.
sub show784{
return unless USE_G2D();
my $w = shift;
$w = $w->copy;
#warn join',', $w->dims;
$w = $w->squeeze;
my $min = $w->minimum;
$w -= $min;
my $max = $w->maximum;
$w /= $max;
$w = $w->reshape(28,28);
imag2d $w;
}
sub show_neuron{
unless (USE_G2D()){
warn 'Can\'t display neuron. Get OpenGL?';
return;
}
my $self = shift;
my $n = shift // 0;
my $x = shift || 28;
my $y = shift || 28;
my $w = $self->theta1->slice(":,$n")->copy;
$w = $w->squeeze;