AI-Nerl
view release on metacpan or search on metacpan
lib/AI/Nerl.pm view on Meta::CPAN
Zach Morgan
=cut
# Optional factor used to scale input features; a value of 0 disables scaling.
has scale_input => (
    is       => 'ro',
    isa      => 'Num',
    default  => 0,
    required => 0,
);
# Number of neurons in the hidden layer.
has l2 => (
    is      => 'ro',
    isa     => 'Num',
    default => 30,
);
# Training inputs and labels as PDL piddles. Optional, because
# training may also be driven manually by the caller.
has [qw/ train_x train_y /] => (
    is       => 'ro',
    isa      => 'PDL',
    required => 0,
);
has [qw/ test_x cv_x
test_y cv_y /] => (
lib/AI/Nerl.pm view on Meta::CPAN
# The wrapped AI::Nerl::Network instance; writable so it can be
# constructed/replaced after object creation, hence not required.
has network => (
    is       => 'rw',
    isa      => 'AI::Nerl::Network',
    required => 0,
);
# How many training passes to run; defaults to 10.
has passes => (
    is      => 'rw',
    isa     => 'Int',
    default => 10,
);
# An optional parent AI::Nerl object to base this one on.
has basis => (
    is       => 'ro',
    isa      => 'AI::Nerl',
    required => 0,
);
# Initialize $self->network without training it.
# Any constructor parameters accepted by AI::Nerl::Network may be passed here.
lib/AI/Nerl/Network.pm view on Meta::CPAN
=cut
# Simple nn with 1 hidden layer
# train with $nn->train(data,labels);
# Optional factor used to scale network inputs; 0 means no scaling.
has scale_input => (
    is       => 'ro',
    isa      => 'Num',
    default  => 0,
    required => 0,
);
# number of input,hidden,output neurons
# Layer sizes: l1 = input neurons, l2 = hidden neurons, l3 = output neurons.
has [qw/ l1 l2 l3 /] => (
    is  => 'ro',
    isa => 'Int',
);
has theta1 => (
is => 'ro',
lib/AI/Nerl/Network.pm view on Meta::CPAN
);
# Bias vector for the output layer; built lazily by _mk_b2 on first access.
has b2 => (
    is      => 'ro',
    isa     => 'PDL',
    lazy    => 1,
    builder => '_mk_b2',
);
# Gradient-descent learning rate.
has alpha => (
    is      => 'rw',
    isa     => 'Num',
    default => .6,
);
# Regularization strength.
has lambda => (
    is      => 'rw',
    isa     => 'Num',
    default => .01,
);
# Builder for theta1: an l1-by-l2 weight matrix of small gaussian-random
# values (via PDL's grandom), scaled down to break symmetry at init time.
sub _mk_theta1 {
    my ($self) = @_;
    return 0.01 * grandom( $self->l1, $self->l2 );
}
# Builder for theta2: an l2-by-l3 weight matrix of small gaussian-random
# values (via PDL's grandom), scaled down to break symmetry at init time.
sub _mk_theta2 {
    my ($self) = @_;
    return 0.01 * grandom( $self->l2, $self->l3 );
}
( run in 0.741 second using v1.01-cache-2.11-cpan-0a6323c29d9 )