examples/char_lstm.pl
'help' => sub { HelpMessage(0) },
) or HelpMessage(1);
=head1 NAME
char_lstm.pl - Example of training a char-level LSTM RNN on tiny Shakespeare using the high-level RNN interface,
with optional inferred sampling (the RNN generates Shakespeare-like text)
=head1 SYNOPSIS
--num-layers number of stacked RNN layers, default=2
--num-hidden hidden layer size, default=256
--num-embed embed size, default=10
--num-seq sequence size, default=60
--gpus list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu.
Increase batch size when using multiple gpus for best performance.
--kv-store key-value store type, default='device'
--num-epochs max num of epochs, default=25
--lr initial learning rate, default=0.01
--optimizer the optimizer type, default='adam'
--mom momentum for sgd, default=0.0
--wd weight decay for sgd, default=0.00001
--batch-size the batch size, default=32
--bidirectional use bidirectional cell, default false (0)
--disp-batches show progress for every n batches, default=50
--chkp-prefix prefix for checkpoint files, default='lstm_'
--cell-mode RNN cell mode (LSTM, GRU, RNN, default=LSTM)
--sample-size size of the inferred sample text generated after each epoch, default=10000
--chkp-epoch save a checkpoint every n epochs, default=1 (a checkpoint is saved every epoch)
=cut
package AI::MXNet::RNN::IO::ASCIIIterator;
use Mouse;
extends AI::MXNet::DataIter;
has 'data' => (is => 'ro', isa => 'PDL', required => 1);
has 'seq_size' => (is => 'ro', isa => 'Int', required => 1);
has '+batch_size' => (is => 'ro', isa => 'Int', required => 1);
has 'data_name' => (is => 'ro', isa => 'Str', default => 'data');
has 'label_name' => (is => 'ro', isa => 'Str', default => 'softmax_label');
has 'dtype' => (is => 'ro', isa => 'Dtype', default => 'float32');
has [qw/nd counter seq_counter vocab_size
data_size provide_data provide_label idx/] => (is => 'rw', init_arg => undef);
sub BUILD
{
my $self = shift;
$self->data_size($self->data->nelem);
my $segments = int(($self->data_size-$self->seq_size)/($self->batch_size*$self->seq_size));
$self->idx([0..$segments-1]);
$self->vocab_size($self->data->uniq->shape->at(0));
examples/cudnn_lstm_bucketing.pl
'dropout=f', => \(my $dropout = 0 ),
'help' => sub { HelpMessage(0) },
) or HelpMessage(1);
=head1 NAME
cudnn_lstm_bucketing.pl - Example of training an LSTM RNN on Penn Tree Bank data with bucketing, using the high-level RNN interface
=head1 SYNOPSIS
--test Whether to test or train (default 0)
--num-layers number of stacked RNN layers, default=2
--num-hidden hidden layer size, default=200
--num-seq sequence size, default=32
--gpus list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu.
Increase batch size when using multiple gpus for best performance.
--kv-store key-value store type, default='device'
--num-epochs max num of epochs, default=25
--lr initial learning rate, default=0.01
--optimizer the optimizer type, default='adam'
--mom momentum for sgd, default=0.0
--wd weight decay for sgd, default=0.00001
--batch-size the batch size, default=32
--disp-batches show progress for every n batches, default=50
--model-prefix prefix for checkpoint files for loading/saving, default='lstm_'
--load-epoch load from epoch
--stack-rnn stack rnn to reduce communication overhead (1,0 default 0)
--bidirectional whether to use bidirectional layers (1,0 default 0)
--dropout dropout probability (1.0 - keep probability), default 0
=cut
$bidirectional = $bidirectional ? 1 : 0;
$stack_rnn = $stack_rnn ? 1 : 0;
func tokenize_text($fname, :$vocab=, :$invalid_label=-1, :$start_label=0)
{
open(my $fh, '<', $fname) or die "Can't open $fname: $!";
my @lines = map { my $l = [split(/ /)]; shift(@$l); $l } (<$fh>);
my $sentences;
examples/cudnn_lstm_bucketing.pl
{
$contexts = [map { mx->gpu($_) } split(/,/, $gpus)];
}
else
{
$contexts = mx->cpu(0);
}
my $model = mx->mod->BucketingModule(
sym_gen => $sym_gen,
default_bucket_key => $data_train->default_bucket_key,
context => $contexts
);
my ($arg_params, $aux_params);
if($load_epoch)
{
(undef, $arg_params, $aux_params) = mx->rnn->load_rnn_checkpoint(
$cell, $model_prefix, $load_epoch);
}
$model->fit(
examples/cudnn_lstm_bucketing.pl
}
my ($arg_params, $aux_params);
if($load_epoch)
{
(undef, $arg_params, $aux_params) = mx->rnn->load_rnn_checkpoint(
$stack, $model_prefix, $load_epoch);
}
my $model = mx->mod->BucketingModule(
sym_gen => $sym_gen,
default_bucket_key => $data_val->default_bucket_key,
context => $contexts
);
$model->bind(
data_shapes => $data_val->provide_data,
label_shapes => $data_val->provide_label,
for_training => 0,
force_rebind => 0
);
$model->set_params($arg_params, $aux_params);
my $score = $model->score($data_val,
examples/lstm_bucketing.pl
'chkp-epoch=i' => \(my $chkp_epoch = 0 ),
'help' => sub { HelpMessage(0) },
) or HelpMessage(1);
=head1 NAME
lstm_bucketing.pl - Example of training an LSTM RNN on Penn Tree Bank data using the high-level RNN interface
=head1 SYNOPSIS
--num-layers number of stacked RNN layers, default=2
--num-hidden hidden layer size, default=200
--num-embed embedding layer size, default=200
--gpus list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu.
Increase batch size when using multiple gpus for best performance.
--kv-store key-value store type, default='device'
--num-epochs max num of epochs, default=25
--lr initial learning rate, default=0.01
--optimizer the optimizer type, default='sgd'
--mom momentum for sgd, default=0.0
--wd weight decay for sgd, default=0.00001
--batch-size the batch size, default=32
--disp-batches show progress for every n batches, default=50
--chkp-prefix prefix for checkpoint files, default='lstm_'
--chkp-epoch save a checkpoint every n epochs, default=0 (saving checkpoints is disabled)
=cut
func tokenize_text($fname, :$vocab=, :$invalid_label=-1, :$start_label=0)
{
open(my $fh, '<', $fname) or die "Can't open $fname: $!";
my @lines = map { my $l = [split(/ /)]; shift(@$l); $l } (<$fh>);
my $sentences;
($sentences, $vocab) = mx->rnn->encode_sentences(
\@lines,
vocab => $vocab,
examples/lstm_bucketing.pl
{
$contexts = [map { mx->gpu($_) } split(/,/, $gpus)];
}
else
{
$contexts = mx->cpu(0);
}
my $model = mx->mod->BucketingModule(
sym_gen => $sym_gen,
default_bucket_key => $data_train->default_bucket_key,
context => $contexts
);
$model->fit(
$data_train,
eval_data => $data_val,
eval_metric => mx->metric->Perplexity($invalid_label),
kvstore => $kv_store,
optimizer => $optimizer,
optimizer_params => {
lib/AI/MXNet/Callback.pm
Calculate and log training speed periodically.
Parameters
----------
batch_size: int
batch_size of data
frequent: int
How many batches between calculations.
Defaults to calculating & logging every 50 batches.
auto_reset: Bool
Reset the metric after each log, defaults to true.
=cut
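A typical use is as a batch_end_callback for fit(); a minimal sketch (the class name follows this file's package layout, and $module and $train_iter are assumed to exist):
my $speed = AI::MXNet::Speedometer->new(batch_size => 32, frequent => 50);
$module->fit($train_iter, num_epoch => 10, batch_end_callback => $speed);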
has 'batch_size' => (is => 'ro', isa => 'Int', required => 1);
has 'frequent' => (is => 'ro', isa => 'Int', default => 50);
has 'init' => (is => 'rw', isa => 'Int', default => 0);
has 'tic' => (is => 'rw', isa => 'Num', default => 0);
has 'last_count' => (is => 'rw', isa => 'Int', default => 0);
has 'auto_reset' => (is => 'ro', isa => 'Bool', default => 1);
method call(AI::MXNet::BatchEndParam $param)
{
my $count = $param->nbatch;
if($self->last_count > $count)
{
$self->init(0);
}
$self->last_count($count);
lib/AI/MXNet/Callback.pm
AI::MXNet::ProgressBar - A callback to show a progress bar.
=head1 DESCRIPTION
Shows a progress bar.
Parameters
----------
total: Int
the total number of batches (required)
length: Int
the length of the progress bar, default is 80 chars
=cut
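For example, for an epoch of 313 batches (a sketch):
my $bar = AI::MXNet::ProgressBar->new(total => 313);
# pass it to fit() as batch_end_callback => $bar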
has 'length' => (is => 'ro', isa => 'Int', default => 80);
has 'total' => (is => 'ro', isa => 'Int', required => 1);
method call(AI::MXNet::BatchEndParam $param)
{
my $count = $param->nbatch;
my $filled_len = int(0.5 + $self->length * $count / $self->total);
my $percents = int(($count * 100 + $self->total - 1) / $self->total); # ceil(100*count/total)
my $prog_bar = ('=' x $filled_len) . ('-' x ($self->length - $filled_len));
print "[$prog_bar] $percents%\r";
}
lib/AI/MXNet/Context.pm
device_id => $_[0]->device_id
) if @_ == 1 and blessed $_[0];
return $class->$orig(device_type => $_[0], device_id => $_[1])
if @_ == 2 and $_[0] =~ /^(?:cpu|gpu|cpu_pinned)$/;
return $class->$orig(@_);
};
has 'device_type' => (
is => 'rw',
isa => enum([qw[cpu gpu cpu_pinned]]),
default => 'cpu'
);
has 'device_type_id' => (
is => 'rw',
isa => enum([1, 2, 3]),
default => sub { devstr2type->{ shift->device_type } },
lazy => 1
);
has 'device_id' => (
is => 'rw',
isa => 'Int',
default => 0
);
use overload
'==' => sub {
my ($self, $other) = @_;
return 0 unless blessed($other) and $other->isa(__PACKAGE__);
return "$self" eq "$other";
},
'""' => sub {
my ($self) = @_;
lib/AI/MXNet/Context.pm
=head2 new
Constructing a context.
Parameters
----------
device_type : {'cpu', 'gpu'} or Context.
String representing the device type
device_id : int (default=0)
The device id of the device, needed for GPU
=cut
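Given the BUILDARGS above, these constructions are equivalent (mx->gpu is the usual shortcut):
my $ctx1 = AI::MXNet::Context->new(device_type => 'gpu', device_id => 1);
my $ctx2 = AI::MXNet::Context->new('gpu', 1);
my $ctx3 = mx->gpu(1);
# '==' compares contexts via the overloaded stringification shown above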
=head2 cpu
Returns a CPU context.
Parameters
----------
device_id : int, optional
lib/AI/MXNet/Context.pm
{
return $self->new(device_type => 'gpu', device_id => $device_id);
}
=head2 current_ctx
Returns the current context.
Returns
-------
$default_ctx : AI::MXNet::Context
=cut
method current_ctx()
{
return $AI::MXNet::current_ctx;
}
method deepcopy()
{
return __PACKAGE__->new(
lib/AI/MXNet/Contrib/AutoGrad.pm
}
=head2 backward
Compute the gradients of outputs w.r.t variables.
Parameters
----------
outputs: array ref of NDArray
out_grads: array ref of NDArray or undef
retain_graph: bool, defaults to false
=cut
method backward(
ArrayRef[AI::MXNet::NDArray] $outputs,
Maybe[ArrayRef[AI::MXNet::NDArray|Undef]] $out_grads=,
Bool $retain_graph=0
)
{
my @output_handles = map { $_->handle } @{ $outputs };
lib/AI/MXNet/Executor/Group.pm
## TODO
## This class exists because of https://github.com/gfx/p5-Mouse/pull/67;
## once Mouse 2.4.7 ships in Ubuntu for the affected Perl versions,
## these accessors should be merged into the main class.
package AI::MXNet::DataParallelExecutorGroup::_private;
use Mouse;
has [qw/output_layouts label_layouts arg_names aux_names
batch_size slices execs data_arrays
label_arrays param_arrays grad_arrays aux_arrays
data_layouts shared_data_arrays input_grad_arrays
_default_execs state_arrays/
] => (is => 'rw', init_arg => undef);
package AI::MXNet::DataParallelExecutorGroup;
use Mouse;
use AI::MXNet::Base;
use List::Util qw(sum);
=head1 DESCRIPTION
DataParallelExecutorGroup is a group of executors that lives on a group of devices.
lib/AI/MXNet/Executor/Group.pm
group corresponding to a different bucket. In other words, it will correspond to a different
symbol with the same set of parameters (e.g. unrolled RNNs with different lengths).
In this case the memory regions of the parameters will be shared.
logger : Logger
Default is AI::MXNet::Logging->get_logger.
fixed_param_names: Maybe[ArrayRef[Str]]
Indicates parameters to be fixed during training. Parameters in this array ref will neither
have space allocated for gradients nor take part in gradient calculation.
grad_req : ArrayRef[GradReq]|HashRef[GradReq]|GradReq
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(default to 'write').
Can be specified globally (str) or for each argument (array ref, hash ref).
state_names: Maybe[ArrayRef[Str]]
=cut
has 'symbol' => (is => 'ro', isa => 'AI::MXNet::Symbol', required => 1);
has 'contexts' => (is => 'ro', isa => 'ArrayRef[AI::MXNet::Context]', required => 1);
has 'workload' => (is => 'ro', isa => 'ArrayRef[Num]', default => sub { [] });
has 'data_shapes' => (is => 'rw', isa => 'ArrayRef[NameShape|AI::MXNet::DataDesc]', required => 1);
has 'label_shapes' => (is => 'rw', isa => 'Maybe[ArrayRef[NameShape|AI::MXNet::DataDesc]]');
has 'param_names' => (is => 'ro', isa => 'ArrayRef[Str]', required => 1);
has 'for_training' => (is => 'ro', isa => 'Bool', required => 1);
has 'inputs_need_grad' => (is => 'ro', isa => 'Bool', default => 0);
has 'shared_group' => (is => 'ro', isa => 'Maybe[AI::MXNet::DataParallelExecutorGroup]');
has 'logger' => (is => 'ro', default => sub { AI::MXNet::Logging->get_logger });
has 'fixed_param_names' => (is => 'rw', isa => 'Maybe[ArrayRef[Str]]');
has 'state_names' => (is => 'rw', isa => 'Maybe[ArrayRef[Str]]');
has 'grad_req' => (is => 'rw', isa => 'ArrayRef[GradReq]|HashRef[GradReq]|GradReq', default=>'write');
has '_p' => (is => 'rw', init_arg => undef);
sub BUILD
{
my $self = shift;
my $p = AI::MXNet::DataParallelExecutorGroup::_private->new;
$p->arg_names($self->symbol->list_arguments);
$p->aux_names($self->symbol->list_auxiliary_states);
$p->execs([]);
$self->_p($p);
$self->grad_req('null') if not $self->for_training;
lib/AI/MXNet/Executor/Group.pm
{
my $data_shapes_i = $self->_sliced_shape($data_shapes, $i, $self->_p->data_layouts);
my $label_shapes_i = [];
if(defined $label_shapes)
{
$label_shapes_i = $self->_sliced_shape($label_shapes, $i, $self->_p->label_layouts);
}
if($reshape)
{
my %combined_hash = map { $_->name => $_->shape } (@{ $data_shapes_i }, @{ $label_shapes_i });
$self->_p->execs->[$i] = $self->_p->_default_execs->[$i]->reshape(
\%combined_hash,
allow_up_sizing => 1,
);
}
else
{
push @{ $self->_p->execs }, $self->_bind_ith_exec($i, $data_shapes_i, $label_shapes_i, $shared_group);
}
}
$self->data_shapes($data_shapes);
lib/AI/MXNet/Executor/Group.pm
$label_shapes : Maybe[ArrayRef[AI::MXNet::DataDesc]]
=cut
method reshape(
ArrayRef[AI::MXNet::DataDesc] $data_shapes,
Maybe[ArrayRef[AI::MXNet::DataDesc]] $label_shapes=
)
{
return if($data_shapes eq $self->data_shapes and $label_shapes eq $self->label_shapes);
if (not defined $self->_p->_default_execs)
{
$self->_p->_default_execs([@{ $self->_p->execs }]);
}
$self->bind_exec($data_shapes, $label_shapes, undef, 1);
}
=head2 set_params
Assign, i.e. copy parameters to all the executors.
Parameters
----------
lib/AI/MXNet/Function/Parameters.pm
package AI::MXNet::Function::Parameters;
use strict;
use warnings;
use Function::Parameters ();
use AI::MXNet::Types ();
sub import {
Function::Parameters->import(
{
func => {
defaults => 'function_strict',
runtime => 1,
reify_type => sub {
Mouse::Util::TypeConstraints::find_or_create_isa_type_constraint($_[0])
}
},
method => {
defaults => 'method_strict',
runtime => 1,
reify_type => sub {
Mouse::Util::TypeConstraints::find_or_create_isa_type_constraint($_[0])
}
},
}
);
}
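Modules in this distribution enable these keywords with use AI::MXNet::Function::Parameters. A minimal sketch of what that provides (the package and names are illustrative):
package My::Example;
use Mouse;
use AI::MXNet::Function::Parameters;
# 'method' injects $self; argument types are Mouse type constraints,
# resolved through the reify_type hook above.
method scale(Num $x, Int :$by=2)
{
    return $x * $by;
}
print My::Example->new->scale(3, by => 4), "\n";    # prints 12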
{
lib/AI/MXNet/IO.pm
=head1 NAME
AI::MXNet::IO - Data iterators for MXNet.
=cut
# Convert data into canonical form.
method init_data(
AcceptableInput|HashRef[AcceptableInput]|ArrayRef[AcceptableInput]|Undef $data,
Undef|Int :$allow_empty=,
Str :$default_name
)
{
Carp::confess("data must be defined or allow_empty set to true value")
if(not defined $data and not $allow_empty);
$data //= [];
if(ref($data) and ref($data) ne 'ARRAY' and ref($data) ne 'HASH')
{
$data = [$data];
}
Carp::confess("data must not be empty or allow_empty set to true value")
if(ref($data) eq 'ARRAY' and not @{ $data } and not $allow_empty);
my @ret;
if(ref($data) eq 'ARRAY')
{
if(@{ $data } == 1)
{
@ret = ([$default_name, $data->[0]]);
}
else
{
my $i = -1;
@ret = map { $i++; ["_${i}_$default_name", $_] } @{ $data };
}
}
if(ref($data) eq 'HASH')
{
while(my ($k, $v) = each %{ $data })
{
push @ret, [$k, $v];
}
}
for my $d (@ret)
lib/AI/MXNet/IO.pm
method DataDesc(@args) { AI::MXNet::DataDesc->new(@args) }
method DataBatch(@args) { AI::MXNet::DataBatch->new(@args) }
package AI::MXNet::DataDesc;
use Mouse;
use overload '""' => \&stringify,
'@{}' => \&to_nameshape;
has 'name' => (is => 'ro', isa => "Str", required => 1);
has 'shape' => (is => 'ro', isa => "Shape", required => 1);
has 'dtype' => (is => 'ro', isa => "Dtype", default => 'float32');
has 'layout' => (is => 'ro', isa => "Str", default => 'NCHW');
around BUILDARGS => sub {
my $orig = shift;
my $class = shift;
if(@_ >= 2 and ref $_[1] eq 'ARRAY')
{
my $name = shift;
my $shape = shift;
return $class->$orig(name => $name, shape => $shape, @_);
}
lib/AI/MXNet/IO.pm
package AI::MXNet::DataIter;
use Mouse;
use overload '<>' => sub { shift->next },
'@{}' => sub { shift->list };
=head1 NAME
AI::MXNet::DataIter - A parent class for MXNet data iterators.
=cut
has 'batch_size' => (is => 'rw', isa => 'Int', default => 0);
=head2 reset
Reset the iterator.
=cut
method reset(){}
=head2 list
lib/AI/MXNet/IO.pm
Parameters
----------
data_iter : DataIter
Internal data iterator.
size : number of batches per epoch to resize to.
reset_internal : whether to reset internal iterator on ResizeIter.reset
=cut
has 'data_iter' => (is => 'ro', isa => 'AI::MXNet::DataIter', required => 1);
has 'size' => (is => 'ro', isa => 'Int', required => 1);
has 'reset_internal' => (is => 'rw', isa => 'Int', default => 1);
has 'cur' => (is => 'rw', isa => 'Int', default => 0);
has 'current_batch' => (is => 'rw', isa => 'Maybe[AI::MXNet::DataBatch]');
has [qw/provide_data
default_bucket_key
provide_label
batch_size/] => (is => 'rw', init_arg => undef);
sub BUILD
{
my $self = shift;
$self->provide_data($self->data_iter->provide_data);
$self->provide_label($self->data_iter->provide_label);
$self->batch_size($self->data_iter->batch_size);
if($self->data_iter->can('default_bucket_key'))
{
$self->default_bucket_key($self->data_iter->default_bucket_key);
}
}
method reset()
{
$self->cur(0);
if($self->reset_internal)
{
$self->data_iter->reset;
}
lib/AI/MXNet/IO.pm
Note
----
This iterator will pad, discard or roll over the last batch if
the size of data does not match batch_size. Roll over is intended
for training and can cause problems if used for prediction.
=cut
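A construction sketch (shapes illustrative; the '<> ' operator is overloaded to ->next in AI::MXNet::DataIter):
my $iter = AI::MXNet::NDArrayIter->new(
    data              => mx->nd->ones([100, 10]),
    label             => mx->nd->zeros([100]),
    batch_size        => 32,
    shuffle           => 1,
    last_batch_handle => 'discard',  # drop the ragged final batch instead of padding
);
while(my $batch = <$iter>)
{
    # $batch->data->[0] is a [32, 10] NDArray
}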
has 'data' => (is => 'rw', isa => 'Maybe[AcceptableInput|HashRef[AcceptableInput]|ArrayRef[AcceptableInput]]');
has 'data_list' => (is => 'rw', isa => 'ArrayRef[AI::MXNet::NDArray]');
has 'label' => (is => 'rw', isa => 'Maybe[AcceptableInput|HashRef[AcceptableInput]|ArrayRef[AcceptableInput]]');
has 'batch_size' => (is => 'rw', isa => 'Int', default => 1);
has '_shuffle' => (is => 'rw', init_arg => 'shuffle', isa => 'Bool', default => 0);
has 'last_batch_handle' => (is => 'rw', isa => 'Str', default => 'pad');
has 'label_name' => (is => 'rw', isa => 'Str', default => 'softmax_label');
has 'num_source' => (is => 'rw', isa => 'Int');
has 'cursor' => (is => 'rw', isa => 'Int');
has 'num_data' => (is => 'rw', isa => 'Int');
around BUILDARGS => sub {
my $orig = shift;
my $class = shift;
if(@_%2)
{
my $data = shift;
return $class->$orig(data => $data, @_);
}
return $class->$orig(@_);
};
sub BUILD
{
my $self = shift;
my $data = AI::MXNet::IO->init_data($self->data, allow_empty => 0, default_name => 'data');
my $label = AI::MXNet::IO->init_data($self->label, allow_empty => 1, default_name => $self->label_name);
my $num_data = $data->[0][1]->shape->[0];
confess("size of data dimension 0 $num_data < batch_size ${\ $self->batch_size }")
unless($num_data >= $self->batch_size);
if($self->_shuffle)
{
my @idx = shuffle(0..$num_data-1);
$_->[1] = AI::MXNet::NDArray->array(pdl_shuffle($_->[1]->aspdl, \@idx)) for @$data;
$_->[1] = AI::MXNet::NDArray->array(pdl_shuffle($_->[1]->aspdl, \@idx)) for @$label;
}
if($self->last_batch_handle eq 'discard')
lib/AI/MXNet/IO.pm
use AI::MXNet::Base;
extends 'AI::MXNet::DataIter';
=head1 NAME
AI::MXNet::MXDataIter - A data iterator pre-built in C++ layer of MXNet.
=cut
has 'handle' => (is => 'ro', isa => 'DataIterHandle', required => 1);
has '_debug_skip_load' => (is => 'rw', isa => 'Int', default => 0);
has '_debug_at_begin' => (is => 'rw', isa => 'Int', default => 0);
has 'data_name' => (is => 'ro', isa => 'Str', default => 'data');
has 'label_name' => (is => 'ro', isa => 'Str', default => 'softmax_label');
has [qw/first_batch
provide_data
provide_label
batch_size/] => (is => 'rw', init_arg => undef);
sub BUILD
{
my $self = shift;
$self->first_batch($self->next);
my $data = $self->first_batch->data->[0];
lib/AI/MXNet/Image.pm
Decode an image from string. Requires OpenCV to work.
Parameters
----------
$buf : str, array ref, pdl, ndarray
Binary image data.
:$flag : int
0 for grayscale. 1 for colored.
:$to_rgb : int
0 for BGR format (OpenCV default). 1 for RGB format (MXNet default).
:$out : NDArray
Output buffer. Do not specify for automatic allocation.
=cut
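For example, assuming imdecode is called as a class method on AI::MXNet::Image (the file name is hypothetical):
open(my $fh, '<:raw', 'input.jpg') or die "Can't open input.jpg: $!";
my $buf = do { local $/; <$fh> };
close($fh);
my $img  = AI::MXNet::Image->imdecode($buf);             # HxWx3 NDArray, RGB
my $gray = AI::MXNet::Image->imdecode($buf, flag => 0);  # grayscale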
method imdecode(Str|PDL $buf, Int :$flag=1, Int :$to_rgb=1, Maybe[AI::MXNet::NDArray] :$out=)
{
if(not ref $buf)
{
my $pdl_type = PDL::Type->new(DTYPE_MX_TO_PDL->{'uint8'});
my $len; { use bytes; $len = length $buf; }
lib/AI/MXNet/Image.pm
Partition index
num_parts : int
Total number of partitions.
data_name : Str, default='data'
label_name : Str, default='softmax_label'
kwargs : hash ref with any additional arguments for augmenters
=cut
has 'batch_size' => (is => 'ro', isa => 'Int', required => 1);
has 'data_shape' => (is => 'ro', isa => 'Shape', required => 1);
has 'label_width' => (is => 'ro', isa => 'Int', default => 1);
has 'data_name' => (is => 'ro', isa => 'Str', default => 'data');
has 'label_name' => (is => 'ro', isa => 'Str', default => 'softmax_label');
has [qw/path_imgrec
path_imglist
path_root
path_imgidx
/] => (is => 'ro', isa => 'Str');
has 'shuffle' => (is => 'ro', isa => 'Bool', default => 0);
has 'part_index' => (is => 'ro', isa => 'Int', default => 0);
has 'num_parts' => (is => 'ro', isa => 'Int', default => 0);
has 'aug_list' => (is => 'rw', isa => 'ArrayRef[CodeRef]');
has 'imglist' => (is => 'rw', isa => 'ArrayRef|HashRef');
has 'kwargs' => (is => 'ro', isa => 'HashRef');
has [qw/imgidx
imgrec
seq
cur
provide_data
provide_label
/] => (is => 'rw', init_arg => undef);
lib/AI/MXNet/Initializer.pm
=head2 new
Parameters
---------
name : str
name of variable
attrs : hash ref of str to str
attributes of this variable taken from AI::MXNet::Symbol->attr_dict
=cut
has 'name' => (is => 'ro', isa => 'Str', required => 1);
has 'attrs' => (is => 'rw', isa => 'HashRef[Str]', lazy => 1, default => sub { +{} });
use overload '""' => sub { shift->name };
around BUILDARGS => sub {
my $orig = shift;
my $class = shift;
return $class->$orig(name => $_[0]) if @_ == 1;
return $class->$orig(@_);
};
# Base class for Initializers
package AI::MXNet::Initializer;
lib/AI/MXNet/Initializer.pm
'""' => sub {
my $self = shift;
my ($name) = ref($self) =~ /::(\w+)$/;
encode_json(
[lc $name,
$self->kwargs//{ map { $_ => "".$self->$_ } $self->meta->get_attribute_list }
]);
},
fallback => 1;
has 'kwargs' => (is => 'rw', init_arg => undef, isa => 'HashRef');
has '_verbose' => (is => 'rw', isa => 'Bool', lazy => 1, default => 0);
has '_print_func' => (is => 'rw', isa => 'CodeRef', lazy => 1,
default => sub {
return sub {
my $x = shift;
return ($x->norm/sqrt($x->size))->asscalar;
};
}
);
=head1 NAME
AI::MXNet::Initializer - Base class for all Initializers
lib/AI/MXNet/Initializer.pm
{
$desc = "$desc";
if($desc =~ /(weight|bias|gamma|beta)$/)
{
my $method = "_init_$1";
$self->$method($desc, $arr);
$self->_verbose_print($desc, $1, $arr);
}
else
{
$self->_init_default($desc, $arr)
}
}
}
method _legacy_init(Str $name, AI::MXNet::NDArray $arr)
{
warnings::warnif(
'deprecated',
'Calling initializer with init($str, $NDArray) has been deprecated.'.
lib/AI/MXNet/Initializer.pm
elsif($name =~ /moving_inv_var$/)
{
$self->_init_zero($name, $arr);
}
elsif($name =~ /moving_avg$/)
{
$self->_init_zero($name, $arr);
}
else
{
$self->_init_default($name, $arr);
}
}
*slice = *call;
method _init_bilinear($name, $arr)
{
my $pdl_type = PDL::Type->new(DTYPE_MX_TO_PDL->{ 'float32' });
my $weight = pzeros(
PDL::Type->new(DTYPE_MX_TO_PDL->{ 'float32' }),
lib/AI/MXNet/Initializer.pm
method _init_beta($name, $arr)
{
$arr .= 0;
}
method _init_weight($name, $arr)
{
confess("Virtual method, subclass must override it");
}
method _init_default($name, $arr)
{
confess(
"Unknown initialization pattern for $name. "
.'Default initialization is now limited to '
.'"weight", "bias", "gamma" (1.0), and "beta" (0.0).'
.' Please use mx.sym.Variable(init=mx.init.*) to set the initialization pattern'
);
}
=head1 NAME
AI::MXNet::Load - Initialize by loading a pretrained param from a hash ref.
=cut
=head2 new
Parameters
----------
param: HashRef[AI::MXNet::NDArray]
default_init: Initializer
default initializer when a name is not found in the param hash ref.
verbose: bool
log the names when initializing.
=cut
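A usage sketch (the params file name is hypothetical):
my $init = AI::MXNet::Load->new(
    param        => AI::MXNet::NDArray->load('model-0010.params'),
    default_init => AI::MXNet::Uniform->new(0.07),  # fallback for missing names
    verbose      => 1,
);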
package AI::MXNet::Load;
use Mouse;
extends 'AI::MXNet::Initializer';
has 'param' => (is => "rw", isa => 'HashRef[AI::MXNet::NDArray]', required => 1);
has 'default_init' => (is => "rw", isa => "AI::MXNet::Initializer");
has 'verbose' => (is => "rw", isa => "Int", default => 0);
sub BUILD
{
my $self = shift;
$self->param(AI::MXNet::NDArray->load($self->param)) unless ref $self->param;
my %self_param;
while(my ($name, $arr) = each %{ $self->param })
{
$name =~ s/^(?:arg|aux)://;
$self_param{ $name } = $arr;
lib/AI/MXNet/Initializer.pm
"Parameter $name cannot be initialized from loading. "
."Shape mismatch, target $target_shape vs loaded $param_shape"
) unless $target_shape eq $param_shape;
$arr .= $self->param->{ $name };
AI::MXNet::Log->info("Initialized $name by loading") if $self->verbose;
}
else
{
confess(
"Cannot Initialize $name. Not found in loaded param "
."and no default Initializer is provided."
) unless defined $self->default_init;
$self->default_init($name, $arr);
AI::MXNet::Log->info("Initialized $name by default") if $self->verbose;
}
}
*slice = *call;
=head1 NAME
AI::MXNet::Mixed - A container for multiple initializer patterns.
=cut
lib/AI/MXNet/Initializer.pm
for my $pattern (keys %{ $self->map })
{
if($name =~ /$pattern/)
{
&{$self->map->{$pattern}}($name, $arr);
return;
}
}
confess(
"Parameter name $name did not match any pattern. Consider"
."add a \".*\" pattern at the and with default Initializer."
);
}
package AI::MXNet::Zero;
use Mouse;
extends 'AI::MXNet::Initializer';
method _init_weight(Str $name, AI::MXNet::NDArray $arr)
{
$arr .= 0;
}
lib/AI/MXNet/Initializer.pm
Parameters
----------
scale : float, optional
The scale of the uniform distribution.
=cut
package AI::MXNet::Uniform;
use Mouse;
extends 'AI::MXNet::Initializer';
has "scale" => (is => "ro", isa => "Num", default => 0.7);
around BUILDARGS => sub {
my $orig = shift;
my $class = shift;
return $class->$orig(scale => $_[0]) if @_ == 1;
return $class->$orig(@_);
};
method _init_weight(Str $name, AI::MXNet::NDArray $arr)
{
AI::MXNet::Random->uniform(-$self->scale, $self->scale, { out => $arr });
lib/AI/MXNet/Initializer.pm
Parameters
----------
sigma : float, optional
Standard deviation for the gaussian distribution.
=cut
package AI::MXNet::Normal;
use Mouse;
extends 'AI::MXNet::Initializer';
has "sigma" => (is => "ro", isa => "Num", default => 0.01);
around BUILDARGS => sub {
my $orig = shift;
my $class = shift;
return $class->$orig(sigma => $_[0]) if @_ == 1;
return $class->$orig(@_);
};
method _init_weight(Str $name, AI::MXNet::NDArray $arr)
{
AI::MXNet::Random->normal(0, $self->sigma, { out => $arr });
lib/AI/MXNet/Initializer.pm
---------
Exact solutions to the nonlinear dynamics of learning in deep linear neural networks
arXiv preprint arXiv:1312.6120 (2013).
=cut
package AI::MXNet::Orthogonal;
use AI::MXNet::Base;
use Mouse;
use AI::MXNet::Types;
extends 'AI::MXNet::Initializer';
has "scale" => (is => "ro", isa => "Num", default => 1.414);
has "rand_type" => (is => "ro", isa => enum([qw/uniform normal/]), default => 'uniform');
method _init_weight(Str $name, AI::MXNet::NDArray $arr)
{
my @shape = @{ $arr->shape };
my $nout = $shape[0];
my $nin = AI::MXNet::NDArray->size([@shape[1..$#shape]]);
my $tmp = AI::MXNet::NDArray->zeros([$nout, $nin]);
if($self->rand_type eq 'uniform')
{
AI::MXNet::Random->uniform(-1, 1, { out => $tmp });
lib/AI/MXNet/Initializer.pm
factor_type: str, optional
Use avg, in, or out.
magnitude: float, optional
The scale of the random number range.
=cut
package AI::MXNet::Xavier;
use Mouse;
use AI::MXNet::Types;
extends 'AI::MXNet::Initializer';
has "magnitude" => (is => "rw", isa => "Num", default => 3);
has "rnd_type" => (is => "ro", isa => enum([qw/uniform gaussian/]), default => 'uniform');
has "factor_type" => (is => "ro", isa => enum([qw/avg in out/]), default => 'avg');
method _init_weight(Str $name, AI::MXNet::NDArray $arr)
{
my @shape = @{ $arr->shape };
my $hw_scale = 1;
if(@shape > 2)
{
$hw_scale = AI::MXNet::NDArray->size([@shape[2..$#shape]]);
}
my ($fan_in, $fan_out) = ($shape[1] * $hw_scale, $shape[0] * $hw_scale);
lib/AI/MXNet/Initializer.pm
factor_type: str, optional
Use avg, in, or out.
slope: float, optional
initial slope of any PReLU (or similar) nonlinearities.
=cut
package AI::MXNet::MSRAPrelu;
use Mouse;
extends 'AI::MXNet::Xavier';
has '+rnd_type' => (default => "gaussian");
has '+factor_type' => (default => "avg");
has 'slope' => (is => 'ro', isa => 'Num', default => 0.25);
sub BUILD
{
my $self = shift;
my $magnitude = 2 / (1 + $self->slope ** 2);
$self->magnitude($magnitude);
$self->kwargs({ slope => $self->slope, factor_type => $self->factor_type });
}
__PACKAGE__->register;
lib/AI/MXNet/Initializer.pm
FusedRNNCell constructor.
num_hidden : int
num_layers : int
mode : str
bidirectional : bool
forget_bias : float
=cut
has 'init' => (is => 'rw', isa => 'Str|AI::MXNet::Initializer', required => 1);
has 'forget_bias' => (is => 'ro', isa => 'Num', default => 1);
has [qw/num_hidden
num_layers/] => (is => 'ro', isa => 'Int', required => 1);
has 'mode' => (is => 'ro', isa => 'Str', required => 1);
has 'bidirectional' => (is => 'ro', isa => 'Bool', default => 0);
sub BUILD
{
my $self = shift;
if(not blessed $self->init)
{
my ($klass, $kwargs);
eval {
($klass, $kwargs) = @{ decode_json($self->init) };
};
lib/AI/MXNet/KVStoreServer.pm
=head2 new
Initialize a new KVStoreServer.
Parameters
----------
kvstore : KVStore
=cut
has 'kvstore' => (is => 'ro', isa => 'AI::MXNet::KVStore', required => 1);
has 'handle' => (is => 'ro', isa => 'KVStoreHandle', default => sub { shift->kvstore->handle }, lazy => 1);
has 'init_logging' => (is => 'rw', isa => 'Int', default => 0);
# return the server controller
method _controller()
{
return sub {
my ($cmd_id, $cmd_body) = @_;
if (not $self->init_logging)
{
## TODO write logging
lib/AI/MXNet/LRScheduler.pm
=cut
=head1 DESCRIPTION
Learning rate scheduler, which adaptively changes the learning rate based on the
progress.
=cut
=head2 new
base_lr : float (optional, default 0.01)
the initial learning rate
=cut
has 'base_lr' => (is => 'rw', isa => 'Num', default => 0.01);
=head2 call
Call to schedule current learning rate
The training progress is presented by num_update, which can be roughly
viewed as the number of minibatches executed so far. Its value is
non-decreasing, and increases at most by one.
The exact value is the upper bound of the number of updates applied to
lib/AI/MXNet/LRScheduler.pm
----------
step: int
schedule the learning rate update after n updates
factor: float
the factor by which to reduce the learning rate.
=cut
use Mouse;
extends 'AI::MXNet::LRScheduler';
has 'step' => (is => 'ro', isa => 'Int', required => 1);
has 'factor' => (is => 'ro', isa => 'Num', default => 1);
has 'count' => (is => 'rw', isa => 'Int', default => 1);
has 'stop_factor_lr' => (is => 'ro', isa => 'Num', default => 1e-8);
sub BUILD
{
my $self = shift;
confess("Schedule step must be greater or equal than 1")
if $self->step < 1;
confess("Factor must be no more than 1 to make lr reduce")
if $self->factor > 1;
}
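A usage sketch, assuming this package is AI::MXNet::FactorScheduler: halve the learning rate every 10000 updates and hand the schedule to an optimizer.
my $scheduler = AI::MXNet::FactorScheduler->new(step => 10000, factor => 0.5);
my $optimizer = AI::MXNet::Optimizer->create(
    'sgd',
    learning_rate => 0.1,
    lr_scheduler  => $scheduler,
);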
lib/AI/MXNet/LRScheduler.pm
----------
step: array ref of int
schedule learning rate after n updates
factor: float
the factor for reducing the learning rate
=cut
use Mouse;
extends 'AI::MXNet::LRScheduler';
has 'step' => (is => 'ro', isa => 'ArrayRef[Int]', required => 1);
has 'factor' => (is => 'ro', isa => 'Num', default => 1);
has 'cur_step_ind' => (is => 'rw', isa => 'Int', default => 0);
has 'count' => (is => 'rw', isa => 'Int', default => 0);
sub BUILD
{
my $self = shift;
confess("step array must have at least one member")
unless @{ $self->step } >= 1;
for (my $i = 0; $i < @{ $self->step }; $i++)
{
confess("Schedule step must be an increasing integer list")
if($i and $self->step->[$i] <= $self->step->[$i-1]);
lib/AI/MXNet/Metric.pm
$value = [$value] unless ref $value;
my %ret;
@ret{ @$name } = @$value;
return \%ret;
}
package AI::MXNet::CompositeEvalMetric;
use Mouse;
extends 'AI::MXNet::EvalMetric';
has 'metrics' => (is => 'rw', isa => 'ArrayRef[AI::MXNet::EvalMetric]', default => sub { [] });
has '+name' => (default => 'composite');
# Add a child metric.
method add(AI::MXNet::EvalMetric $metric)
{
push @{ $self->metrics }, $metric;
}
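For example, to track accuracy and cross-entropy together:
my $metric = AI::MXNet::CompositeEvalMetric->new;
$metric->add(AI::MXNet::Accuracy->new);
$metric->add(AI::MXNet::CrossEntropy->new);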
# Get a child metric.
method get_metric(Int $index)
{
lib/AI/MXNet/Metric.pm
########################
# CLASSIFICATION METRICS
########################
package AI::MXNet::Accuracy;
use Mouse;
use AI::MXNet::Base;
extends 'AI::MXNet::EvalMetric';
has '+name' => (default => 'accuracy');
method update(ArrayRef[AI::MXNet::NDArray] $labels, ArrayRef[AI::MXNet::NDArray] $preds)
{
AI::MXNet::Metric::check_label_shapes($labels, $preds);
zip(sub {
my ($label, $pred_label) = @_;
if(join(',', @{$pred_label->shape}) ne join(',', @{$label->shape}))
{
$pred_label = AI::MXNet::NDArray->argmax_channel($pred_label);
}
lib/AI/MXNet/Metric.pm
$self->sum_metric($self->sum_metric + $sum);
$self->num_inst($self->num_inst + $pred_label->size);
}, $labels, $preds);
}
package AI::MXNet::TopKAccuracy;
use Mouse;
use List::Util qw/min/;
use AI::MXNet::Base;
extends 'AI::MXNet::EvalMetric';
has '+name' => (default => 'top_k_accuracy');
has 'top_k' => (is => 'rw', isa => 'Int', default => 1);
sub BUILD
{
my $self = shift;
confess("Please use Accuracy if top_k is no more than 1")
unless $self->top_k > 1;
$self->name($self->name . "_" . $self->top_k);
}
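For example:
# counts a prediction as correct when the true label is among the 5 best scores;
# per BUILD above, the metric's name becomes 'top_k_accuracy_5'
my $top5 = AI::MXNet::TopKAccuracy->new(top_k => 5);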
method update(ArrayRef[AI::MXNet::NDArray] $labels, ArrayRef[AI::MXNet::NDArray] $preds)
lib/AI/MXNet/Metric.pm
}
$self->num_inst($self->num_inst + $num_samples);
}, $labels, $preds);
}
# Calculate the F1 score of a binary classification problem.
package AI::MXNet::F1;
use Mouse;
use AI::MXNet::Base;
extends 'AI::MXNet::EvalMetric';
has '+name' => (default => 'f1');
method update(ArrayRef[AI::MXNet::NDArray] $labels, ArrayRef[AI::MXNet::NDArray] $preds)
{
AI::MXNet::Metric::check_label_shapes($labels, $preds);
zip(sub {
my ($label, $pred_label) = @_;
AI::MXNet::Metric::check_label_shapes($label, $pred_label);
$pred_label = $pred_label->aspdl->maximum_ind;
$label = $label->astype('int32')->aspdl;
confess("F1 currently only supports binary classification.")
lib/AI/MXNet/Metric.pm
}
$self->sum_metric($self->sum_metric + $f1_score);
$self->num_inst($self->num_inst + 1);
}, $labels, $preds);
}
package AI::MXNet::Perplexity;
use Mouse;
use AI::MXNet::Base;
extends 'AI::MXNet::EvalMetric';
has '+name' => (default => 'Perplexity');
has 'ignore_label' => (is => 'ro', isa => 'Maybe[Int]');
has 'axis' => (is => 'ro', isa => 'Int', default => -1);
around BUILDARGS => sub {
my $orig = shift;
my $class = shift;
return $class->$orig(ignore_label => $_[0]) if @_ == 1;
return $class->$orig(@_);
};
=head1 NAME
AI::MXNet::Perplexity
lib/AI/MXNet/Metric.pm
=head1 DESCRIPTION
Calculate perplexity.
Parameters
----------
ignore_label : int or undef
index of the invalid label to ignore when
counting; usually should be -1. If undef,
all entries are included.
axis : int (default -1)
The axis from prediction that was used to
compute softmax. By default uses the last
axis.
=cut
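For example, as in the bucketing examples above:
my $ppl = mx->metric->Perplexity(0);  # the single argument maps to ignore_label via BUILDARGS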
method update(ArrayRef[AI::MXNet::NDArray] $labels, ArrayRef[AI::MXNet::NDArray] $preds)
{
AI::MXNet::Metric::check_label_shapes($labels, $preds);
my ($loss, $num) = (0, 0);
zip(sub {
my ($label, $pred) = @_;
my $label_shape = $label->shape;
lib/AI/MXNet/Metric.pm
####################
# REGRESSION METRICS
####################
# Calculate Mean Absolute Error loss
package AI::MXNet::MAE;
use Mouse;
use AI::MXNet::Base;
extends 'AI::MXNet::EvalMetric';
has '+name' => (default => 'mae');
method update(ArrayRef[AI::MXNet::NDArray] $labels, ArrayRef[AI::MXNet::NDArray] $preds)
{
AI::MXNet::Metric::check_label_shapes($labels, $preds);
zip(sub {
my ($label, $pred) = @_;
$label = $label->aspdl;
$pred = $pred->aspdl;
if($label->ndims == 1)
{
lib/AI/MXNet/Metric.pm
$self->sum_metric($self->sum_metric + ($label - $pred)->abs->avg);
$self->num_inst($self->num_inst + 1);
}, $labels, $preds);
}
# Calculate Mean Squared Error loss
package AI::MXNet::MSE;
use Mouse;
use AI::MXNet::Base;
extends 'AI::MXNet::EvalMetric';
has '+name' => (default => 'mse');
method update(ArrayRef[AI::MXNet::NDArray] $labels, ArrayRef[AI::MXNet::NDArray] $preds)
{
AI::MXNet::Metric::check_label_shapes($labels, $preds);
zip(sub {
my ($label, $pred) = @_;
$label = $label->aspdl;
$pred = $pred->aspdl;
if($label->ndims == 1)
{
lib/AI/MXNet/Metric.pm
$self->sum_metric($self->sum_metric + (($label - $pred)**2)->avg);
$self->num_inst($self->num_inst + 1);
}, $labels, $preds);
}
# Calculate Root Mean Squared Error loss
package AI::MXNet::RMSE;
use Mouse;
use AI::MXNet::Base;
extends 'AI::MXNet::EvalMetric';
has '+name' => (default => 'rmse');
method update(ArrayRef[AI::MXNet::NDArray] $labels, ArrayRef[AI::MXNet::NDArray] $preds)
{
AI::MXNet::Metric::check_label_shapes($labels, $preds);
zip(sub {
my ($label, $pred) = @_;
$label = $label->aspdl;
$pred = $pred->aspdl;
if($label->ndims == 1)
{
lib/AI/MXNet/Metric.pm
$self->sum_metric($self->sum_metric + sqrt((($label - $pred)**2)->avg));
$self->num_inst($self->num_inst + 1);
}, $labels, $preds);
}
# Calculate Cross Entropy loss
package AI::MXNet::CrossEntropy;
use Mouse;
use AI::MXNet::Base;
extends 'AI::MXNet::EvalMetric';
has '+name' => (default => 'cross-entropy');
has 'eps' => (is => 'ro', isa => 'Num', default => 1e-8);
around BUILDARGS => sub {
my $orig = shift;
my $class = shift;
return $class->$orig(eps => $_[0]) if @_ == 1;
return $class->$orig(@_);
};
method update(ArrayRef[AI::MXNet::NDArray] $labels, ArrayRef[AI::MXNet::NDArray] $preds)
{
AI::MXNet::Metric::check_label_shapes($labels, $preds);
lib/AI/MXNet/Metric.pm
my $prob = $pred->index($label);
$self->sum_metric($self->sum_metric + (-($prob + $self->eps)->log)->sum);
$self->num_inst($self->num_inst + $label_shape);
}, $labels, $preds);
}
package AI::MXNet::PearsonCorrelation;
use Mouse;
use AI::MXNet::Base;
extends 'AI::MXNet::EvalMetric';
has '+name' => (default => 'pearson-correlation');
=head1 NAME
AI::MXNet::PearsonCorrelation
=cut
=head1 DESCRIPTION
Computes Pearson correlation.
lib/AI/MXNet/Metric.pm
This is useful in RNN, where the states are also produced
in outputs for forwarding.
=cut
package AI::MXNet::CustomMetric;
use Mouse;
use AI::MXNet::Base;
extends 'AI::MXNet::EvalMetric';
has 'eval_function' => (is => 'ro', isa => 'CodeRef');
has 'allow_extra_outputs' => (is => 'ro', isa => 'Int', default => 0);
method update(ArrayRef[AI::MXNet::NDArray] $labels, ArrayRef[AI::MXNet::NDArray] $preds)
{
AI::MXNet::Metric::check_label_shapes($labels, $preds)
unless $self->allow_extra_outputs;
zip(sub {
my ($label, $pred) = @_;
$label = $label->aspdl;
$pred = $pred->aspdl;
my $value = $self->eval_function->($label, $pred);
lib/AI/MXNet/Module.pm
=cut
extends 'AI::MXNet::Module::Base';
has '_symbol' => (is => 'ro', init_arg => 'symbol', isa => 'AI::MXNet::Symbol', required => 1);
has '_data_names' => (is => 'ro', init_arg => 'data_names', isa => 'ArrayRef[Str]');
has '_label_names' => (is => 'ro', init_arg => 'label_names', isa => 'Maybe[ArrayRef[Str]]');
has 'work_load_list' => (is => 'rw', isa => 'Maybe[ArrayRef[Int]]');
has 'fixed_param_names' => (is => 'rw', isa => 'Maybe[ArrayRef[Str]]');
has 'state_names' => (is => 'rw', isa => 'Maybe[ArrayRef[Str]]');
has 'logger' => (is => 'ro', default => sub { AI::MXNet::Logging->get_logger });
has '_p' => (is => 'rw', init_arg => undef);
has 'context' => (
is => 'ro',
isa => 'AI::MXNet::Context|ArrayRef[AI::MXNet::Context]',
default => sub { AI::MXNet::Context->cpu }
);
around BUILDARGS => sub {
my $orig = shift;
my $class = shift;
if(@_%2)
{
my $symbol = shift;
return $class->$orig(symbol => $symbol, @_);
}
lib/AI/MXNet/Module/Base.pm
this value might not be well defined.
When those intermediate-level API are implemented properly, the following
high-level API will be automatically available for a module:
- fit: train the module parameters on a data set
- predict: run prediction on a data set and collect outputs
- score: run prediction on a data set and evaluate performance
=cut
has 'logger' => (is => 'rw', default => sub { AI::MXNet::Logging->get_logger });
has '_symbol' => (is => 'rw', init_arg => 'symbol', isa => 'AI::MXNet::Symbol');
has [
qw/binded for_training inputs_need_grad
params_initialized optimizer_initialized/
] => (is => 'rw', isa => 'Bool', init_arg => undef, default => 0);
################################################################################
# High Level API
################################################################################
=head2 forward_backward
A convenient function that calls both forward and backward.
=cut
lib/AI/MXNet/Module/Base.pm
:$merge_batches=1 : Bool
Default is 1.
:$reset=1 : Bool
Default is 1, indicating whether we should reset the data iter before start
doing prediction.
:$always_output_list=0 : Bool
Default is 0, see the doc for return values.
Returns
-------
When $merge_batches is 1 (the default), the return value will be an array ref
[$out1, $out2, $out3] where each element is the concatenation of the outputs for
all the mini-batches. If $always_output_list is also 0 (the default),
then in the case of a single output, $out1 is returned instead of [$out1].
When $merge_batches is 0, the return value will be a nested array ref like
[[$out1_batch1, $out2_batch1], [$out1_batch2], ...]. This mode is useful because
in some cases (e.g. bucketing), the module does not necessarily produce the same
number of outputs.
The objects in the results are AI::MXNet::NDArrays. If you need to work with a PDL array,
just call ->aspdl() on each AI::MXNet::NDArray.
=cut
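For instance (assuming a bound, initialized $module and a validation iterator $val_iter):
my $outs      = $module->predict($val_iter);                      # merged across batches
my $per_batch = $module->predict($val_iter, merge_batches => 0);  # nested per-batch array ref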
lib/AI/MXNet/Module/Base.pm
# evaluation on validation set
if(defined $eval_data)
{
my $res = $self->score(
$eval_data,
$validation_metric,
score_end_callback => $eval_end_callback,
batch_end_callback => $eval_batch_end_callback,
epoch => $epoch
);
#TODO: pull this into default
while(my ($name, $val) = each %{ $res })
{
$self->logger->info('Epoch[%d] Validation-%s=%f', $epoch, $name, $val);
}
}
# end of 1 epoch, reset the data-iter for another epoch
$train_data->reset;
}
}
lib/AI/MXNet/Module/Base.pm
of modules.
:$force_rebind=0 : Bool
Default is 0. This function does nothing if the executors are already
bound. But with this set to 1, the executors will be forced to rebind.
:$shared_module= : A subclass of AI::MXNet::Module::Base
Default is undef. This is used in bucketing. When not undef, the shared module
essentially corresponds to a different bucket -- a module with different symbol
but with the same sets of parameters (e.g. unrolled RNNs with different lengths).
:$grad_req='write' : Str|ArrayRef[Str]|HashRef[Str]
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(defaults to 'write').
Can be specified globally (str) or for each argument (array ref, hash ref).
=cut
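For example, gradients can be disabled per argument (the weight name is hypothetical, and $module is assumed to exist):
$module->bind(
    [AI::MXNet::DataDesc->new('data', [32, 100])],
    grad_req => { data => 'null', fc_weight => 'write' },
);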
method bind(
ArrayRef[AI::MXNet::DataDesc] $data_shapes,
Maybe[ArrayRef[AI::MXNet::DataDesc]] :$label_shapes=,
Bool :$for_training=1,
Bool :$inputs_need_grad=0,
Bool :$force_rebind=0,
Maybe[AI::MXNet::BaseModule] :$shared_module=,
lib/AI/MXNet/Module/Bucketing.pm
{
$contexts = [map { mx->gpu($_) } split(/,/, $gpus)];
}
else
{
$contexts = mx->cpu(0);
}
my $model = mx->mod->BucketingModule(
sym_gen => $sym_gen,
default_bucket_key => $data_train->default_bucket_key,
context => $contexts
);
$model->fit(
$data_train,
eval_data => $data_val,
eval_metric => mx->metric->Perplexity($invalid_label),
kvstore => $kv_store,
optimizer => $optimizer,
optimizer_params => {
lib/AI/MXNet/Module/Bucketing.pm
=cut
=head2 new
Parameters
----------
$sym_gen : subref or any perl object that overloads the &{} op
A sub that, when called with a bucket key, returns a triple
($symbol, $data_names, $label_names).
$default_bucket_key : str or anything else
The key for the default bucket.
$logger : Logger
$context : AI::MXNet::Context or array ref of AI::MXNet::Context objects
Default is cpu(0)
$work_load_list : array ref of Num
Default is undef, indicating uniform workload.
$fixed_param_names: arrayref of str
Default is undef, indicating no network parameters are fixed.
$state_names : arrayref of str
states are similar to data and label, but not provided by data iterator.
Instead they are initialized to 0 and can be set by set_states()
=cut
extends 'AI::MXNet::Module::Base';
has '_sym_gen' => (is => 'ro', init_arg => 'sym_gen', required => 1);
has '_default_bucket_key' => (is => 'rw', init_arg => 'default_bucket_key', required => 1);
has '_context' => (
is => 'ro', isa => 'AI::MXNet::Context|ArrayRef[AI::MXNet::Context]',
lazy => 1, default => sub { AI::MXNet::Context->cpu },
init_arg => 'context'
);
has '_work_load_list' => (is => 'rw', init_arg => 'work_load_list', isa => 'ArrayRef[Num]');
has '_curr_module' => (is => 'rw', init_arg => undef);
has '_curr_bucket_key' => (is => 'rw', init_arg => undef);
has '_buckets' => (is => 'rw', init_arg => undef, default => sub { +{} });
has '_fixed_param_names' => (is => 'rw', isa => 'ArrayRef[Str]', init_arg => 'fixed_param_names');
has '_state_names' => (is => 'rw', isa => 'ArrayRef[Str]', init_arg => 'state_names');
has '_params_dirty' => (is => 'rw', init_arg => undef);
sub BUILD
{
my ($self, $original_params) = @_;
$self->_fixed_param_names([]) unless defined $original_params->{fixed_param_names};
$self->_state_names([]) unless defined $original_params->{state_names};
$self->_params_dirty(0);
my ($symbol, $data_names, $label_names) = &{$self->_sym_gen}($self->_default_bucket_key);
$self->_check_input_names($symbol, $data_names//[], "data", 1);
$self->_check_input_names($symbol, $label_names//[], "label", 0);
$self->_check_input_names($symbol, $self->_state_names, "state", 1);
$self->_check_input_names($symbol, $self->_fixed_param_names, "fixed_param", 1);
}
method _reset_bind()
{
$self->binded(0);
$self->_buckets({});
lib/AI/MXNet/Module/Bucketing.pm
}
method data_names()
{
if($self->binded)
{
return $self->_curr_module->data_names;
}
else
{
return (&{$self->_sym_gen}($self->_default_bucket_key))[1];
}
}
method output_names()
{
if($self->binded)
{
return $self->_curr_module->output_names;
}
else
{
my ($symbol) = &{$self->_sym_gen}($self->_default_bucket_key);
return $symbol->list_outputs;
}
}
method data_shapes()
{
assert($self->binded);
return $self->_curr_module->data_shapes;
}
lib/AI/MXNet/Module/Bucketing.pm
method set_states(:$states=, :$value=)
{
assert($self->binded and $self->params_initialized);
$self->_curr_module->set_states(states => $states, value => $value);
}
=head2 bind
Binding for an AI::MXNet::Module::Bucketing means setting up the buckets and binding the
executor for the default bucket key. Executors corresponding to other keys are
bound afterwards with switch_bucket.
Parameters
----------
:$data_shapes : ArrayRef[AI::MXNet::DataDesc|NameShape]
This should correspond to the symbol for the default bucket.
:$label_shapes= : Maybe[ArrayRef[AI::MXNet::DataDesc|NameShape]]
This should correspond to the symbol for the default bucket.
:$for_training : Bool
Default is 1.
:$inputs_need_grad : Bool
Default is 0.
:$force_rebind : Bool
Default is 0.
:$shared_module : AI::MXNet::Module::Bucketing
Default is undef. This value is currently not used.
:$grad_req : str, array ref of str, hash ref of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(defaults to 'write').
Can be specified globally (str) or for each argument (array ref, hash ref).
:$bucket_key : str
bucket key for binding; by default ->default_bucket_key is used
=cut
method bind(
ArrayRef[AI::MXNet::DataDesc|NameShape] :$data_shapes,
Maybe[ArrayRef[AI::MXNet::DataDesc|NameShape]] :$label_shapes=,
Bool :$for_training=1,
Bool :$inputs_need_grad=0,
Bool :$force_rebind=0,
Maybe[AI::MXNet::BaseModule] :$shared_module=,
Str|ArrayRef[Str]|HashRef[Str] :$grad_req='write',
lib/AI/MXNet/Module/Bucketing.pm
$self->logger->warning('Already bound, ignoring bind()');
return;
}
assert((not defined $shared_module), 'shared_module for BucketingModule is not supported');
$self->for_training($for_training);
$self->inputs_need_grad($inputs_need_grad);
$self->binded(1);
my ($symbol, $data_names, $label_names) = &{$self->_sym_gen}($bucket_key//$self->_default_bucket_key);
my $module = AI::MXNet::Module->new(
symbol => $symbol,
data_names => $data_names,
label_names => $label_names,
logger => $self->logger,
context => $self->_context,
work_load_list => $self->_work_load_list,
state_names => $self->_state_names,
fixed_param_names => $self->_fixed_param_names
);
$module->bind(
data_shapes => $data_shapes,
label_shapes => $label_shapes,
for_training => $for_training,
inputs_need_grad => $inputs_need_grad,
force_rebind => 0,
shared_module => undef,
grad_req => $grad_req
);
$self->_curr_module($module);
$self->_curr_bucket_key($self->_default_bucket_key);
$self->_buckets->{ $self->_default_bucket_key } = $module;
# copy back saved params, if already initialized
if($self->params_initialized)
{
$self->set_params($arg_params, $aux_params);
}
}
=head2 switch_bucket
lib/AI/MXNet/Module/Bucketing.pm
logger => $self->logger,
context => $self->_context,
work_load_list => $self->_work_load_list
);
$module->bind(
data_shapes => $data_shapes,
label_shapes => $label_shapes,
for_training => $self->_curr_module->for_training,
inputs_need_grad => $self->_curr_module->inputs_need_grad,
force_rebind => 0,
shared_module => $self->_buckets->{ $self->_default_bucket_key },
);
$self->_buckets->{ $bucket_key } = $module;
}
$self->_curr_module($self->_buckets->{ $bucket_key });
$self->_curr_bucket_key($bucket_key);
}
method init_optimizer(
Str :$kvstore='local',
Optimizer :$optimizer='sgd',
lib/AI/MXNet/Module/Bucketing.pm
method symbol()
{
assert($self->binded);
return $self->_curr_module->symbol;
}
method get_symbol()
{
assert($self->binded);
return $self->_buckets->{ $self->_default_bucket_key }->symbol;
}
method install_monitor(AI::MXNet::Monitor $mon)
{
assert($self->binded);
for my $mod (values %{ $self->_buckets })
{
$mod->install_monitor($mon);
}
}
lib/AI/MXNet/Monitor.pm
=head1 DESCRIPTION
Monitor outputs, weights, and gradients for debugging.
Parameters
----------
interval : int
Number of batches between printing.
stat_func : function
a function that computes statistics of tensors.
Takes an NDArray and returns an NDArray. Defaults to the averaged
norm, norm(x)/sqrt(size(x)).
pattern : str
A regular expression specifying which tensors to monitor.
Only tensors with names that match name_pattern will be included.
For example, '.*weight|.*output' will print all weights and outputs;
'.*backward.*' will print all gradients.
=cut
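A brief usage sketch ($module is assumed to be a bound AI::MXNet::Module; the tic/toc_print calls mirror the Python Monitor interface):
my $mon = AI::MXNet::Monitor->new(interval => 100, pattern => '.*weight');
$module->install_monitor($mon);
$mon->tic;          # arm collection before a forward pass
# ... forward/backward ...
$mon->toc_print;    # print the collected statistics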
has 'interval' => (is => 'ro', isa => 'Int', required => 1);
has 'stat_func' => (
is => 'ro',
isa => 'CodeRef',
default => sub {
return sub {
# returns norm(x)/sqrt(size(x)), async execution.
my ($x) = @_;
return $x->norm/sqrt($x->size);
}
},
lazy => 1
);
has 'pattern' => (is => 'ro', isa => 'Str', default => '.*');
has '_sort' => (is => 'ro', isa => 'Bool', init_arg => 'sort', default => 0);
has [qw/queue exes/] => (is => 'rw', init_arg => undef, default => sub { [] });
has [qw/step activated/] => (is => 'rw', init_arg => undef, default => 0);
has 're_pattern' => (
is => 'ro',
init_arg => undef,
default => sub {
my $pattern = shift->pattern;
my $re = eval { qr/$pattern/ };
confess("pattern $pattern failed to compile as a regexp $@")
if $@;
return $re;
},
lazy => 1
);
has 'stat_helper' => (
is => 'ro',
init_arg => undef,
default => sub {
my $self = shift;
return sub {
my ($name, $handle) = @_;
return if(not $self->activated or not $name =~ $self->re_pattern);
my $array = AI::MXNet::NDArray->new(handle => $handle, writable => 0);
push @{ $self->queue }, [$self->step, $name, $self->stat_func->($array)];
}
},
lazy => 1
);
lib/AI/MXNet/NDArray.pm
'==' => \&equal,
'!=' => \&not_equal,
'>' => \&greater,
'>=' => \&greater_equal,
'<' => \&lesser,
'<=' => \&lesser_equal,
'.=' => \&set,
'=' => sub { $_[0] };
extends 'AI::MXNet::NDArray::Base';
has 'writable' => (is => 'rw', isa => 'Int', default => 1, lazy => 1);
has 'handle' => (is => 'rw', isa => 'NDArrayHandle', required => 1);
sub DEMOLISH
{
check_call(AI::MXNetCAPI::NDArrayFree(shift->handle));
}
method STORABLE_freeze($cloning)
{
my $buf = check_call(AI::MXNetCAPI::NDArraySaveRawBytes($self->handle));
lib/AI/MXNet/NDArray.pm
}
=head2 dtype
The data type of current NDArray.
Returns
-------
a data type string ('float32', 'float64', 'float16', 'uint8', 'int32')
representing the data type of the ndarray.
'float32' is the default dtype for the ndarray class.
=cut
method dtype()
{
my $dtype = check_call(
AI::MXNetCAPI::NDArrayGetDType(
$self->handle
)
);
return DTYPE_MX_TO_STR->{ $dtype };
lib/AI/MXNet/NDArray.pm
=head2 empty
Creates an empty uninitialized NDArray, with the specified shape.
Parameters
----------
$shape : Shape
shape of the NDArray.
:$ctx : AI::MXNet::Context, optional
The context of the NDArray, defaults to current default context.
:$dtype : Dtype, optional
The dtype of the NDArray, defaults to 'float32'.
Returns
-------
$out : NDArray
The created NDArray.
=cut
method empty(Shape $shape, AI::MXNet::Context :$ctx=AI::MXNet::Context->current_ctx, Dtype :$dtype='float32')
{
return __PACKAGE__->new(
lib/AI/MXNet/NDArray.pm
=head2 zeros
Creates a new NDArray filled with 0, with specified shape.
Parameters
----------
$shape : Shape
shape of the NDArray.
:$ctx : AI::MXNet::Context, optional
The context of the NDArray, defaults to current default context.
:$dtype : Dtype, optional
The dtype of the NDArray, defaults to 'float32'.
Returns
-------
$out : NDArray
The created NDArray.
=cut
method zeros(
Shape $shape,
AI::MXNet::Context :$ctx=AI::MXNet::Context->current_ctx,
lib/AI/MXNet/NDArray.pm
=head2 ones
Creates a new NDArray filled with 1, with specified shape.
Parameters
----------
$shape : Shape
shape of the NDArray.
:$ctx : AI::MXNet::Context, optional
The context of the NDArray, defaults to current default context.
:$dtype : Dtype, optional
The dtype of the NDArray, defaults to 'float32'.
Returns
-------
$out : NDArray
The created NDArray.
=cut
method ones(
Shape $shape,
AI::MXNet::Context :$ctx=AI::MXNet::Context->current_ctx,
lib/AI/MXNet/NDArray.pm
Parameters
----------
$shape : Shape
shape of the NDArray.
$val : float or int
The value to be filled with.
:$ctx : AI::MXNet::Context, optional
The context of the NDArray, defaults to current default context.
:$dtype : Dtype, optional
The dtype of the NDArray, defaults to 'float32'.
Returns
-------
$out : NDArray
The created NDArray.
=cut
method full(
Shape $shape, Num $val,
AI::MXNet::Context :$ctx=AI::MXNet::Context->current_ctx,
lib/AI/MXNet/NDArray.pm
=head2 array
Creates a new NDArray that is a copy of the source_array.
Parameters
----------
$source_array : AI::MXNet::NDArray PDL, PDL::Matrix, Array ref in PDL::pdl format
Source data to create NDArray from.
:$ctx : AI::MXNet::Context, optional
The context of the NDArray, defaults to current default context.
:$dtype : Dtype, optional
The dtype of the NDArray, defaults to 'float32'.
Returns
-------
$out : NDArray
The created NDArray.
=cut
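A combined sketch of these constructors (assuming the usual mx->nd shortcut to AI::MXNet::NDArray):
my $e = mx->nd->empty([2, 3]);                      # uninitialized
my $z = mx->nd->zeros([2, 3], dtype => 'float16');
my $o = mx->nd->ones([2, 3], ctx => mx->cpu(0));
my $f = mx->nd->full([2, 3], 7);
my $c = mx->nd->array([[1, 2, 3], [4, 5, 6]]);      # copies a perl array ref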
method array(PDL|PDL::Matrix|ArrayRef|AI::MXNet::NDArray $source_array, AI::MXNet::Context :$ctx=AI::MXNet::Context->current_ctx, Dtype :$dtype='float32')
{
if(blessed $source_array and $source_array->isa('AI::MXNet::NDArray'))
lib/AI/MXNet/NDArray.pm
}
=head2 arange
Similar function in the MXNet ndarray as numpy.arange
See Also https://docs.scipy.org/doc/numpy/reference/generated/numpy.arange.html.
Parameters
----------
:$start=0 : number, optional
Start of interval. The interval includes this value. The default start value is 0.
$stop= : number, optional
End of interval. The interval does not include this value.
:$step=1 : number, optional
Spacing between the values
:$repeat=1 : number, optional
The number of times each element is repeated.
E.g. with repeat=3, the element a is repeated three times --> a, a, a.
:$ctx : Context, optional
The context of the NDArray, defaults to the current default context.
:$dtype : data type, optional
The value type of the NDArray, defaults to float32
Returns
-------
$out : NDArray
The created NDArray
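For example (a sketch):

    my $r = mx->nd->arange(start => 0, stop => 6, step => 2);  # 0, 2, 4
    my $s = mx->nd->arange(stop => 3, repeat => 2);            # 0, 0, 1, 1, 2, 2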
=cut
method arange(Index :$start=0, Index :$stop=, Index :$step=1, Index :$repeat=1,
AI::MXNet::Context :$ctx=AI::MXNet::Context->current_ctx, Dtype :$dtype='float32')
{
lib/AI/MXNet/Optimizer.pm view on Meta::CPAN
return $opt_registry{ lc $name }->new(
rescale_grad => $rescale_grad,
%kwargs
);
}
confess("Cannot find optimizer $name");
}
*create = \&create_optimizer;
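# For example (a sketch): look up a registered optimizer by its (lower-cased) name.
# my $opt = AI::MXNet::Optimizer->create('sgd', learning_rate => 0.1, momentum => 0.9);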
has 'rescale_grad' => (is => "rw", isa => "Num", default=>1);
has 'lr' => (is => "rw", isa => "Num");
has 'learning_rate' => (is => "rw", isa => "Num", default => 0.01);
has 'lr_scheduler' => (is => "rw", isa => "Maybe[AI::MXNet::LRScheduler]");
has 'wd' => (is => "rw", isa => "Num", default => 0);
has 'lr_mult' => (is => "rw", isa => "HashRef", default => sub { +{} });
has 'wd_mult' => (is => "rw", isa => "HashRef", , default => sub { +{} });
has 'num_update' => (is => "rw", isa => "Int");
has 'begin_num_update' => (is => "rw", isa => "Int", default => 0);
has '_index_update_count' => (is => "rw", isa => "HashRef", default => sub { +{} });
has 'clip_gradient' => (is => "rw", isa => "Maybe[Num]");
has 'param_idx2name' => (is => "rw", isa => "HashRef[Str]", default => sub { +{} });
has 'idx2name' => (is => "rw", isa => "HashRef[Str]");
has 'sym' => (is => "rw", isa => "Maybe[AI::MXNet::Symbol]");
sub BUILD
{
my $self = shift;
if($self->lr_scheduler)
{
$self->lr_scheduler->base_lr($self->learning_rate);
}
lib/AI/MXNet/Optimizer.pm view on Meta::CPAN
$self->lr_mult->{ $name } = $attr->{ $name }{ __lr_mult__ };
}
}
}
$self->lr_mult({ %{ $self->lr_mult }, %{ $args_lr_mult } });
}
=head2 set_wd_mult
Set individual weight decay multipliers for parameters.
By default the wd multiplier is 0 for all params whose names don't
end with _weight, if param_idx2name is provided.
Parameters
----------
args_wd_mult : dict of string/int to float
set the wd multiplier for name/index to float.
setting the multiplier by index is supported for backward compatibility,
but we recommend using name and symbol.
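For example (a sketch; fc1_bias is a hypothetical parameter name):

    $optimizer->set_wd_mult({ fc1_bias => 0 });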
=cut
lib/AI/MXNet/Optimizer.pm view on Meta::CPAN
rescaling factor of gradient. Normally should be 1/batch_size.
clip_gradient : float, optional
clip gradient in range [-clip_gradient, clip_gradient]
param_idx2name : hash of string/int to float, optional
special treatment of weight decay for parameters whose names end with bias, gamma, or beta
multi_precision: bool, optional
Flag to control the internal precision of the optimizer.
False results in using the same precision as the weights (default),
True makes internal 32-bit copy of the weights and applies gradients
in 32-bit precision even if actual weights used in the model have lower precision.
Turning this on can improve convergence and accuracy when training with float16.
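For example (a sketch): an SGD instance suitable for training with float16 weights:

    my $sgd = AI::MXNet::SGD->new(momentum => 0.9, multi_precision => 1);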
=cut
package AI::MXNet::SGD;
use Mouse;
extends 'AI::MXNet::Optimizer';
has 'kwargs' => (is => "rw", isa => "HashRef[Num]");
has 'momentum' => (is => "rw", isa => "Num", default => 0);
has 'multi_precision' => (is => "ro", isa => "Bool", default => 0);
sub BUILD
{
my $self = shift;
$self->kwargs({ rescale_grad => $self->rescale_grad });
if($self->momentum)
{
$self->kwargs->{momentum} = $self->momentum;
}
if($self->clip_gradient)
lib/AI/MXNet/Optimizer.pm view on Meta::CPAN
rescale_grad : float, optional
rescaling factor of gradient. Normally should be 1/batch_size.
clip_gradient : float, optional
clip gradient in range [-clip_gradient, clip_gradient]
param_idx2name : hash ref of string/int to float, optional
special treatment of weight decay for parameters whose names end with bias, gamma, or beta
=cut
has 'momentum' => (is => 'ro', isa => 'Num', default => 0);
has 'lamda' => (is => 'ro', isa => 'Num', default => 0.04);
has 'weight_previous' => (is => 'rw', init_arg => undef);
sub BUILD
{
my $self = shift;
$self->weight_previous({});
}
method create_state(Index $index, AI::MXNet::NDArray $weight)
{
lib/AI/MXNet/Optimizer.pm view on Meta::CPAN
clip_gradient : float, optional
clip gradient in range [-clip_gradient, clip_gradient]
=cut
package AI::MXNet::Adam;
use Mouse;
extends 'AI::MXNet::Optimizer';
has 'kwargs' => (is => "rw", isa => "HashRef[Num]");
has '+learning_rate' => (default => 0.001);
has 'beta1' => (is => "rw", isa => "Num", default => 0.9);
has 'beta2' => (is => "rw", isa => "Num", default => 0.999);
has 'epsilon' => (is => "rw", isa => "Num", default => 1e-8);
has 'decay_factor' => (is => "rw", isa => "Num", default => (1 - 1e-8));
sub BUILD
{
my $self = shift;
$self->kwargs({
rescale_grad => $self->rescale_grad,
beta1 => $self->beta1,
beta2 => $self->beta2,
epsilon => $self->epsilon
});
lib/AI/MXNet/Optimizer.pm view on Meta::CPAN
Default value is set to 1e-7.
clip_gradient : float, optional
clip gradient in range [-clip_gradient, clip_gradient]
=cut
package AI::MXNet::AdaGrad;
use Mouse;
extends 'AI::MXNet::Optimizer';
has 'float_stable_eps' => (is => "rw", isa => "Num", default => 1e-7);
has '+learning_rate' => (default => 0.05);
method create_state(Index $index, AI::MXNet::NDArray $weight)
{
return AI::MXNet::NDArray->zeros(
$weight->shape,
ctx => $weight->context
); # history
}
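# The update step (roughly) uses this history as:
#   $history += $grad * $grad;
#   $weight  -= $lr * ($grad / sqrt($history + $float_stable_eps) + $wd * $weight);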
method update(
lib/AI/MXNet/Optimizer.pm view on Meta::CPAN
clip gradient in range [-clip_gradient, clip_gradient]
clip_weights : float, optional
clip weights in range [-clip_weights, clip_weights]
=cut
package AI::MXNet::RMSProp;
use Mouse;
extends 'AI::MXNet::Optimizer';
has '+learning_rate' => (default => 0.001);
has 'gamma1' => (is => "ro", isa => "Num", default => 0.9);
has 'gamma2' => (is => "ro", isa => "Num", default => 0.9);
has 'epsilon' => (is => "ro", isa => "Num", default => 1e-8);
has 'centered' => (is => "ro", isa => "Bool", default => 0);
has 'clip_weights' => (is => "ro", isa => "Num");
has 'kwargs' => (is => "rw", init_arg => undef);
sub BUILD
{
my $self = shift;
$self->kwargs({
rescale_grad => $self->rescale_grad,
gamma1 => $self->gamma1,
epsilon => $self->epsilon
lib/AI/MXNet/Optimizer.pm view on Meta::CPAN
rescale_grad : float, optional
rescaling factor of gradient. Normally should be 1/batch_size.
clip_gradient : float, optional
clip gradient in range [-clip_gradient, clip_gradient]
=cut
package AI::MXNet::AdaDelta;
use Mouse;
extends 'AI::MXNet::Optimizer';
has 'rho' => (is => "rw", isa => "Num", default => 0.9);
has 'epsilon' => (is => "rw", isa => "Num", default => 1e-5);
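# AdaDelta (Zeiler, 2012) keeps exponential moving averages of both squared
# gradients and squared updates; rho is their decay rate and epsilon is added
# for numerical stability, so no global learning rate needs to be tuned.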
method create_state(Index $index, AI::MXNet::NDArray $weight)
{
return [
AI::MXNet::NDArray->zeros(
$weight->shape,
ctx => $weight->context
), # accumulated g
AI::MXNet::NDArray->zeros(
$weight->shape,
lib/AI/MXNet/Optimizer.pm view on Meta::CPAN
learning_rate : float, optional
The initial learning rate.
beta : float, optional
Per-coordinate learning rate correlation parameter.
eta_{t,i} = frac{learning_rate}{beta + sqrt{sum_{s=1}^{t} g_{s,i}^2}}
=cut
use Mouse;
extends 'AI::MXNet::Optimizer';
has '+learning_rate' => (default => 0.1);
has 'beta' => (is => "ro", isa => "Num", default => 1);
has 'lambda1' => (is => "ro", isa => "Num", default => 0.9);
method create_state(Index $index, AI::MXNet::NDArray $weight)
{
return [
AI::MXNet::NDArray->zeros(
$weight->shape,
ctx => $weight->context
), # dn
AI::MXNet::NDArray->zeros(
$weight->shape,
lib/AI/MXNet/Optimizer.pm view on Meta::CPAN
Parameters
----------
beta1 : float, optional
Exponential decay rate for the first moment estimates.
beta2 : float, optional
Exponential decay rate for the second moment estimates.
=cut
use Mouse;
extends 'AI::MXNet::Optimizer';
has '+learning_rate' => (default => 0.002);
has 'beta1' => (is => "ro", isa => "Num", default => 0.9);
has 'beta2' => (is => "ro", isa => "Num", default => 0.999);
method create_state(Index $index, AI::MXNet::NDArray $weight)
{
return [
AI::MXNet::NDArray->zeros(
$weight->shape,
ctx => $weight->context,
dtype => $weight->dtype
), # mean
AI::MXNet::NDArray->zeros(
lib/AI/MXNet/Optimizer.pm view on Meta::CPAN
beta2 : float, optional
Exponential decay rate for the second moment estimates.
epsilon : float, optional
Small value to avoid division by 0.
schedule_decay : float, optional
Exponential decay rate for the momentum schedule
=cut
use Mouse;
extends 'AI::MXNet::Optimizer';
has '+learning_rate' => (default => 0.001);
has 'beta1' => (is => "ro", isa => "Num", default => 0.9);
has 'beta2' => (is => "ro", isa => "Num", default => 0.999);
has 'epsilon' => (is => "ro", isa => "Num", default => 1e-8);
has 'schedule_decay' => (is => "ro", isa => "Num", default => 0.004);
has 'm_schedule' => (is => "rw", default => 1, init_arg => undef);
method create_state(Index $index, AI::MXNet::NDArray $weight)
{
return [
AI::MXNet::NDArray->zeros(
$weight->shape,
ctx => $weight->context,
dtype => $weight->dtype
), # mean
AI::MXNet::NDArray->zeros(
lib/AI/MXNet/Optimizer.pm view on Meta::CPAN
__PACKAGE__->register;
# updater for kvstore
package AI::MXNet::Updater;
use Mouse;
use Storable qw(thaw freeze);
use overload "&{}" => sub { my $self = shift; sub { $self->call(@_) } },
fallback => 1;
has "optimizer" => (is => "rw", isa => "AI::MXNet::Optimizer");
has "states" => (is => "rw", isa => "HashRef", default => sub { +{} });
has "states_synced" => (is => "rw", isa => "HashRef", default => sub { +{} });
method call(Index $index, AI::MXNet::NDArray $grad, AI::MXNet::NDArray $weight)
{
if(not exists $self->states->{ $index })
{
$self->states->{ $index } = $self->optimizer->create_state($index, $weight);
$self->states_synced->{ $index } = 1;
}
elsif(not $self->states_synced->{ $index })
{
lib/AI/MXNet/RNN/Cell.pm view on Meta::CPAN
A container for holding variables.
Used by RNN cells for parameter sharing between cells.
Parameters
----------
prefix : str
The names of all variables created by this container
will be prepended with the prefix
=cut
has '_prefix' => (is => 'ro', init_arg => 'prefix', isa => 'Str', default => '');
has '_params' => (is => 'rw', init_arg => undef);
around BUILDARGS => sub {
my $orig = shift;
my $class = shift;
return $class->$orig(prefix => $_[0]) if @_ == 1;
return $class->$orig(@_);
};
sub BUILD
{
lib/AI/MXNet/RNN/Cell.pm view on Meta::CPAN
prefix for name of layers
(and name of weight if params is undef)
params : AI::MXNet::RNN::Params or undef
container for weight sharing between cells.
created if undef.
=cut
use AI::MXNet::Base;
use Mouse;
use overload "&{}" => sub { my $self = shift; sub { $self->call(@_) } };
has '_prefix' => (is => 'rw', init_arg => 'prefix', isa => 'Str', default => '');
has '_params' => (is => 'rw', init_arg => 'params', isa => 'Maybe[AI::MXNet::RNN::Params]');
has [qw/_own_params
_modified
_init_counter
_counter
/] => (is => 'rw', init_arg => undef);
around BUILDARGS => sub {
my $orig = shift;
my $class = shift;
lib/AI/MXNet/RNN/Cell.pm view on Meta::CPAN
{
confess("Not Implemented");
}
=head2 begin_state
Initial state for this cell.
Parameters
----------
:$func : sub ref, default is AI::MXNet::Symbol->can('zeros')
Function for creating initial state.
Can be AI::MXNet::Symbol->can('zeros'),
AI::MXNet::Symbol->can('uniform'), AI::MXNet::Symbol->can('Variable') etc.
Use AI::MXNet::Symbol->can('Variable') if you want to directly
feed the input as states.
@kwargs :
more keyword arguments passed to func. For example
mean, std, dtype, etc.
Returns
lib/AI/MXNet/RNN/Cell.pm view on Meta::CPAN
=cut
=head1 DESCRIPTION
Simple recurrent neural network cell
Parameters
----------
num_hidden : int
number of units in output symbol
activation : str or Symbol, default 'tanh'
type of activation function
prefix : str, default 'rnn_'
prefix for name of layers
(and name of weight if params is undef)
params : AI::MXNet::RNN::Params or undef
container for weight sharing between cells.
created if undef.
=cut
has '_num_hidden' => (is => 'ro', init_arg => 'num_hidden', isa => 'Int', required => 1);
has 'forget_bias' => (is => 'ro', isa => 'Num');
has '_activation' => (
is => 'ro',
init_arg => 'activation',
isa => 'Activation',
default => 'tanh'
);
has '+_prefix' => (default => 'rnn_');
has [qw/_iW _iB
_hW _hB/] => (is => 'rw', init_arg => undef);
around BUILDARGS => sub {
my $orig = shift;
my $class = shift;
return $class->$orig(num_hidden => $_[0]) if @_ == 1;
return $class->$orig(@_);
};
lib/AI/MXNet/RNN/Cell.pm view on Meta::CPAN
=cut
=head1 DESCRIPTION
Long-Short Term Memory (LSTM) network cell.
Parameters
----------
num_hidden : int
number of units in output symbol
prefix : str, default 'lstm_'
prefix for name of layers
(and name of weight if params is undef)
params : AI::MXNet::RNN::Params or undef
container for weight sharing between cells.
created if undef.
forget_bias : bias added to forget gate, default 1.0.
Jozefowicz et al. 2015 recommends setting this to 1.0
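For example (a sketch):

    my $lstm   = mx->rnn->LSTMCell(num_hidden => 256, prefix => 'lstm_');
    my $states = $lstm->begin_state;   # initial state symbols (h, c)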
=cut
has '+_prefix' => (default => 'lstm_');
has '+_activation' => (init_arg => undef);
has '+forget_bias' => (is => 'ro', isa => 'Num', default => 1);
method state_info()
{
return [{ shape => [0, $self->_num_hidden], __layout__ => 'NC' } , { shape => [0, $self->_num_hidden], __layout__ => 'NC' }];
}
method _gate_names()
{
[qw/_i _f _c _o/];
}
lib/AI/MXNet/RNN/Cell.pm view on Meta::CPAN
=head1 DESCRIPTION
Gated Recurrent Unit (GRU) network cell.
Note: this is an implementation of the cuDNN version of GRUs
(slight modification compared to Cho et al. 2014).
Parameters
----------
num_hidden : int
number of units in output symbol
prefix : str, default 'gru_'
prefix for name of layers
(and name of weight if params is undef)
params : AI::MXNet::RNN::Params or undef
container for weight sharing between cells.
created if undef.
=cut
has '+_prefix' => (default => 'gru_');
method _gate_names()
{
[qw/_r _z _o/];
}
method call(AI::MXNet::Symbol $inputs, SymbolOrArrayOfSymbols $states)
{
$self->_counter($self->_counter + 1);
my $name = sprintf('%st%d_', $self->_prefix, $self->_counter);
lib/AI/MXNet/RNN/Cell.pm view on Meta::CPAN
=cut
=head1 DESCRIPTION
Fusing RNN layers across time step into one kernel.
Improves speed but is less flexible. Currently only
supported if using cuDNN on GPU.
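For example (a sketch, in the spirit of the cudnn_lstm_bucketing.pl example):

    my $fused = mx->rnn->FusedRNNCell(
        num_hidden => 256, num_layers => 2,
        mode => 'lstm', bidirectional => 1
    );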
=cut
has '_num_hidden' => (is => 'ro', isa => 'Int', init_arg => 'num_hidden', required => 1);
has '_num_layers' => (is => 'ro', isa => 'Int', init_arg => 'num_layers', default => 1);
has '_dropout' => (is => 'ro', isa => 'Num', init_arg => 'dropout', default => 0);
has '_get_next_state' => (is => 'ro', isa => 'Bool', init_arg => 'get_next_state', default => 0);
has '_bidirectional' => (is => 'ro', isa => 'Bool', init_arg => 'bidirectional', default => 0);
has 'forget_bias' => (is => 'ro', isa => 'Num', default => 1);
has 'initializer' => (is => 'rw', isa => 'Maybe[Initializer]');
has '_mode' => (
is => 'ro',
isa => enum([qw/rnn_relu rnn_tanh lstm gru/]),
init_arg => 'mode',
default => 'lstm'
);
has [qw/_parameter
_directions/] => (is => 'rw', init_arg => undef);
around BUILDARGS => sub {
my $orig = shift;
my $class = shift;
return $class->$orig(num_hidden => $_[0]) if @_ == 1;
return $class->$orig(@_);
};
lib/AI/MXNet/RNN/Cell.pm view on Meta::CPAN
=head1 DESCRIPTION
Bidirectional RNN cell
Parameters
----------
l_cell : AI::MXNet::RNN::Cell::Base
cell for forward unrolling
r_cell : AI::MXNet::RNN::Cell::Base
cell for backward unrolling
output_prefix : str, default 'bi_'
prefix for name of output
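For example (a sketch):

    my $bi = mx->rnn->BidirectionalCell(
        mx->rnn->LSTMCell(num_hidden => 128, prefix => 'l_'),
        mx->rnn->LSTMCell(num_hidden => 128, prefix => 'r_')
    );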
=cut
has 'l_cell' => (is => 'ro', isa => 'AI::MXNet::RNN::Cell::Base', required => 1);
has 'r_cell' => (is => 'ro', isa => 'AI::MXNet::RNN::Cell::Base', required => 1);
has '_output_prefix' => (is => 'ro', init_arg => 'output_prefix', isa => 'Str', default => 'bi_');
has [qw/_override_cell_params _cells/] => (is => 'rw', init_arg => undef);
around BUILDARGS => sub {
my $orig = shift;
my $class = shift;
if(@_ >= 2 and blessed $_[0] and blessed $_[1])
{
my $l_cell = shift(@_);
my $r_cell = shift(@_);
return $class->$orig(
lib/AI/MXNet/RNN/Cell.pm view on Meta::CPAN
has '_h2h_kernel' => (is => 'ro', isa => 'Shape', init_arg => 'h2h_kernel');
has '_h2h_dilate' => (is => 'ro', isa => 'Shape', init_arg => 'h2h_dilate');
has '_h2h_pad' => (is => 'rw', isa => 'Shape', init_arg => undef);
has '_i2h_kernel' => (is => 'ro', isa => 'Shape', init_arg => 'i2h_kernel');
has '_i2h_stride' => (is => 'ro', isa => 'Shape', init_arg => 'i2h_stride');
has '_i2h_dilate' => (is => 'ro', isa => 'Shape', init_arg => 'i2h_dilate');
has '_i2h_pad' => (is => 'ro', isa => 'Shape', init_arg => 'i2h_pad');
has '_num_hidden' => (is => 'ro', isa => 'DimSize', init_arg => 'num_hidden');
has '_input_shape' => (is => 'ro', isa => 'Shape', init_arg => 'input_shape');
has '_conv_layout' => (is => 'ro', isa => 'Str', init_arg => 'conv_layout', default => 'NCHW');
has '_activation' => (is => 'ro', init_arg => 'activation');
has '_state_shape' => (is => 'rw', init_arg => undef);
has [qw/i2h_weight_initializer h2h_weight_initializer
i2h_bias_initializer h2h_bias_initializer/] => (is => 'rw', isa => 'Maybe[Initializer]');
sub BUILD
{
my $self = shift;
assert (
($self->_h2h_kernel->[0] % 2 == 1 and $self->_h2h_kernel->[1] % 2 == 1),
lib/AI/MXNet/RNN/Cell.pm view on Meta::CPAN
=head1 DESCRIPTION
Convolutional RNN cells
Parameters
----------
input_shape : array ref of int
Shape of input in single timestep.
num_hidden : int
Number of units in output symbol.
h2h_kernel : array ref of int, default (3, 3)
Kernel of Convolution operator in state-to-state transitions.
h2h_dilate : array ref of int, default (1, 1)
Dilation of Convolution operator in state-to-state transitions.
i2h_kernel : array ref of int, default (3, 3)
Kernel of Convolution operator in input-to-state transitions.
i2h_stride : array ref of int, default (1, 1)
Stride of Convolution operator in input-to-state transitions.
i2h_pad : array ref of int, default (1, 1)
Pad of Convolution operator in input-to-state transitions.
i2h_dilate : array ref of int, default (1, 1)
Dilation of Convolution operator in input-to-state transitions.
activation : str or Symbol,
default: sub { AI::MXNet::Symbol->LeakyReLU(@_, act_type => 'leaky', slope => 0.2) }
Type of activation function.
prefix : str, default 'ConvRNN_'
Prefix for name of layers (and name of weight if params is undef).
params : AI::MXNet::RNN::Params, default undef
Container for weight sharing between cells. Created if undef.
conv_layout : str, default 'NCHW'
Layout of ConvolutionOp
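For example (a sketch, constructing the base cell directly by package name):

    my $conv_rnn = AI::MXNet::RNN::ConvCell->new(
        input_shape => [3, 16, 16],
        num_hidden  => 10
    );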
=cut
has '+_h2h_kernel' => (default => sub { [3, 3] });
has '+_h2h_dilate' => (default => sub { [1, 1] });
has '+_i2h_kernel' => (default => sub { [3, 3] });
has '+_i2h_stride' => (default => sub { [1, 1] });
has '+_i2h_dilate' => (default => sub { [1, 1] });
has '+_i2h_pad' => (default => sub { [1, 1] });
has '+_prefix' => (default => 'ConvRNN_');
has '+_activation' => (default => sub { sub { AI::MXNet::Symbol->LeakyReLU(@_, act_type => 'leaky', slope => 0.2) } });
has '+i2h_bias_initializer' => (default => 'zeros');
has '+h2h_bias_initializer' => (default => 'zeros');
has 'forget_bias' => (is => 'ro', isa => 'Num');
has [qw/_iW _iB
_hW _hB/] => (is => 'rw', init_arg => undef);
sub BUILD
{
my $self = shift;
$self->_iW($self->_params->get('i2h_weight', init => $self->i2h_weight_initializer));
$self->_hW($self->_params->get('h2h_weight', init => $self->h2h_weight_initializer));
lib/AI/MXNet/RNN/Cell.pm view on Meta::CPAN
$self->_counter($self->_counter + 1);
my $name = sprintf('%st%d_', $self->_prefix, $self->_counter);
my ($i2h, $h2h) = $self->_conv_forward($inputs, $states, $name);
my $output = $self->_get_activation($i2h + $h2h, $self->_activation, name => "${name}out");
return ($output, [$output]);
}
package AI::MXNet::RNN::ConvLSTMCell;
use Mouse;
extends 'AI::MXNet::RNN::ConvCell';
has '+forget_bias' => (default => 1);
has '+_prefix' => (default => 'ConvLSTM_');
=head1 NAME
AI::MXNet::RNN::ConvLSTMCell
=cut
=head1 DESCRIPTION
Convolutional LSTM network cell.
lib/AI/MXNet/RNN/Cell.pm view on Meta::CPAN
my $next_h = AI::MXNet::Symbol->_mul(
$out_gate, $self->_get_activation($next_c, $self->_activation),
name => "${name}out"
);
return ($next_h, [$next_h, $next_c]);
}
package AI::MXNet::RNN::ConvGRUCell;
use Mouse;
extends 'AI::MXNet::RNN::ConvCell';
has '+_prefix' => (default => 'ConvGRU_');
=head1 NAME
AI::MXNet::RNN::ConvGRUCell
=cut
=head1 DESCRIPTION
Convolutional GRU network cell.
=cut
lib/AI/MXNet/RNN/Cell.pm view on Meta::CPAN
}
method call(AI::MXNet::Symbol $inputs, SymbolOrArrayOfSymbols $states)
{
confess("Not Implemented");
}
package AI::MXNet::RNN::DropoutCell;
use Mouse;
extends 'AI::MXNet::RNN::ModifierCell';
has [qw/dropout_outputs dropout_states/] => (is => 'ro', isa => 'Num', default => 0);
=head1 NAME
AI::MXNet::RNN::DropoutCell
=cut
=head1 DESCRIPTION
Apply the dropout on base cell
=cut
lib/AI/MXNet/RNN/Cell.pm view on Meta::CPAN
{
$states = [map { AI::MXNet::Symbol->Dropout(data => $_, p => $self->dropout_states) } @{ $states }];
}
return ($output, $states);
}
package AI::MXNet::RNN::ZoneoutCell;
use Mouse;
use AI::MXNet::Base;
extends 'AI::MXNet::RNN::ModifierCell';
has [qw/zoneout_outputs zoneout_states/] => (is => 'ro', isa => 'Num', default => 0);
has 'prev_output' => (is => 'rw', init_arg => undef);
=head1 NAME
AI::MXNet::RNN::ZoneoutCell
=cut
=head1 DESCRIPTION
Apply Zoneout on base cell.
lib/AI/MXNet/RNN/IO.pm view on Meta::CPAN
from string tokens to integer indices. Unknown keys
will be added to the vocabulary.
Parameters
----------
$sentences : array ref of array refs of str
A array ref of sentences to encode. Each sentence
should be a array ref of string tokens.
:$vocab : undef or hash ref of str -> int
Optional input Vocabulary
:$invalid_label : int, default -1
Index for invalid token, like <end-of-sentence>
:$invalid_key : str, default '\n'
Key for invalid token. Uses '\n' for end
of sentence by default.
:$start_label=0 : int
lowest index.
Returns
-------
$result : array ref of array refs of int
encoded sentences
$vocab : hash ref of str -> int
result vocabulary
=cut
lib/AI/MXNet/RNN/IO.pm view on Meta::CPAN
=cut
=head2 new
Parameters
----------
sentences : array ref of array refs of int
encoded sentences
batch_size : int
batch_size of data
invalid_label : int, default -1
key for invalid label, e.g. <end-of-sentence>
dtype : str, default 'float32'
data type
buckets : array ref of int
size of data buckets. Automatically generated if undef.
data_name : str, default 'data'
name of data
label_name : str, default 'softmax_label'
name of label
layout : str
format of data and label. 'NT' means (batch_size, length)
and 'TN' means (length, batch_size).
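For example (a sketch, assuming $sentences holds already-encoded sentences):

    my $data_iter = mx->rnn->BucketSentenceIter(
        $sentences, 32, buckets => [10, 20, 30, 40]
    );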
=cut
use Mouse;
use AI::MXNet::Base;
use List::Util qw(shuffle max);
extends 'AI::MXNet::DataIter';
has 'sentences' => (is => 'ro', isa => 'ArrayRef[ArrayRef]', required => 1);
has '+batch_size' => (is => 'ro', isa => 'Int', required => 1);
has 'invalid_label' => (is => 'ro', isa => 'Int', default => -1);
has 'data_name' => (is => 'ro', isa => 'Str', default => 'data');
has 'label_name' => (is => 'ro', isa => 'Str', default => 'softmax_label');
has 'dtype' => (is => 'ro', isa => 'Dtype', default => 'float32');
has 'layout' => (is => 'ro', isa => 'Str', default => 'NT');
has 'buckets' => (is => 'rw', isa => 'Maybe[ArrayRef[Int]]');
has [qw/data nddata ndlabel
major_axis default_bucket_key
provide_data provide_label
idx curr_idx
/] => (is => 'rw', init_arg => undef);
sub BUILD
{
my $self = shift;
if(not defined $self->buckets)
{
my @buckets;
lib/AI/MXNet/RNN/IO.pm view on Meta::CPAN
)->aspdl;
$buff->slice([0, @{ $self->sentences->[$i] }-1]) .= pdl($self->sentences->[$i]);
push @{ $self->data->[$buck] }, $buff;
}
$self->data([map { pdl(PDL::Type->new(DTYPE_MX_TO_PDL->{$self->dtype}), $_) } @{$self->data}]);
AI::MXNet::Logging->warning("discarded $ndiscard sentences longer than the largest bucket.")
if $ndiscard;
$self->nddata([]);
$self->ndlabel([]);
$self->major_axis(index($self->layout, 'N'));
$self->default_bucket_key(max(@{ $self->buckets }));
my $shape;
if($self->major_axis == 0)
{
$shape = [$self->batch_size, $self->default_bucket_key];
}
elsif($self->major_axis == 1)
{
$shape = [$self->default_bucket_key, $self->batch_size];
}
else
{
confess("Invalid layout ${\ $self->layout }: Must by NT (batch major) or TN (time major)");
}
$self->provide_data([
AI::MXNet::DataDesc->new(
name => $self->data_name,
shape => $shape,
dtype => $self->dtype,
lib/AI/MXNet/Random.pm view on Meta::CPAN
as well as results from executors that contains Random number
such as Dropout operators.
Parameters
----------
seed_state : int
The random number seed to set to all devices.
Notes
-----
The random number generator of mxnet is device specific by default.
This means that even with the same seed, the random number sequence
generated on GPU 0 can differ from the one generated on the CPU.
=cut
method seed(Int $seed_state)
{
check_call(AI::MXNetCAPI::RandomSeed($seed_state));
}
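# For example: mx->random->seed(42) makes a run reproducible on a given
# device, though the GPU and CPU sequences may still differ (see note above).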
for my $method (
lib/AI/MXNet/Symbol.pm view on Meta::CPAN
with a call to `forward` (executor method).
For the common use case, where you might repeatedly evaluate with the same arguments,
eval is slow.
In that case, you should call `bind` once and then repeatedly call forward.
Eval allows simpler syntax for less cumbersome introspection.
Parameters
----------
:$ctx : Context
The device context the generated executor to run on.
Optional, defaults to cpu(0)
:$args array ref of NDArray or hash ref of NDArray
- If the type is an array ref of NDArray, the position is in the same order of list_arguments.
- If the type is a hash of str to NDArray, then it maps the name of the argument
to the corresponding NDArray.
- In either case, all arguments must be provided.
Returns
----------
lib/AI/MXNet/Symbol.pm view on Meta::CPAN
Optionally, one can specify the shape of a variable. This will be used during
shape inference. If the user specifies a different shape for this variable via a
keyword argument when calling shape inference, this shape information is ignored.
lr_mult : float
Specify the learning rate multiplier for this variable.
wd_mult : float
Specify the weight decay multiplier for this variable.
dtype : Dtype
Similar to shape, we can specify dtype for this variable.
init : initializer (mx->init->*)
Specify initializer for this variable to override the default initializer
kwargs : hash ref
other additional attribute variables
Returns
-------
variable : Symbol
The created variable symbol.
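For example (a sketch):

    my $w = mx->sym->Variable(
        'w', shape => [2, 2], lr_mult => 0.1,
        init => mx->init->Xavier()
    );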
=cut
method Variable(
Str $name,
lib/AI/MXNet/Symbol.pm view on Meta::CPAN
}
=head2 arange
Similar to the numpy.arange function, implemented for MXNet Symbols.
See Also https://docs.scipy.org/doc/numpy/reference/generated/numpy.arange.html.
Parameters
----------
start : number
Start of interval. The interval includes this value. The default start value is 0.
stop : number, optional
End of interval. The interval does not include this value.
step : number, optional
Spacing between values
repeat : int, optional
"The repeating time of all elements.
E.g repeat=3, the element a will be repeated three times --> a, a, a.
dtype : type, optional
The value type of the NDArray, defaults to 'float32'.
Returns
-------
out : Symbol
The created Symbol
=cut
method arange(Index :$start=0, Index :$stop=, Num :$step=1.0, Index :$repeat=1, Maybe[Str] :$name=, Dtype :$dtype='float32')
{
return __PACKAGE__->_arange({
lib/AI/MXNet/Symbol/NameManager.pm view on Meta::CPAN
=head1 DESCRIPTION
NameManager that performs automatic naming.
A user can also subclass this object to change the naming behavior.
=cut
has 'counter' => (
is => 'ro',
isa => 'HashRef',
default => sub { +{} }
);
our $current;
=head2 get
Get the canonical name for a symbol.
This is the default implementation.
If the user specifies a name, that name is used.
Otherwise a name is generated automatically
from the hint string.
Parameters
----------
name : str or undef
The name the user has specified.
lib/AI/MXNet/TestUtils.pm view on Meta::CPAN
use warnings;
use PDL;
use Carp;
use Scalar::Util qw(blessed);
use AI::MXNet::Function::Parameters;
use Exporter;
use base qw(Exporter);
@AI::MXNet::TestUtils::EXPORT_OK = qw(same reldiff almost_equal GetMNIST_ubyte
GetCifar10 pdl_maximum pdl_minimum mlp2 conv
check_consistency zip assert enumerate same_array dies_like);
use constant default_numerical_threshold => 1e-6;
=head1 NAME
AI::MXNet::TestUtils - Convenience subs used in tests.
=head2 same
Test if two pdl arrays are the same
Parameters
----------
lib/AI/MXNet/TestUtils.pm view on Meta::CPAN
return $ret;
}
=head2 almost_equal
Test if two pdl arrays are almost equal.
=cut
func almost_equal(PDL $a, PDL $b, Maybe[Num] $threshold=)
{
$threshold //= default_numerical_threshold;
my $rel = reldiff($a, $b);
return $rel <= $threshold;
}
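# Typical use in tests (a sketch):
# ok(almost_equal($x->aspdl, $y->aspdl, 1e-3), 'outputs match');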
func GetMNIST_ubyte()
{
if(not -d "data")
{
mkdir "data";
}
lib/AI/MXNet/Visualization.pm view on Meta::CPAN
symbol: AI::MXNet::Symbol
symbol to be visualized
shape: HashRef[Shape]
If supplied, the visualization will include the shape
of each tensor on the edges between nodes.
node_attrs: HashRef of node's attributes
for example:
{shape => "oval",fixedsize => "false"}
means to plot the network in "oval"
hide_weights: Bool
if True (default) then inputs with names like `*_weight`
or `*_bias` will be hidden
Returns
------
dot: Digraph
dot object of symbol
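For example (a sketch; $net is a hypothetical symbol for a network):

    my $dot = mx->viz->plot_network(
        $net, shape => { data => [1, 1, 28, 28] }
    );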
=cut
method plot_network(
t/test_init.t view on Meta::CPAN
use strict;
use warnings;
use Test::More tests => 4;
use AI::MXNet qw(mx);
sub test_default_init
{
my $data = mx->sym->Variable('data');
my $sym = mx->sym->LeakyReLU(data => $data, act_type => 'prelu');
my $mod = mx->mod->Module($sym);
$mod->bind(data_shapes=>[['data', [10,10]]]);
$mod->init_params;
ok((((values %{ ($mod->get_params)[0] }))[0]->aspdl == 0.25)->all);
}
sub test_variable_init
t/test_init.t view on Meta::CPAN
{
my $data = mx->sym->Variable('data');
my $sym = mx->sym->BatchNorm(data => $data, name => 'bn');
my $mod = mx->mod->Module($sym);
$mod->bind(data_shapes=>[['data', [10, 10, 3, 3]]]);
$mod->init_params();
ok((($mod->get_params)[1]->{bn_moving_var}->aspdl == 1)->all);
ok((($mod->get_params)[1]->{bn_moving_mean}->aspdl == 0)->all);
}
test_default_init();
test_variable_init();
test_aux_init();
t/test_module.t view on Meta::CPAN
ok(not almost_equal($x1->aspdl, $x2->aspdl, 1e-3));
}, $out1, $out2);
}
sub test_module_switch_bucket
{
my $vocab_dim = 5000;
my $num_hidden = 100;
my $num_embedding = 100;
my $num_layer = 2;
my $default_key = 10;
my $test_key = 5;
my $batch_size = 32;
my $contexts = [mx->cpu(0)];
my $initializer = mx->init->Xavier(factor_type=>"in", magnitude=>2.34);
#generate symbols for an LSTM network
my $gen_sym = sub {
my $seq_len = shift;
my $data = mx->sym->Variable('data');
my $label = mx->sym->Variable('softmax_label');
t/test_module.t view on Meta::CPAN
$pred = mx->sym->FullyConnected(data=>$pred, num_hidden=>$vocab_dim, name=>'pred');
$label = mx->sym->Reshape($label, shape=>[-1]);
$pred = mx->sym->SoftmaxOutput(data=>$pred, label=>$label, name=>'softmax');
return ($pred, ['data'], ['softmax_label']);
};
my $create_bucketing_module = sub { my $key = shift;
my $model = mx->mod->BucketingModule(
sym_gen => $gen_sym,
default_bucket_key => $key,
context => $contexts
);
$model->bind(data_shapes=>[['data', [$batch_size, $key]]],
label_shapes=>[['softmax_label', [$batch_size, $key]]]
);
$model->init_params(initializer=>$initializer);
return $model;
};
#initialize the bucketing module with the default bucket key
my $bucketing_model = $create_bucketing_module->($default_key);
#switch to test_key
$bucketing_model->switch_bucket(
bucket_key => $test_key,
data_shapes => [['data', [$batch_size, $test_key]]],
label_shapes => [['softmax_label', [$batch_size, $test_key]]]
);
delete $bucketing_model->_buckets->{$test_key};
$bucketing_model->switch_bucket(
t/test_optimizers.t view on Meta::CPAN
package PerlAdam;
use strict;
use warnings;
use AI::MXNet qw(mx);
use Mouse;
use AI::MXNet::Function::Parameters;
extends 'AI::MXNet::Optimizer';
has 'beta1' => (is => 'rw', default => 0.9);
has 'beta2' => (is => 'rw', default => 0.999);
has 'epsilon' => (is => 'rw', default => 1e-8);
has 'rescale_grad' => (is => 'rw', default => 1);
has 'decay_factor' => (is => 'rw', default => (1-1e-8));
around BUILDARGS => \&init;
func init($code, $class, %kwargs)
{
return $class->$code(learning_rate => 0.001, wd => 0.9, %kwargs);
}
=begin
Create additional optimizer state: mean, variance
t/test_optimizers.t view on Meta::CPAN
rescaling factor of gradient.
clip_gradient : float, optional
clip gradient in range [-clip_gradient, clip_gradient]
clip_weights : float, optional
clip weights in range [-clip_weights, clip_weights]
=cut
package PerlRMSProp;
use Mouse;
extends 'AI::MXNet::Optimizer';
has '+learning_rate' => (default => 0.001);
has 'gamma1' => (is => "ro", isa => "Num", default => 0.9);
has 'gamma2' => (is => "ro", isa => "Num", default => 0.9);
has 'epsilon' => (is => "ro", isa => "Num", default => 1e-8);
has 'centered' => (is => "ro", isa => "Bool", default => 0);
has 'clip_weights' => (is => "ro", isa => "Num");
# For centered=False: n
# For centered=True: n, g, delta
method create_state(Index $index, AI::MXNet::NDArray $weight)
{
return [
$self->centered
? (
AI::MXNet::NDArray->zeros(
t/test_optimizers.t view on Meta::CPAN
if($self->clip_weights)
{
mx->nd->clip($weight, -$self->clip_weights, $self->clip_weights, { out => $weight });
}
}
package PerlSGD;
# Perl reference implementation of SGD
use Mouse;
extends 'AI::MXNet::Optimizer';
has '+learning_rate' => (default => 0.01);
has 'momentum' => (is => "ro", isa => "Num", default => 0);
has 'multi_precision' => (is => 'ro', isa => 'Bool', default => 0);
# Create additional optimizer state: momentum
method create_state(Index $index, AI::MXNet::NDArray $weight)
{
my $momentum;
my $weight_master_copy;
my $do_multi_precision = ($self->multi_precision and $weight->dtype eq 'float16');
if($do_multi_precision)
{
if($self->momentum != 0)