examples/char_lstm.pl
],
);
}
package main;
my $file = "data/input.txt";
open(my $fh, '<', $file) or die "can't open $file: $!";
my $fdata;
{ local($/) = undef; $fdata = <$fh>; close($fh) };
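# Build the character vocabulary: map each distinct character to a unique integer id.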
my %vocabulary; my $i = 0;
$fdata = pdl(map{ exists $vocabulary{$_} ? $vocabulary{$_} : ($vocabulary{$_} = $i++) } split(//, $fdata));
my $data_iter = AI::MXNet::RNN::IO::ASCIIIterator->new(
batch_size => $batch_size,
data => $fdata,
seq_size => $seq_size
);
my %reverse_vocab = reverse %vocabulary;
my $mode = "${cell_mode}Cell";
my $stack = mx->rnn->SequentialRNNCell();
for my $i (0..$num_layers-1)
{
examples/char_lstm.pl
$stack->reset;
my ($outputs, $states) = $stack->unroll($seq_size, inputs => $embed, merge_outputs => 1);
my $pred = mx->sym->Reshape($outputs, shape => [-1, $num_hidden*(1+($bidirectional ? 1 : 0))]);
$pred = mx->sym->FullyConnected(data => $pred, num_hidden => $data_iter->vocab_size, name => 'pred');
$label = mx->sym->Reshape($label, shape => [-1]);
my $net = mx->sym->SoftmaxOutput(data => $pred, label => $label, name => 'softmax');
my $contexts;
if(defined $gpus)
{
$contexts = [map { mx->gpu($_) } split(/,/, $gpus)];
}
else
{
$contexts = mx->cpu(0);
}
my $model = mx->mod->Module(
symbol => $net,
context => $contexts
);
examples/cudnn_lstm_bucketing.pl
--bidirectional whether to use bidirectional layers (1,0 default 0)
--dropout dropout probability (1.0 - keep probability), default 0
=cut
$bidirectional = $bidirectional ? 1 : 0;
$stack_rnn = $stack_rnn ? 1 : 0;
func tokenize_text($fname, :$vocab=, :$invalid_label=-1, :$start_label=0)
{
open(my $fh, '<', $fname) or die "Can't open $fname: $!";
my @lines = map { my $l = [split(/ /)]; shift(@$l); $l } (<$fh>);
my $sentences;
($sentences, $vocab) = mx->rnn->encode_sentences(
\@lines,
vocab => $vocab,
invalid_label => $invalid_label,
start_label => $start_label
);
return ($sentences, $vocab);
}
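# A usage sketch (the file path and label options below are illustrative, not
# taken from this excerpt):
# my ($sentences, $vocab) = tokenize_text('./data/ptb.train.txt',
#                                         start_label => 1, invalid_label => 0);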
examples/cudnn_lstm_bucketing.pl
my $pred = mx->sym->Reshape($output, shape=>[-1, $num_hidden*(1+$bidirectional)]);
$pred = mx->sym->FullyConnected(data=>$pred, num_hidden=>scalar(keys %$vocab), name=>'pred');
$label = mx->sym->Reshape($label, shape=>[-1]);
$pred = mx->sym->SoftmaxOutput(data=>$pred, label=>$label, name=>'softmax');
return ($pred, ['data'], ['softmax_label']);
};
my $contexts;
if(defined $gpus)
{
$contexts = [map { mx->gpu($_) } split(/,/, $gpus)];
}
else
{
$contexts = mx->cpu(0);
}
my $model = mx->mod->BucketingModule(
sym_gen => $sym_gen,
default_bucket_key => $data_train->default_bucket_key,
context => $contexts
examples/cudnn_lstm_bucketing.pl
my ($outputs, $states) = $stack->unroll($seq_len, inputs => $embed, merge_outputs => 1);
my $pred = mx->sym->Reshape($outputs, shape => [-1, $num_hidden*(1+$bidirectional)]);
$pred = mx->sym->FullyConnected(data => $pred, num_hidden => scalar(keys %$vocab), name => 'pred');
$label = mx->sym->Reshape($label, shape => [-1]);
$pred = mx->sym->SoftmaxOutput(data => $pred, label => $label, name => 'softmax');
return ($pred, ['data'], ['softmax_label']);
};
my $contexts;
if($gpus)
{
$contexts = [map { mx->gpu($_) } split(/,/, $gpus)];
}
else
{
$contexts = mx->cpu(0);
}
my ($arg_params, $aux_params);
if($load_epoch)
{
(undef, $arg_params, $aux_params) = mx->rnn->load_rnn_checkpoint(
examples/lstm_bucketing.pl
--wd weight decay for sgd, default=0.00001
--batch-size the batch size type, default=32
--disp-batches show progress for every n batches, default=50
--chkp-prefix prefix for checkpoint files, default='lstm_'
--chkp-epoch save checkpoint after this many epoch, default=0 (saving checkpoints is disabled)
=cut
func tokenize_text($fname, :$vocab=, :$invalid_label=-1, :$start_label=0)
{
open(my $fh, '<', $fname) or die "Can't open $fname: $!";
my @lines = map { my $l = [split(/ /)]; shift(@$l); $l } (<$fh>);
my $sentences;
($sentences, $vocab) = mx->rnn->encode_sentences(
\@lines,
vocab => $vocab,
invalid_label => $invalid_label,
start_label => $start_label
);
return ($sentences, $vocab);
}
examples/lstm_bucketing.pl
my $pred = mx->sym->Reshape($outputs, shape => [-1, $num_hidden]);
$pred = mx->sym->FullyConnected(data => $pred, num_hidden => scalar(keys %$vocabulary), name => 'pred');
$label = mx->sym->Reshape($label, shape => [-1]);
$pred = mx->sym->SoftmaxOutput(data => $pred, label => $label, name => 'softmax');
return ($pred, ['data'], ['softmax_label']);
};
my $contexts;
if(defined $gpus)
{
$contexts = [map { mx->gpu($_) } split(/,/, $gpus)];
}
else
{
$contexts = mx->cpu(0);
}
my $model = mx->mod->BucketingModule(
sym_gen => $sym_gen,
default_bucket_key => $data_train->default_bucket_key,
context => $contexts
examples/mnist.pl
sub show_sample {
print 'label: ', $train_lbl->slice('0:9'), "\n";
my $hbox = Gtk2::HBox->new(0, 2);
for my $i (0 .. 9) {
my $img = $train_img->slice(":,:,$i");
my($w, $h) = $img->dims;
$img->make_physical();
# ugh, pixbufs don't have a grayscale colorspace?!
# burst it to rgb I guess.
my $data = pack 'c*', map { $_, $_, $_ } unpack 'c*', ${$img->get_dataref};
$hbox->add(Gtk2::Image->new_from_pixbuf(
Gtk2::Gdk::Pixbuf->new_from_data($data, 'rgb', 0, 8, $w, $h, $w * 3)
));
}
my $win = Gtk2::Window->new('toplevel');
$win->signal_connect(delete_event => sub { Gtk2->main_quit() });
$win->add($hbox);
$win->show_all();
Gtk2->main();
}
lib/AI/MXNet/Base.pm
@array_refs
=cut
sub zip
{
my ($sub, @arrays) = @_;
my $len = @{ $arrays[0] };
for (my $i = 0; $i < $len; $i++)
{
$sub->(map { $_->[$i] } @arrays);
}
}
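# A minimal usage sketch of zip (the arrays are hypothetical): iterate two
# array refs in lockstep.
# zip(sub { my ($k, $v) = @_; print "$k => $v\n" }, ['a', 'b'], [1, 2]);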
=head2 enumerate
Same as zip, but the argument list in the anonymous sub is prepended
with the iteration count.
=cut
sub enumerate
lib/AI/MXNet/Base.pm
}
=head2 product
Calculates the product of the input arguments.
=cut
sub product
{
my $p = 1;
$p *= $_ for @_;
return $p;
}
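# Sketch: product(2, 3, 4) returns 24; product() returns 1 for an empty list.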
=head2 bisect_left
Returns the leftmost index at which an item can be inserted into a sorted
array ref while keeping it sorted; a port of Python's bisect.bisect_left.
https://hg.python.org/cpython/file/2.7/Lib/bisect.py
=cut
sub bisect_left
{
lib/AI/MXNet/CachedOp.pm
}
}
else
{
$out = [];
}
my $output = check_call(
AI::MXNetCAPI::InvokeCachedOp(
$self->handle,
scalar(@args),
[map { $_->handle } @args],
[map { $_->handle } @$out]
)
);
return $original_output if defined $original_output;
if(@$output == 1)
{
return AI::MXNet::NDArray->new(handle => $output->[0]);
}
else
{
return [map { AI::MXNet::NDArray->new(handle => $_) } @$output];
}
}
1;
lib/AI/MXNet/Contrib/AutoGrad.pm
gradients: array ref of AI::MXNet::NDArrays
grad_reqs: array ref of strings
=cut
method mark_variables(
ArrayRef[AI::MXNet::NDArray] $variables,
ArrayRef[AI::MXNet::NDArray] $gradients,
GradReq|ArrayRef[GradReq] $grad_reqs='write'
)
{
my @variable_handles = map { $_->handle } @{ $variables };
my @gradient_handles = map { $_->handle } @{ $gradients };
my @grad_reqs;
if(not ref $grad_reqs)
{
@grad_reqs = (GRAD_REQ_MAP->{ $grad_reqs }) x scalar(@variable_handles);
}
else
{
@grad_reqs = map { GRAD_REQ_MAP->{ $_ } } @{ $grad_reqs };
}
check_call(
AI::MXNetCAPI::AutogradMarkVariables(
scalar(@variable_handles),
\@variable_handles,
\@grad_reqs,
\@gradient_handles
)
);
}
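# A usage sketch (variable names are illustrative): attach a gradient buffer
# to an NDArray before recording operations.
# my $x = mx->nd->ones([2, 2]);
# my $gx = $x->zeros_like;
# AI::MXNet::Contrib::AutoGrad->mark_variables([$x], [$gx]);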
lib/AI/MXNet/Contrib/AutoGrad.pm
retain_graph: bool, defaults to false
=cut
method backward(
ArrayRef[AI::MXNet::NDArray] $outputs,
Maybe[ArrayRef[AI::MXNet::NDArray|Undef]] $out_grads=,
Bool $retain_graph=0
)
{
my @output_handles = map { $_->handle } @{ $outputs };
if(not defined $out_grads)
{
check_call(
AI::MXNetCAPI::AutogradBackward(
scalar(@output_handles),
\@output_handles,
[],
$retain_graph
)
);
lib/AI/MXNet/Contrib/AutoGrad.pm
=cut
method grad_and_loss(CodeRef $func, Maybe[Int|ArrayRef[Int]] $argnum=)
{
return sub {
my @args = @_;
my @variables = @_;
if(defined $argnum)
{
my @argnum = ref $argnum ? @$argnum : ($argnum);
@variables = map { $_[$_] } @argnum;
}
map {
assert(
(blessed($_) and $_->isa('AI::MXNet::NDArray')),
"type of autograd input should NDArray")
} @variables;
my @grads = map { $_->zeros_like } @variables;
__PACKAGE__->mark_variables(\@variables, \@grads);
my $prev = __PACKAGE__->set_is_training(1);
my $outputs = $func->(@args);
__PACKAGE__->set_is_training(0) unless $prev;
__PACKAGE__->compute_gradient(ref $outputs eq 'ARRAY' ? $outputs : [$outputs]);
return (\@grads, $outputs);
};
}
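# A usage sketch (assumed calling convention, mirroring the code above):
# my $g_and_l = AI::MXNet::Contrib::AutoGrad->grad_and_loss(sub { $_[0] * $_[0] });
# my ($grads, $outputs) = $g_and_l->(mx->nd->array([1, 2, 3]));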
=head2 grad
lib/AI/MXNet/Executor.pm
The output ndarrays bound to this executor.
Returns
-------
An array ref with AI::MXNet::NDArray objects bound to the heads of the executor.
=cut
method _get_outputs()
{
return [
map {
AI::MXNet::NDArray->new(handle => $_)
}
@{ check_call(AI::MXNetCAPI::ExecutorOutputs($self->handle)) }
];
}
=head2 forward
Calculate the outputs specified by the bound symbol.
lib/AI/MXNet/Executor.pm
$out_grads = [$out_grads];
}
elsif(ref $out_grads eq 'HASH')
{
$out_grads = [ @{ $out_grads }{ @{ $self->symbol->list_outputs() } } ];
}
check_call(
AI::MXNetCAPI::ExecutorBackward(
$self->handle,
scalar(@{ $out_grads }),
[map { $_->handle } @{ $out_grads }]
)
);
if(not $self->_output_dirty)
{
AI::MXNet::Logging->warning(
"Calling backward without calling forward(is_train=True) "
."first. Behavior is undefined."
);
}
$self->_output_dirty(0);
lib/AI/MXNet/Executor.pm
);
}
=head2 arg_dict
Get a hash ref representation of the argument arrays.
Returns
-------
arg_dict : HashRef[AI::MXNet::NDArray]
The map that maps a name of the arguments to the NDArrays.
=cut
method arg_dict()
{
if(not defined $self->_arg_dict)
{
$self->_arg_dict(_get_dict(
$self->_symbol->list_arguments(),
$self->arg_arrays
)
lib/AI/MXNet/Executor.pm
return $self->_arg_dict;
}
=head2 grad_dict
Get a hash ref representation of the gradient arrays.
Returns
-------
grad_dict : HashRef[AI::MXNet::NDArray]
The map that maps a name of the arguments to the gradient NDArrays.
=cut
method grad_dict()
{
if(not defined $self->_grad_dict)
{
$self->_grad_dict(_get_dict(
$self->_symbol->list_arguments(),
$self->grad_arrays
)
lib/AI/MXNet/Executor.pm
return $self->_grad_dict;
}
=head2 aux_dict
Get a hash ref representation of the auxiliary states arrays.
Returns
-------
aux_dict : HashRef[AI::MXNet::NDArray]
The map that maps a name of the auxiliary states to the NDArrays.
=cut
method aux_dict()
{
if(not defined $self->_aux_dict)
{
$self->_aux_dict(_get_dict(
$self->_symbol->list_auxiliary_states(),
$self->aux_arrays()
)
lib/AI/MXNet/Executor.pm
return $self->_aux_dict;
}
=head2 output_dict
Get a hash ref representation of the output arrays.
Returns
-------
output_dict : HashRef[AI::MXNet::NDArray]
The map that maps a name of the outputs to the NDArrays.
=cut
method output_dict()
{
if(not defined $self->_output_dict)
{
$self->_output_dict(_get_dict(
$self->_symbol->list_outputs(),
$self->outputs
)
lib/AI/MXNet/Executor/Group.pm
use AI::MXNet::Function::Parameters;
=head1 NAME
AI::MXNet::Executor::Group - Manager for a group of executors working in different contexts.
=cut
func _split_input_slice($batch_size, $work_load_list)
{
my $total_work_load = sum(@{ $work_load_list });
my @batch_num_list = map { # perl does not have builtin round
int(($_ * $batch_size / $total_work_load) + 0.5)
} @{ $work_load_list };
my $batch_num_sum = sum(@batch_num_list);
my @slices;
if($batch_num_sum < $batch_size)
{
$batch_num_list[-1] += $batch_size - $batch_num_sum;
}
my $end = 0;
for my $batch_num (@batch_num_list)
lib/AI/MXNet/Executor/Group.pm
my ($tensors, $axis) = @_;
if($axis >= 0)
{
if(@$tensors == 1)
{
push @rets, $tensors->[0];
}
else
{
my $ctx = $tensors->[0]->context;
push @rets, AI::MXNet::NDArray->concat((map { $_->as_in_context($ctx) } @$tensors), { dim => $axis });
}
}
else
{
# negative axis means there is no batch_size axis, and all the
# results should be the same on each device. We simply take the
# first one, without checking that they are actually the same
push @rets, $tensors->[0];
}
}, $outputs, $major_axis);
lib/AI/MXNet/Executor/Group.pm
{
my $label_shapes = [];
for my $l (@{ $self->label_shapes })
{
$l = AI::MXNet::DataDesc->new(name => $l->[0], shape => $l->[1])
unless blessed $l;
push @{ $label_shapes }, $l;
}
$self->label_shapes($label_shapes);
}
my %data_names = map { $_->name => 1 } @{ $self->data_shapes };
my %param_names = map { $_ => 1 } @{ $self->param_names };
my %fixed_param_names = map { $_ => 1 } @{ $self->fixed_param_names };
my %grad_req;
if(not ref $self->grad_req)
{
for my $k (@{ $self->_p->arg_names })
{
if(exists $param_names{ $k })
{
$grad_req{$k} = exists $fixed_param_names{ $k } ? 'null' : $self->grad_req;
}
elsif(exists $data_names{ $k })
lib/AI/MXNet/Executor/Group.pm
}
%grad_req = (%grad_req, %{ $self->grad_req });
}
$self->grad_req(\%grad_req);
if(defined $self->shared_group)
{
$self->_p->shared_data_arrays($self->shared_group->_p->shared_data_arrays);
}
else
{
$self->_p->shared_data_arrays([map { +{} } 0..@{ $self->contexts }-1]);
}
$self->_p->output_layouts([
map {
AI::MXNet::DataDesc->get_batch_axis($self->symbol->slice($_)->attr('__layout__'))
} @{ $self->symbol->list_outputs }
]);
$self->bind_exec($self->data_shapes, $self->label_shapes, $self->shared_group);
}
=head2 decide_slices
Decide the slices for each context according to the workload.
Parameters
----------
$data_shapes : ArrayRef[AI::MXNet::DataDesc]
=cut
method decide_slices(ArrayRef[AI::MXNet::DataDesc] $data_shapes)
{
confess("empty data_shapes array") unless @{ $data_shapes } > 0;
my $major_axis = [map { AI::MXNet::DataDesc->get_batch_axis($_->layout) } @{ $data_shapes }];
zip(sub {
my ($desc, $axis) = @_;
return if($axis == -1);
my $batch_size = $desc->shape->[$axis];
if(defined $self->_p->batch_size)
{
confess(
"all data must have the same batch size: "
. sprintf("batch_size = %d, but ", $self->_p->batch_size)
. sprintf("%s has shape %s", $desc->name, '('. join(',', @{ $desc->shape }) . ')')
lib/AI/MXNet/Executor/Group.pm
my $name = $l->name;
my @tmp;
for my $i (0..@{ $self->_p->execs }-1)
{
push @tmp, [ $self->_p->slices->[$i], $self->_p->execs->[$i]->arg_dict->{$name} ];
}
push @{ $self->_p->label_arrays }, \@tmp;
}
}
$self->_p->param_arrays([]);
my %param_names = map { $_ => 1 } @{ $self->param_names };
for my $i (0..@{ $self->_p->arg_names }-1)
{
my $name = $self->_p->arg_names->[$i];
if(exists $param_names{$name})
{
my @tmp;
for my $exec (@{ $self->_p->execs })
{
push @tmp, $exec->arg_arrays->[$i];
}
lib/AI/MXNet/Executor/Group.pm
{
my @tmp;
for my $exec (@{ $self->_p->execs })
{
push @tmp, $exec->grad_arrays->[$i];
}
push @{ $self->_p->grad_arrays }, \@tmp;
}
}
}
my @data_names = map { $_->name } @{ $self->data_shapes };
my $j = 0; my %arg_names = map { $_ => $j++ } @{ $self->_p->arg_names };
if($self->inputs_need_grad)
{
$self->_p->input_grad_arrays([]);
for my $name (@data_names)
{
next unless exists $arg_names{$name};
my @tmp;
for my $exec (@{ $self->_p->execs })
{
push @tmp, $exec->grad_arrays->[$arg_names{$name}];
lib/AI/MXNet/Executor/Group.pm
for my $i (0..@{ $self->contexts }-1)
{
my $data_shapes_i = $self->_sliced_shape($data_shapes, $i, $self->_p->data_layouts);
my $label_shapes_i = [];
if(defined $label_shapes)
{
$label_shapes_i = $self->_sliced_shape($label_shapes, $i, $self->_p->label_layouts);
}
if($reshape)
{
my %combined_hash = map { $_->name => $_->shape } (@{ $data_shapes_i }, @{ $label_shapes_i });
$self->_p->execs->[$i] = $self->_p->_default_execs->[$i]->reshape(
\%combined_hash,
allow_up_sizing => 1,
);
}
else
{
push @{ $self->_p->execs }, $self->_bind_ith_exec($i, $data_shapes_i, $label_shapes_i, $shared_group);
}
}
lib/AI/MXNet/Executor/Group.pm
$self->bind_exec($data_shapes, $label_shapes, undef, 1);
}
=head2 set_params
Assign (i.e. copy) parameters to all the executors.
Parameters
----------
$arg_params : HashRef[AI::MXNet::NDArray]
A dictionary of name to AI::MXNet::NDArray parameter mapping.
$aux_params : HashRef[AI::MXNet::NDArray]
A dictionary of name to AI::MXNet::NDArray auxiliary variable mapping.
=cut
method set_params(HashRef[AI::MXNet::NDArray] $arg_params, HashRef[AI::MXNet::NDArray] $aux_params, Bool $allow_extra=0)
{
$_->copy_params_from($arg_params, $aux_params, $allow_extra) for @{ $self->_p->execs };
}
=head2 get_params
Copy data from each executor to arg_params and aux_params.
lib/AI/MXNet/Executor/Group.pm
Notes
-----
- This function will update the NDArrays in arg_params and aux_params in place.
=cut
method get_params(HashRef[AI::MXNet::NDArray] $arg_params, HashRef[AI::MXNet::NDArray] $aux_params)
{
my $weight = 0;
zip(sub {
my ($name, $block) = @_;
my $weight = sum(map { $_->copyto(AI::MXNet::Context->cpu) } @{ $block }) / @{ $block };
$weight->astype($arg_params->{$name}->dtype)->copyto($arg_params->{$name});
}, $self->param_names, $self->_p->param_arrays);
zip(sub {
my ($name, $block) = @_;
my $weight = sum(map { $_->copyto(AI::MXNet::Context->cpu) } @{ $block }) / @{ $block };
$weight->astype($aux_params->{$name}->dtype)->copyto($aux_params->{$name});
}, $self->_p->aux_names, $self->_p->aux_arrays);
}
method get_states($merge_multi_context=1)
{
assert((not $merge_multi_context), "merge_multi_context=True is not supported for get_states yet.");
return $self->_p->state_arrays;
lib/AI/MXNet/Executor/Group.pm
AI::MXNet::Executor::Group::_load_label($data_batch, $self->_p->label_arrays, $self->_p->label_layouts);
}
}
$_->forward($is_train) for @{ $self->_p->execs };
}
# Get the shapes of the outputs
method get_output_shapes()
{
my @shapes = map { $_->shape } @{ $self->execs->[0]->outputs };
my @concat_shapes;
zip(sub {
my ($key, $shape, $axis) = @_;
my @the_shape = @{ $shape };
if($axis >= 0)
{
$the_shape[$axis] = $self->_p->batch_size;
}
push @concat_shapes, AI::MXNet::DataDesc->new(name => $key, shape => \@the_shape);
}, $self->symbol->list_outputs, \@shapes, $self->_p->output_layouts);
lib/AI/MXNet/Executor/Group.pm
method _bind_ith_exec(
Int $i,
ArrayRef[AI::MXNet::DataDesc] $data_shapes,
Maybe[ArrayRef[AI::MXNet::DataDesc]] $label_shapes,
Maybe[AI::MXNet::DataParallelExecutorGroup] $shared_group
)
{
my $shared_exec = $shared_group ? $shared_group->_p->execs->[$i] : undef;
my $context = $self->contexts->[$i];
my $shared_data_arrays = $self->_p->shared_data_arrays->[$i];
my %input_shapes = map { $_->name => $_->shape } @{ $data_shapes };
if(defined $label_shapes)
{
%input_shapes = (%input_shapes, map { $_->name => $_->shape } @{ $label_shapes });
}
my %input_types = map { $_->name => $_->dtype } @{ $data_shapes };
my $executor = $self->symbol->simple_bind(
ctx => $context,
grad_req => $self->grad_req,
type_dict => \%input_types,
shared_arg_names => $self->param_names,
shared_exec => $shared_exec,
shared_buffer => $shared_data_arrays,
shapes => \%input_shapes
);
return $executor;
lib/AI/MXNet/IO.pm
my @ret;
if(ref($data) eq 'ARRAY')
{
if(@{ $data } == 1)
{
@ret = ([$default_name, $data->[0]]);
}
else
{
my $i = -1;
@ret = map { $i++; ["_${i}_$default_name", $_] } @{ $data };
}
}
if(ref($data) eq 'HASH')
{
while(my ($k, $v) = each %{ $data })
{
push @ret, [$k, $v];
}
}
for my $d (@ret)
lib/AI/MXNet/IO.pm
Parameters
----------
$shapes : HashRef[Shape]
$types= : Maybe[HashRef[Dtype]]
=cut
method get_list(HashRef[Shape] $shapes, Maybe[HashRef[Dtype]] $types=)
{
$types //= {};
return [
map {
AI::MXNet::DataDesc->new(
name => $_,
shape => $shapes->{$_},
(exists $types->{$_} ? (type => $types->{$_}) : ())
)
} keys %{ $shapes }
];
}
package AI::MXNet::DataBatch;
lib/AI/MXNet/IO.pm
my @idx = shuffle(0..$num_data-1);
$_->[1] = AI::MXNet::NDArray->array(pdl_shuffle($_->[1]->aspdl, \@idx)) for @$data;
$_->[1] = AI::MXNet::NDArray->array(pdl_shuffle($_->[1]->aspdl, \@idx)) for @$label;
}
if($self->last_batch_handle eq 'discard')
{
my $new_n = $num_data - $num_data % $self->batch_size - 1;
$_->[1] = $_->[1]->slice([0, $new_n]) for @$data;
$_->[1] = $_->[1]->slice([0, $new_n]) for @$label;
}
my $data_list = [map { $_->[1] } (@{ $data }, @{ $label })];
my $num_source = @{ $data_list };
my $cursor = -$self->batch_size;
$self->data($data);
$self->data_list($data_list);
$self->label($label);
$self->num_source($num_source);
$self->cursor($cursor);
$self->num_data($num_data);
}
# The name and shape of data provided by this iterator
method provide_data()
{
return [map {
my ($k, $v) = @{ $_ };
my $shape = $v->shape;
$shape->[0] = $self->batch_size;
AI::MXNet::DataDesc->new(name => $k, shape => $shape, dtype => $v->dtype)
} @{ $self->data }];
}
# The name and shape of label provided by this iterator
method provide_label()
{
return [map {
my ($k, $v) = @{ $_ };
my $shape = $v->shape;
$shape->[0] = $self->batch_size;
AI::MXNet::DataDesc->new(name => $k, shape => $shape, dtype => $v->dtype)
} @{ $self->label }];
}
# Ignore roll over data and set to start
method hard_reset()
{
lib/AI/MXNet/IO.pm
}
}
# Load data from underlying arrays, internal use only
method _getdata($data_source)
{
confess("DataIter needs reset.") unless $self->cursor < $self->num_data;
if(($self->cursor + $self->batch_size) <= $self->num_data)
{
return [
map {
$_->[1]->slice([$self->cursor,$self->cursor+$self->batch_size-1])
} @{ $data_source }
];
}
else
{
my $pad = $self->batch_size - $self->num_data + $self->cursor - 1;
return [
map {
AI::MXNet::NDArray->concatenate(
[
$_->[1]->slice([$self->cursor, -1]),
$_->[1]->slice([0, $pad])
]
)
} @{ $data_source }
];
}
}
lib/AI/MXNet/Image.pm
my ($label, $s) = $self->next_sample;
last if not defined $label;
my $data = [AI::MXNet::Image->imdecode($s)];
if(@{ $data->[0]->shape } == 0)
{
AI::MXNet::Logging->debug('Invalid image, skipping.');
next;
}
for my $aug (@{ $self->aug_list })
{
$data = [map { @{ $aug->($_) } } @$data];
}
for my $d (@$data)
{
assert(($i < $batch_size), 'Batch size must be a multiple of augmenter output length');
$batch_data->at($i) .= AI::MXNet::NDArray->transpose($d, { axes=>[2, 0, 1] });
$batch_label->at($i) .= $label;
$i++;
}
}
return undef if not $i;
lib/AI/MXNet/Initializer.pm
use Mouse;
use AI::MXNet::Base qw(:DEFAULT pzeros pceil);
use AI::MXNet::NDArray;
use JSON::PP;
use overload "&{}" => sub { my $self = shift; sub { $self->call(@_) } },
'""' => sub {
my $self = shift;
my ($name) = ref($self) =~ /::(\w+)$/;
encode_json(
[lc $name,
$self->kwargs//{ map { $_ => "".$self->$_ } $self->meta->get_attribute_list }
]);
},
fallback => 1;
has 'kwargs' => (is => 'rw', init_arg => undef, isa => 'HashRef');
has '_verbose' => (is => 'rw', isa => 'Bool', lazy => 1, default => 0);
has '_print_func' => (is => 'rw', isa => 'CodeRef', lazy => 1,
default => sub {
return sub {
my $x = shift;
return ($x->norm/sqrt($x->size))->asscalar;
lib/AI/MXNet/Initializer.pm
patterns: array ref of str
array ref of regular expression patterns to match parameter names.
initializers: array ref of AI::MXNet::Initializer objects.
array ref of Initializers corresponding to the patterns.
=cut
package AI::MXNet::Mixed;
use Mouse;
extends 'AI::MXNet::Initializer';
has "map" => (is => "rw", init_arg => undef);
has "patterns" => (is => "ro", isa => 'ArrayRef[Str]');
has "initializers" => (is => "ro", isa => 'ArrayRef[AI::MXnet::Initializer]');
sub BUILD
{
my $self = shift;
confess("patterns count != initializers count")
unless (@{ $self->patterns } == @{ $self->initializers });
my %map;
@map{ @{ $self->patterns } } = @{ $self->initializers };
$self->map(\%map);
}
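# A construction sketch (AI::MXNet::Uniform is assumed to exist alongside the
# AI::MXNet::Zero package below): zero-initialize biases, uniform init elsewhere.
# my $init = AI::MXNet::Mixed->new(
#     patterns     => ['bias$', '.*'],
#     initializers => [AI::MXNet::Zero->new, AI::MXNet::Uniform->new],
# );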
method call(Str $name, AI::MXNet::NDArray $arr)
{
for my $pattern (keys %{ $self->map })
{
if($name =~ /$pattern/)
{
&{$self->map->{$pattern}}($name, $arr);
return;
}
}
confess(
"Parameter name $name did not match any pattern. Consider"
."add a \".*\" pattern at the and with default Initializer."
);
}
package AI::MXNet::Zero;
lib/AI/MXNet/KVStore.pm
>>> $kv = mx->kv->create('local')
>>> $kv->init(3, mx->nd->ones($shape)*2)
>>> $a = mx->nd->zeros($shape)
>>> $kv->pull(3, out=>$a)
>>> print $a->aspdl
[[ 2 2 2]
[ 2 2 2]]
>>> # init a list of key-value pairs
>>> $keys = [5, 7, 9]
>>> $kv->init($keys, [map { mx->nd->ones($shape) } 0..@$keys-1])
=cut
method init(
Str|ArrayRef[Str] $key,
AI::MXNet::NDArray|ArrayRef[AI::MXNet::NDArray]|ArrayRef[ArrayRef[AI::MXNet::NDArray]] $value
)
{
my ($keys, $vals) = _key_value($key, $value);
check_call(
AI::MXNetCAPI::KVStoreInitEx(
lib/AI/MXNet/KVStore.pm
Examples
--------
>>> # push a single key-value pair
>>> $kv->push(3, mx->nd->ones($shape)*8)
>>> $kv->pull(3, out=>$a) # pull out the value
>>> print $a->aspdl()
[[ 8. 8. 8.]
[ 8. 8. 8.]]
>>> # aggregate the value and the push
>>> $gpus = [map { mx->gpu($_) } 0..3]
>>> $b = [map { mx->nd->ones($shape, ctx => $_) } @$gpus]
>>> $kv->push(3, $b)
>>> $kv->pull(3, out=>$a)
>>> print $a->aspdl
[[ 4. 4. 4.]
[ 4. 4. 4.]]
>>> # push a list of keys.
>>> # single device
>>> $kv->push($keys, [map { mx->nd->ones($shape) } 0..@$keys-1])
>>> $b = [map { mx->nd->zeros($shape) } 0..@$keys-1]
>>> $kv->pull($keys, out=>$b)
>>> print $b->[1]->aspdl
[[ 1. 1. 1.]
[ 1. 1. 1.]]
>>> # multiple devices:
>>> $b = [map { [map { mx->nd->ones($shape, ctx => $_) } @$gpus] } 0..@$keys-1]
>>> $kv->push($keys, $b)
>>> $kv->pull($keys, out=>$b)
>>> print $b->[1][1]->aspdl()
[[ 4. 4. 4.]
[ 4. 4. 4.]]
=cut
method push(
Str|ArrayRef[Str] $key,
AI::MXNet::NDArray|ArrayRef[AI::MXNet::NDArray]|ArrayRef[ArrayRef[AI::MXNet::NDArray]] $value,
lib/AI/MXNet/KVStore.pm
Examples
--------
>>> # pull a single key-value pair
>>> $a = mx->nd->zeros($shape)
>>> $kv->pull(3, out=>$a)
>>> print $a->aspdl
[[ 2. 2. 2.]
[ 2. 2. 2.]]
>>> # pull into multiple devices
>>> $b = [map { mx->nd->ones($shape, ctx => $_) } @$gpus]
>>> $kv->pull(3, out=>$b)
>>> print $b->[1]->aspdl()
[[ 2. 2. 2.]
[ 2. 2. 2.]]
>>> # pull a list of key-value pairs.
>>> # On single device
>>> $keys = [5, 7, 9]
>>> $b = [map { mx->nd->zeros($shape) } 0..@$keys-1]
>>> $kv->pull($keys, out=>$b)
>>> print $b->[1]->aspdl()
[[ 2. 2. 2.]
[ 2. 2. 2.]]
>>> # On multiple devices
>>> $b = [map { [map { mx->nd->ones($shape, ctx => $_) } @$gpus ] } 0..@$keys-1]
>>> $kv->pull($keys, out=>$b)
>>> print $b->[1][1]->aspdl()
[[ 2. 2. 2.]
[ 2. 2. 2.]]
=cut
method pull(
Str|ArrayRef[Str] $key,
AI::MXNet::NDArray|ArrayRef[AI::MXNet::NDArray]|ArrayRef[ArrayRef[AI::MXNet::NDArray]] :$out,
Int :$priority=0
lib/AI/MXNet/KVStore.pm
{
if(blessed $vals)
{
return ([$keys], [$vals->handle]);
}
else
{
for my $value (@{ $vals })
{
assert(blessed($value) and $value->isa('AI::MXNet::NDArray'));
}
return ([($keys)x@$vals], [map { $_->handle } @$vals]);
}
}
else
{
assert(not blessed($vals) and @$keys == @$vals);
my @c_keys;
my @c_vals;
zip(sub {
my ($key, $val) = @_;
lib/AI/MXNet/Metric.pm
{
return ($self->name, 'nan');
}
else
{
return ($self->name, $self->sum_metric / $self->num_inst);
}
}
else
{
my $names = [map { sprintf('%s_%d', $self->name, $_) } 0..$self->num-1];
my $values = [];
for (my $i = 0; $i < @{ $self->sum_metric }; $i++)
{
my ($x, $y) = ($self->sum_metric->[$i], $self->num_inst->[$i]);
if($y != 0)
{
push (@$values, $x/$y);
}
else
{
lib/AI/MXNet/Module.pm
if($num_device == 1 and $kvstore !~ /dist/)
{
# no need to use kv for single device and single machine
}
else
{
$kv = AI::MXNet::KVStore->create($kvstore);
if($kvstore eq 'local')
{
# automatically decide whether to update on the local kvstore
my $max_size = max(map { product(@{ $_->shape }) } values %{ $arg_params });
if($max_size > 1024 * 1024 * 16)
{
$update_on_kvstore = 0;
}
}
}
}
}
$update_on_kvstore = 0 if not $kv;
lib/AI/MXNet/Module.pm
{
$work_load_list = [(1)x@{$self->_p->_context}];
}
assert(@{ $work_load_list } == @{ $self->_p->_context });
$self->_p->_work_load_list($work_load_list);
my @data_names = @{ $self->_data_names//['data'] };
my @label_names = @{ $self->_label_names//['softmax_label'] };
my @state_names = @{ $self->state_names//[] };
my $arg_names = $self->_symbol->list_arguments;
my @input_names = (@data_names, @label_names, @state_names);
my %input_names = map { $_ => 1 } @input_names;
$self->_p->_param_names([grep { not exists $input_names{$_} } @{ $arg_names }]);
$self->_p->_fixed_param_names($self->fixed_param_names//[]);
$self->_p->_state_names(\@state_names);
$self->_p->_aux_names($self->_symbol->list_auxiliary_states);
$self->_p->_data_names(\@data_names);
$self->_p->_label_names(\@label_names);
$self->_p->_output_names($self->_symbol->list_outputs);
$self->_p->_params_dirty(0);
$self->_check_input_names($self->_symbol, $self->_p->_data_names, "data", 1);
$self->_check_input_names($self->_symbol, $self->_p->_label_names, "label", 0);
lib/AI/MXNet/Module.pm
elsif($self->params_initialized)
{
# if the parameters are already initialized, we are re-binding
# so automatically copy the already initialized params
$self->_p->_exec_group->set_params($self->_p->_arg_params, $self->_p->_aux_params);
}
else
{
assert(not defined $self->_p->_arg_params and not $self->_p->_aux_params);
my @param_arrays = (
map { AI::MXNet::NDArray->zeros($_->[0]->shape, dtype => $_->[0]->dtype) }
@{ $self->_p->_exec_group->_p->param_arrays }
);
my %arg_params;
@arg_params{ @{ $self->_p->_param_names } } = @param_arrays;
$self->_p->_arg_params(\%arg_params);
my @aux_arrays = (
map { AI::MXNet::NDArray->zeros($_->[0]->shape, dtype => $_->[0]->dtype) }
@{ $self->_p->_exec_group->_p->aux_arrays }
);
my %aux_params;
@aux_params{ @{ $self->_p->_aux_names } } = @aux_arrays;
$self->_p->_aux_params(\%aux_params);
}
if($shared_module and $shared_module->optimizer_initialized)
{
$self->borrow_optimizer($shared_module)
}
lib/AI/MXNet/Module.pm
{
my %idx2name;
if($update_on_kvstore)
{
@idx2name{ 0..@{$self->_p->_exec_group->param_names}-1 } = @{$self->_p->_exec_group->param_names};
}
else
{
for my $k (0..@{$self->_p->_context}-1)
{
@idx2name{ map { $_ + $k } 0..@{$self->_p->_exec_group->param_names}-1 } = @{$self->_p->_exec_group->param_names};
}
}
if(not exists $optimizer_params->{rescale_grad})
{
$optimizer_params->{rescale_grad} = $rescale_grad;
}
$optimizer = AI::MXNet::Optimizer->create(
$optimizer,
sym => $self->symbol,
param_idx2name => \%idx2name,
lib/AI/MXNet/Module.pm
$self->optimizer_initialized(1);
}
method forward(
AI::MXNet::DataBatch $data_batch,
Maybe[Bool] :$is_train=
)
{
assert($self->binded and $self->params_initialized);
my @curr_data_shapes = map { $_->shape } @{ $self->data_shapes };
my @new_data_shapes = map { $_->shape } @{ $data_batch->data };
if(Data::Dumper->Dump(\@curr_data_shapes) ne Data::Dumper->Dump(\@new_data_shapes))
{
my $new_dshape;
if($data_batch->can('provide_data') and $data_batch->provide_data)
{
$new_dshape = $data_batch->provide_data;
}
else
{
$new_dshape = [];
lib/AI/MXNet/Module/Base.pm
# Check that all input names are in symbol's argument
method _check_input_names(
AI::MXNet::Symbol $symbol,
ArrayRef[Str] $names,
Str $typename,
Bool $throw
)
{
my @candidates;
my %args = map {
push @candidates, $_ if not /_(?:weight|bias|gamma|beta)$/;
$_ => 1
} @{ $symbol->list_arguments };
for my $name (@$names)
{
my $msg;
if(not exists $args{$name} and $name ne 'softmax_label')
{
$msg = sprintf("\033[91mYou created Module with Module(..., %s_names=%s) but "
."input with name '%s' is not found in symbol.list_arguments(). "
lib/AI/MXNet/Module/Base.pm
# Check that input names matches input data descriptors
method _check_names_match(
ArrayRef[Str] $data_names,
ArrayRef[NameShapeOrDataDesc] $data_shapes,
Str $name,
Bool $throw
)
{
return if (not @$data_shapes and @$data_names == 1 and $data_names->[0] eq 'softmax_label');
my @actual = map { @{$_}[0] } @{ $data_shapes };
if("@$data_names" ne "@actual")
{
my $msg = sprintf(
"Data provided by %s_shapes don't match names specified by %s_names (%s vs. %s)",
$name, $name, "@$data_shapes", "@$data_names"
);
if($throw)
{
confess($msg);
}
lib/AI/MXNet/Module/Base.pm
}
}
method _parse_data_desc(
ArrayRef[Str] $data_names,
Maybe[ArrayRef[Str]] $label_names,
ArrayRef[NameShapeOrDataDesc] $data_shapes,
Maybe[ArrayRef[NameShapeOrDataDesc]] $label_shapes
)
{
$data_shapes = [map { blessed $_ ? $_ : AI::MXNet::DataDesc->new(@$_) } @$data_shapes];
$self->_check_names_match($data_names, $data_shapes, 'data', 1);
if($label_shapes)
{
$label_shapes = [map { blessed $_ ? $_ : AI::MXNet::DataDesc->new(@$_) } @$label_shapes];
$self->_check_names_match($label_names, $label_shapes, 'label', 0);
}
else
{
$self->_check_names_match($label_names, [], 'label', 0);
}
return ($data_shapes, $label_shapes);
}
=head1 DESCRIPTION
lib/AI/MXNet/Module/Base.pm
- data_shapes: an array ref of [name, shape]. In theory, since the memory is allocated,
we could directly provide the data arrays. But in the case of data parallelization,
the data arrays might not be of the same shape as viewed from the external world.
- label_shapes: an array ref of [name, shape]. This might be [] if the module does
not need labels (e.g. it does not contain a loss function at the top), or the module
is not bound for training.
- output_shapes: an array ref of [name, shape] for outputs of the module.
- parameters (for modules with parameters)
- get_params(): return an array ($arg_params, $aux_params). Each of those
is a hash ref of name to NDArray mapping. Those NDArrays are always on
CPU. The actual parameters used for computing might be on other devices (GPUs);
this function will retrieve (a copy of) the latest parameters. Therefore, modifying
- set_params($arg_params, $aux_params): assign parameters to the devices
doing the computation.
- init_params(...): a more flexible interface to assign or initialize the parameters.
- setup
- bind(): prepare environment for computation.
- init_optimizer(): install optimizer for parameter updating.
lib/AI/MXNet/Module/Base.pm
$eval_data->reset;
}
my $nbatch = 0;
my @out;
while(my $eval_batch = <$eval_data>)
{
last if defined $num_batch and $nbatch == $num_batch;
$self->forward($eval_batch, is_train => 0);
my $pad = $eval_batch->pad;
my $outputs = [
map { $_->slice([0, $_->shape->[0] - ($pad//0) - 1]) } @{ $self->get_outputs() }
];
push @out, [$outputs, $nbatch, $eval_batch];
$nbatch++;
}
return @out;
}
=head2 predict
Run prediction and collect the outputs.
lib/AI/MXNet/Module/Base.pm
assert($self->binded and $self->params_initialized);
$eval_data->reset() if $reset;
my @output_list;
my $nbatch = 0;
while(my $eval_batch = <$eval_data>)
{
last if defined $num_batch and $nbatch == $num_batch;
$self->forward($eval_batch, is_train => 0);
my $pad = $eval_batch->pad;
my $outputs = [map { $_->slice([0, $_->shape->[0]-($pad//0)-1])->copy } @{ $self->get_outputs }];
push @output_list, $outputs;
}
return () unless @output_list;
if($merge_batches)
{
my $num_outputs = @{ $output_list[0] };
for my $out (@output_list)
{
unless(@{ $out } == $num_outputs)
{
confess('Cannot merge batches, as num of outputs is not the same '
.'in mini-batches. Maybe bucketing is used?');
}
}
my @output_list2;
for my $i (0..$num_outputs-1)
{
push @output_list2,
AI::MXNet::NDArray->concatenate([map { $_->[$i] } @output_list]);
}
if($num_outputs == 1 and not $always_output_list)
{
return $output_list2[0];
}
return @output_list2;
}
return @output_list;
}
lib/AI/MXNet/Module/Base.pm
# Parameters of a module
################################################################################
=head2 get_params
The parameters; these are potentially copies of the actual parameters used
to do computation on the device.
Returns
-------
($arg_params, $aux_params), a pair of hash refs of name to value mapping.
=cut
method get_params() { confess("NotImplemented") }
=head2 init_params
Initialize the parameters and auxiliary states.
Parameters
----------
lib/AI/MXNet/Module/Base.pm
confess("NotImplemented");
}
=head2 set_params
Assign parameter and aux state values.
Parameters
----------
$arg_params= : Maybe[HashRef[AI::MXNet::NDArray]]
Hash ref of name to value (NDArray) mapping.
$aux_params= : Maybe[HashRef[AI::MXNet::NDArray]]
Hash Ref of name to value (`NDArray`) mapping.
:$allow_missing=0 : Bool
If true, params could contain missing values, and the initializer will be
called to fill those missing params.
:$force_init=0 : Bool
If true, will force re-initialize even if already initialized.
:$allow_extra=0 : Bool
Whether to allow extra parameters that are not needed by the symbol.
If this is true, no error will be thrown when arg_params or aux_params
contain extra parameters that are not needed by the executor.
=cut
lib/AI/MXNet/Module/Bucketing.pm
my $pred = mx->sym->Reshape($outputs, shape => [-1, $num_hidden]);
$pred = mx->sym->FullyConnected(data => $pred, num_hidden => scalar(keys %$vocabulary), name => 'pred');
$label = mx->sym->Reshape($label, shape => [-1]);
$pred = mx->sym->SoftmaxOutput(data => $pred, label => $label, name => 'softmax');
return ($pred, ['data'], ['softmax_label']);
};
my $contexts;
if(defined $gpus)
{
$contexts = [map { mx->gpu($_) } split(/,/, $gpus)];
}
else
{
$contexts = mx->cpu(0);
}
my $model = mx->mod->BucketingModule(
sym_gen => $sym_gen,
default_bucket_key => $data_train->default_bucket_key,
context => $contexts
lib/AI/MXNet/NDArray.pm
my $shape = $self->shape;
my $dsize = @$shape;
my $isize = @slices;
confess("Dimensions size $dsize < slices size $isize")
if $dsize < $isize;
confess("Dimensions size $dsize != slices size $isize,
ndarray only supports either ->slice on dimension 0
or full crop")
if $isize > 1 and $dsize != $isize;
my $i = -1;
@slices = map {
++$i;
ref $_ ? (@$_ == 1 ? [$_->[0], $shape->[$i] - 1] : $_) : ($_ eq 'X' ? [0, $shape->[$i] - 1] : [$_, $_]);
} @slices;
zip(sub {
my ($slice, $dim_size) = @_;
my ($begin, $end, $stride) = @$slice;
confess("NDArray does not support slice strides != 1")
if ($stride//0) > 1;
confess("Dimension $i mismatch slice begin : $begin >= Dim Size: $dim_size")
if $begin >= $dim_size or ($begin + $dim_size) < 0;
lib/AI/MXNet/NDArray.pm
my $ndary_shape_str = join(',', @{ $self->shape });
if($pdl_shape_str ne $ndary_shape_str)
{
confess("Shape inconsistant: expected $ndary_shape_str vs got $pdl_shape_str")
}
my $perl_pack_type = DTYPE_MX_TO_PERL->{$dtype};
my $buf;
## special handling for float16
if($perl_pack_type eq 'S')
{
$buf = pack("S*", map { AI::MXNetCAPI::_float_to_half($_) } unpack ("f*", ${$source_array->get_dataref}));
}
else
{
$buf = ${$source_array->get_dataref};
}
check_call(AI::MXNetCAPI::NDArraySyncCopyFromCPU($self->handle, $buf, $self->size));
return $self;
}
=head2 aspdl
lib/AI/MXNet/NDArray.pm
{
my $dtype = $self->dtype;
my $pdl_type = PDL::Type->new(DTYPE_MX_TO_PDL->{ $dtype });
my $pdl = PDL->new_from_specification($pdl_type, reverse @{ $self->shape });
my $perl_pack_type = DTYPE_MX_TO_PERL->{$dtype};
my $buf = pack("$perl_pack_type*", (0)x$self->size);
check_call(AI::MXNetCAPI::NDArraySyncCopyToCPU($self->handle, $buf, $self->size));
## special handling for float16
if($perl_pack_type eq 'S')
{
$buf = pack("f*", map { AI::MXNetCAPI::_half_to_float($_) } unpack("S*", $buf));
}
${$pdl->get_dataref} = $buf;
$pdl->upd_data;
return $pdl;
}
=head2 asmpdl
Returns a copied PDL::Matrix object of the current array.
lib/AI/MXNet/NDArray.pm
{
my $dtype = $self->dtype;
my $pdl_type = PDL::Type->new(DTYPE_MX_TO_PDL->{ $dtype });
my $pdl = PDL::Matrix->new_from_specification($pdl_type, @{ $self->shape });
my $perl_pack_type = DTYPE_MX_TO_PERL->{$dtype};
my $buf = pack("$perl_pack_type*", (0)x$self->size);
check_call(AI::MXNetCAPI::NDArraySyncCopyToCPU($self->handle, $buf, $self->size));
## special handling for float16
if($perl_pack_type eq 'S')
{
$buf = pack("f*", map { AI::MXNetCAPI::_half_to_float($_) } unpack("S*", $buf));
}
${$pdl->get_dataref} = $buf;
$pdl->upd_data;
return $pdl;
}
=head2 _slice
Returns sliced NDArray that shares memory with the current one.
lib/AI/MXNet/NDArray.pm
Parameters
----------
new_shape : Shape
new shape of NDArray
=cut
method reshape(ArrayRef[Int] $new_shape)
{
my $i = -1;
my @inferred = map { $i++; $_ == -1 ? ($i) : () } @$new_shape;
assert((@inferred <= 1), 'Only one dimension can be inferred.');
if(@inferred)
{
$new_shape->[$inferred[0]] = product(@{ $self->shape })/product(map { abs($_) } @{ $new_shape });
}
my $handle = check_call(
AI::MXNetCAPI::NDArrayReshape(
$self->handle,
scalar(@$new_shape),
$new_shape
)
);
return __PACKAGE__->new(handle => $handle, writable => $self->writable);
}
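# Example sketch: at most one dimension may be -1; it is inferred from the total size.
# my $b = $a->reshape([3, -1]);   # for a 12-element $a, -1 is inferred as 4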
lib/AI/MXNet/NDArray.pm
Broadcasting the current NDArray into the given shape.
Parameters
---------
Shape $shape : the shape to broadcast
=cut
method broadcast_to(Shape $shape)
{
my $cur_shape = $self->shape;
my $err_str = "operands could not be broadcast together with remapped shapes "
."[original->remapped]: [@$cur_shape] and requested shape [@$shape]";
if(@$shape < @$cur_shape)
{
confess($err_str);
}
@$cur_shape = ((1)x(@$shape - @$cur_shape), @$cur_shape);
my $cur_shape_arr = pdl($cur_shape);
my $broadcasting_axes = ($cur_shape_arr != pdl($shape))->which->unpdl;
if (grep { $cur_shape->[$_] != 1 } @$broadcasting_axes)
{
confess($err_str);
lib/AI/MXNet/NDArray.pm
}
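# Example sketch: broadcast a [1,3] array along the first axis.
# my $b = mx->nd->array([[1, 2, 3]])->broadcast_to([2, 3]);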
=head2 size
Number of elements in the array.
=cut
method size(Shape|Undef $shape=)
{
my $size = 1;
$size *= $_ for @{ $shape//$self->shape };
return $size;
}
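# Example: an NDArray of shape [2, 3, 4] has size 2*3*4 = 24.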
=head2 context
The context of the NDArray.
Returns
-------
lib/AI/MXNet/NDArray.pm
Returns
-------
$out : array ref of NDArrays or hash ref with NDArrays
=cut
method load(Str $filename)
{
my ($handles, $names) = check_call(AI::MXNetCAPI::NDArrayLoad($filename));
if (not @$names)
{
return [map { __PACKAGE__->new(handle => $_) } @$handles];
}
else
{
my $n = @$names;
my $h = @$handles;
confess("Handles [$h] and names [$n] count mismatch") unless $h == $n;
my %ret;
@ret{ @$names } = map { __PACKAGE__->new(handle => $_) } @$handles;
return \%ret;
}
}
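# A usage sketch (the file name is hypothetical):
# my $loaded = AI::MXNet::NDArray->load('model.params');
# # an array ref when saved as a list, a hash ref when saved by name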
=head2 save
Save array ref of NDArray or hash of str->NDArray to a binary file.
You can also use Storable to do the job if you only work with Perl.
The advantage of load/save is the file is language agnostic.
lib/AI/MXNet/NDArray.pm
if(ref $data eq 'HASH')
{
for my $name (keys %$data)
{
push @$names, $name;
push @$handles, $data->{ $name }->handle;
}
}
else
{
@$handles = map { $_->handle } @$data;
}
check_call(
AI::MXNetCAPI::NDArraySave(
$filename,
scalar(@$handles),
$handles,
$names
)
);
}
lib/AI/MXNet/NDArray.pm
1,
[$self->handle],
[defined $out_grad ? $out_grad->handle : undef],
$retain_graph
)
)
}
method CachedOp(@args) { AI::MXNet::CachedOp->new(@args) }
my $lvalue_methods = join "\n", map {"use attributes 'AI::MXNet::NDArray', \\&AI::MXNet::NDArray::$_, 'lvalue';"}
qw/at slice aspdl asmpdl reshape copy sever T astype as_in_context copyto empty zero ones full
array/;
eval << "EOV" if ($^V and $^V >= 5.006007);
{
no warnings qw(misc);
$lvalue_methods
}
EOV
__PACKAGE__->meta->make_immutable;
lib/AI/MXNet/NDArray/Base.pm
}
for my $key (keys %kwargs)
{
$kwargs{ $key } = "(" .join(", ", @{ $kwargs{ $key } }) .")"
if ref $kwargs{ $key } eq 'ARRAY';
}
my $out = check_call(AI::MXNetCAPI::ImperativeInvoke(
$handle,
scalar(@ndargs),
\@ndargs,
[map { $_->handle } @$output_vars],
scalar(keys %kwargs),
\%kwargs)
);
return $original_output if $original_output;
if(@$out == 1)
{
return $class->new(handle => $out->[0]);
}
else
{
return [map { $class->new(handle => $_) } @$out];
}
};
$function_meta{ $generic_ndarray_function }{__name__} = $func_name;
$function_meta{ $generic_ndarray_function }{__doc__} = $doc_str;
return $generic_ndarray_function;
}
method _init_ndarray_module()
{
my $op_names = check_call(AI::MXNetCAPI::ListAllOpNames());
lib/AI/MXNet/Optimizer.pm
*slice = *call;
method sync_state_context(Maybe[AI::MXNet::NDArray|ArrayRef[AI::MXNet::NDArray]] $state, AI::MXNet::Context $context)
{
if(blessed $state)
{
return $state->as_in_context($context);
}
elsif(ref $state)
{
return [map { $self->sync_state_context($_, $context) } @{ $state }];
}
return $state;
}
method set_states($states)
{
my $thawed_states = thaw($states);
$self->states($thawed_states);
%{ $self->states_synced } = map { $_ => 0 } keys %{ $thawed_states };
}
method get_states()
{
return freeze($self->states);
}
package AI::MXNet::Optimizer;
method get_updater(AI::MXNet::Optimizer $optimizer)
lib/AI/MXNet/RNN/Cell.pm
return $self->_params;
}
=head2 state_shape
shape(s) of states
=cut
method state_shape()
{
return [map { $_->{shape} } @{ $self->state_info }];
}
=head2 state_info
shape and layout information of states
=cut
method state_info()
{
confess("Not Implemented");
lib/AI/MXNet/RNN/Cell.pm
Str :$input_prefix='',
Str :$layout='NTC',
Maybe[Bool] :$merge_outputs=
)
{
$self->reset;
my $axis = index($layout, 'T');
if(not defined $inputs)
{
$inputs = [
map { AI::MXNet::Symbol->Variable("${input_prefix}t${_}_data") } (0..$length-1)
];
}
elsif(blessed($inputs))
{
assert(
(@{ $inputs->list_outputs() } == 1),
"unroll doesn't allow grouped symbol as input. Please "
."convert to list first or let unroll handle slicing"
);
$inputs = AI::MXNet::Symbol->SliceChannel(
lib/AI/MXNet/RNN/Cell.pm
{
my $output;
($output, $states) = &{$self}(
$inputs[$i],
$states
);
push @$outputs, $output;
}
if($merge_outputs)
{
@$outputs = map { AI::MXNet::Symbol->expand_dims($_, axis => $axis) } @$outputs;
$outputs = AI::MXNet::Symbol->Concat(@$outputs, dim => $axis);
}
return($outputs, $states);
}
method _get_activation($inputs, $activation, @kwargs)
{
if(not ref $activation)
{
return AI::MXNet::Symbol->Activation($inputs, act_type => $activation, @kwargs);
}
else
{
return &{$activation}($inputs, @kwargs);
}
}
method _cells_state_shape($cells)
{
return [map { @{ $_->state_shape } } @$cells];
}
method _cells_state_info($cells)
{
return [map { @{ $_->state_info } } @$cells];
}
method _cells_begin_state($cells, @kwargs)
{
return [map { @{ $_->begin_state(@kwargs) } } @$cells];
}
method _cells_unpack_weights($cells, $args)
{
$args = $_->unpack_weights($args) for @$cells;
return $args;
}
method _cells_pack_weights($cells, $args)
{
lib/AI/MXNet/RNN/Cell.pm
}
$self->_parameter($self->params->get('parameters', init => $self->initializer));
$self->_directions($self->_bidirectional ? [qw/l r/] : ['l']);
}
method state_info()
{
my $b = @{ $self->_directions };
my $n = $self->_mode eq 'lstm' ? 2 : 1;
return [map { +{ shape => [$b*$self->_num_layers, 0, $self->_num_hidden], __layout__ => 'LNC' } } 0..$n-1];
}
method _gate_names()
{
return {
rnn_relu => [''],
rnn_tanh => [''],
lstm => [qw/_i _f _c _o/],
gru => [qw/_r _z _o/]
}->{ $self->_mode };
lib/AI/MXNet/RNN/Cell.pm
method unpack_weights(HashRef[AI::MXNet::NDArray] $args)
{
my %args = %{ $args };
my $arr = delete $args{ $self->_parameter->name };
my $b = @{ $self->_directions };
my $m = $self->_num_gates;
my $h = $self->_num_hidden;
my $num_input = int(int(int($arr->size/$b)/$h)/$m) - ($self->_num_layers - 1)*($h+$b*$h+2) - $h - 2;
my %nargs = $self->_slice_weights($arr, $num_input, $self->_num_hidden);
%args = (%args, map { $_ => $nargs{$_}->copy } keys %nargs);
return \%args
}
method pack_weights(HashRef[AI::MXNet::NDArray] $args)
{
my %args = %{ $args };
my $b = @{ $self->_directions };
my $m = $self->_num_gates;
my @c = @{ $self->_gate_names };
my $h = $self->_num_hidden;
lib/AI/MXNet/RNN/Cell.pm
$inputs = AI::MXNet::Symbol->SwapAxis($inputs, dim1 => 0, dim2 => 1);
}
else
{
assert($axis == 0, "Unsupported layout $layout");
}
}
else
{
assert(@$inputs == $length);
$inputs = [map { AI::MXNet::Symbol->expand_dims($_, axis => 0) } @{ $inputs }];
$inputs = AI::MXNet::Symbol->Concat(@{ $inputs }, dim => 0);
}
$begin_state //= $self->begin_state;
my $states = $begin_state;
my @states = @{ $states };
my %states;
if($self->_mode eq 'lstm')
{
%states = (state => $states[0], state_cell => $states[1]);
}
lib/AI/MXNet/RNN/Cell.pm
my $p = 0;
for my $cell (@{ $self->_cells })
{
assert(not $cell->isa('AI::MXNet::BidirectionalCell'));
my $n = scalar(@{ $cell->state_info });
my $state = [@{ $states }[$p..$p+$n-1]];
$p += $n;
($inputs, $state) = &{$cell}($inputs, $state);
push @next_states, $state;
}
return ($inputs, [map { @$_} @next_states]);
}
method unroll(
Int $length,
Maybe[AI::MXNet::Symbol|ArrayRef[AI::MXNet::Symbol]] :$inputs=,
Maybe[AI::MXNet::Symbol|ArrayRef[AI::MXNet::Symbol]] :$begin_state=,
Str :$input_prefix='',
Str :$layout='NTC',
Maybe[Bool] :$merge_outputs=
)
lib/AI/MXNet/RNN/Cell.pm
($inputs, $states) = $cell->unroll(
$length,
inputs => $inputs,
input_prefix => $input_prefix,
begin_state => $states,
layout => $layout,
merge_outputs => ($i < $num_cells-1) ? undef : $merge_outputs
);
push @next_states, $states;
}, $self->_cells);
return ($inputs, [map { @{ $_ } } @next_states]);
}
package AI::MXNet::RNN::BidirectionalCell;
use Mouse;
use AI::MXNet::Base;
extends 'AI::MXNet::RNN::Cell::Base';
=head1 NAME
AI::MXNet::RNN::BidirectionalCell
lib/AI/MXNet/RNN/Cell.pm
Str :$input_prefix='',
Str :$layout='NTC',
Maybe[Bool] :$merge_outputs=
)
{
my $axis = index($layout, 'T');
if(not defined $inputs)
{
$inputs = [
map { AI::MXNet::Symbol->Variable("${input_prefix}t${_}_data") } (0..$length-1)
];
}
elsif(blessed($inputs))
{
assert(
(@{ $inputs->list_outputs() } == 1),
"unroll doesn't allow grouped symbol as input. Please "
."convert to list first or let unroll handle slicing"
);
$inputs = [ @{ AI::MXNet::Symbol->SliceChannel(
lib/AI/MXNet/RNN/Cell.pm
method call(AI::MXNet::Symbol $inputs, SymbolOrArrayOfSymbols $states)
{
my ($output, $states) = &{$self->base_cell}($inputs, $states);
if($self->dropout_outputs > 0)
{
$output = AI::MXNet::Symbol->Dropout(data => $output, p => $self->dropout_outputs);
}
if($self->dropout_states > 0)
{
$states = [map { AI::MXNet::Symbol->Dropout(data => $_, p => $self->dropout_states) } @{ $states }];
}
return ($output, $states);
}
package AI::MXNet::RNN::ZoneoutCell;
use Mouse;
use AI::MXNet::Base;
extends 'AI::MXNet::RNN::ModifierCell';
has [qw/zoneout_outputs zoneout_states/] => (is => 'ro', isa => 'Num', default => 0);
has 'prev_output' => (is => 'rw', init_arg => undef);
lib/AI/MXNet/RNN/Cell.pm
num_outputs => $length,
squeeze_axis => 1
) }];
}
}
else
{
assert(not defined $length or @$inputs == $length);
if($merge)
{
$inputs = [map { AI::MXNet::Symbol->expand_dims($_, axis=>$axis) } @{ $inputs }];
$inputs = AI::MXNet::Symbol->Concat(@{ $inputs }, dim=>$axis);
$in_axis = $axis;
}
}
if(blessed($inputs) and $axis != $in_axis)
{
$inputs = AI::MXNet::Symbol->swapaxes($inputs, dim0=>$axis, dim1=>$in_axis);
}
return ($inputs, $axis);
lib/AI/MXNet/RNN/IO.pm
AI::MXNet::RNN::IO - Functions for constructing recurrent neural networks.
=cut
=head1 DESCRIPTION
Functions for constructing recurrent neural networks.
=cut
=head2 encode_sentences
Encode sentences and (optionally) build a mapping
from string tokens to integer indices. Unknown tokens
will be added to the vocabulary.
Parameters
----------
$sentences : array ref of array refs of str
A array ref of sentences to encode. Each sentence
should be a array ref of string tokens.
:$vocab : undef or hash ref of str -> int
Optional input Vocabulary
lib/AI/MXNet/RNN/IO.pm
provide_data provide_label
idx curr_idx
/] => (is => 'rw', init_arg => undef);
sub BUILD
{
my $self = shift;
if(not defined $self->buckets)
{
my @buckets;
my $p = pdl([map { scalar(@$_) } @{ $self->sentences }]);
enumerate(sub {
my ($i, $j) = @_;
if($j >= $self->batch_size)
{
push @buckets, $i;
}
}, $p->histogram(1,0,$p->max+1)->unpdl);
$self->buckets(\@buckets);
}
@{ $self->buckets } = sort { $a <=> $b } @{ $self->buckets };
my $ndiscard = 0;
$self->data([map { [] } 0..@{ $self->buckets }-1]);
for my $i (0..@{$self->sentences}-1)
{
my $buck = bisect_left($self->buckets, scalar(@{ $self->sentences->[$i] }));
if($buck == @{ $self->buckets })
{
$ndiscard += 1;
next;
}
my $buff = AI::MXNet::NDArray->full(
[$self->buckets->[$buck]],
$self->invalid_label,
dtype => $self->dtype
)->aspdl;
$buff->slice([0, @{ $self->sentences->[$i] }-1]) .= pdl($self->sentences->[$i]);
push @{ $self->data->[$buck] }, $buff;
}
$self->data([map { pdl(PDL::Type->new(DTYPE_MX_TO_PDL->{$self->dtype}), $_) } @{$self->data}]);
AI::MXNet::Logging->warning("discarded $ndiscard sentences longer than the largest bucket.")
if $ndiscard;
$self->nddata([]);
$self->ndlabel([]);
$self->major_axis(index($self->layout, 'N'));
$self->default_bucket_key(max(@{ $self->buckets }));
my $shape;
if($self->major_axis == 0)
{
$shape = [$self->batch_size, $self->default_bucket_key];
lib/AI/MXNet/RecordIO.pm
around BUILDARGS => sub {
my $orig = shift;
my $class = shift;
if(@_ == 4)
{
return $class->$orig(flag => $_[0], label => $_[1], id => $_[2], id2 => $_[3]);
}
return $class->$orig(@_);
};
my @order = qw/flag label id id2/;
use overload '@{}' => sub { my $self = shift; [map { $self->$_ } @order] };
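# Sketch: array dereference yields the fields in order, so a hypothetical
# $header unpacks as:
# my ($flag, $label, $id, $id2) = @{ $header };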
package AI::MXNet::RecordIO;
=head2 unpack
unpack a MXImageRecord to a string
Parameters
----------
s : str
lib/AI/MXNet/Rtc.pm
{
confess("grid_dims must be size of 3")
unless @{ $grid_dims } == 3;
confess("block_dims must be size of 3")
unless @{ $block_dims } == 3;
check_call(
AI::MXNetCAPI::RtcPush(
$self->handle,
scalar(@$inputs),
scalar(@$outputs),
[map { $_->handle } @$inputs],
[map { $_->handle } @$outputs],
@{ $grid_dims },
@{ $block_dims }
)
);
}
1;
lib/AI/MXNet/Symbol.pm
'/=' => \&idivide,
'**' => \&power,
'%' => \&mod,
'==' => \&equal,
'!=' => \&not_equal,
'>' => \&greater,
'>=' => \&greater_equal,
'<' => \&lesser,
'<=' => \&lesser_equal,
'&{}' => sub { my $self = shift; sub { $self->call(@_) } },
'@{}' => sub { my $self = shift; [map { $self->slice($_) } @{ $self->list_outputs }] };
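# Sketch: these overloads let symbols compose arithmetically, e.g. for
# hypothetical AI::MXNet::Symbol objects $a and $b:
# my $c = $a + $b * 2;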
extends 'AI::MXNet::Symbol::Base';
has 'handle' => (is => 'rw', isa => 'SymbolHandle', required => 1);
sub DEMOLISH
{
check_call(AI::NNVMCAPI::SymbolFree(shift->handle));
}
method STORABLE_freeze($cloning)
lib/AI/MXNet/Symbol.pm
return $success ? $attr : undef;
}
=head2 list_attr
Get all attributes from the symbol.
Returns
-------
ret : hash ref of str to str
a dictionary mapping attribute keys to values
=cut
method list_attr()
{
my %ret;
my @attrs = @{ check_call(AI::MXNetCAPI::SymbolListAttrShallow($self->handle)) };
while(@attrs)
{
my $k = shift(@attrs);
my $v = shift(@attrs);
lib/AI/MXNet/Symbol.pm
}
=head2 attr_dict
Recursively get all attributes from the symbol and its children
Returns
-------
ret : hash ref of str to hash ref.
Returns a dict whose keys are names of the symbol and its children.
Values of the returned dict are dictionaries that map attribute keys to values.
=cut
method attr_dict()
{
my %ret;
my @attrs = @{ check_call(AI::MXNetCAPI::SymbolListAttr($self->handle)) };
my $size = @attrs/2;
for (my $i = 0; $i < $size; $i++)
{
my ($name, $key) = split(/\$/, $attrs[$i*2]);
lib/AI/MXNet/Symbol.pm
=cut
method infer_type(Str|Undef @args)
{
my ($positional_arguments, $kwargs, $kwargs_order) = _parse_arguments("Dtype", @args);
my $sdata = [];
my $keys = [];
if(@$positional_arguments)
{
@{ $sdata } = map { defined($_) ? DTYPE_STR_TO_MX->{ $_ } : -1 } @{ $positional_arguments };
}
else
{
@{ $keys } = @{ $kwargs_order };
@{ $sdata } = map { DTYPE_STR_TO_MX->{ $_ } } @{ $kwargs }{ @{ $kwargs_order } };
}
my ($arg_type, $out_type, $aux_type, $complete) = check_call(AI::MXNetCAPI::SymbolInferType(
$self->handle,
scalar(@{ $sdata }),
$keys,
$sdata
)
);
if($complete)
{
return (
[ map { DTYPE_MX_TO_STR->{ $_ } } @{ $arg_type }],
[ map { DTYPE_MX_TO_STR->{ $_ } } @{ $out_type }],
[ map { DTYPE_MX_TO_STR->{ $_ } } @{ $aux_type }]
);
}
else
{
return (undef, undef, undef);
}
}
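# A usage sketch (the input name 'data' is an assumption):
# my ($arg_types, $out_types, $aux_types) = $sym->infer_type(data => 'float32');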
=head2 infer_shape
lib/AI/MXNet/Symbol.pm
HashRef[AI::MXNet::NDArray]|ArrayRef[AI::MXNet::NDArray] $args,
ArrayRef[Str] $arg_names,
Bool $allow_missing=0
)
{
my ($arg_handles, $arg_arrays) = ([], []);
if(ref $args eq 'ARRAY')
{
confess("Length of $arg_key do not match number of arguments")
unless @$args == @$arg_names;
@{ $arg_handles } = map { $_->handle } @{ $args };
$arg_arrays = $args;
}
else
{
my %tmp = ((map { $_ => undef } @$arg_names), %$args);
if(not $allow_missing and grep { not defined } values %tmp)
{
my ($missing) = grep { not defined $tmp{ $_ } } (keys %tmp);
confess("key $missing is missing in $arg_key");
}
for my $name (@$arg_names)
{
push @$arg_handles, defined($tmp{ $name }) ? $tmp{ $name }->handle : undef;
push @$arg_arrays, defined($tmp{ $name }) ? $tmp{ $name } : undef;
}
lib/AI/MXNet/Symbol.pm view on Meta::CPAN
The device context on which the generated executor runs.
:$grad_req: string
{'write', 'add', 'null'}, or an array ref of str, or a hash ref of str to str, optional
Specifies how to update the gradient into args_grad.
- 'write' means the gradient is written to the specified args_grad NDArray every time.
- 'add' means the gradient is added to the specified NDArray every time.
- 'null' means no action is taken, and the gradient may not be calculated.
:$type_dict : hash ref of str->Dtype
Input type map, name->dtype
:$group2ctx : hash ref of string to AI::MXNet::Context
The mapping of the ctx_group attribute to the context assignment.
:$shapes : hash ref of str->Shape
Input shape map, name->shape
:$shared_arg_names : Maybe[ArrayRef[Str]]
The argument names whose 'NDArray' of shared_exec can be reused for initializing
the current executor.
:$shared_exec : Maybe[AI::MXNet::Executor]
The executor whose arg_arrays, grad_arrays, and aux_arrays can be
reused for initializing the current executor.
:$shared_buffer : Maybe[HashRef[AI::MXNet::NDArray]]
The dict mapping argument names to the `NDArray` that can be reused for initializing
the current executor. This buffer will be checked for reuse if one argument name
of the current executor is not found in `shared_arg_names`.
Returns
-------
$executor : AI::MXNet::Executor
The generated Executor
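Example: a minimal sketch that binds on CPU from input shapes alone (the shape values are illustrative):
my $exec = $net->simple_bind(
ctx => mx->cpu(0),
shapes => { data => [32, 100] }
);
$exec->forward(1);    # training-mode forward pass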
=cut
method simple_bind(
lib/AI/MXNet/Symbol.pm view on Meta::CPAN
{
assert((keys %{ $grad_req } != 0), 'grad_req in simple_bind cannot be an empty hash');
while(my ($k, $v) = each %{ $grad_req })
{
push @provided_grad_req_names, $k;
push @provided_grad_req_types, $v;
}
$provided_req_type_list_len = @provided_grad_req_types;
}
}
my $num_ctx_map_keys = 0;
my @ctx_map_keys;
my @ctx_map_dev_types;
my @ctx_map_dev_ids;
if(defined $group2ctx)
{
while(my ($k, $v) = each %{ $group2ctx })
{
push @ctx_map_keys, $k;
push @ctx_map_dev_types, $v->device_type_id;
push @ctx_map_dev_ids, $v->device_id;
}
$num_ctx_map_keys = @ctx_map_keys;
}
my @shared_arg_name_list;
if(defined $shared_arg_names)
{
@shared_arg_name_list = @{ $shared_arg_names };
}
my %shared_data;
if(defined $shared_buffer)
{
lib/AI/MXNet/Symbol.pm view on Meta::CPAN
$exe_handle
);
eval {
($updated_shared_data, $in_arg_handles, $arg_grad_handles, $aux_state_handles, $exe_handle)
=
check_call(
AI::MXNetCAPI::ExecutorSimpleBind(
$self->handle,
$ctx->device_type_id,
$ctx->device_id,
$num_ctx_map_keys,
\@ctx_map_keys,
\@ctx_map_dev_types,
\@ctx_map_dev_ids,
$provided_req_type_list_len,
\@provided_grad_req_names,
\@provided_grad_req_types,
scalar(@provided_arg_shape_names),
\@provided_arg_shape_names,
\@provided_arg_shape_data,
\@provided_arg_shape_idx,
$num_provided_arg_types,
\@provided_arg_type_names,
\@provided_arg_type_data,
lib/AI/MXNet/Symbol.pm view on Meta::CPAN
)->Purity(1)->Deepcopy(1)->Terse(1)->Dump
);
}
if(defined $shared_buffer)
{
while(my ($k, $v) = each %{ $updated_shared_data })
{
$shared_buffer->{$k} = AI::MXNet::NDArray->new(handle => $v);
}
}
my @arg_arrays = map { AI::MXNet::NDArray->new(handle => $_) } @{ $in_arg_handles };
my @grad_arrays = map { defined $_ ? AI::MXNet::NDArray->new(handle => $_) : undef } @{ $arg_grad_handles };
my @aux_arrays = map { AI::MXNet::NDArray->new(handle => $_) } @{ $aux_state_handles };
my $executor = AI::MXNet::Executor->new(
handle => $exe_handle,
symbol => $self,
ctx => $ctx,
grad_req => $grad_req,
group2ctx => $group2ctx
);
$executor->arg_arrays(\@arg_arrays);
$executor->grad_arrays(\@grad_arrays);
$executor->aux_arrays(\@aux_arrays);
lib/AI/MXNet/Symbol.pm view on Meta::CPAN
Bind current symbol to get an executor.
Parameters
----------
:$ctx : AI::MXNet::Context
The device context on which the generated executor runs.
:$args : HashRef[AI::MXNet::NDArray]|ArrayRef[AI::MXNet::NDArray]
Input arguments to the symbol.
- If type is array ref of NDArray, the position is in the same order of list_arguments.
- If type is hash ref of str to NDArray, then it maps the name of arguments
to the corresponding NDArray.
- In either case, all the arguments must be provided.
:$args_grad : Maybe[HashRef[AI::MXNet::NDArray]|ArrayRef[AI::MXNet::NDArray]]
When specified, args_grad provide NDArrays to hold
the result of gradient value in backward.
- If type is array ref of NDArray, the position is in the same order of list_arguments.
- If type is hash ref of str to NDArray, then it maps the name of arguments
to the corresponding NDArray.
- When the type is hash ref of str to NDArray, users only need to provide the dict
for needed argument gradient.
Only the specified argument gradient will be calculated.
:$grad_req : {'write', 'add', 'null'}, or array ref of str or hash ref of str to str, optional
Specifies how to update the gradient into args_grad.
- 'write' means the gradient is written to the specified args_grad NDArray every time.
- 'add' means the gradient is added to the specified NDArray every time.
- 'null' means no action is taken, and the gradient may not be calculated.
:$aux_states : array ref of NDArray, or hash ref of str to NDArray, optional
Input auxiliary states to the symbol, only need to specify when
list_auxiliary_states is not empty.
- If type is array ref of NDArray, the position is in the same order of list_auxiliary_states
- If type is hash ref of str to NDArray, then it maps the name of auxiliary_states
to the corresponding NDArray,
- In either case, all the auxiliary_states need to be provided.
:$group2ctx : hash ref of string to AI::MXNet::Context
The mapping of the ctx_group attribute to the context assignment.
:$shared_exec : AI::MXNet::Executor
Executor to share memory with. This is intended for runtime reshaping, variable length
sequences, etc. The returned executor shares state with shared_exec, and should not be
used in parallel with it.
Returns
-------
$executor : AI::MXNet::Executor
The generated Executor
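Example: a minimal sketch binding a small expression with explicit argument NDArrays:
my $a = mx->sym->Variable('a');
my $b = mx->sym->Variable('b');
my $exec = ($a + $b)->bind(
ctx => mx->cpu(0),
args => { a => mx->nd->ones([2, 3]), b => mx->nd->ones([2, 3]) }
);
$exec->forward(0);
print $exec->outputs->[0]->aspdl;    # all 2s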
lib/AI/MXNet/Symbol.pm view on Meta::CPAN
if(not defined $aux_states)
{
$aux_states = [];
}
($aux_args_handle, $aux_states) = $self->_get_ndarray_inputs(
'aux_states', $aux_states, $self->list_auxiliary_states()
);
# setup requirements
my $req_map = { null => 0, write => 1, add => 3 };
my $req_array = [];
if(not ref $grad_req)
{
confess('grad_req must be one of "null,write,add"')
unless exists $req_map->{ $grad_req };
@{ $req_array } = (($req_map->{ $grad_req }) x @{ $listed_arguments });
}
elsif(ref $grad_req eq 'ARRAY')
{
@{ $req_array } = map { $req_map->{ $_ } } @{ $grad_req };
}
else
{
for my $name (@{ $listed_arguments })
{
if(exists $grad_req->{ $name })
{
push @{ $req_array }, $req_map->{ $grad_req->{ $name } };
}
else
{
push @{ $req_array }, 0;
}
}
}
my $ctx_map_keys = [];
my $ctx_map_dev_types = [];
my $ctx_map_dev_ids = [];
if(defined $group2ctx)
{
while(my ($key, $val) = each %{ $group2ctx })
{
push @{ $ctx_map_keys } , $key;
push @{ $ctx_map_dev_types }, $val->device_type_id;
push @{ $ctx_map_dev_ids }, $val->device_id;
}
}
# note: "my $x = ... if ..." leaves $x in an undefined state when the
# condition is false, so make the undef branch explicit
my $shared_handle = $shared_exec ? $shared_exec->handle : undef;
my $handle = check_call(AI::MXNetCAPI::ExecutorBindEX(
$self->handle,
$ctx->device_type_id,
$ctx->device_id,
scalar(@{ $ctx_map_keys }),
$ctx_map_keys,
$ctx_map_dev_types,
$ctx_map_dev_ids,
scalar(@{ $args }),
$args_handle,
$args_grad_handle,
$req_array,
scalar(@{ $aux_states }),
$aux_args_handle,
$shared_handle
)
);
my $executor = AI::MXNet::Executor->new(
lib/AI/MXNet/Symbol.pm view on Meta::CPAN
Parameters
----------
:$ctx : Context
The device context on which the generated executor runs.
Optional, defaults to cpu(0)
:$args : array ref of NDArray or hash ref of NDArray
- If the type is an array ref of NDArray, the position is in the same order of list_arguments.
- If the type is a hash of str to NDArray, then it maps the name of the argument
to the corresponding NDArray.
- In either case, all arguments must be provided.
Returns
-------
result : an array ref of NDArrays corresponding to the values
taken by each symbol when evaluated on given args.
When called on a single symbol (not a group),
the result will be an array ref with one element.
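Example (a hedged sketch):
my $a = mx->sym->Variable('a');
my $res = ($a * 2)->eval(ctx => mx->cpu(0), args => { a => mx->nd->ones([2, 2]) });
print $res->[0]->aspdl;    # all 2s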
lib/AI/MXNet/Symbol.pm view on Meta::CPAN
List of symbols to be grouped.
Returns
-------
sym : Symbol
The created group symbol.
=cut
method Group(ArrayRef[AI::MXNet::Symbol] $symbols)
{
my @handles = map { $_->handle } @{ $symbols };
my $handle = check_call(AI::MXNetCAPI::SymbolCreateGroup(scalar(@handles), \@handles));
return __PACKAGE__->new(handle => $handle);
}
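# e.g. bundle two heads into one symbol for a multi-loss module (sketch;
# $softmax_out and $regression_out are illustrative symbols):
# my $group = mx->sym->Group([$softmax_out, $regression_out]);
# @{ $group->list_outputs } then contains the outputs of both, in order.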
=head2 load
Load symbol from a JSON file.
You can also use Storable to do the job if you only work with Perl.
The advantage of load/save is that the file is language agnostic.
lib/AI/MXNet/Symbol/Base.pm view on Meta::CPAN
}
my $num_args = scalar(@args) + scalar(keys %kwargs);
my $keys = [];
my $args = [];
for my $key (keys %kwargs)
{
push @$keys, $key;
push @$args, $kwargs{ $key }->handle;
}
@$args = map { $_->handle } @args if @args;
check_call(
AI::NNVMCAPI::SymbolCompose(
$self->handle, $name, $num_args, $keys, $args
)
);
}
# Create an atomic symbol function by handle and function name
func _make_atomic_symbol_function($handle, $name)
{
lib/AI/MXNet/TestUtils.pm view on Meta::CPAN
while(my ($name, $arr) = each %arg_dict)
{
$arr .= $arg_params->{$name};
}
%aux_dict = %{ $exe->aux_dict };
while(my ($name, $arr) = each %aux_dict)
{
$arr .= $aux_params->{$name};
}
}
my @dtypes = map { $_->outputs->[0]->dtype } @exe_list;
my $max_idx = pdl(map { $dtypes{$_} } @dtypes)->maximum_ind;
my $gt = $ground_truth;
if(not defined $gt)
{
$gt = { %{ $exe_list[$max_idx]->output_dict } };
if($grad_req ne 'null')
{
%{$gt} = (%{$gt}, %{ $exe_list[$max_idx]->grad_dict });
}
}
lib/AI/MXNet/TestUtils.pm view on Meta::CPAN
}
return $gt;
}
sub zip
{
# call $sub once per index, passing the i-th element of each array ref
my ($sub, @arrays) = @_;
my $len = @{ $arrays[0] };
for (my $i = 0; $i < $len; $i++)
{
$sub->(map { $_->[$i] } @arrays);
}
}
sub enumerate
{
# like zip, but prepends the index as the first callback argument
my ($sub, @arrays) = @_;
my $len = @{ $arrays[0] };
zip($sub, [0..$len-1], @arrays);
}
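# Usage sketch for the helpers above:
# zip(sub { my ($k, $v) = @_; print "$k=$v\n" }, ['a', 'b'], [1, 2]);
# enumerate(sub { my ($i, $v) = @_; print "$i: $v\n" }, ['x', 'y']);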
lib/AI/MXNet/Visualization.pm view on Meta::CPAN
{
$show_shape = 1;
my $internals = $symbol->get_internals;
my (undef, $out_shapes, undef) = $internals->infer_shape(%{ $shape });
Carp::confess("Input shape is incomplete")
unless defined $out_shapes;
@shape_dict{ @{ $internals->list_outputs } } = @{ $out_shapes };
}
my $conf = decode_json($symbol->tojson);
my $nodes = $conf->{nodes};
my %heads = map { $_ => 1 } @{ $conf->{heads}[0] };
if($positions->[-1] <= 1)
{
$positions = [map { int($line_length * $_) } @{ $positions }];
}
# header names for the different log elements
my $to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Previous Layer'];
my $print_row = sub { my ($fields, $positions) = @_;
my $line = '';
enumerate(sub {
my ($i, $field) = @_;
$line .= $field//'';
$line = substr($line, 0, $positions->[$i]);
$line .= ' ' x ($positions->[$i] - length($line));
lib/AI/MXNet/Visualization.pm view on Meta::CPAN
my $nodes = $conf->{nodes};
my %node_attr = (
qw/ shape box fixedsize true
width 1.3 height 0.8034 style filled/,
%{ $node_attrs }
);
my $dot = AI::MXNet::Visualization::PythonGraphviz->new(
graph => GraphViz->new(name => $title),
format => $save_format
);
# color map
my @cm = (
"#8dd3c7", "#fb8072", "#ffffb3", "#bebada", "#80b1d3",
"#fdb462", "#b3de69", "#fccde5"
);
# make nodes
my %hidden_nodes;
for my $node (@{ $nodes })
{
my $op = $node->{op};
my $name = $node->{name};
lib/AI/MXNet/Visualization.pm view on Meta::CPAN
}
return $dot;
}
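# Typical use (a hedged sketch; the mx->viz shortcut is assumed to map to
# this package):
# my $dot = mx->viz->plot_network($net, shape => { data => [1, 3, 28, 28] });
# $dot->render('net.png');    # via the render method below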
package AI::MXNet::Visualization::PythonGraphviz;
use Mouse;
use AI::MXNet::Types;
has 'format' => (
is => 'ro',
isa => enum([qw/debug canon text ps hpgl pcl mif
pic gd gd2 gif jpeg png wbmp cmapx
imap vdx vrml vtx mp fig svg svgz
plain/]
)
);
has 'graph' => (is => 'ro', isa => 'GraphViz');
method render($output=)
{
my $method = 'as_' . $self->format;
return $self->graph->$method($output);
}
t/test_kvstore.t view on Meta::CPAN
my $shape = [4, 4];
my $keys = [5,7,9];
sub init_kv
{
# init kv
my $kv = mx->kv->create();
# single
$kv->init(3, mx->nd->zeros($shape));
# list
$kv->init($keys, [map { mx->nd->zeros($shape) } 0..@$keys-1]);
return $kv;
}
sub check_diff_to_scalar
{
# assert A == x
my ($A, $x) = @_;
ok(($A - $x)->aspdl->abs->sum == 0);
}
t/test_kvstore.t view on Meta::CPAN
$kv->init(3, mx->nd->ones($shape)*4);
my $a = mx->nd->zeros($shape);
$kv->pull(3, out=>$a);
check_diff_to_scalar($a, 4);
}
sub test_list_kv_pair
{
# list key-value pair push & pull
my $kv = init_kv();
$kv->push($keys, [map {mx->nd->ones($shape)*4} 0..@$keys-1]);
my $val = [map { mx->nd->empty($shape) } 0..@$keys-1];
$kv->pull($keys, out => $val);
for my $v (@$val)
{
check_diff_to_scalar($v, 4);
}
}
sub test_aggregator
{
# aggregate values on multiple devices
my $kv = init_kv();
# devices
my $num_devs = 4;
my $devs = [map { mx->cpu($_) } 0..$num_devs-1];
# single
my $vals = [map { mx->nd->ones($shape, ctx => $_) } @$devs];
$kv->push(3, $vals);
$kv->pull(3, out => $vals);
for my $v (@$vals)
{
check_diff_to_scalar($v, $num_devs);
}
# list
$vals = [map { [map { mx->nd->ones($shape, ctx => $_)*2 } @$devs] } 0..@$keys-1];
$kv->push($keys, $vals);
$kv->pull($keys, out => $vals);
for my $vv (@{ $vals })
{
for my $v (@{ $vv })
{
check_diff_to_scalar($v, $num_devs * 2);
}
}
t/test_kvstore.t view on Meta::CPAN
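# test_updater below installs an updater callback defined elsewhere in this
# file; a typical one accumulates each pushed value into the stored NDArray:
# sub updater
# {
#     my ($key, $recv, $local) = @_;
#     $local += $recv;    # NDArray in-place add
# }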
sub test_updater
{
my ($dev) = @_;
$dev //= 'cpu';
my $kv = init_kv();
$kv->_set_updater(\&updater);
# devices
my $num_devs = 4;
my $devs = [map { mx->$dev($_) } 0..$num_devs-1];
# single
my $vals = [map { mx->nd->ones($shape, ctx => $_) } @$devs];
$kv->push(3, $vals);
$kv->pull(3, out => $vals);
for my $v (@$vals)
{
check_diff_to_scalar($v, $num_devs);
}
# list
$vals = [map { [map { mx->nd->ones($shape, ctx => $_) } @$devs] } 0..@$keys-1];
my $num_push = 10;
for my $i (0..$num_push-1)
{
$kv->push($keys, $vals);
}
$kv->pull($keys, out => $vals);
for my $vv (@{ $vals })
t/test_model_parallel.t view on Meta::CPAN
my $exec1 = $net->bind(
ctx => $ctx1,
args => $arr,
args_grad => $arr_grad,
group2ctx => { dev1 => $ctx1, dev2 => $ctx2 }
);
$arr->[0] .= 1;
$arr->[1] .= 2;
$arr->[2] .= 3;
my $arr2 = [map { $_->copyto($ctx1) } @$arr];
my $arr_grad2 = [map { $_->copyto($ctx1) } @$arr_grad];
my $exec2 = $net->bind(
ctx => $ctx1,
args => $arr2,
args_grad => $arr_grad2
);
$exec1->forward(1);
$exec2->forward(1);
ok(reldiff($exec1->outputs->[0]->aspdl, $exec2->outputs->[0]->aspdl) < 1e-6);
my $out_grad = mx->nd->empty($shape, ctx => $ctx1);