view release on metacpan or search on metacpan
lib/AI/MXNet/CachedOp.pm view on Meta::CPAN
%kwargs = @_;
}
my $out = delete $kwargs{out};
if(%kwargs)
{
confess(
"AI::MXNet::CachedOp::call got unexpected keyword argument(s): ".
join(', ', keys %kwargs)
);
}
my $original_output;
if(defined $out)
{
$original_output = $out;
if(blessed($out))
{
$out = [$out];
}
}
else
{
$out = [];
}
my $output = check_call(
AI::MXNetCAPI::InvokeCachedOp(
$self->handle,
scalar(@args),
[map { $_->handle } @args],
[map { $_->handle } @$out]
)
);
return $original_output if defined $original_output;
if(@$output == 1)
{
return AI::MXNet::NDArray->new(handle => $output->[0]);
}
else
{
return [map { AI::MXNet::NDArray->new(handle => $_) } @$output];
}
}
lib/AI/MXNet/Executor.pm view on Meta::CPAN
The returned executor shares state with the current one,
and cannot be used in parallel with it.
Parameters
----------
$kwargs : HashRef[Shape]
new shape for arguments.
:$partial_shaping : bool
Whether to allow changing the shape of unspecified arguments.
:$allow_up_sizing : bool
Whether to allow allocating new ndarrays that are larger than the originals.
Returns
-------
$exec : AI::MXNet::Executor
A new executor that shares memory with self.
=cut
method reshape(HashRef[Shape] $kwargs, Int :$partial_shaping=0, Int :$allow_up_sizing=0)
{
lib/AI/MXNet/Executor.pm view on Meta::CPAN
$partial_shaping
or
exists $kwargs->{ $name }
or
join(',', @{ $new_shape }) eq join(',', @{ $arr->shape })
)
{
if(AI::MXNet::NDArray->size($new_shape) > $arr->size)
{
confess(
"New shape of arg:$name larger than original. "
."First making a big executor and then down sizing it "
."is more efficient than the reverse."
."If you really want to up size, set \$allow_up_sizing=1 "
."to enable allocation of new arrays."
) unless $allow_up_sizing;
$new_arg_dict{ $name } = AI::MXNet::NDArray->empty(
$new_shape,
ctx => $arr->context,
dtype => $arr->dtype
);
lib/AI/MXNet/Executor.pm view on Meta::CPAN
$i = 0;
for my $name (@{ $self->_symbol->list_auxiliary_states() })
{
my $new_shape = $aux_shapes->[$i];
my $arr = $self->aux_arrays->[$i];
if($partial_shaping or join(',', @{ $new_shape }) eq join (',', @{ $arr->shape }))
{
if(AI::MXNet::NDArray->size($new_shape) > $arr->size)
{
confess(
"New shape of arg:$name larger than original. "
."First making a big executor and then down sizing it "
."is more efficient than the reverse."
."If you really want to up size, set \$allow_up_sizing=1 "
."to enable allocation of new arrays."
) unless $allow_up_sizing;
$new_aux_dict{ $name } = AI::MXNet::NDArray->empty(
$new_shape,
ctx => $arr->context,
dtype => $arr->dtype
);
lib/AI/MXNet/Executor/Group.pm view on Meta::CPAN
return $executor;
}
=head2 _sliced_shape
Get the sliced shapes for the i-th executor.
Parameters
----------
shapes : array ref of (str, array ref)
The original (name, shape) pairs.
i : int
Which executor we are dealing with.
=cut
method _sliced_shape(ArrayRef[AI::MXNet::DataDesc] $shapes, Int $i, ArrayRef[Int] $major_axis)
{
my @sliced_shapes;
zip(sub {
my ($desc, $axis) = @_;
my @shape = @{ $desc->shape };
lib/AI/MXNet/Module/Bucketing.pm view on Meta::CPAN
has '_work_load_list' => (is => 'rw', init_arg => 'work_load_list', isa => 'ArrayRef[Num]');
has '_curr_module' => (is => 'rw', init_arg => undef);
has '_curr_bucket_key' => (is => 'rw', init_arg => undef);
has '_buckets' => (is => 'rw', init_arg => undef, default => sub { +{} });
has '_fixed_param_names' => (is => 'rw', isa => 'ArrayRef[Str]', init_arg => 'fixed_param_names');
has '_state_names' => (is => 'rw', isa => 'ArrayRef[Str]', init_arg => 'state_names');
has '_params_dirty' => (is => 'rw', init_arg => undef);
sub BUILD
{
my ($self, $original_params) = @_;
$self->_fixed_param_names([]) unless defined $original_params->{fixed_param_names};
$self->_state_names([]) unless defined $original_params->{state_names};
$self->_params_dirty(0);
my ($symbol, $data_names, $label_names) = &{$self->_sym_gen}($self->_default_bucket_key);
$self->_check_input_names($symbol, $data_names//[], "data", 1);
$self->_check_input_names($symbol, $label_names//[], "label", 0);
$self->_check_input_names($symbol, $self->_state_names, "state", 1);
$self->_check_input_names($symbol, $self->_fixed_param_names, "fixed_param", 1);
}
method _reset_bind()
{
lib/AI/MXNet/Module/Bucketing.pm view on Meta::CPAN
$mod->borrow_optimizer($self->_curr_module);
}
}
$self->optimizer_initialized(1);
}
method prepare(AI::MXNet::DataBatch $data_batch)
{
assert($self->binded and $self->params_initialized);
## perform bind if have not done so yet
my $original_bucket_key = $self->_curr_bucket_key;
$self->switch_bucket(
bucket_key => $data_batch->bucket_key,
data_shapes => $data_batch->provide_data,
label_shapes => $data_batch->provide_label
);
# switch back
$self->switch_bucket(bucket_key => $original_bucket_key);
}
method forward(
AI::MXNet::DataBatch $data_batch,
Bool :$is_train=
)
{
assert($self->binded and $self->params_initialized);
$self->switch_bucket(
bucket_key => $data_batch->bucket_key,
lib/AI/MXNet/NDArray.pm view on Meta::CPAN
=cut
method ndim()
{
scalar(@{ $self->shape });
}
=head2 moveaxis
Moves the 'source' axis into the 'destination' position
while leaving the other axes in their original order
Parameters
----------
source : int
Original position of the axis to move.
destination : int
Destination position for each of the original axes.
Returns
-------
result : NDArray
Array with moved axes.
Examples
--------
> $X = mx->nd->array([[1, 2, 3],
[4, 5, 6]]);
lib/AI/MXNet/NDArray.pm view on Meta::CPAN
Parameters
----------
Shape $shape : the shape to broadcast
=cut
method broadcast_to(Shape $shape)
{
my $cur_shape = $self->shape;
my $err_str = "operands could not be broadcast together with remapped shapes"
."[original->remapped]: [@$cur_shape] and requested shape [@$shape]";
if(@$shape < @$cur_shape)
{
confess($err_str);
}
@$cur_shape = ((1)x(@$shape - @$cur_shape), @$cur_shape);
my $cur_shape_arr = pdl($cur_shape);
my $broadcasting_axes = ($cur_shape_arr != pdl($shape))->which->unpdl;
if (grep { $cur_shape->[$_] != 1 } @$broadcasting_axes)
{
confess($err_str);
lib/AI/MXNet/NDArray/Base.pm view on Meta::CPAN
else
{
push @pos_args, $i;
}
if(@pos_args > @arguments)
{
die "Too many positional arguments";
}
}
@kwargs{ @arguments[0..$#pos_args] } = @pos_args;
my $original_output;
my $output_vars;
if(grep { $_ eq 'out' } keys %kwargs)
{
$output_vars = delete $kwargs{out};
$original_output = $output_vars;
unless(ref($output_vars) and ref($output_vars) eq 'ARRAY')
{
$output_vars = [$output_vars];
}
}
else
{
$output_vars = [];
}
for my $key (keys %kwargs)
lib/AI/MXNet/NDArray/Base.pm view on Meta::CPAN
if ref $kwargs{ $key } eq 'ARRAY';
}
my $out = check_call(AI::MXNetCAPI::ImperativeInvoke(
$handle,
scalar(@ndargs),
\@ndargs,
[map { $_->handle } @$output_vars],
scalar(keys %kwargs),
\%kwargs)
);
return $original_output if $original_output;
if(@$out == 1)
{
return $class->new(handle => $out->[0]);
}
else
{
return [map { $class->new(handle => $_) } @$out];
}
};
$function_meta{ $generic_ndarray_function }{__name__} = $func_name;
lib/AI/MXNet/RNN/Cell.pm view on Meta::CPAN
----------
params : AI::MXNet::RNN::Params or undef
container for weight sharing between cells.
created if undef.
=cut
has [qw/_override_cell_params _cells/] => (is => 'rw', init_arg => undef);
sub BUILD
{
my ($self, $original_arguments) = @_;
$self->_override_cell_params(defined $original_arguments->{params});
$self->_cells([]);
}
=head2 add
Append a cell to the stack.
Parameters
----------
$cell : AI::MXNet::RNN::Cell::Base
lib/AI/MXNet/RNN/Cell.pm view on Meta::CPAN
l_cell => $l_cell,
r_cell => $r_cell,
@_
);
}
return $class->$orig(@_);
};
sub BUILD
{
my ($self, $original_arguments) = @_;
$self->_override_cell_params(defined $original_arguments->{params});
if($self->_override_cell_params)
{
assert(
($self->l_cell->_own_params and $self->r_cell->_own_params),
"Either specify params for BidirectionalCell ".
"or child cells, not both."
);
%{ $self->l_cell->params->_params } = (%{ $self->l_cell->params->_params }, %{ $self->params->_params });
%{ $self->r_cell->params->_params } = (%{ $self->r_cell->params->_params }, %{ $self->params->_params });
}
lib/AI/MXNet/Symbol.pm view on Meta::CPAN
method get_internals()
{
my $handle = check_call(AI::MXNetCAPI::SymbolGetInternals($self->handle));
return __PACKAGE__->new(handle => $handle);
}
=head2 get_children
Get a new grouped symbol whose output contains
inputs to output nodes of the original symbol
Returns
-------
sgroup : Symbol or undef
The children of the head node. If the symbol has no
inputs undef will be returned.
=cut
method get_children()
t/test_module.t view on Meta::CPAN
my $dshape2 = [10, 3, 32, 32];
my $lshape = [10];
my $mod = mx->mod->Module(symbol=>$sym, data_names=>['data1', 'data2'],
label_names=>['softmax_label']);
$mod->bind(data_shapes=>[['data1', $dshape1], ['data2', $dshape2]],
label_shapes=>[['softmax_label', $lshape]]);
$mod->init_params();
$mod->init_optimizer(optimizer_params=>{learning_rate => 0.01});
# Train with original data shapes
my $data_batch = mx->io->DataBatch(data=>[mx->nd->random_uniform(0, 9, $dshape1),
mx->nd->random_uniform(5, 15, $dshape2)],
label=>[mx->nd->ones($lshape)]);
$mod->forward($data_batch);
is_deeply($mod->get_outputs->[0]->shape, [$lshape->[0], $num_class]);
$mod->backward();
$mod->update();
# Train with different batch size
$dshape1 = [3, 3, 64, 64];