AI-MXNet

lib/AI/MXNet/Executor/Group.pm

package AI::MXNet::DataParallelExecutorGroup::_private;
use Mouse;

# Private storage class for the executor group's runtime state;
# init_arg => undef keeps these attributes out of the constructor.
has [qw/output_layouts label_layouts arg_names aux_names
        batch_size slices execs data_arrays
        label_arrays param_arrays grad_arrays aux_arrays
        data_layouts shared_data_arrays input_grad_arrays
        _default_execs state_arrays/
    ] => (is => 'rw', init_arg => undef);

package AI::MXNet::DataParallelExecutorGroup;
use Mouse;
use AI::MXNet::Base;
use List::Util qw(sum);

=head1 DESCRIPTION

    DataParallelExecutorGroup is a group of executors that live on a group of devices.
    This is a helper class used to implement data parallelization: each mini-batch is
    split across the devices and run in parallel. For example, with two contexts and a
    batch size of 128, each device processes a 64-example slice (weighted by workload,
    if one is given).

    Parameters for constructor
    ----------
    symbol : AI::MXNet::Symbol
        The common symbolic computation graph for all executors.
    contexts : ArrayRef[AI::MXNet::Context]
        An array ref of contexts.
    workload : ArrayRef[Num]
        If defined, an array ref of numbers specifying the workload to be assigned
        to each context. Larger numbers indicate heavier workloads.
    data_shapes : ArrayRef[NameShape|AI::MXNet::DataDesc]
        An array ref of [$name, $shape] array refs giving the shapes of the data. Note that the
        order is important and should match the order in which the `DataIter` provides the data.
    label_shapes : Maybe[ArrayRef[NameShape|AI::MXNet::DataDesc]]
        An array ref of [$name, $shape] array refs giving the shapes of the labels. Note that the
        order is important and should match the order in which the `DataIter` provides the labels.
    param_names : ArrayRef[Str]
        An array ref of strings giving the names of the parameters (e.g. weights, filters, etc.)
        in the computation graph.
    for_training : Bool
        Indicates whether the executors should be bound for training. When not training,
        memory for the gradients will not be allocated.
    inputs_need_grad : Bool
        Indicates whether the gradients with respect to the input data should be computed.
        This is currently unused; it will be useful for implementing composition of modules.
    shared_group : AI::MXNet::DataParallelExecutorGroup
        Defaults to undef. This is used in bucketing: when defined, it should be an executor
        group corresponding to a different bucket, i.e. to a different symbol with the same
        set of parameters (e.g. unrolled RNNs of different lengths). In this case the memory
        regions of the parameters will be shared.
    logger : Logger
        Default is AI::MXNet::Logging->get_logger.
    fixed_param_names: Maybe[ArrayRef[Str]]
        Indicates parameters to be fixed during training. Parameters in this array ref will
        have no space allocated for their gradients, and no gradients will be computed for them.
    grad_req : ArrayRef[GradReq]|HashRef[GradReq]|GradReq
        Requirement for gradient accumulation: 'write', 'add', or 'null' (defaults to 'write').
        Can be specified globally (a single Str) or per argument (array ref or hash ref).
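        For example, a per-argument hash ref (the argument names here are hypothetical):

            grad_req => { data => 'null', fc1_weight => 'write', fc1_bias => 'add' }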
    state_names: Maybe[ArrayRef[Str]]
        Names of the internal states of the symbol. States are similar to data and labels,
        but are not provided by the data iterator; they are initialized to zeros by default.
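    A minimal construction sketch (the network, shapes, and parameter names below are
    hypothetical; real argument names come from your symbol's list_arguments):

        use AI::MXNet qw(mx);

        my $data = mx->sym->Variable('data');
        my $fc   = mx->sym->FullyConnected(data => $data, num_hidden => 10);
        my $net  = mx->sym->SoftmaxOutput(data => $fc, name => 'softmax');

        my $group = AI::MXNet::DataParallelExecutorGroup->new(
            symbol       => $net,
            contexts     => [mx->cpu(0), mx->cpu(1)],
            data_shapes  => [['data', [128, 20]]],
            label_shapes => [['softmax_label', [128]]],
            param_names  => [qw(fullyconnected0_weight fullyconnected0_bias)],
            for_training => 1,
        );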
=cut

has 'symbol'            => (is => 'ro', isa => 'AI::MXNet::Symbol', required => 1);
has 'contexts'          => (is => 'ro', isa => 'ArrayRef[AI::MXNet::Context]', required => 1);
has 'workload'          => (is => 'ro', isa => 'ArrayRef[Num]', default => sub { [] });
has 'data_shapes'       => (is => 'rw', isa => 'ArrayRef[NameShape|AI::MXNet::DataDesc]', required => 1);
has 'label_shapes'      => (is => 'rw', isa => 'Maybe[ArrayRef[NameShape|AI::MXNet::DataDesc]]');
has 'param_names'       => (is => 'ro', isa => 'ArrayRef[Str]', required => 1);
has 'for_training'      => (is => 'ro', isa => 'Bool', required => 1);
has 'inputs_need_grad'  => (is => 'ro', isa => 'Bool', default  => 0);
has 'shared_group'      => (is => 'ro', isa => 'Maybe[AI::MXNet::DataParallelExecutorGroup]');
has 'logger'            => (is => 'ro', default => sub { AI::MXNet::Logging->get_logger });
has 'fixed_param_names' => (is => 'rw', isa => 'Maybe[ArrayRef[Str]]');
has 'state_names'       => (is => 'rw', isa => 'Maybe[ArrayRef[Str]]');
has 'grad_req'          => (is => 'rw', isa => 'ArrayRef[GradReq]|HashRef[GradReq]|GradReq', default => 'write');
has '_p'                => (is => 'rw', init_arg => undef);
sub BUILD
{
    my $self = shift;
    my $p = AI::MXNet::DataParallelExecutorGroup::_private->new;
    $p->arg_names($self->symbol->list_arguments);
    $p->aux_names($self->symbol->list_auxiliary_states);
    $p->execs([]);
    $self->_p($p);
    $self->grad_req('null') if not $self->for_training;
    $self->fixed_param_names([]) unless defined $self->fixed_param_names;
    $self->state_names([]) unless defined $self->state_names;
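    # Coerce raw [$name, $shape] pairs into AI::MXNet::DataDesc objects.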
    my $data_shapes = [];
    for my $d (@{ $self->data_shapes })
    {
        $d = AI::MXNet::DataDesc->new(name => $d->[0], shape => $d->[1])
            unless blessed $d;
        push @{ $data_shapes }, $d;
    }
    $self->data_shapes($data_shapes);
    if(defined $self->label_shapes)
    {
        my $label_shapes = [];
        for my $l (@{ $self->label_shapes })
        {
            $l = AI::MXNet::DataDesc->new(name => $l->[0], shape => $l->[1])
                unless blessed $l;
            push @{ $label_shapes }, $l;
        }
        $self->label_shapes($label_shapes);
    }
    my %data_names  = map { $_->name => 1 } @{ $self->data_shapes };
    my %param_names = map { $_    =>    1 } @{ $self->param_names };
    my %fixed_param_names = map { $_ => 1 } @{ $self->fixed_param_names };
    my %grad_req;
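    # Expand a global (string) grad_req into a per-argument hash: trainable
    # parameters get the requested value ('null' if fixed), data inputs get it
    # only when inputs_need_grad is set, and all other arguments get 'null'.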
    if(not ref $self->grad_req)
    {
        for my $k (@{ $self->_p->arg_names })
        {
            if(exists $param_names{ $k })
            {
                $grad_req{$k} = exists $fixed_param_names{ $k } ? 'null' : $self->grad_req;
            }
            elsif(exists $data_names{ $k })
            {
                $grad_req{$k} = $self->inputs_need_grad ? $self->grad_req : 'null';
            }
            else
            {
                $grad_req{$k} = 'null';
            }
        }
    }
