AI-MXNet


Changes  view on Meta::CPAN

Revision history for Perl extension AI::MXNet

1.0102  Sun Aug  6 16:55:08 PDT 2017
        - bugfixes in Image.pm, updated tests, added PearsonCorrelation metric, added Convolutional RNN modules.

1.0101  Sun Jul  2 17:16:01 PDT 2017
        - reworked CachedOp, two new optimizers, auto module reshape, using strings to index the kvstore.

1.01    Sat Jun 10 23:57:27 PDT 2017
        - sync with python.

0.9507  Thu May 11 17:04:44 PDT 2017
        - added AutoGrad, bugfixes.

0.9506  Sat Apr 29 20:26:50 PDT 2017
        - Ftrl optimizer, new tests, bugfixes.

0.9505  Sun Apr 23 21:26:04 PDT 2017
        - Perplexity bugfix, two new examples.

0.9504  Wed Apr 19 18:59:45 PDT 2017
        - LR Scheduler bugfix.

0.9503  Wed Apr 19 13:33:57 PDT 2017
        - added an example of generation of inferred text via pre-trained RNN.
        - bugfixes/tests.

0.9502  Sat Apr 15 17:18:21 PDT 2017
        - optimizations/bugfixes.

0.9501  Sat Apr  8 13:01:00 PDT 2017
        - ZoneoutCell, nd inferred reshape and moveaxis, cosmetic changes to Image iter,
           pod reworked to be readable via metacpan.

0.95  Sun Mar 26 17:42:02 PDT 2017
        - docs, bugfixes, tests in order to be visible on http://mxnet.io

0.03  Tue Feb 14 07:28:11 PST 2017
        - sync up with the current state of the Python interface.
        - high level RNN support.

0.02  Tue Feb 14 07:28:11 PST 2017
        - prepared for inclusion to the mxnet code repository.

MANIFEST  view on Meta::CPAN

examples/calculator.pl
examples/plot_network.pl
examples/char_lstm.pl
examples/get_ptb_data.sh
examples/lstm_bucketing.pl
examples/mnist.pl
examples/cudnn_lstm_bucketing.pl
Makefile.PL
Changes
META.json
t/test_recordio.t
t/test_random.t
t/test_init.t
t/test_model_parallel.t
t/test_optimizers.t
t/test_multi_device_exec.t
t/test_ndarray.t
t/test_io.t
t/AI-MXNet.t
t/test_kvstore.t
t/test_attr.t
t/test_module.t
t/test_symbol.t
t/test_conv.t
t/test_viz.t
t/test_rnn.t
t/test_io_image.t
t/test_executor.t
t/test_infer_shape.t
lib/AI/MXNet.pm
lib/AI/MXNet/Random.pm
lib/AI/MXNet/CachedOp.pm
lib/AI/MXNet/Context.pm
lib/AI/MXNet/Contrib/AutoGrad.pm
lib/AI/MXNet/Contrib/Symbol.pm
lib/AI/MXNet/Contrib/NDArray.pm
lib/AI/MXNet/Profiler.pm
lib/AI/MXNet/Module.pm
lib/AI/MXNet/Monitor.pm

META.json  view on Meta::CPAN

      "runtime" : {
         "requires" : {
            "AI::MXNetCAPI" : "1.0102",
            "AI::NNVMCAPI" : "1.01",
            "Function::Parameters" : "1.0705",
            "GraphViz" : "2.14",
            "Mouse" : "v2.1.0",
            "PDL" : "2.007"
         }
      },
      "test" : {
         "requires" : {}
      }
   },
   "release_status" : "stable",
   "version" : "1.0102"
}

Makefile.PL  view on Meta::CPAN

  "PREREQ_PM" => {
    "AI::MXNetCAPI" => "1.0102",
    "AI::NNVMCAPI" => "1.01",
    "Function::Parameters" => "1.0705",
    "Mouse" => "v2.1.0",
    "PDL" => "2.007",
    "GraphViz" => "2.14"
  },
  "TEST_REQUIRES" => {},
  "VERSION" => "1.0101",
  "test" => {
    "TESTS" => "t/*.t"
  }
);


my %FallbackPrereqs = (
  "AI::MXNetCAPI" => "1.0102",
  "AI::NNVMCAPI" => "1.01",
  "Function::Parameters" => "1.0705",
  "Mouse" => "v2.1.0",

examples/cudnn_lstm_bucketing.pl  view on Meta::CPAN

#!/usr/bin/perl
use strict;
use warnings;
use AI::MXNet qw(mx);
use AI::MXNet::Function::Parameters;
use AI::MXNet::Base;
use Getopt::Long qw(HelpMessage);

GetOptions(
    'test'            => \(my $do_test                ),
    'num-layers=i'    => \(my $num_layers   = 2       ),
    'num-hidden=i'    => \(my $num_hidden   = 256     ),
    'num-embed=i'     => \(my $num_embed    = 256     ),
    'num-seq=i'       => \(my $seq_size     = 32      ),
    'gpus=s'          => \(my $gpus                   ),
    'kv-store=s'      => \(my $kv_store     = 'device'),
    'num-epoch=i'     => \(my $num_epoch    = 25      ),
    'lr=f'            => \(my $lr           = 0.01    ),
    'optimizer=s'     => \(my $optimizer    = 'adam'  ),
    'mom=f'           => \(my $mom          = 0       ),

examples/cudnn_lstm_bucketing.pl  view on Meta::CPAN

    'dropout=f'       => \(my $dropout      = 0       ),
    'help'            => sub { HelpMessage(0) },
) or HelpMessage(1);

=head1 NAME

    cudnn_lstm_bucketing.pl - Example of training an LSTM RNN on the PennTreeBank dataset using the high level RNN interface

=head1 SYNOPSIS

    --test           Whether to test or train (default 0)
    --num-layers     number of stacked RNN layers, default=2
    --num-hidden     hidden layer size, default=256
    --num-embed      embedding layer size, default=256
    --num-seq        sequence size, default=32
    --gpus           list of gpus to run on, e.g. 0 or 0,2,5. Empty means using CPU.
                     Increase the batch size when using multiple gpus for best performance.
    --kv-store       key-value store type, default='device'
    --num-epoch      max number of epochs, default=25
    --lr             initial learning rate, default=0.01
    --optimizer      the optimizer type, default='adam'
    --mom            momentum for sgd, default=0.0
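
    A typical training run looks like this (a sketch; adjust paths and
    flags to your setup):

        perl examples/cudnn_lstm_bucketing.pl --gpus 0 --num-layers 2 \
            --num-hidden 256 --num-epoch 25 --lr 0.01 --optimizer adam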

examples/cudnn_lstm_bucketing.pl  view on Meta::CPAN

my $start_label   = 1;
my $invalid_label = 0;

func get_data($layout)
{
    my ($train_sentences, $vocabulary) = tokenize_text(
        './data/ptb.train.txt', start_label => $start_label,
        invalid_label => $invalid_label
    );
    my ($validation_sentences) = tokenize_text(
        './data/ptb.test.txt', vocab => $vocabulary,
        start_label => $start_label, invalid_label => $invalid_label
    );
    my $data_train  = mx->rnn->BucketSentenceIter(
        $train_sentences, $batch_size, buckets => $buckets,
        invalid_label => $invalid_label,
        layout        => $layout
    );
    my $data_val    = mx->rnn->BucketSentenceIter(
        $validation_sentences, $batch_size, buckets => $buckets,
        invalid_label => $invalid_label,

examples/cudnn_lstm_bucketing.pl  view on Meta::CPAN

                                wd            => $wd,
                            },
        begin_epoch         => $load_epoch,
        initializer         => mx->init->Xavier(factor_type => "in", magnitude => 2.34),
        num_epoch           => $num_epoch,
        batch_end_callback  => mx->callback->Speedometer($batch_size, $disp_batches),
        ($model_prefix ? (epoch_end_callback  => mx->rnn->do_rnn_checkpoint($cell, $model_prefix, 1)) : ())
    );
};

my $test = sub {
    assert($model_prefix, "Must specify the path to load from");
    my (undef, $data_val, $vocab) = get_data('NT');
    my $stack;
    if($stack_rnn)
    {
        $stack = mx->rnn->SequentialRNNCell();
        for my $i (0..$num_layers-1)
        {
            my $cell = mx->rnn->LSTMCell(num_hidden => $num_hidden, prefix => "lstm_${i}l0_");
            if($bidirectional)

examples/cudnn_lstm_bucketing.pl  view on Meta::CPAN

        mx->metric->Perplexity($invalid_label),
        batch_end_callback=>mx->callback->Speedometer($batch_size, 5)
    );
};

if($num_layers >= 4 and defined $gpus and split(/,/, $gpus) >= 4 and not $stack_rnn)
{
    print("WARNING: stack-rnn is recommended to train complex models on multiple GPUs\n");
}

if($do_test)
{
    # Demonstrates how to load a model trained with CuDNN RNN and predict
    # with non-fused MXNet symbol
    $test->();
}
else
{
    $train->();
}

examples/get_ptb_data.sh  view on Meta::CPAN

RNN_DIR=$(cd `dirname $0`; pwd)
DATA_DIR="${RNN_DIR}/data/"

if [[ ! -d "${DATA_DIR}" ]]; then
  echo "${DATA_DIR} doesn't exist, will create one";
  mkdir -p ${DATA_DIR}
fi

wget -P ${DATA_DIR} https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/ptb/ptb.train.txt;
wget -P ${DATA_DIR} https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/ptb/ptb.valid.txt;
wget -P ${DATA_DIR} https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/ptb/ptb.test.txt;
wget -P ${DATA_DIR} https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/tinyshakespeare/input.txt;

examples/lstm_bucketing.pl  view on Meta::CPAN


my $buckets = [10, 20, 30, 40, 50, 60];
my $start_label   = 1;
my $invalid_label = 0;

my ($train_sentences, $vocabulary) = tokenize_text(
    './data/ptb.train.txt', start_label => $start_label,
    invalid_label => $invalid_label
);
my ($validation_sentences) = tokenize_text(
    './data/ptb.test.txt', vocab => $vocabulary,
    start_label => $start_label, invalid_label => $invalid_label
);
my $data_train  = mx->rnn->BucketSentenceIter(
    $train_sentences, $batch_size, buckets => $buckets,
    invalid_label => $invalid_label
);
my $data_val    = mx->rnn->BucketSentenceIter(
    $validation_sentences, $batch_size, buckets => $buckets,
    invalid_label => $invalid_label
);

lib/AI/MXNet.pm  view on Meta::CPAN

=head1 SYNOPSIS

    ## Convolutional NN for recognizing hand-written digits in MNIST dataset
    ## It's considered "Hello, World" for Neural Networks
    ## For more info about the MNIST problem please refer to http://neuralnetworksanddeeplearning.com/chap1.html

    use strict;
    use warnings;
    use AI::MXNet qw(mx);
    use AI::MXNet::TestUtils qw(GetMNIST_ubyte);
    use Test::More tests => 1;

    # symbol net
    my $batch_size = 100;

    ### model
    my $data = mx->symbol->Variable('data');
    my $conv1= mx->symbol->Convolution(data => $data, name => 'conv1', num_filter => 32, kernel => [3,3], stride => [2,2]);
    my $bn1  = mx->symbol->BatchNorm(data => $conv1, name => "bn1");
    my $act1 = mx->symbol->Activation(data => $bn1, name => 'relu1', act_type => "relu");
    my $mp1  = mx->symbol->Pooling(data => $act1, name => 'mp1', kernel => [2,2], stride =>[2,2], pool_type=>'max');

lib/AI/MXNet/Contrib/AutoGrad.pm  view on Meta::CPAN

    };
}

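# Execute $sub with autograd training mode switched on, restoring the
# previous mode afterwards (it is only switched back off if it was off before).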
method train_section(CodeRef $sub)
{
    my $prev = __PACKAGE__->set_is_training(1);
    $sub->();
    __PACKAGE__->set_is_training(0) unless $prev;
}

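# Execute $sub with training mode switched off (e.g. for inference),
# restoring training mode afterwards if it was on before.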
method test_section(CodeRef $sub)
{
    my $prev = __PACKAGE__->set_is_training(0);
    $sub->();
    __PACKAGE__->set_is_training(1) if $prev;
}

1;

lib/AI/MXNet/IO.pm  view on Meta::CPAN


=head1 DESCRIPTION

    Predefined NDArray iterator. Accepts PDL or AI::MXNet::NDArray objects as input.

    Parameters
    ----------
    data: Maybe[AcceptableInput|HashRef[AcceptableInput]|ArrayRef[AcceptableInput]].
        NDArrayIter supports single or multiple data and label arrays.
    label: Maybe[AcceptableInput|HashRef[AcceptableInput]|ArrayRef[AcceptableInput]].
        Same as data, but is not given to the model during testing.
    batch_size=1: Int
        Batch Size
    shuffle=0: Bool
        Whether to shuffle the data
    last_batch_handle='pad': 'pad', 'discard' or 'roll_over'
        How to handle the last batch

    Note
    ----
    This iterator will pad, discard or roll over the last batch if
    the size of the data is not a multiple of batch_size, so every
    returned batch has exactly batch_size elements.
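
    A minimal usage sketch (assumes $data and $labels are pre-loaded PDL
    arrays; the names are illustrative):

        use AI::MXNet qw(mx);

        my $iter = mx->io->NDArrayIter(
            data       => $data,
            label      => $labels,
            batch_size => 50,
            shuffle    => 1
        );
        for my $batch (@{ $iter })
        {
            # $batch->data and $batch->label are array refs of
            # AI::MXNet::NDArray objects for the current mini-batch
        }
        $iter->reset;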

lib/AI/MXNet/IO.pm  view on Meta::CPAN

sub DEMOLISH
{
    check_call(AI::MXNetCAPI::DataIterFree(shift->handle));
}

=head2 debug_skip_load

    Set the iterator to always return the first batch.
    Notes
    -----
    This can be used to test the speed of the network without taking
    the data loading delay into account.
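
    For example (a sketch, with any AI::MXNet data iterator $iter):

        $iter->debug_skip_load;
        # every subsequent batch is the first one, so timing a pass over
        # $iter now measures network speed without the I/O cost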
=cut

method debug_skip_load()
{
    $self->_debug_skip_load(1);
    AI::MXNet::Logging->info('Set debug_skip_load to be true, will simply return first batch');
}

method reset()

lib/AI/MXNet/Module.pm  view on Meta::CPAN

    ArrayRef[AI::MXNet::NDArray] $labels
)
{
    $self->_p->_exec_group->update_metric($eval_metric, $labels);
}

=head2 _sync_params_from_devices

    Synchronize parameters from the devices to the CPU. This function should be called
    after calling 'update', which updates the parameters on the devices, and before
    reading the latest parameters from $self->_arg_params and $self->_aux_params.
=cut

method _sync_params_from_devices()
{
    $self->_p->_exec_group->get_params($self->_p->_arg_params, $self->_p->_aux_params);
    $self->_p->_params_dirty(0);
}

method save_optimizer_states(Str $fname)
{

lib/AI/MXNet/Module/Base.pm  view on Meta::CPAN

        the data arrays might not be of the same shape as viewed from the external world.
        - label_shapes: an array ref of [name, shape]. This might be [] if the module does
        not need labels (e.g. it does not contain a loss function at the top), or the module
        is not bound for training.
        - output_shapes: an array ref of [name, shape] for outputs of the module.

    - parameters (for modules with parameters)
        - get_params(): return an array ($arg_params, $aux_params). Each of those
        is a hash ref mapping names to NDArrays. Those NDArrays always live on
        the CPU. The actual parameters used for computing might be on other devices (GPUs);
        this function retrieves (a copy of) the latest parameters, so modifying the
        returned copies does not affect the parameters used on the devices.
        - set_params($arg_params, $aux_params): assign parameters to the devices
        doing the computation.
        - init_params(...): a more flexible interface to assign or initialize the parameters.

    - setup
        - bind(): prepare environment for computation.
        - init_optimizer(): install optimizer for parameter updating.

    - computation
        - forward(data_batch): forward operation.
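
    Together these form the usual lifecycle. A minimal training-loop
    sketch (illustrative only; assumes a loss-topped symbol $sym and a
    data iterator $train_iter):

        my $mod = mx->mod->Module($sym, data_names => ['data']);
        $mod->bind(data_shapes => [['data', [32, 100]]]);
        $mod->init_params(initializer => mx->init->Xavier());
        $mod->init_optimizer(optimizer_params => { learning_rate => 0.1 });
        for my $batch (@{ $train_iter })
        {
            $mod->forward($batch, is_train => 1);
            $mod->backward();
            $mod->update();
        }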

lib/AI/MXNet/Module/Bucketing.pm  view on Meta::CPAN


    my $buckets = [10, 20, 30, 40, 50, 60];
    my $start_label   = 1;
    my $invalid_label = 0;

    my ($train_sentences, $vocabulary) = tokenize_text(
        './data/ptb.train.txt', start_label => $start_label,
        invalid_label => $invalid_label
    );
    my ($validation_sentences) = tokenize_text(
        './data/ptb.test.txt', vocab => $vocabulary,
        start_label => $start_label, invalid_label => $invalid_label
    );
    my $data_train  = mx->rnn->BucketSentenceIter(
        $train_sentences, $batch_size, buckets => $buckets,
        invalid_label => $invalid_label
    );
    my $data_val    = mx->rnn->BucketSentenceIter(
        $validation_sentences, $batch_size, buckets => $buckets,
        invalid_label => $invalid_label
    );

lib/AI/MXNet/Optimizer.pm  view on Meta::CPAN

                            /
                        ($acc_g + $self->epsilon)->sqrt
                            *
                        $grad;
    $acc_delta .= $self->rho * $acc_delta + (1 - $self->rho) * $current_delta * $current_delta;
    $weight -= $current_delta + $wd * $weight;
}
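# The update above implements the AdaDelta rule (Zeiler, 2012):
#   acc_g     = rho * acc_g + (1 - rho) * grad**2
#   delta     = sqrt(acc_delta + eps) / sqrt(acc_g + eps) * grad
#   acc_delta = rho * acc_delta + (1 - rho) * delta**2
#   weight   -= delta + wd * weight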

__PACKAGE__->register;

# For test use
package AI::MXNet::Test;
use Mouse;

extends 'AI::MXNet::Optimizer';

# Create a state to duplicate weight
method create_state(Index $index, AI::MXNet::NDArray $weight)
{
    return AI::MXNet::NDArray->zeros(
                $weight->shape, 

lib/AI/MXNet/TestUtils.pm  view on Meta::CPAN

use Scalar::Util qw(blessed);
use AI::MXNet::Function::Parameters;
use Exporter;
use base qw(Exporter);
@AI::MXNet::TestUtils::EXPORT_OK = qw(same reldiff almost_equal GetMNIST_ubyte
                                      GetCifar10 pdl_maximum pdl_minimum mlp2 conv
                                      check_consistency zip assert enumerate same_array dies_like);
use constant default_numerical_threshold => 1e-6;
=head1 NAME

    AI::MXNet::TestUtils - Convenience subs used in tests.

=head2 same

    Test if two pdl arrays are the same

    Parameters
    ----------
    a : pdl
    b : pdl
=cut

lib/AI/MXNet/TestUtils.pm  view on Meta::CPAN

    return $softmax;
}

=head2 check_consistency

    Check that a symbol gives the same output for different running contexts

    Parameters
    ----------
    sym : Symbol or list of Symbols
        symbol(s) to run the consistency test
    ctx_list : list
        running context. See example for more detail.
    scale : float, optional
        standard deviation of the inner normal distribution. Used in initialization
    grad_req : str or list of str or dict of str to str
        gradient requirement.
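
    Example (a sketch: the ctx_list layout mirrors the Python API, and an
    input named 'data' plus an available GPU are assumed):

        check_consistency($sym, [
            { ctx => mx->gpu(0), data => [2, 3, 10, 10] },
            { ctx => mx->cpu(0), data => [2, 3, 10, 10] }
        ]);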
=cut

my %dtypes = (
    float32 => 0,

lib/AI/MXNet/TestUtils.pm  view on Meta::CPAN

    my $gt = $ground_truth;
    if(not defined $gt)
    {
        $gt = { %{ $exe_list[$max_idx]->output_dict } };
        if($grad_req ne 'null')
        {
            %{$gt} = (%{$gt}, %{ $exe_list[$max_idx]->grad_dict });
        }
    }

    # test
    for my $exe (@exe_list)
    {
        $exe->forward(0);
    }
    enumerate(sub {
        my ($i, $exe) = @_;
        if($i == $max_idx)
        {
            return;
        }

t/AI-MXNet.t  view on Meta::CPAN

use strict;
use warnings;
use Test::More tests => 1;
BEGIN { use_ok('AI::MXNet') };

t/test_attr.t  view on Meta::CPAN

use strict;
use warnings;
use Test::More tests => 14;
use AI::MXNet qw(mx);
use Storable;

sub contains
{
    my ($x, $y) = @_;
    while(my ($k, $v) = each %$x)
    {
        return 0 unless exists $y->{$k};
        if(ref $y->{$k} and ref $y->{$k} eq 'HASH')

t/test_attr.t  view on Meta::CPAN

            return 0 unless contains($v, $y->{$k});
        }
        elsif($y->{$k} ne $v)
        {
            return 0;
        }
    }
    return 1;
}

sub test_attr_basic
{
    my ($data, $gdata);
    {
        local($mx::AttrScope) = mx->AttrScope(group=>'4', data=>'great');
        $data = mx->symbol->Variable(
            'data',
            attr => {
                qw/ dtype data
                    group 1
                    force_mirroring 1/

t/test_attr.t  view on Meta::CPAN

    ok($gdata->attr('group') == 4);
    ok($data->attr('group') == 1);
    ok($data->attr('lr_mult') == 1);
    ok($data->attr('__lr_mult__') == 1);
    ok($data->attr('force_mirroring') == 1);
    ok($data->attr('__force_mirroring__') == 1);
    my $data2 = Storable::thaw(Storable::freeze($data));
    ok($data->attr('dtype') eq $data2->attr('dtype'));
}

sub test_operator
{
    my $data = mx->symbol->Variable('data');
    my ($fc1, $fc2);
    {
        local($mx::AttrScope) = mx->AttrScope(__group__=>'4', __data__=>'great');
        $fc1 = mx->symbol->Activation($data, act_type=>'relu');
        {
            local($mx::AttrScope) = mx->AttrScope(__init_bias__ => 0, 
                __group__=>'4', __data__=>'great');
            $fc2 = mx->symbol->FullyConnected($fc1, num_hidden=>10, name=>'fc2');
        }
    }
    ok($fc1->attr('__data__') eq 'great');
    ok($fc2->attr('__data__') eq 'great');
    ok($fc2->attr('__init_bias__') == 0);
    my $fc2copy = Storable::thaw(Storable::freeze($fc2));
    ok($fc2copy->tojson() eq $fc2->tojson());
    ok($fc2->get_internals()->slice('fc2_weight'));
}

sub test_list_attr
{
    my $data = mx->sym->Variable('data', attr=>{'mood', 'angry'});
    my $op = mx->sym->Convolution(
        data=>$data, name=>'conv', kernel=>[1, 1],
        num_filter=>1, attr => {'__mood__'=> 'so so', 'wd_mult'=> 'x'}
    );
    ok(contains({'__mood__'=> 'so so', 'wd_mult'=> 'x', '__wd_mult__'=> 'x'}, $op->list_attr()));
}

sub test_attr_dict
{
    my $data = mx->sym->Variable('data', attr=>{'mood'=> 'angry'});
    my $op = mx->sym->Convolution(
        data=>$data, name=>'conv', kernel=>[1, 1],
        num_filter=>1, attr=>{'__mood__'=> 'so so'}, lr_mult=>1
    );
    ok(
        contains(
            {
                'data'=> {'mood'=> 'angry'},

t/test_attr.t  view on Meta::CPAN

                    'kernel'=> '(1, 1)', '__mood__'=> 'so so', 
                    'num_filter'=> '1', 'lr_mult'=> '1', '__lr_mult__'=> '1'
                },
                'conv_bias'=> {'__mood__'=> 'so so'}
            },
            $op->attr_dict()
        )
    );
}

test_attr_basic();
test_operator();
test_list_attr();
test_attr_dict();

t/test_conv.t  view on Meta::CPAN

use strict;
use warnings;
use AI::MXNet qw(mx);
use AI::MXNet::TestUtils qw(GetMNIST_ubyte);
use Test::More tests => 1;

## speed up the tests when gpu present
my $gpu_present = (`perl -e 'use AI::MXNet qw(mx); print mx->nd->ones([1], ctx => mx->gpu(0))->asscalar' 2>/dev/null` eq '1');

# symbol net
my $batch_size = 100;

### model
my $data = mx->symbol->Variable('data');
my $conv1= mx->symbol->Convolution(data => $data, name => 'conv1', num_filter => 32, kernel => [3,3], stride => [2,2]);
my $bn1  = mx->symbol->BatchNorm(data => $conv1, name => "bn1");
my $act1 = mx->symbol->Activation(data => $bn1, name => 'relu1', act_type => "relu");

t/test_executor.t  view on Meta::CPAN

use strict;
use warnings;
use Test::More tests => 2283;
use AI::MXNet qw(mx);
use AI::MXNet::TestUtils qw(reldiff pdl_maximum pdl_minimum);
use PDL;

sub check_bind_with_uniform
{
    my ($uf, $gf, $dim, $sf, $lshape, $rshape) = @_;
    my $shape = (random($dim)*int(1000**(1.0/$dim))+1)->floor->unpdl;
    my $lhs = mx->symbol->Variable('lhs');
    my $rhs = mx->symbol->Variable('rhs');

t/test_executor.t  view on Meta::CPAN

    $executor->forward(1);
    $exec3->forward(1);
    $exec4->forward(1);
    my $out2 = $executor->outputs->[0]->aspdl;
    my $out1 = &{$uf}($lhs_arr->aspdl, $rhs_arr->aspdl);
    my $out3 = $exec3->outputs->[0]->aspdl;
    my $out4 = $exec4->outputs->[0]->aspdl;
    ok(reldiff($out1, $out2) < 1e-6);
    ok(reldiff($out1, $out3) < 1e-6);
    ok(reldiff($out1, $out4) < 1e-6);
    # test gradient

    my $out_grad = mx->nd->ones([reverse @{$out2->shape->unpdl}]);
    my ($lhs_grad2, $rhs_grad2) = &{$gf}(
        $out_grad->aspdl,
        $lhs_arr->aspdl,
        $rhs_arr->aspdl
    );
    $executor->backward([$out_grad]);

    ok(reldiff($lhs_grad->aspdl, $lhs_grad2) < 1e-6);
    ok(reldiff($rhs_grad->aspdl, $rhs_grad2) < 1e-6);
}

sub test_bind
{
    my ($disable_bulk_exec) = @_;
    my ($prev_fwd_var, $prev_bwd_var);
    if($disable_bulk_exec)
    {
        $prev_fwd_var = $ENV{MXNET_EXEC_BULK_FWD_THRESHOLD_TRAIN}//1;
        $prev_bwd_var = $ENV{MXNET_EXEC_BULK_BWD_TRAIN}//1;
        $ENV{MXNET_EXEC_BULK_FWD_THRESHOLD_TRAIN} = 0;
        $ENV{MXNET_EXEC_BULK_BWD_TRAIN} = 0;
    }

t/test_executor.t  view on Meta::CPAN

        }
    }
    if($disable_bulk_exec)
    {
        $ENV{MXNET_EXEC_BULK_FWD_THRESHOLD_TRAIN} = $prev_fwd_var;
        $ENV{MXNET_EXEC_BULK_BWD_TRAIN}           = $prev_bwd_var;
    }
}


sub test_dot
{
    srand(0);
    my $nrepeat = 9;
    my $maxdim = 4;
    for my $repeat (0..$nrepeat)
    {
        my $shape = (random(3)*500+1)->floor->unpdl;
        check_bind_with_uniform(sub { my ($x, $y) = @_; $x x $y },
                                sub { my ($g, $x, $y) = @_; ($g x $y->transpose, $x->transpose x $g) },
                                2,

t/test_executor.t  view on Meta::CPAN

        check_bind_with_uniform(sub { my ($x, $y) = @_; $x x $y->transpose },
                                sub { my ($g, $x, $y) = @_; ($g * $y, $g * $x) },
                                2,
                                sub { mx->symbol->dot(@_) },
                                [@{$shape}[0]],
                                [@{$shape}[0]],
        );
    }
}

sub test_reshape
{
    my $x = mx->sym->Variable('x');
    my $y = mx->sym->FullyConnected($x, num_hidden=>4);
    my $exe = $y->simple_bind(ctx => mx->cpu(), shapes => { x=>[5,4] }, grad_req=>'null');
    $exe->arg_arrays->[0] .= 1;
    $exe->arg_arrays->[1] .= mx->nd->ones([4,4]);
    $exe->arg_arrays->[2] .= 0;
    my $new_exe = $exe->reshape({ x=>[3,4] });
    $new_exe->forward(0);
    # test sub exec forward
    ok(($new_exe->outputs->[0]->aspdl == 4)->all);
    # test shared memory
    ok(($exe->outputs->[0]->aspdl->slice('X', [0,2]) == 4)->all);
    # test base exec forward
    $exe->forward(0);
    ok(($new_exe->outputs->[0]->aspdl == 4)->all);
}

test_bind(0);
test_bind(1);
test_dot();
test_reshape();

t/test_infer_shape.t  view on Meta::CPAN

use strict;
use warnings;
use Test::More tests => 18;
use AI::MXNet qw(mx);
use AI::MXNet::TestUtils qw(mlp2);

sub _test_shapes
{
    my ($sym, $arg_shapes, %expected_shapes) = @_;
    my %arg_shape_dict;
    @arg_shape_dict{ @{ $sym->list_arguments() } } = @{ $arg_shapes };
    while(my ($k, $v) = each %expected_shapes)
    {
        is_deeply($arg_shape_dict{$k}, $v);
    }
}

sub test_mlp2_infer_shape
{
    # Build MLP
    my $out = mlp2();
    # infer shape
    my $data_shape = [100, 100];
    my($arg_shapes, $out_shapes, $aux_shapes) = $out->infer_shape(data=>$data_shape);
    ok(@$out_shapes == 1);
    is_deeply($out_shapes->[0], [100, 10]);
    my %true_shapes = (
        fc2_bias   => [10],
        fc2_weight => [10, 1000],
        fc1_bias   => [1000],
        fc1_weight => [1000,100]
    );
    _test_shapes($out, $arg_shapes, %true_shapes);
}

sub test_mlp2_infer_error
{
    # Test shape inconsistent case
    my $out = mlp2();
    my $weight_shape = [1, 100];
    my $data_shape   = [100, 100];
    eval { $out->infer_shape(data=>$data_shape, fc1_weight=>$weight_shape) };
    like($@, qr/Shape inconsistent/);
}

sub test_backward_infer
{
    my $w = mx->sym->Variable("weight");
    my $wshift = mx->sym->Variable("wshift", shape=>[1]);
    my $data = mx->sym->Variable("data");
    # broadcast add here, not being able to deduce shape correctly
    my $wt = mx->sym->broadcast_add($w, $wshift);
    # shape constraint, this is what enables backward shape inference
    $wt = mx->sym->_identity_with_attr_like_rhs($wt, $w);
    my $net = mx->sym->FullyConnected(data=>$data, weight=>$wt, num_hidden=>11, no_bias=>1);
    my $data_shape = [7, 100];
    my ($arg_shapes, $out_shapes, $aux_shapes) = $net->infer_shape(data=>$data_shape);
    _test_shapes($net, $arg_shapes, weight=>[11,100]);
}

sub test_incomplete_infer_elewise
{
    my $a = mx->sym->Variable('a', shape=>[0, 10]);
    my $b = mx->sym->Variable('b', shape=>[12, 0]);
    my $c = $a + $b;
    my ($arg_shapes) = $c->infer_shape();
    _test_shapes($c, $arg_shapes, a=>[12,10], b=>[12,10]);
}

sub test_incomplete_infer_mlp
{
    my $a = mx->sym->Variable('a', shape=>[0, 10]);
    my $b = mx->sym->FullyConnected(data=>$a, num_hidden=>21);
    my $c = mx->sym->Variable('c', shape=>[5, 0]);
    my $d = $b + $c;
    my ($arg_shapes) = $d->infer_shape();
    _test_shapes($d, $arg_shapes, a=>[5,10], c=>[5,21]);
}

sub test_incomplete_infer_slicechannel
{
    my $a = mx->sym->Variable('a', shape=>[0, 10]);
    my $b = mx->sym->SliceChannel(data=>$a, num_outputs=>10, axis=>1, squeeze_axis=>1);
    my $c = mx->sym->Variable('c', shape=>[5]);
    my $d = $b->[1] + $c;
    my ($arg_shapes) = $d->infer_shape();
    _test_shapes($d, $arg_shapes, a=>[5,10]);

    $a = mx->sym->Variable('a', shape=>[0, 15, 0]);
    $b = mx->sym->SliceChannel(data=>$a, num_outputs=>3, squeeze_axis=>0);
    $c = mx->sym->Variable('c', shape=>[3, 5, 2]);
    $d = $b->[1] + $c;
    ($arg_shapes) = $d->infer_shape();
    _test_shapes($d, $arg_shapes, a=>[3,15,2]);
}

sub test_incomplete_infer_convolution
{
    my $a = mx->sym->Variable('a', shape=>[0, 10, 0, 0]);
    my $b = mx->sym->Convolution(data=>$a, num_filter=>21, kernel=>[3, 3], dilate=>[1, 1], pad=>[1, 1]);
    my $c = mx->sym->Variable('c', shape=>[5, 21, 32, 32]);
    my $d = $b + $c;
    my ($arg_shapes) = $d->infer_shape();
    _test_shapes($d, $arg_shapes, a=>[5, 10, 32, 32]);
}

sub test_incomplete_infer_concat
{
    my $a = mx->sym->Variable('a', shape=>[0, 10]);
    my $b = mx->sym->Variable('b', shape=>[0, 5]);
    my $c = mx->sym->Concat($a, $b, num_args=>2, dim=>1);
    my $d = mx->sym->Variable('d', shape=>[2, 0]);
    $d = $d + $c;
    my ($arg_shapes) = $d->infer_shape();
    _test_shapes($d, $arg_shapes, a=>[2,10], b=>[2,5], d=>[2,15]);
}

test_mlp2_infer_shape();
test_mlp2_infer_error();
test_backward_infer();
test_incomplete_infer_elewise();
test_incomplete_infer_mlp();
test_incomplete_infer_slicechannel();
test_incomplete_infer_convolution();
test_incomplete_infer_concat();

t/test_init.t  view on Meta::CPAN

use strict;
use warnings;
use Test::More tests => 4;
use AI::MXNet qw(mx);

sub test_default_init
{
    my $data = mx->sym->Variable('data');
    my $sym  = mx->sym->LeakyReLU(data => $data, act_type => 'prelu');
    my $mod  = mx->mod->Module($sym);
    $mod->bind(data_shapes=>[['data', [10,10]]]);
    $mod->init_params;
    ok((((values %{ ($mod->get_params)[0] }))[0]->aspdl == 0.25)->all);
}

sub test_variable_init
{
    my $data  = mx->sym->Variable('data');
    my $gamma = mx->sym->Variable('gamma', init => mx->init->One());
    my $sym   = mx->sym->LeakyReLU(data => $data, gamma => $gamma, act_type => 'prelu');
    my $mod   = mx->mod->Module($sym);
    $mod->bind(data_shapes=>[['data', [10,10]]]);
    $mod->init_params();
    ok((((values %{ ($mod->get_params)[0] }))[0]->aspdl == 1)->all);
}

sub test_aux_init
{
    my $data = mx->sym->Variable('data');
    my $sym  = mx->sym->BatchNorm(data => $data, name => 'bn');
    my $mod  = mx->mod->Module($sym);
    $mod->bind(data_shapes=>[['data', [10, 10, 3, 3]]]);
    $mod->init_params();
    ok((($mod->get_params)[1]->{bn_moving_var}->aspdl == 1)->all);
    ok((($mod->get_params)[1]->{bn_moving_mean}->aspdl == 0)->all);
}

test_default_init();
test_variable_init();
test_aux_init();

t/test_io.t  view on Meta::CPAN

use AI::MXNet qw(mx);
use Test::More tests => 31;
use AI::MXNet::TestUtils qw(same reldiff GetMNIST_ubyte GetCifar10);
use PDL;
use PDL::Types;
use PDL::NiceSlice;
$|++;


sub test_Cifar10Rec()
{
    GetCifar10();
    my $dataiter = mx->io->ImageRecordIter({
            path_imgrec => "data/cifar/train.rec",
            mean_img => "data/cifar/cifar10_mean.bin",
            rand_crop => 0,
            rand_mirror => 0,
            shuffle => 0,
            data_shape => [3,28,28],
            batch_size => 100,

t/test_io.t  view on Meta::CPAN

        {
            $labelcount[int($nplabel->at($i)->asscalar)] += 1;
        }
    }
    for my $i (0..9)
    {
        ok($labelcount[$i] == 5000);
    }
}

sub test_NDArrayIter()
{
    my $datas  = ones(PDL::Type->new(6), 2, 2, 1000);
    my $labels = ones(PDL::Type->new(6), 1, 1000);
    for my $i (0..999)
    {
        $datas(:,:,$i) .= $i / 100;
        $labels(:,$i) .= $i / 100;
    }
    my $dataiter = mx->io->NDArrayIter(
        data => $datas,

t/test_io.t  view on Meta::CPAN

        {
            ok($labelcount[$i] == 124);
        }
        else
        {
            ok($labelcount[$i] == 100);
        }
    }
}

sub test_MNISTIter()
{
    GetMNIST_ubyte();

    my $batch_size = 100;
    my $train_dataiter = mx->io->MNISTIter({
            image => "data/train-images-idx3-ubyte",
            label => "data/train-labels-idx1-ubyte",
            data_shape => [784],
            batch_size => $batch_size,
            shuffle => 1,
            flat => 1,
            silent => 0,
            seed => 10
    });
    # test_loop
    my $nbatch = 60000 / $batch_size;
    my $batch_count = 0;
    for my $batch (@{ $train_dataiter})
    {
        $batch_count += 1;
    }
    ok($nbatch == $batch_count);
    # test_reset
    $train_dataiter->reset();
    $train_dataiter->iter_next();
    my $label_0 = $train_dataiter->getlabel->aspdl->flat;
    $train_dataiter->iter_next;
    $train_dataiter->iter_next;
    $train_dataiter->iter_next;
    $train_dataiter->reset;
    $train_dataiter->iter_next;
    my $label_1 = $train_dataiter->getlabel->aspdl->flat;
    ok(sum($label_0 - $label_1) == 0);
}

test_NDArrayIter();
test_MNISTIter();
test_Cifar10Rec();

t/test_io_image.t  view on Meta::CPAN

use strict;
use warnings;
use Test::More tests => 1;
use AI::MXNet qw(mx);
use Time::HiRes qw(time);

sub run_imageiter
{
    my ($path_rec, $n, $batch_size) = @_;
    $batch_size //= 32;
    my $data = mx->img->ImageIter(
        batch_size=>$batch_size,
        data_shape=>[3, 224, 224],

t/test_io_image.t  view on Meta::CPAN

    $data->reset();
    my $tic = time;
    for my $i (1..$n)
    {
        $data->next;
        mx->nd->waitall;
        warn("average speed after iteration $i is " . $batch_size*$i/(time - $tic) . " samples/sec");
    }
}

run_imageiter('data/cifar/test.rec', 20);
ok(1);

t/test_kvstore.t  view on Meta::CPAN

use strict;
use warnings;
use Test::More tests => 38;
use AI::MXNet qw(mx);

my $shape = [4, 4];
my $keys  = [5,7,9];

sub init_kv
{
    # init kv
    my $kv = mx->kv->create();
    # single

t/test_kvstore.t  view on Meta::CPAN

    return $kv;
}

sub check_diff_to_scalar
{
    # assert A == x
    my ($A, $x) = @_;
    ok(($A - $x)->aspdl->abs->sum == 0);
}

sub test_single_kv_pair
{
    # single key-value pair push & pull
    my $kv = init_kv();
    $kv->push(3, mx->nd->ones($shape));
    my $val = mx->nd->empty($shape);
    $kv->pull(3, out => $val);
    check_diff_to_scalar($val, 1);
}

sub test_init
{
    my $kv = mx->kv->create();
    $kv->init(3, mx->nd->ones($shape)*4);
    my $a = mx->nd->zeros($shape);
    $kv->pull(3, out=>$a);
    check_diff_to_scalar($a, 4);
}

sub test_list_kv_pair
{
    # list key-value pair push & pull
    my $kv = init_kv();
    $kv->push($keys, [map {mx->nd->ones($shape)*4} 0..@$keys-1]);
    my $val = [map { mx->nd->empty($shape) } 0..@$keys-1];
    $kv->pull($keys, out => $val);
    for my $v (@$val)
    {
        check_diff_to_scalar($v, 4);
    }
}

sub test_aggregator
{
    # aggregate values on multiple devices

    my $kv = init_kv();

    # devices
    my $num_devs = 4;
    my $devs = [map { mx->cpu($_) } 0..$num_devs-1];

    # single

t/test_kvstore.t  view on Meta::CPAN

        }
    }
}

sub updater
{
    my ($key, $recv, $local) = @_;
    $local += $recv;
}

sub test_updater
{
    my ($dev) = @_;
    $dev //= 'cpu';
    my $kv = init_kv();
    $kv->_set_updater(\&updater);

    # devices
    my $num_devs = 4;
    my $devs = [map { mx->$dev($_) } 0..$num_devs-1];

t/test_kvstore.t  view on Meta::CPAN


    for my $vv (@{ $vals })
    {
        for my $v (@{ $vv })
        {
            check_diff_to_scalar($v, $num_devs * $num_push);
        }
    }
}

sub test_get_type
{
    my $kvtype = 'local_allreduce_cpu';
    my $kv = mx->kv->create($kvtype);
    is($kv->type, $kvtype);
}

test_init();
test_get_type();
test_single_kv_pair();
test_list_kv_pair();
test_aggregator();
test_updater();

t/test_model_parallel.t  view on Meta::CPAN

use strict;
use warnings;
use Test::More tests => 4;
use AI::MXNet qw(mx);
use AI::MXNet::TestUtils qw(reldiff);
use AI::MXNet::Base;

sub test_chain
{
    my $ctx1 = mx->cpu(0);
    my $ctx2 = mx->cpu(1);
    my $n = 2;
    my $data1 = mx->sym->Variable('data1');
    my $data2 = mx->sym->Variable('data2');
    my $data3 = mx->sym->Variable('data2');
    my $net;
    {
        local($mx::AttrScope) = mx->AttrScope(ctx_group=>'dev1');

t/test_model_parallel.t  view on Meta::CPAN

    my $out_grad = mx->nd->empty($shape, ctx => $ctx1);
    $out_grad .= 1;
    $exec1->backward([$out_grad]);
    $exec2->backward([$out_grad->copyto($ctx1)]);
    zip(sub {
        my ($a, $b) = @_;
        ok(reldiff($a->aspdl, $b->aspdl) < 1e-6);
    }, $arr_grad, $arr_grad2);
}

test_chain();

t/test_module.t  view on Meta::CPAN

use strict;
use warnings;
use Test::More tests => 257;
use AI::MXNet qw(mx);
use AI::MXNet::Base;
use AI::MXNet::TestUtils qw(almost_equal enumerate same_array dies_like);
use Data::Dumper;

sub test_module_layout
{
    my $sym = mx->sym->Variable('data');
    $sym = mx->sym->Activation(data=>$sym, act_type=>'relu', __layout__=>'TNC');

    my $dshape = [3, 8, 7];
    my $mod = mx->mod->Module(
        $sym,
        data_names=>['data'],
        context=>[mx->cpu(0), mx->cpu(1)]
    );

t/test_module.t  view on Meta::CPAN

    $mod->backward([mx->nd->ones($dshape)]);
    is_deeply($mod->get_outputs()->[0]->shape, $dshape);

    my $hdshape = [3, 4, 7];
    for my $x (@{ $mod->get_outputs(0)->[0] })
    {
        is_deeply($x->shape, $hdshape);
    }
}

sub test_save_load
{
    my $dict_equ = sub {
        my ($a, $b) = @_;
        is_deeply([sort keys %$a], [sort keys %$b]);
        for my $k (keys %$a)
        {
            ok(($a->{$k}->aspdl == $b->{$k}->aspdl)->all);
        }
    };
    my $sym = mx->sym->Variable('data');
    $sym = mx->sym->FullyConnected($sym, num_hidden=>100);

    # single device
    my $mod = mx->mod->Module($sym, data_names=>['data']);
    $mod->bind(data_shapes=>[['data', [10, 10]]]);
    $mod->init_params();
    $mod->init_optimizer(optimizer_params=>{learning_rate => 0.1, momentum => 0.9});
    $mod->update();
    $mod->save_checkpoint('test', 0, 1);

    my $mod2 = mx->mod->Module->load('test', 0, 1, data_names=>['data']);
    $mod2->bind(data_shapes=>[['data', [10, 10]]]);
    $mod2->init_optimizer(optimizer_params=>{learning_rate => 0.1, momentum => 0.9});
    is($mod->_symbol->tojson(), $mod2->_symbol->tojson());
    $dict_equ->(($mod->get_params())[0], ($mod2->get_params())[0]);
    $dict_equ->($mod->_updater->states, $mod2->_updater->states);

    # multi device
    $mod = mx->mod->Module($sym, data_names=>['data'], context=>[mx->cpu(0), mx->cpu(1)]);
    $mod->bind(data_shapes=>[['data', [10, 10]]]);
    $mod->init_params();
    $mod->init_optimizer(optimizer_params=>{learning_rate => 0.1, momentum => 0.9});
    $mod->update();
    $mod->save_checkpoint('test', 0, 1);

    $mod2 = mx->mod->Module->load('test', 0, 1, data_names=>['data']);
    $mod2->bind(data_shapes=>[['data', [10, 10]]]);
    $mod2->init_optimizer(optimizer_params=>{learning_rate => 0.1, momentum => 0.9});
    is($mod->_symbol->tojson(), $mod2->_symbol->tojson());
    $dict_equ->(($mod->get_params())[0], ($mod2->get_params())[0]);
    $dict_equ->($mod->_kvstore->_updater->states, $mod2->_updater->states);
    unlink('test-0000.params');
    unlink('test-0000.states');
    unlink('test-symbol.json');
}


sub test_module_reshape
{
    my $data = mx->sym->Variable('data');
    my $sym  = mx->sym->FullyConnected($data, num_hidden=>20, name=>'fc');

    my $dshape = [7, 20];
    my $mod = mx->mod->Module($sym, data_names=>['data'], context=>[mx->cpu(0), mx->cpu(1)]);
    $mod->bind(data_shapes=>[['data', $dshape]]);
    $mod->init_params();
    $mod->init_optimizer(optimizer_params=>{learning_rate => 1});

t/test_module.t  view on Meta::CPAN

        ),
        is_train => 1
    );
    $mod->backward([mx->nd->ones($dshape)]);
    $mod->update();
    is_deeply($mod->get_outputs()->[0]->shape, $dshape);
    ok((($mod->get_params())[0]{fc_bias}->aspdl == -3)->all);
}


sub test_module_states
{
    my $stack = mx->rnn->SequentialRNNCell();
    for my $i (0..1)
    {
        $stack->add(mx->rnn->LSTMCell(num_hidden=>20, prefix=>"lstm_l${i}_"));
    }
    my $begin_state = $stack->begin_state(func=>mx->sym->can('Variable'));
    my (undef, $states) = $stack->unroll(10, begin_state=>$begin_state, inputs=>mx->sym->Variable('data'));

    my $state_names = [map { $_->name } @$begin_state];

t/test_module.t  view on Meta::CPAN

    $mod->set_states(states=>$out);
    $mod->forward($batch);
    my $out2 = $mod->get_outputs(1);

    zip(sub {
        my ($x1, $x2) = @_;
        ok(not almost_equal($x1->aspdl, $x2->aspdl, 1e-3));
    }, $out1, $out2);
}

sub test_module_switch_bucket
{
    my $vocab_dim  = 5000;
    my $num_hidden = 100;
    my $num_embedding = 100;
    my $num_layer = 2;
    my $default_key = 10;
    my $test_key = 5;
    my $batch_size = 32;
    my $contexts = [mx->cpu(0)];
    my $initializer = mx->init->Xavier(factor_type=>"in", magnitude=>2.34);

    #generate symbols for an LSTM network
    my $gen_sym = sub {
        my $seq_len = shift;
        my $data  = mx->sym->Variable('data');
        my $label = mx->sym->Variable('softmax_label');
        my $embed = mx->sym->Embedding(data=>$data, input_dim=>$vocab_dim,

t/test_module.t  view on Meta::CPAN

            context             => $contexts
        );
        $model->bind(data_shapes=>[['data', [$batch_size, $key]]],
                    label_shapes=>[['softmax_label', [$batch_size, $key]]]
        );
        $model->init_params(initializer=>$initializer);
        return $model;
    };
    #initialize the bucketing module with the default bucket key
    my $bucketing_model = $create_bucketing_module->($default_key);
    #switch to test_key
    $bucketing_model->switch_bucket(
        bucket_key   => $test_key,
        data_shapes  => [['data', [$batch_size, $test_key]]],
        label_shapes => [['softmax_label', [$batch_size, $test_key]]]
    );

    delete $bucketing_model->_buckets->{$test_key};

    $bucketing_model->switch_bucket(
        bucket_key   => $test_key,
        data_shapes  => [['data', [$batch_size, $test_key]]],
        label_shapes => [['softmax_label', [$batch_size, $test_key]]]
    );
}

sub test_monitor
{
    mx->random->seed(11);
    my $data = mx->nd->array([[0.05, .10]]);
    my $label = mx->nd->array([[.01, 0.99]]);
    my $train_data = mx->io->NDArrayIter($data, label => $label, batch_size=>1);

    # symbols
    my $x = mx->symbol->Variable('data');
    $x = mx->symbol->FullyConnected(name=>'fc_0', data=>$x, num_hidden=>2);
    $x = mx->symbol->Activation(name=>"act_0", data=>$x, act_type=>'sigmoid');

t/test_module.t  view on Meta::CPAN

            if($k =~ /^$key/)
            {
                $mon_result_counts->[$idx] += 1;
                return;
            }
        }, $keys);
    }
    is_deeply($mon_result_counts, [2, 2, 1, 6, 6, 4]);
}

sub test_module_dtype
{
    my $dtype = 'float16';
    my $dshape = [3, 8, 7];

    my $sym = mx->sym->Variable('data');
    $sym    = mx->sym->Activation(data=>$sym, act_type=>'relu', __layout__=>'TNC');

    my $mod = mx->mod->Module($sym, data_names=>['data'], context => [mx->cpu(0), mx->cpu(1)]);
    $mod->bind(data_shapes=>[
        mx->io->DataDesc('data', $dshape, dtype => $dtype, layout=>'TNC')

t/test_module.t  view on Meta::CPAN

        )
    );
    $mod->backward([mx->nd->ones($dshape, dtype=>$dtype)]);

    for my $x (@{ $mod->get_outputs() })
    {
        is($x->dtype, $dtype);
    }
}

sub test_module_input_grads
{
    my $a = mx->sym->Variable('a', __layout__=>'NC');
    my $b = mx->sym->Variable('b', __layout__=>'NC');
    my $c = mx->sym->Variable('c', __layout__=>'NC');

    $c = $a + 2 * $b + 3 * $c;
    my $net = mx->mod->Module(
        $c, data_names=>['b', 'c', 'a'],
        context=>[mx->cpu(0), mx->cpu(1)]
    );

t/test_module.t  view on Meta::CPAN

    $net->backward([mx->nd->ones([5, 5])]);
    my $input_grads = $net->get_input_grads();
    my $b_grad = $input_grads->[0]->aspdl;
    my $c_grad = $input_grads->[1]->aspdl;
    my $a_grad = $input_grads->[2]->aspdl;
    ok(($a_grad == 1)->all);
    ok(($b_grad == 2)->all);
    ok(($c_grad == 3)->all);
}

sub test_executor_group
{
    my $get_rnn_sym = sub { my ($num_layers, $num_words, $num_hidden, $num_embed, $seq_len) = @_;
        my $stack = mx->rnn->SequentialRNNCell();
        for my $i (0..$num_layers-1)
        {
            $stack->add(mx->rnn->LSTMCell(num_hidden=>$num_hidden, prefix=>"lstm_l${i}_"));
        }
        my $data = mx->sym->Variable('data');
        my $label = mx->sym->Variable('softmax_label');
        my $embed = mx->sym->Embedding(data=>$data, input_dim=>$num_words,

t/test_module.t  view on Meta::CPAN

        my ($outputs, $states) = $stack->unroll($seq_len, inputs=>$embed, merge_outputs=>1);

        my $pred = mx->sym->Reshape($outputs, shape=>[-1, $num_hidden]);
        $pred = mx->sym->FullyConnected(data=>$pred, num_hidden=>$num_words, name=>'pred');

        $label = mx->sym->Reshape($label, shape=>[-1]);
        $pred = mx->sym->SoftmaxOutput(data=>$pred, label=>$label, name=>'softmax');
        return $pred;
    };

    my $test_shared_exec_group = sub { my ($exec_grp_shared, $exec_grp_created, $shared_arg_names, $extra_args) = @_;
        # Test shared data arrays
        for my $i (0..@{ $exec_grp_shared->execs }-1)
        {
            # test same shared_data_arrays for two exec groups
            my $shared_data_array1 = $exec_grp_shared->shared_data_arrays->[$i];
            my $shared_data_array2 = $exec_grp_created->shared_data_arrays->[$i];
            if(defined $extra_args)
            {
                ok(keys(%$shared_data_array1) == @$extra_args);
            }
            ok(keys(%$shared_data_array1) == keys(%$shared_data_array2));
            while(my ($k, $v) = each %{ $shared_data_array1 })
            {
                if(defined $extra_args)

t/test_module.t  view on Meta::CPAN

                    ok(grep { $_ eq $k } @$extra_args);
                }
                ok(exists $shared_data_array2->{$k});
                ok(same_array($v, $shared_data_array2->{$k}));
            }
            # Test shared argument arrays and gradient arrays
            my $exec_shared  = $exec_grp_shared->execs->[$i];
            my $exec_created = $exec_grp_created->execs->[$i];
            if(defined $shared_arg_names)
            {
                # test shared arguments
                for my $arg_name (@$shared_arg_names)
                {
                    ok(exists $exec_created->arg_dict->{$arg_name});
                    ok(same_array($exec_shared->arg_dict->{$arg_name}, $exec_created->arg_dict->{$arg_name}));
                }
                # test shared argument gradients
                for my $arg_name (@$shared_arg_names)
                {
                    ok(exists $exec_created->grad_dict->{$arg_name});
                    ok(same_array($exec_shared->grad_dict->{$arg_name}, $exec_created->grad_dict->{$arg_name}));
                }
            }
            my $grad_req = $exec_grp_shared->grad_req;
            while(my ($arg_name, $grad) = each %{ $grad_req })
            {
                ok($grad eq $exec_grp_created->grad_req->{$arg_name});

t/test_module.t  view on Meta::CPAN

    $sym = $get_rnn_sym->(5, $num_words, $num_hidden,
                      $num_embed, $max_bucket_size);
    my $arg_names2 = $sym->list_arguments();
    my $exec_group2 = AI::MXNet::DataParallelExecutorGroup->new(symbol=>$sym, contexts=>$contexts,
                                            workload=>$workload, data_shapes=>$data_shapes,
                                            label_shapes=>$label_shapes, param_names=>$shared_arg_names,
                                            for_training=>1, inputs_need_grad=>0,
                                            shared_group=>$exec_group1);
    my %shared_arg_names = map { $_ => 1 } @$shared_arg_names;
    my $extra_args = [grep { not exists $shared_arg_names{$_} } @$arg_names2];
    $test_shared_exec_group->(
        $exec_group1, $exec_group2,
        $shared_arg_names, $extra_args
    );
}

sub test_module_set_params
{
    # data iter
    mx->random->seed(11);
    my $data = mx->nd->array([[0.05, .10]]);
    my $label = mx->nd->array([[.01, 0.99]]);
    my $train_data = mx->io->NDArrayIter(data => $data, label => $label, batch_size => 1);

    # symbols
    my $x = mx->symbol->Variable('data');
    $x = mx->symbol->FullyConnected(name=>'fc_0', data=>$x, num_hidden=>2);

t/test_module.t  view on Meta::CPAN

                  fc_1_weight => mx->nd->array([[.40, .45], [.50, .55]])};

    my $arg_params_extra = {fc_0_weight => mx->nd->array([[.15, .20], [.25, .30]]),
                  fc_0_bias  => mx->nd->array([.35, .35]),
                  fc_1_weight=> mx->nd->array([[.40, .45], [.50, .55]]),
                  fc_1_bias => mx->nd->array([.60, .60]),
                  fc_2_weight => mx->nd->array([.60, .60])};

    my $arg_params_missing_extra = {fc_3_weight => mx->nd->array([.60, .60])};

    # test regular set_params
    $mod->set_params($arg_params_correct, {}, force_init=>1);

    # test allow missing
    $mod->set_params($arg_params_missing, {}, allow_missing=>1, force_init=>1);
    ok(dies_like(sub { $mod->set_params($arg_params_missing, {}, force_init=>1, allow_missing=>0); }, qr/fc_/));

    # test allow extra
    $mod->set_params($arg_params_extra, {}, force_init=>1, allow_missing=>1, allow_extra=>1);
    ok(dies_like(sub { $mod->set_params($arg_params_extra, {}, force_init=>1, allow_missing=>1, allow_extra=>0); }, qr/fc_/));

    # test allow missing + extra, this will throw a runtime error
    ok(dies_like(sub { $mod->set_params($arg_params_missing_extra, {}, force_init=>1, allow_missing=>1, allow_extra=>0); }, qr/fc_/));
}

sub test_forward_reshape
{
    my $num_class = 10;
    my $data1 = mx->sym->Variable('data1');
    my $data2 = mx->sym->Variable('data2');
    my $conv1 = mx->sym->Convolution(data=>$data1, kernel=>[2, 2], num_filter=>2, stride=>[2, 2]);
    my $conv2 = mx->sym->Convolution(data=>$data2, kernel=>[3, 3], num_filter=>3, stride=>[1, 1]);
    my $pooling1 = mx->sym->Pooling(data=>$conv1, kernel=>[2, 2], stride=>[1, 1], pool_type=>"avg");
    my $pooling2 = mx->sym->Pooling(data=>$conv2, kernel=>[2, 2], stride=>[1, 1], pool_type=>"max");
    my $flatten1 = mx->sym->flatten(data=>$pooling1);
    my $flatten2 = mx->sym->flatten(data=>$pooling2);

t/test_module.t  view on Meta::CPAN

    $dataset_shape2 = [10, 3, 20, 40];

    my $pred_dataiter = mx->io->NDArrayIter(data=>[mx->nd->random_uniform(0, 9, $dataset_shape1),
                                            mx->nd->random_uniform(15, 25, $dataset_shape2)]);
    $mod->bind(data_shapes=>[['data1', $dshape1], ['data2', $dshape2]],
             for_training=>0, force_rebind=>1);
    is_deeply($mod->predict($pred_dataiter)->shape, [10, $num_class]);

}

test_module_input_grads();
test_module_dtype();
test_monitor();
test_module_switch_bucket();
test_module_layout();
test_module_states();
test_module_reshape();
test_save_load();
test_executor_group();
test_module_set_params();
test_forward_reshape();

t/test_multi_device_exec.t  view on Meta::CPAN

use strict;
use warnings;
use Test::More tests => 10;
use AI::MXNet qw(mx);
use AI::MXNet::Base;

sub test_ctx_group
{
    my ($data, $fc1, $act1);
    {
        local($mx::AttrScope) = mx->AttrScope(ctx_group=>'stage1');
        $data = mx->symbol->Variable('data');
        $fc1  = mx->symbol->FullyConnected(data => $data, name=>'fc1', num_hidden=>128);
        $act1 = mx->symbol->Activation(data => $fc1, name=>'relu1', act_type=>"relu");
    }
    my %set_stage1 = map { $_ => 1 } @{ $act1->list_arguments };

t/test_multi_device_exec.t  view on Meta::CPAN

        {
            ok($arr->context == $group2ctx->{stage1});
        }
        else
        {
            ok($arr->context == $group2ctx->{stage2});
        }
    }, $texec->arg_arrays, $mlp->list_arguments());
}

test_ctx_group();

t/test_ndarray.t  view on Meta::CPAN

use strict;
use warnings;
use AI::MXNet qw(mx);
use AI::MXNet::TestUtils qw(almost_equal);
use Test::More tests => 10;

sub test_ndarray_reshape
{
    my $tensor = mx->nd->array([[[1, 2], [3, 4]],
                                [[5, 6], [7, 8]]]);
    my $true_res = mx->nd->arange(stop => 8) + 1;
    is_deeply($tensor->reshape([-1])->aspdl->unpdl, $true_res->aspdl->unpdl);
    $true_res  = mx->nd->array([[1, 2, 3, 4],
                                [5, 6, 7, 8]]);
    is_deeply($tensor->reshape([2, -1])->aspdl->unpdl, $true_res->aspdl->unpdl);
    $true_res  = mx->nd->array([[1, 2],
                                [3, 4],
                                [5, 6],
                                [7, 8]]);
    is_deeply($tensor->reshape([-1, 2])->aspdl->unpdl, $true_res->aspdl->unpdl);
}


sub test_moveaxis
{
    my $X = mx->nd->array([[[1, 2, 3], [4, 5, 6]],
                           [[7, 8, 9], [10, 11, 12]]]);
    my $res = $X->moveaxis(0, 2)->aspdl;
    my $true_res = mx->nd->array([[[  1.,   7.],
                                   [  2.,   8.],
                                   [  3.,   9.]],
                                  [[  4.,  10.],
                                   [  5.,  11.],
                                   [  6.,  12.]]]);
    is_deeply($res->unpdl, $true_res->aspdl->unpdl);
    is_deeply($X->moveaxis(2, 0)->shape, [3, 2, 2]);
}


sub test_output
{
    my $shape = [2,2];
    my $ones = mx->nd->ones($shape);
    my $zeros = mx->nd->zeros($shape);
    my $out = mx->nd->zeros($shape);
    mx->nd->ones($shape, out=>$out);
    ok(almost_equal($out->aspdl, $ones->aspdl));
    mx->nd->zeros($shape, out=>$out);
    ok(almost_equal($out->aspdl, $zeros->aspdl));
    mx->nd->full($shape, 2, out=>$out);
    ok(almost_equal($out->aspdl, $ones->aspdl * 2));
}

sub test_cached
{
    my $sym = mx->sym->Convolution(kernel=>[3, 3], num_filter=>10) + 2;
    my $op = mx->nd->CachedOp($sym);
    my $data = mx->nd->ones([3, 4, 10, 10]);
    my $weight = mx->nd->ones([10, 4, 3, 3]);
    my $bias = mx->nd->ones([10]);
    my $o1 = &{$op}($data, $weight, $bias);
    $bias .= 2;
    my $o2 = &{$op}($data, $weight, $bias);
    ok(almost_equal($o2->aspdl, $o1->aspdl+1));
    $o2 .= 0;
    &{$op}($data, $weight, $bias, out=>$o2);
    ok(almost_equal($o2->aspdl, $o1->aspdl+1));
}

test_ndarray_reshape();
test_moveaxis();
test_output();
test_cached();

t/test_optimizers.t  view on Meta::CPAN

                $weight32 += $mom;
            }
        }
        my $tmp = $weight32->astype($weight->dtype);
        $tmp->copyto($weight);
    }
}


package main;
use Test::More tests => 1314;
use AI::MXNet::Base;
use PDL::NiceSlice;
use AI::MXNet::TestUtils qw(same reldiff almost_equal);
use AI::MXNet::Function::Parameters;

func compare_optimizer($opt1, $opt2, $shape, $dtype)
{
    my $w1 = mx->random->uniform({shape => $shape, dtype=>$dtype});
    my $g1 = mx->random->uniform({shape => $shape, dtype=>$dtype});

t/test_optimizers.t  view on Meta::CPAN

    zip(
        sub {
            my ($s1, $s2) = @_;
            ok(reldiff($s1->aspdl, $s2->aspdl) < 1e-5) if defined $s1 and defined $s2;
        },
        ref $state1 eq 'ARRAY' ? $state1 : [$state1], ref $state2 eq 'ARRAY' ? $state2 : [$state2]
    ) if defined $state1 and defined $state2;
    ok(reldiff($w1->aspdl, $w2->aspdl) < 1e-5);
}

func test_adam()
{
    mx->random->seed(0);
    my $opt1 = 'PerlAdam';
    my $opt2 = 'AI::MXNet::Adam';
    my $shape = [3, 4, 5];
    my @kwargs = ({},
              {'clip_gradient'=> 0.5},
              {'clip_gradient'=> 0.1},
              {'rescale_grad'=> 0.1});
    for my $kwarg (@kwargs)
    {
        compare_optimizer($opt1->new(%$kwarg), $opt2->new(wd => 0.9, %$kwarg), $shape, 'float32');
    }
}

func test_rms()
{
    mx->random->seed(0);
    my $opt1 = 'PerlRMSProp';
    my $opt2 = 'AI::MXNet::RMSProp';
    my $shape = [3, 4, 5];
    my @kwargs = ({},
              {clip_gradient => 0.5},
              {clip_gradient => 0.4, rescale_grad => 0.14},
              {rescale_grad  => 0.8},
              {clip_gradient => 0.5, wd => 0.07},

t/test_optimizers.t  view on Meta::CPAN

              {clip_gradient => 0.5, wd => 0.07, centered => 1, clip_weights => 0.01},
              {clip_gradient => 0.4, rescale_grad => 0.14, wd => 0.03, centered => 1, clip_weights => 0.01},
              {rescale_grad  => 0.8, wd => 0.05, centered => 1, clip_weights => 0.01});
    for my $kwarg (@kwargs)
    {
        compare_optimizer($opt1->new(%$kwarg), $opt2->new(%$kwarg), $shape, 'float32');
    }
}


sub test_sgd
{
    mx->random->seed(0);
    my $opt1 = 'PerlSGD';
    my $opt2 = mx->optimizer->SGD;
    my $shape = [3, 4, 5];
    my @mom_options = ({}, {momentum => 0.9});
    my @cg_options = ({}, {clip_gradient => 0.4}, {clip_gradient => 0.5});
    my @rg_options = ({}, {rescale_grad => 0.14}, {rescale_grad => 0.8});
    my @wd_options = ({}, {wd => 0.03}, {wd => 0.05}, {wd => 0.07});
    my @mp_options = ({}, {multi_precision => 0}, {multi_precision => 1});

t/test_optimizers.t  view on Meta::CPAN

                            );
                            compare_optimizer($opt1->new(%kwarg), $opt2->new(%kwarg), $shape, $dtype);
                        }
                    }
                }
            }
        }
    }
}

func test_lr_wd_mult()
{
    my $data = mx->sym->Variable('data');
    my $bias = mx->sym->Variable('fc1_bias', lr_mult => 1.0);
    my $fc1  = mx->sym->FullyConnected({ data => $data, bias => $bias, name => 'fc1', num_hidden => 10, lr_mult => 0 });
    my $fc2  = mx->sym->FullyConnected({ data => $fc1, name => 'fc2', num_hidden => 10, wd_mult => 0.5 });

    my $mod = mx->mod->new(symbol => $fc2, label_names => undef);
    $mod->bind(data_shapes => [['data', [5,10]]]);
    $mod->init_params(initializer => mx->init->Uniform(scale => 1.0));
    $mod->init_optimizer(optimizer_params => { learning_rate => "1.0" });

t/test_optimizers.t  view on Meta::CPAN

    {
        $args2{$k} = $args2{$k}->aspdl;
    }
    is_deeply($mod->_p->_optimizer->lr_mult, { fc1_bias => 1, fc1_weight => 0 }, "lr_mult");
    is_deeply($mod->_p->_optimizer->wd_mult, { fc2_bias => 0.5, fc2_weight => 0.5, fc1_bias => 0, }, "wd_mult");
    ok(almost_equal($args1{fc1_weight}, $args2{fc1_weight}, 1e-10), "fc1_weight");
    ok(!almost_equal($args1{fc1_bias}, $args2{fc1_bias}, 1e-1), "fc1_bias");
    ok(!almost_equal($args1{fc2_weight}, $args2{fc2_weight}, 1e-1), "fc2_weight");
}

test_adam();
test_rms();
test_sgd();
test_lr_wd_mult();

t/test_random.t  view on Meta::CPAN

use strict;
use warnings;
use Test::More tests => 8;
use AI::MXNet qw(mx);
use AI::MXNet::TestUtils qw(same);

sub check_with_device
{
    my ($device)     = @_;
    my ($a, $b)      = (-10, 10);
    my ($mu, $sigma) = (10, 2);
    my $shape        = [100, 100];
    mx->random->seed(128);

t/test_random.t  view on Meta::CPAN

    $yexec->backward($yexec->outputs->[0]);
    my $un1 = ($yexec->outputs->[0] - $x)->copyto($dev);
    ok(same($xgrad->aspdl, $un1->aspdl));
    mx->random->seed(128);
    $yexec->forward;
    my $un2 = ($yexec->outputs->[0] - $x)->copyto($dev);
    ok(same($un1->aspdl, $un2->aspdl));
    ok(abs($un1->aspdl->avg - ($a+$b)/2) < 0.1);
}

sub test_random
{
    check_with_device(mx->cpu);
    check_symbolic_random(mx->cpu);
}

test_random();


