view release on metacpan or search on metacpan
examples/calculator.pl view on Meta::CPAN
#!/usr/bin/perl
use strict;
use warnings;
use AI::MXNet ('mx');
## preparing the samples
## to train our network
sub samples {
my($batch_size, $func) = @_;
# get samples
my $n = 16384;
## creates a pdl with $n rows and two columns with random
## floats in the range between 0 and 1
my $data = PDL->random(2, $n);
## creates the pdl with $n rows and one column with labels
## labels are floats that either sum or product, etc of
## two random values in each corresponding row of the data pdl
my $label = $func->($data->slice('0,:'), $data->slice('1,:'));
examples/char_lstm.pl view on Meta::CPAN
'optimizer=s' => \(my $optimizer = 'adam' ),
'mom=f' => \(my $mom = 0 ),
'wd=f' => \(my $wd = 0.00001 ),
'batch-size=i' => \(my $batch_size = 32 ),
'disp-batches=i' => \(my $disp_batches = 50 ),
'chkp-prefix=s' => \(my $chkp_prefix = 'lstm_' ),
'cell-mode=s' => \(my $cell_mode = 'LSTM' ),
'sample-size=i' => \(my $sample_size = 10000 ),
'chkp-epoch=i' => \(my $chkp_epoch = 1 ),
'bidirectional=i'=> \(my $bidirectional= 0 ),
'help' => sub { HelpMessage(0) },
) or HelpMessage(1);
=head1 NAME
char_lstm.pl - Example of training char LSTM RNN on tiny shakespeare using high level RNN interface
with optional inferred sampling (RNN generates Shakespeare like text)
=head1 SYNOPSIS
--num-layers number of stacked RNN layers, default=2
examples/cudnn_lstm_bucketing.pl view on Meta::CPAN
'optimizer=s' => \(my $optimizer = 'adam' ),
'mom=f' => \(my $mom = 0 ),
'wd=f' => \(my $wd = 0.00001 ),
'batch-size=i' => \(my $batch_size = 32 ),
'disp-batches=i' => \(my $disp_batches = 50 ),
'model-prefix=s' => \(my $model_prefix = 'lstm_' ),
'load-epoch=i' => \(my $load_epoch = 0 ),
'stack-rnn' => \(my $stack_rnn ),
'bidirectional=i' => \(my $bidirectional ),
'dropout=f', => \(my $dropout = 0 ),
'help' => sub { HelpMessage(0) },
) or HelpMessage(1);
=head1 NAME
cudnn_lstm_bucketing.pl - Example of training char LSTM RNN on tiny shakespeare using high level RNN interface
=head1 SYNOPSIS
--test Whether to test or train (default 0)
--num-layers number of stacked RNN layers, default=2
lib/AI/MXNet.pm view on Meta::CPAN
use AI::MXNet::Module::Bucketing;
use AI::MXNet::RNN;
use AI::MXNet::Visualization;
use AI::MXNet::RecordIO;
use AI::MXNet::Image;
use AI::MXNet::Contrib;
use AI::MXNet::Contrib::AutoGrad;
use AI::MXNet::CachedOp;
our $VERSION = '1.0102';
sub import
{
    # Importer for `use AI::MXNet ('mx')`: builds an alias package (here
    # `mx`) at compile time whose class methods map short names
    # (mx->nd, mx->sym, ...) onto the corresponding AI::MXNet::* classes.
    my ($class, $short_name) = @_;
    if($short_name)
    {
        # Sanitize the requested alias: keep only word characters and
        # ':' so the result is a legal Perl package name.
        $short_name =~ s/[^\w:]//g;
        if(length $short_name)
        {
            # The heredoc below interpolates: $short_name is substituted
            # when the string is built, while \$ keeps sigils literal in
            # the generated code. The generated package installs its own
            # per-alias AttrScope / NameManager / Context singletons by
            # overriding the ::current accessors.
            my $short_name_package =<<"EOP";
package $short_name;
no warnings 'redefine';
sub nd { 'AI::MXNet::NDArray' }
sub sym { 'AI::MXNet::Symbol' }
sub symbol { 'AI::MXNet::Symbol' }
sub init { 'AI::MXNet::Initializer' }
sub initializer { 'AI::MXNet::Initializer' }
sub optimizer { 'AI::MXNet::Optimizer' }
sub opt { 'AI::MXNet::Optimizer' }
sub rnd { 'AI::MXNet::Random' }
sub random { 'AI::MXNet::Random' }
sub Context { shift; AI::MXNet::Context->new(\@_) }
sub cpu { AI::MXNet::Context->cpu(\$_[1]//0) }
sub gpu { AI::MXNet::Context->gpu(\$_[1]//0) }
sub kv { 'AI::MXNet::KVStore' }
sub recordio { 'AI::MXNet::RecordIO' }
sub io { 'AI::MXNet::IO' }
sub metric { 'AI::MXNet::Metric' }
sub mod { 'AI::MXNet::Module' }
sub mon { 'AI::MXNet::Monitor' }
sub viz { 'AI::MXNet::Visualization' }
sub rnn { 'AI::MXNet::RNN' }
sub callback { 'AI::MXNet::Callback' }
sub img { 'AI::MXNet::Image' }
sub contrib { 'AI::MXNet::Contrib' }
sub name { '$short_name' }
sub AttrScope { shift; AI::MXNet::Symbol::AttrScope->new(\@_) }
*AI::MXNet::Symbol::AttrScope::current = sub { \$${short_name}::AttrScope; };
\$${short_name}::AttrScope = AI::MXNet::Symbol::AttrScope->new;
sub Prefix { AI::MXNet::Symbol::Prefix->new(prefix => \$_[1]) }
*AI::MXNet::Symbol::NameManager::current = sub { \$${short_name}::NameManager; };
\$${short_name}::NameManager = AI::MXNet::Symbol::NameManager->new;
*AI::MXNet::Context::current_ctx = sub { \$${short_name}::Context; };
\$${short_name}::Context = AI::MXNet::Context->new(device_type => 'cpu', device_id => 0);
1;
EOP
            # NOTE(review): string eval -- compilation errors land in $@
            # and are silently discarded here; consider checking $@.
            eval $short_name_package;
        }
    }
}
1;
__END__
lib/AI/MXNet/Base.pm view on Meta::CPAN
Perl version of Python's C<for x, y, z in zip(arr_x, arr_y, arr_z)> loop
Parameters
----------
$sub_ref, called with @_ filled with $arr_x->[$i], $arr_y->[$i], $arr_z->[$i]
for each loop iteration.
@array_refs
=cut
# Iterate several parallel array refs in lockstep, invoking $code once
# per index with the i-th element of each array (Perl analogue of
# Python's `for x, y, z in zip(...)`). The iteration count is taken
# from the FIRST array ref; extra elements in later arrays are ignored.
sub zip
{
    my ($code, @lists) = @_;
    my $count = scalar @{ $lists[0] };
    for my $idx (0 .. $count - 1)
    {
        my @slice;
        push @slice, $_->[$idx] for @lists;
        $code->(@slice);
    }
}
=head2 enumerate
Same as zip, but the argument list in the anonymous sub is prepended
by the iteration count.
=cut
# Like zip(), except $code receives the running index (0-based) as its
# first argument ahead of the per-array elements. The length is taken
# from the first array ref.
sub enumerate
{
    my ($code, @lists) = @_;
    my $last_index = $#{ $lists[0] };
    zip($code, [ 0 .. $last_index ], @lists);
}
=head2 product
Calculates the product of the input arguments.
=cut
# Multiply all arguments together and return the result.
# An empty argument list yields 1 (the multiplicative identity).
sub product
{
    my $p = 1;
    # Plain for loop instead of the original `map` in void context:
    # map built and discarded a result list purely for its side effect.
    $p *= $_ for @_;
    return $p;
}
=head2 bisect_left
https://hg.python.org/cpython/file/2.7/Lib/bisect.py
=cut
sub bisect_left
{
my ($a, $x, $lo, $hi) = @_;
$lo //= 0;
$hi //= @{ $a };
if($lo < 0)
{
Carp::confess('lo must be non-negative');
}
while($lo < $hi)
{
lib/AI/MXNet/Callback.pm view on Meta::CPAN
package AI::MXNet::Callback;
use strict;
use warnings;
use List::Util qw/max/;
use AI::MXNet::Function::Parameters;
use Mouse;
use overload "&{}" => sub { my $self = shift; sub { $self->call(@_) } };
=head1 NAME
AI::MXNet::Callback - A collection of predefined callback functions
=cut
=head2 module_checkpoint
Callback to save the module setup in the checkpoint files.
lib/AI/MXNet/Context.pm view on Meta::CPAN
package AI::MXNet::Context;
use strict;
use warnings;
use Mouse;
use AI::MXNet::Types;
use AI::MXNet::Function::Parameters;
use constant devtype2str => { 1 => 'cpu', 2 => 'gpu', 3 => 'cpu_pinned' };
use constant devstr2type => { cpu => 1, gpu => 2, cpu_pinned => 3 };
around BUILDARGS => sub {
my $orig = shift;
my $class = shift;
return $class->$orig(device_type => $_[0])
if @_ == 1 and $_[0] =~ /^(?:cpu|gpu|cpu_pinned)$/;
return $class->$orig(
device_type => $_[0]->device_type,
device_id => $_[0]->device_id
) if @_ == 1 and blessed $_[0];
return $class->$orig(device_type => $_[0], device_id => $_[0])
if @_ == 2 and $_[0] =~ /^(?:cpu|gpu|cpu_pinned)$/;
lib/AI/MXNet/Contrib/AutoGrad.pm view on Meta::CPAN
The index of argument to calculate gradient for.
Returns
-------
grad_and_loss_func: a perl sub
A function that would compute both the gradient of arguments and loss value.
=cut
method grad_and_loss(CodeRef $func, Maybe[Int|ArrayRef[Int]] $argnum=)
{
return sub {
my @args = @_;
my @variables = @_;
if(defined $argnum)
{
my @argnum = ref $argnum ? @$argnum : ($argnum);
@variables = map { $_[$_] } @argnum;
}
map {
assert(
(blessed($_) and $_->isa('AI::MXNet::NDArray')),
lib/AI/MXNet/Contrib/Symbol.pm view on Meta::CPAN
package AI::MXNet::Contrib::Symbol;
use strict;
use warnings;

# Proxy package: any class method called on AI::MXNet::Contrib::Symbol
# is forwarded to AI::MXNet::Symbol with a "_contrib_" prefix, e.g.
# ->quadratic(...) dispatches to AI::MXNet::Symbol->_contrib_quadratic(...).
sub AUTOLOAD {
    my $sub = $AI::MXNet::Contrib::Symbol::AUTOLOAD;
    $sub =~ s/.*:://;
    # Guard: perl routes DESTROY through AUTOLOAD when no DESTROY method
    # exists; there is no _contrib_DESTROY operator, so ignore it rather
    # than dying during object destruction.
    return if $sub eq 'DESTROY';
    $sub = "_contrib_$sub";
    shift;
    return AI::MXNet::Symbol->$sub(@_);
}
1;
lib/AI/MXNet/Executor.pm view on Meta::CPAN
Parameters
----------
handle: ExecutorHandle
ExecutorHandle is generated by calling bind.
See Also
--------
AI::MXNet::Symbol->bind : how to create the AI::MXNet::Executor.
=cut
sub BUILD
{
my $self = shift;
my ($symbol, $ctx, $grad_req, $group2ctx)
=
($self->_symbol, $self->_ctx, $self->_grad_req, $self->_group2ctx);
$symbol = $symbol->deepcopy;
$ctx = $ctx->deepcopy;
if(ref $grad_req)
{
if(ref $grad_req eq 'ARRAY')
lib/AI/MXNet/Function/Parameters.pm view on Meta::CPAN
package AI::MXNet::Function::Parameters;
use strict;
use warnings;
use Function::Parameters ();
use AI::MXNet::Types ();

# Importing this module enables Function::Parameters' strict signature
# keywords (`func` and `method`) in the calling package, with signature
# type names resolved through Mouse's type-constraint registry.
sub import {
    Function::Parameters->import(
        {
            func => {
                defaults => 'function_strict',
                runtime => 1,
                # Map a type name from a signature (e.g. "Int") to a
                # Mouse type constraint, creating it if necessary.
                reify_type => sub {
                    Mouse::Util::TypeConstraints::find_or_create_isa_type_constraint($_[0])
                }
            },
            method => {
                defaults => 'method_strict',
                runtime => 1,
                reify_type => sub {
                    Mouse::Util::TypeConstraints::find_or_create_isa_type_constraint($_[0])
                }
            },
        }
    );
}
{
    # Monkey-patch Function::Parameters' error reporter so signature
    # violations produce a full stack trace (Carp::confess) instead of
    # a bare croak. NOTE(review): this overrides a private symbol
    # (_croak) of Function::Parameters and may break on module upgrades.
    no warnings 'redefine';
    *Function::Parameters::_croak = sub {
        local($Carp::CarpLevel) = 1;
        Carp::confess ("@_");
    };
}
1;
lib/AI/MXNet/IO.pm view on Meta::CPAN
package AI::MXNet::DataDesc;
use Mouse;
use overload '""' => \&stringify,
'@{}' => \&to_nameshape;
has 'name' => (is => 'ro', isa => "Str", required => 1);
has 'shape' => (is => 'ro', isa => "Shape", required => 1);
has 'dtype' => (is => 'ro', isa => "Dtype", default => 'float32');
has 'layout' => (is => 'ro', isa => "Str", default => 'NCHW');
# Accept a positional calling convention in addition to named args:
# AI::MXNet::DataDesc->new($name, \@shape, %rest) is rewritten into
# (name => $name, shape => \@shape, %rest) before Mouse sees it.
around BUILDARGS => sub {
    my ($orig, $class, @args) = @_;
    if(@args >= 2 and ref $args[1] eq 'ARRAY')
    {
        my $name  = shift @args;
        my $shape = shift @args;
        return $class->$orig(name => $name, shape => $shape, @args);
    }
    return $class->$orig(@args);
};
lib/AI/MXNet/KVStore.pm view on Meta::CPAN
=head1 DESCRIPTION
Key value store interface of MXNet for parameter synchronization, over multiple devices.
=cut
has 'handle' => (is => 'ro', isa => 'KVStoreHandle', required => 1);
has '_updater' => (is => 'rw', isa => 'AI::MXNet::Updater');
has '_updater_func' => (is => 'rw', isa => 'CodeRef');
# Destructor hook (Mouse): release the underlying C KVStore handle when
# the Perl wrapper object is garbage-collected.
sub DEMOLISH
{
    my ($self) = @_;
    check_call(AI::MXNetCAPI::KVStoreFree($self->handle));
}
=head2 init
Initialize a single or a sequence of key-value pairs into the store.
For each key, one must init it before push and pull.
Only worker 0's (rank == 0) data are used.
This function returns after data have been initialized successfully
lib/AI/MXNet/KVStoreServer.pm view on Meta::CPAN
=head2 new
Initialize a new KVStoreServer.
Parameters
----------
kvstore : KVStore
=cut
has 'kvstore' => (is => 'ro', isa => 'AI::MXNet::KVStore', required => 1);
has 'handle' => (is => 'ro', isa => 'KVStoreHandle', default => sub { shift->kvstore->handle }, lazy => 1);
has 'init_logging' => (is => 'rw', isa => 'Int', default => 0);
# return the server controller
method _controller()
{
return sub {
my ($cmd_id, $cmd_body) = @_;
if (not $self->init_logging)
{
## TODO write logging
$self->init_logging(1);
}
if($cmd_id == 0)
{
my $optimizer = Storable::thaw(MIME::Base64::decode_base64($cmd_body));
$self->kvstore->set_optimizer($optimizer);
lib/AI/MXNet/LRScheduler.pm view on Meta::CPAN
package AI::MXNet::LRScheduler;
use strict;
use warnings;
use Mouse;
use AI::MXNet::Function::Parameters;
use AI::MXNet::Logging;
use overload "&{}" => sub { my $self = shift; sub { $self->call(@_) } },
fallback => 1;
=head1 NAME
AI::MXNet::LRScheduler - The adaptive scheduler of the learning rate.
=cut
=head1 DESCRIPTION
Learning rate scheduler, which adaptively changes the learning rate based on the
lib/AI/MXNet/Logging.pm view on Meta::CPAN
package AI::MXNet::Logging;
## TODO
use Mouse;
# Minimal logging stub: formats the message sprintf-style (first arg is
# the format, rest are its values) and emits it on STDERR via warn.
sub warning { shift; warn sprintf(shift, @_) . "\n" };
# debug/info are typeglob aliases of warning, so all log levels
# currently behave identically.
*debug = *info = *warning;
# Return a fresh logger instance (->new is provided by Mouse).
sub get_logger { __PACKAGE__->new }
1;
lib/AI/MXNet/Metric.pm view on Meta::CPAN
use warnings;
use AI::MXNet::Function::Parameters;
use Scalar::Util qw/blessed/;
=head1 NAME
AI::MXNet::Metric - Online evaluation metric module.
=cut
# Check to see if the two arrays are the same size.
sub _calculate_shape
{
my $input = shift;
my ($shape);
if(blessed($input))
{
if($input->isa('PDL'))
{
$shape = $input->shape->at(-1);
}
else
lib/AI/MXNet/Module.pm view on Meta::CPAN
return;
}
if($kvstore)
{
my $name = $param_names->[$index];
# push gradient, priority is negative index
$kvstore->push($name, $grad_list, priority => -$index);
# pull back the sum gradients, to the same locations.
$kvstore->pull($name, out => $grad_list, priority => -$index);
}
enumerate(sub {
my ($k, $w, $g) = @_;
# faked an index here, to make optimizer create diff
# state for the same index but on diff devs, TODO(mli)
# use a better solution later
&{$updater}($index*$num_device+$k, $g, $w);
}, $arg_list, $grad_list);
}, $param_arrays, $grad_arrays);
}
method load_checkpoint(Str $prefix, Int $epoch)
lib/AI/MXNet/Module/Bucketing.pm view on Meta::CPAN
$validation_sentences, $batch_size, buckets => $buckets,
invalid_label => $invalid_label
);
my $stack = mx->rnn->SequentialRNNCell();
for my $i (0..$num_layers-1)
{
$stack->add(mx->rnn->LSTMCell(num_hidden => $num_hidden, prefix => "lstm_l${i}_"));
}
my $sym_gen = sub {
my $seq_len = shift;
my $data = mx->sym->Variable('data');
my $label = mx->sym->Variable('softmax_label');
my $embed = mx->sym->Embedding(
data => $data, input_dim => scalar(keys %$vocabulary),
output_dim => $num_embed, name => 'embed'
);
$stack->reset;
my ($outputs, $states) = $stack->unroll($seq_len, inputs => $embed, merge_outputs => 1);
my $pred = mx->sym->Reshape($outputs, shape => [-1, $num_hidden]);
view all matches for this distributionview release on metacpan - search on metacpan