view release on metacpan or search on metacpan
lib/AI/TensorFlow/Libtensorflow.pm view on Meta::CPAN
use AI::TensorFlow::Libtensorflow::Eager::ContextOptions;
use AI::TensorFlow::Libtensorflow::Eager::Context;
use FFI::C;
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
FFI::C->ffi($ffi);
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
# Construct an (empty) top-level libtensorflow handle object.
# The object carries no state; all functionality lives in the attached
# C functions and the sibling classes.
sub new {
    my $class = shift;
    return bless {}, $class;
}
# TF_Version(): returns the version string of the loaded libtensorflow.
$ffi->attach( 'Version' => [], 'string' );#}}}
1;
__END__
lib/AI/TensorFlow/Libtensorflow/ApiDefMap.pm view on Meta::CPAN
use warnings;
use namespace::autoclean;
use AI::TensorFlow::Libtensorflow::Lib qw(arg);
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
# TF_NewApiDefMap(): build a TF_ApiDefMap from a serialized OpList proto held
# in a TF_Buffer.  The wrapper drops the Perl class name so the XS function
# only receives (op_list_buffer, status).
$ffi->attach( [ 'NewApiDefMap' => 'New' ] => [
arg 'TF_Buffer' => 'op_list_buffer',
arg 'TF_Status' => 'status',
] => 'TF_ApiDefMap' => sub {
my ($xs, $class, @rest) = @_;
$xs->(@rest);
});
# TF_DeleteApiDefMap(): frees the map when the Perl object goes out of scope.
$ffi->attach( ['DeleteApiDefMap' => 'DESTROY'] => [
arg 'TF_ApiDefMap' => 'apimap'
] => 'void');
$ffi->attach( [ 'ApiDefMapPut' => 'Put' ] => [
arg 'TF_ApiDefMap' => 'api_def_map',
lib/AI/TensorFlow/Libtensorflow/Buffer.pm view on Meta::CPAN
# C layout of TF_Buffer:
#   { const void* data; size_t length; void (*data_deallocator)(void*, size_t); }
# The deallocator slot is stored as an opaque pointer; see data_deallocator()
# below for how a Perl closure is installed into it.
FFI::C->struct( 'TF_Buffer' => [
data => 'opaque',
length => 'size_t',
_data_deallocator => 'opaque', # data_deallocator_t
# this does not work?
#_data_deallocator => 'data_deallocator_t',
]);
# FFI::C::StructDef installs its own DESTROY that frees the struct memory;
# remove it so the TF_DeleteBuffer destructor attached below is used instead.
use Sub::Delete;
delete_sub 'DESTROY';
# Get or set the deallocator callback used when libtensorflow releases the
# buffer's data.
#
# Without an argument, returns the currently installed closure (or undef).
# With a coderef, wraps it in a sticky FFI closure, keeps a reference on the
# object so the closure lives as long as the buffer, and installs the
# closure's opaque pointer into the struct's data_deallocator slot.
sub data_deallocator {
    # BUG FIX: was `my ($self, $coderef) = shift;`, which only assigned $self
    # and left $coderef永undef — the setter branch was unreachable.
    my ($self, $coderef) = @_;
    return $self->{_data_deallocator_closure} unless $coderef;
    my $closure = $ffi->closure( $coderef );
    # Sticky: prevent Perl from garbage-collecting the closure while the C
    # side still holds a pointer to it.
    $closure->sticky;
    $self->{_data_deallocator_closure} = $closure;
    my $opaque = $ffi->cast('data_deallocator_t', 'opaque', $closure);
    $self->_data_deallocator( $opaque );
}
# TF_NewBuffer(): allocate an empty buffer.
$ffi->attach( [ 'NewBuffer' => 'New' ] => [] => 'TF_Buffer' );
# TF_NewBufferFromString(): copy a Perl scalar (marshalled as a pointer+length
# pair) into a new buffer.  The wrapper discards the Perl class name argument.
$ffi->attach( [ 'NewBufferFromString' => 'NewFromString' ] => [
arg 'tf_buffer_buffer' => [qw(proto proto_len)]
] => 'TF_Buffer' => sub {
my ($xs, $class, @rest) = @_;
$xs->(@rest);
});
# TF_DeleteBuffer(): destructor (replaces the FFI::C DESTROY removed above).
$ffi->attach( [ 'DeleteBuffer' => 'DESTROY' ] => [ 'TF_Buffer' ], 'void' );
1;
__END__
lib/AI/TensorFlow/Libtensorflow/DataType.pm view on Meta::CPAN
# TF_DataType objects are passed to C as plain ints (the enum value).
$ffi->type('object(AI::TensorFlow::Libtensorflow::DataType,int)', 'TF_DataType');
# TF_DataTypeSize(): size in bytes of one element of this dtype.
$ffi->attach( 'Size' => ['TF_DataType'] => 'size_t' );
# Numeric comparison uses the enum value; string comparison and
# stringification use the dtype's name (see the _op_* subs below).
use overload
'==' => '_op_num_equals',
'eq' => '_op_eq',
'""' => '_op_stringify';
# overload handler for '==': compare dtypes by their integer enum value.
# Either side may be a DataType object (a blessed scalar ref holding the
# enum int) or a plain number.
sub _op_num_equals {
    my ($a, $b, $swap) = @_;
    my $int_a = ref $a ? 0+$$a : 0+$a;
    my $int_b = ref $b ? 0+$$b : 0+$b;
    if( STRICT ) { # ASSERT
        Int->assert_valid($int_a);
        Int->assert_valid($int_b);
    }
    # BUG FIX: the swapped branch read `$int_b == $int_b` (always true).
    # Numeric equality is symmetric, so compare the two operands properly,
    # mirroring _op_eq below.
    !$swap
        ? $int_a == $int_b
        : $int_b == $int_a
}
# overload handler for 'eq': compare dtypes by their stringified names.
# Stringification goes through _op_stringify for DataType objects.
sub _op_eq {
    my ($self, $other, $swap) = @_;
    my $lhs = "$self";
    my $rhs = "$other";
    if( STRICT ) { # ASSERT
        Str->assert_valid($lhs);
        Str->assert_valid($rhs);
    }
    return $swap ? $rhs eq $lhs : $lhs eq $rhs;
}
# overload handler for '""': reverse enum lookup from the dtype's int value
# to its name (e.g. 1 -> "FLOAT").
sub _op_stringify { $_REV_ENUM_DTYPE{ 0 + ${$_[0]}} }
1;
__END__
=pod
=encoding UTF-8
=head1 NAME
lib/AI/TensorFlow/Libtensorflow/Eager/Context.pm view on Meta::CPAN
$AI::TensorFlow::Libtensorflow::Eager::Context::VERSION = '0.0.7';
use strict;
use warnings;
use AI::TensorFlow::Libtensorflow::Lib qw(arg);
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
# TFE_NewContext(): create an eager-execution context from context options.
# The wrapper drops the Perl class name before calling the XS function.
$ffi->attach( [ 'NewContext' => 'New' ] => [
arg TFE_ContextOptions => 'opts',
arg TF_Status => 'status'
] => 'TFE_Context' => sub {
my ($xs, $class, @rest) = @_;
$xs->(@rest);
} );
__END__
=pod
=encoding UTF-8
lib/AI/TensorFlow/Libtensorflow/Graph.pm view on Meta::CPAN
arg TF_Status => 'status',
] => 'TF_ImportGraphDefResults');
# TF_GraphImportGraphDefWithReturnOutputs(): import a serialized GraphDef
# into the graph and collect the outputs that the import options requested.
# Returns an arrayref of AI::TensorFlow::Libtensorflow::Output objects.
$ffi->attach( [ 'GraphImportGraphDefWithReturnOutputs' => 'ImportGraphDefWithReturnOutputs' ] => [
arg TF_Graph => 'graph',
arg TF_Buffer => 'graph_def',
arg TF_ImportGraphDefOptions => 'options',
arg TF_Output_struct_array => 'return_outputs',
arg int => 'num_return_outputs',
arg TF_Status => 'status',
] => 'void' => sub {
my ($xs, $graph, $graph_def, $options, $status) = @_;
# The C API requires a caller-allocated output array; its length comes
# from the import options.
my $num_return_outputs = $options->NumReturnOutputs;
return [] if $num_return_outputs == 0;
my $return_outputs = AI::TensorFlow::Libtensorflow::Output->_adef->create( $num_return_outputs );
$xs->($graph, $graph_def, $options,
$return_outputs, $num_return_outputs,
$status);
# Copy the packed TF_Output structs into Perl-side Output objects.
return AI::TensorFlow::Libtensorflow::Output->_from_array( $return_outputs );
});
lib/AI/TensorFlow/Libtensorflow/Graph.pm view on Meta::CPAN
arg 'TF_Output' => 'output',
arg 'tf_dims_buffer' => [qw(dims num_dims)],
arg 'TF_Status' => 'status',
] => 'void');
# TF_GraphGetTensorShape(): fill a caller-sized dims buffer with the shape of
# the given graph output.  The wrapper pre-sizes the buffer by first calling
# GetTensorNumDims, then returns the populated dims arrayref.
$ffi->attach( ['GraphGetTensorShape' => 'GetTensorShape'] => [
arg 'TF_Graph' => 'graph',
arg 'TF_Output' => 'output',
arg 'tf_dims_buffer' => [qw(dims num_dims)],
arg 'TF_Status' => 'status',
] => 'void' => sub {
my ($xs, @rest) = @_;
my ($graph, $output, $status) = @rest;
# Allocate one zeroed slot per dimension for the C side to fill in.
my $dims = [ (0)x($graph->GetTensorNumDims($output, $status)) ];
$xs->($graph, $output, $dims, $status);
return $dims;
});
$ffi->attach( [ 'GraphGetTensorNumDims' => 'GetTensorNumDims' ] => [
arg 'TF_Graph' => 'graph',
arg 'TF_Output' => 'output',
lib/AI/TensorFlow/Libtensorflow/ImportGraphDefResults.pm view on Meta::CPAN
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
# TF_DeleteImportGraphDefResults(): destructor for the results object.
$ffi->attach( [ 'DeleteImportGraphDefResults' => 'DESTROY' ] => [
arg TF_ImportGraphDefResults => 'results',
] => 'void' );
# TF_ImportGraphDefResultsReturnOutputs(): fetch the TF_Output array owned by
# the results object.  The wrapper unpacks the C array into an arrayref of
# independent Output objects.
$ffi->attach( [ 'ImportGraphDefResultsReturnOutputs' => 'ReturnOutputs' ] => [
arg TF_ImportGraphDefResults => 'results',
arg 'int*' => 'num_outputs',
arg 'opaque*' => { id => 'outputs', type => 'TF_Output_struct_array*' },
] => 'void' => sub {
my ($xs, $results) = @_;
my $num_outputs;
my $outputs_array = undef;
$xs->($results, \$num_outputs, \$outputs_array);
return [] if $num_outputs == 0;
my $sizeof_output = $ffi->sizeof('TF_Output');
# View (not copy) the C array as a packed Perl string...
window(my $outputs_packed, $outputs_array, $sizeof_output * $num_outputs );
# due to unpack, these are copies (no longer owned by $results)
my @outputs = map bless(\$_, "AI::TensorFlow::Libtensorflow::Output"),
unpack "(a${sizeof_output})*", $outputs_packed;
return \@outputs;
});
# TF_ImportGraphDefResultsReturnOperations(): fetch the TF_Operation* array
# owned by the results object; each pointer is cast back to an object.
$ffi->attach( [ 'ImportGraphDefResultsReturnOperations' => 'ReturnOperations' ] => [
arg TF_ImportGraphDefResults => 'results',
arg 'int*' => 'num_opers',
arg 'opaque*' => { id => 'opers', type => 'TF_Operation_array*' },
] => 'void' => sub {
my ($xs, $results) = @_;
my $num_opers;
my $opers_array = undef;
$xs->($results, \$num_opers, \$opers_array);
return [] if $num_opers == 0;
my $opers_array_base_packed = buffer_to_scalar($opers_array,
$ffi->sizeof('opaque') * $num_opers );
my @opers = map {
$ffi->cast('opaque', 'TF_Operation', $_ )
} unpack "(@{[ AI::TensorFlow::Libtensorflow::Lib::_pointer_incantation ]})*", $opers_array_base_packed;
return \@opers;
} );
$ffi->attach( [ 'ImportGraphDefResultsMissingUnusedInputMappings' => 'MissingUnusedInputMappings' ] => [
arg TF_ImportGraphDefResults => 'results',
arg 'int*' => 'num_missing_unused_input_mappings',
arg 'opaque*' => { id => 'src_names', ctype => 'const char***' },
arg 'opaque*' => { id => 'src_indexes', ctype => 'int**' },
] => 'void' => sub {
my ($xs, $results) = @_;
my $num_missing_unused_input_mappings;
my $src_names;
my $src_indexes;
$xs->($results,
\$num_missing_unused_input_mappings,
\$src_names, \$src_indexes
);
my $src_names_str = $ffi->cast('opaque',
"string[$num_missing_unused_input_mappings]", $src_names);
lib/AI/TensorFlow/Libtensorflow/Input.pm view on Meta::CPAN
# Memory layout of TF_Input: { TF_Operation* oper; int index; } with explicit
# tail padding on 64-bit platforms so the Perl record size matches the C
# struct size.
record_layout_1($ffi,
'opaque' => '_oper', # 8 (on 64-bit)
'int' => '_index', # 4
$ffi->sizeof('opaque') == 8 ? (
'char[4]' => ':',
) : (),
);
$ffi->type('record(AI::TensorFlow::Libtensorflow::Input)', 'TF_Input');
# Public constructor: New({ oper => $tf_operation, index => $int }).
# The TF_Operation object is stored internally as an opaque pointer
# (see oper() for the reverse cast).
sub New {
    my ($class, $args) = @_;
    my $record = $class->new({
        _oper => $ffi->cast( 'TF_Operation', 'opaque', delete $args->{oper} ),
        _index => delete $args->{index},
    });
    # Previously relied on the implicit return value of the `my` assignment;
    # make the return explicit for clarity.
    return $record;
}
# The operation is stored as an opaque pointer; cast it back to an object.
sub oper { $ffi->cast('opaque', 'TF_Operation', $_[0]->_oper ) }
# Output index within the operation.
sub index { $_[0]->_index }
use FFI::C::ArrayDef;
use FFI::C::StructDef;
# FFI::C view of the same TF_Input layout, used for building C arrays of
# TF_Input structs (the __ignore member is the 64-bit tail padding).
my $sdef = FFI::C::StructDef->new($ffi,
name => 'TF_Input_struct',
members => [
_oper => 'opaque',
_index => 'int',
__ignore => 'char[4]',
],
);
my $adef = FFI::C::ArrayDef->new($ffi,
name => 'TF_Input_struct_array',
members => [ 'TF_Input_struct' ]
);
# Accessor for the array definition; used by _as_array/_from_array.
sub _adef { $adef; }
# Pack a list of record objects into a newly-created C struct array.
# Undefined entries are left as the array's zero-initialized default.
sub _as_array {
    my ($class, @records) = @_;
    my $c_array = $class->_adef->create(scalar @records);
    for my $i (0 .. $#records) {
        my $rec = $records[$i];
        $class->_copy_to_other($rec, $c_array->[$i]) if defined $rec;
    }
    return $c_array;
}
# Unpack a C struct array into an arrayref of independent record objects
# (each element is copied out, so it no longer aliases the C array).
sub _from_array {
    my ($class, $c_array) = @_;
    my @records;
    for my $i (0 .. $c_array->count - 1) {
        my $rec = $class->new;
        $class->_copy_to_other($c_array->[$i], $rec);
        push @records, $rec;
    }
    return \@records;
}
# Copy the two fields (operation pointer, index) from one record/struct view
# to another.  Works in either direction since both expose the same accessors.
sub _copy_to_other {
    my ($class, $src, $dst) = @_;
    $dst->_oper($src->_oper);
    $dst->_index($src->_index);
}
# Custom FFI type: marshal an arrayref of TF_Input records as a single packed
# C array pointer (with_size => 0 means no separate length argument).
$ffi->load_custom_type(
RecordArrayRef( 'InputArrayPtr',
record_module => __PACKAGE__, with_size => 0,
),
=> 'TF_Input_array');
lib/AI/TensorFlow/Libtensorflow/Lib.pm view on Meta::CPAN
use FFI::CheckLib 0.28 qw( find_lib_or_die );
use Alien::Libtensorflow;
use FFI::Platypus;
use AI::TensorFlow::Libtensorflow::Lib::FFIType::Variant::PackableArrayRef;
use AI::TensorFlow::Libtensorflow::Lib::FFIType::Variant::PackableMaybeArrayRef;
use AI::TensorFlow::Libtensorflow::Lib::FFIType::TFPtrSizeScalar;
use base 'Exporter::Tiny';
our @EXPORT_OK = qw(arg);
# Path to the libtensorflow shared library.  The environment variable
# AI_TENSORFLOW_LIBTENSORFLOW_LIB_DLL overrides discovery; otherwise the
# library is located via FFI::CheckLib (checking for the TF_Version symbol),
# falling back to the Alien::Libtensorflow distribution. Dies if not found.
sub lib {
$ENV{AI_TENSORFLOW_LIBTENSORFLOW_LIB_DLL}
// find_lib_or_die(
lib => 'tensorflow',
symbol => ['TF_Version'],
alien => ['Alien::Libtensorflow'] );
}
sub ffi {
state $ffi;
$ffi ||= do {
my $ffi = FFI::Platypus->new( api => 2 );
$ffi->lib( __PACKAGE__->lib );
$ffi->load_custom_type('::PointerSizeBuffer' => 'tf_config_proto_buffer');
$ffi->load_custom_type('::PointerSizeBuffer' => 'tf_tensor_shape_proto_buffer');
$ffi->load_custom_type('::PointerSizeBuffer' => 'tf_attr_value_proto_buffer');
$ffi->load_custom_type('AI::TensorFlow::Libtensorflow::Lib::FFIType::TFPtrSizeScalar'
lib/AI/TensorFlow/Libtensorflow/Lib.pm view on Meta::CPAN
## Callbacks for deallocation
# For TF_Buffer
$ffi->type('(opaque,size_t)->void' => 'data_deallocator_t');
# For TF_Tensor
$ffi->type('(opaque,size_t,opaque)->void' => 'tensor_deallocator_t');
$ffi;
};
}
# Returns a symbol-mangler closure that prefixes C symbol names: "TF_" for
# most packages, "TFE_" when the *calling* package is under ::Eager::
# (the eager API uses the TFE_ prefix).
sub mangler_default {
    my $caller_pkg = (caller)[0];
    my $prefix = $caller_pkg =~ /::Eager::/ ? 'TFE' : 'TF';
    return sub {
        my ($symbol) = @_;
        return "${prefix}_${symbol}";
    };
}
# Returns a symbol-mangler closure specialized for one TF object type, e.g.
# for "Status": New -> TF_NewStatus, Delete -> TF_DeleteStatus, anything
# else -> TF_Status<Method>.
sub mangler_for_object {
    my ($class, $object_name) = @_;
    return sub {
        my ($method) = @_;
        # Constructor and destructor have the prefix form TF_New*/TF_Delete*.
        return "TF_New${object_name}"    if $method eq 'New';
        return "TF_Delete${object_name}" if $method eq 'Delete';
        # All other methods: TF_<Object><Method>.
        return "TF_${object_name}${method}";
    };
}
# DSL helper for attach() signatures: `arg TYPE => NAME, ...` consumes two
# items from the list and replaces them with an _Arg object (which stringifies
# to the type, so FFI::Platypus sees a plain type name), passing the rest of
# the list through unchanged.
sub arg(@) {
my $arg = AI::TensorFlow::Libtensorflow::Lib::_Arg->new(
type => shift,
id => shift,
);
return $arg, @_;
}
# from FFI::Platypus::Type::StringArray
use constant _pointer_incantation =>
$^O eq 'MSWin32' && do { require Config; $Config::Config{archname} =~ /MSWin32-x64/ }
lib/AI/TensorFlow/Libtensorflow/Lib.pm view on Meta::CPAN
# Lightweight named-argument holder produced by arg() above.  Stringifies to
# its FFI type name so it can be dropped into an attach() signature; 'eq'
# compares by that stringification.
package # hide from PAUSE
AI::TensorFlow::Libtensorflow::Lib::_Arg {
use Class::Tiny qw(type id);
use overload
q{""} => 'stringify',
eq => 'eq';
# The type name is the canonical string form.
sub stringify { $_[0]->type }
sub eq {
my ($self, $other, $swap) = @_;
# String equality is symmetric, so $swap can be ignored.
"$self" eq "$other";
}
}
1;
lib/AI/TensorFlow/Libtensorflow/Lib/FFIType/TFPtrPtrLenSizeArrayRefScalar.pm view on Meta::CPAN
package AI::TensorFlow::Libtensorflow::Lib::FFIType::TFPtrPtrLenSizeArrayRefScalar;
# ABSTRACT: Type to hold string list as void** strings, size_t* lengths, int num_items
$AI::TensorFlow::Libtensorflow::Lib::FFIType::TFPtrPtrLenSizeArrayRefScalar::VERSION = '0.0.7';
use strict;
use warnings;
# TODO implement this
# Placeholder marshaller: dies with "Unimplemented" (yada-yada) if ever used.
sub perl_to_native {
...
}
# Placeholder post-call hook: also unimplemented.
sub perl_to_native_post {
...
}
# Custom-type descriptor: declares the (eventual) shape of this type —
# one opaque native argument expanding to three argument slots.
sub ffi_custom_type_api_1 {
{
'native_type' => 'opaque',
'perl_to_native' => \&perl_to_native,
'perl_to_native_post' => \&perl_to_native_post,
argument_count => 3,
}
}
1;
lib/AI/TensorFlow/Libtensorflow/Lib/FFIType/TFPtrSizeScalar.pm view on Meta::CPAN
);
use FFI::Platypus::Buffer qw( scalar_to_buffer );
# Per-call bookkeeping; entries are pushed in perl_to_native and popped after
# the underlying C call returns.
my @stack;
# Alias the size_t argument setter matching the platform's size_t width.
*arguments_set_size_t
= FFI::Platypus->new( api => 2 )->sizeof('size_t') == 4
? \&arguments_set_uint32
: \&arguments_set_uint64;
# Marshal a plain Perl scalar into a (pointer, size) argument pair.
sub perl_to_native {
my($pointer, $size) = scalar_to_buffer($_[0]);
# Record the pair so it stays referenced for the duration of the call.
push @stack, [ $pointer, $size ];
arguments_set_pointer $_[1], $pointer;
arguments_set_size_t($_[1]+1, $size);
}
# Pop the bookkeeping entry; nothing is copied back to the Perl scalar.
sub perl_to_native_post {
my($pointer, $size) = @{ pop @stack };
();
}
# Custom-type descriptor: one opaque native argument expanding to two
# argument slots (pointer, size).
sub ffi_custom_type_api_1
{
{
native_type => 'opaque',
perl_to_native => \&perl_to_native,
perl_to_native_post => \&perl_to_native_post,
argument_count => 2,
}
}
1;
lib/AI/TensorFlow/Libtensorflow/Lib/FFIType/TFPtrSizeScalarRef.pm view on Meta::CPAN
# Per-call bookkeeping; entries are pushed in perl_to_native and popped after
# the underlying C call returns.
my @stack;
# See FFI::Platypus::Type::PointerSizeBuffer
# Alias the size_t argument setter matching the platform's size_t width.
*arguments_set_size_t
= FFI::Platypus->new( api => 2 )->sizeof('size_t') == 4
? \&arguments_set_uint32
: \&arguments_set_uint64;
# Marshal a scalar-ref into a (pointer, size) pair; a ref to undef becomes
# (NULL, 0).  Dies on anything that is not a SCALAR ref.
sub perl_to_native {
my ($value, $i) = @_;
die "Value must be a ScalarRef" unless ref $value eq 'SCALAR';
my ($pointer, $size) = defined $$value
? scalar_to_buffer($$value)
: (0, 0);
push @stack, [ $value, $pointer, $size ];
arguments_set_pointer( $i , $pointer);
arguments_set_size_t( $i+1, $size);
}
# Drop the bookkeeping entry; the referenced scalar is not copied back.
sub perl_to_native_post {
pop @stack;
();
}
# Custom-type descriptor: one opaque native argument expanding to two slots.
sub ffi_custom_type_api_1 {
{
'native_type' => 'opaque',
'perl_to_native' => \&perl_to_native,
'perl_to_native_post' => \&perl_to_native_post,
argument_count => 2,
}
}
1;
lib/AI/TensorFlow/Libtensorflow/Lib/FFIType/Variant/PackableArrayRef.pm view on Meta::CPAN
# ABSTRACT: ArrayRef to pack()'ed scalar argument with size argument (as int)
$AI::TensorFlow::Libtensorflow::Lib::FFIType::Variant::PackableArrayRef::VERSION = '0.0.7';
use strict;
use warnings;
use FFI::Platypus::Buffer qw(scalar_to_buffer buffer_to_scalar);
use FFI::Platypus::API qw( arguments_set_pointer arguments_set_sint32 );
use Package::Variant;
use Module::Runtime 'module_notional_filename';
# Package::Variant factory: builds a custom FFI type that marshals an
# ArrayRef as a pack()'ed buffer plus an int element count.
# Required argument: pack_type — a single pack() template character.
sub make_variant {
my ($class, $target_package, $package, %arguments) = @_;
die "Invalid pack type, must be single character"
unless $arguments{pack_type} =~ /^.$/;
# Per-call bookkeeping shared by the two closures below.
my @stack;
my $perl_to_native = install perl_to_native => sub {
my ($value, $i) = @_;
die "Value must be an ArrayRef"
unless defined $value && ref $value eq 'ARRAY';
# Pack the elements into one contiguous buffer for the C side.
my $data = pack $arguments{pack_type} . '*', @$value;
my $n = scalar @$value;
my ($pointer, $size) = scalar_to_buffer($data);
push @stack, [ \$data, $pointer, $size ];
arguments_set_pointer( $i , $pointer);
arguments_set_sint32( $i+1, $n);
};
my $perl_to_native_post = install perl_to_native_post => sub {
# Copy the (possibly C-modified) buffer back and unpack it into the
# caller's arrayref, making this an in/out parameter.
my ($data_ref, $pointer, $size) = @{ pop @stack };
$$data_ref = buffer_to_scalar($pointer, $size);
@{$_[0]} = unpack $arguments{pack_type} . '*', $$data_ref;
();
};
# Descriptor: one opaque native argument expanding to (pointer, count).
install ffi_custom_type_api_1 => sub {
{
native_type => 'opaque',
argument_count => 2,
perl_to_native => $perl_to_native,
perl_to_native_post => $perl_to_native_post,
}
};
}
# Name the generated variant package TF<Name> under the FFIType namespace,
# refusing to overwrite an already-loaded package.
sub make_variant_package_name {
my ($class, $package, %arguments) = @_;
$package = "AI::TensorFlow::Libtensorflow::Lib::FFIType::TF${package}";
die "Won't clobber $package" if $INC{module_notional_filename $package};
return $package;
}
1;
__END__
lib/AI/TensorFlow/Libtensorflow/Lib/FFIType/Variant/PackableMaybeArrayRef.pm view on Meta::CPAN
# ABSTRACT: Maybe[ArrayRef] to pack()'ed scalar argument with size argument (as int) (size is -1 if undef)
$AI::TensorFlow::Libtensorflow::Lib::FFIType::Variant::PackableMaybeArrayRef::VERSION = '0.0.7';
use strict;
use warnings;
use FFI::Platypus::Buffer qw(scalar_to_buffer buffer_to_scalar);
use FFI::Platypus::API qw( arguments_set_pointer arguments_set_sint32 );
use Package::Variant;
use Module::Runtime 'module_notional_filename';
# Package::Variant factory: like PackableArrayRef, but the value may be
# undef, which is marshalled as (NULL pointer, count -1).
# Required argument: pack_type — a single pack() template character.
sub make_variant {
my ($class, $target_package, $package, %arguments) = @_;
die "Invalid pack type, must be single character"
unless $arguments{pack_type} =~ /^.$/;
# Per-call bookkeeping shared by the two closures below.
my @stack;
my $perl_to_native = install perl_to_native => sub {
my ($value, $i) = @_;
if( defined $value ) {
die "Value must be an ArrayRef" unless ref $value eq 'ARRAY';
my $data = pack $arguments{pack_type} . '*', @$value;
my $n = scalar @$value;
my ($pointer, $size) = scalar_to_buffer($data);
push @stack, [ \$data, $pointer, $size ];
arguments_set_pointer( $i , $pointer);
arguments_set_sint32( $i+1, $n);
} else {
# undef maps to a NULL pointer and a sentinel count of -1.
my $data = undef;
my $n = -1;
my ($pointer, $size) = (0, 0);
push @stack, [ \$data, $pointer, $size ];
arguments_set_pointer( $i , $pointer);
arguments_set_sint32( $i+1, $n);
}
};
my $perl_to_native_post = install perl_to_native_post => sub {
my ($data_ref, $pointer, $size) = @{ pop @stack };
# Write back only when the caller's value is mutable (skips literals).
# NOTE(review): Scalar::Util::readonly is called here but no
# `use Scalar::Util` is visible in this file — confirm it is loaded
# by a dependency before this runs.
if( ! Scalar::Util::readonly($_[0]) ) {
$$data_ref = buffer_to_scalar($pointer, $size);
@{$_[0]} = unpack $arguments{pack_type} . '*', $$data_ref;
}
();
};
# Descriptor: one opaque native argument expanding to (pointer, count).
install ffi_custom_type_api_1 => sub {
{
native_type => 'opaque',
argument_count => 2,
perl_to_native => $perl_to_native,
perl_to_native_post => $perl_to_native_post,
}
};
}
# Name the generated variant package TF<Name> under the FFIType namespace,
# refusing to overwrite an already-loaded package.
sub make_variant_package_name {
my ($class, $package, %arguments) = @_;
$package = "AI::TensorFlow::Libtensorflow::Lib::FFIType::TF${package}";
die "Won't clobber $package" if $INC{module_notional_filename $package};
return $package;
}
1;
__END__
lib/AI/TensorFlow/Libtensorflow/Lib/FFIType/Variant/RecordArrayRef.pm view on Meta::CPAN
# ABSTRACT: Turn FFI::Platypus::Record into packed array (+ size)?
$AI::TensorFlow::Libtensorflow::Lib::FFIType::Variant::RecordArrayRef::VERSION = '0.0.7';
use strict;
use warnings;
use FFI::Platypus::Buffer qw(scalar_to_buffer buffer_to_scalar);
use FFI::Platypus::API qw( arguments_set_pointer arguments_set_sint32 );
use Package::Variant;
use Module::Runtime qw(module_notional_filename is_module_name);
# Package::Variant factory: marshals an ArrayRef of FFI::Platypus::Record
# objects as one packed C array (optionally followed by an int count).
# Arguments: record_module (required), with_size (default 1).
sub make_variant {
my ($class, $target_package, $package, %arguments) = @_;
die "Missing/invalid module name: $arguments{record_module}"
unless is_module_name($arguments{record_module});
my $record_module = $arguments{record_module};
my $with_size = exists $arguments{with_size} ? $arguments{with_size} : 1;
# Per-call bookkeeping shared by the two closures below.
my @stack;
my $perl_to_native = install perl_to_native => sub {
my ($value, $i) = @_;
# Records are blessed scalar refs of packed bytes; concatenate them.
my $data = pack "(a*)*", map $$_, @$value;
my($pointer, $size) = scalar_to_buffer($data);
my $n = @$value;
# NOTE(review): an empty @$value makes $n == 0 and this divides by
# zero — confirm callers never pass an empty arrayref.
my $sizeof = $size / $n;
push @stack, [ \$data, $n, $pointer, $size , $sizeof ];
arguments_set_pointer $i , $pointer;
arguments_set_sint32 $i+1, $n if $with_size;
};
my $perl_to_native_post = install perl_to_native_post => sub {
# Copy the buffer back and re-split it into fresh record objects,
# making this an in/out parameter.
my($data_ref, $n, $pointer, $size, $sizeof) = @{ pop @stack };
$$data_ref = buffer_to_scalar($pointer, $size);
@{$_[0]} = map {
bless \$_, $record_module
} unpack "(a${sizeof})*", $$data_ref;
();
};
# Descriptor: pointer only, or (pointer, count) when with_size is set.
install ffi_custom_type_api_1 => sub {
{
native_type => 'opaque',
argument_count => ($with_size ? 2 : 1),
perl_to_native => $perl_to_native,
perl_to_native_post => $perl_to_native_post,
}
};
}
# Name the generated variant package TF<Name> under the FFIType namespace,
# refusing to overwrite an already-loaded package.
sub make_variant_package_name {
my ($class, $package, %arguments) = @_;
$package = "AI::TensorFlow::Libtensorflow::Lib::FFIType::TF${package}";
die "Won't clobber $package" if $INC{module_notional_filename $package};
return $package;
}
1;
__END__
lib/AI/TensorFlow/Libtensorflow/Lib/_Alloc.pm view on Meta::CPAN
# Select an aligned-allocation implementation: the C library's C11
# aligned_alloc() when the symbol exists, otherwise a pure-Perl fallback that
# over-allocates and stores the alignment offset in the byte before the
# returned pointer.
if( $ffi->find_symbol('aligned_alloc') ) {
# C11 aligned_alloc()
# NOTE: C11 aligned_alloc not available on Windows.
# void *aligned_alloc(size_t alignment, size_t size);
$ffi->attach( [ 'aligned_alloc' => '_aligned_alloc' ] =>
[ 'size_t', 'size_t' ] => 'opaque' );
# aligned_alloc memory is released with plain free().
*_aligned_free = *free;
# Flag: C11 requires size to be a multiple of alignment; callers round up.
$_ALIGNED_ALLOC_ALIGNMENT_MULTIPLE = 1;
} else {
# Pure Perl _aligned_alloc()
# Over-allocates by $alignment bytes; the offset applied is stashed at
# $aligned - 1 so _aligned_free can recover the original pointer.
# NOTE(review): strcpy also writes a trailing NUL at $aligned, clobbering
# the first data byte — presumably harmless since callers write their
# data after allocation, but confirm.
quote_sub '_aligned_alloc', q{
my ($alignment, $size) = @_;
# $alignment must fit in 8-bits
die "\$alignment must be <= 255" if $alignment > 0xFF;
my $requested_size = $alignment + $size; # size_t
my $ptr = malloc($requested_size); # void*
my $offset = $alignment - $ptr % $alignment; # size_t
my $aligned = $ptr + $offset; # void*
strcpy $aligned - 1, chr($offset);
return $aligned;
};
# Read back the stored offset and free the original malloc'ed pointer.
quote_sub '_aligned_free', q{
my ($aligned) = @_;
my $offset = ord(buffer_to_scalar($aligned - 1, 1));
free( $aligned - $offset );
};
$_ALIGNED_ALLOC_ALIGNMENT_MULTIPLE = 0;
}
use Const::Fast;
# See <https://github.com/tensorflow/tensorflow/issues/58112>.
# This is a power-of-two.
const our $EIGEN_MAX_ALIGN_BYTES => do { _tf_alignment(); };
sub _tf_alignment {
# Bytes of alignment sorted in descending order:
# NOTE Alignment can not currently be larger than 128-bytes as the pure
# Perl implementation of _aligned_alloc() only supports alignment of up
# to 255 bytes (which means 128 bytes is the maximum power-of-two
# alignment).
my @alignments = map 2**$_, reverse 0..7;
# 1-byte element
my $el = INT8;
my $el_size = $el->Size;
lib/AI/TensorFlow/Libtensorflow/Lib/_Alloc.pm view on Meta::CPAN
my $max_alignment = $alignments[0];
my $req_size = 2 * $max_alignment + $el_size;
# All data that is sent to TF_NewTensor here is within the block of
# memory allocated at $ptr_base.
my $ptr_base = malloc($req_size);
defer { free($ptr_base); }
# start at offset that is aligned with $max_alignment
my $ptr = $ptr_base + ( $max_alignment - $ptr_base % $max_alignment );
my $create_tensor_at_alignment = sub {
my ($n, $dealloc_called) = @_;
my $offset = $n - $ptr % $n;
my $ptr_offset = $ptr + $offset;
my $space_for_data = $req_size - $offset;
window(my $data, $ptr_offset, $space_for_data);
return AI::TensorFlow::Libtensorflow::Tensor->New(
$el, [int($space_for_data/$el_size)], \$data, sub {
$$dealloc_called = 1
}
);
};
for my $a_idx (0..@alignments-2) {
my @dealloc = (0, 0);
my @t = map {
$create_tensor_at_alignment->($alignments[$a_idx + $_], \$dealloc[$_]);
} (0..1);
return $alignments[$a_idx] if $dealloc[0] == 0 && $dealloc[1] == 1;
}
return 1;
}
# Allocate $size bytes aligned to $EIGEN_MAX_ALIGN_BYTES, rounding the size
# up to an alignment multiple when the C11 backend requires it.
sub _tf_aligned_alloc {
my ($class, $size) = @_;
return _aligned_alloc($EIGEN_MAX_ALIGN_BYTES,
$_ALIGNED_ALLOC_ALIGNMENT_MULTIPLE
# since $EIGEN_MAX_ALIGN_BYTES is a power-of-two, use
# two's complement bit arithmetic
? ($size + $EIGEN_MAX_ALIGN_BYTES - 1 ) & -$EIGEN_MAX_ALIGN_BYTES
: $size
);
}
# Free memory obtained from _tf_aligned_alloc via the matching backend.
sub _tf_aligned_free {
my ($class, $ptr) = @_;
_aligned_free($ptr);
}
1;
__END__
=pod
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod view on Meta::CPAN
use List::Util 1.56 qw(mesh);
use Data::Printer ( output => 'stderr', return_value => 'void', filters => ['PDL'] );
use Data::Printer::Filter::PDL ();
use Text::Table::Tiny qw(generate_table);
use Imager;
my $s = AI::TensorFlow::Libtensorflow::Status->New;
# Die with the status message unless the given TF_Status reports OK.
# Called after every libtensorflow API call in this notebook.
sub AssertOK {
die "Status $_[0]: " . $_[0]->Message
unless $_[0]->GetCode == AI::TensorFlow::Libtensorflow::Status::OK;
return;
}
AssertOK($s);
use PDL;
use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT UINT8);
use FFI::Platypus::Memory qw(memcpy);
use FFI::Platypus::Buffer qw(scalar_to_pointer);
# Wrap a float PDL ndarray as a FLOAT TFTensor.  Dims are reversed because
# PDL and TFTensor use opposite dimension orders; the deallocator closure
# keeps $p alive until libtensorflow releases the data.
sub FloatPDLTOTFTensor {
my ($p) = @_;
return AI::TensorFlow::Libtensorflow::Tensor->New(
FLOAT, [ reverse $p->dims ], $p->get_dataref, sub { undef $p }
);
}
# Copy a FLOAT TFTensor into a new float PDL ndarray (dims reversed back).
sub FloatTFTensorToPDL {
my ($t) = @_;
my $pdl = zeros(float,reverse( map $t->Dim($_), 0..$t->NumDims-1 ) );
memcpy scalar_to_pointer( ${$pdl->get_dataref} ),
scalar_to_pointer( ${$t->Data} ),
$t->ByteSize;
# Tell PDL its underlying data buffer was modified behind its back.
$pdl->upd_data;
$pdl;
}
# Wrap a byte PDL ndarray as a UINT8 TFTensor (same scheme as the FLOAT case).
sub Uint8PDLTOTFTensor {
my ($p) = @_;
return AI::TensorFlow::Libtensorflow::Tensor->New(
UINT8, [ reverse $p->dims ], $p->get_dataref, sub { undef $p }
);
}
sub Uint8TFTensorToPDL {
my ($t) = @_;
my $pdl = zeros(byte,reverse( map $t->Dim($_), 0..$t->NumDims-1 ) );
memcpy scalar_to_pointer( ${$pdl->get_dataref} ),
scalar_to_pointer( ${$t->Data} ),
$t->ByteSize;
$pdl->upd_data;
$pdl;
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod view on Meta::CPAN
$h->a( { href => $images_for_test_to_uri{$image_name} },
$h->img({
src => $images_for_test_to_uri{$image_name},
alt => $image_name,
width => '100%',
})
),
);
}
sub load_image_to_pdl {
my ($uri, $image_size) = @_;
my $http = HTTP::Tiny->new;
my $response = $http->get( $uri );
die "Could not fetch image from $uri" unless $response->{success};
say "Downloaded $uri";
my $img = Imager->new;
$img->read( data => $response->{content} );
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod view on Meta::CPAN
my $pdl_image_batched = cat(@pdl_images);
my $t = Uint8PDLTOTFTensor($pdl_image_batched);
die "There should be 4 dimensions" unless $pdl_image_batched->ndims == 4;
die "With the final dimension of length 1" unless $pdl_image_batched->dim(3) == 1;
p $pdl_image_batched;
p $t;
my $RunSession = sub {
my ($session, $t) = @_;
my @outputs_t;
my @keys = keys %{ $outputs{out} };
my @values = $outputs{out}->@{ @keys };
$session->Run(
undef,
[ values %{$outputs{in} } ], [$t],
\@values, \@outputs_t,
undef,
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod view on Meta::CPAN
use List::Util 1.56 qw(mesh);
use Data::Printer ( output => 'stderr', return_value => 'void', filters => ['PDL'] );
use Data::Printer::Filter::PDL ();
use Text::Table::Tiny qw(generate_table);
use Imager;
my $s = AI::TensorFlow::Libtensorflow::Status->New;
sub AssertOK {
die "Status $_[0]: " . $_[0]->Message
unless $_[0]->GetCode == AI::TensorFlow::Libtensorflow::Status::OK;
return;
}
AssertOK($s);
And create helpers for converting between C<PDL> ndarrays and C<TFTensor> ndarrays.
use PDL;
use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT UINT8);
use FFI::Platypus::Memory qw(memcpy);
use FFI::Platypus::Buffer qw(scalar_to_pointer);
sub FloatPDLTOTFTensor {
my ($p) = @_;
return AI::TensorFlow::Libtensorflow::Tensor->New(
FLOAT, [ reverse $p->dims ], $p->get_dataref, sub { undef $p }
);
}
sub FloatTFTensorToPDL {
my ($t) = @_;
my $pdl = zeros(float,reverse( map $t->Dim($_), 0..$t->NumDims-1 ) );
memcpy scalar_to_pointer( ${$pdl->get_dataref} ),
scalar_to_pointer( ${$t->Data} ),
$t->ByteSize;
$pdl->upd_data;
$pdl;
}
sub Uint8PDLTOTFTensor {
my ($p) = @_;
return AI::TensorFlow::Libtensorflow::Tensor->New(
UINT8, [ reverse $p->dims ], $p->get_dataref, sub { undef $p }
);
}
sub Uint8TFTensorToPDL {
my ($t) = @_;
my $pdl = zeros(byte,reverse( map $t->Dim($_), 0..$t->NumDims-1 ) );
memcpy scalar_to_pointer( ${$pdl->get_dataref} ),
scalar_to_pointer( ${$t->Data} ),
$t->ByteSize;
$pdl->upd_data;
$pdl;
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod view on Meta::CPAN
}
=head2 Download the test image and transform it into suitable input data
We now fetch the image and prepare it to be in the needed format by using C<Imager>. Note that this model does not need the input image to be of a certain size so no resizing or padding is required.
Then we turn the C<Imager> data into a C<PDL> ndarray. Since we just need the 3 channels of the image as they are, they can be stored directly in a C<PDL> ndarray of type C<byte>.
The reason why we need to concatenate the C<PDL> ndarrays here despite the model only taking a single image at a time is to get an ndarray with four (4) dimensions with the last C<PDL> dimension of size one (1).
sub load_image_to_pdl {
my ($uri, $image_size) = @_;
my $http = HTTP::Tiny->new;
my $response = $http->get( $uri );
die "Could not fetch image from $uri" unless $response->{success};
say "Downloaded $uri";
my $img = Imager->new;
$img->read( data => $response->{content} );
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod view on Meta::CPAN
die "With the final dimension of length 1" unless $pdl_image_batched->dim(3) == 1;
p $pdl_image_batched;
p $t;
=head2 Run the model for inference
We can use the C<Run> method to run the session and get the multiple output C<TFTensor>s. The following uses the names in C<$outputs> mapping to help process the multiple outputs more easily.
my $RunSession = sub {
my ($session, $t) = @_;
my @outputs_t;
my @keys = keys %{ $outputs{out} };
my @values = $outputs{out}->@{ @keys };
$session->Run(
undef,
[ values %{$outputs{in} } ], [$t],
\@values, \@outputs_t,
undef,
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod view on Meta::CPAN
use File::Which ();
use List::Util ();
use Data::Printer ( output => 'stderr', return_value => 'void', filters => ['PDL'] );
use Data::Printer::Filter::PDL ();
use Text::Table::Tiny qw(generate_table);
my $s = AI::TensorFlow::Libtensorflow::Status->New;
sub AssertOK {
die "Status $_[0]: " . $_[0]->Message
unless $_[0]->GetCode == AI::TensorFlow::Libtensorflow::Status::OK;
return;
}
AssertOK($s);
use PDL;
use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);
use FFI::Platypus::Memory qw(memcpy);
use FFI::Platypus::Buffer qw(scalar_to_pointer);
sub FloatPDLTOTFTensor {
my ($p) = @_;
return AI::TensorFlow::Libtensorflow::Tensor->New(
FLOAT, [ reverse $p->dims ], $p->get_dataref, sub { undef $p }
);
}
sub FloatTFTensorToPDL {
my ($t) = @_;
my $pdl = zeros(float,reverse( map $t->Dim($_), 0..$t->NumDims-1 ) );
memcpy scalar_to_pointer( ${$pdl->get_dataref} ),
scalar_to_pointer( ${$t->Data} ),
$t->ByteSize;
$pdl->upd_data;
$pdl;
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod view on Meta::CPAN
use Bio::Tools::Run::Samtools;
my $hg_bgz_fai_path = "${hg_bgz_path}.fai";
if( ! -e $hg_bgz_fai_path ) {
my $faidx_tool = Bio::Tools::Run::Samtools->new( -command => 'faidx' );
$faidx_tool->run( -fas => $hg_bgz_path )
or die "Could not index FASTA file $hg_bgz_path: " . $faidx_tool->error_string;
}
# Run the `saved_model_cli` tool (from the tensorflow Python package) with
# the given arguments.  Dies if the tool runs but exits non-zero; warns and
# returns -1 if the tool is not installed.
# NOTE(review): the success path returns the truth value of the
# `system(...) == 0 or die` expression rather than an explicit status, so
# callers cannot reliably distinguish success from the missing-tool case
# by return value — confirm no caller depends on it.
sub saved_model_cli {
my (@rest) = @_;
if( File::Which::which('saved_model_cli')) {
system(qw(saved_model_cli), @rest ) == 0
or die "Could not run saved_model_cli";
} else {
warn "saved_model_cli(): Install the tensorflow Python package to get the `saved_model_cli` command.\n";
return -1;
}
}
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod view on Meta::CPAN
my $model_central_base_pairs_length = 114_688; # bp
my $model_central_base_pair_window_size = 128; # bp / prediction
say "Number of predictions: ", $model_central_base_pairs_length / $model_central_base_pair_window_size;
use Data::Frame;
my $df = Data::Frame->from_csv( $targets_path, sep => "\t" )
->transform({
file => sub {
my ($col, $df) = @_;
# clean up the paths in 'file' column
[map { join "/", (split('/', $_))[7..8] } $col->list];
}
});
say "Number of targets: ", $df->nrow;
say "";
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod view on Meta::CPAN
}),
outputs_mouse =>
AI::TensorFlow::Libtensorflow::Output->New({
oper => $graph->OperationByName('StatefulPartitionedCall'),
index => 1,
}),
);
p %puts;
my $predict_on_batch = sub {
my ($session, $t) = @_;
my @outputs_t;
$session->Run(
undef,
[$puts{inputs_args_0}], [$t],
[$puts{outputs_human}], \@outputs_t,
undef,
undef,
$s
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod view on Meta::CPAN
return $outputs_t[0];
};
undef;
use PDL;
our $SHOW_ENCODER = 1;
sub one_hot_dna {
my ($seq) = @_;
my $from_alphabet = "NACGT";
my $to_alphabet = pack "C*", 0..length($from_alphabet)-1;
# sequences from UCSC genome have both uppercase and lowercase bases
my $from_alphabet_tr = $from_alphabet . lc $from_alphabet;
my $to_alphabet_tr = $to_alphabet x 2;
my $p = zeros(byte, bytes::length($seq));
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod view on Meta::CPAN
say "One-hot encoding of sequence '$onehot_test_seq' is:";
say $test_encoded->info, $test_encoded;
}
# Genomic interval helper extending Bio::Location::Simple with Enformer-style
# center/resize semantics.
package Interval {
use Bio::Location::Simple ();
use parent qw(Bio::Location::Simple);
# Midpoint of the interval, rounded *up* when start+end is odd (the $delta
# term adds 1 in that case).
sub center {
my $self = shift;
my $center = int( ($self->start + $self->end ) / 2 );
my $delta = ($self->start + $self->end ) % 2;
return $center + $delta;
}
# Return a new interval of exactly $width bases sharing this interval's
# center; for even widths the extra base goes on the upstream side.
sub resize {
my ($self, $width) = @_;
my $new_interval = $self->clone;
my $center = $self->center;
my $half = int( ($width-1) / 2 );
my $offset = ($width-1) % 2;
$new_interval->start( $center - $half - $offset );
$new_interval->end( $center + $half );
return $new_interval;
}
# Stringify as "chrom:range", e.g. "chr1:100..200".
use overload '""' => \&_op_stringify;
sub _op_stringify { sprintf "%s:%s", $_[0]->seq_id // "(no sequence)", $_[0]->to_FTstring }
}
#####
{
say "Testing interval resizing:\n";
sub _debug_resize {
my ($interval, $to, $msg) = @_;
my $resized_interval = $interval->resize($to);
die "Wrong interval size for $interval --($to)--> $resized_interval"
unless $resized_interval->length == $to;
say sprintf "Interval: %s -> %s, length %2d : %s",
$interval,
$resized_interval, $resized_interval->length,
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod view on Meta::CPAN
}
}
undef;
use Bio::DB::HTS::Faidx;
my $hg_db = Bio::DB::HTS::Faidx->new( $hg_bgz_path );
sub extract_sequence {
my ($db, $interval) = @_;
my $chrom_length = $db->length($interval->seq_id);
my $trimmed_interval = $interval->clone;
$trimmed_interval->start( List::Util::max( $interval->start, 1 ) );
$trimmed_interval->end( List::Util::min( $interval->end , $chrom_length ) );
# Bio::DB::HTS::Faidx is 0-based for both start and end points
my $seq = $db->get_sequence2_no_length(
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod view on Meta::CPAN
$trimmed_interval->start - 1,
$trimmed_interval->end - 1,
);
my $pad_upstream = 'N' x List::Util::max( -($interval->start-1), 0 );
my $pad_downstream = 'N' x List::Util::max( $interval->end - $chrom_length, 0 );
return join '', $pad_upstream, $seq, $pad_downstream;
}
# Short human-readable summary of a sequence: the uppercased sequence
# itself when it has at most $n characters, otherwise the first and last
# $n characters separated by "...". $n defaults to 10.
sub seq_info {
    my ($seq, $n) = @_;
    $n ||= 10;
    my $len = length $seq;
    return $len > $n
        ? sprintf( "%s...%s (length %d)",
            uc( substr $seq, 0, $n ), uc( substr $seq, -$n ), $len )
        : sprintf( "%s (length %d)", uc($seq), $len );
}
####
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod view on Meta::CPAN
use File::Which ();
use List::Util ();
use Data::Printer ( output => 'stderr', return_value => 'void', filters => ['PDL'] );
use Data::Printer::Filter::PDL ();
use Text::Table::Tiny qw(generate_table);
my $s = AI::TensorFlow::Libtensorflow::Status->New;
# Die with the status's message unless the TF_Status code is OK.
sub AssertOK {
    my ($status) = @_;
    return if $status->GetCode == AI::TensorFlow::Libtensorflow::Status::OK;
    die "Status $status: " . $status->Message;
}
AssertOK($s);
And create helpers for converting between C<PDL> ndarrays and C<TFTensor> ndarrays.
use PDL;
use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);
use FFI::Platypus::Memory qw(memcpy);
use FFI::Platypus::Buffer qw(scalar_to_pointer);
# Wrap a float PDL ndarray's data into a FLOAT TFTensor. TensorFlow's
# dimension list is the reverse of PDL's, hence the reverse. The
# deallocator closure captures $pdl so it stays alive until TensorFlow
# releases the buffer.
sub FloatPDLTOTFTensor {
    my ($pdl) = @_;
    my @tf_dims = reverse $pdl->dims;
    return AI::TensorFlow::Libtensorflow::Tensor->New(
        FLOAT, \@tf_dims, $pdl->get_dataref,
        sub { undef $pdl },
    );
}
sub FloatTFTensorToPDL {
my ($t) = @_;
my $pdl = zeros(float,reverse( map $t->Dim($_), 0..$t->NumDims-1 ) );
memcpy scalar_to_pointer( ${$pdl->get_dataref} ),
scalar_to_pointer( ${$t->Data} ),
$t->ByteSize;
$pdl->upd_data;
$pdl;
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod view on Meta::CPAN
if( ! -e $hg_bgz_fai_path ) {
my $faidx_tool = Bio::Tools::Run::Samtools->new( -command => 'faidx' );
$faidx_tool->run( -fas => $hg_bgz_path )
or die "Could not index FASTA file $hg_bgz_path: " . $faidx_tool->error_string;
}
=head2 Model input and output specification
Now we create a helper to call C<saved_model_cli>, and we call C<saved_model_cli scan> to ensure that the model is I/O-free for security reasons.
# Run the `saved_model_cli` tool (shipped with the tensorflow Python
# package) with the given arguments. Returns a true value on success;
# warns and returns -1 when the tool is not installed; dies (including
# the child exit status from $?) when the tool exits non-zero.
sub saved_model_cli {
    my (@rest) = @_;
    if( File::Which::which('saved_model_cli') ) {
        # List-form system() bypasses the shell, so @rest is not
        # subject to shell interpolation.
        system( 'saved_model_cli', @rest ) == 0
            or die "Could not run saved_model_cli (exit status $?)";
        return 1; # explicit success value (previously implicit)
    } else {
        warn "saved_model_cli(): Install the tensorflow Python package to get the `saved_model_cli` command.\n";
        return -1;
    }
}
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod view on Meta::CPAN
B<RESULT>:
1
and by looking at the targets file:
use Data::Frame;
my $df = Data::Frame->from_csv( $targets_path, sep => "\t" )
->transform({
file => sub {
my ($col, $df) = @_;
# clean up the paths in 'file' column
[map { join "/", (split('/', $_))[7..8] } $col->list];
}
});
say "Number of targets: ", $df->nrow;
say "";
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod view on Meta::CPAN
</span><span style="color: #6666cc;">NumInputs</span><span style=""> </span><span style="color: #33ccff;"> </span><span style="color: #ff6633;">274</span><span style="color: #33ccff;">,</span><span style="">
</span><span style="color: #6666cc;">NumOutputs</span><span style="color: #33ccff;"> </span><span style="color: #ff6633;">2</span><span style="color: #33ccff;">,</span><span style="">
</span><span style="color: #6666cc;">OpType</span><span style=""> </span><span style="color: #33ccff;"> </span><span style="color: #33ccff;">"</span><span style="color: #669933;">StatefulPartitionedCall</span><span style="color:...
</span><span style="color: #33ccff;">}</span><span style="">
</span><span style="color: #33ccff;">}</span><span style="">
</span><span style="color: #33ccff;">}</span><span style="">
</span></code></pre></span>
We need a helper to simplify running the session and getting just the predictions that we want.
my $predict_on_batch = sub {
my ($session, $t) = @_;
my @outputs_t;
$session->Run(
undef,
[$puts{inputs_args_0}], [$t],
[$puts{outputs_human}], \@outputs_t,
undef,
undef,
$s
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod view on Meta::CPAN
| G | C<[0 0 1 0]> |
| T | C<[0 0 0 1]> |
| N | C<[0 0 0 0]> |
We can achieve this encoding by creating a lookup table with a PDL ndarray. This could be done by creating a byte PDL ndarray of dimensions C<[ 256 4 ]> to directly look up the numeric value of characters 0-255, but here we'll go with a smaller C...
use PDL;
our $SHOW_ENCODER = 1;
sub one_hot_dna {
my ($seq) = @_;
my $from_alphabet = "NACGT";
my $to_alphabet = pack "C*", 0..length($from_alphabet)-1;
# sequences from UCSC genome have both uppercase and lowercase bases
my $from_alphabet_tr = $from_alphabet . lc $from_alphabet;
my $to_alphabet_tr = $to_alphabet x 2;
my $p = zeros(byte, bytes::length($seq));
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod view on Meta::CPAN
=back
Now we need a way to deal with the sequence interval. We're going to use 1-based coordinates as BioPerl does. In fact, we'll extend a BioPerl class.
# 1-based genomic interval, extending BioPerl's Bio::Location::Simple.
package Interval {
use Bio::Location::Simple ();
use parent qw(Bio::Location::Simple);
# Integer midpoint of (start + end); $delta rounds up when the sum is odd.
sub center {
my $self = shift;
my $center = int( ($self->start + $self->end ) / 2 );
my $delta = ($self->start + $self->end ) % 2;
return $center + $delta;
}
# Clone this interval resized to $width around the same center; the
# extra base from an even/odd mismatch is placed on the start side.
sub resize {
my ($self, $width) = @_;
my $new_interval = $self->clone;
my $center = $self->center;
my $half = int( ($width-1) / 2 );
my $offset = ($width-1) % 2;
$new_interval->start( $center - $half - $offset );
$new_interval->end( $center + $half );
return $new_interval;
}
use overload '""' => \&_op_stringify;
# Stringify as "seq_id:range" with a placeholder for an undefined seq_id.
sub _op_stringify { sprintf "%s:%s", $_[0]->seq_id // "(no sequence)", $_[0]->to_FTstring }
}
#####
{
say "Testing interval resizing:\n";
sub _debug_resize {
my ($interval, $to, $msg) = @_;
my $resized_interval = $interval->resize($to);
die "Wrong interval size for $interval --($to)--> $resized_interval"
unless $resized_interval->length == $to;
say sprintf "Interval: %s -> %s, length %2d : %s",
$interval,
$resized_interval, $resized_interval->length,
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod view on Meta::CPAN
Interval: chr11:6..9 -> chr11:5..11, length 7 : 4 -> 7 (+ 3)
Interval: chr11:6..9 -> chr11:4..11, length 8 : 4 -> 8 (+ 4)
Interval: chr11:6..9 -> chr11:4..12, length 9 : 4 -> 9 (+ 5)
use Bio::DB::HTS::Faidx;
my $hg_db = Bio::DB::HTS::Faidx->new( $hg_bgz_path );
sub extract_sequence {
my ($db, $interval) = @_;
my $chrom_length = $db->length($interval->seq_id);
my $trimmed_interval = $interval->clone;
$trimmed_interval->start( List::Util::max( $interval->start, 1 ) );
$trimmed_interval->end( List::Util::min( $interval->end , $chrom_length ) );
# Bio::DB::HTS::Faidx is 0-based for both start and end points
my $seq = $db->get_sequence2_no_length(
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod view on Meta::CPAN
$trimmed_interval->start - 1,
$trimmed_interval->end - 1,
);
my $pad_upstream = 'N' x List::Util::max( -($interval->start-1), 0 );
my $pad_downstream = 'N' x List::Util::max( $interval->end - $chrom_length, 0 );
return join '', $pad_upstream, $seq, $pad_downstream;
}
# Short human-readable summary of a sequence: the uppercased sequence
# when it has at most $n characters (default 10), otherwise the first
# and last $n characters separated by "...".
sub seq_info {
my ($seq, $n) = @_;
$n ||= 10;
if( length $seq > $n ) {
sprintf "%s...%s (length %d)", uc substr($seq, 0, $n), uc substr($seq, -$n), length $seq;
} else {
sprintf "%s (length %d)", uc $seq, length $seq;
}
}
####
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod view on Meta::CPAN
use List::Util ();
use Data::Printer ( output => 'stderr', return_value => 'void', filters => ['PDL'] );
use Data::Printer::Filter::PDL ();
use Text::Table::Tiny qw(generate_table);
use Imager;
my $s = AI::TensorFlow::Libtensorflow::Status->New;
# Die with the status's message unless the TF_Status code is OK.
sub AssertOK {
die "Status $_[0]: " . $_[0]->Message
unless $_[0]->GetCode == AI::TensorFlow::Libtensorflow::Status::OK;
return;
}
AssertOK($s);
use PDL;
use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);
use FFI::Platypus::Memory qw(memcpy);
use FFI::Platypus::Buffer qw(scalar_to_pointer);
# Wrap a float PDL ndarray's data into a FLOAT TFTensor. TF dims are the
# reverse of PDL dims; the closure keeps $p alive for TF's deallocator.
sub FloatPDLTOTFTensor {
my ($p) = @_;
return AI::TensorFlow::Libtensorflow::Tensor->New(
FLOAT, [ reverse $p->dims ], $p->get_dataref, sub { undef $p }
);
}
# Copy a float TFTensor's contents into a newly allocated PDL ndarray,
# reversing the dimension order back to PDL convention.
sub FloatTFTensorToPDL {
my ($t) = @_;
my $pdl = zeros(float,reverse( map $t->Dim($_), 0..$t->NumDims-1 ) );
# Raw byte copy between the TFTensor buffer and PDL's data buffer.
memcpy scalar_to_pointer( ${$pdl->get_dataref} ),
scalar_to_pointer( ${$t->Data} ),
$t->ByteSize;
# Tell PDL that its underlying buffer was modified out-of-band.
$pdl->upd_data;
$pdl;
}
use HTML::Tiny;
# Build a full-width HTML <table> with one row per element of @$data.
# $cb is invoked as $cb->($element, $h) and returns the cell contents.
sub my_table {
    my ($data, $cb) = @_;
    my $h = HTML::Tiny->new;
    my @rows = map {
        [ $h->td( $cb->($_, $h) ) ]
    } @$data;
    return $h->table(
        { style => 'width: 100%' },
        [ $h->tr(@rows) ],
    );
}
# Debugging helper: render a PDL ndarray as an image via Gnuplot.
# The Gnuplot backend is loaded lazily so it is only required when used.
sub show_in_gnuplot {
    my ($image) = @_;
    require PDL::Graphics::Gnuplot;
    PDL::Graphics::Gnuplot::image( square => 1, $image );
}
# image_size => [width, height] (but usually square images)
my %model_name_to_params = (
mobilenet_v2_100_224 => {
handle => 'https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/classification/5',
image_size => [ 224, 224 ],
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod view on Meta::CPAN
my $session = AI::TensorFlow::Libtensorflow::Session->LoadFromSavedModel(
$opt, undef, $model_base, \@tags, $graph, undef, $s
);
AssertOK($s);
my %ops = (
in => $graph->OperationByName('serving_default_inputs'),
out => $graph->OperationByName('StatefulPartitionedCall'),
);
die "Could not get all operations" unless List::Util::all(sub { defined }, values %ops);
my %outputs = map { $_ => [ AI::TensorFlow::Libtensorflow::Output->New( { oper => $ops{$_}, index => 0 } ) ] }
keys %ops;
p %outputs;
say "Input: " , $outputs{in}[0];
say "Output: ", $outputs{out}[0];
my %images_for_test_to_uri = (
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod view on Meta::CPAN
#by Merdal, CC BY-SA 3.0 <http://creativecommons.org/licenses/by-sa/3.0/>, via Wikimedia Commons
"teapot" => "https://upload.wikimedia.org/wikipedia/commons/4/44/Black_tea_pot_cropped.jpg",
#by Mendhak, CC BY-SA 2.0 <https://creativecommons.org/licenses/by-sa/2.0>, via Wikimedia Commons
);
my @image_names = sort keys %images_for_test_to_uri;
if( IN_IPERL ) {
IPerl->html(
my_table( \@image_names, sub {
my ($image_name, $h) = @_;
(
$h->tt($image_name),
$h->a( { href => $images_for_test_to_uri{$image_name} },
$h->img({
src => $images_for_test_to_uri{$image_name},
alt => $image_name,
width => '50%',
})
),
)
})
);
}
# Paste $inner centered onto a fresh image of size $padded_sz
# ([width, height]); @imager_args is forwarded to the Imager constructor.
sub imager_paste_center_pad {
    my ($inner, $padded_sz, @imager_args) = @_;
    my $outer = Imager->new(
        List::Util::mesh( [qw(xsize ysize)], $padded_sz ),
        @imager_args,
    );
    my $left = int( ($outer->getwidth  - $inner->getwidth ) / 2 );
    my $top  = int( ($outer->getheight - $inner->getheight) / 2 );
    $outer->paste( left => $left, top => $top, src => $inner );
    return $outer;
}
# Scale $img to fit within $image_size ([width, height]) while keeping
# the aspect ratio (type => 'min' fits the smaller dimension). Dies if
# Imager's scale() fails instead of silently returning undef.
sub imager_scale_to {
    my ($img, $image_size) = @_;
    my $rescaled = $img->scale(
        List::Util::mesh( [qw(xpixels ypixels)], $image_size ),
        type => 'min',
        qtype => 'mixing', # 'mixing' seems to work better than 'normal'
    ) or die "Could not scale image: " . $img->errstr;
    return $rescaled; # explicit return (was implicit via the assignment)
}
sub load_image_to_pdl {
my ($uri, $image_size) = @_;
my $http = HTTP::Tiny->new;
my $response = $http->get( $uri );
die "Could not fetch image from $uri" unless $response->{success};
say "Downloaded $uri";
my $img = Imager->new;
$img->read( data => $response->{content} );
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod view on Meta::CPAN
$model_name_to_params{$model_name}{image_size}
);
} @image_names;
my $pdl_image_batched = cat(@pdl_images);
my $t = FloatPDLTOTFTensor($pdl_image_batched);
p $pdl_image_batched;
p $t;
my $RunSession = sub {
my ($session, $t) = @_;
my @outputs_t;
$session->Run(
undef,
$outputs{in}, [$t],
$outputs{out}, \@outputs_t,
undef,
undef,
$s
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod view on Meta::CPAN
say "Warming up the model";
use PDL::GSL::RNG;
my $rng = PDL::GSL::RNG->new('default');
my $image_size = $model_name_to_params{$model_name}{image_size};
my $warmup_input = zeros(float, 3, @$image_size, 1 );
$rng->get_uniform($warmup_input);
p $RunSession->($session, FloatPDLTOTFTensor($warmup_input));
my $output_pdl_batched = FloatTFTensorToPDL($RunSession->($session, $t));
my $softmax = sub { ( map $_/sumover($_)->dummy(0), exp($_[0]) )[0] };
my $probabilities_batched = $softmax->($output_pdl_batched);
p $probabilities_batched;
my $N = 5; # number to select
my $top_batched = $probabilities_batched->qsorti->slice([-1, -$N]);
my @top_lists = dog($top_batched);
my $includes_background_class = $probabilities_batched->dim(0) == IMAGENET_LABEL_COUNT_WITH_BG;
if( IN_IPERL ) {
my $html = IPerl->html(
my_table( [0..$#image_names], sub {
my ($batch_idx, $h) = @_;
my $image_name = $image_names[$batch_idx];
my @top_for_image = $top_lists[$batch_idx]->list;
(
$h->tt($image_name),
$h->a( { href => $images_for_test_to_uri{$image_name} },
$h->img({
src => $images_for_test_to_uri{$image_name},
alt => $image_name,
width => '50%',
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod view on Meta::CPAN
use List::Util ();
use Data::Printer ( output => 'stderr', return_value => 'void', filters => ['PDL'] );
use Data::Printer::Filter::PDL ();
use Text::Table::Tiny qw(generate_table);
use Imager;
my $s = AI::TensorFlow::Libtensorflow::Status->New;
# Die with the status's message unless the TF_Status code is OK.
sub AssertOK {
die "Status $_[0]: " . $_[0]->Message
unless $_[0]->GetCode == AI::TensorFlow::Libtensorflow::Status::OK;
return;
}
AssertOK($s);
In this notebook, we will use C<PDL> to store and manipulate the ndarray data before converting it to a C<TFTensor>. The following functions help with copying the data back and forth between the two object types.
An important thing to note about the dimensions used by TensorFlow's TFTensors when compared with PDL is that the dimension lists are reversed. Consider a binary raster image with width W and height H stored in L<row-major format|https://en.wikipedia...
This difference will be explained more concretely further in the tutorial.
Future work will provide an API for more convenient wrappers which will provide an option to either copy or share the same data (if possible).
use PDL;
use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);
use FFI::Platypus::Memory qw(memcpy);
use FFI::Platypus::Buffer qw(scalar_to_pointer);
# Wrap a float PDL ndarray's data into a FLOAT TFTensor. TF dims are the
# reverse of PDL dims; the closure keeps $p alive for TF's deallocator.
sub FloatPDLTOTFTensor {
my ($p) = @_;
return AI::TensorFlow::Libtensorflow::Tensor->New(
FLOAT, [ reverse $p->dims ], $p->get_dataref, sub { undef $p }
);
}
# Copy a float TFTensor's contents into a newly allocated PDL ndarray,
# reversing the dimension order back to PDL convention.
sub FloatTFTensorToPDL {
my ($t) = @_;
my $pdl = zeros(float,reverse( map $t->Dim($_), 0..$t->NumDims-1 ) );
# Raw byte copy between the TFTensor buffer and PDL's data buffer.
memcpy scalar_to_pointer( ${$pdl->get_dataref} ),
scalar_to_pointer( ${$t->Data} ),
$t->ByteSize;
# Tell PDL that its underlying buffer was modified out-of-band.
$pdl->upd_data;
$pdl;
}
The following is just a small helper to generate an HTML C<<< <table> >>> for output in C<IPerl>.
use HTML::Tiny;
sub my_table {
my ($data, $cb) = @_;
my $h = HTML::Tiny->new;
$h->table( { style => 'width: 100%' },
[
$h->tr(
map {
[
$h->td( $cb->($_, $h) )
]
} @$data
)
]
)
}
This is a helper to display images in Gnuplot for debugging, but those debugging lines are commented out.
# Debugging helper: render a PDL ndarray as an image via Gnuplot
# (loaded lazily so it is only required when actually used).
sub show_in_gnuplot {
my ($p) = @_;
require PDL::Graphics::Gnuplot;
PDL::Graphics::Gnuplot::image( square => 1, $p );
}
=head2 Fetch the model and labels
We are going to use an L<image classification model|https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/classification/5> from TensorFlow Hub based on the MobileNet V2 architecture. We download both the model and ImageNet classification labels.
# image_size => [width, height] (but usually square images)
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod view on Meta::CPAN
);
AssertOK($s);
So let's use the names from the C<saved_model_cli> output to create our C<::Output> C<ArrayRef>s.
my %ops = (
in => $graph->OperationByName('serving_default_inputs'),
out => $graph->OperationByName('StatefulPartitionedCall'),
);
die "Could not get all operations" unless List::Util::all(sub { defined }, values %ops);
my %outputs = map { $_ => [ AI::TensorFlow::Libtensorflow::Output->New( { oper => $ops{$_}, index => 0 } ) ] }
keys %ops;
p %outputs;
say "Input: " , $outputs{in}[0];
say "Output: ", $outputs{out}[0];
B<STREAM (STDOUT)>:
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod view on Meta::CPAN
#by Merdal, CC BY-SA 3.0 <http://creativecommons.org/licenses/by-sa/3.0/>, via Wikimedia Commons
"teapot" => "https://upload.wikimedia.org/wikipedia/commons/4/44/Black_tea_pot_cropped.jpg",
#by Mendhak, CC BY-SA 2.0 <https://creativecommons.org/licenses/by-sa/2.0>, via Wikimedia Commons
);
my @image_names = sort keys %images_for_test_to_uri;
if( IN_IPERL ) {
IPerl->html(
my_table( \@image_names, sub {
my ($image_name, $h) = @_;
(
$h->tt($image_name),
$h->a( { href => $images_for_test_to_uri{$image_name} },
$h->img({
src => $images_for_test_to_uri{$image_name},
alt => $image_name,
width => '50%',
})
),
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod view on Meta::CPAN
B<DISPLAY>:
=for html <span style="display:inline-block;margin-left:1em;"><p><table style="width: 100%"><tr><td><tt>apple</tt></td><td><a href="https://upload.wikimedia.org/wikipedia/commons/1/15/Red_Apple.jpg"><img alt="apple" src="https://upload.wikimedia.org/...
=head2 Download the test images and transform them into suitable input data
We now fetch these images and prepare them to be in the format needed by using C<Imager> to resize and add padding. Then we turn the C<Imager> data into a C<PDL> ndarray. Since the C<Imager> data is stored as 32-bits with 4 channels in the order ...
We then take all the PDL ndarrays and concatenate them. Again, note that the dimension lists for the PDL ndarray and the TFTensor are reversed.
# Paste $inner centered onto a fresh image of size $padded_sz
# ([width, height]); @rest is forwarded to the Imager constructor.
sub imager_paste_center_pad {
my ($inner, $padded_sz, @rest) = @_;
my $outer = Imager->new( List::Util::mesh( [qw(xsize ysize)], $padded_sz ),
@rest
);
$outer->paste(
left => int( ($outer->getwidth - $inner->getwidth ) / 2 ),
top => int( ($outer->getheight - $inner->getheight) / 2 ),
src => $inner,
);
$outer;
}
# Scale $img to fit within $image_size ([width, height]) keeping aspect
# ratio (type => 'min'). NOTE(review): scale() failure is unchecked here
# and would return undef silently — consider checking $img->errstr.
sub imager_scale_to {
my ($img, $image_size) = @_;
my $rescaled = $img->scale(
List::Util::mesh( [qw(xpixels ypixels)], $image_size ),
type => 'min',
qtype => 'mixing', # 'mixing' seems to work better than 'normal'
);
}
sub load_image_to_pdl {
my ($uri, $image_size) = @_;
my $http = HTTP::Tiny->new;
my $response = $http->get( $uri );
die "Could not fetch image from $uri" unless $response->{success};
say "Downloaded $uri";
my $img = Imager->new;
$img->read( data => $response->{content} );
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod view on Meta::CPAN
</span><span style="color: #6666cc;">ElementCount </span><span style=""> </span><span style="color: #ff6633;">1806336</span><span style="">
</span><span style="color: #33ccff;">}</span><span style="">
</span></code></pre></span>
=head2 Run the model for inference
We can use the C<Run> method to run the session and get the output C<TFTensor>.
First, we send a single random input to warm up the model.
my $RunSession = sub {
my ($session, $t) = @_;
my @outputs_t;
$session->Run(
undef,
$outputs{in}, [$t],
$outputs{out}, \@outputs_t,
undef,
undef,
$s
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod view on Meta::CPAN
</span><span style="color: #6666cc;">NumDims </span><span style=""> </span><span style="color: #ff6633;">2</span><span style="">
</span><span style="color: #6666cc;">ElementCount </span><span style=""> </span><span style="color: #ff6633;">1001</span><span style="">
</span><span style="color: #33ccff;">}</span><span style="">
</span></code></pre></span>
Then we send the batched image data. The returned scores need to be normalised using the L<softmax function|https://en.wikipedia.org/wiki/Softmax_function> with the following formula (taken from Wikipedia):
$$ {\displaystyle \sigma (\mathbf {z} )I<{i}={\frac {e^{z>{i}}}{\sum I<{j=1}^{K}e^{z>{j}}}}\ \ {\text{ for }}i=1,\dotsc ,K{\text{ and }}\mathbf {z} =(zI<{1},\dotsc ,z>{K})\in \mathbb {R} ^{K}.} $$
my $output_pdl_batched = FloatTFTensorToPDL($RunSession->($session, $t));
my $softmax = sub { ( map $_/sumover($_)->dummy(0), exp($_[0]) )[0] };
my $probabilities_batched = $softmax->($output_pdl_batched);
p $probabilities_batched;
B<STREAM (STDERR)>:
=for html <span style="display:inline-block;margin-left:1em;"><pre style="display: block"><code><span style="color: #cc66cc;">PDL</span><span style="color: #33ccff;"> {</span><span style="">
</span><span style="color: #6666cc;">Data </span><span style=""> : </span><span style="color: #669933;">too long to print</span><span style="">
</span><span style="color: #6666cc;">Type </span><span style=""> : </span><span style="color: #cc66cc;">float</span><span style="">
</span><span style="color: #6666cc;">Shape </span><span style=""> : </span><span style="color: #33ccff;">[</span><span style="color: #9999cc;">1001 12</span><span style="color: #33ccff;">]</span><span style="">
</span><span style="color: #6666cc;">Nelem </span><span style=""> : </span><span style="color: #dd6;">12012</span><span style="">
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod view on Meta::CPAN
my $N = 5; # number to select
my $top_batched = $probabilities_batched->qsorti->slice([-1, -$N]);
my @top_lists = dog($top_batched);
my $includes_background_class = $probabilities_batched->dim(0) == IMAGENET_LABEL_COUNT_WITH_BG;
if( IN_IPERL ) {
my $html = IPerl->html(
my_table( [0..$#image_names], sub {
my ($batch_idx, $h) = @_;
my $image_name = $image_names[$batch_idx];
my @top_for_image = $top_lists[$batch_idx]->list;
(
$h->tt($image_name),
$h->a( { href => $images_for_test_to_uri{$image_name} },
$h->img({
src => $images_for_test_to_uri{$image_name},
alt => $image_name,
width => '50%',
lib/AI/TensorFlow/Libtensorflow/Operation.pm view on Meta::CPAN
name => 'TF_Operation_array',
members => [
FFI::C::StructDef->new(
$ffi,
members => [
p => 'opaque'
]
)
],
);
# Accessor for the FFI::C array definition used to marshal TF_Operation
# pointer arrays across the C boundary.
sub _adef { $adef; }
# Pack a list of TF_Operation objects into a C array of opaque
# pointers; undef entries are skipped (left as NULL).
sub _as_array {
    my $class = shift;
    my $c_array = $class->_adef->create( scalar @_ );
    for my $i (0 .. $#_) {
        my $op = $_[$i];
        next unless defined $op;
        $c_array->[$i]->p( $ffi->cast('TF_Operation', 'opaque', $op) );
    }
    return $c_array;
}
# Unpack a C array of opaque pointers back into an arrayref of
# TF_Operation objects.
sub _from_array {
    my ($class, $c_array) = @_;
    my @operations;
    for my $i (0 .. $c_array->count - 1) {
        push @operations, $ffi->cast('opaque', 'TF_Operation', $c_array->[$i]->p);
    }
    return \@operations;
}
$ffi->attach( [ 'OperationName' => 'Name' ], [
arg 'TF_Operation' => 'oper',
lib/AI/TensorFlow/Libtensorflow/Operation.pm view on Meta::CPAN
$ffi->attach( [ 'OperationDevice' => 'Device' ], [
arg 'TF_Operation' => 'oper',
] => 'string');
$ffi->attach( [ 'OperationNumOutputs' => 'NumOutputs' ], [
arg 'TF_Operation' => 'oper',
] => 'int');
$ffi->attach( [ 'OperationOutputType' => 'OutputType' ] => [
arg 'TF_Output' => 'oper_out',
] => 'TF_DataType' => sub {
my ($xs, $self, $output) = @_;
# TODO coerce from LibtfPartialOutput here
$xs->($output);
} );
$ffi->attach( [ 'OperationNumInputs' => 'NumInputs' ] => [
arg 'TF_Operation' => 'oper',
] => 'int' );
$ffi->attach( [ 'OperationInputType' => 'InputType' ] => [
arg 'TF_Input' => 'oper_in',
] => 'TF_DataType' => sub {
my ($xs, $self, $input) = @_;
# TODO coerce from LibtfPartialInput here
$xs->($input);
});
$ffi->attach( [ 'OperationNumControlInputs' => 'NumControlInputs' ] => [
arg 'TF_Operation' => 'oper',
] => 'int' );
$ffi->attach( [ 'OperationNumControlOutputs' => 'NumControlOutputs' ] => [
lib/AI/TensorFlow/Libtensorflow/Operation.pm view on Meta::CPAN
] => 'int');
$ffi->attach( [ 'OperationInputListLength' => 'InputListLength' ] => [
arg 'TF_Operation' => 'oper',
arg 'string' => 'arg_name',
arg 'TF_Status' => 'status',
] => 'int' );
$ffi->attach( [ 'OperationInput' => 'Input' ] => [
arg 'TF_Input' => 'oper_in',
] => 'TF_Output' => sub {
my ($xs, $self, $input) = @_;
# TODO coerce from LibtfPartialInput here
$xs->($input);
});
$ffi->attach( [ 'OperationAllInputs' => 'AllInputs' ] => [
arg 'TF_Operation' => 'oper',
# TODO make OutputArray
arg 'TF_Output_struct_array' => 'inputs',
arg 'int' => 'max_inputs',
] => 'void' => sub {
my ($xs, $oper) = @_;
my $max_inputs = $oper->NumInputs;
return [] if $max_inputs == 0;
my $inputs = AI::TensorFlow::Libtensorflow::Output->_adef->create(0 + $max_inputs);
$xs->($oper, $inputs, $max_inputs);
return AI::TensorFlow::Libtensorflow::Output->_from_array($inputs);
});
$ffi->attach( [ 'OperationGetControlInputs' => 'GetControlInputs' ] => [
arg 'TF_Operation' => 'oper',
arg 'TF_Operation_array' => 'control_inputs',
arg 'int' => 'max_control_inputs',
] => 'void' => sub {
my ($xs, $oper) = @_;
my $max_inputs = $oper->NumControlInputs;
return [] if $max_inputs == 0;
my $inputs = AI::TensorFlow::Libtensorflow::Operation->_adef->create(0 + $max_inputs);
$xs->($oper, $inputs, $max_inputs);
return AI::TensorFlow::Libtensorflow::Operation->_from_array($inputs);
});
$ffi->attach( [ 'OperationGetControlOutputs' => 'GetControlOutputs' ] => [
arg 'TF_Operation' => 'oper',
arg 'TF_Operation_array' => 'control_outputs',
arg 'int' => 'max_control_outputs',
] => 'void' => sub {
my ($xs, $oper) = @_;
my $max_outputs = $oper->NumControlOutputs;
return [] if $max_outputs == 0;
my $outputs = AI::TensorFlow::Libtensorflow::Operation->_adef->create(0 + $max_outputs);
$xs->($oper, $outputs, $max_outputs);
return AI::TensorFlow::Libtensorflow::Operation->_from_array($outputs);
});
$ffi->attach( [ 'OperationOutputNumConsumers' => 'OutputNumConsumers' ] => [
arg 'TF_Output' => 'oper_out',
], 'int' => sub {
my ($xs, $self, $output) = @_;
# TODO coerce from LibtfPartialOutput here
$xs->($output);
});
# Bind TF_OperationOutputConsumers as OutputConsumers: returns an
# arrayref of TF_Input records consuming the given output.
$ffi->attach( [ 'OperationOutputConsumers' => 'OutputConsumers' ] => [
# TODO simplify API
arg 'TF_Output' => 'oper_out',
arg 'TF_Input_struct_array' => 'consumers',
arg 'int' => 'max_consumers',
] => 'int' => sub {
my ($xs, $self, $output) = @_;
# Size the C array to the reported consumer count.
my $max_consumers = $self->OutputNumConsumers( $output );
my $consumers = AI::TensorFlow::Libtensorflow::Input->_adef->create( $max_consumers );
# NOTE(review): $count (the number actually filled in) is ignored;
# this assumes it always equals $max_consumers — confirm.
my $count = $xs->($output, $consumers, $max_consumers);
return AI::TensorFlow::Libtensorflow::Input->_from_array( $consumers );
});
sub _data_printer {
my ($self, $ddp) = @_;
my %data = (
Name => $self->Name,
OpType => $self->OpType,
NumInputs => $self->NumInputs,
NumOutputs => $self->NumOutputs,
);
return sprintf('%s %s',
lib/AI/TensorFlow/Libtensorflow/OperationDescription.pm view on Meta::CPAN
=> 'tf_attr_float_list'
);
$ffi->load_custom_type(PackableArrayRef('BoolArrayRef', pack_type => 'C')
=> 'tf_attr_bool_list',
);
$ffi->attach( [ 'NewOperation' => 'New' ] => [
arg 'TF_Graph' => 'graph',
arg 'string' => 'op_type',
arg 'string' => 'oper_name',
] => 'TF_OperationDescription' => sub {
my ($xs, $class, @rest) = @_;
$xs->(@rest);
});
$ffi->attach( [ 'NewOperationLocked' => 'NewLocked' ] => [
arg 'TF_Graph' => 'graph',
arg 'string' => 'op_type',
arg 'string' => 'oper_name',
] => 'TF_OperationDescription' );
lib/AI/TensorFlow/Libtensorflow/OperationDescription.pm view on Meta::CPAN
$ffi->attach( 'AddInput' => [
arg 'TF_OperationDescription' => 'desc',
arg 'TF_Output' => 'input',
] => 'void');
$ffi->attach( AddInputList => [
arg 'TF_OperationDescription' => 'desc',
arg 'TF_Output_struct_array' => 'inputs',
arg 'int' => 'num_inputs',
] => 'void' => sub {
my $xs = shift;
$_[1] = AI::TensorFlow::Libtensorflow::Output->_as_array( @{ $_[1] } );
$_[2] = $_[1]->count;
$xs->(@_);
});
$ffi->attach( AddControlInput => [
arg 'TF_OperationDescription' => 'desc',
arg 'TF_Operation' => 'input',
] => 'void');
lib/AI/TensorFlow/Libtensorflow/Output.pm view on Meta::CPAN
# See also:
# Convert::Binary::C->new( Alignment => 8 )
# ->parse( ... )
# ->sizeof( ... )
$ffi->sizeof('opaque') == 8 ? (
'char[4]' => ':',
) : (),
);
$ffi->type('record(AI::TensorFlow::Libtensorflow::Output)', 'TF_Output');
# Construct a TF_Output record from a hashref with keys:
#   oper  => an AI::TensorFlow::Libtensorflow::Operation object
#   index => the output index within that operation
# The operation object is stored internally as an opaque pointer.
sub New {
    my ($class, $args) = @_;
    my $record = $class->new({
        _oper => $ffi->cast( 'TF_Operation', 'opaque', delete $args->{oper} ),
        _index => delete $args->{index},
    });
    return $record; # explicit return (was implicit via the assignment)
}
# Inflate the stored opaque pointer back into a TF_Operation object.
sub oper { $ffi->cast('opaque', 'TF_Operation', $_[0]->_oper ) }
# The output index within the operation.
sub index { $_[0]->_index }
use FFI::C::ArrayDef;
use FFI::C::StructDef;
my $sdef = FFI::C::StructDef->new($ffi,
name => 'TF_Output_struct',
members => [
_oper => 'opaque',
_index => 'int',
__ignore => 'char[4]',
],
);
my $adef = FFI::C::ArrayDef->new($ffi,
name => 'TF_Output_struct_array',
members => [ 'TF_Output_struct' ]
);
# Accessor for the FFI::C array definition used to marshal arrays of
# TF_Output structs across the C boundary.
sub _adef { $adef; }
# Pack a list of ::Output records into a C array of TF_Output structs;
# undef entries are skipped (left zeroed).
sub _as_array {
    my $class = shift;
    my $c_array = $class->_adef->create( scalar @_ );
    for my $i (0 .. $#_) {
        next unless defined $_[$i];
        $class->_copy_to_other( $_[$i], $c_array->[$i] );
    }
    return $c_array;
}
# Unpack a C array of TF_Output structs into an arrayref of ::Output
# records (each struct's fields copied into a fresh record).
sub _from_array {
    my ($class, $c_array) = @_;
    my @records;
    for my $i (0 .. $c_array->count - 1) {
        my $rec = $class->new;
        $class->_copy_to_other( $c_array->[$i], $rec );
        push @records, $rec;
    }
    return \@records;
}
# Copy the operation pointer and index fields from $src to $dst; both
# sides expose _oper/_index accessors (record or C struct).
sub _copy_to_other {
    my ($class, $src, $dst) = @_;
    $dst->_oper( $src->_oper );
    $dst->_index( $src->_index );
}
$ffi->load_custom_type(
RecordArrayRef( 'OutputArrayPtr',
record_module => __PACKAGE__, with_size => 0,
),
=> 'TF_Output_array');
$ffi->load_custom_type(
RecordArrayRef( 'OutputArrayPtrSz',
record_module => __PACKAGE__, with_size => 1,
),
=> 'TF_Output_array_sz');
use overload
'""' => \&_op_stringify;
# Stringify as "operation_name:index", with placeholders for
# undefined fields.
sub _op_stringify {
    my ($self) = @_;
    my $op_name = defined $self->_oper ? $self->oper->Name : '<undefined operation>';
    my $idx     = defined $self->index ? $self->index      : '<no index>';
    return join ":", $op_name, $idx;
}
sub _data_printer {
my ($self, $ddp) = @_;
my %data = (
oper => $self->oper,
index => $self->index,
);
return sprintf('%s %s',
$ddp->maybe_colorize(ref $self, 'class' ),
$ddp->parse(\%data) );
lib/AI/TensorFlow/Libtensorflow/Session.pm view on Meta::CPAN
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
$ffi->attach( [ 'NewSession' => 'New' ] =>
[
arg 'TF_Graph' => 'graph',
arg 'TF_SessionOptions' => 'opt',
arg 'TF_Status' => 'status',
],
=> 'TF_Session' => sub {
my ($xs, $class, @rest) = @_;
return $xs->(@rest);
});
$ffi->attach( [ 'LoadSessionFromSavedModel' => 'LoadFromSavedModel' ] => [
arg TF_SessionOptions => 'session_options',
arg opaque => { id => 'run_options', ffi_type => 'TF_Buffer', maybe => 1 },
arg string => 'export_dir',
arg 'string[]' => 'tags',
arg int => 'tags_len',
arg TF_Graph => 'graph',
arg opaque => { id => 'meta_graph_def', ffi_type => 'TF_Buffer', maybe => 1 },
arg TF_Status => 'status',
] => 'TF_Session' => sub {
my ($xs, $class, @rest) = @_;
my ( $session_options,
$run_options,
$export_dir, $tags,
$graph, $meta_graph_def,
$status) = @rest;
$run_options = $ffi->cast('TF_Buffer', 'opaque', $run_options)
if defined $run_options;
lib/AI/TensorFlow/Libtensorflow/Session.pm view on Meta::CPAN
# Target operations
arg 'opaque' => { id => 'target_opers', ffi_type => 'TF_Operation_array', maybe => 1 },
arg 'int' => 'ntargets',
# RunMetadata
arg 'opaque' => { id => 'run_metadata', ffi_type => 'TF_Buffer', maybe => 1 },
# Output status
arg 'TF_Status' => 'status',
],
=> 'void' => sub {
my ($xs,
$self,
$run_options,
$inputs , $input_values,
$outputs, $output_values,
$target_opers,
$run_metadata,
$status ) = @_;
die "Mismatch in number of inputs and input values" unless $#$inputs == $#$input_values;
lib/AI/TensorFlow/Libtensorflow/Session.pm view on Meta::CPAN
$run_metadata,
$status
);
@{$output_values} = @{ AI::TensorFlow::Libtensorflow::Tensor->_from_array( $output_v_a ) };
}
);
# Convert an optional arrayref of Operation objects into the
# (C array, count) argument pair expected by the C API.
# Returns (undef, 0) when no target operations were given.
sub _process_target_opers_args {
    my ($target_opers) = @_;

    return ( undef, 0 ) unless defined $target_opers;

    my $opers_array =
        AI::TensorFlow::Libtensorflow::Operation->_as_array( @$target_opers );
    return ( $opers_array, $opers_array->count );
}
lib/AI/TensorFlow/Libtensorflow/Session.pm view on Meta::CPAN
# Output names
arg TF_Output_struct_array => 'outputs',
arg int => 'noutputs',
# Target operations
arg opaque => { id => 'target_opers', ffi_type => 'TF_Operation_array', maybe => 1 },
arg int => 'ntargets',
# Output handle
arg 'opaque*' => { id => 'handle', ffi_type => 'string*', window => 1 },
# Output status
arg TF_Status => 'status',
] => 'void' => sub {
my ($xs, $session, $inputs, $outputs, $target_opers, $status) = @_;
$inputs = AI::TensorFlow::Libtensorflow::Output->_as_array( @$inputs );
$outputs = AI::TensorFlow::Libtensorflow::Output->_as_array( @$outputs );
my $handle;
$xs->($session,
$inputs, $inputs->count,
$outputs, $outputs->count,
_process_target_opers_args($target_opers),
lib/AI/TensorFlow/Libtensorflow/Session.pm view on Meta::CPAN
window( my $handle_window, $handle );
my $handle_obj = bless \\$handle_window,
'AI::TensorFlow::Libtensorflow::Session::_PRHandle';
return $handle_obj;
});
# Destructor for partial-run handles. The Perl-side object is a blessed
# ref-to-ref-of-scalar (see the PRHandle construction above), so two
# dereferences yield the window scalar whose pointer the C API expects.
$ffi->attach( [ 'DeletePRunHandle' => 'AI::TensorFlow::Libtensorflow::Session::_PRHandle::DESTROY' ] => [
    arg 'opaque' => 'handle',
] => 'void' => sub {
    my ( $inner, $wrapped ) = @_;
    my $raw_handle = scalar_to_pointer( $$$wrapped );
    $inner->($raw_handle);
} );
$ffi->attach( [ 'SessionPRun' => 'PRun' ] => [
arg TF_Session => 'session',
arg 'opaque' => 'handle',
# Inputs
lib/AI/TensorFlow/Libtensorflow/Session.pm view on Meta::CPAN
# Outputs
arg TF_Output_struct_array => 'outputs',
arg TF_Tensor_array => 'output_values',
arg int => 'noutputs',
# Targets
arg 'opaque*' => { id => 'target_opers', ffi_type => 'TF_Operation_array', maybe => 1 },
arg int => 'ntargets',
arg TF_Status => 'status',
] => 'void' => sub {
my ($xs, $session, $handle_obj,
$inputs, $input_values,
$outputs, $output_values,
$target_opers,
$status) = @_;
die "Mismatch in number of inputs and input values" unless $#$inputs == $#$input_values;
my $input_v_a = AI::TensorFlow::Libtensorflow::Tensor->_as_array(@$input_values);
my $output_v_a = AI::TensorFlow::Libtensorflow::Tensor->_adef->create( 0+@$outputs );
lib/AI/TensorFlow/Libtensorflow/Session.pm view on Meta::CPAN
[ 'TF_Session',
'TF_Status',
],
=> 'void' );
# Bind TF_DeleteSession as the private ``_Delete``; invoked from DESTROY
# after the session has been closed.
$ffi->attach( [ 'DeleteSession' => '_Delete' ] => [
arg 'TF_Session' => 's',
arg 'TF_Status' => 'status',
], => 'void' );
# Close and then delete the underlying TF_Session when the Perl object is
# garbage-collected.
#
# FIX: the original ``die``d on failure. Throwing from DESTROY is unsafe
# in Perl -- it can fire during stack unwinding of an unrelated exception
# or during global destruction, with unpredictable results. Report the
# failure with warn() instead and stop, preserving the original behavior
# of not attempting _Delete after a failed Close.
sub DESTROY {
    my ($self) = @_;
    my $s = AI::TensorFlow::Libtensorflow::Status->New;

    $self->Close($s);
    # TODO this may not be needed with automatic Status handling
    if( $s->GetCode != AI::TensorFlow::Libtensorflow::Status::OK ) {
        warn "Could not close session";
        return;
    }

    $self->_Delete($s);
    warn "Could not delete session"
        unless $s->GetCode == AI::TensorFlow::Libtensorflow::Status::OK;
}
1;
lib/AI/TensorFlow/Libtensorflow/Status.pm view on Meta::CPAN
# Bind TF_SetStatusFromIOError: record an I/O error (errno value plus a
# context string) into the status object.
$ffi->attach( 'SetStatusFromIOError' => [ 'TF_Status', 'int', 'string' ],
'void' );
# Accessors for the stored status code and its human-readable message.
$ffi->attach( 'GetCode' => [ 'TF_Status' ], 'TF_Code' );
$ffi->attach( 'Message' => [ 'TF_Status' ], 'string' );
# Stringification overload: a status object renders as the symbolic name
# of its TF_Code (looked up in the package-level code->name map).
use overload '""' => \&_op_stringify;

sub _op_stringify {
    my ($self) = @_;
    return $_TF_CODE_INT_TO_NAME{ $self->GetCode };
}
sub _data_printer {
my ($self, $ddp) = @_;
if( $self->GetCode != AI::TensorFlow::Libtensorflow::Status::OK() ) {
return sprintf('%s %s %s %s%s%s %s',
$ddp->maybe_colorize( ref($self), 'class' ),
$ddp->maybe_colorize( '{', 'brackets' ),
$ddp->maybe_colorize( $_TF_CODE_INT_TO_NAME{$self->GetCode}, 'escaped' ),
$ddp->maybe_colorize( '(', 'brackets' ),
$ddp->maybe_colorize( $self->Message, 'string' ),
$ddp->maybe_colorize( ')', 'brackets' ),
$ddp->maybe_colorize( '}', 'brackets' ),
lib/AI/TensorFlow/Libtensorflow/TFLibrary.pm view on Meta::CPAN
$AI::TensorFlow::Libtensorflow::TFLibrary::VERSION = '0.0.7';
use strict;
use warnings;
use AI::TensorFlow::Libtensorflow::Lib qw(arg);
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
# Bind TF_LoadLibrary as the class method ``LoadLibrary``. The wrapper
# drops the invoking class name before calling into C.
$ffi->attach( [ 'LoadLibrary' => 'LoadLibrary' ] => [
    arg string    => 'library_filename',
    arg TF_Status => 'status',
] => 'TF_Library' => sub {
    my ( $inner, $invocant, @args ) = @_;
    $inner->(@args);
} );
# Bind TF_GetOpList: returns a TF_Buffer for the given library handle.
$ffi->attach( [ 'GetOpList' => 'GetOpList' ] => [
arg TF_Library => 'lib_handle'
] => 'TF_Buffer' );
$ffi->attach( [ 'DeleteLibraryHandle' => 'DESTROY' ] => [
arg TF_Library => 'lib_handle'
lib/AI/TensorFlow/Libtensorflow/TString.pm view on Meta::CPAN
### From <tensorflow/tsl/platform/ctstring_internal.h>
# typedef enum TF_TString_Type { // NOLINT
# TF_TSTR_SMALL = 0x00,
# TF_TSTR_LARGE = 0x01,
# TF_TSTR_OFFSET = 0x02,
# TF_TSTR_VIEW = 0x03,
# TF_TSTR_TYPE_MASK = 0x03
# } TF_TString_Type;
# Allocate an uninitialized TF_TString-sized buffer and wrap it in a
# blessed object. Callers are expected to Init/Copy it before use;
# DESTROY frees the malloc'd memory.
#
# FIX: return the object explicitly instead of relying on the value of a
# ``my`` assignment as the sub's implicit return value.
sub _CREATE {
    my ($class) = @_;
    my $pointer = malloc SIZEOF_TF_TString;
    return bless { ptr => $pointer }, $class;
}
# Bind TF_StringInit as ``Init``. Dual-use invocant: called on an
# instance it (re)initializes that object; called on the class name it
# allocates a fresh TF_TString first. Returns the initialized object.
$ffi->attach( [ 'StringInit' => 'Init' ] => [
    arg 'TF_TString' => 'tstr'
] => 'void' => sub {
    my ( $inner, $invocant ) = @_;
    my $tstr;
    if ( ref $invocant ) {
        $tstr = $invocant;            # already an instance
    } else {
        $tstr = $invocant->_CREATE(); # class name: allocate a new one
    }
    $inner->($tstr);
    return $tstr;
});
# Bind TF_StringCopy as ``Copy``: copy ``size`` bytes of ``src`` text
# into the destination TF_TString.
$ffi->attach( [ 'StringCopy' => 'Copy' ] => [
arg TF_TString => 'dst',
arg tf_text_buffer => [ qw( src size ) ],
] => 'void' );
lib/AI/TensorFlow/Libtensorflow/TString.pm view on Meta::CPAN
] => 'size_t' );
# Bind TF_StringGetCapacity as ``GetCapacity``: capacity in bytes.
$ffi->attach( [ 'StringGetCapacity' => 'GetCapacity' ] => [
arg TF_TString => 'str'
] => 'size_t' );
# Bind TF_StringDealloc as ``Dealloc``: release the string's internal
# storage. The struct allocated in _CREATE is freed separately (DESTROY).
$ffi->attach( [ 'StringDealloc' => 'Dealloc' ] => [
arg TF_TString => 'tstr',
] => 'void' );
# Free the TF_TString unless this object is marked as non-owning
# (``owner`` flag set), in which case the memory belongs elsewhere.
sub DESTROY {
    my ($self) = @_;
    return if $self->{owner};
    $self->Dealloc;       # release the string's internal storage
    free $self->{ptr};    # release the struct allocated in _CREATE
}
1;
__END__
lib/AI/TensorFlow/Libtensorflow/Tensor.pm view on Meta::CPAN
# const int64_t* dims, int num_dims
arg 'tf_dims_buffer' => [ qw(dims num_dims) ],
# void* data, size_t len
arg 'tf_tensor_buffer' => [ qw(data len) ],
arg 'opaque' => 'deallocator', # tensor_deallocator_t (deallocator)
arg 'opaque' => 'deallocator_arg',
],
=> 'TF_Tensor' => sub {
my ($xs, $class,
$dtype, $dims, $data,
$deallocator, $deallocator_arg,
) = @_;
my $deallocator_closure = $ffi->closure( $deallocator );
$deallocator_closure->sticky;
my $deallocator_ptr = $ffi->cast(
'tensor_deallocator_t', 'opaque',
$deallocator_closure );
lib/AI/TensorFlow/Libtensorflow/Tensor.pm view on Meta::CPAN
# C: TF_AllocateTensor
#
# Constructor
#
# Allocate an uninitialized TFTensor of type ``$dtype`` with dimensions
# ``$dims``. ``$len`` (total byte size) is optional and defaults to the
# product of the element size and all dimensions.
#
# FIX: return the tensor explicitly instead of relying on the value of a
# dead ``my $obj = ...`` assignment as the implicit return value.
$ffi->attach( [ 'AllocateTensor', 'Allocate' ],
    [
        arg 'TF_DataType'    => 'dtype',
        arg 'tf_dims_buffer' => [ qw(dims num_dims) ],
        arg 'size_t'         => 'len',
    ],
    => 'TF_Tensor' => sub {
        my ( $xs, $class, $dtype, $dims, $len ) = @_;
        # Default byte length: element size times the number of elements.
        $len = product( $dtype->Size, @$dims ) if ! defined $len;
        return $xs->( $dtype, $dims, $len );
    }
);
# Destructor: delete the underlying TF_Tensor first, then unstick the
# sticky deallocator closure (if one was registered at construction)
# so Perl can reclaim it. Order preserved from the original.
$ffi->attach( [ 'DeleteTensor' => 'DESTROY' ],
    [ arg 'TF_Tensor' => 't' ]
    => 'void'
    => sub {
        my ( $inner, $self ) = @_;
        $inner->($self);
        $self->{_deallocator_closure}->unstick
            if exists $self->{_deallocator_closure};
    }
);
# Bind TF_TensorData as ``Data``: returns a reference to a scalar whose
# contents are a window over the tensor's byte buffer (no copy is made;
# the window spans ByteSize bytes starting at the C data pointer).
$ffi->attach( [ 'TensorData' => 'Data' ],
    [ arg 'TF_Tensor' => 'self' ],
    => 'opaque'
    => sub {
        my ( $inner, $self ) = @_;
        my $data_ptr = $inner->($self);
        window( my $view, $data_ptr, $self->ByteSize );
        return \$view;
    }
);
$ffi->attach( [ 'TensorByteSize' => 'ByteSize' ],
[ arg 'TF_Tensor' => 'self' ],
lib/AI/TensorFlow/Libtensorflow/Tensor.pm view on Meta::CPAN
name => 'TF_Tensor_array',
members => [
FFI::C::StructDef->new(
$ffi,
members => [
p => 'opaque'
]
)
],
);
# Accessor for the package-level FFI::C::ArrayDef used to marshal arrays
# of TF_Tensor pointers.
sub _adef {
    return $adef;
}
# Pack a list of TFTensor objects into a C array of opaque pointers.
# Undefined entries are skipped, leaving those slots untouched.
sub _as_array {
    my ( $class, @tensors ) = @_;
    my $c_array = $class->_adef->create( scalar @tensors );
    my $slot = 0;
    for my $tensor (@tensors) {
        if ( defined $tensor ) {
            $c_array->[$slot]->p( $ffi->cast( 'TF_Tensor', 'opaque', $tensor ) );
        }
        $slot++;
    }
    return $c_array;
}
# Unpack a C array of opaque TF_Tensor pointers back into an arrayref of
# blessed TFTensor objects.
sub _from_array {
    my ( $class, $array ) = @_;
    my @tensors;
    for my $idx ( 0 .. $array->count - 1 ) {
        push @tensors,
            $ffi->cast( 'opaque', 'TF_Tensor', $array->[$idx]->p );
    }
    return \@tensors;
}
#### Data::Printer ####
sub _data_printer {
my ($self, $ddp) = @_;
my @data = (
[ Type => $ddp->maybe_colorize( $self->Type, 'class' ), ],
[ Dims => sprintf "%s %s %s",
$ddp->maybe_colorize('[', 'brackets'),
join(" ",
map $ddp->maybe_colorize( $self->Dim($_), 'number' ),
0..$self->NumDims-1),
$ddp->maybe_colorize(']', 'brackets'),
lib/AI/TensorFlow/Libtensorflow/Tensor.pm view on Meta::CPAN
=back
Creates a C<TFTensor> from a data buffer C<$data> with the given specification
of data type C<$dtype> and dimensions C<$dims>.
# Create a buffer containing 0 through 8 single-precision
# floating-point data.
my $data = pack("f*", 0..8);
$t = Tensor->New(
FLOAT, [3,3], \$data, sub { undef $data }, undef
);
ok $t, 'Created 3-by-3 float TFTensor';
Implementation note: if C<$dtype> is not a
L<STRING|AI::TensorFlow::Libtensorflow::DataType/STRING>
or
L<RESOURCE|AI::TensorFlow::Libtensorflow::DataType/RESOURCE>,
then the pointer for C<$data> is checked to see if it meets the
TensorFlow's alignment preferences. If it does not, the
maint/inc/Pod/Elemental/Transformer/TF_CAPI.pm view on Meta::CPAN
use Pod::Elemental::Element::Pod5::Command;
use namespace::autoclean;
# Read-only attribute with no init_arg: it cannot be set from the
# constructor. NOTE(review): no default/builder is visible in this chunk
# -- presumably supplied by a subclass or role; verify elsewhere.
has command_name => (
is => 'ro',
init_arg => undef,
);
# Walk a node's children, replacing each transformable command paragraph
# with its expansion. Iterates in reverse so that splicing replacements
# in does not shift the indices of paragraphs not yet visited.
sub transform_node {
    my ( $self, $node ) = @_;
    for my $pos ( reverse 0 .. $#{ $node->children } ) {
        my $paragraph = $node->children->[$pos];
        next unless $self->__is_xformable($paragraph);
        my @expanded = $self->_expand($paragraph);
        splice @{ $node->children }, $pos, 1, @expanded;
    }
}
# Dispatch table mapping a POD command name to its expansion routine.
my $command_dispatch = {
'tf_capi' => \&_expand_capi,
'tf_version' => \&_expand_version,
};
# A paragraph is transformable when it is a Pod5 command paragraph whose
# command name appears in the dispatch table. Returns 1 or empty.
sub __is_xformable {
    my ( $self, $para ) = @_;
    if (   $para->isa('Pod::Elemental::Element::Pod5::Command')
        && exists $command_dispatch->{ $para->command } )
    {
        return 1;
    }
    return;
}
# Delegate expansion to the handler registered for this paragraph's
# command, forwarding ($self, $parent) unchanged.
sub _expand {
    my ( $self, $parent ) = @_;
    my $handler = $command_dispatch->{ $parent->command };
    return $handler->( $self, $parent );
}
# Expand a ``tf_version`` command paragraph into a single ordinary POD
# paragraph naming the libtensorflow version. Dies if the content does
# not look like a version string (``v`` followed by digits and dots).
sub _expand_version {
    my ( $self, $parent ) = @_;
    my $content = $parent->content;
    unless ( $content =~ /\A v [0-9.]+ \Z/x ) {
        die "Not a version string: $content";
    }
    my $paragraph = Pod::Elemental::Element::Pod5::Ordinary->new(
        content => 'C<libtensorflow> version: ' . $content,
    );
    return ($paragraph);
}
sub _expand_capi {
my ($self, $parent) = @_;
my @replacements;
my $content = $parent->content;
my @ids = split /,\s*/, $content;
my $doc_name = 'AI::TensorFlow::Libtensorflow::Manual::CAPI';
my $new_content = "B<C API>: "
. join ", ", map {
maint/inc/Pod/Elemental/Transformer/TF_Sig.pm view on Meta::CPAN
use feature qw{ postderef };
use lib 'lib';
use AI::TensorFlow::Libtensorflow::Lib;
use AI::TensorFlow::Libtensorflow::Lib::Types qw(-all);
use Types::Standard qw(Maybe Str Int ArrayRef CodeRef ScalarRef Ref);
use Types::Encodings qw(Bytes);
use Type::Registry qw(t);
use namespace::autoclean;
# A paragraph is transformable when it is a Pod5 region whose format name
# is one of ``param``, ``returns``, or ``signature``; such regions must be
# pod regions (``=begin :name``), otherwise we confess.
#
# FIX: the confess message previously interpolated ``$self->format_name``,
# but the format name belongs to the region paragraph ($para), not to the
# transformer object -- report the paragraph's name.
sub __is_xformable {
    my ( $self, $para ) = @_;
    return unless $para->isa('Pod::Elemental::Element::Pod5::Region')
        and $para->format_name =~ /^(?:param|returns|signature)$/;
    confess("list regions must be pod (=begin :" . $para->format_name . ")")
        unless $para->is_pod;
    return 1;
}
# Heading text used for each recognized region format name.
my %region_types = (
'signature' => 'Signature',
'param' => 'Parameters',
'returns' => 'Returns',
);
around _expand_list_paras => sub {
my ($orig, $self, $para) = @_;
my $is_list_type = $para->format_name =~ /^(?:param|returns)$/;
if( $is_list_type ) {
die "Need description list for @{[ $para->as_pod_string ]}"
unless $para->children->[0]->content =~ /^=/;
}
my $prefix;
if( $para->isa('Pod::Elemental::Element::Pod5::Region')
maint/inc/Pod/Elemental/Transformer/TF_Sig.pm view on Meta::CPAN
=back
EOF
}
unshift @replacements, $prefix if defined $prefix;
@replacements;
};
# Numbered and bulleted list markers are not supported by this
# transformer; only definition lists are handled.
sub __paras_for_num_marker {
    die "only support definition lists";
}

sub __paras_for_bul_marker {
    die "only support definition lists";
}
around __paras_for_def_marker => sub {
my ($orig, $self, $rest) = @_;
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
my $type_library = 'AI::TensorFlow::Libtensorflow::Lib::Types';
my @types = ($rest);
my $process_type = sub {
my ($type) = @_;
my $new_type_text = $type;
my $info;
if( eval { $info->{TT} = t($type); 1 }
|| eval { $info->{FFI} = $ffi->type_meta($type); 1 } ) {
if( $info->{TT} && $info->{TT}->library eq $type_library ) {
$new_type_text = "L<$type|$type_library/$type>";
}
} else {
die "Could not find type constraint or FFI::Platypus type $type";
maint/inc/PreloadPodWeaver.pm view on Meta::CPAN
# Dist::Zilla plugin whose sole purpose is to pre-load the custom POD
# transformers before Pod::Weaver runs.
package maint::inc::PreloadPodWeaver;

use Moose;
extends 'Dist::Zilla::Plugin';

sub register_component {
    # Load the transformer classes at registration time.
    require Pod::Elemental::Transformer::TF_CAPI;
    require Pod::Elemental::Transformer::TF_Sig;
    return;
}

__PACKAGE__->meta->make_immutable;
no Moose;

1;