AI-TensorFlow-Libtensorflow


MANIFEST

t/upstream/CAPI/029_SavedModel.t
t/upstream/CAPI/030_SavedModelNullArgsAreValid.t
t/upstream/CAPI/031_DeletingNullPointerIsSafe.t
t/upstream/CAPI/032_TestBitcastFrom_Reshape.t
t/upstream/CAPI/033_TestFromProto.t
t/upstream/CAPI/034_TestTensorAligned.t
t/upstream/CAPI/035_TestTensorIsNotAligned.t
t/upstream/CAPI/036_MessageBufferConversion.t
t/upstream/CAPI/037_TestTensorNonScalarBytesAllocateDelete.t
t/upstream/CAPI/TEMPLATE
t/upstream/tensorflow/cc/saved_model/testdata/half_plus_two/00000123/assets/foo.txt
t/upstream/tensorflow/cc/saved_model/testdata/half_plus_two/00000123/saved_model.pb
t/upstream/tensorflow/cc/saved_model/testdata/half_plus_two/00000123/variables/variables.data-00000-of-00001
t/upstream/tensorflow/cc/saved_model/testdata/half_plus_two/00000123/variables/variables.index
weaver.ini
xt/author/critic.t
xt/author/pod-linkcheck.t
xt/author/pod-snippets.t

dist.ini

Mu = 0
Path::Tiny = 0
Sort::Key::Multi = 0
Sub::Uplevel = 0
Syntax::Construct = 0
Types::Path::Tiny = 0

[Encoding / ModelData]
encoding = bytes
match    = \.pb$
match    = t/upstream/tensorflow/cc/saved_model/testdata/

[MetaNoIndex]
directory = maint

lib/AI/TensorFlow/Libtensorflow/Buffer.pm

package AI::TensorFlow::Libtensorflow::Buffer;
# ABSTRACT: Buffer that holds pointer to data with length
$AI::TensorFlow::Libtensorflow::Buffer::VERSION = '0.0.7';
use strict;
use warnings;
use namespace::autoclean;
use AI::TensorFlow::Libtensorflow::Lib qw(arg);

my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
use FFI::C;
FFI::C->ffi($ffi);

lib/AI/TensorFlow/Libtensorflow/Buffer.pm

);

use FFI::Platypus::Buffer;
use FFI::Platypus::Memory;





FFI::C->struct( 'TF_Buffer' => [
	data => 'opaque',
	length => 'size_t',
	_data_deallocator => 'opaque', # data_deallocator_t
	# this does not work?
	#_data_deallocator => 'data_deallocator_t',
]);
use Sub::Delete;
delete_sub 'DESTROY';

sub data_deallocator {
	my ($self, $coderef) = @_;

	return $self->{_data_deallocator_closure} unless $coderef;

	my $closure = $ffi->closure( $coderef );

	$closure->sticky;
	$self->{_data_deallocator_closure} = $closure;

	my $opaque = $ffi->cast('data_deallocator_t', 'opaque', $closure);
	$self->_data_deallocator( $opaque );
}


$ffi->attach( [ 'NewBuffer' => 'New' ] => [] => 'TF_Buffer' );

$ffi->attach( [ 'NewBufferFromString' => 'NewFromString' ] => [
	arg 'tf_buffer_buffer' => [qw(proto proto_len)]
] => 'TF_Buffer' => sub {
	my ($xs, $class, @rest) = @_;
	$xs->(@rest);

lib/AI/TensorFlow/Libtensorflow/Buffer.pm

1;

__END__

=pod

=encoding UTF-8

=head1 NAME

AI::TensorFlow::Libtensorflow::Buffer - Buffer that holds pointer to data with length

=head1 SYNOPSIS

  use aliased 'AI::TensorFlow::Libtensorflow::Buffer' => 'Buffer';

=head1 DESCRIPTION

C<TFBuffer> is a data structure that stores a pointer to a block of data, the
length of the data, and optionally a deallocator function for memory
management.

This structure is typically used in C<libtensorflow> to store the data for a
serialized protocol buffer.

=head1 CONSTRUCTORS

=head2 New

=over 2

C<<<
New()

lib/AI/TensorFlow/Libtensorflow/Buffer.pm


C<<<
NewFromString( $proto )
>>>

=back

Makes a copy of the input and sets an appropriate deallocator. Useful for
passing in read-only, input protobufs.

  my $data = 'bytes';
  my $buffer = Buffer->NewFromString(\$data);
  ok $buffer, 'create buffer from string';
  is $buffer->length, bytes::length($data), 'same length as string';

B<Parameters>

=over 4

=item ScalarRef[Bytes] $proto

=back

B<Returns>

=over 4

=item L<TFBuffer|AI::TensorFlow::Libtensorflow::Lib::Types/TFBuffer>

Contains a copy of the input data from C<$proto>.

=back

B<C API>: L<< C<TF_NewBufferFromString>|AI::TensorFlow::Libtensorflow::Manual::CAPI/TF_NewBufferFromString >>

=head1 ATTRIBUTES

=head2 data

An C<opaque> pointer to the buffer.

=head2 length

Length of the buffer as a C<size_t>.

=head2 data_deallocator

A C<CodeRef> for the deallocator.
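
A hypothetical sketch of installing one, assuming the buffer wraps memory
allocated with C<malloc> from L<FFI::Platypus::Memory> (the closure
receives the data pointer and its length):

  use FFI::Platypus::Memory qw(malloc free);

  my $buffer = Buffer->New;
  $buffer->data( malloc(16) );
  $buffer->length( 16 );
  $buffer->data_deallocator( sub {
      my ($pointer, $length) = @_;
      free($pointer);
  });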

=head1 DESTRUCTORS

=head2 DESTROY

B<C API>: L<< C<TF_DeleteBuffer>|AI::TensorFlow::Libtensorflow::Manual::CAPI/TF_DeleteBuffer >>

=head1 AUTHOR

lib/AI/TensorFlow/Libtensorflow/DataType.pm

use warnings;
use AI::TensorFlow::Libtensorflow::Lib;
use Const::Exporter;

use Devel::StrictMode;
use Types::Common qw(Int Str);

use namespace::autoclean;

# enum TF_DataType
# From <tensorflow/c/tf_datatype.h>
my %_ENUM_DTYPE = (
	FLOAT      =>  1,
	DOUBLE     =>  2,
	INT32      =>  3, #// Int32 tensors are always in 'host' memory.
	UINT8      =>  4,
	INT16      =>  5,
	INT8       =>  6,
	STRING     =>  7,
	COMPLEX64  =>  8, #// Single-precision complex
	# NOTE Stubbing out this duplicate so that no new code uses this.

lib/AI/TensorFlow/Libtensorflow/DataType.pm

  use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT @DTYPES);
  use List::Util qw(max);

  my $dtype = FLOAT;
  is FLOAT->Size, 4, 'FLOAT is 4 bytes large';
  is max(map { $_->Size } @DTYPES), 16,
    'Largest type has sizeof() == 16 bytes';

=head1 DESCRIPTION

Enum representing native data types used inside of containers such as
L<TFTensor|AI::TensorFlow::Libtensorflow::Lib::Types/TFTensor>.

=head1 CONSTANTS

=head2 STRING

String.

=head2 BOOL

lib/AI/TensorFlow/Libtensorflow/Graph.pm

package AI::TensorFlow::Libtensorflow::Graph;
# ABSTRACT: A TensorFlow computation, represented as a dataflow graph
$AI::TensorFlow::Libtensorflow::Graph::VERSION = '0.0.7';
use strict;
use warnings;
use namespace::autoclean;
use AI::TensorFlow::Libtensorflow::Lib qw(arg);
use AI::TensorFlow::Libtensorflow::Buffer;
use AI::TensorFlow::Libtensorflow::Output;
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);

lib/AI/TensorFlow/Libtensorflow/Graph.pm

1;

__END__

=pod

=encoding UTF-8

=head1 NAME

AI::TensorFlow::Libtensorflow::Graph - A TensorFlow computation, represented as a dataflow graph

=head1 SYNOPSIS

  use aliased 'AI::TensorFlow::Libtensorflow::Graph' => 'Graph';

=head1 DESCRIPTION
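
A minimal sketch of creating an empty dataflow graph, to which operations
can then be added (using the C<New> constructor documented below):

  my $graph = Graph->New;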

=head1 CONSTRUCTORS

=head2 New

lib/AI/TensorFlow/Libtensorflow/Lib.pm



		$ffi->type('object(AI::TensorFlow::Libtensorflow::Eager::ContextOptions)', 'TFE_ContextOptions');

		$ffi->type('object(AI::TensorFlow::Libtensorflow::Eager::Context)', 'TFE_Context');



		## Callbacks for deallocation
		# For TF_Buffer
		$ffi->type('(opaque,size_t)->void'        => 'data_deallocator_t');
		# For TF_Tensor
		$ffi->type('(opaque,size_t,opaque)->void' => 'tensor_deallocator_t');
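
		# These callback types can be produced from Perl subs with
		# $ffi->closure(sub { ... }) and cast to 'opaque' where a struct
		# member needs one; see data_deallocator() in
		# AI::TensorFlow::Libtensorflow::Buffer for an example.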

		$ffi;
	};
}

sub mangler_default {
	my $target = (caller)[0];
	my $prefix = 'TF';

lib/AI/TensorFlow/Libtensorflow/Lib/FFIType/Variant/PackableArrayRef.pm


	die "Invalid pack type, must be single character"
		unless $arguments{pack_type} =~ /^.$/;

	my @stack;

	my $perl_to_native = install perl_to_native => sub {
		my ($value, $i) = @_;
		die "Value must be an ArrayRef"
			unless defined $value && ref $value eq 'ARRAY';
		my $data = pack  $arguments{pack_type} . '*', @$value;
		my $n    = scalar @$value;
		my ($pointer, $size) = scalar_to_buffer($data);

		push @stack, [ \$data, $pointer, $size ];
		arguments_set_pointer( $i  , $pointer);
		arguments_set_sint32(  $i+1, $n);
	};

	my $perl_to_native_post = install perl_to_native_post => sub {
		my ($data_ref, $pointer, $size) = @{ pop @stack };
		$$data_ref = buffer_to_scalar($pointer, $size);
		@{$_[0]} = unpack $arguments{pack_type} . '*', $$data_ref;
		();
	};
	install ffi_custom_type_api_1 => sub {
		{
			native_type => 'opaque',
			argument_count => 2,
			perl_to_native => $perl_to_native,
			perl_to_native_post => $perl_to_native_post,
		}
	};
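
	# Hypothetical usage sketch (names illustrative, assuming the
	# Package::Variant-style interface suggested by the install()
	# calls above):
	#
	#   my $type = PackableArrayRef( 'Int32ArrayRef', pack_type => 'l' );
	#   $ffi->load_custom_type( $type => 'i32_array' );
	#
	# A C function taking (int32_t* values, int n) could then be attached
	# with the single argument type 'i32_array' and called with a plain
	# Perl ArrayRef; the pointer and element count are supplied
	# automatically.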

lib/AI/TensorFlow/Libtensorflow/Lib/FFIType/Variant/PackableMaybeArrayRef.pm


	die "Invalid pack type, must be single character"
		unless $arguments{pack_type} =~ /^.$/;

	my @stack;

	my $perl_to_native = install perl_to_native => sub {
		my ($value, $i) = @_;
		if( defined $value ) {
			die "Value must be an ArrayRef" unless ref $value eq 'ARRAY';
			my $data = pack  $arguments{pack_type} . '*', @$value;
			my $n    = scalar @$value;
			my ($pointer, $size) = scalar_to_buffer($data);

			push @stack, [ \$data, $pointer, $size ];
			arguments_set_pointer( $i  , $pointer);
			arguments_set_sint32(  $i+1, $n);
		} else {
			my $data = undef;
			my $n    = -1;
			my ($pointer, $size) = (0, 0);
			push @stack, [ \$data, $pointer, $size ];
			arguments_set_pointer( $i  , $pointer);
			arguments_set_sint32(  $i+1, $n);
		}
	};

	my $perl_to_native_post = install perl_to_native_post => sub {
		my ($data_ref, $pointer, $size) = @{ pop @stack };
		if( ! Scalar::Util::readonly($_[0]) ) {
			$$data_ref = buffer_to_scalar($pointer, $size);
			@{$_[0]} = unpack $arguments{pack_type} . '*', $$data_ref;
		}
		();
	};
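
	# Note: unlike PackableArrayRef, perl_to_native above accepts undef
	# and marshals it as a NULL pointer with a count of -1 (the usual C
	# convention for an optional array); the post hook likewise skips
	# write-back when the caller passed a read-only value.
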
	install ffi_custom_type_api_1 => sub {
		{
			native_type => 'opaque',
			argument_count => 2,
			perl_to_native => $perl_to_native,
			perl_to_native_post => $perl_to_native_post,
		}

lib/AI/TensorFlow/Libtensorflow/Lib/FFIType/Variant/RecordArrayRef.pm

	die "Missing/invalid module name: $arguments{record_module}"
		unless is_module_name($arguments{record_module});

	my $record_module = $arguments{record_module};
	my $with_size     = exists $arguments{with_size} ? $arguments{with_size} : 1;

	my @stack;

	my $perl_to_native = install perl_to_native => sub {
		my ($value, $i) = @_;
		my $data = pack "(a*)*", map $$_, @$value;
		my($pointer, $size) = scalar_to_buffer($data);
		my $n = @$value;
		my $sizeof = $size / $n;
		push @stack, [ \$data, $n, $pointer, $size , $sizeof ];
		arguments_set_pointer $i  , $pointer;
		arguments_set_sint32  $i+1, $n if $with_size;
	};

	my $perl_to_native_post = install perl_to_native_post => sub {
		my($data_ref, $n, $pointer, $size, $sizeof) = @{ pop @stack };
		$$data_ref = buffer_to_scalar($pointer, $size);
		@{$_[0]} = map {
			bless \$_, $record_module
		} unpack  "(a${sizeof})*", $$data_ref;
		();
	};
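
	# Hypothetical usage sketch (names illustrative, assuming the
	# Package::Variant-style interface suggested by the code above):
	#
	#   my $type = RecordArrayRef( 'OutputArrayRef',
	#       record_module => 'AI::TensorFlow::Libtensorflow::Output' );
	#   $ffi->load_custom_type( $type => 'tf_output_array' );
	#
	# A C function taking (TF_Output* outputs, int n) could then be
	# attached so that it accepts a Perl ArrayRef of Output records.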

	install ffi_custom_type_api_1 => sub {
		{
			native_type => 'opaque',
			argument_count => ($with_size ? 2 : 1),
			perl_to_native => $perl_to_native,
			perl_to_native_post => $perl_to_native_post,
		}

lib/AI/TensorFlow/Libtensorflow/Lib/_Alloc.pm

	# to 255 bytes (which means 128 bytes is the maximum power-of-two
	# alignment).
	my @alignments = map 2**$_, reverse 0..7;

	# 1-byte element
	my $el = INT8;
	my $el_size = $el->Size;

	my $max_alignment = $alignments[0];
	my $req_size = 2 * $max_alignment + $el_size;
	# All data that is sent to TF_NewTensor here is within the block of
	# memory allocated at $ptr_base.
	my $ptr_base = malloc($req_size);
	defer { free($ptr_base); }

	# start at offset that is aligned with $max_alignment
	my $ptr = $ptr_base + ( $max_alignment - $ptr_base % $max_alignment );
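
	# Probe strategy (a note on the code below): construct tensors whose
	# data pointers sit at successively smaller alignments. libtensorflow
	# copies (and immediately deallocates) data that is not sufficiently
	# aligned, so whether the deallocator ran reveals if a given alignment
	# was acceptable.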

	my $create_tensor_at_alignment = sub {
		my ($n, $dealloc_called) = @_;
		my $offset = $n - $ptr % $n;
		my $ptr_offset = $ptr + $offset;
		my $space_for_data = $req_size - $offset;

		window(my $data, $ptr_offset, $space_for_data);

		return AI::TensorFlow::Libtensorflow::Tensor->New(
			$el, [int($space_for_data/$el_size)], \$data, sub {
				$$dealloc_called = 1
			}
		);
	};

	for my $a_idx (0..@alignments-2) {
		my @dealloc = (0, 0);
		my @t = map {
			$create_tensor_at_alignment->($alignments[$a_idx + $_], \$dealloc[$_]);
		} (0..1);

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

  outputs.  Returns the number of control outputs (should match
  TF_OperationNumControlOutputs(oper)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern int TF_OperationGetControlOutputs(
      TF_Operation* oper, TF_Operation** control_outputs,
      int max_control_outputs);

=head2 TF_OperationGetAttrMetadata

=over 2

  Returns metadata about the value of the attribute `attr_name` of `oper`.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern TF_AttrMetadata TF_OperationGetAttrMetadata(
      TF_Operation* oper, const char* attr_name, TF_Status* status);

=head2 TF_OperationGetAttrString

=over 2

  Fills in `value` with the value of the attribute `attr_name`.  `value` must
  point to an array of length at least `max_length` (ideally set to
  TF_AttrMetadata.total_size from TF_OperationGetAttrMetadata(oper,
  attr_name)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrString(TF_Operation* oper,
                                                       const char* attr_name,
                                                       void* value,
                                                       size_t max_length,
                                                       TF_Status* status);

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

=head2 TF_OperationGetAttrStringList

=over 2

  Get the list of strings in the value of the attribute `attr_name`.  Fills in
  `values` and `lengths`, each of which must point to an array of length at
  least `max_values`.
  
  The elements of values will point to addresses in `storage` which must be at
  least `storage_size` bytes in length.  Ideally, max_values would be set to
  TF_AttrMetadata.list_size and `storage` would be at least
  TF_AttrMetadata.total_size, obtained from TF_OperationGetAttrMetadata(oper,
  attr_name).
  
  Fails if storage_size is too small to hold the requested number of strings.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrStringList(
      TF_Operation* oper, const char* attr_name, void** values, size_t* lengths,
      int max_values, void* storage, size_t storage_size, TF_Status* status);

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

                                                    const char* attr_name,
                                                    int64_t* value,
                                                    TF_Status* status);

=head2 TF_OperationGetAttrIntList

=over 2

  Fills in `values` with the value of the attribute `attr_name` of `oper`.
  `values` must point to an array of length at least `max_values` (ideally set to
  TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper,
  attr_name)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrIntList(TF_Operation* oper,
                                                        const char* attr_name,
                                                        int64_t* values,
                                                        int max_values,
                                                        TF_Status* status);

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

                                                      const char* attr_name,
                                                      float* value,
                                                      TF_Status* status);

=head2 TF_OperationGetAttrFloatList

=over 2

  Fills in `values` with the value of the attribute `attr_name` of `oper`.
  `values` must point to an array of length at least `max_values` (ideally set
  to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper,
  attr_name)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrFloatList(TF_Operation* oper,
                                                          const char* attr_name,
                                                          float* values,
                                                          int max_values,
                                                          TF_Status* status);

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

                                                     const char* attr_name,
                                                     unsigned char* value,
                                                     TF_Status* status);

=head2 TF_OperationGetAttrBoolList

=over 2

  Fills in `values` with the value of the attribute `attr_name` of `oper`.
  `values` must point to an array of length at least `max_values` (ideally set
  to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper,
  attr_name)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrBoolList(TF_Operation* oper,
                                                         const char* attr_name,
                                                         unsigned char* values,
                                                         int max_values,
                                                         TF_Status* status);

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

                                                     const char* attr_name,
                                                     TF_DataType* value,
                                                     TF_Status* status);

=head2 TF_OperationGetAttrTypeList

=over 2

  Fills in `values` with the value of the attribute `attr_name` of `oper`.
  `values` must point to an array of length at least `max_values` (ideally set
  to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper,
  attr_name)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrTypeList(TF_Operation* oper,
                                                         const char* attr_name,
                                                         TF_DataType* values,
                                                         int max_values,
                                                         TF_Status* status);

=head2 TF_OperationGetAttrShape

=over 2

  Fills in `value` with the value of the attribute `attr_name` of `oper`.
  `value` must point to an array of length at least `num_dims` (ideally set to
  TF_AttrMetadata.total_size from TF_OperationGetAttrMetadata(oper, attr_name)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrShape(TF_Operation* oper,
                                                      const char* attr_name,
                                                      int64_t* value,
                                                      int num_dims,
                                                      TF_Status* status);

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

=over 2

  Fills in `dims` with the list of shapes in the attribute `attr_name` of
  `oper` and `num_dims` with the corresponding number of dimensions. On return,
  for every i where `num_dims[i]` > 0, `dims[i]` will be an array of
  `num_dims[i]` elements. A value of -1 for `num_dims[i]` indicates that the
  i-th shape in the list is unknown.
  
  The elements of `dims` will point to addresses in `storage` which must be
  large enough to hold at least `storage_size` int64_ts.  Ideally, `num_shapes`
  would be set to TF_AttrMetadata.list_size and `storage_size` would be set to
  TF_AttrMetadata.total_size from TF_OperationGetAttrMetadata(oper,
  attr_name).
  
  Fails if storage_size is insufficient to hold the requested shapes.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrShapeList(
      TF_Operation* oper, const char* attr_name, int64_t** dims, int* num_dims,
      int num_shapes, int64_t* storage, int storage_size, TF_Status* status);

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

  TF_CAPI_EXPORT extern void TF_OperationGetAttrTensorShapeProto(
      TF_Operation* oper, const char* attr_name, TF_Buffer* value,
      TF_Status* status);

=head2 TF_OperationGetAttrTensorShapeProtoList

=over 2

  Fills in `values` with binary-serialized TensorShapeProto values of the
  attribute `attr_name` of `oper`. `values` must point to an array of length at
  least `num_values` (ideally set to TF_AttrMetadata.list_size from
  TF_OperationGetAttrMetadata(oper, attr_name)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrTensorShapeProtoList(
      TF_Operation* oper, const char* attr_name, TF_Buffer** values,
      int max_values, TF_Status* status);

=head2 TF_OperationGetAttrTensor

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

                                                       const char* attr_name,
                                                       TF_Tensor** value,
                                                       TF_Status* status);

=head2 TF_OperationGetAttrTensorList

=over 2

  Fills in `values` with the TF_Tensor values of the attribute `attr_name` of
  `oper`. `values` must point to an array of TF_Tensor* of length at least
  `max_values` (ideally set to TF_AttrMetadata.list_size from
  TF_OperationGetAttrMetadata(oper, attr_name)).
  
  The caller takes ownership of all the non-null TF_Tensor* entries in `values`
  (which can be deleted using TF_DeleteTensor(values[i])).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrTensorList(TF_Operation* oper,
                                                           const char* attr_name,
                                                           TF_Tensor** values,

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

  TF_CAPI_EXPORT extern void TF_DeleteSession(TF_Session*, TF_Status* status);

=head2 TF_SessionRun

=over 2

  Run the graph associated with the session starting with the supplied inputs
  (inputs[0,ninputs-1] with corresponding values in input_values[0,ninputs-1]).
  
  Any NULL and non-NULL value combinations for (`run_options`,
  `run_metadata`) are valid.
  
     - `run_options` may be NULL, in which case it will be ignored; or
       non-NULL, in which case it must point to a `TF_Buffer` containing the
       serialized representation of a `RunOptions` protocol buffer.
     - `run_metadata` may be NULL, in which case it will be ignored; or
       non-NULL, in which case it must point to an empty, freshly allocated
       `TF_Buffer` that may be updated to contain the serialized representation
       of a `RunMetadata` protocol buffer.
  
  The caller retains ownership of `input_values` (which can be deleted using
  TF_DeleteTensor). The caller also retains ownership of `run_options` and/or
  `run_metadata` (when not NULL) and should manually call TF_DeleteBuffer on
  them.
  
  On success, the tensors corresponding to outputs[0,noutputs-1] are placed in
  output_values[]. Ownership of the elements of output_values[] is transferred
  to the caller, which must eventually call TF_DeleteTensor on them.
  
  On failure, output_values[] contains NULLs.

=back

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

  TF_CAPI_EXPORT extern void TF_SessionRun(
      TF_Session* session,
      // RunOptions
      const TF_Buffer* run_options,
      // Input tensors
      const TF_Output* inputs, TF_Tensor* const* input_values, int ninputs,
      // Output tensors
      const TF_Output* outputs, TF_Tensor** output_values, int noutputs,
      // Target operations
      const TF_Operation* const* target_opers, int ntargets,
      // RunMetadata
      TF_Buffer* run_metadata,
      // Output status
      TF_Status*);

=head2 TF_SessionPRunSetup

=over 2

  Set up the graph with the intended feeds (inputs) and fetches (outputs) for a
  sequence of partial run calls.
  

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod


=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_Run(TF_DeprecatedSession*,
                                    const TF_Buffer* run_options,
                                    const char** input_names, TF_Tensor** inputs,
                                    int ninputs, const char** output_names,
                                    TF_Tensor** outputs, int noutputs,
                                    const char** target_oper_names, int ntargets,
                                    TF_Buffer* run_metadata, TF_Status*);

=head2 TF_PRunSetup

=over 2

  See TF_SessionPRunSetup() above.

=back

  /* From <tensorflow/c/c_api.h> */

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

  TF_CAPI_EXPORT extern TF_Library* TF_LoadLibrary(const char* library_filename,
                                                   TF_Status* status);

=head2 TF_GetOpList

=over 2

  Get the OpList of OpDefs defined in the library pointed to by lib_handle.
  
  Returns a TF_Buffer. The memory pointed to by the result is owned by
  lib_handle. The data in the buffer will be the serialized OpList proto for
  ops defined in the library.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern TF_Buffer TF_GetOpList(TF_Library* lib_handle);

=head2 TF_DeleteLibraryHandle

=over 2

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

  TF_CAPI_EXPORT extern void TF_DeleteLibraryHandle(TF_Library* lib_handle);

=head2 TF_GetAllOpList

=over 2

  Get the OpList of all OpDefs defined in this address space.
  Returns a TF_Buffer, ownership of which is transferred to the caller
  (and can be freed using TF_DeleteBuffer).
  
  The data in the buffer will be the serialized OpList proto for ops registered
  in this address space.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern TF_Buffer* TF_GetAllOpList(void);

=head2 TF_NewApiDefMap

=over 2

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod


=back

  /* From <tensorflow/c/tf_shape.h> */
  TF_CAPI_EXPORT extern void TF_DeleteShape(TF_Shape* shape);

=head2 TF_NewTensor

=over 2

  Return a new tensor that holds the bytes data[0,len-1].
  
  The data will be deallocated by a subsequent call to TF_DeleteTensor via:
       (*deallocator)(data, len, deallocator_arg)
  Clients must provide a custom deallocator function so they can pass in
  memory managed by something like numpy.
  
  May return NULL (and invoke the deallocator) if the provided data buffer
  (data, len) is inconsistent with a tensor of the given TF_DataType
  and the shape specified by (dims, num_dims).

=back

  /* From <tensorflow/c/tf_tensor.h> */
  TF_CAPI_EXPORT extern TF_Tensor* TF_NewTensor(
      TF_DataType, const int64_t* dims, int num_dims, void* data, size_t len,
      void (*deallocator)(void* data, size_t len, void* arg),
      void* deallocator_arg);
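
The Perl binding exposes this as C<< Tensor->New >>, as used by the
notebook documents in this distribution. A short sketch; the deallocator
closure here simply keeps the backing scalar alive until libtensorflow is
done with it:

  use AI::TensorFlow::Libtensorflow::Tensor;
  use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);

  my $data   = pack 'f*', 1..6;
  my $tensor = AI::TensorFlow::Libtensorflow::Tensor->New(
      FLOAT, [2,3], \$data, sub { undef $data }
  );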

=head2 TF_AllocateTensor

=over 2

  Allocate and return a new Tensor.
  
  This function is an alternative to TF_NewTensor and should be used when
  memory is allocated to pass the Tensor to the C API. The allocated memory

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod


=back

  /* From <tensorflow/c/tf_tensor.h> */
  TF_CAPI_EXPORT extern int64_t TF_Dim(const TF_Tensor* tensor, int dim_index);

=head2 TF_TensorByteSize

=over 2

  Return the size of the underlying data in bytes.

=back

  /* From <tensorflow/c/tf_tensor.h> */
  TF_CAPI_EXPORT extern size_t TF_TensorByteSize(const TF_Tensor*);

=head2 TF_TensorData

=over 2

  Return a pointer to the underlying data buffer.

=back

  /* From <tensorflow/c/tf_tensor.h> */
  TF_CAPI_EXPORT extern void* TF_TensorData(const TF_Tensor*);
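
In the Perl binding this pairs with C<TF_TensorByteSize> above as the
C<Data> and C<ByteSize> methods on a TFTensor, as used by the notebook
documents in this distribution. A short sketch (assuming C<$tensor> is an
existing L<AI::TensorFlow::Libtensorflow::Tensor>):

  my $bytes_ref = $tensor->Data;      # ScalarRef to the raw buffer
  my $len       = $tensor->ByteSize;  # size of that buffer in bytes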

=head2 TF_TensorElementCount

=over 2

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod


=back

  /* From <tensorflow/c/tf_tensor.h> */
  TF_CAPI_EXPORT extern int64_t TF_TensorElementCount(const TF_Tensor* tensor);

=head2 TF_TensorBitcastFrom

=over 2

  Copy the internal data representation of `from` to `to`. `new_dims` and
  `num_new_dims` specify the new shape of the `to` tensor, `type` specifies its
  data type. On success, *status is set to TF_OK and the two tensors share the
  same data buffer.
  
  This call requires that the `from` tensor and the given type and shape (dims
  and num_dims) are "compatible" (i.e. they occupy the same number of bytes).
  Specifically, given from_type_size = TF_DataTypeSize(TF_TensorType(from)):
  
  ShapeElementCount(dims, num_dims) * TF_DataTypeSize(type)
  
  must equal
  
  TF_TensorElementCount(from) * from_type_size
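  
  For example, a FLOAT tensor of shape [2, 3] occupies 6 * 4 = 24 bytes,
  so it may be bitcast to an INT32 tensor of shape [6] (6 * 4 = 24) or to
  a DOUBLE tensor of shape [3] (3 * 8 = 24), but not to a shape with 5
  elements.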

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

=head2 TF_DataTypeSize

=over 2

  TF_DataTypeSize returns the sizeof() for the underlying type corresponding
  to the given TF_DataType enum value. Returns 0 for variable length types
  (eg. TF_STRING) or on failure.

=back

  /* From <tensorflow/c/tf_datatype.h> */
  TF_CAPI_EXPORT extern size_t TF_DataTypeSize(TF_DataType dt);
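
The Perl binding exposes this as the C<Size> method on
L<AI::TensorFlow::Libtensorflow::DataType> values, as shown in that
module's synopsis. A short sketch:

  use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT DOUBLE STRING);

  print FLOAT->Size,  "\n";  # 4
  print DOUBLE->Size, "\n";  # 8
  print STRING->Size, "\n";  # 0 for variable-length types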

=head2 TF_NewOpDefinitionBuilder

=over 2

  Returns a newly allocated op definition builder for the given op name. The
  returned builder may be customized with the `TF_OpDefinitionBuilder...`
  functions and then registered with TensorFlow with TF_RegisterOpDefinition.
  

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern void TF_FileStat(const char* filename,
                                         TF_FileStatistics* stats,
                                         TF_Status* status);

=head2 TF_NewWritableFile

=over 2

  Creates or truncates the given filename and returns a handle to be used for
  appending data to the file. If status is TF_OK, *handle is updated and the
  caller is responsible for freeing it (see TF_CloseWritableFile).

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern void TF_NewWritableFile(const char* filename,
                                                TF_WritableFileHandle** handle,
                                                TF_Status* status);

=head2 TF_CloseWritableFile

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod


=over 2

  Appends the given bytes to the file. Any failure to do so is indicated in
  status.

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern void TF_AppendWritableFile(TF_WritableFileHandle* handle,
                                                   const char* data,
                                                   size_t length,
                                                   TF_Status* status);

=head2 TF_DeleteFile

=over 2

  Deletes the named file and indicates whether successful in *status.

=back

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

  TF_CAPI_EXPORT extern void TF_DefaultThreadOptions(TF_ThreadOptions* options);

=head2 TF_StartThread

=over 2

  Returns a new thread that is running work_func and is identified
  (for debugging/performance-analysis) by thread_name.
  
  The given param (which may be null) is passed to work_func when the thread
  starts. In this way, data may be passed from the thread back to the caller.
  
  Caller takes ownership of the result and must call TF_JoinThread on it
  eventually.

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern TF_Thread* TF_StartThread(const TF_ThreadOptions* options,
                                                  const char* thread_name,
                                                  void (*work_func)(void*),

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

=back

  /* From <tensorflow/c/kernels.h> */
  TF_CAPI_EXPORT extern void TF_OpKernelContext_Failure(TF_OpKernelContext* ctx,
                                                        TF_Status* status);

=head2 TF_ExpectedOutputDataType

=over 2

  Returns the expected output data type of the ith output. If i < 0 or
  i >= TF_NumOutputs(ctx), the program aborts.

=back

  /* From <tensorflow/c/kernels.h> */
  TF_CAPI_EXPORT extern TF_DataType TF_ExpectedOutputDataType(
      TF_OpKernelContext* ctx, int i);

=head2 TF_IsHostMemoryInput

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod


  /* From <tensorflow/c/kernels_experimental.h> */
  TF_CAPI_EXPORT extern bool TF_IsRefInput(TF_OpKernelContext* ctx, int i,
                                           TF_Status* status);

=head2 TF_AddNVariant

=over 2

  Expose higher level AddN operation for Pluggable vendors to implement
  in the plugin for Variant data types. The API takes in the context and a
  callback provided by pluggable vendor to do a Binary Add operation on the
  tensors unwrapped from the Variant tensors. The caller takes ownership of the
  `a`, `b` and `out` tensors and is responsible for freeing them with
  TF_DeleteTensor.

=back

  /* From <tensorflow/c/kernels_experimental.h> */
  TF_CAPI_EXPORT extern void TF_AddNVariant(
      TF_OpKernelContext* ctx,
      void (*binary_add_func)(TF_OpKernelContext* ctx, TF_Tensor* a, TF_Tensor* b,
                              TF_Tensor* out),
      TF_Status* status);

=head2 TF_ZerosLikeVariant

=over 2

  Expose higher level ZerosLike operation for Pluggable vendors to implement
  in the plugin for Variant data types. The API takes in the context and a
  callback provided by pluggable vendor to do a ZerosLike operation on the
  tensors unwrapped from the Variant tensors. The caller takes ownership of the
  `input` and `out` tensors and is responsible for freeing them with
  TF_DeleteTensor.

=back

  /* From <tensorflow/c/kernels_experimental.h> */
  TF_CAPI_EXPORT extern void TF_ZerosLikeVariant(
      TF_OpKernelContext* ctx,

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern int TFE_TensorDebugInfoOnDeviceNumDims(
      TFE_TensorDebugInfo* debug_info);

=head2 TFE_TensorDebugInfoOnDeviceDim

=over 2

  Returns the number of elements in dimension `dim_index`.
  Tensor representation on device can be transposed from its representation
  on host. The data contained in dimension `dim_index` on device
  can correspond to the data contained in another dimension in on-host
  representation. The dimensions are indexed using the standard TensorFlow
  major-to-minor order (slowest varying dimension first),
  not the XLA's minor-to-major order.
  On-device dimensions can be padded. TFE_TensorDebugInfoOnDeviceDim returns
  the number of elements in a dimension after padding.
  The return value was current at the time of TFE_TensorDebugInfo creation.

=back

  /* From <tensorflow/c/eager/c_api.h> */

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

                                                   const TFE_Op* value);

=head2 TFE_OpSetAttrFunctionName

=over 2

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT void TFE_OpSetAttrFunctionName(TFE_Op* op, const char* attr_name,
                                                const char* data, size_t length);

=head2 TFE_OpSetAttrTensor

=over 2

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_OpSetAttrTensor(TFE_Op* op,
                                                 const char* attr_name,

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

=over 2

  Checks whether a function is registered under `name`.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT unsigned char TFE_ContextHasFunction(TFE_Context* ctx,
                                                      const char* name);

=head2 TFE_ContextEnableRunMetadata

=over 2

  Enables tracing of RunMetadata on the ops executed from this context.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_ContextEnableRunMetadata(TFE_Context* ctx);

=head2 TFE_ContextDisableRunMetadata

=over 2

  Disables tracing of RunMetadata on the ops executed from this context.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_ContextDisableRunMetadata(TFE_Context* ctx);

=head2 TFE_ContextExportRunMetadata

=over 2

  Populates the passed-in buffer with a serialized RunMetadata protocol buffer
  containing any run metadata information accumulated so far and clears this
  information.
  If async mode is enabled, this call blocks till all currently pending ops are
  done.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_ContextExportRunMetadata(TFE_Context* ctx,
                                                          TF_Buffer* buf,
                                                          TF_Status* status);

=head2 TFE_ContextStartStep

=over 2

  Some TF ops need a step container to be set to limit the lifetime of some
  resources (mostly TensorArray and Stack, used in while loop gradients in
  graph mode). Calling this on a context tells it to start a step.

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_OpReset(TFE_Op* op_to_reset,
                                         const char* op_or_function_name,
                                         const char* raw_device_name,
                                         TF_Status* status);

=head2 TFE_ContextEnableGraphCollection

=over 2

  Enables only graph collection in RunMetadata on the functions executed from
  this context.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_ContextEnableGraphCollection(TFE_Context* ctx);

=head2 TFE_ContextDisableGraphCollection

=over 2

  Disables only graph collection in RunMetadata on the functions executed from
  this context.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_ContextDisableGraphCollection(TFE_Context* ctx);

=head2 TFE_MonitoringCounterCellIncrementBy

=over 2

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod


=head2 TFE_TensorHandleDevicePointer

=over 2

  This function will block till the operation that produces `h` has
  completed. This is only valid on local TFE_TensorHandles. The pointer
  returned will be on the device in which the TFE_TensorHandle resides (so e.g.
  for a GPU tensor this will return a pointer to GPU memory). The pointer is
  only guaranteed to be valid until TFE_DeleteTensorHandle is called on this
  TensorHandle. Only supports POD data types.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void* TFE_TensorHandleDevicePointer(TFE_TensorHandle*,
                                                            TF_Status*);

=head2 TFE_TensorHandleDeviceMemorySize

=over 2

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

  device_name. Takes ownership of the memory, and will call deleter to release
  it after TF no longer needs it or in case of error.
  
  Custom devices must use TFE_NewCustomDeviceTensorHandle instead.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_NewTensorHandleFromDeviceMemory(
      TFE_Context* ctx, const char* device_name, TF_DataType, const int64_t* dims,
      int num_dims, void* data, size_t len,
      void (*deallocator)(void* data, size_t len, void* arg),
      void* deallocator_arg, TF_Status* status);

=head2 TFE_HostAddressSpace

=over 2

  Retrieves the address space (i.e. job, replica, task) of the local host and
  saves it in the buffer.

=back

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod


  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern bool TFE_IsCustomDevice(TFE_Context* ctx,
                                                const char* device_name);

=head2 TFE_NewCustomDeviceTensorHandle

=over 2

  Creates a new TensorHandle from memory residing in a custom device. Takes
  ownership of the memory pointed to by `tensor_handle_data`, and calls
  `methods.deallocator` to release it after TF no longer needs it or in case of
  an error.
  
  This call is similar to `TFE_NewTensorHandleFromDeviceMemory`, but supports
  custom devices instead of physical devices and does not require blocking
  waiting for exact shapes.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_NewCustomDeviceTensorHandle(
      TFE_Context*, const char* device_name, TF_DataType, void* data,
      TFE_CustomDeviceTensorHandle methods, TF_Status* status);

=head2 TFE_ContextGetFunctionDef

=over 2

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_ContextGetFunctionDef(TFE_Context* ctx,

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN


=over 2

  Set configuration key and value using coordination service.
  If coordination service is enabled, the key-value will be stored on the
  leader and become accessible to all workers in the cluster.
  Currently, a config key can only be set with one value, and subsequently
  setting the same key will lead to errors.
  
  Note that the key-values are only expected to be used for cluster
  configuration data, and should not be used for storing a large amount of data
  or being accessed very frequently.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_InsertConfigKeyValue(TFE_Context* ctx,
                                                      const char* key,
                                                      const char* value,
                                                      TF_Status* status);

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

  /// Filesystem plugins can be loaded on demand by users via
  /// `Env::LoadLibrary` or during TensorFlow's startup if they are on certain
  /// paths (although this has a security risk if two plugins register for the
  /// same filesystem and the malicious one loads before the legitimate one -
  /// but we consider this to be something that users should care about and
  /// manage themselves). In both of these cases, core TensorFlow looks for
  /// the `TF_InitPlugin` symbol and calls this function.
  ///
  /// For every filesystem URI scheme that this plugin supports, the plugin must
  /// add one `TF_FilesystemPluginInfo` entry in `plugin_info->ops` and call
  /// `TF_SetFilesystemVersionMetadata` for that entry.
  ///
  /// Plugins must also initialize `plugin_info->plugin_memory_allocate` and
  /// `plugin_info->plugin_memory_free` to ensure memory allocated by plugin is
  /// freed in a compatible way.

=back

  /* From <tensorflow/c/experimental/filesystem/filesystem_interface.h> */
  TF_CAPI_EXPORT extern void TF_InitPlugin(TF_FilesystemPluginInfo* plugin_info);

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

   deleted.

=back

  /* From <tensorflow/c/experimental/saved_model/public/saved_model_api.h> */
  TF_CAPI_EXPORT extern TF_SignatureDefFunction*
  TF_GetSavedModelSignatureDefFunction(TF_SavedModel* model,
                                       const char* signature_def_key,
                                       TF_Status* status);

=head2 TF_ConcreteFunctionGetMetadata

=over 2

  Returns FunctionMetadata associated with `func`. Metadata's lifetime is
  bound to `func`, which is bound to the TF_SavedModel it was loaded from.

=back

  /* From <tensorflow/c/experimental/saved_model/public/concrete_function.h> */
  TF_CAPI_EXPORT extern TF_FunctionMetadata* TF_ConcreteFunctionGetMetadata(
      TF_ConcreteFunction* func);

=head2 TF_ConcreteFunctionMakeCallOp

=over 2

  Returns a TFE_Op suitable for executing this function. Caller must provide
  all function inputs in `inputs`, and must not add any additional inputs on
  the returned op. (i.e. don't call TFE_OpAddInput or TFE_OpAddInputList).
  The caller is responsible for deleting the returned TFE_Op. If op

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod


  Returns the TensorSpec associated with the given parameter. The caller is
  not responsible for freeing the returned TF_TensorSpec*.

=back

  /* From <tensorflow/c/experimental/saved_model/public/signature_def_param.h> */
  TF_CAPI_EXPORT extern const TF_TensorSpec* TF_SignatureDefParamTensorSpec(
      const TF_SignatureDefParam* param);

=head2 TF_SignatureDefFunctionGetMetadata

=over 2

  Returns FunctionMetadata associated with `func`. Metadata's lifetime is
  bound to `func`, which is bound to the TF_SavedModel it was loaded from.

=back

  /* From <tensorflow/c/experimental/saved_model/public/signature_def_function.h> */
  TF_CAPI_EXPORT extern TF_SignatureDefFunctionMetadata*
  TF_SignatureDefFunctionGetMetadata(TF_SignatureDefFunction* func);

=head2 TF_SignatureDefFunctionMakeCallOp

=over 2

  Returns a TFE_Op suitable for executing this function. Caller must provide
  all function inputs in `inputs`, and must not add any additional inputs on
  the returned op. (i.e. don't call TFE_OpAddInput or TFE_OpAddInputList).
  The caller is responsible for deleting the returned TFE_Op. If op
  construction fails, `status` will be non-OK and the returned pointer will be

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

=over 2

  Returns the `i`th TF_SignatureDefParam in the list.

=back

  /* From <tensorflow/c/experimental/saved_model/public/signature_def_param_list.h> */
  TF_CAPI_EXPORT extern const TF_SignatureDefParam* TF_SignatureDefParamListGet(
      const TF_SignatureDefParamList* list, int i);

=head2 TF_SignatureDefFunctionMetadataArgs

=over 2

  Retrieves the arguments of the SignatureDefFunction. The caller is not
  responsible for freeing the returned pointer.

=back

  /* From <tensorflow/c/experimental/saved_model/public/signature_def_function_metadata.h> */
  TF_CAPI_EXPORT extern const TF_SignatureDefParamList*
  TF_SignatureDefFunctionMetadataArgs(
      const TF_SignatureDefFunctionMetadata* list);

=head2 TF_SignatureDefFunctionMetadataReturns

=over 2

  Retrieves the returns of the SignatureDefFunction. The caller is not
  responsible for freeing the returned pointer.

=back

  /* From <tensorflow/c/experimental/saved_model/public/signature_def_function_metadata.h> */
  TF_CAPI_EXPORT extern const TF_SignatureDefParamList*
  TF_SignatureDefFunctionMetadataReturns(
      const TF_SignatureDefFunctionMetadata* list);

=head2 TF_EnableXLACompilation

=over 2

  When `enable` is true, set
  tensorflow.ConfigProto.OptimizerOptions.global_jit_level to ON_1, and also
  set XLA flag values to prepare for XLA compilation. Otherwise set
  global_jit_level to OFF.
  

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN


=over 2

  Fast path method that makes constructing a single scalar tensor require less
  overhead and copies.

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_NewTensorHandleFromScalar(
      TF_DataType data_type, void* data, size_t len, TF_Status* status);

=head2 TFE_EnableCollectiveOps

=over 2

  Specify the server_def that enables collective ops.
  This is different to the above function in that it doesn't create remote
  contexts, and remotely executing ops is not possible. It just enables
  communication for collective ops.

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod


use PDL;
use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT UINT8);

use FFI::Platypus::Memory qw(memcpy);
use FFI::Platypus::Buffer qw(scalar_to_pointer);

sub FloatPDLTOTFTensor {
    my ($p) = @_;
    return AI::TensorFlow::Libtensorflow::Tensor->New(
        FLOAT, [ reverse $p->dims ], $p->get_dataref, sub { undef $p }
    );
}

sub FloatTFTensorToPDL {
    my ($t) = @_;

    my $pdl = zeros(float,reverse( map $t->Dim($_), 0..$t->NumDims-1 ) );

    memcpy scalar_to_pointer( ${$pdl->get_dataref} ),
        scalar_to_pointer( ${$t->Data} ),
        $t->ByteSize;
    $pdl->upd_data;

    $pdl;
}

sub Uint8PDLTOTFTensor {
    my ($p) = @_;
    return AI::TensorFlow::Libtensorflow::Tensor->New(
        UINT8, [ reverse $p->dims ], $p->get_dataref, sub { undef $p }
    );
}

sub Uint8TFTensorToPDL {
    my ($t) = @_;

    my $pdl = zeros(byte,reverse( map $t->Dim($_), 0..$t->NumDims-1 ) );

    memcpy scalar_to_pointer( ${$pdl->get_dataref} ),
        scalar_to_pointer( ${$t->Data} ),
        $t->ByteSize;
    $pdl->upd_data;

    $pdl;
}

# image_size => [width, height] (but usually square images)
my %model_name_to_params = (
    centernet_hourglass_512x512 => {
        handle => 'https://tfhub.dev/tensorflow/centernet/hourglass_512x512/1',
        image_size => [ 512, 512 ],
    },

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod

}

use Archive::Extract;
my $ae = Archive::Extract->new( archive => $model_archive_path );
die "Could not extract archive" unless $ae->extract( to => $model_base );

my $saved_model = path($model_base)->child('saved_model.pb');
say "Saved model is in $saved_model" if -f $saved_model;

# Get the labels
my $response = $http->get('https://raw.githubusercontent.com/tensorflow/models/a4944a57ad2811e1f6a7a87589a9fc8a776e8d3c/object_detection/data/mscoco_label_map.pbtxt');

my %labels_map = $response->{content} =~ m<
(?:item \s+ \{  \s+
  \Qname:\E \s+ "[^"]+" \s+
  \Qid:\E   \s+ (\d+) \s+
  \Qdisplay_name:\E \s+ "([^"]+)" \s+
})+
>sgx;

my $label_count = List::Util::max keys %labels_map;

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod


sub load_image_to_pdl {
    my ($uri, $image_size) = @_;

    my $http = HTTP::Tiny->new;
    my $response = $http->get( $uri );
    die "Could not fetch image from $uri" unless $response->{success};
    say "Downloaded $uri";

    my $img = Imager->new;
    $img->read( data => $response->{content} );

    # Create PDL ndarray from Imager data in-memory.
    my $data;
    $img->write( data => \$data, type => 'raw' )
        or die "could not write ". $img->errstr;

    die "Image does not have 3 channels, it has @{[ $img->getchannels ]} channels"
        if $img->getchannels != 3;

    # $data is packed as PDL->dims == [w,h] with RGB pixels
    my $pdl_raw = zeros(byte, $img->getchannels, $img->getwidth, $img->getheight);
    ${ $pdl_raw->get_dataref } = $data;
    $pdl_raw->upd_data;

    $pdl_raw;
}

my @pdl_images = map {
    load_image_to_pdl(
        $images_for_test_to_uri{$_},
        $model_name_to_params{$model_name}{image_size}
    );
} ($image_names[0]);

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod

=pod

=encoding UTF-8

=head1 NAME

AI::TensorFlow::Libtensorflow::Manual::Notebook::InferenceUsingTFHubCenterNetObjDetect - Using TensorFlow to do object detection using a pre-trained model

=head1 SYNOPSIS

The following tutorial is based on the L<TensorFlow Hub Object Detection Colab notebook|https://www.tensorflow.org/hub/tutorials/tf2_object_detection>. It uses a pre-trained model based on the I<CenterNet> architecture trained on the I<COCO 2017> dat...

Some of this code is identical to that of the C<InferenceUsingTFHubMobileNetV2Model> notebook. Please look there for an explanation of that code. As stated there, this will later be wrapped up into a high-level library to hide the details behind an API.

=head1 COLOPHON

The following document is either a POD file which can additionally be run as a Perl script or a Jupyter Notebook which can be run in L<IPerl|https://p3rl.org/Devel::IPerl> (viewable online at L<nbviewer|https://nbviewer.org/github/EntropyOrg/perl-AI-...

=over

=item *

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod


  use PDL;
  use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT UINT8);
  
  use FFI::Platypus::Memory qw(memcpy);
  use FFI::Platypus::Buffer qw(scalar_to_pointer);
  
  sub FloatPDLTOTFTensor {
      my ($p) = @_;
      return AI::TensorFlow::Libtensorflow::Tensor->New(
          FLOAT, [ reverse $p->dims ], $p->get_dataref, sub { undef $p }
      );
  }
  
  sub FloatTFTensorToPDL {
      my ($t) = @_;
  
      my $pdl = zeros(float,reverse( map $t->Dim($_), 0..$t->NumDims-1 ) );
  
      memcpy scalar_to_pointer( ${$pdl->get_dataref} ),
          scalar_to_pointer( ${$t->Data} ),
          $t->ByteSize;
      $pdl->upd_data;
  
      $pdl;
  }
  
  sub Uint8PDLTOTFTensor {
      my ($p) = @_;
      return AI::TensorFlow::Libtensorflow::Tensor->New(
          UINT8, [ reverse $p->dims ], $p->get_dataref, sub { undef $p }
      );
  }
  
  sub Uint8TFTensorToPDL {
      my ($t) = @_;
  
      my $pdl = zeros(byte,reverse( map $t->Dim($_), 0..$t->NumDims-1 ) );
  
      memcpy scalar_to_pointer( ${$pdl->get_dataref} ),
          scalar_to_pointer( ${$t->Data} ),
          $t->ByteSize;
      $pdl->upd_data;
  
      $pdl;
  }

=head2 Fetch the model and labels

We are going to use an L<object detection model|https://tfhub.dev/tensorflow/centernet/hourglass_512x512/1> from TensorFlow Hub based on the CenterNet architecture. We download both the model and COCO 2017 labels.

  # image_size => [width, height] (but usually square images)
  my %model_name_to_params = (

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod

  use Archive::Extract;
  my $ae = Archive::Extract->new( archive => $model_archive_path );
  die "Could not extract archive" unless $ae->extract( to => $model_base );
  
  my $saved_model = path($model_base)->child('saved_model.pb');
  say "Saved model is in $saved_model" if -f $saved_model;

We need to download the COCO 2017 classification labels and parse out the mapping from the numeric index to the textual descriptions.

  # Get the labels
  my $response = $http->get('https://raw.githubusercontent.com/tensorflow/models/a4944a57ad2811e1f6a7a87589a9fc8a776e8d3c/object_detection/data/mscoco_label_map.pbtxt');
  
  my %labels_map = $response->{content} =~ m<
  (?:item \s+ \{  \s+
    \Qname:\E \s+ "[^"]+" \s+
    \Qid:\E   \s+ (\d+) \s+
    \Qdisplay_name:\E \s+ "([^"]+)" \s+
  })+
  >sgx;
  
  my $label_count = List::Util::max keys %labels_map;

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod

          $h->a( { href => $images_for_test_to_uri{$image_name} },
              $h->img({
                  src => $images_for_test_to_uri{$image_name},
                  alt => $image_name,
                  width => '100%',
              })
          ),
      );
  }

=head2 Download the test image and transform it into suitable input data

We now fetch the image and use C<Imager> to prepare it in the format that the model needs. Note that this model does not require the input image to be of a certain size, so no resizing or padding is required.

Then we turn the C<Imager> data into a C<PDL> ndarray. Since we just need the 3 channels of the image as they are, they can be stored directly in a C<PDL> ndarray of type C<byte>.

We concatenate the C<PDL> ndarrays here, even though the model only takes a single image at a time, in order to get an ndarray with four (4) dimensions whose last C<PDL> dimension has size one (1); this batching step is sketched after the code below.

  sub load_image_to_pdl {
      my ($uri, $image_size) = @_;
  
      my $http = HTTP::Tiny->new;
      my $response = $http->get( $uri );
      die "Could not fetch image from $uri" unless $response->{success};
      say "Downloaded $uri";
  
      my $img = Imager->new;
      $img->read( data => $response->{content} );
  
      # Create PDL ndarray from Imager data in-memory.
      my $data;
      $img->write( data => \$data, type => 'raw' )
          or die "could not write ". $img->errstr;
  
      die "Image does not have 3 channels, it has @{[ $img->getchannels ]} channels"
          if $img->getchannels != 3;
  
      # $data is packed as PDL->dims == [w,h] with RGB pixels
      my $pdl_raw = zeros(byte, $img->getchannels, $img->getwidth, $img->getheight);
      ${ $pdl_raw->get_dataref } = $data;
      $pdl_raw->upd_data;
  
      $pdl_raw;
  }
  
  my @pdl_images = map {
      load_image_to_pdl(
          $images_for_test_to_uri{$_},
          $model_name_to_params{$model_name}{image_size}
      );
  } ($image_names[0]);
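
The batching step itself uses PDL's C<cat>, which stacks the ndarrays
along a new trailing dimension (a sketch; the variable name is
illustrative):

  # [channel, width, height] x 1 image => [channel, width, height, 1]
  my $pdl_image_batched = cat(@pdl_images);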

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod


use PDL;
use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);

use FFI::Platypus::Memory qw(memcpy);
use FFI::Platypus::Buffer qw(scalar_to_pointer);

sub FloatPDLTOTFTensor {
    my ($p) = @_;
    return AI::TensorFlow::Libtensorflow::Tensor->New(
        FLOAT, [ reverse $p->dims ], $p->get_dataref, sub { undef $p }
    );
}

sub FloatTFTensorToPDL {
    my ($t) = @_;

    my $pdl = zeros(float,reverse( map $t->Dim($_), 0..$t->NumDims-1 ) );

    memcpy scalar_to_pointer( ${$pdl->get_dataref} ),
        scalar_to_pointer( ${$t->Data} ),
        $t->ByteSize;
    $pdl->upd_data;

    $pdl;
}

# Model handle
my $model_uri = URI->new( 'https://tfhub.dev/deepmind/enformer/1' );
$model_uri->query_form( 'tf-hub-format' => 'compressed' );
my $model_base = substr( $model_uri->path, 1 ) =~ s,/,_,gr;
my $model_archive_path = "${model_base}.tar.gz";
my $model_sequence_length = 393_216; # bp

# Human targets from Basenji2 dataset
my $targets_uri  = URI->new('https://raw.githubusercontent.com/calico/basenji/master/manuscripts/cross2020/targets_human.txt');
my $targets_path = 'targets_human.txt';

# Human reference genome
my $hg_uri    = URI->new("http://hgdownload.cse.ucsc.edu/goldenPath/hg38/bigZips/hg38.fa.gz");
my $hg_gz_path   = "hg38.fa.gz";
# From http://hgdownload.cse.ucsc.edu/goldenPath/hg38/bigZips/md5sum.txt
my $hg_md5_digest = "1c9dcaddfa41027f17cd8f7a82c7293b";

my $clinvar_uri  = URI->new('https://ftp.ncbi.nlm.nih.gov/pub/clinvar/vcf_GRCh38/clinvar.vcf.gz');

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN

    my ($seq) = @_;

    my $from_alphabet = "NACGT";
    my $to_alphabet   = pack "C*", 0..length($from_alphabet)-1;

    # sequences from UCSC genome have both uppercase and lowercase bases
    my $from_alphabet_tr = $from_alphabet . lc $from_alphabet;
    my $to_alphabet_tr   = $to_alphabet x 2;

    my $p = zeros(byte, bytes::length($seq));
    my $p_dataref = $p->get_dataref;
    ${ $p_dataref } = $seq;
    eval "tr/$from_alphabet_tr/$to_alphabet_tr/" for ${ $p_dataref };
    $p->upd_data;

    my $encoder = append(float(0), identity(float(length($from_alphabet)-1)) );
    say "Encoder is\n", $encoder->info, $encoder if $SHOW_ENCODER;

    my $encoded  = $encoder->index( $p->dummy(0) );

    return $encoded;
}

####

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN

=encoding UTF-8

=head1 NAME

AI::TensorFlow::Libtensorflow::Manual::Notebook::InferenceUsingTFHubEnformerGeneExprPredModel - Using TensorFlow to do gene expression prediction using a pre-trained model

=head1 SYNOPSIS

The following tutorial is based on the L<Enformer usage notebook|https://github.com/deepmind/deepmind-research/blob/master/enformer/enformer-usage.ipynb>. It uses a pre-trained model based on a transformer architecture trained as described in Avsec e...

Running the code requires an Internet connection to download the model (from Google servers) and datasets (from GitHub, UCSC, and NIH).

Some of this code is identical to that of the C<InferenceUsingTFHubMobileNetV2Model> notebook. Please look there for an explanation of that code. As stated there, this will later be wrapped up into a high-level library to hide the details behind an API.

B<NOTE>: If running this model, please be aware that

=over

=item *

the Docker image takes 3 GB or more of disk space;

=item *

the model and data take 5 GB or more of disk space.

=back

meaning that you will need a total of B<8 GB> of disk space. You may need at least B<4 GB> of free memory to run the model.

=head1 COLOPHON

The following document is either a POD file which can additionally be run as a Perl script or a Jupyter Notebook which can be run in L<IPerl|https://p3rl.org/Devel::IPerl> (viewable online at L<nbviewer|https://nbviewer.org/github/EntropyOrg/perl-AI-...

You will also need the executables C<gunzip>, C<bgzip>, and C<samtools>. Furthermore,

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN


  use PDL;
  use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);
  
  use FFI::Platypus::Memory qw(memcpy);
  use FFI::Platypus::Buffer qw(scalar_to_pointer);
  
  sub FloatPDLTOTFTensor {
      my ($p) = @_;
      return AI::TensorFlow::Libtensorflow::Tensor->New(
          FLOAT, [ reverse $p->dims ], $p->get_dataref, sub { undef $p }
      );
  }
  
  sub FloatTFTensorToPDL {
      my ($t) = @_;
  
      my $pdl = zeros(float,reverse( map $t->Dim($_), 0..$t->NumDims-1 ) );
  
      memcpy scalar_to_pointer( ${$pdl->get_dataref} ),
          scalar_to_pointer( ${$t->Data} ),
          $t->ByteSize;
      $pdl->upd_data;
  
      $pdl;
  }

=head2 Download model and data

=over

=item *

L<Enformer model|https://tfhub.dev/deepmind/enformer/1> from

  > Avsec Ž, Agarwal V, Visentin D, Ledsam JR, Grabska-Barwinska A, Taylor KR, Assael Y, Jumper J, Kohli P, Kelley DR. Effective gene expression prediction from sequence by integrating long-range interactions. I<Nat Methods>. 2021 Oct;B<18(10)>:1196...

=item *

L<Human target dataset|https://github.com/calico/basenji/tree/master/manuscripts/cross2020> from

  > Kelley DR. Cross-species regulatory sequence activity prediction. I<PLoS Comput Biol>. 2020 Jul 20;B<16(7)>:e1008050. doi: L<10.1371/journal.pcbi.1008050|https://doi.org/10.1371/journal.pcbi.1008050>. PMID: L<32687525|https://pubmed.ncbi.nlm.nih....

=item *

L<UCSC hg38 genome|https://www.ncbi.nlm.nih.gov/assembly/GCA_000001405.15>. More info at L<http://hgdownload.cse.ucsc.edu/goldenPath/hg38/bigZips/>; L<Genome Reference Consortium Human Build 38|https://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.26/>...

  > Schneider VA, Graves-Lindsay T, Howe K, Bouk N, Chen HC, Kitts PA, Murphy TD, Pruitt KD, Thibaud-Nissen F, Albracht D, Fulton RS, Kremitzki M, Magrini V, Markovic C, McGrath S, Steinberg KM, Auger K, Chow W, Collins J, Harden G, Hubbard T, Pelan ...

=item *

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN


=back

  # Model handle
  my $model_uri = URI->new( 'https://tfhub.dev/deepmind/enformer/1' );
  $model_uri->query_form( 'tf-hub-format' => 'compressed' );
  my $model_base = substr( $model_uri->path, 1 ) =~ s,/,_,gr;
  my $model_archive_path = "${model_base}.tar.gz";
  my $model_sequence_length = 393_216; # bp
  
  # Human targets from Basenji2 dataset
  my $targets_uri  = URI->new('https://raw.githubusercontent.com/calico/basenji/master/manuscripts/cross2020/targets_human.txt');
  my $targets_path = 'targets_human.txt';
  
  # Human reference genome
  my $hg_uri    = URI->new("http://hgdownload.cse.ucsc.edu/goldenPath/hg38/bigZips/hg38.fa.gz");
  my $hg_gz_path   = "hg38.fa.gz";
  # From http://hgdownload.cse.ucsc.edu/goldenPath/hg38/bigZips/md5sum.txt
  my $hg_md5_digest = "1c9dcaddfa41027f17cd8f7a82c7293b";
  
  my $clinvar_uri  = URI->new('https://ftp.ncbi.nlm.nih.gov/pub/clinvar/vcf_GRCh38/clinvar.vcf.gz');

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN

   3  3      0       ENCFF463ZLQ  encode/ENCSR000EIP  32    2      mean      DNASE:Ishikawa treated with 0.02% dimethyl sulfoxide for 1 hour      
   4  4      0       ENCFF890OGQ  encode/ENCSR000EIS  32    2      mean      DNASE:GM03348                                                        
  ------------------------------------------------------------------------------------------------------------------------------------------------

B<RESULT>:

  1

=head2 Load the model

Let's now load the model in Perl and gather its inputs and outputs into a data structure keyed by name.

  my $opt = AI::TensorFlow::Libtensorflow::SessionOptions->New;
  
  my @tags = ( 'serve' );
  my $graph = AI::TensorFlow::Libtensorflow::Graph->New;
  my $session = AI::TensorFlow::Libtensorflow::Session->LoadFromSavedModel(
      $opt, undef, $new_model_base, \@tags, $graph, undef, $s
  );
  AssertOK($s);
  

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN

          undef,
          $s
      );
      AssertOK($s);
  
      return $outputs_t[0];
  };
  
  undef;

=head2 Encoding the data

The model specifies that the way to get a sequence of DNA bases into a C<TFTensor> is to use L<one-hot encoding|https://en.wikipedia.org/wiki/One-hot#Machine_learning_and_statistics> in the order C<ACGT>.

This means that the bases are represented as vectors of length 4:

| base | vector encoding |
|------|-----------------|
| A    | C<[1 0 0 0]>    |
| C    | C<[0 1 0 0]>    |
| G    | C<[0 0 1 0]>    |
| T    | C<[0 0 0 1]>    |

The unknown base C<N> is encoded as the zero vector C<[0 0 0 0]>, which is why C<one_hot_dna> below prepends a zero column (C<float(0)>) to the identity matrix when building the encoder.

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN

      my ($seq) = @_;
  
      my $from_alphabet = "NACGT";
      my $to_alphabet   = pack "C*", 0..length($from_alphabet)-1;
  
      # sequences from UCSC genome have both uppercase and lowercase bases
      my $from_alphabet_tr = $from_alphabet . lc $from_alphabet;
      my $to_alphabet_tr   = $to_alphabet x 2;
  
      my $p = zeros(byte, bytes::length($seq));
      my $p_dataref = $p->get_dataref;
      ${ $p_dataref } = $seq;
      eval "tr/$from_alphabet_tr/$to_alphabet_tr/" for ${ $p_dataref };
      $p->upd_data;
  
      my $encoder = append(float(0), identity(float(length($from_alphabet)-1)) );
      say "Encoder is\n", $encoder->info, $encoder if $SHOW_ENCODER;
  
      my $encoded  = $encoder->index( $p->dummy(0) );
  
      return $encoded;
  }
  
  ####

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN

  $gp->end_multi;
  
  $gp->close;
  
  if( IN_IPERL ) {
      IPerl->png( bytestream => path($plot_output_path)->slurp_raw );
  }

B<DISPLAY>:

=for html <span style="display:inline-block;margin-left:1em;"><p><img						src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA+gAAAMgCAIAAAA/et9qAAAgAElEQVR4nOzdd2AUVeIH8Ddb0jshBAIEpSo1GjoIpyAgCOqd3uGdoGBBUQQFRUVBRbkTf9gOBQucqFiwUhSSgJQYCCSBkJBAet1k...

=head2 Parts of the original notebook that fall outside the scope

In the original notebook, there are several more steps that have not been ported here:

=over

=item 1.

"Compute contribution scores":

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod  view on Meta::CPAN


use PDL;
use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);

use FFI::Platypus::Memory qw(memcpy);
use FFI::Platypus::Buffer qw(scalar_to_pointer);

sub FloatPDLTOTFTensor {
    my ($p) = @_;
    return AI::TensorFlow::Libtensorflow::Tensor->New(
        FLOAT, [ reverse $p->dims ], $p->get_dataref, sub { undef $p }
    );
}

sub FloatTFTensorToPDL {
    my ($t) = @_;

    my $pdl = zeros(float,reverse( map $t->Dim($_), 0..$t->NumDims-1 ) );

    memcpy scalar_to_pointer( ${$pdl->get_dataref} ),
        scalar_to_pointer( ${$t->Data} ),
        $t->ByteSize;
    $pdl->upd_data;

    $pdl;
}

use HTML::Tiny;

sub my_table {
    my ($data, $cb) = @_;
    my $h = HTML::Tiny->new;
    $h->table( { style => 'width: 100%' },
        [
            $h->tr(
                map {
                    [
                        $h->td( $cb->($_, $h) )
                    ]
                } @$data
            )
        ]
    )
}

sub show_in_gnuplot {
    my ($p) = @_;
    require PDL::Graphics::Gnuplot;
    PDL::Graphics::Gnuplot::image( square => 1, $p );
}

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod  view on Meta::CPAN

my $model_name = 'mobilenet_v2_100_224';

say "Selected model: $model_name : $model_name_to_params{$model_name}{handle}";

my $model_uri = URI->new( $model_name_to_params{$model_name}{handle} );
$model_uri->query_form( 'tf-hub-format' => 'compressed' );
my $model_base = substr( $model_uri->path, 1 ) =~ s,/,_,gr;
my $model_archive_path = "${model_base}.tar.gz";

use constant IMAGENET_LABEL_COUNT_WITH_BG => 1001;
my $labels_uri = URI->new('https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt');
my $labels_path = ($labels_uri->path_segments)[-1];

my $http = HTTP::Tiny->new;

for my $download ( [ $model_uri  => $model_archive_path ],
                   [ $labels_uri => $labels_path        ]) {
    my ($uri, $path) = @$download;
    say "Downloading $uri to $path";
    next if -e $path;
    $http->mirror( $uri, $path );

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod  view on Meta::CPAN


sub load_image_to_pdl {
    my ($uri, $image_size) = @_;

    my $http = HTTP::Tiny->new;
    my $response = $http->get( $uri );
    die "Could not fetch image from $uri" unless $response->{success};
    say "Downloaded $uri";

    my $img = Imager->new;
    $img->read( data => $response->{content} );

    my $rescaled = imager_scale_to($img, $image_size);

    say sprintf "Rescaled image from [ %d x %d ] to [ %d x %d ]",
        $img->getwidth, $img->getheight,
        $rescaled->getwidth, $rescaled->getheight;

    my $padded = imager_paste_center_pad($rescaled, $image_size,
        # ARGB fits in 32-bits (uint32_t)
        channels => 4
    );

    say sprintf "Padded to [ %d x %d ]", $padded->getwidth, $padded->getheight;

    # Create PDL ndarray from Imager data in-memory.
    my $data;
    $padded->write( data => \$data, type => 'raw' )
        or die "could not write ". $padded->errstr;

    # $data is packed as PDL->dims == [w,h] with ARGB pixels
    #   $ PDL::howbig(ulong) # 4
    my $pdl_raw = zeros(ulong, $padded->getwidth, $padded->getheight);
    ${ $pdl_raw->get_dataref } = $data;
    $pdl_raw->upd_data;

    # Split uint32_t pixels into first dimension with 3 channels (R,G,B) with values 0-255.
    my @shifts = map 8*$_, 0..2;
    my $pdl_channels = $pdl_raw->dummy(0)
        ->and2(ulong(map 0xFF << $_, @shifts)->slice(':,*,*') )
        ->shiftright( ulong(@shifts)->slice(':,*,*') )
        ->byte;

    my $pdl_scaled = (
            # Scale to [ 0, 1 ].

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod  view on Meta::CPAN

=pod

=encoding UTF-8

=head1 NAME

AI::TensorFlow::Libtensorflow::Manual::Notebook::InferenceUsingTFHubMobileNetV2Model - Using TensorFlow to do image classification using a pre-trained model

=head1 SYNOPSIS

The following tutorial is based on the L<Image Classification with TensorFlow Hub notebook|https://github.com/tensorflow/docs/blob/master/site/en/hub/tutorials/image_classification.ipynb>. It uses a pre-trained model based on the I<MobileNet V2> arch...

Please look at the L<SECURITY note|https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md> regarding running models, as models are programs. You can also use C<saved_model_cli scan> to check for L<security-sensitive "denylisted ops"|https:/...

If you would like to visualise a model, you can use L<Netron|https://github.com/lutzroeder/netron> on the C<.pb> file.

=head1 COLOPHON

The following document is either a POD file which can additionally be run as a Perl script or a Jupyter Notebook which can be run in L<IPerl|https://p3rl.org/Devel::IPerl> (viewable online at L<nbviewer|https://nbviewer.org/github/EntropyOrg/perl-AI-...

If you are running the code, you may optionally install the L<C<tensorflow> Python package|https://www.tensorflow.org/install/pip> in order to access the C<saved_model_cli> command, but this is only used for informational purposes.

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod  view on Meta::CPAN

  use Imager;
  
  my $s = AI::TensorFlow::Libtensorflow::Status->New;
  sub AssertOK {
      die "Status $_[0]: " . $_[0]->Message
          unless $_[0]->GetCode == AI::TensorFlow::Libtensorflow::Status::OK;
      return;
  }
  AssertOK($s);

In this notebook, we will use C<PDL> to store and manipulate the ndarray data before converting it to a C<TFTensor>. The following functions help with copying the data back and forth between the two object types.

An important thing to note about the dimensions used by TensorFlow's TFTensors when compared with PDL is that the dimension lists are reversed. Consider a binary raster image with width W and height H stored in L<row-major format|https://en.wikipedia...

This difference will be explained more concretely further in the tutorial.

Future work will provide an API for more convenient wrappers which will provide an option to either copy or share the same data (if possible).

  use PDL;
  use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);
  
  use FFI::Platypus::Memory qw(memcpy);
  use FFI::Platypus::Buffer qw(scalar_to_pointer);
  
  sub FloatPDLTOTFTensor {
      my ($p) = @_;
      return AI::TensorFlow::Libtensorflow::Tensor->New(
          FLOAT, [ reverse $p->dims ], $p->get_dataref, sub { undef $p }
      );
  }
  
  sub FloatTFTensorToPDL {
      my ($t) = @_;
  
      my $pdl = zeros(float,reverse( map $t->Dim($_), 0..$t->NumDims-1 ) );
  
      memcpy scalar_to_pointer( ${$pdl->get_dataref} ),
          scalar_to_pointer( ${$t->Data} ),
          $t->ByteSize;
      $pdl->upd_data;
  
      $pdl;
  }
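
For example, a minimal round-trip sketch using these two helpers shows the reversed dimension lists:

  my $p  = sequence(float, 3, 2);    # PDL dims [3,2]
  my $t  = FloatPDLTOTFTensor($p);   # TFTensor dims [2,3]
  my $p2 = FloatTFTensorToPDL($t);   # copied back; PDL dims [3,2]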

The following is just a small helper to generate an HTML C<<< <table> >>> for output in C<IPerl>.

  use HTML::Tiny;
  
  sub my_table {
      my ($data, $cb) = @_;
      my $h = HTML::Tiny->new;
      $h->table( { style => 'width: 100%' },
          [
              $h->tr(
                  map {
                      [
                          $h->td( $cb->($_, $h) )
                      ]
                  } @$data
              )
          ]
      )
  }

This is a helper to display images in Gnuplot for debugging, but those debugging lines are commented out.

  sub show_in_gnuplot {
      my ($p) = @_;
      require PDL::Graphics::Gnuplot;

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod  view on Meta::CPAN

  1

We download the model and labels to the current directory then extract the model to a folder with the name given in C<$model_base>.

  my $model_uri = URI->new( $model_name_to_params{$model_name}{handle} );
  $model_uri->query_form( 'tf-hub-format' => 'compressed' );
  my $model_base = substr( $model_uri->path, 1 ) =~ s,/,_,gr;
  my $model_archive_path = "${model_base}.tar.gz";
  
  use constant IMAGENET_LABEL_COUNT_WITH_BG => 1001;
  my $labels_uri = URI->new('https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt');
  my $labels_path = ($labels_uri->path_segments)[-1];
  
  my $http = HTTP::Tiny->new;
  
  for my $download ( [ $model_uri  => $model_archive_path ],
                     [ $labels_uri => $labels_path        ]) {
      my ($uri, $path) = @$download;
      say "Downloading $uri to $path";
      next if -e $path;
      $http->mirror( $uri, $path );

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod  view on Meta::CPAN

  say "Saved model is in $saved_model" if -f $saved_model;
  
  my @labels = path($labels_path)->lines( { chomp => 1 });
  die "Labels should have @{[ IMAGENET_LABEL_COUNT_WITH_BG ]} items"
      unless @labels == IMAGENET_LABEL_COUNT_WITH_BG;
  say "Got labels: ", join( ", ", List::Util::head(5, @labels) ), ", etc.";

B<STREAM (STDOUT)>:

  Downloading https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/classification/5?tf-hub-format=compressed to google_imagenet_mobilenet_v2_100_224_classification_5.tar.gz
  Downloading https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt to ImageNetLabels.txt
  Saved model is in google_imagenet_mobilenet_v2_100_224_classification_5/saved_model.pb
  Got labels: background, tench, goldfish, great white shark, tiger shark, etc.

B<RESULT>:

  1

=head2 Load the model and session

We define the tag set C<[ 'serve' ]> which we will use to load the model.

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod  view on Meta::CPAN

                  ),
              )
          })
      );
  }

B<DISPLAY>:

=for html <span style="display:inline-block;margin-left:1em;"><p><table style="width: 100%"><tr><td><tt>apple</tt></td><td><a href="https://upload.wikimedia.org/wikipedia/commons/1/15/Red_Apple.jpg"><img alt="apple" src="https://upload.wikimedia.org/...

=head2 Download the test images and transform them into suitable input data

We now fetch these images and prepare them to be in the needed format by using C<Imager> to resize and add padding. Then we turn the C<Imager> data into a C<PDL> ndarray. Since the C<Imager> data is stored as 32-bits with 4 channels in the order ...

We then take all the PDL ndarrays and concatenate them. Again, note that the dimension lists for the PDL ndarray and the TFTensor are reversed.

  sub imager_paste_center_pad {
      my ($inner, $padded_sz, @rest) = @_;
  
      my $outer = Imager->new( List::Util::mesh( [qw(xsize ysize)], $padded_sz ),
          @rest
      );
  

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod  view on Meta::CPAN

  
  sub load_image_to_pdl {
      my ($uri, $image_size) = @_;
  
      my $http = HTTP::Tiny->new;
      my $response = $http->get( $uri );
      die "Could not fetch image from $uri" unless $response->{success};
      say "Downloaded $uri";
  
      my $img = Imager->new;
      $img->read( data => $response->{content} );
  
      my $rescaled = imager_scale_to($img, $image_size);
  
      say sprintf "Rescaled image from [ %d x %d ] to [ %d x %d ]",
          $img->getwidth, $img->getheight,
          $rescaled->getwidth, $rescaled->getheight;
  
      my $padded = imager_paste_center_pad($rescaled, $image_size,
          # ARGB fits in 32-bits (uint32_t)
          channels => 4
      );
  
      say sprintf "Padded to [ %d x %d ]", $padded->getwidth, $padded->getheight;
  
      # Create PDL ndarray from Imager data in-memory.
      my $data;
      $padded->write( data => \$data, type => 'raw' )
          or die "could not write ". $padded->errstr;
  
      # $data is packed as PDL->dims == [w,h] with ARGB pixels
      #   $ PDL::howbig(ulong) # 4
      my $pdl_raw = zeros(ulong, $padded->getwidth, $padded->getheight);
      ${ $pdl_raw->get_dataref } = $data;
      $pdl_raw->upd_data;
  
      # Split uint32_t pixels into first dimension with 3 channels (R,G,B) with values 0-255.
      my @shifts = map 8*$_, 0..2;
      my $pdl_channels = $pdl_raw->dummy(0)
          ->and2(ulong(map 0xFF << $_, @shifts)->slice(':,*,*') )
          ->shiftright( ulong(@shifts)->slice(':,*,*') )
          ->byte;
  
      my $pdl_scaled = (
              # Scale to [ 0, 1 ].

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod  view on Meta::CPAN

B<STREAM (STDERR)>:

=for html <span style="display:inline-block;margin-left:1em;"><pre style="display: block"><code><span style="color: #cc66cc;">AI::TensorFlow::Libtensorflow::Tensor</span><span style=""> </span><span style="color: #33ccff;">{</span><span style="">
    </span><span style="color: #6666cc;">Type           </span><span style=""> </span><span style="color: #cc66cc;">FLOAT</span><span style="">
    </span><span style="color: #6666cc;">Dims           </span><span style=""> </span><span style="color: #33ccff;">[</span><span style=""> </span><span style="color: #ff6633;">1</span><span style=""> </span><span style="color: #ff6633;">1001</span><...
    </span><span style="color: #6666cc;">NumDims        </span><span style=""> </span><span style="color: #ff6633;">2</span><span style="">
    </span><span style="color: #6666cc;">ElementCount   </span><span style=""> </span><span style="color: #ff6633;">1001</span><span style="">
</span><span style="color: #33ccff;">}</span><span style="">
</span></code></pre></span>

Then we send the batched image data. The returned scores need to be normalised using the L<softmax function|https://en.wikipedia.org/wiki/Softmax_function> with the following formula (taken from Wikipedia):

$$ {\displaystyle \sigma (\mathbf {z} )_{i}={\frac {e^{z_{i}}}{\sum _{j=1}^{K}e^{z_{j}}}}\ \ {\text{ for }}i=1,\dotsc ,K{\text{ and }}\mathbf {z} =(z_{1},\dotsc ,z_{K})\in \mathbb {R} ^{K}.} $$

  my $output_pdl_batched = FloatTFTensorToPDL($RunSession->($session, $t));
  my $softmax = sub { ( map $_/sumover($_)->dummy(0), exp($_[0]) )[0] };
  my $probabilities_batched = $softmax->($output_pdl_batched);
  p $probabilities_batched;

B<STREAM (STDERR)>:

lib/AI/TensorFlow/Libtensorflow/Manual/Quickstart.pod  view on Meta::CPAN


This provides a tour of C<libtensorflow> to help get started with using the
library.

=head1 CONVENTIONS

The library uses the UpperCamelCase naming convention for method names in
order to match the underlying C library (for compatibility with future API
changes) and to make translating code from C easier, as this is a low-level
API.

As such, constructors for objects that correspond to C<libtensorflow> data
structures are typically called C<New>. For example, a new
L<AI::TensorFlow::Libtensorflow::Status> object can be created as follows

  use AI::TensorFlow::Libtensorflow::Status;
  my $status = AI::TensorFlow::Libtensorflow::Status->New;

  ok defined $status, 'Created new Status';

These C<libtensorflow> data structures use L<destructors|perlobj/Destructors> where necessary.

=head1 OBJECT TYPES

=over 4

=item L<AI::TensorFlow::Libtensorflow::Status>

Used for error handling. Many methods take this as the final argument, which
is then checked after the method call to ensure that it completed successfully.
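
For example, the notebook tutorials in this distribution wrap this check in a small C<AssertOK> helper:

  my $s = AI::TensorFlow::Libtensorflow::Status->New;
  sub AssertOK {
      die "Status $_[0]: " . $_[0]->Message
          unless $_[0]->GetCode == AI::TensorFlow::Libtensorflow::Status::OK;
      return;
  }
  AssertOK($s);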

=item L<AI::TensorFlow::Libtensorflow::Tensor>, L<AI::TensorFlow::Libtensorflow::DataType>

A C<TFTensor> is a multi-dimensional data structure that stores the data for inputs and outputs.
Every element has the same data type,
which is defined by L<AI::TensorFlow::Libtensorflow::DataType>;
thus a C<TFTensor> is considered to be a "homogeneous data structure".
See L<Introduction to Tensors|https://www.tensorflow.org/guide/tensor> for more.
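
For example, a C<TFTensor> that owns its own memory can be allocated as follows (a minimal sketch; see L<AI::TensorFlow::Libtensorflow::Tensor> for the full API):

  use aliased 'AI::TensorFlow::Libtensorflow::Tensor' => 'Tensor';
  use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);

  # Allocate a 2-by-3 ndarray of FLOAT elements.
  my $t = Tensor->Allocate(FLOAT, [2, 3]);

  is $t->ElementCount, 6, 'allocated 2 * 3 elements';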

=item L<AI::TensorFlow::Libtensorflow::OperationDescription>, L<AI::TensorFlow::Libtensorflow::Operation>

An operation is a function that has inputs and outputs. It has a user-defined
name (such as C<MyAdder>) and library-defined type (such as C<AddN>).
L<AI::TensorFlow::Libtensorflow::OperationDescription> is used to build an
operation that will be added to a graph of other operations where those other
operations can set the operation's inputs and get the operation's outputs.
These inputs and outputs have types and dimension specifications, so that the

lib/AI/TensorFlow/Libtensorflow/Manual/Quickstart.pod  view on Meta::CPAN

=head1 TUTORIALS

The object types in L</OBJECT TYPES> are used in the following tutorials:

=over 4

=item L<InferenceUsingTFHubMobileNetV2Model|AI::TensorFlow::Libtensorflow::Manual::Notebook::InferenceUsingTFHubMobileNetV2Model>: image classification tutorial

This tutorial demonstrates using a pre-trained SavedModel and creating a L<AI::TensorFlow::Libtensorflow::Session> with the
L<LoadFromSavedModel|AI::TensorFlow::Libtensorflow::Session/LoadFromSavedModel>
method. It also demonstrates how to prepare image data for use as an input C<TFTensor>.

=item L<InferenceUsingTFHubEnformerGeneExprPredModel|AI::TensorFlow::Libtensorflow::Manual::Notebook::InferenceUsingTFHubEnformerGeneExprPredModel>: gene expression prediction tutorial

This tutorial builds on L<InferenceUsingTFHubMobileNetV2Model|AI::TensorFlow::Libtensorflow::Manual::Notebook::InferenceUsingTFHubMobileNetV2Model>.
It shows how to convert a pre-trained SavedModel from one that does not have a
usable signature to a new model that does. It also demonstrates how to prepare
genomic data for use as an input C<TFTensor>.

=back

=head1 DOCKER IMAGES

Docker (or equivalent runtime) images for the library along with all the
dependencies to run the above tutorials are available at
L<Quay.io|https://quay.io/repository/entropyorg/perl-ai-tensorflow-libtensorflow>
under various tags which can be run as

lib/AI/TensorFlow/Libtensorflow/Manual/Quickstart.pod  view on Meta::CPAN

If using the GPU Docker image for NVIDIA support, make sure that the
L<TensorFlow Docker requirements|https://www.tensorflow.org/install/docker#tensorflow_docker_requirements>
are met and that the correct flags are passed to C<docker run>, for example

C<<
  docker run --rm --gpus all [...]
>>

More information about NVIDIA Docker containers can be found in the
NVIDIA Container Toolkit
L<Installation Guide|https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html>
(specifically L<Setting up NVIDIA Container Toolkit|https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#setting-up-nvidia-container-toolkit>)
and
L<User Guide|https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html>.

=head3 Diagnostics

When using the Docker GPU image, you may come across the error

C<<
  nvidia-container-cli: initialization error: load library failed: libnvidia-ml.so.1: cannot open shared object file: no such file or directory: unknown.
>>

Make sure that you have installed the NVIDIA Container Toolkit correctly

lib/AI/TensorFlow/Libtensorflow/Operation.pm  view on Meta::CPAN

	arg 'TF_Input_struct_array' => 'consumers',
	arg 'int'                   => 'max_consumers',
] => 'int' => sub {
	my ($xs, $self, $output) = @_;
	my $max_consumers = $self->OutputNumConsumers( $output );
	my $consumers = AI::TensorFlow::Libtensorflow::Input->_adef->create( $max_consumers );
	my $count = $xs->($output, $consumers, $max_consumers);
	return AI::TensorFlow::Libtensorflow::Input->_from_array( $consumers );
});

sub _data_printer {
	my ($self, $ddp) = @_;

	my %data = (
		Name       => $self->Name,
		OpType     => $self->OpType,
		NumInputs  => $self->NumInputs,
		NumOutputs => $self->NumOutputs,
	);

	return sprintf('%s %s',
		$ddp->maybe_colorize(ref $self, 'class' ),
		$ddp->parse(\%data) );
}

1;

__END__

=pod

=encoding UTF-8

lib/AI/TensorFlow/Libtensorflow/Output.pm  view on Meta::CPAN

use overload
	'""' => \&_op_stringify;

sub _op_stringify {
	join ":", (
		( defined $_[0]->_oper ? $_[0]->oper->Name : '<undefined operation>' ),
		( defined $_[0]->index ? $_[0]->index      : '<no index>'            )
	);
}

sub _data_printer {
	my ($self, $ddp) = @_;

	my %data = (
		oper  => $self->oper,
		index => $self->index,
	);

	return sprintf('%s %s',
		$ddp->maybe_colorize(ref $self, 'class' ),
		$ddp->parse(\%data) );
};

1;

__END__

=pod

=encoding UTF-8

lib/AI/TensorFlow/Libtensorflow/Session.pm  view on Meta::CPAN


		# Output TFTensors
		arg 'TF_Output_struct_array' => 'outputs',
		arg 'TF_Tensor_array' => 'output_values',
		arg 'int'             => 'noutputs',

		# Target operations
		arg 'opaque'         => { id => 'target_opers', ffi_type => 'TF_Operation_array', maybe => 1 },
		arg 'int'            => 'ntargets',

		# RunMetadata
		arg 'opaque'      => { id => 'run_metadata', ffi_type => 'TF_Buffer', maybe => 1 },

		# Output status
		arg 'TF_Status' => 'status',
	],
	=> 'void' => sub {
		my ($xs,
			$self,
			$run_options,
			$inputs , $input_values,
			$outputs, $output_values,
			$target_opers,
			$run_metadata,
			$status ) = @_;

		die "Mismatch in number of inputs and input values" unless $#$inputs == $#$input_values;
		my $input_v_a  = AI::TensorFlow::Libtensorflow::Tensor->_as_array(@$input_values);
		my $output_v_a = AI::TensorFlow::Libtensorflow::Tensor->_adef->create( 0+@$outputs );

		$inputs  = AI::TensorFlow::Libtensorflow::Output->_as_array( @$inputs );
		$outputs = AI::TensorFlow::Libtensorflow::Output->_as_array( @$outputs );
		$xs->($self,
			$run_options,

			# Inputs
			$inputs, $input_v_a , $input_v_a->count,

			# Outputs
			$outputs, $output_v_a, $output_v_a->count,

			_process_target_opers_args($target_opers),

			$run_metadata,

			$status
		);

		@{$output_values} = @{ AI::TensorFlow::Libtensorflow::Tensor->_from_array( $output_v_a ) };
	}
);

sub _process_target_opers_args {
	my ($target_opers) = @_;

lib/AI/TensorFlow/Libtensorflow/Session.pm  view on Meta::CPAN

Outputs to get.

=item ArrayRef[TFTensor] $output_values

Reference to where the output values for C<$outputs> will be placed.

=item ArrayRef[TFOperation] $target_opers

TODO

=item Maybe[TFBuffer] $run_metadata

Optional empty C<TFBuffer> which will be updated to contain a serialized
representation of a C<RunMetadata> protocol buffer.

=item L<TFStatus|AI::TensorFlow::Libtensorflow::Lib::Types/TFStatus> $status

Status

=back

B<C API>: L<< C<TF_SessionRun>|AI::TensorFlow::Libtensorflow::Manual::CAPI/TF_SessionRun >>

=head2 PRunSetup

lib/AI/TensorFlow/Libtensorflow/Status.pm  view on Meta::CPAN


$ffi->attach( 'Message' => [ 'TF_Status' ], 'string' );

use overload
	'""' => \&_op_stringify;

sub _op_stringify {
	$_TF_CODE_INT_TO_NAME{$_[0]->GetCode};
}

sub _data_printer {
	my ($self, $ddp) = @_;
	if( $self->GetCode != AI::TensorFlow::Libtensorflow::Status::OK() ) {
		return sprintf('%s %s %s %s%s%s %s',
			$ddp->maybe_colorize( ref($self), 'class' ),
			$ddp->maybe_colorize( '{', 'brackets' ),
				$ddp->maybe_colorize( $_TF_CODE_INT_TO_NAME{$self->GetCode}, 'escaped' ),
				$ddp->maybe_colorize( '(', 'brackets' ),
					$ddp->maybe_colorize( $self->Message, 'string' ),
				$ddp->maybe_colorize( ')', 'brackets' ),
			$ddp->maybe_colorize( '}', 'brackets' ),

lib/AI/TensorFlow/Libtensorflow/Tensor.pm  view on Meta::CPAN

package AI::TensorFlow::Libtensorflow::Tensor;
# ABSTRACT: A multi-dimensional array of elements of a single data type
$AI::TensorFlow::Libtensorflow::Tensor::VERSION = '0.0.7';
use strict;
use warnings;
use namespace::autoclean;
use AI::TensorFlow::Libtensorflow::Lib qw(arg);
use FFI::Platypus::Closure;
use FFI::Platypus::Buffer qw(window);
use List::Util qw(product);

my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;

lib/AI/TensorFlow/Libtensorflow/Tensor.pm  view on Meta::CPAN




$ffi->attach( [ 'NewTensor' => 'New' ] =>
	[
		arg 'TF_DataType' => 'dtype',

		# const int64_t* dims, int num_dims
		arg 'tf_dims_buffer'   => [ qw(dims num_dims) ],

		# void* data, size_t len
		arg 'tf_tensor_buffer' => [ qw(data len) ],

		arg 'opaque'      => 'deallocator',  # tensor_deallocator_t (deallocator)
		arg 'opaque'      => 'deallocator_arg',
	],
	=> 'TF_Tensor' => sub {
		my ($xs, $class,
			$dtype, $dims, $data,
			$deallocator, $deallocator_arg,
		) = @_;
		my $deallocator_closure = $ffi->closure( $deallocator );
		$deallocator_closure->sticky;
		my $deallocator_ptr = $ffi->cast(
			'tensor_deallocator_t', 'opaque',
			$deallocator_closure );

		my $obj = $xs->(
			$dtype,
			$dims,
			$data,
			$deallocator_ptr, $deallocator_arg,
		);

		# Return early if no TF_Tensor created
		# TODO should this throw an exception instead?
		return unless $obj;

		$obj->{_deallocator_closure} = $deallocator_closure;

		$obj;

lib/AI/TensorFlow/Libtensorflow/Tensor.pm  view on Meta::CPAN

		}
	}
);

$ffi->attach( [ 'TensorData' => 'Data' ],
	[ arg 'TF_Tensor' => 'self' ],
	=> 'opaque'
	=> sub {
		my ($xs, @rest) = @_;
		my ($self) = @rest;
		my $data_p = $xs->(@rest);
		window(my $buffer, $data_p, $self->ByteSize);
		\$buffer;
	}
);

$ffi->attach( [ 'TensorByteSize' => 'ByteSize' ],
	[ arg 'TF_Tensor' => 'self' ],
	=> 'size_t'
);

$ffi->attach( [ 'TensorType' => 'Type' ],

lib/AI/TensorFlow/Libtensorflow/Tensor.pm  view on Meta::CPAN

		map {
			$ffi->cast(
				'opaque',
				'TF_Tensor',
				$array->[$_]->p)
		} 0.. $array->count - 1
	]
}

#### Data::Printer ####
sub _data_printer {
	my ($self, $ddp) = @_;

	my @data = (
		[ Type => $ddp->maybe_colorize( $self->Type, 'class' ), ],
		[ Dims =>  sprintf "%s %s %s",
				$ddp->maybe_colorize('[', 'brackets'),
				join(" ",
					map $ddp->maybe_colorize( $self->Dim($_), 'number' ),
						0..$self->NumDims-1),
				$ddp->maybe_colorize(']', 'brackets'),
		],
		[ NumDims => $ddp->maybe_colorize( $self->NumDims, 'number' ), ],
		[ ElementCount => $ddp->maybe_colorize( $self->ElementCount, 'number' ), ],
	);

	my $output;

	$output .= $ddp->maybe_colorize(ref $self, 'class' );
	$output .= ' ' . $ddp->maybe_colorize('{', 'brackets');
	$ddp->indent;
	for my $item (@data) {
		$output .= $ddp->newline;
		$output .= join " ",
			$ddp->maybe_colorize(sprintf("%-15s", $item->[0]), 'hash'),
			$item->[1];
	}
	$ddp->outdent;
	$output .= $ddp->newline;
	$output .= $ddp->maybe_colorize('}', 'brackets');

	return $output;

lib/AI/TensorFlow/Libtensorflow/Tensor.pm  view on Meta::CPAN

1;

__END__

=pod

=encoding UTF-8

=head1 NAME

AI::TensorFlow::Libtensorflow::Tensor - A multi-dimensional array of elements of a single data type

=head1 SYNOPSIS

  use aliased 'AI::TensorFlow::Libtensorflow::Tensor' => 'Tensor';
  use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);
  use List::Util qw(product);

  my $dims = [3, 3];

  # Allocate a 3 by 3 ndarray of type FLOAT

lib/AI/TensorFlow/Libtensorflow/Tensor.pm  view on Meta::CPAN

  my $scalar_t = Tensor->Allocate(FLOAT, $scalar_dims);
  is $scalar_t->ElementCount, 1, 'single element';
  is $scalar_t->ByteSize, FLOAT->Size, 'single FLOAT';

=head1 DESCRIPTION

A C<TFTensor> is an object that contains values of a
single type arranged in an n-dimensional array.

For types other than L<STRING|AI::TensorFlow::Libtensorflow::DataType/STRING>,
the data buffer is stored in L<row major order|https://en.wikipedia.org/wiki/Row-_and_column-major_order>.
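
For example (a minimal sketch, reusing the aliased C<Tensor> and C<FLOAT> from the L</SYNOPSIS>):

  # A FLOAT TFTensor with dims [2,3] stores its rows contiguously:
  #   [ 1 2 3 ]
  #   [ 4 5 6 ]   =>   buffer: 1 2 3 4 5 6
  my $row_major = pack 'f*', 1..6;
  my $t = Tensor->New(
      FLOAT, [2,3], \$row_major, sub { undef $row_major }
  );
  is $t->ElementCount, 6, 'six elements stored in row-major order';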

Of note, this is different from the definition of I<tensor> used in
mathematics and physics which can also be represented as a
multi-dimensional array in some cases, but these tensors are
defined not by the representation but by how they transform. For
more on this see

=over 4

Lim, L.-H. (2021). L<Tensors in computations|https://galton.uchicago.edu/~lekheng/work/acta.pdf>.

lib/AI/TensorFlow/Libtensorflow/Tensor.pm  view on Meta::CPAN


=back

=head1 CONSTRUCTORS

=head2 New

=over 2

C<<<
New( $dtype, $dims, $data, $deallocator, $deallocator_arg )
>>>

=back

Creates a C<TFTensor> from a data buffer C<$data> with the given specification
of data type C<$dtype> and dimensions C<$dims>.

  # Create a buffer containing 0 through 8 single-precision
  # floating-point data.
  my $data = pack("f*",  0..8);

  $t = Tensor->New(
    FLOAT, [3,3], \$data, sub { undef $data }, undef
  );

  ok $t, 'Created 3-by-3 float TFTensor';

Implementation note: if C<$dtype> is not a
L<STRING|AI::TensorFlow::Libtensorflow::DataType/STRING>
or
L<RESOURCE|AI::TensorFlow::Libtensorflow::DataType/RESOURCE>,
then the pointer for C<$data> is checked to see if it meets
TensorFlow's alignment preferences. If it does not, the
contents of C<$data> are copied into a new buffer and
C<$deallocator> is called during construction.
Otherwise the contents of C<$data> are not owned by the returned
C<TFTensor>.

B<Parameters>

=over 4

=item L<TFDataType|AI::TensorFlow::Libtensorflow::Lib::Types/TFDataType> $dtype

DataType for the C<TFTensor>.

=item L<Dims|AI::TensorFlow::Libtensorflow::Lib::Types/Dims> $dims

An C<ArrayRef> of the size of each dimension.

=item ScalarRef[Bytes] $data

Data buffer for the contents of the C<TFTensor>.

=item CodeRef $deallocator

A callback used to deallocate C<$data> which is passed the
parameters C<<
  $deallocator->( opaque $pointer, size_t $size, opaque $deallocator_arg)
>>.

=item Ref $deallocator_arg [optional, default: C<undef>]

Argument that is passed to the C<$deallocator> callback.

=back

B<Returns>

=over 4

=item L<TFTensor|AI::TensorFlow::Libtensorflow::Lib::Types/TFTensor>

A new C<TFTensor> with the given data and specification.

=back

B<C API>: L<< C<TF_NewTensor>|AI::TensorFlow::Libtensorflow::Manual::CAPI/TF_NewTensor >>

=head2 Allocate

=over 2

C<<<
Allocate( $dtype, $dims, $len )
>>>

=back

This constructs a C<TFTensor> with the memory for the C<TFTensor>
allocated and owned by the C<TFTensor> itself. Unlike with L</New>,
the allocated memory satisfies TensorFlow's alignment preferences.

See L</Data> for how to write to the data buffer.

  use AI::TensorFlow::Libtensorflow::DataType qw(DOUBLE);

  # Allocate a 2-by-2 ndarray of type DOUBLE
  $dims = [2,2];
  my $t = Tensor->Allocate(DOUBLE, $dims, product(DOUBLE->Size, @$dims));

B<Parameters>

=over 4

lib/AI/TensorFlow/Libtensorflow/Tensor.pm  view on Meta::CPAN

=item L<TFDataType|AI::TensorFlow::Libtensorflow::Lib::Types/TFDataType> $dtype

DataType for the C<TFTensor>.

=item L<Dims|AI::TensorFlow::Libtensorflow::Lib::Types/Dims> $dims

An C<ArrayRef> of the size of each dimension.

=item size_t $len [optional]

Number of bytes for the data buffer. If a value is not given,
this is calculated from C<$dtype> and C<$dims>.

=back

B<Returns>

=over 4

=item L<TFTensor|AI::TensorFlow::Libtensorflow::Lib::Types/TFTensor>

A C<TFTensor> with memory allocated for data buffer.

=back

B<C API>: L<< C<TF_AllocateTensor>|AI::TensorFlow::Libtensorflow::Manual::CAPI/TF_AllocateTensor >>

=head1 ATTRIBUTES

=head2 Data

Provides a way to access the data buffer for the C<TFTensor>. The
C<ScalarRef> that it returns is read-only, but the underlying
pointer can be accessed as long as one is careful when handling the
data (do not write to memory outside the size of the buffer).

  use AI::TensorFlow::Libtensorflow::DataType qw(DOUBLE);
  use FFI::Platypus::Buffer qw(scalar_to_pointer);
  use FFI::Platypus::Memory qw(memcpy);

  my $t = Tensor->Allocate(DOUBLE, [2,2]);

  # [2,2] identity matrix
  my $eye_data = pack 'd*', (1, 0, 0, 1);

  memcpy scalar_to_pointer(${ $t->Data }),
         scalar_to_pointer($eye_data),
         $t->ByteSize;

  ok ${ $t->Data } eq $eye_data, 'contents are the same';

B<Returns>

=over 4

=item ScalarRef[Bytes]

Returns a B<read-only> ScalarRef for the C<TFTensor>'s data
buffer.

=back

B<C API>: L<< C<TF_TensorData>|AI::TensorFlow::Libtensorflow::Manual::CAPI/TF_TensorData >>

=head2 ByteSize

B<Returns>

=over 4

=item size_t

Returns the number of bytes for the C<TFTensor>'s data buffer.

=back

B<C API>: L<< C<TF_TensorByteSize>|AI::TensorFlow::Libtensorflow::Manual::CAPI/TF_TensorByteSize >>

=head2 Type

B<Returns>

=over 4

=item L<TFDataType|AI::TensorFlow::Libtensorflow::Lib::Types/TFDataType>

The C<TFTensor>'s data type.

=back

B<C API>: L<< C<TF_TensorType>|AI::TensorFlow::Libtensorflow::Manual::CAPI/TF_TensorType >>

=head2 NumDims

B<Returns>

=over 4

maint/process-capi.pl  view on Meta::CPAN

		my @order = $self->header_order->@*;
		my @sorted = iikeysort {
				my $item = $_;
				my $first = firstidx { $item =~ $_ } @order;
				($first, length $_);
			} $self->header_paths->@*;
		\@sorted;
	};

	method _process_re($re) {
		my @data;
		my @sorted = $self->sorted_header_paths->@*;
		for my $file (@sorted) {
			my $txt = $file->slurp_utf8;
			while( $txt =~ /$re/g ) {
				push @data, {
					%+,
					file => $file->relative($self->root_path),
					pos  => pos($txt),
				};
			}
		}
		\@data;
	}

	lazy fdecl_data => method() {
		my $re = $self->fdecl_re;
		my $data = $self->_process_re($re);

		# Used for defensive assertion:
		# These are mostly constructors that return a value
		# (i.e., not void) but also take a function pointer as a
		# parameter.
		my %TF_func_ptr = map { ($_ => 1) } qw(
			TF_NewTensor
			TF_StartThread
			TF_NewKernelBuilder
			TFE_NewTensorHandleFromDeviceMemory
		);
		for my $data (@$data) {
			my ($func_name) = $data->{fdecl} =~ m/ \A [^(]*? (\w+) \s* \( (?!\s*\*) /xs;
			die "Could not extract function name" unless $func_name;

			# defensive assertion for parsing
			my $paren_count = () = $data->{fdecl} =~ /[\(]/sg;
			warn "Got $func_name, but more than one open parenthesis [(] in\n@{[ $data->{fdecl} =~ s/^/  /gr ]}\n"
				if(
					$paren_count != 1
					&& !(
						$data->{fdecl} =~ /^ TF_CAPI_EXPORT \s+ extern \s+ void \s+ \Q@{[ $func_name ]}\E \s* \(/xs
						||
						exists $TF_func_ptr{$func_name}
					)
				);

			$data->{func_name} = $func_name;
		}

		$data;
	};

	method generate_capi_funcs() {
		my $pod = '';

		my @data = $self->fdecl_data->@*;

		for my $data (@data) {
			if( $data->{fdecl} =~ /TF_Version/ ) {
				$data->{comment} =~ s,^// -+$,,m;
			}

			my @tags;
			push @tags, 'experimental' if( $data->{file} =~ /experimental/ );
			push @tags, 'eager' if( $data->{file} =~ /\beager\b/ );

			my $text_decomment = $data->{comment} =~ s,^//(?: |$),,mgr;
			$text_decomment =~ s,\A\n+,,sg;
			$text_decomment =~ s,\n+\Z,,sg;

			my $comment_pod = <<~EOF;
			=over 2

			@{[ $text_decomment =~ s/^/  /mgr ]}

			=back

			EOF

			my $code_pod = <<~CODE =~ s/^/  /mgr;
			/* From <@{[ $data->{file} ]}> */
			@{[ $data->{fdecl} ]}
			CODE

			my $func_pod = <<~EOF;

			=head2 @{[ $data->{func_name} ]}

			$comment_pod

			$code_pod

			EOF

			$pod .= $func_pod;
		}

maint/process-capi.pl  view on Meta::CPAN


		=cut

		EOF

		my $output = $self->lib_path->child(module_notional_filename($doc_name) =~ s/\.pm$/.pod/r );
		$output->parent->mkpath;
		$output->spew_utf8($pod);
	}

	lazy typedef_struct_data => method() {
		my $re = qr{
			(?<opaque>
				^
				typedef      \s+
				struct       \s+
				(?<name>\w+) \s+
				\k<name>     \s*
				;
			)
			|

maint/process-capi.pl  view on Meta::CPAN

				[^\}]+
				\}           \s+
				\k<name>     \s*
				;
			)
		}xm;
		$self->_process_re($re);
	};

	method check_types() {
		my @data = $self->typedef_struct_data->@*;
		my %types = map { $_ => 1 } AI::TensorFlow::Libtensorflow::Lib->ffi->types;
		my %part;
		@part{qw(todo done)} = part { exists $types{$_} } uniq map { $_->{name} } @data;
		use DDP; p %part;
	}

	method check_functions($first_arg = undef) {
		my $functions = AI::TensorFlow::Libtensorflow::Lib->ffi->_attached_functions;
		my @dupes = map { $_->[0]{c} }
			grep { @$_ != 1 } values $functions->%*;
		die "Duplicated functions @dupes" if @dupes;

		my @data = $self->fdecl_data->@*;

		say <<~STATS;
		Statistics:
		==========
		Attached functions: @{[ scalar keys %$functions ]}
		Total CAPI functions: @{[ scalar @data ]}
		STATS

		my $first_missing_function = first {
			! exists $functions->{$_->{func_name}}
			&&
			(
				! defined $first_arg ||
				$_->{fdecl} =~ /\(\s*\Q$first_arg\E\s*\*/
			)
		} @data;
		say "Missing function:";
		use DDP; p $first_missing_function;
	}

	method run() {
		$self->generate_capi_funcs;
		#$self->check_types;
		$self->check_functions;
	}

maint/process-notebook.pl  view on Meta::CPAN

rm $DST || true;

#if grep -C5 -P '\s+\\n' $SRC -m 2; then
	#echo -e "Notebook $SRC has whitespace"
	#exit 1
#fi

## Run the notebook
#jupyter nbconvert --execute --inplace $SRC

## Clean up metadata (changed by the previous nbconvert --execute)
## See more at <https://timstaley.co.uk/posts/making-git-and-jupyter-notebooks-play-nice/>
jq --indent 1     '
    del(.cells[].metadata | .execution)
    ' $SRC | sponge $SRC

### Notice about generated file
echo -e "# PODNAME: $PODNAME\n\n" | sponge -a $DST
echo -e "## DO NOT EDIT. Generated from $SRC using $GENERATOR.\n" | sponge -a $DST

## Add code to $DST
jupyter nbconvert $SRC --to script --stdout | sponge -a $DST;

## Add

maint/process-notebook.pl  view on Meta::CPAN

##
##     requires '...';
##     requires '...';
scan-perl-prereqs-nqlite --cpanfile $DST | perl -M5';print qq|=head1 CPANFILE\n\n|' -plE '$_ = q|  | . $_;' | sponge -a $DST ;

## Check output (if on TTY)
if [ -t 0 ]; then
	perldoc $DST;
fi

## Check and run script in the directory of the original (e.g., to get data
## files).
perl -c $DST
#&& perl -MCwd -MPath::Tiny -E '
	#my $nb = path(shift @ARGV);
	#my $script = path(shift @ARGV)->absolute;
	#chdir $nb->parent;
	#do $script;
	#' $SRC $DST
BASH

t/02_load_graph.t  view on Meta::CPAN

use lib 't/lib';
use TF_TestQuiet;
use AI::TensorFlow::Libtensorflow;
use Path::Tiny;

subtest "Load graph" => sub {
	my $model_file = path("t/models/graph.pb");
	my $ffi = FFI::Platypus->new( api => 1 );

	my $data = $model_file->slurp_raw;
	my $buf = AI::TensorFlow::Libtensorflow::Buffer->NewFromString(\$data);
	ok $buf;

	my $graph = AI::TensorFlow::Libtensorflow::Graph->New;
	my $status = AI::TensorFlow::Libtensorflow::Status->New;
	my $opts = AI::TensorFlow::Libtensorflow::ImportGraphDefOptions->New;

	$graph->ImportGraphDef( $buf, $opts, $status );

	if( $status->GetCode == AI::TensorFlow::Libtensorflow::Status::OK ) {
		print "Load graph success\n";

t/03_create_tftensor.t  view on Meta::CPAN

use warnings;

use lib 't/lib';
use TF_TestQuiet;
use TF_Utils;

use AI::TensorFlow::Libtensorflow;
use PDL;

subtest "Create a TFTensor" => sub {
	my $p_data = sequence(float, 1, 5, 12);
	my $t = TF_Utils::FloatPDLToTFTensor($p_data);

	is $t->NumDims, 3, '3D TFTensor';

	is $t->Dim(0), 1 , 'dim[0] = 1';
	is $t->Dim(1), 5 , 'dim[1] = 5';
	is $t->Dim(2), 12, 'dim[2] = 12';

	pass;
};

t/04_allocate_tftensor.t  view on Meta::CPAN

use PDL::Core ':Internal';

use FFI::Platypus::Memory;
use FFI::Platypus::Buffer qw(scalar_to_pointer);

subtest "Allocate a TFTensor" => sub {
	my $ffi = FFI::Platypus->new( api => 1 );

	my @dims = ( 1, 5, 12 );
	my $ndims = scalar @dims;
	my $data_size_bytes = product(howbig(float), @dims);
	my $t = AI::TensorFlow::Libtensorflow::Tensor->Allocate(
		FLOAT, \@dims, $data_size_bytes,
	);

	ok $t && $t->Data, 'Allocated TFTensor';

	my $pdl = sequence(float, @dims );

	memcpy scalar_to_pointer(${ $t->Data }),
		scalar_to_pointer(${ $pdl->get_dataref }),
		List::Util::min( $t->ByteSize, $data_size_bytes );

	is $t->Type, AI::TensorFlow::Libtensorflow::DataType::FLOAT, 'Check Type is FLOAT';
	is $t->NumDims, $ndims, 'Check NumDims';
};

done_testing;

t/05_session_run.t  view on Meta::CPAN

subtest "Session run" => sub {
	my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
	my $graph = TF_Utils::LoadGraph('t/models/graph.pb');
	ok $graph, 'graph';
	my $input_op = Output->New({
		oper => $graph->OperationByName( 'input_4' ),
		index => 0 });
	die "Can not init input op" unless $input_op;

	use PDL;
	my $p_data = float(
		-0.4809832, -0.3770838, 0.1743573, 0.7720509, -0.4064746, 0.0116595, 0.0051413, 0.9135732, 0.7197526, -0.0400658, 0.1180671, -0.6829428,
		-0.4810135, -0.3772099, 0.1745346, 0.7719303, -0.4066443, 0.0114614, 0.0051195, 0.9135003, 0.7196983, -0.0400035, 0.1178188, -0.6830465,
		-0.4809143, -0.3773398, 0.1746384, 0.7719052, -0.4067171, 0.0111654, 0.0054433, 0.9134697, 0.7192584, -0.0399981, 0.1177435, -0.6835230,
		-0.4808300, -0.3774327, 0.1748246, 0.7718700, -0.4070232, 0.0109549, 0.0059128, 0.9133330, 0.7188759, -0.0398740, 0.1181437, -0.6838635,
		-0.4807833, -0.3775733, 0.1748378, 0.7718275, -0.4073670, 0.0107582, 0.0062978, 0.9131795, 0.7187147, -0.0394935, 0.1184392, -0.6840039,
	);
	$p_data->reshape(1,5,12);

	my $input_tensor = AI::TensorFlow::Libtensorflow::Tensor->New(
		FLOAT, [ $p_data->dims ], $p_data->get_dataref,
		sub { undef $p_data }
	);


	my $output_op = Output->New({
		oper => $graph->OperationByName( 'output_node0'),
		index => 0 } );
	die "Can not init output op" unless $output_op;

	my $status = AI::TensorFlow::Libtensorflow::Status->New;
	my $options = AI::TensorFlow::Libtensorflow::SessionOptions->New;

t/05_session_run.t  view on Meta::CPAN

		undef,
		undef,
		$status
	);

	die "run failed" unless $status->GetCode == AI::TensorFlow::Libtensorflow::Status::OK;

	my $output_tensor = $output_values[0];
	my $output_pdl = zeros(float,( map $output_tensor->Dim($_), 0..$output_tensor->NumDims-1) );

	memcpy scalar_to_pointer( ${$output_pdl->get_dataref} ),
		scalar_to_pointer( ${$output_tensor->Data} ),
		$output_tensor->ByteSize;
	$output_pdl->upd_data;

	my $expected_pdl = float( -0.409784, -0.302862, 0.0152587, 0.690515 )->transpose;

	ok approx( $output_pdl, $expected_pdl )->all, 'got expected data';

	$session->Close($status);

	is $status->GetCode, AI::TensorFlow::Libtensorflow::Status::OK, 'status ok';
};

done_testing;

t/lib/TF_Utils.pm  view on Meta::CPAN

use FFI::Platypus::Buffer;
use Test2::V0;

my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;

sub ScalarStringTensor {
	my ($str, $status) = @_;
	#my $tensor = AI::TensorFlow::Libtensorflow::Tensor->_Allocate(
		#AI::TensorFlow::Libtensorflow::DType::STRING,
		#\@dims, $ndims,
		#$data_size_bytes,
	#);
	...
}

sub ReadBufferFromFile {
	my ($path) = @_;
	my $data = path($path)->slurp_raw;
	my $buffer = AI::TensorFlow::Libtensorflow::Buffer->NewFromString(
		\$data
	);
}

sub LoadGraph {
	my ($path, $checkpoint_prefix, $status) = @_;
	my $buffer = ReadBufferFromFile( $path );

	$status //= AI::TensorFlow::Libtensorflow::Status->New;

	my $graph = AI::TensorFlow::Libtensorflow::Graph->New;

t/lib/TF_Utils.pm  view on Meta::CPAN

	}
}

sub FloatPDLToTFTensor {
	my ($p) = @_;
	my $pdl_closure = sub {
		my ($pointer, $size, $pdl_addr) = @_;
		# noop
	};

	my $p_dataref = $p->get_dataref;
	my $tensor = AI::TensorFlow::Libtensorflow::Tensor->New(
		FLOAT, [ $p->dims ], $p_dataref, $pdl_closure
	);

	$tensor;
}

sub Placeholder {
	my ($graph, $status, $name, $dtype) = @_;
	$name ||= 'feed';
	$dtype ||= INT32;
	my $desc = AI::TensorFlow::Libtensorflow::OperationDescription->New($graph, 'Placeholder', $name);

t/lib/TF_Utils.pm  view on Meta::CPAN

);

use FFI::Platypus::Buffer qw(scalar_to_pointer);
use FFI::Platypus::Memory qw(memcpy);

sub ScalarConst {
	my ($graph, $status, $name, $dtype, $value) = @_;
	$name ||= 'scalar';
	my $t = AI::TensorFlow::Libtensorflow::Tensor->Allocate($dtype, []);
	die "Pack format for $dtype is unknown" unless exists $dtype_to_pack{$dtype};
	my $data = pack $dtype_to_pack{$dtype} . '*', $value;
	memcpy scalar_to_pointer(${ $t->Data }),
		 scalar_to_pointer($data),
		 $t->ByteSize;
	return Const($graph, $status, $name, $t);
}


use AI::TensorFlow::Libtensorflow::Lib::Types qw(TFOutput TFOutputFromTuple);
use Types::Standard qw(HashRef);

my $TFOutput = TFOutput->plus_constructors(
		HashRef, 'New'

t/lib/TF_Utils.pm  view on Meta::CPAN

	my ($self, $args) = @_;
	my $s = delete $args->{status};
	my $opts = AI::TensorFlow::Libtensorflow::SessionOptions->New;
	$opts->EnableXLACompilation( $self->use_XLA );
	if( ! exists $args->{session} ) {
		$self->session( AI::TensorFlow::Libtensorflow::Session->New( $self->graph, $opts, $s ) );
	}
  }

  sub SetInputs {
	my ($self, @data) = @_;
	my (@inputs, @input_values);
	for my $pair (@data) {
		my ($oper, $t) = @$pair;
		push @inputs, AI::TensorFlow::Libtensorflow::Output->New({ oper => $oper, index => 0 });
		push @input_values, $t;
	}
	$self->_inputs( \@inputs );
	$self->_input_values( \@input_values );
  }

  sub SetOutputs {
	my ($self, @data) = @_;
	my @outputs;
	my @output_values;
	for my $oper (@data) {
		push @outputs, AI::TensorFlow::Libtensorflow::Output->New({ oper => $oper, index => 0 });
	}
	$self->_outputs( \@outputs );
	$self->_output_values( \@output_values );
  }

  sub SetTargets {
	my ($self, @data) = @_;
	$self->_targets( \@data );
  }

  sub Run {
	my ($self, $s) = @_;
	if( @{ $self->_inputs } != @{ $self->_input_values } ) {
		die "Call SetInputs() before Run()";
	}

	$self->session->Run(
		undef,

t/upstream/CAPI/003_Tensor.t  view on Meta::CPAN

	my @dims = (2, 3);

	note "Creating tensor";
	my $deallocator_called = 0;
	my $t = Tensor->New(FLOAT, \@dims, \$values, sub {
			my ($pointer, $size, $arg) = @_;
			$deallocator_called = 1;
			AI::TensorFlow::Libtensorflow::Lib::_Alloc->_tf_aligned_free($pointer);
		});

	# Deallocator can be called on this data already because it might not
	# fit the alignment needed by TF.
	#
	# It should not be called in this case because aligned_alloc() is used.
	ok ! $deallocator_called, 'deallocator not called yet';

	is $t->Type, 'FLOAT', 'FLOAT TF_Tensor';
	is $t->NumDims, 2, '2D TF_Tensor';
	is $t->Dim(0), $dims[0], 'dim 0';
	is $t->Dim(1), $dims[1], 'dim 1';
	is $t->ByteSize, $num_bytes, 'bytes';
	is scalar_to_pointer(${$t->Data}), scalar_to_pointer($values),
		'data at same pointer address';
	undef $t;
	ok $deallocator_called, 'deallocated';
};

done_testing;

t/upstream/CAPI/004_MalformedTensor.t  view on Meta::CPAN

use Test2::V0;
use lib 't/lib';
use TF_TestQuiet;
use aliased 'AI::TensorFlow::Libtensorflow';
use aliased 'AI::TensorFlow::Libtensorflow::Tensor';
use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);

subtest "(CAPI, MalformedTensor)" => sub {
	my $noop_dealloc = sub {};
	my $t = Tensor->New(FLOAT, [], \undef, $noop_dealloc);
	ok ! defined $t, 'No data passed in so no tensor created';
};

done_testing;


