t/upstream/CAPI/029_SavedModel.t
t/upstream/CAPI/030_SavedModelNullArgsAreValid.t
t/upstream/CAPI/031_DeletingNullPointerIsSafe.t
t/upstream/CAPI/032_TestBitcastFrom_Reshape.t
t/upstream/CAPI/033_TestFromProto.t
t/upstream/CAPI/034_TestTensorAligned.t
t/upstream/CAPI/035_TestTensorIsNotAligned.t
t/upstream/CAPI/036_MessageBufferConversion.t
t/upstream/CAPI/037_TestTensorNonScalarBytesAllocateDelete.t
t/upstream/CAPI/TEMPLATE
t/upstream/tensorflow/cc/saved_model/testdata/half_plus_two/00000123/assets/foo.txt
t/upstream/tensorflow/cc/saved_model/testdata/half_plus_two/00000123/saved_model.pb
t/upstream/tensorflow/cc/saved_model/testdata/half_plus_two/00000123/variables/variables.data-00000-of-00001
t/upstream/tensorflow/cc/saved_model/testdata/half_plus_two/00000123/variables/variables.index
weaver.ini
xt/author/critic.t
xt/author/pod-linkcheck.t
xt/author/pod-snippets.t
lib/AI/TensorFlow/Libtensorflow/Buffer.pm
=head2 NewFromString
=over 2
C<<<
NewFromString( $proto )
>>>
=back
Makes a copy of the input and sets an appropriate deallocator. Useful for
passing in read-only, input protobufs.
use Test2::V0;
use bytes ();
use aliased 'AI::TensorFlow::Libtensorflow::Buffer'; # 'Buffer' aliases the full class name
my $data = 'bytes';
my $buffer = Buffer->NewFromString(\$data);
ok $buffer, 'create buffer from string';
is $buffer->length, bytes::length($data), 'same length as string';
B<Parameters>
=over 4
lib/AI/TensorFlow/Libtensorflow/Lib/FFIType/TFPtrSizeScalar.pm
package AI::TensorFlow::Libtensorflow::Lib::FFIType::TFPtrSizeScalar;
# ABSTRACT: Type to hold pointer and size in a scalar (input only)
$AI::TensorFlow::Libtensorflow::Lib::FFIType::TFPtrSizeScalar::VERSION = '0.0.7';
use strict;
use warnings;
use FFI::Platypus;
use FFI::Platypus::API qw(
arguments_set_pointer
arguments_set_uint32
arguments_set_uint64
);
use FFI::Platypus::Buffer qw( scalar_to_buffer );
my @stack;
*arguments_set_size_t
= FFI::Platypus->new( api => 2 )->sizeof('size_t') == 4
? \&arguments_set_uint32
: \&arguments_set_uint64;
sub perl_to_native {
my($pointer, $size) = scalar_to_buffer($_[0]);
push @stack, [ $pointer, $size ];
arguments_set_pointer $_[1], $pointer;
arguments_set_size_t($_[1]+1, $size);
}
sub perl_to_native_post {
my($pointer, $size) = @{ pop @stack };
();
}
sub ffi_custom_type_api_1
{
{
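# --- Usage sketch (illustrative, not part of this module) ---
# Registering this custom type with FFI::Platypus lets one Perl scalar
# supply both the (pointer, size_t) C arguments. The type alias and the
# attached function name below are hypothetical.
use FFI::Platypus 2.00;
my $ffi = FFI::Platypus->new( api => 2 );
$ffi->load_custom_type(
  'AI::TensorFlow::Libtensorflow::Lib::FFIType::TFPtrSizeScalar'
    => 'tf_ptr_size_scalar'
);
$ffi->attach( some_c_function => [ 'tf_ptr_size_scalar' ] => 'void' );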
lib/AI/TensorFlow/Libtensorflow/Lib/FFIType/TFPtrSizeScalarRef.pm
package AI::TensorFlow::Libtensorflow::Lib::FFIType::TFPtrSizeScalarRef;
# ABSTRACT: Type to hold pointer and size in a scalar reference
$AI::TensorFlow::Libtensorflow::Lib::FFIType::TFPtrSizeScalarRef::VERSION = '0.0.7';
use strict;
use warnings;
use FFI::Platypus;
use FFI::Platypus::Buffer qw(scalar_to_buffer);
use FFI::Platypus::API qw(
arguments_set_pointer
arguments_set_uint32
arguments_set_uint64
);
my @stack;
# See FFI::Platypus::Type::PointerSizeBuffer
*arguments_set_size_t
= FFI::Platypus->new( api => 2 )->sizeof('size_t') == 4
? \&arguments_set_uint32
: \&arguments_set_uint64;
sub perl_to_native {
my ($value, $i) = @_;
die "Value must be a ScalarRef" unless ref $value eq 'SCALAR';
my ($pointer, $size) = defined $$value
? scalar_to_buffer($$value)
: (0, 0);
push @stack, [ $value, $pointer, $size ];
arguments_set_pointer( $i , $pointer);
arguments_set_size_t( $i+1, $size);
}
sub perl_to_native_post {
pop @stack;
();
}
sub ffi_custom_type_api_1 {
{
'native_type' => 'opaque',
lib/AI/TensorFlow/Libtensorflow/Lib/FFIType/Variant/PackableArrayRef.pm
package AI::TensorFlow::Libtensorflow::Lib::FFIType::Variant::PackableArrayRef;
# ABSTRACT: ArrayRef to pack()'ed scalar argument with size argument (as int)
$AI::TensorFlow::Libtensorflow::Lib::FFIType::Variant::PackableArrayRef::VERSION = '0.0.7';
use strict;
use warnings;
use FFI::Platypus::Buffer qw(scalar_to_buffer buffer_to_scalar);
use FFI::Platypus::API qw( arguments_set_pointer arguments_set_sint32 );
use Package::Variant;
use Module::Runtime 'module_notional_filename';
sub make_variant {
my ($class, $target_package, $package, %arguments) = @_;
die "Invalid pack type, must be single character"
unless $arguments{pack_type} =~ /^.$/;
lib/AI/TensorFlow/Libtensorflow/Lib/FFIType/Variant/PackableArrayRef.pm
my $perl_to_native = install perl_to_native => sub {
my ($value, $i) = @_;
die "Value must be an ArrayRef"
unless defined $value && ref $value eq 'ARRAY';
my $data = pack $arguments{pack_type} . '*', @$value;
my $n = scalar @$value;
my ($pointer, $size) = scalar_to_buffer($data);
push @stack, [ \$data, $pointer, $size ];
arguments_set_pointer( $i , $pointer);
arguments_set_sint32( $i+1, $n);
};
my $perl_to_native_post = install perl_to_native_post => sub {
my ($data_ref, $pointer, $size) = @{ pop @stack };
$$data_ref = buffer_to_scalar($pointer, $size);
@{$_[0]} = unpack $arguments{pack_type} . '*', $$data_ref;
();
};
install ffi_custom_type_api_1 => sub {
{
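# --- Usage sketch (illustrative) ---
# Package::Variant exports a generator named after this package; calling it
# builds a type class for a given pack() template letter. The variant name,
# pack letter, type alias, and $ffi object below are hypothetical.
use AI::TensorFlow::Libtensorflow::Lib::FFIType::Variant::PackableArrayRef;
my $Int32ArrayRef = PackableArrayRef( 'Int32ArrayRef', pack_type => 'l' );
$ffi->load_custom_type( $Int32ArrayRef => 'tf_int32_list' );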
lib/AI/TensorFlow/Libtensorflow/Lib/FFIType/Variant/PackableMaybeArrayRef.pm
package AI::TensorFlow::Libtensorflow::Lib::FFIType::Variant::PackableMaybeArrayRef;
# ABSTRACT: Maybe[ArrayRef] to pack()'ed scalar argument with size argument (as int) (size is -1 if undef)
$AI::TensorFlow::Libtensorflow::Lib::FFIType::Variant::PackableMaybeArrayRef::VERSION = '0.0.7';
use strict;
use warnings;
use Scalar::Util ();
use FFI::Platypus::Buffer qw(scalar_to_buffer buffer_to_scalar);
use FFI::Platypus::API qw( arguments_set_pointer arguments_set_sint32 );
use Package::Variant;
use Module::Runtime 'module_notional_filename';
sub make_variant {
my ($class, $target_package, $package, %arguments) = @_;
die "Invalid pack type, must be single character"
unless $arguments{pack_type} =~ /^.$/;
lib/AI/TensorFlow/Libtensorflow/Lib/FFIType/Variant/PackableMaybeArrayRef.pm
my $perl_to_native = install perl_to_native => sub {
my ($value, $i) = @_;
if( defined $value ) {
die "Value must be an ArrayRef" unless ref $value eq 'ARRAY';
my $data = pack $arguments{pack_type} . '*', @$value;
my $n = scalar @$value;
my ($pointer, $size) = scalar_to_buffer($data);
push @stack, [ \$data, $pointer, $size ];
arguments_set_pointer( $i , $pointer);
arguments_set_sint32( $i+1, $n);
} else {
my $data = undef;
my $n = -1;
my ($pointer, $size) = (0, 0);
push @stack, [ \$data, $pointer, $size ];
arguments_set_pointer( $i , $pointer);
arguments_set_sint32( $i+1, $n);
}
};
my $perl_to_native_post = install perl_to_native_post => sub {
my ($data_ref, $pointer, $size) = @{ pop @stack };
if( ! Scalar::Util::readonly($_[0]) ) {
$$data_ref = buffer_to_scalar($pointer, $size);
@{$_[0]} = unpack $arguments{pack_type} . '*', $$data_ref;
}
();
lib/AI/TensorFlow/Libtensorflow/Lib/FFIType/Variant/RecordArrayRef.pm
package AI::TensorFlow::Libtensorflow::Lib::FFIType::Variant::RecordArrayRef;
# ABSTRACT: Turn FFI::Platypus::Record into packed array (+ size)?
$AI::TensorFlow::Libtensorflow::Lib::FFIType::Variant::RecordArrayRef::VERSION = '0.0.7';
use strict;
use warnings;
use FFI::Platypus::Buffer qw(scalar_to_buffer buffer_to_scalar);
use FFI::Platypus::API qw( arguments_set_pointer arguments_set_sint32 );
use Package::Variant;
use Module::Runtime qw(module_notional_filename is_module_name);
sub make_variant {
my ($class, $target_package, $package, %arguments) = @_;
die "Missing/invalid module name: $arguments{record_module}"
unless is_module_name($arguments{record_module});
lib/AI/TensorFlow/Libtensorflow/Lib/FFIType/Variant/RecordArrayRef.pm
my @stack;
my $perl_to_native = install perl_to_native => sub {
my ($value, $i) = @_;
my $data = pack "(a*)*", map $$_, @$value;
my($pointer, $size) = scalar_to_buffer($data);
my $n = @$value;
my $sizeof = $size / $n;
push @stack, [ \$data, $n, $pointer, $size , $sizeof ];
arguments_set_pointer $i , $pointer;
arguments_set_sint32 $i+1, $n if $with_size;
};
my $perl_to_native_post = install perl_to_native_post => sub {
my($data_ref, $n, $pointer, $size, $sizeof) = @{ pop @stack };
$$data_ref = buffer_to_scalar($pointer, $size);
@{$_[0]} = map {
bless \$_, $record_module
} unpack "(a${sizeof})*", $$data_ref;
();
};
lib/AI/TensorFlow/Libtensorflow/Lib/_Alloc.pm
} else {
# Pure Perl _aligned_alloc()
quote_sub '_aligned_alloc', q{
my ($alignment, $size) = @_;
# $alignment must fit in 8-bits
die "\$alignment must be <= 255" if $alignment > 0xFF;
my $requested_size = $alignment + $size; # size_t
my $ptr = malloc($requested_size); # void*
my $offset = $alignment - $ptr % $alignment; # size_t
my $aligned = $ptr + $offset; # void*
strcpy $aligned - 1, chr($offset);
return $aligned;
};
quote_sub '_aligned_free', q{
my ($aligned) = @_;
my $offset = ord(buffer_to_scalar($aligned - 1, 1));
free( $aligned - $offset );
};
$_ALIGNED_ALLOC_ALIGNMENT_MULTIPLE = 0;
}
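# Worked illustration of the bookkeeping above (hypothetical addresses):
# _aligned_alloc(16, 32) malloc()s 48 bytes; if malloc returns 0x1004,
# then $offset = 16 - (0x1004 % 16) = 12 and $aligned = 0x1010. chr(12)
# is stashed at 0x100F, so _aligned_free(0x1010) reads it back and
# free()s 0x1010 - 12 = 0x1004, the original pointer.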
use Const::Fast;
# See <https://github.com/tensorflow/tensorflow/issues/58112>.
# This is a power-of-two.
const our $EIGEN_MAX_ALIGN_BYTES => do { _tf_alignment(); };
sub _tf_alignment {
lib/AI/TensorFlow/Libtensorflow/Lib/_Alloc.pm
my $el = INT8;
my $el_size = $el->Size;
my $max_alignment = $alignments[0];
my $req_size = 2 * $max_alignment + $el_size;
# All data that is sent to TF_NewTensor here is within the block of
# memory allocated at $ptr_base.
my $ptr_base = malloc($req_size);
defer { free($ptr_base); }
# start at offset that is aligned with $max_alignment
my $ptr = $ptr_base + ( $max_alignment - $ptr_base % $max_alignment );
my $create_tensor_at_alignment = sub {
my ($n, $dealloc_called) = @_;
my $offset = $n - $ptr % $n;
my $ptr_offset = $ptr + $offset;
my $space_for_data = $req_size - $offset;
window(my $data, $ptr_offset, $space_for_data);
return AI::TensorFlow::Libtensorflow::Tensor->New(
$el, [int($space_for_data/$el_size)], \$data, sub {
$$dealloc_called = 1
}
);
};
for my $a_idx (0..@alignments-2) {
my @dealloc = (0, 0);
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_DeleteGraph(TF_Graph*);
=head2 TF_GraphSetTensorShape
=over 2
Sets the shape of the Tensor referenced by `output` in `graph` to
the shape described by `dims` and `num_dims`.
If the number of dimensions is unknown, `num_dims` must be set to
-1 and `dims` can be null. If a dimension is unknown, the
corresponding entry in the `dims` array must be -1.
This does not overwrite the existing shape associated with `output`,
but merges the input shape with the existing shape. For example,
setting a shape of [-1, 2] with an existing shape [2, -1] would set
a final shape of [2, 2] based on shape merging semantics.
Returns an error into `status` if:
* `output` is not in `graph`.
* An invalid shape is being set (e.g., the shape being set
is incompatible with the existing shape).
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_GraphSetTensorShape(TF_Graph* graph,
TF_Output output,
const int64_t* dims,
const int num_dims,
TF_Status* status);
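In the Perl binding this is a method on the graph object. A minimal sketch, assuming the method keeps the C name minus the TF_Graph prefix and takes the dims as an array reference, with `$graph` and the TF_Output `$output` obtained from earlier calls:

my $status = AI::TensorFlow::Libtensorflow::Status->New;
# Merging [-1, 2] into an existing [2, -1] shape yields [2, 2]:
$graph->SetTensorShape( $output, [ -1, 2 ], $status );
# inspect $status (e.g. $status->GetCode) before proceeding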
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=head2 TF_GraphGetTensorShape
=over 2
Returns the shape of the Tensor referenced by `output` in `graph`
into `dims`. `dims` must be an array large enough to hold `num_dims`
entries (e.g., the return value of TF_GraphGetTensorNumDims).
If the number of dimensions in the shape is unknown or the shape is
a scalar, `dims` will remain untouched. Otherwise, each element of
`dims` will be set corresponding to the size of the dimension. An
unknown dimension is represented by `-1`.
Returns an error into `status` if:
* `output` is not in `graph`.
* `num_dims` does not match the actual number of dimensions.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_GraphGetTensorShape(TF_Graph* graph,
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_Operation* TF_FinishOperationLocked(
TF_OperationDescription* desc, TF_Status* status);
=head2 TF_FinishOperation
=over 2
If this function succeeds:
* *status is set to an OK value,
* a TF_Operation is added to the graph,
* a non-null value pointing to the added operation is returned --
this value is valid until the underlying graph is deleted.
Otherwise:
* *status is set to a non-OK value,
* the graph is not modified,
* a null value is returned.
In either case, it deletes `desc`.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_Operation* TF_FinishOperation(
TF_OperationDescription* desc, TF_Status* status);
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_Output TF_OperationInput(TF_Input oper_in);
=head2 TF_OperationAllInputs
=over 2
Get list of all inputs of a specific operation. `inputs` must point to
an array of length at least `max_inputs` (ideally set to
TF_OperationNumInputs(oper)). Beware that a concurrent
modification of the graph can increase the number of inputs of
an operation.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_OperationAllInputs(TF_Operation* oper,
TF_Output* inputs,
int max_inputs);
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern int TF_OperationOutputNumConsumers(TF_Output oper_out);
=head2 TF_OperationOutputConsumers
=over 2
Get list of all current consumers of a specific output of an
operation. `consumers` must point to an array of length at least
`max_consumers` (ideally set to
TF_OperationOutputNumConsumers(oper_out)). Beware that a concurrent
modification of the graph can increase the number of consumers of
an operation. Returns the number of output consumers (should match
TF_OperationOutputNumConsumers(oper_out)).
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern int TF_OperationOutputConsumers(TF_Output oper_out,
TF_Input* consumers,
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern int TF_OperationNumControlInputs(TF_Operation* oper);
=head2 TF_OperationGetControlInputs
=over 2
Get list of all control inputs to an operation. `control_inputs` must
point to an array of length `max_control_inputs` (ideally set to
TF_OperationNumControlInputs(oper)). Returns the number of control
inputs (should match TF_OperationNumControlInputs(oper)).
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern int TF_OperationGetControlInputs(
TF_Operation* oper, TF_Operation** control_inputs, int max_control_inputs);
=head2 TF_OperationNumControlOutputs
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern int TF_OperationNumControlOutputs(TF_Operation* oper);
=head2 TF_OperationGetControlOutputs
=over 2
Get the list of operations that have `*oper` as a control input.
`control_outputs` must point to an array of length at least
`max_control_outputs` (ideally set to
TF_OperationNumControlOutputs(oper)). Beware that a concurrent
modification of the graph can increase the number of control
outputs. Returns the number of control outputs (should match
TF_OperationNumControlOutputs(oper)).
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern int TF_OperationGetControlOutputs(
TF_Operation* oper, TF_Operation** control_outputs,
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_AttrMetadata TF_OperationGetAttrMetadata(
TF_Operation* oper, const char* attr_name, TF_Status* status);
=head2 TF_OperationGetAttrString
=over 2
Fills in `value` with the value of the attribute `attr_name`. `value` must
point to an array of length at least `max_length` (ideally set to
TF_AttrMetadata.total_size from TF_OperationGetAttrMetadata(oper,
attr_name)).
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_OperationGetAttrString(TF_Operation* oper,
const char* attr_name,
void* value,
size_t max_length,
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=head2 TF_OperationGetAttrStringList
=over 2
Get the list of strings in the value of the attribute `attr_name`. Fills in
`values` and `lengths`, each of which must point to an array of length at
least `max_values`.
The elements of values will point to addresses in `storage` which must be at
least `storage_size` bytes in length. Ideally, max_values would be set to
TF_AttrMetadata.list_size and `storage` would be at least
TF_AttrMetadata.total_size, obtained from TF_OperationGetAttrMetadata(oper,
attr_name).
Fails if storage_size is too small to hold the requested number of strings.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_OperationGetAttrStringList(
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
TF_CAPI_EXPORT extern void TF_OperationGetAttrInt(TF_Operation* oper,
const char* attr_name,
int64_t* value,
TF_Status* status);
=head2 TF_OperationGetAttrIntList
=over 2
Fills in `values` with the value of the attribute `attr_name` of `oper`.
`values` must point to an array of length at least `max_values` (ideally set to
TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper,
attr_name)).
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_OperationGetAttrIntList(TF_Operation* oper,
const char* attr_name,
int64_t* values,
int max_values,
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
TF_CAPI_EXPORT extern void TF_OperationGetAttrFloat(TF_Operation* oper,
const char* attr_name,
float* value,
TF_Status* status);
=head2 TF_OperationGetAttrFloatList
=over 2
Fills in `values` with the value of the attribute `attr_name` of `oper`.
`values` must point to an array of length at least `max_values` (ideally set
to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper,
attr_name)).
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_OperationGetAttrFloatList(TF_Operation* oper,
const char* attr_name,
float* values,
int max_values,
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
TF_CAPI_EXPORT extern void TF_OperationGetAttrBool(TF_Operation* oper,
const char* attr_name,
unsigned char* value,
TF_Status* status);
=head2 TF_OperationGetAttrBoolList
=over 2
Fills in `values` with the value of the attribute `attr_name` of `oper`.
`values` must point to an array of length at least `max_values` (ideally set
to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper,
attr_name)).
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_OperationGetAttrBoolList(TF_Operation* oper,
const char* attr_name,
unsigned char* values,
int max_values,
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
TF_CAPI_EXPORT extern void TF_OperationGetAttrType(TF_Operation* oper,
const char* attr_name,
TF_DataType* value,
TF_Status* status);
=head2 TF_OperationGetAttrTypeList
=over 2
Fills in `values` with the value of the attribute `attr_name` of `oper`.
`values` must point to an array of length at least `max_values` (ideally set
to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper,
attr_name)).
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_OperationGetAttrTypeList(TF_Operation* oper,
const char* attr_name,
TF_DataType* values,
int max_values,
TF_Status* status);
=head2 TF_OperationGetAttrShape
=over 2
Fills in `value` with the value of the attribute `attr_name` of `oper`.
`value` must point to an array of length at least `num_dims` (ideally set to
TF_AttrMetadata.total_size from TF_OperationGetAttrMetadata(oper, attr_name)).
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_OperationGetAttrShape(TF_Operation* oper,
const char* attr_name,
int64_t* value,
int num_dims,
TF_Status* status);
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=over 2
Fills in `dims` with the list of shapes in the attribute `attr_name` of
`oper` and `num_dims` with the corresponding number of dimensions. On return,
for every i where `num_dims[i]` > 0, `dims[i]` will be an array of
`num_dims[i]` elements. A value of -1 for `num_dims[i]` indicates that the
i-th shape in the list is unknown.
The elements of `dims` will point to addresses in `storage` which must be
large enough to hold at least `storage_size` int64_ts. Ideally, `num_shapes`
would be set to TF_AttrMetadata.list_size and `storage_size` would be set to
TF_AttrMetadata.total_size from TF_OperationGetAttrMetadata(oper,
attr_name).
Fails if storage_size is insufficient to hold the requested shapes.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_OperationGetAttrShapeList(
TF_Operation* oper, const char* attr_name, int64_t** dims, int* num_dims,
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
TF_CAPI_EXPORT extern void TF_OperationGetAttrTensorShapeProto(
TF_Operation* oper, const char* attr_name, TF_Buffer* value,
TF_Status* status);
=head2 TF_OperationGetAttrTensorShapeProtoList
=over 2
Fills in `values` with binary-serialized TensorShapeProto values of the
attribute `attr_name` of `oper`. `values` must point to an array of length at
least `num_values` (ideally set to TF_AttrMetadata.list_size from
TF_OperationGetAttrMetadata(oper, attr_name)).
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_OperationGetAttrTensorShapeProtoList(
TF_Operation* oper, const char* attr_name, TF_Buffer** values,
int max_values, TF_Status* status);
=head2 TF_OperationGetAttrTensor
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
const char* attr_name,
TF_Tensor** value,
TF_Status* status);
=head2 TF_OperationGetAttrTensorList
=over 2
Fills in `values` with the TF_Tensor values of the attribute `attr_name` of
`oper`. `values` must point to an array of TF_Tensor* of length at least
`max_values` (ideally set to TF_AttrMetadata.list_size from
TF_OperationGetAttrMetadata(oper, attr_name)).
The caller takes ownership of all the non-null TF_Tensor* entries in `values`
(which can be deleted using TF_DeleteTensor(values[i])).
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_OperationGetAttrTensorList(TF_Operation* oper,
const char* attr_name,
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsSetDefaultDevice(
TF_ImportGraphDefOptions* opts, const char* device);
=head2 TF_ImportGraphDefOptionsSetUniquifyNames
=over 2
Set whether to uniquify imported operation names. If true, imported operation
names will be modified if their name already exists in the graph. If false,
conflicting names will be treated as an error. Note that this option has no
effect if a prefix is set, since the prefix will guarantee all names are
unique. Defaults to false.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsSetUniquifyNames(
TF_ImportGraphDefOptions* opts, unsigned char uniquify_names);
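A hedged sketch of driving this option from Perl, assuming the binding exposes it as a method on an ImportGraphDefOptions object and that `$graph`, `$status`, and a GraphDef buffer `$graphdef` are already in scope:

my $opts = AI::TensorFlow::Libtensorflow::ImportGraphDefOptions->New;
$opts->SetUniquifyNames(1);  # rename imported ops on name collisions
$graph->ImportGraphDef( $graphdef, $opts, $status );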
=head2 TF_ImportGraphDefOptionsSetUniquifyPrefix
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
between them if `func` does not already have a gradient. If `func` already
has a gradient different from `grad`, an error is returned.
`func` must not be null.
If `grad` is null and `func` is not in `g`, `func` is added without a
gradient.
If `grad` is null and `func` is in `g`, TF_GraphCopyFunction is a noop.
`grad` must have appropriate signature as described in the doc of
GradientDef in tensorflow/core/framework/function.proto.
If successful, status is set to OK and `func` and `grad` are added to `g`.
Otherwise, status is set to the encountered error and `g` is unmodified.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_GraphCopyFunction(TF_Graph* g,
const TF_Function* func,
const TF_Function* grad,
TF_Status* status);
=head2 TF_GraphNumFunctions
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern int TF_GraphNumFunctions(TF_Graph* g);
=head2 TF_GraphGetFunctions
=over 2
Fills in `funcs` with the TF_Function* registered in `g`.
`funcs` must point to an array of TF_Function* of length at least
`max_func`. In usual usage, `max_func` should be set to the result of
TF_GraphNumFunctions(g); in that case, all the functions registered in
`g` will be returned. Otherwise, an unspecified subset is returned.
If successful, returns the number of TF_Function* successfully set in
`funcs` and sets status to OK. The caller takes ownership of
all the returned TF_Functions. They must be deleted with TF_DeleteFunction.
On error, returns 0, sets status to the encountered error, and the contents
of funcs will be undefined.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern int TF_GraphGetFunctions(TF_Graph* g, TF_Function** funcs,
int max_func, TF_Status* status);
=head2 TF_OperationToNodeDef
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
TF_Output* dx, TF_Status* status,
TF_Output* dy);
=head2 TF_GraphToFunction
=over 2
Create a TF_Function from a TF_Graph
Params:
fn_body - the graph whose operations (or subset of whose operations) will be
converted to TF_Function.
fn_name - the name of the new TF_Function. Should match the operation
name (OpDef.name) regexp [A-Z][A-Za-z0-9_.\\-/]*.
If `append_hash_to_fn_name` is false, `fn_name` must be distinct
from other function and operation names (at least those
registered in graphs where this function will be used).
append_hash_to_fn_name - Must be 0 or 1. If set to 1, the actual name
of the function will be `fn_name` appended with
'_<hash_of_this_function's_definition>'.
If set to 0, the function's name will be `fn_name`.
num_opers - `num_opers` contains the number of elements in the `opers` array
or a special value of -1 meaning that no array is given.
The distinction between an empty array of operations and no
array of operations is necessary to distinguish the case of
creating a function with no body (e.g. identity or permutation)
and the case of creating a function whose body contains all
the nodes in the graph (except for the automatic skipping, see
below).
opers - Array of operations to become the body of the function or null.
- If no array is given (`num_opers` = -1), all the
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_Function* TF_FunctionImportFunctionDef(
const void* proto, size_t proto_len, TF_Status* status);
=head2 TF_FunctionSetAttrValueProto
=over 2
Sets function attribute named `attr_name` to value stored in `proto`.
If this attribute is already set to another value, it is overridden.
`proto` should point to a sequence of bytes of length `proto_len`
representing a binary serialization of an AttrValue protocol
buffer.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_FunctionSetAttrValueProto(TF_Function* func,
const char* attr_name,
const void* proto,
size_t proto_len,
TF_Status* status);
=head2 TF_FunctionGetAttrValueProto
=over 2
Sets `output_attr_value` to the binary-serialized AttrValue proto
representation of the value of the `attr_name` attr of `func`.
If `attr_name` attribute is not present, status is set to an error.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_FunctionGetAttrValueProto(
TF_Function* func, const char* attr_name, TF_Buffer* output_attr_value,
TF_Status* status);
=head2 TF_DeleteFunction
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=over 2
Attempts to evaluate `output`. This will only be possible if `output` doesn't
depend on any graph inputs (this function is safe to call if this isn't the
case though).
If the evaluation is successful, this function returns true and `output`'s
value is returned in `result`. Otherwise returns false. An error status is
returned if something is wrong with the graph or input. Note that this may
return false even if no error status is set.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern unsigned char TF_TryEvaluateConstant(TF_Graph* graph,
TF_Output output,
TF_Tensor** result,
TF_Status* status);
=head2 TF_NewSession
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
TF_CAPI_EXPORT extern TF_Session* TF_NewSession(TF_Graph* graph,
const TF_SessionOptions* opts,
TF_Status* status);
=head2 TF_LoadSessionFromSavedModel
=over 2
On success, this function creates a new TF_Session using
`session_options`, and then initializes state (restoring tensors and other
assets) using `run_options`.
Any NULL and non-NULL value combinations for (`run_options`, `meta_graph_def`)
are valid.
- `export_dir` must be set to the path of the exported SavedModel.
- `tags` must include the set of tags used to identify one MetaGraphDef in
the SavedModel.
- `graph` must be a graph newly allocated with TF_NewGraph().
If successful, populates `graph` with the contents of the Graph and
`meta_graph_def` with the MetaGraphDef of the loaded model.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_Session* TF_LoadSessionFromSavedModel(
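From Perl this surfaces as a class method on the session class; a sketch along the lines of the distribution's manuals, where `$export_dir` is a placeholder path to a SavedModel (such as the half_plus_two fixture listed near the top of this page):

my $opt     = AI::TensorFlow::Libtensorflow::SessionOptions->New;
my $graph   = AI::TensorFlow::Libtensorflow::Graph->New;
my $status  = AI::TensorFlow::Libtensorflow::Status->New;
my $session = AI::TensorFlow::Libtensorflow::Session->LoadFromSavedModel(
    $opt, undef, $export_dir, [ 'serve' ], $graph, undef, $status
);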
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=over 2
Set up the graph with the intended feeds (inputs) and fetches (outputs) for a
sequence of partial run calls.
On success, returns a handle that is used for subsequent PRun calls. The
handle should be deleted with TF_DeletePRunHandle when it is no longer
needed.
On failure, out_status contains a tensorflow::Status with an error
message. *handle is set to nullptr.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_SessionPRunSetup(
TF_Session*,
// Input names
const TF_Output* inputs, int ninputs,
// Output names
const TF_Output* outputs, int noutputs,
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=head2 TF_DeleteDeprecatedSession
=over 2
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_DeleteDeprecatedSession(TF_DeprecatedSession*,
TF_Status* status);
=head2 TF_Reset
=over 2
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_Reset(const TF_SessionOptions* opt,
const char** containers, int ncontainers,
TF_Status* status);
=head2 TF_ExtendGraph
=over 2
Treat the bytes proto[0,proto_len-1] as a serialized GraphDef and
add the nodes in that GraphDef to the graph for the session.
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=head2 TF_DeviceListName
=over 2
Retrieves the full name of the device (e.g. /job:worker/replica:0/...)
The return value will be a pointer to a null terminated string. The caller
must not modify or delete the string. It will be deallocated upon a call to
TF_DeleteDeviceList.
If index is out of bounds, an error code will be set in the status object,
and a null pointer will be returned.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern const char* TF_DeviceListName(const TF_DeviceList* list,
int index,
TF_Status* status);
=head2 TF_DeviceListType
=over 2
Retrieves the type of the device at the given index.
The caller must not modify or delete the string. It will be deallocated upon
a call to TF_DeleteDeviceList.
If index is out of bounds, an error code will be set in the status object,
and a null pointer will be returned.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern const char* TF_DeviceListType(const TF_DeviceList* list,
int index,
TF_Status* status);
=head2 TF_DeviceListMemoryBytes
=over 2
Retrieve the amount of memory associated with a given device.
If index is out of bounds, an error code will be set in the status object,
and -1 will be returned.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern int64_t TF_DeviceListMemoryBytes(
const TF_DeviceList* list, int index, TF_Status* status);
=head2 TF_DeviceListIncarnation
=over 2
Retrieve the incarnation number of a given device.
If index is out of bounds, an error code will be set in the status object,
and 0 will be returned.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern uint64_t TF_DeviceListIncarnation(
const TF_DeviceList* list, int index, TF_Status* status);
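Taken together, these accessors support a simple enumeration loop. A hedged Perl sketch, assuming the binding exposes ListDevices on the session and Count/Name/Type on the returned list object:

my $devices = $session->ListDevices($status);
for my $i ( 0 .. $devices->Count - 1 ) {
    printf "%s (%s)\n",
        $devices->Name( $i, $status ),
        $devices->Type( $i, $status );
}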
=head2 TF_LoadLibrary
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=over 2
Allocate and return a new Tensor.
This function is an alternative to TF_NewTensor and should be used when
memory is allocated to pass the Tensor to the C API. The allocated memory
satisfies TensorFlow's memory alignment preferences and should be preferred
over calling malloc and free.
The caller must set the Tensor values by writing them to the pointer returned
by TF_TensorData with length TF_TensorByteSize.
=back
/* From <tensorflow/c/tf_tensor.h> */
TF_CAPI_EXPORT extern TF_Tensor* TF_AllocateTensor(TF_DataType,
const int64_t* dims,
int num_dims, size_t len);
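From Perl, the same aligned allocation is usually reached through the binding's tensor constructor; a sketch using `Tensor->New` (the same constructor seen in _Alloc.pm above), with the FLOAT constant assumed to come from the distribution's DataType module:

use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);
my $packed = pack 'f*', 1.0, 2.0, 3.0, 4.0;
my $t = AI::TensorFlow::Libtensorflow::Tensor->New(
    FLOAT, [ 2, 2 ], \$packed,
    sub {}  # no-op deallocator: $packed remains owned by Perl
);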
=head2 TF_TensorMaybeMove
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/tf_tensor.h> */
TF_CAPI_EXPORT extern int64_t TF_TensorElementCount(const TF_Tensor* tensor);
=head2 TF_TensorBitcastFrom
=over 2
Copy the internal data representation of `from` to `to`. `new_dims` and
`num_new_dims` specify the new shape of the `to` tensor, `type` specifies its
data type. On success, *status is set to TF_OK and the two tensors share the
same data buffer.
This call requires that the `from` tensor and the given type and shape (dims
and num_dims) are "compatible" (i.e. they occupy the same number of bytes).
Specifically, given from_type_size = TF_DataTypeSize(TF_TensorType(from)):
ShapeElementCount(dims, num_dims) * TF_DataTypeSize(type)
must equal
TF_TensorElementCount(from) * from_type_size
where TF_ShapeElementCount would be the number of elements in a tensor with
the given shape.
In addition, this function requires:
* TF_DataTypeSize(TF_TensorType(from)) != 0
* TF_DataTypeSize(type) != 0
If any of the requirements are not met, *status is set to
TF_INVALID_ARGUMENT.
=back
/* From <tensorflow/c/tf_tensor.h> */
TF_CAPI_EXPORT extern void TF_TensorBitcastFrom(const TF_Tensor* from,
TF_DataType type, TF_Tensor* to,
const int64_t* new_dims,
int num_new_dims,
TF_Status* status);
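The compatibility rule reduces to a byte-count equality, which is easy to check up front. A self-contained Perl illustration (the helper name is ours, not the library's):

sub bitcast_compatible {
    my ( $from_elems, $from_type_size, $new_dims, $new_type_size ) = @_;
    my $new_elems = 1;
    $new_elems *= $_ for @$new_dims;
    return $new_elems * $new_type_size == $from_elems * $from_type_size;
}
# A 6-element float32 tensor (24 bytes) can be bitcast to 3 x int64:
print bitcast_compatible( 6, 4, [3], 8 ) ? "compatible\n" : "incompatible\n";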
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=back
/* From <tensorflow/c/tf_status.h> */
TF_CAPI_EXPORT extern const char* TF_Message(const TF_Status* s);
=head2 TF_NewBufferFromString
=over 2
Makes a copy of the input and sets an appropriate deallocator. Useful for
passing in read-only, input protobufs.
=back
/* From <tensorflow/c/tf_buffer.h> */
TF_CAPI_EXPORT extern TF_Buffer* TF_NewBufferFromString(const void* proto,
size_t proto_len);
=head2 TF_NewBuffer
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
TF_OpDefinitionBuilder* builder, bool is_stateful);
=head2 TF_OpDefinitionBuilderSetAllowsUninitializedInput
=over 2
Sets the allows_uninitialized_input property of the operation built by this
builder.
By default, all inputs to an Op must be initialized Tensors. Ops that may
initialize tensors for the first time should set this field to true, to allow
the Op to take an uninitialized Tensor as input.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern void TF_OpDefinitionBuilderSetAllowsUninitializedInput(
TF_OpDefinitionBuilder* builder, bool allows_uninitialized_input);
=head2 TF_OpDefinitionBuilderDeprecated
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern TF_DimensionHandle* TF_NewDimensionHandle();
=head2 TF_ShapeInferenceContext_GetAttrType
=over 2
Interprets the named shape inference context attribute as a TF_DataType and
places it into *val. *status is set to TF_OK.
If the attribute could not be found or could not be interpreted as
TF_DataType, *status is populated with an error.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern void TF_ShapeInferenceContext_GetAttrType(
TF_ShapeInferenceContext* ctx, const char* attr_name, TF_DataType* val,
TF_Status* status);
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
TF_CAPI_EXPORT extern void TF_ShapeInferenceContextDim(
TF_ShapeInferenceContext* ctx, TF_ShapeHandle* shape_handle, int64_t i,
TF_DimensionHandle* result);
=head2 TF_ShapeInferenceContextSubshape
=over 2
Returns in <*result> a sub-shape of <shape_handle>, with dimensions
[start:end]. <start> and <end> can be negative, to index from the end of the
shape. <start> and <end> are set to the rank of <shape_handle> if > rank of
<shape_handle>.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern void TF_ShapeInferenceContextSubshape(
TF_ShapeInferenceContext* ctx, TF_ShapeHandle* shape_handle, int64_t start,
int64_t end, TF_ShapeHandle* result, TF_Status* status);
=head2 TF_ShapeInferenceContextSetUnknownShape
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=head2 TF_DeleteRecursively
=over 2
Deletes the specified directory and all subdirectories and files underneath
it. This is accomplished by traversing the directory tree rooted at dirname
and deleting entries as they are encountered.
If dirname itself is not readable or does not exist, *undeleted_dir_count is
set to 1, *undeleted_file_count is set to 0 and an appropriate status (e.g.
TF_NOT_FOUND) is returned.
If dirname and all its descendants were successfully deleted, TF_OK is
returned and both error counters are set to zero.
Otherwise, while traversing the tree, undeleted_file_count and
undeleted_dir_count are updated if an entry of the corresponding type could
not be deleted. The returned error status represents the reason that any one
of these entries could not be deleted.
Typical status codes:
* TF_OK - dirname exists and we were able to delete everything underneath
* TF_NOT_FOUND - dirname doesn't exist
* TF_PERMISSION_DENIED - dirname or some descendant is not writable
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern void TF_DeleteFile(const char* filename,
TF_Status* status);
=head2 TF_StringStreamNext
=over 2
Retrieves the next item from the given TF_StringStream and places a pointer
to it in *result. If no more items are in the list, *result is set to NULL
and false is returned.
Ownership of the items retrieved with this function remains with the library.
Item pointers are invalidated after a call to TF_StringStreamDone.
=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern bool TF_StringStreamNext(TF_StringStream* list,
const char** result);
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=over 2
Load a dynamic library.
Pass "library_filename" to a platform-specific mechanism for dynamically
loading a library. The rules for determining the exact location of the
library are platform-specific and are not documented here.
On success, place OK in status and return the newly created library handle.
Otherwise returns nullptr and sets error status.
=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern void* TF_LoadSharedLibrary(const char* library_filename,
TF_Status* status);
=head2 TF_GetSymbolFromLibrary
=over 2
Get a pointer to a symbol from a dynamic library.
"handle" should be a pointer returned from a previous call to
TF_LoadLibraryFromEnv. On success, place OK in status and return a pointer to
the located symbol. Otherwise returns nullptr and sets error status.
=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern void* TF_GetSymbolFromLibrary(void* handle,
const char* symbol_name,
TF_Status* status);
=head2 TF_Log
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
TF_CAPI_EXPORT extern void TF_DeleteKernelBuilder(TF_KernelBuilder* builder);
=head2 TF_GetStream
=over 2
TF_GetStream returns the SP_Stream available in ctx.
This function returns a stream only for devices registered using the
StreamExecutor C API
(tensorflow/c/experimental/stream_executor/stream_executor.h). It will return
nullptr and set error status in all other cases.
Experimental: this function doesn't have compatibility guarantees and is subject
to change at any time.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern SP_Stream TF_GetStream(TF_OpKernelContext* ctx,
TF_Status* status);
=head2 TF_NumInputs
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
TF_CAPI_EXPORT extern int TF_NumOutputs(TF_OpKernelContext* ctx);
=head2 TF_GetInput
=over 2
Retrieves the ith input from ctx. If TF_GetCode(status) is TF_OK, *tensor is
populated and its ownership is passed to the caller. In any other case,
*tensor is not modified.
If i < 0 or i >= TF_NumInputs(ctx), *status is set to TF_OUT_OF_RANGE.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_GetInput(TF_OpKernelContext* ctx, int i,
TF_Tensor** tensor, TF_Status* status);
=head2 TF_InputRange
=over 2
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
const char* name,
TF_InputRange_Args* args);
=head2 TF_SetOutput
=over 2
Sets the ith output of ctx to tensor. If TF_GetCode(status) is anything but
TF_OK, ctx is left unmodified.
If i < 0 or i >= TF_NumOutputs(ctx), *status is set to TF_OUT_OF_RANGE.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_SetOutput(TF_OpKernelContext* ctx, int i,
const TF_Tensor* tensor,
TF_Status* status);
=head2 TF_GetMutableOutput
=over 2
Retrieves the ith output from ctx. If TF_GetCode(status) is TF_OK, *tensor is
populated and its ownership is passed to the caller. In any other case,
*tensor is not modified.
If i < 0 or i >= TF_NumOutputs(ctx), *status is set to TF_OUT_OF_RANGE.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern TF_Tensor* TF_GetMutableOutput(TF_OpKernelContext* ctx,
int i, TF_Status* status);
=head2 TF_GetSerializedFunctionDefLibrary
=over 2
Retrieves a serialized FunctionDefLibrary. Status will be set.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_GetSerializedFunctionDefLibrary(
TF_OpKernelContext* ctx, TF_Buffer* serialized_function_def_library,
TF_Status* status);
=head2 TF_GetSerializedConfigProto
=over 2
Retrieves a serialized ConfigProto. Status will be set.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_GetSerializedConfigProto(
TF_OpKernelContext* ctx, TF_Buffer* serialized_config_proto,
TF_Status* status);
=head2 TF_OpKernelConstruction_Failure
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrSize(
TF_OpKernelConstruction* ctx, const char* attr_name, int32_t* list_size,
int32_t* total_size, TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrType
=over 2
Interprets the named kernel construction attribute as a TF_DataType and
places it into *val. *status is set to TF_OK.
If the attribute could not be found or could not be interpreted as
TF_DataType, *status is populated with an error.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrType(
TF_OpKernelConstruction* ctx, const char* attr_name, TF_DataType* val,
TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrInt32
=over 2
Interprets the named kernel construction attribute as int32_t and
places it into *val. *status is set to TF_OK.
If the attribute could not be found or could not be interpreted as
int32, *status is populated with an error.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrInt32(
TF_OpKernelConstruction* ctx, const char* attr_name, int32_t* val,
TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrInt64
=over 2
Interprets the named kernel construction attribute as int64_t and
places it into *val. *status is set to TF_OK.
If the attribute could not be found or could not be interpreted as
int64, *status is populated with an error.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrInt64(
TF_OpKernelConstruction* ctx, const char* attr_name, int64_t* val,
TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrFloat
=over 2
Interprets the named kernel construction attribute as float and
places it into *val. *status is set to TF_OK.
If the attribute could not be found or could not be interpreted as
float, *status is populated with an error.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrFloat(
TF_OpKernelConstruction* ctx, const char* attr_name, float* val,
TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrBool
=over 2
Interprets the named kernel construction attribute as bool and
places it into *val. *status is set to TF_OK.
If the attribute could not be found or could not be interpreted as
bool, *status is populated with an error.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrBool(
TF_OpKernelConstruction* ctx, const char* attr_name, TF_Bool* val,
TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrString
=over 2
Interprets the named kernel construction attribute as string and
places it into *val. `val` must
point to an array of length at least `max_length` (ideally set to
total_size from TF_OpKernelConstruction_GetAttrSize(ctx,
attr_name, list_size, total_size)). *status is set to TF_OK.
If the attribute could not be found or could not be interpreted as
string, *status is populated with an error.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrString(
TF_OpKernelConstruction* ctx, const char* attr_name, char* val,
size_t max_length, TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrTensor
=over 2
Interprets the named kernel construction attribute as tensor and places it
into *val. Allocates a new TF_Tensor which the caller is expected to take
ownership of (and can deallocate using TF_DeleteTensor). *status is set to
TF_OK.
If the attribute could not be found or could not be interpreted as
tensor, *status is populated with an error.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrTensor(
TF_OpKernelConstruction* ctx, const char* attr_name, TF_Tensor** val,
TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrTypeList
=over 2
Interprets the named kernel construction attribute as a TF_DataType array and
places it into *vals. *status is set to TF_OK.
`vals` must point to an array of length at least `max_values` (ideally set
to list_size from
TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, list_size,
total_size)).
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrTypeList(
TF_OpKernelConstruction* ctx, const char* attr_name, TF_DataType* vals,
int max_vals, TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrInt32List
=over 2
Interprets the named kernel construction attribute as int32_t array and
places it into *vals. *status is set to TF_OK.
`vals` must point to an array of length at least `max_values` (ideally set
to list_size from
TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, list_size,
total_size)).
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrInt32List(
TF_OpKernelConstruction* ctx, const char* attr_name, int32_t* vals,
int max_vals, TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrInt64List
=over 2
Interprets the named kernel construction attribute as int64_t array and
places it into *vals. *status is set to TF_OK.
`vals` must point to an array of length at least `max_values` (ideally set
to list_size from
TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, list_size,
total_size)).
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrInt64List(
TF_OpKernelConstruction* ctx, const char* attr_name, int64_t* vals,
int max_vals, TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrFloatList
=over 2
Interprets the named kernel construction attribute as float array and
places it into *vals. *status is set to TF_OK.
`vals` must point to an array of length at least `max_values` (ideally set
to list_size from
TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, list_size,
total_size)).
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrFloatList(
TF_OpKernelConstruction* ctx, const char* attr_name, float* vals,
int max_vals, TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrBoolList
=over 2
Interprets the named kernel construction attribute as bool array and
places it into *vals. *status is set to TF_OK.
`vals` must point to an array of length at least `max_values` (ideally set
to list_size from
TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, list_size,
total_size)).
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrBoolList(
TF_OpKernelConstruction* ctx, const char* attr_name, TF_Bool* vals,
int max_vals, TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrStringList
=over 2
Interprets the named kernel construction attribute as string array and fills
in `vals` and `lengths`, each of which must point to an array of length at
least `max_values`. *status is set to TF_OK. The elements of values will
point to addresses in `storage` which must be at least `storage_size` bytes
in length. Ideally, max_values would be set to list_size and `storage` would
be at least total_size, obtained from
TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, list_size,
total_size).
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrStringList(
TF_OpKernelConstruction* ctx, const char* attr_name, char** vals,
size_t* lengths, int max_values, void* storage, size_t storage_size,
TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrTensorList
=over 2
Interprets the named kernel construction attribute as tensor array and places
it into *vals. *status is set to TF_OK.
`vals` must point to an array of length at least `max_values`
(ideally set to list_size from TF_OpKernelConstruction_GetAttrSize(ctx,
attr_name, list_size, total_size)).
The caller takes ownership of all the non-null TF_Tensor* entries in `vals`
(which can be deleted using TF_DeleteTensor(vals[i])).
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrTensorList(
TF_OpKernelConstruction* ctx, const char* attr_name, TF_Tensor** vals,
int max_values, TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrFunction
=over 2
Interprets the named kernel construction attribute as a
tensorflow::NameAttrList and returns the serialized proto as TF_Buffer.
`status` will be set. The caller takes ownership of the returned TF_Buffer
(if not null) and is responsible for managing its lifetime.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern TF_Buffer* TF_OpKernelConstruction_GetAttrFunction(
TF_OpKernelConstruction* ctx, const char* attr_name, TF_Status* status);
=head2 TF_OpKernelConstruction_HasAttr
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=over 2
Expose higher level Assignment operation for Pluggable vendors to implement
in the plugin for Training. The API takes in the context with indices for
the input and value tensors. It also accepts the copy callback provided by
pluggable vendor to do the copying of the tensors. The caller takes ownership
of the `source` and `dest` tensors and is responsible for freeing them with
TF_DeleteTensor. This function will return an error when the following
conditions are met:
1. `validate_shape` is set to `true`
2. The variable is initialized
3. The shape of the value tensor doesn't match the shape of the variable
tensor.
=back
/* From <tensorflow/c/kernels_experimental.h> */
TF_CAPI_EXPORT extern void TF_AssignVariable(
TF_OpKernelContext* ctx, int input_index, int value_index,
bool validate_shape,
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
const char* inputName,
TF_Tensor** tensor,
TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrTensorShape
=over 2
Interprets the named kernel construction attribute as a shape attribute and
fills in `vals` with the size of each dimension. `vals` must point to an
array of length at least `max_values` (ideally set to total_size from
TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, &list_size,
&total_size)).
=back
/* From <tensorflow/c/kernels_experimental.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrTensorShape(
TF_OpKernelConstruction* ctx, const char* attr_name, int64_t* dims,
size_t num_dims, TF_Status* status);
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern TFE_ContextDevicePlacementPolicy
TFE_ContextGetDevicePlacementPolicy(TFE_Context* ctx);
=head2 TFE_ContextSetServerDef
=over 2
A tensorflow.ServerDef specifies remote workers (in addition to the current
worker's name). Operations created in this context can then be executed on
any of these remote workers by setting an appropriate device.
If the following is set, all servers identified by the
ServerDef must be up when the context is created.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_ContextSetServerDef(TFE_Context* ctx,
int keep_alive_secs,
const void* proto,
size_t proto_len,
TF_Status* status);
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern const char* TFE_TensorHandleBackingDeviceName(
TFE_TensorHandle* h, TF_Status* status);
=head2 TFE_TensorHandleCopySharingTensor
=over 2
Return a pointer to a new TFE_TensorHandle that shares the underlying tensor
with `h`. On success, `status` is set to OK. On failure, `status` reflects
the error and a nullptr is returned.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_TensorHandleCopySharingTensor(
TFE_TensorHandle* h, TF_Status* status);
=head2 TFE_TensorHandleResolve
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_TensorHandleCopyToDevice(
TFE_TensorHandle* h, TFE_Context* ctx, const char* device_name,
TF_Status* status);
=head2 TFE_TensorHandleTensorDebugInfo
=over 2
Retrieves TFE_TensorDebugInfo for `handle`.
If TFE_TensorHandleTensorDebugInfo succeeds, `status` is set to OK and caller
is responsible for deleting returned TFE_TensorDebugInfo.
If TFE_TensorHandleTensorDebugInfo fails, `status` is set to appropriate
error and nullptr is returned. This function can block till the operation
that produces `handle` has completed.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern TFE_TensorDebugInfo* TFE_TensorHandleTensorDebugInfo(
TFE_TensorHandle* h, TF_Status* status);
=head2 TFE_DeleteTensorDebugInfo
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_OpSetAttrType(TFE_Op* op, const char* attr_name,
TF_DataType value);
=head2 TFE_OpSetAttrShape
=over 2
If the number of dimensions is unknown, `num_dims` must be set to
-1 and `dims` can be null. If a dimension is unknown, the
corresponding entry in the `dims` array must be -1.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_OpSetAttrShape(TFE_Op* op, const char* attr_name,
const int64_t* dims,
const int num_dims,
TF_Status* out_status);
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
TF_Status* status);
=head2 TFE_Execute
=over 2
Execute the operation defined by 'op' and return handles to computed
tensors in `retvals`.
'retvals' must point to a pre-allocated array of TFE_TensorHandle* and
'*num_retvals' should be set to the size of this array. It is an error if
the size of 'retvals' is less than the number of outputs. This call sets
*num_retvals to the number of outputs.
If async execution is enabled, the call may simply enqueue the execution
and return "non-ready" handles in `retvals`. Note that any handles contained
in 'op' should not be mutated till the kernel execution actually finishes.
For sync execution, if any of the inputs to `op` are not ready, this call
will block till they become ready and then return when the kernel execution
is done.
TODO(agarwal): change num_retvals to int from int*.
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_ContextExportRunMetadata(TFE_Context* ctx,
TF_Buffer* buf,
TF_Status* status);
=head2 TFE_ContextStartStep
=over 2
Some TF ops need a step container to be set to limit the lifetime of some
resources (mostly TensorArray and Stack, used in while loop gradients in
graph mode). Calling this on a context tells it to start a step.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_ContextStartStep(TFE_Context* ctx);
=head2 TFE_ContextEndStep
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=over 2
Calls the destructor of DLManagedTensor, used in the destructor of PyCapsule.
=back
/* From <tensorflow/c/eager/dlpack.h> */
TF_CAPI_EXPORT extern void TFE_CallDLManagedTensorDeleter(void* dlm_ptr);
=head2 TFE_OpReset
=over 2
Resets `op_to_reset` with `op_or_function_name` and `raw_device_name`. This
is a performance optimization: it reuses an existing unused op rather than
creating a new op every time. If `raw_device_name` is `NULL` or empty, it
does not set the device name. If it is not `NULL`, then it attempts to parse
and set the device name. It is effectively `TFE_OpSetDevice`, but it is
faster because if the existing op already has the same `raw_device_name`, it
skips the parsing and leaves the device name as is.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_OpReset(TFE_Op* op_to_reset,
const char* op_or_function_name,
const char* raw_device_name,
TF_Status* status);
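A sketch of reusing one op object across calls, assuming C<op> was created earlier with C<TFE_NewOp>; the op type and device name are illustrative:

  /* Rebind the existing op object to a new op type and device. */
  TFE_OpReset(op, "MatMul",
              "/job:localhost/replica:0/task:0/device:CPU:0", status);

If the op already had that C<raw_device_name>, the device-name parsing is skipped.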
=head2 TFE_ContextEnableGraphCollection
=over 2
Enables only graph collection in RunMetadata on the functions executed from
this context.
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern TFE_MonitoringCounterCell* TFE_MonitoringGetCellCounter2(
TFE_MonitoringCounter2* counter, const char* label1, const char* label2);
=head2 TFE_MonitoringIntGaugeCellSet
=over 2
Atomically set the value of the cell.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_MonitoringIntGaugeCellSet(
TFE_MonitoringIntGaugeCell* cell, int64_t value);
=head2 TFE_MonitoringIntGaugeCellValue
=over 2
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_ContextOptionsSetTfrtDistributedRuntime(
TFE_ContextOptions* options, bool use_tfrt_distributed_runtime);
=head2 TFE_GetContextId
=over 2
Returns the context_id from the EagerContext which is used by the
EagerService to maintain consistency between client and worker. The
context_id is initialized with a dummy value and is later set when the worker
is initialized (either locally or remotely). The context_id can change during
the process lifetime, although this should also cause the worker to be
reinitialized (e.g. its caches cleared).
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern uint64_t TFE_GetContextId(TFE_Context* ctx);
=head2 TFE_NewCancellationManager
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern TFE_Executor* TFE_ContextGetExecutorForThread(
TFE_Context*);
=head2 TFE_ContextUpdateServerDef
=over 2
Update an existing context with a new set of servers defined in a ServerDef
proto. Servers can be added to and removed from the list of remote workers
in the context. The new set of servers identified by the ServerDef must be up
when the context is updated.
This API is for experimental usage and may be subject to change.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_ContextUpdateServerDef(TFE_Context* ctx,
int keep_alive_secs,
const void* proto,
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
There are currently no graph semantics implemented for registered custom
devices, so executing tf.functions which contain operations placed on the
custom devices will fail.
`device_name` must not name an existing physical or custom device. It must
follow the format:
/job:<name>/replica:<replica>/task:<task>/device:<type>:<device_num>
If the device is successfully registered, `status` is set to TF_OK. Otherwise
the device is not usable. In case of a bad status, `device.delete_device` is
still called on `device_info` (i.e. the caller does not retain ownership).
This API is highly experimental, and in particular is expected to change when
it starts supporting operations with attributes and when tf.function support
is added.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
const char* function_name,
TF_Buffer* buf,
TF_Status* status);
=head2 TFE_AllocateHostTensor
=over 2
Allocate and return a new Tensor on the host.
The caller must set the Tensor values by writing them to the pointer returned
by TF_TensorData with length TF_TensorByteSize.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern TF_Tensor* TFE_AllocateHostTensor(TFE_Context* ctx,
TF_DataType dtype,
const int64_t* dims,
int num_dims,
TF_Status* status);
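A sketch that allocates a 2x3 float tensor and zero-fills it through C<TF_TensorData>, assuming an existing C<TFE_Context* ctx> and C<TF_Status* status>:

  int64_t dims[] = {2, 3};
  TF_Tensor* t = TFE_AllocateHostTensor(ctx, TF_FLOAT, dims, 2, status);
  if (TF_GetCode(status) == TF_OK) {
    float* data = (float*) TF_TensorData(t);
    size_t n = TF_TensorByteSize(t) / sizeof(float);
    for (size_t i = 0; i < n; i++) data[i] = 0.0f;  /* caller sets values */
    TF_DeleteTensor(t);
  }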
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern int TFE_TensorHandleDeviceID(TFE_TensorHandle* h,
TF_Status* status);
=head2 TFE_TensorHandleGetStatus
=over 2
Returns the status for the tensor handle. In TFRT, a tensor handle can carry
error information if an error has happened; in that case, the status is set
with that error information. Otherwise, the status is set to OK.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_TensorHandleGetStatus(TFE_TensorHandle* h,
TF_Status* status);
=head2 TFE_GetExecutedOpNames
=over 2
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
const char* prefix,
TF_Status* status);
=head2 TFE_InsertConfigKeyValue
=over 2
Set a configuration key and value using the coordination service.
If the coordination service is enabled, the key-value pair will be stored on
the leader and become accessible to all workers in the cluster.
Currently, a config key can only be set to one value; subsequently setting
the same key leads to an error.
Note that the key-value pairs are only expected to be used for cluster
configuration data; they should not be used to store large amounts of data
or be accessed very frequently.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_InsertConfigKeyValue(TFE_Context* ctx,
const char* key,
const char* value,
TF_Status* status);
=head2 TFE_GetConfigKeyValue
=over 2
Get a configuration value by key using the coordination service.
The config key must be set before its value can be retrieved; getting the
value of a non-existent config key results in an error.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_GetConfigKeyValue(TFE_Context* ctx,
const char* key,
TF_Buffer* value_buf,
TF_Status* status);
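A sketch of the insert/get pair, assuming an existing C<TFE_Context* ctx> with the coordination service enabled; the key and value are illustrative:

  TFE_InsertConfigKeyValue(ctx, "cluster/leader", "worker0", status);

  TF_Buffer* value_buf = TF_NewBuffer();
  TFE_GetConfigKeyValue(ctx, "cluster/leader", value_buf, status);
  if (TF_GetCode(status) == TF_OK) {
    /* value_buf->data and value_buf->length hold the stored value */
  }
  TF_DeleteBuffer(value_buf);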
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_WaitAtBarrier(TFE_Context* ctx,
const char* barrier_id,
int64_t barrier_timeout_in_ms,
TF_Status* status);
=head2 TF_GetNodesToPreserveListSize
=over 2
Get the set of node names that must be preserved. They cannot be transformed
or removed during the graph transformation. This includes feed and fetch
nodes, keep_ops, and init_ops. Fills in `num_values` and `storage_size`,
which are then used in `TF_GetNodesToPreserveList`.
=back
/* From <tensorflow/c/experimental/grappler/grappler.h> */
TF_CAPI_EXPORT extern void TF_GetNodesToPreserveListSize(
const TF_GrapplerItem* item, int* num_values, size_t* storage_size,
TF_Status* status);
=head2 TF_GetNodesToPreserveList
=over 2
Get the set of node names that must be preserved. They cannot be transformed
or removed during the graph transformation. This includes feed and fetch
nodes, keep_ops, and init_ops. Fills in `values` and `lengths`, each of which
must point to an array of length at least `num_values`.
The elements of `values` will point to addresses in `storage`, which must be
at least `storage_size` bytes in length. `num_values` and `storage_size` can
be obtained from `TF_GetNodesToPreserveListSize`.
Fails if `storage_size` is too small to hold the requested number of strings.
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/experimental/grappler/grappler.h> */
TF_CAPI_EXPORT extern void TF_GetNodesToPreserveList(
const TF_GrapplerItem* item, char** values, size_t* lengths, int num_values,
void* storage, size_t storage_size, TF_Status* status);
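The size/list functions are used as a two-call pattern. A sketch, assuming an existing C<const TF_GrapplerItem* item> and C<TF_Status* status> (and C<< <stdlib.h> >> for malloc/free):

  int num_values = 0;
  size_t storage_size = 0;
  TF_GetNodesToPreserveListSize(item, &num_values, &storage_size, status);

  char** values = malloc(num_values * sizeof(char*));
  size_t* lengths = malloc(num_values * sizeof(size_t));
  void* storage = malloc(storage_size);
  TF_GetNodesToPreserveList(item, values, lengths, num_values,
                            storage, storage_size, status);
  /* values[i] points into storage; lengths[i] is that name's length */
  free(values); free(lengths); free(storage);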
=head2 TF_GetFetchNodesListSize
=over 2
Get the list of fetch node names. Fills in `num_values` and `storage_size`,
which are then used in `TF_GetFetchNodesList`.
=back
/* From <tensorflow/c/experimental/grappler/grappler.h> */
TF_CAPI_EXPORT extern void TF_GetFetchNodesListSize(const TF_GrapplerItem* item,
int* num_values,
size_t* storage_size,
TF_Status* status);
=head2 TF_GetFetchNodesList
=over 2
Get the list of fetch node names. Fills in `values` and `lengths`, each of
which must point to an array of length at least `num_values`.
The elements of `values` will point to addresses in `storage`, which must be
at least `storage_size` bytes in length. `num_values` and `storage_size` can
be obtained from `TF_GetFetchNodesListSize`.
Fails if `storage_size` is too small to hold the requested number of strings.
=back
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/experimental/saved_model/public/signature_def_function_metadata.h> */
TF_CAPI_EXPORT extern const TF_SignatureDefParamList*
TF_SignatureDefFunctionMetadataReturns(
const TF_SignatureDefFunctionMetadata* list);
=head2 TF_EnableXLACompilation
=over 2
When `enable` is true, set
tensorflow.ConfigProto.OptimizerOptions.global_jit_level to ON_1, and also
set XLA flag values to prepare for XLA compilation. Otherwise set
global_jit_level to OFF.
This and the next API are syntactic sugar over TF_SetConfig(), and are used
by clients that cannot read/write the tensorflow.ConfigProto proto.
TODO: Migrate to TF_CreateConfig() below.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TF_EnableXLACompilation(TF_SessionOptions* options,
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT void TF_SetXlaConstantFoldingDisabled(
unsigned char should_enable);
=head2 TF_CreateConfig
=over 2
Create a serialized tensorflow.ConfigProto proto, where:
a) ConfigProto.optimizer_options.global_jit_level is set to ON_1 if
`enable_xla_compilation` is non-zero, and OFF otherwise.
b) ConfigProto.gpu_options.allow_growth is set to `gpu_memory_allow_growth`.
c) ConfigProto.device_count is set to `num_cpu_devices`.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern TF_Buffer* TF_CreateConfig(
unsigned char enable_xla_compilation, unsigned char gpu_memory_allow_growth,
unsigned int num_cpu_devices);
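A sketch that builds a config and applies it to session options via C<TF_SetConfig>, assuming an existing C<TF_Status* status>:

  /* XLA compilation off, GPU memory growth on, one CPU device. */
  TF_Buffer* config = TF_CreateConfig(0, 1, 1);
  TF_SessionOptions* opts = TF_NewSessionOptions();
  TF_SetConfig(opts, config->data, config->length, status);
  TF_DeleteBuffer(config);
  /* ... create a session with opts, then TF_DeleteSessionOptions(opts) ... */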
=head2 TF_CreateRunOptions
=over 2
Create a serialized tensorflow.RunOptions proto, where RunOptions.trace_level
is set to FULL_TRACE if `enable_full_trace` is non-zero, and NO_TRACE
otherwise.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern TF_Buffer* TF_CreateRunOptions(
unsigned char enable_full_trace);
=head2 TF_GraphDebugString
=over 2
Returns the graph content in a human-readable format, with length set in
`len`. The format is subject to change in the future.
The returned string is heap-allocated, and the caller should call free() on it.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern const char* TF_GraphDebugString(TF_Graph* graph,
size_t* len);
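A sketch that prints the debug string and releases it, assuming an existing C<TF_Graph* graph> (and C<< <stdio.h> >>/C<< <stdlib.h> >>):

  size_t len = 0;
  const char* dbg = TF_GraphDebugString(graph, &len);
  fwrite(dbg, 1, len, stdout);
  free((void*) dbg);  /* heap-allocated; the caller must free it */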
=head2 TF_FunctionDebugString
=over 2
Returns the function content in a human-readable format, with length set in
`len`. The format is subject to change in the future.
The returned string is heap-allocated, and the caller should call free() on
it. The return type is deliberately not const char*, because some foreign
language bindings (e.g. Swift) could not then call free() on the returned
pointer.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern char* TF_FunctionDebugString(TF_Function* func,
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
TF_CAPI_EXPORT extern void TFE_EnableCollectiveOps(TFE_Context* ctx,
const void* proto,
size_t proto_len,
TF_Status* status);
=head2 TFE_AbortCollectiveOps
=over 2
Aborts all ongoing collectives with the specified status. After the abort,
subsequent collectives will immediately fail with this status. To reset the
collectives, create a new EagerContext.
This is intended to be used when a peer failure is detected.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_AbortCollectiveOps(TFE_Context* ctx,
TF_Status* status);
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
TF_CAPI_EXPORT extern void TF_DeleteShapeAndTypeListArray(
TF_ShapeAndTypeList** shape_list_array, int num_items);
=head2 TFE_InferShapes
=over 2
Infer shapes for the given `op`. The arguments mimic the arguments of the
`shape_inference::InferenceContext` constructor. Note the following:
- The inputs of the `op` are not used for shape inference, so it is
OK not to have the inputs properly set in `op`. See `input_tensors`
if you want shape inference to consider the op's input tensors.
- The types need not be set in `input_shapes` as it is not used.
- The number of `input_tensors` should be the same as the number of items
in `input_shapes`.
The results are returned in `output_shapes` and
`output_resource_shapes_and_types`. The caller is responsible for freeing the
memory in these buffers by calling `TF_DeleteShapeAndTypeList`.
=back
/* From <tensorflow/c/c_api_experimental.h> */
lib/AI/TensorFlow/Libtensorflow/Manual/GPU.pod
of GPU VRAM. You can check if you have enough free VRAM by using the
C<nvidia-smi> command, which displays resource information as well as which
processes are currently using the GPU. If C<libtensorflow> is not able to
allocate enough memory, it will crash with an out-of-memory (OOM) error. This
is typical when running multiple programs that each use the GPU.
If you have multiple GPUs, you can control which GPUs your program can access
by using the
L<C<CUDA_VISIBLE_DEVICES> environment variable|https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#env-vars>
provided by the underlying CUDA library. This is typically
done by setting the variable in a C<BEGIN> block before loading
L<AI::TensorFlow::Libtensorflow>:
BEGIN {
# Set the specific GPU device that is available
# to this program to GPU index 0, which is the
# first GPU as listed in the output of `nvidia-smi`.
$ENV{CUDA_VISIBLE_DEVICES} = '0';
require AI::TensorFlow::Libtensorflow;
}
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod
say "We have a label count of $label_count. These labels include: ",
join ", ", List::Util::head( 5, @labels_map{ sort keys %labels_map } );
my @tags = ( 'serve' );
if( File::Which::which('saved_model_cli')) {
local $ENV{TF_CPP_MIN_LOG_LEVEL} = 3; # quiet the TensorFlow logger for the following command
system(qw(saved_model_cli show),
qw(--dir) => $model_base,
qw(--tag_set) => join(',', @tags),
qw(--signature_def) => 'serving_default'
) == 0 or die "Could not run saved_model_cli";
} else {
say "Install the tensorflow Python package to get the `saved_model_cli` command.";
}
my $opt = AI::TensorFlow::Libtensorflow::SessionOptions->New;
my $graph = AI::TensorFlow::Libtensorflow::Graph->New;
my $session = AI::TensorFlow::Libtensorflow::Session->LoadFromSavedModel(
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod
my %pdl_output_by_name = map {
$_ => FloatTFTensorToPDL( $tftensor_output_by_name->{$_} )
} keys $tftensor_output_by_name->%*;
undef;
my $min_score_thresh = 0.30;
my $which_detect = which( $pdl_output_by_name{detection_scores} > $min_score_thresh );
my %subset;
$subset{detection_boxes} = $pdl_output_by_name{detection_boxes}->dice('X', $which_detect);
$subset{detection_classes} = $pdl_output_by_name{detection_classes}->dice($which_detect);
$subset{detection_scores} = $pdl_output_by_name{detection_scores}->dice($which_detect);
$subset{detection_class_labels}->@* = map { $labels_map{$_} } $subset{detection_classes}->list;
p %subset;
use PDL::Graphics::Gnuplot;
my $plot_output_path = 'objects-detected.png';
my $gp = gpwin('pngcairo', font => ",12", output => $plot_output_path, aa => 2, size => [10] );
my @qual_cmap = ('#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6');
$gp->options(
map {
my $idx = $_;
my $lc_rgb = $qual_cmap[ $subset{detection_classes}->slice("($idx)")->squeeze % @qual_cmap ];
my $box_corners_yx_norm = $subset{detection_boxes}->slice([],$idx,[0,0,0]);
$box_corners_yx_norm->reshape(2,2);
my $box_corners_yx_img = $box_corners_yx_norm * $pdl_images[0]->shape->slice('-1:-2');
my $from_xy = join ",", $box_corners_yx_img->slice('-1:0,(0)')->list;
my $to_xy = join ",", $box_corners_yx_img->slice('-1:0,(1)')->list;
my $label_xy = join ",", $box_corners_yx_img->at(1,1), $box_corners_yx_img->at(0,1);
(
[ object => [ "rect" =>
from => $from_xy, to => $to_xy,
qq{front fs empty border lc rgb "$lc_rgb" lw 5} ], ],
[ label => [
sprintf("%s: %.1f",
$subset{detection_class_labels}[$idx],
100*$subset{detection_scores}->at($idx,0) ) =>
at => $label_xy, 'left',
offset => 'character 0,-0.25',
qq{font ",12" boxed front tc rgb "#ffffff"} ], ],
)
} 0..$subset{detection_boxes}->dim(1)-1
);
$gp->plot(
topcmds => q{set style textbox opaque fc "#505050f0" noborder},
square => 1,
yrange => [$pdl_images[0]->dim(2),0],
with => 'image', $pdl_images[0],
);
$gp->close;
IPerl->png( bytestream => path($plot_output_path)->slurp_raw ) if IN_IPERL;
use Filesys::DiskUsage qw/du/;
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod
=pod
=encoding UTF-8
=head1 NAME
AI::TensorFlow::Libtensorflow::Manual::Notebook::InferenceUsingTFHubCenterNetObjDetect - Using TensorFlow to do object detection using a pre-trained model
=head1 SYNOPSIS
The following tutorial is based on the L<TensorFlow Hub Object Detection Colab notebook|https://www.tensorflow.org/hub/tutorials/tf2_object_detection>. It uses a pre-trained model based on the I<CenterNet> architecture trained on the I<COCO 2017> dat...
Some of this code is identical to that of the C<InferenceUsingTFHubMobileNetV2Model> notebook. Please look there for an explanation of that code. As stated there, this will later be wrapped up into a high-level library to hide the details behind an API.
=head1 COLOPHON
The following document is either a POD file which can additionally be run as a Perl script or a Jupyter Notebook which can be run in L<IPerl|https://p3rl.org/Devel::IPerl> (viewable online at L<nbviewer|https://nbviewer.org/github/EntropyOrg/perl-AI-...
=over
=item *
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod
})+
>sgx;
my $label_count = List::Util::max keys %labels_map;
say "We have a label count of $label_count. These labels include: ",
join ", ", List::Util::head( 5, @labels_map{ sort keys %labels_map } );
=head2 Load the model and session
We define the tag set C<[ 'serve' ]> which we will use to load the model.
my @tags = ( 'serve' );
We can examine what computations are contained in the graph, in terms of the names of the inputs and outputs of its operations, by running C<saved_model_cli>.
if( File::Which::which('saved_model_cli')) {
local $ENV{TF_CPP_MIN_LOG_LEVEL} = 3; # quiet the TensorFlow logger for the following command
system(qw(saved_model_cli show),
qw(--dir) => $model_base,
qw(--tag_set) => join(',', @tags),
qw(--signature_def) => 'serving_default'
) == 0 or die "Could not run saved_model_cli";
} else {
say "Install the tensorflow Python package to get the `saved_model_cli` command.";
}
The above C<saved_model_cli> output shows that the model input is at C<serving_default_input_tensor:0>, which means the operation named C<serving_default_input_tensor> at index C<0>, and that there are multiple outputs with different shapes.
Per the L<model description|https://tfhub.dev/tensorflow/centernet/hourglass_512x512/1> on TensorFlow Hub:
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod
=item -
C<detection_scores>: a C<tf.float32> tensor of shape [N] containing detection scores.
=back
=back
Note that the above documentation has two errors: both C<num_detections> and C<detection_classes> are not of type C<tf.int>, but are actually C<tf.float32>.
Now we can load the model from that folder with the tag set C<[ 'serve' ]> by using the C<LoadFromSavedModel> constructor to create a C<::Graph> and a C<::Session> for that graph.
my $opt = AI::TensorFlow::Libtensorflow::SessionOptions->New;
my $graph = AI::TensorFlow::Libtensorflow::Graph->New;
my $session = AI::TensorFlow::Libtensorflow::Session->LoadFromSavedModel(
$opt, undef, $model_base, \@tags, $graph, undef, $s
);
AssertOK($s);
So let's use the names from the C<saved_model_cli> output to create our C<::Output> C<ArrayRef>s.
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod
undef;
=head2 Results summary
Then we use a score threshold to select the objects of interest.
my $min_score_thresh = 0.30;
my $which_detect = which( $pdl_output_by_name{detection_scores} > $min_score_thresh );
my %subset;
$subset{detection_boxes} = $pdl_output_by_name{detection_boxes}->dice('X', $which_detect);
$subset{detection_classes} = $pdl_output_by_name{detection_classes}->dice($which_detect);
$subset{detection_scores} = $pdl_output_by_name{detection_scores}->dice($which_detect);
$subset{detection_class_labels}->@* = map { $labels_map{$_} } $subset{detection_classes}->list;
p %subset;
The following uses the bounding boxes and class label information to draw boxes and labels on top of the image using Gnuplot.
use PDL::Graphics::Gnuplot;
my $plot_output_path = 'objects-detected.png';
my $gp = gpwin('pngcairo', font => ",12", output => $plot_output_path, aa => 2, size => [10] );
my @qual_cmap = ('#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6');
$gp->options(
map {
my $idx = $_;
my $lc_rgb = $qual_cmap[ $subset{detection_classes}->slice("($idx)")->squeeze % @qual_cmap ];
my $box_corners_yx_norm = $subset{detection_boxes}->slice([],$idx,[0,0,0]);
$box_corners_yx_norm->reshape(2,2);
my $box_corners_yx_img = $box_corners_yx_norm * $pdl_images[0]->shape->slice('-1:-2');
my $from_xy = join ",", $box_corners_yx_img->slice('-1:0,(0)')->list;
my $to_xy = join ",", $box_corners_yx_img->slice('-1:0,(1)')->list;
my $label_xy = join ",", $box_corners_yx_img->at(1,1), $box_corners_yx_img->at(0,1);
(
[ object => [ "rect" =>
from => $from_xy, to => $to_xy,
qq{front fs empty border lc rgb "$lc_rgb" lw 5} ], ],
[ label => [
sprintf("%s: %.1f",
$subset{detection_class_labels}[$idx],
100*$subset{detection_scores}->at($idx,0) ) =>
at => $label_xy, 'left',
offset => 'character 0,-0.25',
qq{font ",12" boxed front tc rgb "#ffffff"} ], ],
)
} 0..$subset{detection_boxes}->dim(1)-1
);
$gp->plot(
topcmds => q{set style textbox opaque fc "#505050f0" noborder},
square => 1,
yrange => [$pdl_images[0]->dim(2),0],
with => 'image', $pdl_images[0],
);
$gp->close;
IPerl->png( bytestream => path($plot_output_path)->slurp_raw ) if IN_IPERL;
=head1 RESOURCE USAGE
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod
$pdl;
}
# Model handle
my $model_uri = URI->new( 'https://tfhub.dev/deepmind/enformer/1' );
$model_uri->query_form( 'tf-hub-format' => 'compressed' );
my $model_base = substr( $model_uri->path, 1 ) =~ s,/,_,gr;
my $model_archive_path = "${model_base}.tar.gz";
my $model_sequence_length = 393_216; # bp
# Human targets from Basenji2 dataset
my $targets_uri = URI->new('https://raw.githubusercontent.com/calico/basenji/master/manuscripts/cross2020/targets_human.txt');
my $targets_path = 'targets_human.txt';
# Human reference genome
my $hg_uri = URI->new("http://hgdownload.cse.ucsc.edu/goldenPath/hg38/bigZips/hg38.fa.gz");
my $hg_gz_path = "hg38.fa.gz";
# From http://hgdownload.cse.ucsc.edu/goldenPath/hg38/bigZips/md5sum.txt
my $hg_md5_digest = "1c9dcaddfa41027f17cd8f7a82c7293b";
my $clinvar_uri = URI->new('https://ftp.ncbi.nlm.nih.gov/pub/clinvar/vcf_GRCh38/clinvar.vcf.gz');
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod
my $delta = ($self->start + $self->end ) % 2;
return $center + $delta;
}
sub resize {
my ($self, $width) = @_;
my $new_interval = $self->clone;
my $center = $self->center;
my $half = int( ($width-1) / 2 );
my $offset = ($width-1) % 2;
$new_interval->start( $center - $half - $offset );
$new_interval->end( $center + $half );
return $new_interval;
}
use overload '""' => \&_op_stringify;
sub _op_stringify { sprintf "%s:%s", $_[0]->seq_id // "(no sequence)", $_[0]->to_FTstring }
}
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod
);
use PDL::Graphics::Gnuplot;
my $plot_output_path = 'enformer-target-interval-tracks.png';
my $gp = gpwin('pngcairo', font => ",10", output => $plot_output_path, size => [10,2. * @tracks], aa => 2 );
$gp->multiplot( layout => [1, scalar @tracks], title => $target_interval );
$gp->options(
offsets => [ graph => "0.01, 0, 0, 0" ],
lmargin => "at screen 0.05",
);
my $x = zeroes($predictions_p->dim(1))->xlinvals($target_interval->start, $target_interval->end);
my @tics_opts = (mirror => 0, out => 1);
for my $i (0..$#tracks) {
my ($title, $id, $y) = @{$tracks[$i]};
$gp->plot( {
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod
=encoding UTF-8
=head1 NAME
AI::TensorFlow::Libtensorflow::Manual::Notebook::InferenceUsingTFHubEnformerGeneExprPredModel - Using TensorFlow to do gene expression prediction using a pre-trained model
=head1 SYNOPSIS
The following tutorial is based on the L<Enformer usage notebook|https://github.com/deepmind/deepmind-research/blob/master/enformer/enformer-usage.ipynb>. It uses a pre-trained model based on a transformer architecture trained as described in Avsec e...
Running the code requires an Internet connection to download the model (from Google servers) and datasets (from GitHub, UCSC, and NIH).
Some of this code is identical to that of the C<InferenceUsingTFHubMobileNetV2Model> notebook. Please look there for an explanation of that code. As stated there, this will later be wrapped up into a high-level library to hide the details behind an API.
B<NOTE>: If running this model, please be aware that
=over
=item *
the Docker image takes 3 GB or more of disk space;
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod
=over
=item *
L<Enformer model|https://tfhub.dev/deepmind/enformer/1> from
> Avsec Ž, Agarwal V, Visentin D, Ledsam JR, Grabska-Barwinska A, Taylor KR, Assael Y, Jumper J, Kohli P, Kelley DR. Effective gene expression prediction from sequence by integrating long-range interactions. I<Nat Methods>. 2021 Oct;B<18(10)>:1196...
=item *
L<Human target dataset|https://github.com/calico/basenji/tree/master/manuscripts/cross2020> from
> Kelley DR. Cross-species regulatory sequence activity prediction. I<PLoS Comput Biol>. 2020 Jul 20;B<16(7)>:e1008050. doi: L<10.1371/journal.pcbi.1008050|https://doi.org/10.1371/journal.pcbi.1008050>. PMID: L<32687525|https://pubmed.ncbi.nlm.nih....
=item *
L<UCSC hg38 genome|https://www.ncbi.nlm.nih.gov/assembly/GCA_000001405.15>. More info at L<http://hgdownload.cse.ucsc.edu/goldenPath/hg38/bigZips/>; L<Genome Reference Consortium Human Build 38|https://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.26/>...
> Schneider VA, Graves-Lindsay T, Howe K, Bouk N, Chen HC, Kitts PA, Murphy TD, Pruitt KD, Thibaud-Nissen F, Albracht D, Fulton RS, Kremitzki M, Magrini V, Markovic C, McGrath S, Steinberg KM, Auger K, Chow W, Collins J, Harden G, Hubbard T, Pelan ...
=item *
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod
=back
# Model handle
my $model_uri = URI->new( 'https://tfhub.dev/deepmind/enformer/1' );
$model_uri->query_form( 'tf-hub-format' => 'compressed' );
my $model_base = substr( $model_uri->path, 1 ) =~ s,/,_,gr;
my $model_archive_path = "${model_base}.tar.gz";
my $model_sequence_length = 393_216; # bp
# Human targets from Basenji2 dataset
my $targets_uri = URI->new('https://raw.githubusercontent.com/calico/basenji/master/manuscripts/cross2020/targets_human.txt');
my $targets_path = 'targets_human.txt';
# Human reference genome
my $hg_uri = URI->new("http://hgdownload.cse.ucsc.edu/goldenPath/hg38/bigZips/hg38.fa.gz");
my $hg_gz_path = "hg38.fa.gz";
# From http://hgdownload.cse.ucsc.edu/goldenPath/hg38/bigZips/md5sum.txt
my $hg_md5_digest = "1c9dcaddfa41027f17cd8f7a82c7293b";
my $clinvar_uri = URI->new('https://ftp.ncbi.nlm.nih.gov/pub/clinvar/vcf_GRCh38/clinvar.vcf.gz');
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod
}
say "Checking with saved_model_cli scan:";
saved_model_cli( qw(scan),
qw(--dir) => $model_base,
);
B<STREAM (STDOUT)>:
Checking with saved_model_cli scan:
MetaGraph with tag set ['serve'] does not contain the default denylisted ops: {'ReadFile', 'PrintV2', 'WriteFile'}
B<RESULT>:
1
We need to see what the inputs and outputs of this model are, so C<saved_model_cli show> should show us that:
saved_model_cli( qw(show),
qw(--dir) => $model_base,
qw(--all),
);
B<STREAM (STDOUT)>:
MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:
signature_def['__saved_model_init_op']:
The given SavedModel SignatureDef contains the following input(s):
The given SavedModel SignatureDef contains the following output(s):
outputs['__saved_model_init_op'] tensor_info:
dtype: DT_INVALID
shape: unknown_rank
name: NoOp
Method name is:
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod
tf.saved_model.save( imported_model , out_path )
EOF
saved_model_cli( qw(show),
qw(--dir) => $new_model_base,
qw(--all),
);
B<STREAM (STDOUT)>:
MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:
signature_def['__saved_model_init_op']:
The given SavedModel SignatureDef contains the following input(s):
The given SavedModel SignatureDef contains the following output(s):
outputs['__saved_model_init_op'] tensor_info:
dtype: DT_INVALID
shape: unknown_rank
name: NoOp
Method name is:
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod
Function Name: 'predict_on_batch'
Option #1
Callable with:
Argument #1
args_0: TensorSpec(shape=(None, 393216, 4), dtype=tf.float32, name='args_0')
B<RESULT>:
1
We want to use the C<serve> tag-set and
=over
=item *
the input C<args_0> which has the name C<serving_default_args_0:0> and
=item *
the output C<human> which has the name C<StatefulPartitionedCall:0>.
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod
my $delta = ($self->start + $self->end ) % 2;
return $center + $delta;
}
sub resize {
my ($self, $width) = @_;
my $new_interval = $self->clone;
my $center = $self->center;
my $half = int( ($width-1) / 2 );
my $offset = ($width-1) % 2;
$new_interval->start( $center - $half - $offset );
$new_interval->end( $center + $half );
return $new_interval;
}
use overload '""' => \&_op_stringify;
sub _op_stringify { sprintf "%s:%s", $_[0]->seq_id // "(no sequence)", $_[0]->to_FTstring }
}
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod
);
use PDL::Graphics::Gnuplot;
my $plot_output_path = 'enformer-target-interval-tracks.png';
my $gp = gpwin('pngcairo', font => ",10", output => $plot_output_path, size => [10,2. * @tracks], aa => 2 );
$gp->multiplot( layout => [1, scalar @tracks], title => $target_interval );
$gp->options(
offsets => [ graph => "0.01, 0, 0, 0" ],
lmargin => "at screen 0.05",
);
my $x = zeroes($predictions_p->dim(1))->xlinvals($target_interval->start, $target_interval->end);
my @tics_opts = (mirror => 0, out => 1);
for my $i (0..$#tracks) {
my ($title, $id, $y) = @{$tracks[$i]};
$gp->plot( {
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod
die "Labels should have @{[ IMAGENET_LABEL_COUNT_WITH_BG ]} items"
unless @labels == IMAGENET_LABEL_COUNT_WITH_BG;
say "Got labels: ", join( ", ", List::Util::head(5, @labels) ), ", etc.";
my @tags = ( 'serve' );
if( File::Which::which('saved_model_cli')) {
local $ENV{TF_CPP_MIN_LOG_LEVEL} = 3; # quiet the TensorFlow logger for the following command
system(qw(saved_model_cli show),
qw(--dir) => $model_base,
qw(--tag_set) => join(',', @tags),
qw(--signature_def) => 'serving_default'
) == 0 or die "Could not run saved_model_cli";
} else {
say "Install the tensorflow Python package to get the `saved_model_cli` command.";
}
my $opt = AI::TensorFlow::Libtensorflow::SessionOptions->New;
my $graph = AI::TensorFlow::Libtensorflow::Graph->New;
my $session = AI::TensorFlow::Libtensorflow::Session->LoadFromSavedModel(
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod
=pod
=encoding UTF-8
=head1 NAME
AI::TensorFlow::Libtensorflow::Manual::Notebook::InferenceUsingTFHubMobileNetV2Model - Using TensorFlow to do image classification using a pre-trained model
=head1 SYNOPSIS
The following tutorial is based on the L<Image Classification with TensorFlow Hub notebook|https://github.com/tensorflow/docs/blob/master/site/en/hub/tutorials/image_classification.ipynb>. It uses a pre-trained model based on the I<MobileNet V2> arch...
Please look at the L<SECURITY note|https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md> regarding running models as models are programs. You can also use C<saved_model_cli scan> to check for L<security-sensitive "denylisted ops"|https:/...
If you would like to visualise a model, you can use L<Netron|https://github.com/lutzroeder/netron> on the C<.pb> file.
=head1 COLOPHON
The following document is either a POD file which can additionally be run as a Perl script or a Jupyter Notebook which can be run in L<IPerl|https://p3rl.org/Devel::IPerl> (viewable online at L<nbviewer|https://nbviewer.org/github/EntropyOrg/perl-AI-...
If you are running the code, you may optionally install the L<C<tensorflow> Python package|https://www.tensorflow.org/install/pip> in order to access the C<saved_model_cli> command, but this is only used for informational purposes.
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod
Downloading https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt to ImageNetLabels.txt
Saved model is in google_imagenet_mobilenet_v2_100_224_classification_5/saved_model.pb
Got labels: background, tench, goldfish, great white shark, tiger shark, etc.
B<RESULT>:
1
=head2 Load the model and session
We define the tag set C<[ 'serve' ]> which we will use to load the model.
my @tags = ( 'serve' );
B<RESULT>:
serve
We can examine what computations are contained in the graph, in terms of the names of the inputs and outputs of its operations, by running C<saved_model_cli>.
if( File::Which::which('saved_model_cli')) {
local $ENV{TF_CPP_MIN_LOG_LEVEL} = 3; # quiet the TensorFlow logger for the following command
system(qw(saved_model_cli show),
qw(--dir) => $model_base,
qw(--tag_set) => join(',', @tags),
qw(--signature_def) => 'serving_default'
) == 0 or die "Could not run saved_model_cli";
} else {
say "Install the tensorflow Python package to get the `saved_model_cli` command.";
}
B<STREAM (STDOUT)>:
The given SavedModel SignatureDef contains the following input(s):
inputs['inputs'] tensor_info:
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod
1
The above C<saved_model_cli> output shows that the model input is at C<serving_default_inputs:0> which means the operation named C<serving_default_inputs> at index C<0> and the output is at C<StatefulPartitionedCall:0> which means the operation named...
It also shows the type and shape of the C<TFTensor>s for those inputs and outputs. Together this is known as a signature.
For the C<input>, we have C<(-1, 224, 224, 3)> which is a L<common input image specification for TensorFlow Hub|https://www.tensorflow.org/hub/common_signatures/images#input>. This is known as C<channels_last> (or C<NHWC>) layout where the TensorFlow...
For the C<output>, we have C<(-1, 1001)>, which is C<[batch_size, num_classes]>, where the elements are the scores that the image received for each ImageNet class.
Now we can load the model from that folder with the tag set C<[ 'serve' ]> by using the C<LoadFromSavedModel> constructor to create a C<::Graph> and a C<::Session> for that graph.
my $opt = AI::TensorFlow::Libtensorflow::SessionOptions->New;
my $graph = AI::TensorFlow::Libtensorflow::Graph->New;
my $session = AI::TensorFlow::Libtensorflow::Session->LoadFromSavedModel(
$opt, undef, $model_base, \@tags, $graph, undef, $s
);
AssertOK($s);
So let's use the names from the C<saved_model_cli> output to create our C<::Output> C<ArrayRef>s.
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod
$probabilities_batched->at($label_index,$batch_idx),
) ];
}
say generate_table( rows => [ $header, @rows ], header_row => 1 );
print "\n";
}
}
B<DISPLAY>:
=for html <span style="display:inline-block;margin-left:1em;"><p><table style="width: 100%"><tr><td><tt>apple</tt></td><td><a href="https://upload.wikimedia.org/wikipedia/commons/1/15/Red_Apple.jpg"><img alt="apple" src="https://upload.wikimedia.org/...
my $p_approx_batched = $probabilities_batched->sumover->approx(1, 1e-5);
p $p_approx_batched;
say "All probabilities sum up to approximately 1" if $p_approx_batched->all->sclr;
B<STREAM (STDOUT)>:
All probabilities sum up to approximately 1
B<STREAM (STDERR)>:
lib/AI/TensorFlow/Libtensorflow/Manual/Quickstart.pod
which is defined by L<AI::TensorFlow::Libtensorflow::DataType>;
thus a C<TFTensor> is considered to be a "homogeneous data structure".
See L<Introduction to Tensors|https://www.tensorflow.org/guide/tensor> for more.
=item L<AI::TensorFlow::Libtensorflow::OperationDescription>, L<AI::TensorFlow::Libtensorflow::Operation>
An operation is a function that has inputs and outputs. It has a user-defined
name (such as C<MyAdder>) and a library-defined type (such as C<AddN>).
L<AI::TensorFlow::Libtensorflow::OperationDescription> is used to build an
operation that will be added to a graph of other operations; those other
operations can set the operation's inputs and get the operation's outputs.
These inputs and outputs have types and dimension specifications, so that the
operations only accept and emit certain C<TFTensor>s.
=item L<AI::TensorFlow::Libtensorflow::Graph>
A set of operations with inputs and outputs linked together. This computation
can be serialized along with parameters as part of
a L<SavedModel|https://www.tensorflow.org/guide/saved_model>.
=item L<AI::TensorFlow::Libtensorflow::Session>, L<AI::TensorFlow::Libtensorflow::SessionOptions>
A session drives the execution of a L<AI::TensorFlow::Libtensorflow::Graph>.
Specifics of how the session executes can be set via L<AI::TensorFlow::Libtensorflow::SessionOptions>.
=back
=head1 TUTORIALS
The object types in L</OBJECT TYPES> are used in the following tutorials:
=over 4
=item L<InferenceUsingTFHubMobileNetV2Model|AI::TensorFlow::Libtensorflow::Manual::Notebook::InferenceUsingTFHubMobileNetV2Model>: image classification tutorial
lib/AI/TensorFlow/Libtensorflow/Manual/Quickstart.pod
L<TensorFlow Docker requirements|https://www.tensorflow.org/install/docker#tensorflow_docker_requirements>
are met and that the correct flags are passed to C<docker run>, for example
C<<
docker run --rm --gpus all [...]
>>
More information about NVIDIA Docker containers can be found in the
NVIDIA Container Toolkit
L<Installation Guide|https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html>
(specifically L<Setting up NVIDIA Container Toolkit|https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#setting-up-nvidia-container-toolkit>)
and
L<User Guide|https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html>.
=head3 Diagnostics
When using the Docker GPU image, you may come across the error
C<<
nvidia-container-cli: initialization error: load library failed: libnvidia-ml.so.1: cannot open shared object file: no such file or directory: unknown.
>>
lib/AI/TensorFlow/Libtensorflow/Session.pm
B<Parameters>
=over 4
=item Maybe[TFBuffer] $run_options
Optional C<TFBuffer> containing a serialized representation of a `RunOptions` protocol buffer.
=item ArrayRef[TFOutput] $inputs
Inputs to set.
=item ArrayRef[TFTensor] $input_values
Values to assign to the inputs given by C<$inputs>.
=item ArrayRef[TFOutput] $outputs
Outputs to get.
=item ArrayRef[TFTensor] $output_values
t/upstream/CAPI/002_Status.t
subtest "(CAPI, Status)" => sub {
my $s = Status->New;
is $s->GetCode, AI::TensorFlow::Libtensorflow::Status::OK, 'OK code';
is $s->Message, '', 'empty message';
note 'Set status to CANCELLED';
$s->SetStatus('CANCELLED', 'cancel');
is $s->GetCode, AI::TensorFlow::Libtensorflow::Status::CANCELLED, 'CANCELLED code';
is $s->Message, 'cancel', 'check set message';
};
done_testing;
t/upstream/CAPI/003_Tensor.t
#!/usr/bin/env perl
use Test2::V0;
use lib 't/lib';
use TF_TestQuiet;
use aliased 'AI::TensorFlow::Libtensorflow';
use aliased 'AI::TensorFlow::Libtensorflow::Lib';
use aliased 'AI::TensorFlow::Libtensorflow::Tensor';
use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);
use FFI::Platypus::Buffer qw(window scalar_to_pointer);
use FFI::Platypus::Memory qw(memset free);
use AI::TensorFlow::Libtensorflow::Lib::_Alloc;
subtest "(CAPI, Tensor)" => sub {
my $n = 6;
my $num_bytes = $n * FLOAT->Size;
my $values_ptr = AI::TensorFlow::Libtensorflow::Lib::_Alloc->_tf_aligned_alloc($num_bytes);
window( my $values, $values_ptr, $num_bytes );
my @dims = (2, 3);
note "Creating tensor";
t/upstream/CAPI/006_MaybeMove.t
#!/usr/bin/env perl
use Test2::V0;
use lib 't/lib';
use TF_TestQuiet;
use aliased 'AI::TensorFlow::Libtensorflow';
use aliased 'AI::TensorFlow::Libtensorflow::Tensor';
use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);
use FFI::Platypus::Buffer qw(window scalar_to_pointer);
use FFI::Platypus::Memory qw(memset free);
use AI::TensorFlow::Libtensorflow::Lib::_Alloc;
subtest "(CAPI, MaybeMove)" => sub {
my $num_bytes = 6 * FLOAT->Size;
window( my $values,
AI::TensorFlow::Libtensorflow::Lib::_Alloc->_tf_aligned_alloc($num_bytes),
$num_bytes
);
my @dims = (2,3);
t/upstream/CAPI/014_SetShape.t
$dims->[1] = 3;
$graph->SetTensorShape( $feed_out_0, $dims, $s);
TF_Utils::AssertStatusOK($s);
note 'Fetch and see that the new value is returned.';
$returned_dims = $graph->GetTensorShape( $feed_out_0, $s );
TF_Utils::AssertStatusOK($s);
is $returned_dims, $dims, "Got shape [ @$dims ]";
note q{
Try to set 'unknown' with unknown rank on the shape and see that
it doesn't change.
};
$graph->SetTensorShape($feed_out_0, undef, $s);
TF_Utils::AssertStatusOK($s);
$num_dims = $graph->GetTensorNumDims( $feed_out_0, $s );
$returned_dims = $graph->GetTensorShape( $feed_out_0, $s );
TF_Utils::AssertStatusOK($s);
is $num_dims, 2, 'unchanged numdims';
is $returned_dims, [2,3], 'dims still [2 3]';
note q{
Try to set 'unknown' with same rank on the shape and see that
it doesn't change.
};
$graph->SetTensorShape($feed_out_0, [-1, -1], $s);
TF_Utils::AssertStatusOK($s);
$returned_dims = $graph->GetTensorShape( $feed_out_0, $s );
TF_Utils::AssertStatusOK($s);
is $returned_dims, [2,3], 'dims still [2 3]';
note 'Try to fetch a shape with the wrong num_dims';
pass 'This test not implemented for binding. Not possible to have invalid argument for num_dims.';
note 'Try to set an invalid shape (cannot change 2x3 to a 2x5).';
$dims->[1] = 5;
$graph->SetTensorShape( $feed_out_0, $dims, $s);
note TF_Utils::AssertStatusNotOK($s);
note 'Test for a scalar.';
my $three = TF_Utils::ScalarConst($graph, $s, 'scalar', INT32, 3);
TF_Utils::AssertStatusOK($s);
my $three_out_0 = Output->New({ oper => $three, index => 0 });
$num_dims = $graph->GetTensorNumDims( $three_out_0, $s );
t/upstream/CAPI/029_SavedModel.t
);
pass 'Skipping. Can not use C++ tensorflow::MetaGraphDef.';
TF_Utils::AssertStatusOK($s);
my $csession = TF_Utils::CSession->new( session => $session, status => $s );
pass 'Skipping getting signature_def.';
pass 'Skipping writing tensorflow::Example';
pass 'Skipping setting inputs';
pass 'Skipping setting outputs';
pass 'Skipping running session';
};
done_testing;
t/upstream/CAPI/032_TestBitcastFrom_Reshape.t
TF_Utils::AssertStatusOK($status);
my $same_tftensor = object {
call ElementCount => 6;
call ByteSize => 6 * UINT64->Size;
};
is $t_a, $same_tftensor, '6 elements in 2x3';
is $t_b, $same_tftensor, '6 elements in 3x2';
my $UINT64_pack = 'Q';
my $set_first_value = sub {
my ($t, $v) = @_;
memcpy scalar_to_pointer(${$t->Data}),
scalar_to_pointer(pack($UINT64_pack, $v)),
UINT64->Size;
};
my $get_first_value = sub { my ($t) = @_; unpack $UINT64_pack, ${$t->Data}; };
note 'Check that a write to one tensor shows up in the other.';
$set_first_value->($t_a, 4);
is $get_first_value->($t_b), 4, 'got 4 in tensor b';
$set_first_value->($t_b, 6);
is $get_first_value->($t_a), 6, 'got 6 in tensor a';
};
done_testing;
t/upstream/CAPI/034_TestTensorAligned.t
subtest "(CAPI, TestTensorAligned)" => sub {
my $dim = 7;
my $tensor_size_bytes = $dim * FLOAT->Size;
my $t_a = AI::TensorFlow::Libtensorflow::Tensor->Allocate(
FLOAT, [$dim], $tensor_size_bytes
);
if( $AI::TensorFlow::Libtensorflow::Lib::_Alloc::EIGEN_MAX_ALIGN_BYTES > 0 ) {
ok $t_a->IsAligned, 'is aligned';
} else {
pass 'No alignment set for library';
}
};
done_testing;
t/upstream/tensorflow/cc/saved_model/testdata/half_plus_two/00000123/assets/foo.txt
asset-file-contents