AI-TensorFlow-Libtensorflow


lib/AI/TensorFlow/Libtensorflow/ApiDefMap.pm  view on Meta::CPAN

Get($name, $status)
>>>

=back

  my $api_def_buf = $map->Get(
    'NoOp',
    my $status = AI::TensorFlow::Libtensorflow::Status->New
  );

  cmp_ok $api_def_buf->length, '>', 0, 'Got ApiDef buffer for NoOp operation';

B<Parameters>

=over 4

=item Str $name

Name of the operation to retrieve.

=item L<TFStatus|AI::TensorFlow::Libtensorflow::Lib::Types/TFStatus> $status

lib/AI/TensorFlow/Libtensorflow/Buffer.pm  view on Meta::CPAN

package AI::TensorFlow::Libtensorflow::Buffer;
# ABSTRACT: Buffer that holds pointer to data with length
$AI::TensorFlow::Libtensorflow::Buffer::VERSION = '0.0.7';
use strict;
use warnings;
use namespace::autoclean;
use AI::TensorFlow::Libtensorflow::Lib qw(arg);

my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
use FFI::C;
FFI::C->ffi($ffi);

lib/AI/TensorFlow/Libtensorflow/Buffer.pm  view on Meta::CPAN


use FFI::Platypus::Buffer;
use FFI::Platypus::Memory;





FFI::C->struct( 'TF_Buffer' => [
	data => 'opaque',
	length => 'size_t',
	_data_deallocator => 'opaque', # data_deallocator_t
	# this does not work?
	#_data_deallocator => 'data_deallocator_t',
]);
use Sub::Delete;
delete_sub 'DESTROY';

sub data_deallocator {
	my ($self, $coderef) = @_;

lib/AI/TensorFlow/Libtensorflow/Buffer.pm  view on Meta::CPAN

1;

__END__

=pod

=encoding UTF-8

=head1 NAME

AI::TensorFlow::Libtensorflow::Buffer - Buffer that holds pointer to data with length

=head1 SYNOPSIS

  use aliased 'AI::TensorFlow::Libtensorflow::Buffer' => 'Buffer';

=head1 DESCRIPTION

C<TFBuffer> is a data structure that stores a pointer to a block of data, the
length of the data, and optionally a deallocator function for memory
management.

This structure is typically used in C<libtensorflow> to store the data for a
serialized protocol buffer.
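
For example, here is an illustrative sketch using the constructors documented below (the bytes are only a stand-in for a real serialized protocol buffer):

  use AI::TensorFlow::Libtensorflow::Buffer;

  my $serialized = "\x0a\x00";   # placeholder bytes, not a real protobuf
  my $buf = AI::TensorFlow::Libtensorflow::Buffer->NewFromString(\$serialized);
  print $buf->length, " bytes\n";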

=head1 CONSTRUCTORS

=head2 New

=over 2

C<<<
New()
>>>

=back

  my $buffer = Buffer->New();

  ok $buffer, 'created an empty buffer';
  is $buffer->length, 0, 'with a length of 0';

Create an empty buffer. Useful for passing as an output parameter.

B<Returns>

=over 4

=item L<TFBuffer|AI::TensorFlow::Libtensorflow::Lib::Types/TFBuffer>

Empty buffer.

lib/AI/TensorFlow/Libtensorflow/Buffer.pm  view on Meta::CPAN

>>>

=back

Makes a copy of the input and sets an appropriate deallocator. Useful for
passing in read-only, input protobufs.

  my $data = 'bytes';
  my $buffer = Buffer->NewFromString(\$data);
  ok $buffer, 'create buffer from string';
  is $buffer->length, bytes::length($data), 'same length as string';

B<Parameters>

=over 4

=item ScalarRef[Bytes] $proto

=back

B<Returns>

lib/AI/TensorFlow/Libtensorflow/Buffer.pm  view on Meta::CPAN

=back

B<C API>: L<< C<TF_NewBufferFromString>|AI::TensorFlow::Libtensorflow::Manual::CAPI/TF_NewBufferFromString >>

=head1 ATTRIBUTES

=head2 data

An C<opaque> pointer to the buffer.

=head2 length

Length of the buffer as a C<size_t>.

=head2 data_deallocator

A C<CodeRef> for the deallocator.
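
A hedged sketch of setting it, assuming the accessor also acts as a setter when handed a C<CodeRef> (as the partial source shown earlier suggests); the callback arguments are an assumption based on the underlying C<data_deallocator_t> C signature:

  $buffer->data_deallocator(sub {
      my ($data, $length) = @_;   # assumed callback arguments: pointer and size
      # release $data here if it was allocated manually
  });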

=head1 DESTRUCTORS

=head2 DESTROY

lib/AI/TensorFlow/Libtensorflow/DataType.pm  view on Meta::CPAN


  my $size = $dtype->Size();

B<Returns>

=over 4

=item size_t

The number of bytes used for the DataType C<$dtype>. Returns C<0> for
variable-length types such as C<STRING> or for invalid types.

=back

B<C API>: L<< C<TF_DataTypeSize>|AI::TensorFlow::Libtensorflow::Manual::CAPI/TF_DataTypeSize >>
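
A brief illustrative example, assuming dtype constants such as C<FLOAT> and C<STRING> are importable from this module as in its synopsis:

  use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT STRING);

  print FLOAT->Size, "\n";    # 4 bytes for a single-precision float
  print STRING->Size, "\n";   # 0, since STRING is a variable-length type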

=head1 OPERATORS

=head2 C<< == >>

Numeric equality of the underlying enum integer value.
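
For example (a minimal sketch, again assuming importable dtype constants):

  use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT DOUBLE);

  print "same dtype\n"      if FLOAT == FLOAT;
  print "different dtype\n" unless FLOAT == DOUBLE;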

lib/AI/TensorFlow/Libtensorflow/Lib/FFIType/TFPtrPtrLenSizeArrayRefScalar.pm  view on Meta::CPAN

package AI::TensorFlow::Libtensorflow::Lib::FFIType::TFPtrPtrLenSizeArrayRefScalar;
# ABSTRACT: Type to hold string list as void** strings, size_t* lengths, int num_items
$AI::TensorFlow::Libtensorflow::Lib::FFIType::TFPtrPtrLenSizeArrayRefScalar::VERSION = '0.0.7';
use strict;
use warnings;
# TODO implement this

sub perl_to_native {
	...
}

sub perl_to_native_post {

lib/AI/TensorFlow/Libtensorflow/Lib/FFIType/TFPtrPtrLenSizeArrayRefScalar.pm  view on Meta::CPAN

1;

__END__

=pod

=encoding UTF-8

=head1 NAME

AI::TensorFlow::Libtensorflow::Lib::FFIType::TFPtrPtrLenSizeArrayRefScalar - Type to hold string list as void** strings, size_t* lengths, int num_items

=head1 AUTHOR

Zakariyya Mughal <zmughal@cpan.org>

=head1 COPYRIGHT AND LICENSE

This software is Copyright (c) 2022-2023 by Auto-Parallel Technologies, Inc.

This is free software, licensed under:

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_ColocateWith(TF_OperationDescription* desc,
                                             TF_Operation* op);

=head2 TF_SetAttrString

=over 2

  `value` must point to a string of length `length` bytes.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_SetAttrString(TF_OperationDescription* desc,
                                              const char* attr_name,
                                              const void* value, size_t length);

=head2 TF_SetAttrStringList

=over 2

  `values` and `lengths` each must have lengths `num_values`.
  `values[i]` must point to a string of length `lengths[i]` bytes.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_SetAttrStringList(TF_OperationDescription* desc,
                                                  const char* attr_name,
                                                  const void* const* values,
                                                  const size_t* lengths,
                                                  int num_values);

=head2 TF_SetAttrInt

=over 2

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_SetAttrInt(TF_OperationDescription* desc,

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_SetAttrPlaceholder(TF_OperationDescription* desc,
                                                   const char* attr_name,
                                                   const char* placeholder);

=head2 TF_SetAttrFuncName

=over 2

  Set a 'func' attribute to the specified name.
  `value` must point to a string of length `length` bytes.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_SetAttrFuncName(TF_OperationDescription* desc,
                                                const char* attr_name,
                                                const char* value, size_t length);

=head2 TF_SetAttrShape

=over 2

  Set `num_dims` to -1 to represent "unknown rank".  Otherwise,
  `dims` points to an array of length `num_dims`.  `dims[i]` must be
  >= -1, with -1 meaning "unknown dimension".

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_SetAttrShape(TF_OperationDescription* desc,
                                             const char* attr_name,
                                             const int64_t* dims, int num_dims);

=head2 TF_SetAttrShapeList

=over 2

  `dims` and `num_dims` must point to arrays of length `num_shapes`.
  Set `num_dims[i]` to -1 to represent "unknown rank".  Otherwise,
  `dims[i]` points to an array of length `num_dims[i]`.  `dims[i][j]`
  must be >= -1, with -1 meaning "unknown dimension".

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_SetAttrShapeList(TF_OperationDescription* desc,
                                                 const char* attr_name,
                                                 const int64_t* const* dims,
                                                 const int* num_dims,
                                                 int num_shapes);

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN


  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_SetAttrTensorShapeProto(
      TF_OperationDescription* desc, const char* attr_name, const void* proto,
      size_t proto_len, TF_Status* status);

=head2 TF_SetAttrTensorShapeProtoList

=over 2

  `protos` and `proto_lens` must point to arrays of length `num_shapes`.
  `protos[i]` must point to an array of `proto_lens[i]` bytes
  representing a binary-serialized TensorShapeProto.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_SetAttrTensorShapeProtoList(
      TF_OperationDescription* desc, const char* attr_name,
      const void* const* protos, const size_t* proto_lens, int num_shapes,
      TF_Status* status);

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  TF_CAPI_EXPORT extern void TF_SetAttrTensorList(TF_OperationDescription* desc,
                                                  const char* attr_name,
                                                  TF_Tensor* const* values,
                                                  int num_values,
                                                  TF_Status* status);

=head2 TF_SetAttrValueProto

=over 2

  `proto` should point to a sequence of bytes of length `proto_len`
  representing a binary serialization of an AttrValue protocol
  buffer.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_SetAttrValueProto(TF_OperationDescription* desc,
                                                  const char* attr_name,
                                                  const void* proto,
                                                  size_t proto_len,

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern TF_Output TF_OperationInput(TF_Input oper_in);

=head2 TF_OperationAllInputs

=over 2

  Get list of all inputs of a specific operation.  `inputs` must point to
  an array of length at least `max_inputs` (ideally set to
  TF_OperationNumInputs(oper)).  Beware that a concurrent
  modification of the graph can increase the number of inputs of
  an operation.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationAllInputs(TF_Operation* oper,
                                                   TF_Output* inputs,
                                                   int max_inputs);

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern int TF_OperationOutputNumConsumers(TF_Output oper_out);

=head2 TF_OperationOutputConsumers

=over 2

  Get list of all current consumers of a specific output of an
  operation.  `consumers` must point to an array of length at least
  `max_consumers` (ideally set to
  TF_OperationOutputNumConsumers(oper_out)).  Beware that a concurrent
  modification of the graph can increase the number of consumers of
  an operation.  Returns the number of output consumers (should match
  TF_OperationOutputNumConsumers(oper_out)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern int TF_OperationOutputConsumers(TF_Output oper_out,

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern int TF_OperationNumControlInputs(TF_Operation* oper);

=head2 TF_OperationGetControlInputs

=over 2

  Get list of all control inputs to an operation.  `control_inputs` must
  point to an array of length `max_control_inputs` (ideally set to
  TF_OperationNumControlInputs(oper)).  Returns the number of control
  inputs (should match TF_OperationNumControlInputs(oper)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern int TF_OperationGetControlInputs(
      TF_Operation* oper, TF_Operation** control_inputs, int max_control_inputs);

=head2 TF_OperationNumControlOutputs

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern int TF_OperationNumControlOutputs(TF_Operation* oper);

=head2 TF_OperationGetControlOutputs

=over 2

  Get the list of operations that have `*oper` as a control input.
  `control_outputs` must point to an array of length at least
  `max_control_outputs` (ideally set to
  TF_OperationNumControlOutputs(oper)). Beware that a concurrent
  modification of the graph can increase the number of control
  outputs.  Returns the number of control outputs (should match
  TF_OperationNumControlOutputs(oper)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern int TF_OperationGetControlOutputs(

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN


  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern TF_AttrMetadata TF_OperationGetAttrMetadata(
      TF_Operation* oper, const char* attr_name, TF_Status* status);

=head2 TF_OperationGetAttrString

=over 2

  Fills in `value` with the value of the attribute `attr_name`.  `value` must
  point to an array of length at least `max_length` (ideally set to
  TF_AttrMetadata.total_size from TF_OperationGetAttrMetadata(oper,
  attr_name)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrString(TF_Operation* oper,
                                                       const char* attr_name,
                                                       void* value,
                                                       size_t max_length,
                                                       TF_Status* status);

=head2 TF_OperationGetAttrStringList

=over 2

  Get the list of strings in the value of the attribute `attr_name`.  Fills in
  `values` and `lengths`, each of which must point to an array of length at
  least `max_values`.
  
  The elements of values will point to addresses in `storage` which must be at
  least `storage_size` bytes in length.  Ideally, max_values would be set to
  TF_AttrMetadata.list_size and `storage` would be at least
  TF_AttrMetadata.total_size, obtained from TF_OperationGetAttrMetadata(oper,
  attr_name).
  
  Fails if storage_size is too small to hold the requested number of strings.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrStringList(
      TF_Operation* oper, const char* attr_name, void** values, size_t* lengths,
      int max_values, void* storage, size_t storage_size, TF_Status* status);

=head2 TF_OperationGetAttrInt

=over 2

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrInt(TF_Operation* oper,
                                                    const char* attr_name,
                                                    int64_t* value,
                                                    TF_Status* status);

=head2 TF_OperationGetAttrIntList

=over 2

  Fills in `values` with the value of the attribute `attr_name` of `oper`.
  `values` must point to an array of length at least `max_values` (ideally set
  TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper,
  attr_name)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrIntList(TF_Operation* oper,
                                                        const char* attr_name,
                                                        int64_t* values,
                                                        int max_values,

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  TF_CAPI_EXPORT extern void TF_OperationGetAttrFloat(TF_Operation* oper,
                                                      const char* attr_name,
                                                      float* value,
                                                      TF_Status* status);

=head2 TF_OperationGetAttrFloatList

=over 2

  Fills in `values` with the value of the attribute `attr_name` of `oper`.
  `values` must point to an array of length at least `max_values` (ideally set
  to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper,
  attr_name)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrFloatList(TF_Operation* oper,
                                                          const char* attr_name,
                                                          float* values,
                                                          int max_values,

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  TF_CAPI_EXPORT extern void TF_OperationGetAttrBool(TF_Operation* oper,
                                                     const char* attr_name,
                                                     unsigned char* value,
                                                     TF_Status* status);

=head2 TF_OperationGetAttrBoolList

=over 2

  Fills in `values` with the value of the attribute `attr_name` of `oper`.
  `values` must point to an array of length at least `max_values` (ideally set
  to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper,
  attr_name)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrBoolList(TF_Operation* oper,
                                                         const char* attr_name,
                                                         unsigned char* values,
                                                         int max_values,

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  TF_CAPI_EXPORT extern void TF_OperationGetAttrType(TF_Operation* oper,
                                                     const char* attr_name,
                                                     TF_DataType* value,
                                                     TF_Status* status);

=head2 TF_OperationGetAttrTypeList

=over 2

  Fills in `values` with the value of the attribute `attr_name` of `oper`.
  `values` must point to an array of length at least `max_values` (ideally set
  to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper,
  attr_name)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrTypeList(TF_Operation* oper,
                                                         const char* attr_name,
                                                         TF_DataType* values,
                                                         int max_values,
                                                         TF_Status* status);

=head2 TF_OperationGetAttrShape

=over 2

  Fills in `value` with the value of the attribute `attr_name` of `oper`.
  `values` must point to an array of length at least `num_dims` (ideally set to
  TF_Attr_Meta.size from TF_OperationGetAttrMetadata(oper, attr_name)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrShape(TF_Operation* oper,
                                                      const char* attr_name,
                                                      int64_t* value,
                                                      int num_dims,
                                                      TF_Status* status);

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrTensorShapeProto(
      TF_Operation* oper, const char* attr_name, TF_Buffer* value,
      TF_Status* status);

=head2 TF_OperationGetAttrTensorShapeProtoList

=over 2

  Fills in `values` with binary-serialized TensorShapeProto values of the
  attribute `attr_name` of `oper`. `values` must point to an array of length at
  least `num_values` (ideally set to TF_AttrMetadata.list_size from
  TF_OperationGetAttrMetadata(oper, attr_name)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrTensorShapeProtoList(
      TF_Operation* oper, const char* attr_name, TF_Buffer** values,
      int max_values, TF_Status* status);

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  TF_CAPI_EXPORT extern void TF_OperationGetAttrTensor(TF_Operation* oper,
                                                       const char* attr_name,
                                                       TF_Tensor** value,
                                                       TF_Status* status);

=head2 TF_OperationGetAttrTensorList

=over 2

  Fills in `values` with the TF_Tensor values of the attribute `attr_name` of
  `oper`. `values` must point to an array of TF_Tensor* of length at least
  `max_values` (ideally set to TF_AttrMetadata.list_size from
  TF_OperationGetAttrMetadata(oper, attr_name)).
  
  The caller takes ownership of all the non-null TF_Tensor* entries in `values`
  (which can be deleted using TF_DeleteTensor(values[i])).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrTensorList(TF_Operation* oper,

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN


=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern int TF_OperationGetNumAttrs(TF_Operation* oper);

=head2 TF_OperationGetAttrNameLength

=over 2

  Get the length of the name of the ith attribute, or -1 if there is not an
  ith attribute.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern int TF_OperationGetAttrNameLength(TF_Operation* oper,
                                                          int i);

=head2 TF_OperationGetAttrName

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN


=head2 TF_GraphImportGraphDefWithReturnOutputs

=over 2

  Import the graph serialized in `graph_def` into `graph`.
  Convenience function for when only return outputs are needed.
  
  `num_return_outputs` must be the number of return outputs added (i.e. the
  result of TF_ImportGraphDefOptionsNumReturnOutputs()).  If
  `num_return_outputs` is non-zero, `return_outputs` must be of length
  `num_return_outputs`. Otherwise it can be null.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_GraphImportGraphDefWithReturnOutputs(
      TF_Graph* graph, const TF_Buffer* graph_def,
      const TF_ImportGraphDefOptions* options, TF_Output* return_outputs,
      int num_return_outputs, TF_Status* status);

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern int TF_GraphNumFunctions(TF_Graph* g);

=head2 TF_GraphGetFunctions

=over 2

  Fills in `funcs` with the TF_Function* registered in `g`.
  `funcs` must point to an array of TF_Function* of length at least
  `max_func`. In usual usage, max_func should be set to the result of
  TF_GraphNumFunctions(g). In this case, all the functions registered in
  `g` will be returned. Else, an unspecified subset.
  
  If successful, returns the number of TF_Function* successfully set in
  `funcs` and sets status to OK. The caller takes ownership of
  all the returned TF_Functions. They must be deleted with TF_DeleteFunction.
  On error, returns 0, sets status to the encountered error, and the contents
  of funcs will be undefined.

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

            `inputs`. These operation names should start with a letter.
            Normalization will convert all letters to lowercase and
            non-alphanumeric characters to '_' to make resulting names match
            the "[a-z][a-z0-9_]*" pattern for operation argument names.
            `inputs` cannot contain the same tensor twice.
   noutputs - number of elements in `outputs` array
   outputs - array of TF_Outputs that specify the outputs of the function.
             If `noutputs` is zero (the function returns no outputs), `outputs`
             can be null. `outputs` can contain the same tensor more than once.
   output_names - The names of the function's outputs. `output_names` array
                  must either have the same length as `outputs`
                  (i.e. `noutputs`) or be null. In the former case,
                  the names should match the regular expression for ArgDef
                  names - "[a-z][a-z0-9_]*". In the latter case,
                  names for outputs will be generated automatically.
   opts - various options for the function, e.g. XLA's inlining control.
   description - optional human-readable description of this function.
   status - Set to OK on success and an appropriate error on failure.
  
  Note that when the same TF_Output is listed as both an input and an output,
  the corresponding function's output will equal to this input,

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern TF_Function* TF_FunctionImportFunctionDef(
      const void* proto, size_t proto_len, TF_Status* status);

=head2 TF_FunctionSetAttrValueProto

=over 2

  Sets function attribute named `attr_name` to value stored in `proto`.
  If this attribute is already set to another value, it is overridden.
  `proto` should point to a sequence of bytes of length `proto_len`
  representing a binary serialization of an AttrValue protocol
  buffer.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_FunctionSetAttrValueProto(TF_Function* func,
                                                          const char* attr_name,
                                                          const void* proto,
                                                          size_t proto_len,

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

=over 2

  Allocate and return a new Tensor.
  
  This function is an alternative to TF_NewTensor and should be used when
  memory is allocated to pass the Tensor to the C API. The allocated memory
  satisfies TensorFlow's memory alignment preferences and should be preferred
  over calling malloc and free.
  
  The caller must set the Tensor values by writing them to the pointer returned
  by TF_TensorData with length TF_TensorByteSize.

=back

  /* From <tensorflow/c/tf_tensor.h> */
  TF_CAPI_EXPORT extern TF_Tensor* TF_AllocateTensor(TF_DataType,
                                                     const int64_t* dims,
                                                     int num_dims, size_t len);
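
A hedged Perl-level sketch of the same idea, assuming the distribution's C<AI::TensorFlow::Libtensorflow::Tensor> wrapper exposes this call as C<Allocate>, taking a dtype, an array reference of dimensions, and a byte length:

  use AI::TensorFlow::Libtensorflow;
  use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);

  my @dims = (2, 2);
  my $len  = FLOAT->Size * 2 * 2;   # 16 bytes for a 2x2 single-precision tensor
  my $t    = AI::TensorFlow::Libtensorflow::Tensor->Allocate(FLOAT, \@dims, $len);
  print $t->ByteSize, "\n";         # expected to match $len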

=head2 TF_TensorMaybeMove

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN


=back

  /* From <tensorflow/c/tf_tensor.h> */
  TF_CAPI_EXPORT extern int TF_NumDims(const TF_Tensor*);

=head2 TF_Dim

=over 2

  Return the length of the tensor in the "dim_index" dimension.
  REQUIRES: 0 <= dim_index < TF_NumDims(tensor)

=back

  /* From <tensorflow/c/tf_tensor.h> */
  TF_CAPI_EXPORT extern int64_t TF_Dim(const TF_Tensor* tensor, int dim_index);

=head2 TF_TensorByteSize

=over 2

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

=back

  /* From <tensorflow/c/tf_tstring.h> */
  TF_CAPI_EXPORT extern void TF_StringDealloc(TF_TString *tstr);

=head2 TF_DataTypeSize

=over 2

  TF_DataTypeSize returns the sizeof() for the underlying type corresponding
  to the given TF_DataType enum value. Returns 0 for variable length types
  (eg. TF_STRING) or on failure.

=back

  /* From <tensorflow/c/tf_datatype.h> */
  TF_CAPI_EXPORT extern size_t TF_DataTypeSize(TF_DataType dt);

=head2 TF_NewOpDefinitionBuilder

=over 2

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

    "numbertype", "realnumbertype", "quantizedtype"
        (meaning "type" with a restriction on valid values)
    "{int32,int64}" or {realnumbertype,quantizedtype,string}"
        (meaning "type" with a restriction containing unions of value types)
    "{\"foo\", \"bar\n baz\"}", or "{'foo', 'bar\n baz'}"
        (meaning "string" with a restriction on valid values)
    "list(string)", ..., "list(tensor)", "list(numbertype)", ...
        (meaning lists of the above types)
    "int >= 2" (meaning "int" with a restriction on valid values)
    "list(string) >= 2", "list(int) >= 2"
        (meaning "list(string)" / "list(int)" with length at least 2)
  <default>, if included, should use the Proto text format
  of <type>.  For lists use [a, b, c] format.
  
  Note that any attr specifying the length of an input or output will
  get a default minimum of 1 unless the >= # syntax is used.

=back

  /* From <tensorflow/c/ops.h> */
  TF_CAPI_EXPORT extern void TF_OpDefinitionBuilderAddAttr(
      TF_OpDefinitionBuilder* builder, const char* attr_spec);

=head2 TF_OpDefinitionBuilderAddInput

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

=over 2

  Appends the given bytes to the file. Any failure to do so is indicated in
  status.

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern void TF_AppendWritableFile(TF_WritableFileHandle* handle,
                                                   const char* data,
                                                   size_t length,
                                                   TF_Status* status);

=head2 TF_DeleteFile

=over 2

  Deletes the named file and indicates whether successful in *status.

=back

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN


  /* From <tensorflow/c/kernels.h> */
  TF_CAPI_EXPORT extern TF_StringView TF_GetOpKernelRequestedInput(
      TF_OpKernelContext* ctx, size_t index);

=head2 TF_OpKernelConstruction_GetAttrSize

=over 2

  Get the list_size and total_size of the attribute `attr_name` of `oper`.
  list_size - the length of the list.
  total_size - total size of the list.
    (1) If attr_type == TF_ATTR_STRING
        then total_size is the cumulative byte size
        of all the strings in the list.
    (3) If attr_type == TF_ATTR_SHAPE
        then total_size is the number of dimensions
        of the shape valued attribute, or -1
        if its rank is unknown.
    (4) If attr_type == TF_ATTR_SHAPE
        then total_size is the cumulative number

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrBool(
      TF_OpKernelConstruction* ctx, const char* attr_name, TF_Bool* val,
      TF_Status* status);

=head2 TF_OpKernelConstruction_GetAttrString

=over 2

  Interprets the named kernel construction attribute as string and
  places it into *val. `val` must
  point to an array of length at least `max_length` (ideally set to
  total_size from TF_OpKernelConstruction_GetAttrSize(ctx,
  attr_name, list_size, total_size)). *status is set to TF_OK.
  
  If the attribute could not be found or could not be interpreted as
  string, *status is populated with an error.

=back

  /* From <tensorflow/c/kernels.h> */
  TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrString(
      TF_OpKernelConstruction* ctx, const char* attr_name, char* val,
      size_t max_length, TF_Status* status);

=head2 TF_OpKernelConstruction_GetAttrTensor

=over 2

  Interprets the named kernel construction attribute as tensor and places it
  into *val. Allocates a new TF_Tensor which the caller is expected to take
  ownership of (and can deallocate using TF_DeleteTensor). *status is set to
  TF_OK.
  

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrTensor(
      TF_OpKernelConstruction* ctx, const char* attr_name, TF_Tensor** val,
      TF_Status* status);

=head2 TF_OpKernelConstruction_GetAttrTypeList

=over 2

  Interprets the named kernel construction attribute as a TF_DataType array and
  places it into *vals. *status is set to TF_OK.
  `vals` must point to an array of length at least `max_values` (ideally set
  to list_size from
  TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, list_size,
  total_size)).

=back

  /* From <tensorflow/c/kernels.h> */
  TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrTypeList(
      TF_OpKernelConstruction* ctx, const char* attr_name, TF_DataType* vals,
      int max_vals, TF_Status* status);

=head2 TF_OpKernelConstruction_GetAttrInt32List

=over 2

  Interprets the named kernel construction attribute as int32_t array and
  places it into *vals. *status is set to TF_OK.
  `vals` must point to an array of length at least `max_values` (ideally set
  to list_size from
  TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, list_size,
  total_size)).

=back

  /* From <tensorflow/c/kernels.h> */
  TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrInt32List(
      TF_OpKernelConstruction* ctx, const char* attr_name, int32_t* vals,
      int max_vals, TF_Status* status);

=head2 TF_OpKernelConstruction_GetAttrInt64List

=over 2

  Interprets the named kernel construction attribute as int64_t array and
  places it into *vals. *status is set to TF_OK.
  `vals` must point to an array of length at least `max_values` (ideally set
  to list_size from
  TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, list_size,
  total_size)).

=back

  /* From <tensorflow/c/kernels.h> */
  TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrInt64List(
      TF_OpKernelConstruction* ctx, const char* attr_name, int64_t* vals,
      int max_vals, TF_Status* status);

=head2 TF_OpKernelConstruction_GetAttrFloatList

=over 2

  Interprets the named kernel construction attribute as float array and
  places it into *vals. *status is set to TF_OK.
  `vals` must point to an array of length at least `max_values` (ideally set
  to list_size from
  TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, list_size,
  total_size)).

=back

  /* From <tensorflow/c/kernels.h> */
  TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrFloatList(
      TF_OpKernelConstruction* ctx, const char* attr_name, float* vals,
      int max_vals, TF_Status* status);

=head2 TF_OpKernelConstruction_GetAttrBoolList

=over 2

  Interprets the named kernel construction attribute as bool array and
  places it into *vals. *status is set to TF_OK.
  `vals` must point to an array of length at least `max_values` (ideally set
  to list_size from
  TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, list_size,
  total_size)).

=back

  /* From <tensorflow/c/kernels.h> */
  TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrBoolList(
      TF_OpKernelConstruction* ctx, const char* attr_name, TF_Bool* vals,
      int max_vals, TF_Status* status);

=head2 TF_OpKernelConstruction_GetAttrStringList

=over 2

  Interprets the named kernel construction attribute as string array and fills
  in `vals` and `lengths`, each of which must point to an array of length at
  least `max_values`. *status is set to TF_OK. The elements of values will
  point to addresses in `storage` which must be at least `storage_size` bytes
  in length. Ideally, max_values would be set to list_size and `storage` would
  be at least total_size, obtained from
  TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, list_size,
  total_size).

=back

  /* From <tensorflow/c/kernels.h> */
  TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrStringList(
      TF_OpKernelConstruction* ctx, const char* attr_name, char** vals,
      size_t* lengths, int max_values, void* storage, size_t storage_size,
      TF_Status* status);

=head2 TF_OpKernelConstruction_GetAttrTensorList

=over 2

  Interprets the named kernel construction attribute as tensor array and places
  it into *vals. *status is set to TF_OK.
  `vals` must point to an array of length at least `max_values`
  (ideally set to list_size from TF_OpKernelConstruction_GetAttrSize(ctx,
  attr_name, list_size, total_size)).
  
  The caller takes ownership of all the non-null TF_Tensor* entries in `vals`
  (which can be deleted using TF_DeleteTensor(vals[i])).

=back

  /* From <tensorflow/c/kernels.h> */
  TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrTensorList(

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

                                               const char* inputName,
                                               TF_Tensor** tensor,
                                               TF_Status* status);

=head2 TF_OpKernelConstruction_GetAttrTensorShape

=over 2

  Interprets the named kernel construction attribute as a shape attribute and
  fills in `vals` with the size of each dimension. `vals` must point to an
  array of length at least `max_values` (ideally set to total_size from
  TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, &list_size,
  &total_size)).

=back

  /* From <tensorflow/c/kernels_experimental.h> */
  TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrTensorShape(
      TF_OpKernelConstruction* ctx, const char* attr_name, int64_t* dims,
      size_t num_dims, TF_Status* status);

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN


=over 2

  Fetches the current number of inputs attached to `op`.
  
  Does not use the operation's definition to determine how many inputs should
  be attached. It is intended for use with TFE_OpGetFlatInput to inspect an
  already-finalized operation.
  
  Note that TFE_OpGetFlatInputCount and TFE_OpGetFlatInput operate on a flat
  sequence of inputs, unlike TFE_OpGetInputLength (for getting the length of a
  particular named input list, which may only be part of the op's inputs).

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern int TFE_OpGetFlatInputCount(const TFE_Op* op,
                                                    TF_Status* status);

=head2 TFE_OpGetFlatInput

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

=head2 TFE_OpSetAttrString

=over 2

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_OpSetAttrString(TFE_Op* op,
                                                 const char* attr_name,
                                                 const void* value,
                                                 size_t length);

=head2 TFE_OpSetAttrInt

=over 2

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_OpSetAttrInt(TFE_Op* op, const char* attr_name,
                                              int64_t value);

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

                                                   const TFE_Op* value);

=head2 TFE_OpSetAttrFunctionName

=over 2

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT void TFE_OpSetAttrFunctionName(TFE_Op* op, const char* attr_name,
                                                const char* data, size_t length);

=head2 TFE_OpSetAttrTensor

=over 2

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_OpSetAttrTensor(TFE_Op* op,
                                                 const char* attr_name,

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

=head2 TFE_OpSetAttrStringList

=over 2

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_OpSetAttrStringList(TFE_Op* op,
                                                     const char* attr_name,
                                                     const void* const* values,
                                                     const size_t* lengths,
                                                     int num_values);

=head2 TFE_OpSetAttrIntList

=over 2

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_OpSetAttrIntList(TFE_Op* op,

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_OpSetAttrFunctionList(TFE_Op* op,
                                                       const char* attr_name,
                                                       const TFE_Op** value,
                                                       int num_values);

=head2 TFE_OpGetInputLength

=over 2

  Returns the length (number of tensors) of the input argument `input_name`
  found in the provided `op`.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern int TFE_OpGetInputLength(TFE_Op* op,
                                                 const char* input_name,
                                                 TF_Status* status);

=head2 TFE_OpGetOutputLength

=over 2

  Returns the length (number of tensors) of the output argument `output_name`
  found in the provided `op`.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern int TFE_OpGetOutputLength(TFE_Op* op,
                                                  const char* output_name,
                                                  TF_Status* status);

=head2 TFE_Execute

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

                                                       TF_Buffer* buf,
                                                       TF_Status* status);

=head2 TFE_AllocateHostTensor

=over 2

  Allocate and return a new Tensor on the host.
  
  The caller must set the Tensor values by writing them to the pointer returned
  by TF_TensorData with length TF_TensorByteSize.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern TF_Tensor* TFE_AllocateHostTensor(TFE_Context* ctx,
                                                          TF_DataType dtype,
                                                          const int64_t* dims,
                                                          int num_dims,
                                                          TF_Status* status);

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  TF_CAPI_EXPORT extern void TF_GetNodesToPreserveListSize(
      const TF_GrapplerItem* item, int* num_values, size_t* storage_size,
      TF_Status* status);

=head2 TF_GetNodesToPreserveList

=over 2

  Get a set of node names that must be preserved. They can not be transformed
  or removed during the graph transformation. This includes feed and fetch
  nodes, keep_ops, init_ops. Fills in `values` and `lengths`, each of which
  must point to an array of length at least `num_values`.
  
  The elements of values will point to addresses in `storage` which must be at
  least `storage_size` bytes in length.  `num_values` and `storage` can be
  obtained from TF_GetNodesToPreserveSize
  
  Fails if storage_size is too small to hold the requested number of strings.

=back

  /* From <tensorflow/c/experimental/grappler/grappler.h> */
  TF_CAPI_EXPORT extern void TF_GetNodesToPreserveList(
      const TF_GrapplerItem* item, char** values, size_t* lengths, int num_values,
      void* storage, size_t storage_size, TF_Status* status);

=head2 TF_GetFetchNodesListSize

=over 2

  Get a set of node names for fetch nodes. Fills in `values` and `lengths`,
  they will be used in `TF_GetFetchNodesList`

=back

  /* From <tensorflow/c/experimental/grappler/grappler.h> */
  TF_CAPI_EXPORT extern void TF_GetFetchNodesListSize(const TF_GrapplerItem* item,
                                                      int* num_values,
                                                      size_t* storage_size,
                                                      TF_Status* status);

=head2 TF_GetFetchNodesList

=over 2

  Get a set of node names for fetch nodes. Fills in `values` and `lengths`,
  each of which must point to an array of length at least `num_values`.
  
  The elements of values will point to addresses in `storage` which must be at
  least `storage_size` bytes in length.  `num_values` and `storage` can be
  obtained from TF_GetFetchNodesSize
  
  Fails if storage_size is too small to hold the requested number of strings.

=back

  /* From <tensorflow/c/experimental/grappler/grappler.h> */
  TF_CAPI_EXPORT extern void TF_GetFetchNodesList(const TF_GrapplerItem* item,
                                                  char** values, size_t* lengths,
                                                  int num_values, void* storage,
                                                  size_t storage_size,
                                                  TF_Status* status);

=head2 TF_NewGraphProperties

=over 2

  Create GraphProperties. The item must outlive the properties.

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT extern TF_Buffer* TF_CreateRunOptions(
      unsigned char enable_full_trace);

=head2 TF_GraphDebugString

=over 2

  Returns the graph content in a human-readable format, with length set in
  `len`. The format is subject to change in the future.
  The returned string is heap-allocated, and caller should call free() on it.

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT extern const char* TF_GraphDebugString(TF_Graph* graph,
                                                        size_t* len);

=head2 TF_FunctionDebugString

=over 2

  Returns the function content in a human-readable format, with length set in
  `len`. The format is subject to change in the future.
  The returned string is heap-allocated, and caller should call free() on it.
  
  Do not return const char*, because some foreign language binding
  (e.g. swift) cannot then call free() on the returned pointer.

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT extern char* TF_FunctionDebugString(TF_Function* func,

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN


  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TF_AttrBuilderCheckCanRunOnDevice(
      TF_AttrBuilder* builder, const char* device_type, TF_Status* status);

=head2 TF_GetNumberAttrForOpListInput

=over 2

  For argument number input_index, fetch the corresponding number_attr that
  needs to be updated with the argument length of the input list.
  Returns nullptr if there is any problem like op_name is not found, or the
  argument does not support this attribute type.

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT extern const char* TF_GetNumberAttrForOpListInput(
      const char* op_name, int input_index, TF_Status* status);

=head2 TF_OpIsStateful

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod  view on Meta::CPAN

        $images_for_test_to_uri{$_},
        $model_name_to_params{$model_name}{image_size}
    );
} ($image_names[0]);

my $pdl_image_batched = cat(@pdl_images);
my $t = Uint8PDLTOTFTensor($pdl_image_batched);

die "There should be 4 dimensions" unless $pdl_image_batched->ndims == 4;

die "With the final dimension of length 1" unless $pdl_image_batched->dim(3) == 1;

p $pdl_image_batched;
p $t;

my $RunSession = sub {
    my ($session, $t) = @_;
    my @outputs_t;

    my @keys = keys %{ $outputs{out} };
    my @values = $outputs{out}->@{ @keys };

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod  view on Meta::CPAN

          $images_for_test_to_uri{$_},
          $model_name_to_params{$model_name}{image_size}
      );
  } ($image_names[0]);
  
  my $pdl_image_batched = cat(@pdl_images);
  my $t = Uint8PDLTOTFTensor($pdl_image_batched);
  
  die "There should be 4 dimensions" unless $pdl_image_batched->ndims == 4;
  
  die "With the final dimension of length 1" unless $pdl_image_batched->dim(3) == 1;
  
  p $pdl_image_batched;
  p $t;

=head2 Run the model for inference

We can use the C<Run> method to run the session and get the multiple output C<TFTensor>s. The following uses the names in C<$outputs> mapping to help process the multiple outputs more easily.

  my $RunSession = sub {
      my ($session, $t) = @_;

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN

    $pdl->upd_data;

    $pdl;
}

# Model handle
my $model_uri = URI->new( 'https://tfhub.dev/deepmind/enformer/1' );
$model_uri->query_form( 'tf-hub-format' => 'compressed' );
my $model_base = substr( $model_uri->path, 1 ) =~ s,/,_,gr;
my $model_archive_path = "${model_base}.tar.gz";
my $model_sequence_length = 393_216; # bp

# Human targets from Basenji2 dataset
my $targets_uri  = URI->new('https://raw.githubusercontent.com/calico/basenji/master/manuscripts/cross2020/targets_human.txt');
my $targets_path = 'targets_human.txt';

# Human reference genome
my $hg_uri    = URI->new("http://hgdownload.cse.ucsc.edu/goldenPath/hg38/bigZips/hg38.fa.gz");
my $hg_gz_path   = "hg38.fa.gz";
# From http://hgdownload.cse.ucsc.edu/goldenPath/hg38/bigZips/md5sum.txt
my $hg_md5_digest = "1c9dcaddfa41027f17cd8f7a82c7293b";

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN


imported_model = tf.saved_model.load(in_path).model
tf.saved_model.save( imported_model , out_path )
EOF

saved_model_cli( qw(show),
    qw(--dir) => $new_model_base,
    qw(--all),
);

my $model_central_base_pairs_length     = 114_688; # bp
my $model_central_base_pair_window_size = 128;     # bp / prediction

say "Number of predictions: ", $model_central_base_pairs_length / $model_central_base_pair_window_size;

use Data::Frame;

my $df = Data::Frame->from_csv( $targets_path, sep => "\t" )
    ->transform({
        file => sub {
            my ($col, $df) = @_;
            # clean up the paths in 'file' column
            [map { join "/", (split('/', $_))[7..8] } $col->list];
        }

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN

undef;

use PDL;

our $SHOW_ENCODER = 1;

sub one_hot_dna {
    my ($seq) = @_;

    my $from_alphabet = "NACGT";
    my $to_alphabet   = pack "C*", 0..length($from_alphabet)-1;

    # sequences from UCSC genome have both uppercase and lowercase bases
    my $from_alphabet_tr = $from_alphabet . lc $from_alphabet;
    my $to_alphabet_tr   = $to_alphabet x 2;

    my $p = zeros(byte, bytes::length($seq));
    my $p_dataref = $p->get_dataref;
    ${ $p_dataref } = $seq;
    eval "tr/$from_alphabet_tr/$to_alphabet_tr/" for ${ $p_dataref };
    $p->upd_data;

    my $encoder = append(float(0), identity(float(length($from_alphabet)-1)) );
    say "Encoder is\n", $encoder->info, $encoder if $SHOW_ENCODER;

    my $encoded  = $encoder->index( $p->dummy(0) );

    return $encoded;
}

####

{

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN


{

say "Testing interval resizing:\n";
sub _debug_resize {
    my ($interval, $to, $msg) = @_;

    my $resized_interval = $interval->resize($to);

    die "Wrong interval size for $interval --($to)--> $resized_interval"
        unless $resized_interval->length == $to;

    say sprintf "Interval: %s -> %s, length %2d : %s",
        $interval,
        $resized_interval, $resized_interval->length,
        $msg;
}

for my $interval_spec ( [4, 8], [5, 8], [5, 9], [6, 9]) {
    my ($start, $end) = @$interval_spec;
    my $test_interval = Interval->new( -seq_id => 'chr11', -start => $start, -end => $end );
    say sprintf "Testing interval %s with length %d", $test_interval, $test_interval->length;
    say "-----";
    for(0..5) {
        my $base = $test_interval->length;
        my $to = $base + $_;
        _debug_resize $test_interval, $to, "$base -> $to (+ $_)";
    }
    say "";
}

}

undef;

use Bio::DB::HTS::Faidx;

my $hg_db = Bio::DB::HTS::Faidx->new( $hg_bgz_path );

sub extract_sequence {
    my ($db, $interval) = @_;

    my $chrom_length = $db->length($interval->seq_id);

    my $trimmed_interval = $interval->clone;
    $trimmed_interval->start( List::Util::max( $interval->start, 1               ) );
    $trimmed_interval->end(   List::Util::min( $interval->end  , $chrom_length   ) );

    # Bio::DB::HTS::Faidx is 0-based for both start and end points
    my $seq = $db->get_sequence2_no_length(
        $trimmed_interval->seq_id,
        $trimmed_interval->start - 1,
        $trimmed_interval->end   - 1,
    );

    my $pad_upstream   = 'N' x List::Util::max( -($interval->start-1), 0 );
    my $pad_downstream = 'N' x List::Util::max( $interval->end - $chrom_length, 0 );

    return join '', $pad_upstream, $seq, $pad_downstream;
}

sub seq_info {
    my ($seq, $n) = @_;
    $n ||= 10;
    if( length $seq > $n ) {
        sprintf "%s...%s (length %d)", uc substr($seq, 0, $n), uc substr($seq, -$n), length $seq;
    } else {
        sprintf "%s (length %d)", uc $seq, length $seq;
    }
}

####

{

say "Testing sequence extraction:";

say "1 base: ",   seq_info

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN


say "3 bases: ",  seq_info
    extract_sequence( $hg_db,
        Interval->new( -seq_id => 'chr11',
            -start => 1,
            -end   => 1 )->resize(3) );

say "5 bases: ", seq_info
    extract_sequence( $hg_db,
        Interval->new( -seq_id => 'chr11',
            -start => $hg_db->length('chr11'),
            -end   => $hg_db->length('chr11') )->resize(5) );

say "chr11 is of length ", $hg_db->length('chr11');
say "chr11 bases: ", seq_info
    extract_sequence( $hg_db,
        Interval->new( -seq_id => 'chr11',
            -start => 1,
            -end   => $hg_db->length('chr11') )->resize( $hg_db->length('chr11') ) );
}

my $target_interval = Interval->new( -seq_id => 'chr11',
    -start => 35_082_742 +  1, # BioPerl is 1-based
    -end   => 35_197_430 );

say "Target interval: $target_interval with length @{[ $target_interval->length ]}";

die "Target interval is not $model_central_base_pairs_length bp long"
    unless $target_interval->length == $model_central_base_pairs_length;

say "Target sequence is ", seq_info extract_sequence( $hg_db, $target_interval );


say "";


my $resized_interval = $target_interval->resize( $model_sequence_length );
say "Resized interval: $resized_interval with length @{[ $resized_interval->length ]}";

die "resize() is not working properly!" unless $resized_interval->length == $model_sequence_length;

my $seq = extract_sequence( $hg_db, $resized_interval );

say "Resized sequence is ", seq_info($seq);

my $sequence_one_hot = one_hot_dna( $seq )->dummy(-1);

say $sequence_one_hot->info; undef;

use Devel::Timer;

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN


  > Landrum MJ, Lee JM, Benson M, Brown GR, Chao C, Chitipiralla S, Gu B, Hart J, Hoffman D, Jang W, Karapetyan K, Katz K, Liu C, Maddipatla Z, Malheiro A, McDaniel K, Ovetsky M, Riley G, Zhou G, Holmes JB, Kattman BL, Maglott DR. ClinVar: improving ...

=back

  # Model handle
  my $model_uri = URI->new( 'https://tfhub.dev/deepmind/enformer/1' );
  $model_uri->query_form( 'tf-hub-format' => 'compressed' );
  my $model_base = substr( $model_uri->path, 1 ) =~ s,/,_,gr;
  my $model_archive_path = "${model_base}.tar.gz";
  my $model_sequence_length = 393_216; # bp
  
  # Human targets from Basenji2 dataset
  my $targets_uri  = URI->new('https://raw.githubusercontent.com/calico/basenji/master/manuscripts/cross2020/targets_human.txt');
  my $targets_path = 'targets_human.txt';
  
  # Human reference genome
  my $hg_uri    = URI->new("http://hgdownload.cse.ucsc.edu/goldenPath/hg38/bigZips/hg38.fa.gz");
  my $hg_gz_path   = "hg38.fa.gz";
  # From http://hgdownload.cse.ucsc.edu/goldenPath/hg38/bigZips/md5sum.txt
  my $hg_md5_digest = "1c9dcaddfa41027f17cd8f7a82c7293b";

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN

the output C<human> which has the name C<StatefulPartitionedCall:0>.

=back

all of which are C<DT_FLOAT>.

Make note of the shapes that those take. Per the L<model description|https://tfhub.dev/deepmind/enformer/1> at TensorFlow Hub:

=over 2

The input sequence length is 393,216 with the prediction corresponding to 128 base pair windows for the center 114,688 base pairs. The input sequence is one hot encoded using the order of indices corresponding to 'ACGT' with N values being all zeros.

=back

The input shape C<(-1, 393216, 4)> thus represents dimensions C<[batch size] x [sequence length] x [one-hot encoding of ACGT]>.

The output shape C<(-1, 896, 5313)> represents dimensions C<[batch size] x [ predictions along 114,688 base pairs / 128 base pair windows ] x [ human target by index ]>. We can confirm this by doing some calculations:

  my $model_central_base_pairs_length     = 114_688; # bp
  my $model_central_base_pair_window_size = 128;     # bp / prediction
  
  say "Number of predictions: ", $model_central_base_pairs_length / $model_central_base_pair_window_size;

B<STREAM (STDOUT)>:

  Number of predictions: 896

B<RESULT>:

  1

and by looking at the targets file:

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN

  
      return $outputs_t[0];
  };
  
  undef;

=head2 Encoding the data

The model specifies that the way to get a sequence of DNA bases into a C<TFTensor> is to use L<one-hot encoding|https://en.wikipedia.org/wiki/One-hot#Machine_learning_and_statistics> in the order C<ACGT>.

This means that the bases are represented as vectors of length 4:

| base | vector encoding |
|------|-----------------|
| A    | C<[1 0 0 0]>     |
| C    | C<[0 1 0 0]>     |
| G    | C<[0 0 1 0]>     |
| T    | C<[0 0 0 1]>     |
| N    | C<[0 0 0 0]>     |

We can achieve this encoding by creating a lookup table with a PDL ndarray. This could be done by creating a byte PDL ndarray of dimensions C<[ 256 4 ]> to directly look up the numeric value of characters 0-255, but here we'll go with a smaller C...

  use PDL;
  
  our $SHOW_ENCODER = 1;
  
  sub one_hot_dna {
      my ($seq) = @_;
  
      my $from_alphabet = "NACGT";
      my $to_alphabet   = pack "C*", 0..length($from_alphabet)-1;
  
      # sequences from UCSC genome have both uppercase and lowercase bases
      my $from_alphabet_tr = $from_alphabet . lc $from_alphabet;
      my $to_alphabet_tr   = $to_alphabet x 2;
  
      my $p = zeros(byte, bytes::length($seq));
      my $p_dataref = $p->get_dataref;
      ${ $p_dataref } = $seq;
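      # Translate each base letter in place (upper- or lowercase) to its
      # row index in the encoder lookup table built below; tr/// needs
      # literal alphabets, so a string eval is used.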
      eval "tr/$from_alphabet_tr/$to_alphabet_tr/" for ${ $p_dataref };
      $p->upd_data;
  
      my $encoder = append(float(0), identity(float(length($from_alphabet)-1)) );
      say "Encoder is\n", $encoder->info, $encoder if $SHOW_ENCODER;
  
      my $encoded  = $encoder->index( $p->dummy(0) );
  
      return $encoded;
  }
  
  ####
  
  {

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN

Note that in the above, the PDL ndarray's

=over

=item *

first dimension is 4 which matches the last dimension of the input C<TFTensor>;

=item *

second dimension is the sequence length which matches the penultimate dimension of the input C<TFTensor>.

=back
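
As a quick sanity check of those dimensions (a hypothetical snippet, not part of the original notebook), a short sequence should encode to a C<[4, 5]> ndarray:

  {
  local $SHOW_ENCODER = 0;   # silence the encoder dump for this check
  my $check = one_hot_dna('ACGTN');
  die 'Unexpected one-hot dims!' unless join(',', $check->dims) eq '4,5';
  say "one_hot_dna('ACGTN') has dims [@{[ $check->dims ]}]";
  }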

Now we need a way to deal with the sequence interval. We're going to use 1-based coordinates as BioPerl does. In fact, we'll extend a BioPerl class.

  package Interval {
      use Bio::Location::Simple ();
  
      use parent qw(Bio::Location::Simple);
  

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN

  
  {
  
  say "Testing interval resizing:\n";
  sub _debug_resize {
      my ($interval, $to, $msg) = @_;
  
      my $resized_interval = $interval->resize($to);
  
      die "Wrong interval size for $interval --($to)--> $resized_interval"
          unless $resized_interval->length == $to;
  
      say sprintf "Interval: %s -> %s, length %2d : %s",
          $interval,
          $resized_interval, $resized_interval->length,
          $msg;
  }
  
  for my $interval_spec ( [4, 8], [5, 8], [5, 9], [6, 9]) {
      my ($start, $end) = @$interval_spec;
      my $test_interval = Interval->new( -seq_id => 'chr11', -start => $start, -end => $end );
      say sprintf "Testing interval %s with length %d", $test_interval, $test_interval->length;
      say "-----";
      for(0..5) {
          my $base = $test_interval->length;
          my $to = $base + $_;
          _debug_resize $test_interval, $to, "$base -> $to (+ $_)";
      }
      say "";
  }
  
  }
  
  undef;

B<STREAM (STDOUT)>:

  Testing interval resizing:
  
  Testing interval chr11:4..8 with length 5
  -----
  Interval: chr11:4..8 -> chr11:4..8, length  5 : 5 -> 5 (+ 0)
  Interval: chr11:4..8 -> chr11:3..8, length  6 : 5 -> 6 (+ 1)
  Interval: chr11:4..8 -> chr11:3..9, length  7 : 5 -> 7 (+ 2)
  Interval: chr11:4..8 -> chr11:2..9, length  8 : 5 -> 8 (+ 3)
  Interval: chr11:4..8 -> chr11:2..10, length  9 : 5 -> 9 (+ 4)
  Interval: chr11:4..8 -> chr11:1..10, length 10 : 5 -> 10 (+ 5)
  
  Testing interval chr11:5..8 with length 4
  -----
  Interval: chr11:5..8 -> chr11:5..8, length  4 : 4 -> 4 (+ 0)
  Interval: chr11:5..8 -> chr11:5..9, length  5 : 4 -> 5 (+ 1)
  Interval: chr11:5..8 -> chr11:4..9, length  6 : 4 -> 6 (+ 2)
  Interval: chr11:5..8 -> chr11:4..10, length  7 : 4 -> 7 (+ 3)
  Interval: chr11:5..8 -> chr11:3..10, length  8 : 4 -> 8 (+ 4)
  Interval: chr11:5..8 -> chr11:3..11, length  9 : 4 -> 9 (+ 5)
  
  Testing interval chr11:5..9 with length 5
  -----
  Interval: chr11:5..9 -> chr11:5..9, length  5 : 5 -> 5 (+ 0)
  Interval: chr11:5..9 -> chr11:4..9, length  6 : 5 -> 6 (+ 1)
  Interval: chr11:5..9 -> chr11:4..10, length  7 : 5 -> 7 (+ 2)
  Interval: chr11:5..9 -> chr11:3..10, length  8 : 5 -> 8 (+ 3)
  Interval: chr11:5..9 -> chr11:3..11, length  9 : 5 -> 9 (+ 4)
  Interval: chr11:5..9 -> chr11:2..11, length 10 : 5 -> 10 (+ 5)
  
  Testing interval chr11:6..9 with length 4
  -----
  Interval: chr11:6..9 -> chr11:6..9, length  4 : 4 -> 4 (+ 0)
  Interval: chr11:6..9 -> chr11:6..10, length  5 : 4 -> 5 (+ 1)
  Interval: chr11:6..9 -> chr11:5..10, length  6 : 4 -> 6 (+ 2)
  Interval: chr11:6..9 -> chr11:5..11, length  7 : 4 -> 7 (+ 3)
  Interval: chr11:6..9 -> chr11:4..11, length  8 : 4 -> 8 (+ 4)
  Interval: chr11:6..9 -> chr11:4..12, length  9 : 4 -> 9 (+ 5)
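
The C<resize> implementation itself is elided from this excerpt. Purely as an illustration (an assumption, not the notebook's actual code), a centring rule that reproduces the output above could be written as a method of the C<Interval> package:

  # Hypothetical stand-in for Interval::resize(): grow or shrink the
  # interval around its centre, computed in 0-based half-open coordinates,
  # then convert back to BioPerl's 1-based coordinates.
  sub resize {
      my ($self, $width) = @_;
      my $new    = $self->clone;
      my $center = int( ( ($self->start - 1) + $self->end ) / 2 );
      $new->start( $center - int($width / 2) + 1 );
      $new->end(   $center + int($width / 2) + $width % 2 );
      return $new;
  }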
  


  use Bio::DB::HTS::Faidx;
  
  my $hg_db = Bio::DB::HTS::Faidx->new( $hg_bgz_path );
  
  sub extract_sequence {
      my ($db, $interval) = @_;
  
      my $chrom_length = $db->length($interval->seq_id);
  
      my $trimmed_interval = $interval->clone;
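      # Clamp the interval to the chromosome bounds; any overhang beyond
      # either end is padded back with 'N' below so the returned sequence
      # keeps the requested length.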
      $trimmed_interval->start( List::Util::max( $interval->start, 1               ) );
      $trimmed_interval->end(   List::Util::min( $interval->end  , $chrom_length   ) );
  
      # Bio::DB::HTS::Faidx is 0-based for both start and end points
      my $seq = $db->get_sequence2_no_length(
          $trimmed_interval->seq_id,
          $trimmed_interval->start - 1,
          $trimmed_interval->end   - 1,
      );
  
      my $pad_upstream   = 'N' x List::Util::max( -($interval->start-1), 0 );
      my $pad_downstream = 'N' x List::Util::max( $interval->end - $chrom_length, 0 );
  
      return join '', $pad_upstream, $seq, $pad_downstream;
  }
  
  sub seq_info {
      my ($seq, $n) = @_;
      $n ||= 10;
      if( length $seq > $n ) {
          sprintf "%s...%s (length %d)", uc substr($seq, 0, $n), uc substr($seq, -$n), length $seq;
      } else {
          sprintf "%s (length %d)", uc $seq, length $seq;
      }
  }
  
  ####
  
  {
  
  say "Testing sequence extraction:";
  
  say "1 base: ",   seq_info

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN

  
  say "3 bases: ",  seq_info
      extract_sequence( $hg_db,
          Interval->new( -seq_id => 'chr11',
              -start => 1,
              -end   => 1 )->resize(3) );
  
  say "5 bases: ", seq_info
      extract_sequence( $hg_db,
          Interval->new( -seq_id => 'chr11',
              -start => $hg_db->length('chr11'),
              -end   => $hg_db->length('chr11') )->resize(5) );
  
  say "chr11 is of length ", $hg_db->length('chr11');
  say "chr11 bases: ", seq_info
      extract_sequence( $hg_db,
          Interval->new( -seq_id => 'chr11',
              -start => 1,
              -end   => $hg_db->length('chr11') )->resize( $hg_db->length('chr11') ) );
  }

B<STREAM (STDOUT)>:

  Testing sequence extraction:
  1 base: G (length 1)
  3 bases: NNN (length 3)
  5 bases: NNNNN (length 5)
  chr11 is of length 135086622
  chr11 bases: NNNNNNNNNN...NNNNNNNNNN (length 135086622)

B<RESULT>:

  1

Now we can use the same target interval that is used in the example notebook, which recreates part of L<figure 1|https://www.nature.com/articles/s41592-021-01252-x/figures/1> from the Enformer paper.

  my $target_interval = Interval->new( -seq_id => 'chr11',
      -start => 35_082_742 +  1, # BioPerl is 1-based
      -end   => 35_197_430 );
  
  say "Target interval: $target_interval with length @{[ $target_interval->length ]}";
  
  die "Target interval is not $model_central_base_pairs_length bp long"
      unless $target_interval->length == $model_central_base_pairs_length;
  
  say "Target sequence is ", seq_info extract_sequence( $hg_db, $target_interval );
  
  
  say "";
  
  
  my $resized_interval = $target_interval->resize( $model_sequence_length );
  say "Resized interval: $resized_interval with length @{[ $resized_interval->length ]}";
  
  die "resize() is not working properly!" unless $resized_interval->length == $model_sequence_length;
  
  my $seq = extract_sequence( $hg_db, $resized_interval );
  
  say "Resized sequence is ", seq_info($seq);

B<STREAM (STDOUT)>:

  Target interval: chr11:35082743..35197430 with length 114688
  Target sequence is GGTGGCAGCC...ATCTCCTTTT (length 114688)
  
  Resized interval: chr11:34943479..35336694 with length 393216
  Resized sequence is ACTAGTTCTA...GGCCCAAATC (length 393216)

B<RESULT>:

  1

To prepare the input we have to one-hot encode this resized sequence and give it a dummy dimension at the end to indicate that it is a batch with a single sequence. Then we can turn the PDL ndarray into a C<TFTensor> and pass it to our prediction ...

  my $sequence_one_hot = one_hot_dna( $seq )->dummy(-1);
  
  say $sequence_one_hot->info; undef;
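
The step that wraps this ndarray into a C<TFTensor> falls outside this excerpt. As a rough sketch only (the helper below is hypothetical, not the notebook's elided code), the conversion could look like this, reversing the PDL dims so that the C<[4, 393216, 1]> ndarray becomes the C<[1, 393216, 4]> shape the model expects:

  # Hypothetical helper: wrap a float PDL ndarray in a TFTensor. PDL lists
  # dimensions fastest-varying first, TensorFlow slowest-varying first,
  # hence the reverse.
  use AI::TensorFlow::Libtensorflow::Tensor;
  use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);
  
  sub pdl_to_tftensor {
      my ($p) = @_;
      return AI::TensorFlow::Libtensorflow::Tensor->New(
          FLOAT, [ reverse $p->dims ],
          $p->get_dataref,
          sub { undef $p },   # keep the ndarray alive until TF releases it
      );
  }
  
  my $input_t = pdl_to_tftensor( $sequence_one_hot );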

lib/AI/TensorFlow/Libtensorflow/OperationDescription.pm  view on Meta::CPAN

] => 'void');

$ffi->attach( ColocateWith => [
	arg 'TF_OperationDescription' => 'desc',
	arg 'TF_Operation' => 'op',
] => 'void');

$ffi->attach( SetAttrString => [
	arg 'TF_OperationDescription' => 'desc',
	arg 'string' => 'attr_name',
	arg tf_attr_string_buffer => [qw(value length)],
] => 'void');

$ffi->attach(SetAttrStringList => [
	arg 'TF_OperationDescription' => 'desc',
	arg 'string' => 'attr_name',
	arg 'tf_attr_string_list' => [qw(values lengths num_values)],
] => 'void');

$ffi->attach( SetAttrInt => [
	arg 'TF_OperationDescription' => 'desc',
	arg 'string' => 'attr_name',
	arg int64_t  => 'value',
] => 'void');

$ffi->attach( SetAttrIntList => [
	arg 'TF_OperationDescription' => 'desc',

lib/AI/TensorFlow/Libtensorflow/OperationDescription.pm  view on Meta::CPAN


$ffi->attach( SetAttrPlaceholder => [
	arg 'TF_OperationDescription' => 'desc',
	arg 'string' => 'attr_name',
	arg 'string' => 'placeholder',
] => 'void');

$ffi->attach( SetAttrFuncName => [
	arg 'TF_OperationDescription' => 'desc',
	arg 'string' => 'attr_name',
	arg 'tf_attr_string_buffer' => [qw(value length)],
] => 'void');

$ffi->attach( SetAttrShape => [
	arg 'TF_OperationDescription' => 'desc',
	arg 'string' => 'attr_name',
	arg 'tf_dims_buffer' => [qw(dims num_dims)],
] => 'void');

$ffi->attach( SetAttrShapeList => [
	arg 'TF_OperationDescription' => 'desc',

lib/AI/TensorFlow/Libtensorflow/TFLibrary.pm  view on Meta::CPAN


=over 2

C<<<
GetAllOpList()
>>>

=back

  my $buf = AI::TensorFlow::Libtensorflow::TFLibrary->GetAllOpList();
  cmp_ok $buf->length, '>', 0, 'Got OpList buffer';

B<Returns>

=over 4

=item L<TFBuffer|AI::TensorFlow::Libtensorflow::Lib::Types/TFBuffer>

Contains a serialized C<OpList> proto for ops registered in this address space.

=back
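
As a rough illustration (hypothetical code, not from the distribution; the use of L<FFI::Platypus::Buffer> and L<Path::Tiny> and the output file name are assumptions), the raw proto bytes could be dumped to disk for inspection with external protobuf tooling:

  use FFI::Platypus::Buffer qw(buffer_to_scalar);
  use Path::Tiny qw(path);
  
  my $oplist_buf   = AI::TensorFlow::Libtensorflow::TFLibrary->GetAllOpList();
  my $oplist_bytes = buffer_to_scalar( $oplist_buf->data, $oplist_buf->length );
  path('all_ops.pb')->spew_raw( $oplist_bytes );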

maint/process-capi.pl  view on Meta::CPAN

				^ TF_CAPI_EXPORT [^;]+ ;
			)
		}xm;
	};

	lazy sorted_header_paths => method() {
		my @order = $self->header_order->@*;
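		# Sort header paths by the index of the first pattern in @order that
		# they match, breaking ties by path length (the two integer sort keys
		# returned from this block).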
		my @sorted = iikeysort {
				my $item = $_;
				my $first = firstidx { $item =~ $_ } @order;
				($first, length $_);
			} $self->header_paths->@*;
		\@sorted;
	};

	method _process_re($re) {
		my @data;
		my @sorted = $self->sorted_header_paths->@*;
		for my $file (@sorted) {
			my $txt = $file->slurp_utf8;
			while( $txt =~ /$re/g ) {

t/upstream/CAPI/015_Graph.t  view on Meta::CPAN


	subtest 'Test TF_Operation*() query functions.' => sub {
		is $feed->Name, 'feed', 'name';
		is $feed->OpType, 'Placeholder', 'optype';
		is $feed->Device, '', 'device';
		is $feed->NumOutputs, 1, 'num outputs';
		cmp_ok $feed->OutputType(
			$TFOutput->coerce({oper => $feed, index => 0})
		), 'eq', INT32, 'output 0 type';

		is $feed->OutputListLength("output", $s), 1, 'output list length';
		TF_Utils::AssertStatusOK($s);

		is $feed->NumInputs, 0, 'num inputs';
		is $feed->OutputNumConsumers(
			$TFOutput->coerce({oper => $feed, index => 0})
		), 0, 'output 0 num consumers';
		is $feed->NumControlInputs, 0, 'num control inputs';
		is $feed->NumControlOutputs, 0, 'num control outputs';
	};

t/upstream/CAPI/015_Graph.t  view on Meta::CPAN

	my $add = TF_Utils::Add($feed, $three, $graph, $s);
	TF_Utils::AssertStatusOK($s);

	subtest 'Test TF_Operation*() query functions.' => sub {
		is $add->Name, 'add', 'name';
		is $add->OpType, 'AddN', 'op type';
		is $add->Device, '', 'device';
		is $add->NumOutputs, 1, 'num outputs';
		cmp_ok $add->OutputType($TFOutput->coerce([$add => 0])),
			'eq', INT32, 'output type';
		is $add->OutputListLength('sum', $s), 1, 'output list length';
		TF_Utils::AssertStatusOK($s);
		is $add->NumInputs, 2, 'num inputs';
		is $add->InputListLength("inputs", $s), 2, 'InputListLength';
		TF_Utils::AssertStatusOK($s);
		cmp_ok $add->InputType( $TFInput->coerce([$add, 0])  ),
			'eq', INT32, 'input type 0';
		cmp_ok $add->InputType( $TFInput->coerce([$add, 1])),
			'eq', INT32, 'input type 1';
		my $add_in_0 = $add->Input($TFInput->coerce([$add, 0]));
		is $add_in_0->oper->Name, $feed->Name, 'feed.out[0] -> add.in[0] by name';

t/upstream/CAPI/028_GetOpDef.t  view on Meta::CPAN

use aliased 'AI::TensorFlow::Libtensorflow';
use AI::TensorFlow::Libtensorflow::Status;

subtest "(CAPI, GetOpDef)" => sub {
	my $status = AI::TensorFlow::Libtensorflow::Status->New;
	my $graph = AI::TensorFlow::Libtensorflow::Graph->New;
	my $buffer = AI::TensorFlow::Libtensorflow::Buffer->New;

	$graph->GetOpDef("Add", $buffer, $status);
	TF_Utils::AssertStatusOK($status);
	cmp_ok $buffer->length, '>', 0, 'Got Add OpDef buffer';

	pass 'Skipping these tests. Can not access tensorflow::OpDef C++.';

	$graph->GetOpDef("MyFakeOp", $buffer, $status);
	like $status, object {
		call GetCode => AI::TensorFlow::Libtensorflow::Status::NOT_FOUND;
		call Message => qr/\QOp type not registered 'MyFakeOp' in binary\E/;
	}, 'MyFakeOp is NOT_FOUND';
};

t/upstream/CAPI/037_TestTensorNonScalarBytesAllocateDelete.t  view on Meta::CPAN

	my $t = AI::TensorFlow::Libtensorflow::Tensor->Allocate( STRING, \@dims,
		$sz_tstring * $num_elements );

	my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
	my $data_ptr = scalar_to_pointer( ${ $t->Data } );
	for my $i (0..$batch_size-1) {
		my $data_i_ptr = $data_ptr + $sz_tstring * $i;
		my $data_i = $ffi->cast('opaque', 'TF_TString', $data_i_ptr );
		$data_i->Init;
		$data_i->{owner} = $t; # do not want to free the pointer
		# The following input string is long enough to make sure that the copy
		# to tstring happens in large mode.
		$data_i->Copy(
			"This is the " . ($i + 1) . "th. data element\n"
		);
	}

	undef $t;

	pass 'Created TF_STRING tensor and deallocated';
};


