AI-TensorFlow-Libtensorflow

 view release on metacpan or  search on metacpan

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

                                                        int max_consumers);

=head2 TF_OperationNumControlInputs

=over 2

  Get the number of control inputs to an operation.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern int TF_OperationNumControlInputs(TF_Operation* oper);

=head2 TF_OperationGetControlInputs

=over 2

  Get list of all control inputs to an operation.  `control_inputs` must
  point to an array of length `max_control_inputs` (ideally set to
  TF_OperationNumControlInputs(oper)).  Returns the number of control
  inputs (should match TF_OperationNumControlInputs(oper)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern int TF_OperationGetControlInputs(
      TF_Operation* oper, TF_Operation** control_inputs, int max_control_inputs);

=head2 TF_OperationNumControlOutputs

=over 2

  Get the number of operations that have `*oper` as a control input.
  Note that this number can change when new operations are added to
  the graph.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern int TF_OperationNumControlOutputs(TF_Operation* oper);

=head2 TF_OperationGetControlOutputs

=over 2

  Get the list of operations that have `*oper` as a control input.
  `control_outputs` must point to an array of length at least
  `max_control_outputs` (ideally set to
  TF_OperationNumControlOutputs(oper)). Beware that a concurrent
  modification of the graph can increase the number of control
  outputs.  Returns the number of control outputs (should match
  TF_OperationNumControlOutputs(oper)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern int TF_OperationGetControlOutputs(
      TF_Operation* oper, TF_Operation** control_outputs,
      int max_control_outputs);

=head2 TF_OperationGetAttrMetadata

=over 2

  Returns metadata about the value of the attribute `attr_name` of `oper`.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern TF_AttrMetadata TF_OperationGetAttrMetadata(
      TF_Operation* oper, const char* attr_name, TF_Status* status);

=head2 TF_OperationGetAttrString

=over 2

  Fills in `value` with the value of the attribute `attr_name`.  `value` must
  point to an array of length at least `max_length` (ideally set to
  TF_AttrMetadata.total_size from TF_OperationGetAttrMetadata(oper,
  attr_name)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrString(TF_Operation* oper,
                                                       const char* attr_name,
                                                       void* value,
                                                       size_t max_length,
                                                       TF_Status* status);

=head2 TF_OperationGetAttrStringList

=over 2

  Get the list of strings in the value of the attribute `attr_name`.  Fills in
  `values` and `lengths`, each of which must point to an array of length at
  least `max_values`.
  
  The elements of values will point to addresses in `storage` which must be at
  least `storage_size` bytes in length.  Ideally, max_values would be set to
  TF_AttrMetadata.list_size and `storage` would be at least
  TF_AttrMetadata.total_size, obtained from TF_OperationGetAttrMetadata(oper,
  attr_name).
  
  Fails if storage_size is too small to hold the requested number of strings.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrStringList(
      TF_Operation* oper, const char* attr_name, void** values, size_t* lengths,
      int max_values, void* storage, size_t storage_size, TF_Status* status);

=head2 TF_OperationGetAttrInt

=over 2

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrInt(TF_Operation* oper,
                                                    const char* attr_name,
                                                    int64_t* value,
                                                    TF_Status* status);

=head2 TF_OperationGetAttrIntList

=over 2

  Fills in `values` with the value of the attribute `attr_name` of `oper`.
  `values` must point to an array of length at least `max_values` (ideally set
  to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper,
  attr_name)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrIntList(TF_Operation* oper,
                                                        const char* attr_name,
                                                        int64_t* values,
                                                        int max_values,
                                                        TF_Status* status);

=head2 TF_OperationGetAttrFloat

=over 2

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrFloat(TF_Operation* oper,
                                                      const char* attr_name,
                                                      float* value,
                                                      TF_Status* status);

=head2 TF_OperationGetAttrFloatList

=over 2

  Fills in `values` with the value of the attribute `attr_name` of `oper`.
  `values` must point to an array of length at least `max_values` (ideally set
  to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper,
  attr_name)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrFloatList(TF_Operation* oper,
                                                          const char* attr_name,
                                                          float* values,
                                                          int max_values,
                                                          TF_Status* status);

=head2 TF_OperationGetAttrBool

=over 2

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrBool(TF_Operation* oper,
                                                     const char* attr_name,
                                                     unsigned char* value,
                                                     TF_Status* status);

=head2 TF_OperationGetAttrBoolList

=over 2

  Fills in `values` with the value of the attribute `attr_name` of `oper`.
  `values` must point to an array of length at least `max_values` (ideally set
  to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper,
  attr_name)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrBoolList(TF_Operation* oper,
                                                         const char* attr_name,
                                                         unsigned char* values,
                                                         int max_values,
                                                         TF_Status* status);

=head2 TF_OperationGetAttrType

=over 2

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrType(TF_Operation* oper,
                                                     const char* attr_name,
                                                     TF_DataType* value,
                                                     TF_Status* status);

=head2 TF_OperationGetAttrTypeList

=over 2

  Fills in `values` with the value of the attribute `attr_name` of `oper`.
  `values` must point to an array of length at least `max_values` (ideally set
  to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper,
  attr_name)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrTypeList(TF_Operation* oper,
                                                         const char* attr_name,
                                                         TF_DataType* values,
                                                         int max_values,
                                                         TF_Status* status);

=head2 TF_OperationGetAttrShape

=over 2

  Fills in `value` with the value of the attribute `attr_name` of `oper`.
  `value` must point to an array of length at least `num_dims` (ideally set to
  TF_AttrMetadata.size from TF_OperationGetAttrMetadata(oper, attr_name)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrShape(TF_Operation* oper,
                                                      const char* attr_name,
                                                      int64_t* value,
                                                      int num_dims,
                                                      TF_Status* status);

=head2 TF_OperationGetAttrShapeList

=over 2

  Fills in `dims` with the list of shapes in the attribute `attr_name` of
  `oper` and `num_dims` with the corresponding number of dimensions. On return,
  for every i where `num_dims[i]` > 0, `dims[i]` will be an array of
  `num_dims[i]` elements. A value of -1 for `num_dims[i]` indicates that the
  i-th shape in the list is unknown.
  
  The elements of `dims` will point to addresses in `storage` which must be
  large enough to hold at least `storage_size` int64_ts.  Ideally, `num_shapes`
  would be set to TF_AttrMetadata.list_size and `storage_size` would be set to
  TF_AttrMetadata.total_size from TF_OperationGetAttrMetadata(oper,
  attr_name).
  
  Fails if storage_size is insufficient to hold the requested shapes.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrShapeList(
      TF_Operation* oper, const char* attr_name, int64_t** dims, int* num_dims,
      int num_shapes, int64_t* storage, int storage_size, TF_Status* status);

=head2 TF_OperationGetAttrTensorShapeProto

=over 2

  Sets `value` to the binary-serialized TensorShapeProto of the value of
  `attr_name` attribute of `oper`.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrTensorShapeProto(
      TF_Operation* oper, const char* attr_name, TF_Buffer* value,
      TF_Status* status);

=head2 TF_OperationGetAttrTensorShapeProtoList

=over 2

  Fills in `values` with binary-serialized TensorShapeProto values of the
  attribute `attr_name` of `oper`. `values` must point to an array of length at
  least `max_values` (ideally set to TF_AttrMetadata.list_size from
  TF_OperationGetAttrMetadata(oper, attr_name)).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrTensorShapeProtoList(
      TF_Operation* oper, const char* attr_name, TF_Buffer** values,
      int max_values, TF_Status* status);

=head2 TF_OperationGetAttrTensor

=over 2

  Gets the TF_Tensor valued attribute of `attr_name` of `oper`.
  
  Allocates a new TF_Tensor which the caller is expected to take
  ownership of (and can deallocate using TF_DeleteTensor).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrTensor(TF_Operation* oper,
                                                       const char* attr_name,
                                                       TF_Tensor** value,
                                                       TF_Status* status);

=head2 TF_OperationGetAttrTensorList

=over 2

  Fills in `values` with the TF_Tensor values of the attribute `attr_name` of
  `oper`. `values` must point to an array of TF_Tensor* of length at least
  `max_values` (ideally set to TF_AttrMetadata.list_size from
  TF_OperationGetAttrMetadata(oper, attr_name)).
  
  The caller takes ownership of all the non-null TF_Tensor* entries in `values`
  (which can be deleted using TF_DeleteTensor(values[i])).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrTensorList(TF_Operation* oper,
                                                           const char* attr_name,
                                                           TF_Tensor** values,
                                                           int max_values,
                                                           TF_Status* status);

=head2 TF_OperationGetAttrValueProto

=over 2

  Sets `output_attr_value` to the binary-serialized AttrValue proto
  representation of the value of the `attr_name` attr of `oper`.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationGetAttrValueProto(
      TF_Operation* oper, const char* attr_name, TF_Buffer* output_attr_value,
      TF_Status* status);

=head2 TF_OperationGetNumAttrs

=over 2

  Get the number of attributes the operation has.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern int TF_OperationGetNumAttrs(TF_Operation* oper);

=head2 TF_OperationGetAttrNameLength

=over 2

  Get the length of the name of the ith attribute, or -1 if there is not an
  ith attribute.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern int TF_OperationGetAttrNameLength(TF_Operation* oper,
                                                          int i);

=head2 TF_OperationGetAttrName

=over 2

  Get the name of the ith attribute.  output should have the size of
  TF_OperationGetAttrNameLength(oper, i).

=back

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  `session_options`, and then initializes state (restoring tensors and other
  assets) using `run_options`.
  
  Any NULL and non-NULL value combinations for (`run_options`, `meta_graph_def`)
  are valid.
  
  - `export_dir` must be set to the path of the exported SavedModel.
  - `tags` must include the set of tags used to identify one MetaGraphDef in
     the SavedModel.
  - `graph` must be a graph newly allocated with TF_NewGraph().
  
  If successful, populates `graph` with the contents of the Graph and
  `meta_graph_def` with the MetaGraphDef of the loaded model.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern TF_Session* TF_LoadSessionFromSavedModel(
      const TF_SessionOptions* session_options, const TF_Buffer* run_options,
      const char* export_dir, const char* const* tags, int tags_len,
      TF_Graph* graph, TF_Buffer* meta_graph_def, TF_Status* status);

=head2 TF_CloseSession

=over 2

  Close a session.
  
  Contacts any other processes associated with the session, if applicable.
  May not be called after TF_DeleteSession().

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_CloseSession(TF_Session*, TF_Status* status);

=head2 TF_DeleteSession

=over 2

  Destroy a session object.
  
  Even if error information is recorded in *status, this call discards all
  local resources associated with the session.  The session may not be used
  during or after this call (and the session drops its reference to the
  corresponding graph).

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_DeleteSession(TF_Session*, TF_Status* status);

=head2 TF_SessionRun

=over 2

  Run the graph associated with the session starting with the supplied inputs
  (inputs[0,ninputs-1] with corresponding values in input_values[0,ninputs-1]).
  
  Any NULL and non-NULL value combinations for (`run_options`,
  `run_metadata`) are valid.
  
     - `run_options` may be NULL, in which case it will be ignored; or
       non-NULL, in which case it must point to a `TF_Buffer` containing the
       serialized representation of a `RunOptions` protocol buffer.
     - `run_metadata` may be NULL, in which case it will be ignored; or
       non-NULL, in which case it must point to an empty, freshly allocated
       `TF_Buffer` that may be updated to contain the serialized representation
       of a `RunMetadata` protocol buffer.
  
  The caller retains ownership of `input_values` (which can be deleted using
  TF_DeleteTensor). The caller also retains ownership of `run_options` and/or
  `run_metadata` (when not NULL) and should manually call TF_DeleteBuffer on
  them.
  
  On success, the tensors corresponding to outputs[0,noutputs-1] are placed in
  output_values[]. Ownership of the elements of output_values[] is transferred
  to the caller, which must eventually call TF_DeleteTensor on them.
  
  On failure, output_values[] contains NULLs.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_SessionRun(
      TF_Session* session,
      // RunOptions
      const TF_Buffer* run_options,
      // Input tensors
      const TF_Output* inputs, TF_Tensor* const* input_values, int ninputs,
      // Output tensors
      const TF_Output* outputs, TF_Tensor** output_values, int noutputs,
      // Target operations
      const TF_Operation* const* target_opers, int ntargets,
      // RunMetadata
      TF_Buffer* run_metadata,
      // Output status
      TF_Status*);

=head2 TF_SessionPRunSetup

=over 2

  Set up the graph with the intended feeds (inputs) and fetches (outputs) for a
  sequence of partial run calls.
  
  On success, returns a handle that is used for subsequent PRun calls. The
  handle should be deleted with TF_DeletePRunHandle when it is no longer
  needed.
  
  On failure, out_status contains a tensorflow::Status with an error
  message. *handle is set to nullptr.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_SessionPRunSetup(
      TF_Session*,
      // Input names
      const TF_Output* inputs, int ninputs,
      // Output names
      const TF_Output* outputs, int noutputs,
      // Target operations
      const TF_Operation* const* target_opers, int ntargets,
      // Output handle
      const char** handle,
      // Output status
      TF_Status*);

=head2 TF_SessionPRun

=over 2

  Continue to run the graph with additional feeds and fetches. The
  execution state is uniquely identified by the handle.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_SessionPRun(
      TF_Session*, const char* handle,
      // Input tensors
      const TF_Output* inputs, TF_Tensor* const* input_values, int ninputs,
      // Output tensors
      const TF_Output* outputs, TF_Tensor** output_values, int noutputs,
      // Target operations
      const TF_Operation* const* target_opers, int ntargets,
      // Output status
      TF_Status*);

=head2 TF_DeletePRunHandle

=over 2

  Deletes a handle allocated by TF_SessionPRunSetup.
  Once called, no more calls to TF_SessionPRun should be made.

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

=over 2

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_CloseDeprecatedSession(TF_DeprecatedSession*,
                                                       TF_Status* status);

=head2 TF_DeleteDeprecatedSession

=over 2

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_DeleteDeprecatedSession(TF_DeprecatedSession*,
                                                        TF_Status* status);

=head2 TF_Reset

=over 2

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_Reset(const TF_SessionOptions* opt,
                                      const char** containers, int ncontainers,
                                      TF_Status* status);

=head2 TF_ExtendGraph

=over 2

  Treat the bytes proto[0,proto_len-1] as a serialized GraphDef and
  add the nodes in that GraphDef to the graph for the session.
  
  Prefer use of TF_Session and TF_GraphImportGraphDef over this.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_ExtendGraph(TF_DeprecatedSession*,
                                            const void* proto, size_t proto_len,
                                            TF_Status*);

=head2 TF_Run

=over 2

  See TF_SessionRun() above.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_Run(TF_DeprecatedSession*,
                                    const TF_Buffer* run_options,
                                    const char** input_names, TF_Tensor** inputs,
                                    int ninputs, const char** output_names,
                                    TF_Tensor** outputs, int noutputs,
                                    const char** target_oper_names, int ntargets,
                                    TF_Buffer* run_metadata, TF_Status*);

=head2 TF_PRunSetup

=over 2

  See TF_SessionPRunSetup() above.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_PRunSetup(TF_DeprecatedSession*,
                                          const char** input_names, int ninputs,
                                          const char** output_names, int noutputs,
                                          const char** target_oper_names,
                                          int ntargets, const char** handle,
                                          TF_Status*);

=head2 TF_PRun

=over 2

  See TF_SessionPRun above.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_PRun(TF_DeprecatedSession*, const char* handle,
                                     const char** input_names, TF_Tensor** inputs,
                                     int ninputs, const char** output_names,
                                     TF_Tensor** outputs, int noutputs,
                                     const char** target_oper_names, int ntargets,
                                     TF_Status*);

=head2 TF_SessionListDevices

=over 2

  Lists all devices in a TF_Session.
  
  Caller takes ownership of the returned TF_DeviceList* which must eventually
  be freed with a call to TF_DeleteDeviceList.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern TF_DeviceList* TF_SessionListDevices(TF_Session* session,
                                                             TF_Status* status);

=head2 TF_DeprecatedSessionListDevices

=over 2

  Lists all devices in a TF_Session.
  
  Caller takes ownership of the returned TF_DeviceList* which must eventually
  be freed with a call to TF_DeleteDeviceList.

=back

  /* From <tensorflow/c/c_api.h> */

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN


=head2 TF_DeviceListMemoryBytes

=over 2

  Retrieve the amount of memory associated with a given device.
  
  If index is out of bounds, an error code will be set in the status object,
  and -1 will be returned.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern int64_t TF_DeviceListMemoryBytes(
      const TF_DeviceList* list, int index, TF_Status* status);

=head2 TF_DeviceListIncarnation

=over 2

  Retrieve the incarnation number of a given device.
  
  If index is out of bounds, an error code will be set in the status object,
  and 0 will be returned.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern uint64_t TF_DeviceListIncarnation(
      const TF_DeviceList* list, int index, TF_Status* status);

=head2 TF_LoadLibrary

=over 2

  Load the library specified by library_filename and register the ops and
  kernels present in that library.
  
  Pass "library_filename" to a platform-specific mechanism for dynamically
  loading a library. The rules for determining the exact location of the
  library are platform-specific and are not documented here.
  
  On success, place OK in status and return the newly created library handle.
  The caller owns the library handle.
  
  On failure, place an error status in status and return NULL.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern TF_Library* TF_LoadLibrary(const char* library_filename,
                                                   TF_Status* status);

=head2 TF_GetOpList

=over 2

  Get the OpList of OpDefs defined in the library pointed by lib_handle.
  
  Returns a TF_Buffer. The memory pointed to by the result is owned by
  lib_handle. The data in the buffer will be the serialized OpList proto for
  ops defined in the library.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern TF_Buffer TF_GetOpList(TF_Library* lib_handle);

=head2 TF_DeleteLibraryHandle

=over 2

  Frees the memory associated with the library handle.
  Does NOT unload the library.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_DeleteLibraryHandle(TF_Library* lib_handle);

=head2 TF_GetAllOpList

=over 2

  Get the OpList of all OpDefs defined in this address space.
  Returns a TF_Buffer, ownership of which is transferred to the caller
  (and can be freed using TF_DeleteBuffer).
  
  The data in the buffer will be the serialized OpList proto for ops registered
  in this address space.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern TF_Buffer* TF_GetAllOpList(void);

=head2 TF_NewApiDefMap

=over 2

  Creates a new TF_ApiDefMap instance.
  
  Params:
   op_list_buffer - TF_Buffer instance containing serialized OpList
     protocol buffer. (See
     https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto
     for the OpList proto definition).
   status - Set to OK on success and an appropriate error on failure.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern TF_ApiDefMap* TF_NewApiDefMap(TF_Buffer* op_list_buffer,
                                                      TF_Status* status);

=head2 TF_DeleteApiDefMap

=over 2

  Deallocates a TF_ApiDefMap.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_DeleteApiDefMap(TF_ApiDefMap* apimap);

=head2 TF_ApiDefMapPut

=over 2

  Add ApiDefs to the map.
  
  `text` corresponds to a text representation of an ApiDefs protocol message.
  (https://www.tensorflow.org/code/tensorflow/core/framework/api_def.proto).
  
  The provided ApiDefs will be merged with existing ones in the map, with
  precedence given to the newly added version in case of conflicts with
  previous calls to TF_ApiDefMapPut.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_ApiDefMapPut(TF_ApiDefMap* api_def_map,
                                             const char* text, size_t text_len,
                                             TF_Status* status);

=head2 TF_ApiDefMapGet

=over 2

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  On success, place OK in status.
  On failure, place an error status in status.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_RegisterFilesystemPlugin(
      const char* plugin_filename, TF_Status* status);

=head2 TF_NewShape

=over 2

  Return a new, unknown rank shape object. The caller is responsible for
  calling TF_DeleteShape to deallocate and destroy the returned shape.

=back

  /* From <tensorflow/c/tf_shape.h> */
  TF_CAPI_EXPORT extern TF_Shape* TF_NewShape();

=head2 TF_ShapeDims

=over 2

  Returns the rank of `shape`. If `shape` has unknown rank, returns -1.

=back

  /* From <tensorflow/c/tf_shape.h> */
  TF_CAPI_EXPORT extern int TF_ShapeDims(const TF_Shape* shape);

=head2 TF_ShapeDimSize

=over 2

  Returns the `d`th dimension of `shape`. If `shape` has unknown rank,
  invoking this function is undefined behavior. Returns -1 if dimension is
  unknown.

=back

  /* From <tensorflow/c/tf_shape.h> */
  TF_CAPI_EXPORT extern int64_t TF_ShapeDimSize(const TF_Shape* shape, int d);

=head2 TF_DeleteShape

=over 2

  Deletes `shape`.

=back

  /* From <tensorflow/c/tf_shape.h> */
  TF_CAPI_EXPORT extern void TF_DeleteShape(TF_Shape* shape);

=head2 TF_NewTensor

=over 2

  Return a new tensor that holds the bytes data[0,len-1].
  
  The data will be deallocated by a subsequent call to TF_DeleteTensor via:
       (*deallocator)(data, len, deallocator_arg)
  Clients must provide a custom deallocator function so they can pass in
  memory managed by something like numpy.
  
  May return NULL (and invoke the deallocator) if the provided data buffer
  (data, len) is inconsistent with a tensor of the given TF_DataType
  and the shape specified by (dims, num_dims).

=back

  /* From <tensorflow/c/tf_tensor.h> */
  TF_CAPI_EXPORT extern TF_Tensor* TF_NewTensor(
      TF_DataType, const int64_t* dims, int num_dims, void* data, size_t len,
      void (*deallocator)(void* data, size_t len, void* arg),
      void* deallocator_arg);

=head2 TF_AllocateTensor

=over 2

  Allocate and return a new Tensor.
  
  This function is an alternative to TF_NewTensor and should be used when
  memory is allocated to pass the Tensor to the C API. The allocated memory
  satisfies TensorFlow's memory alignment preferences and should be preferred
  over calling malloc and free.
  
  The caller must set the Tensor values by writing them to the pointer returned
  by TF_TensorData with length TF_TensorByteSize.

=back

  /* From <tensorflow/c/tf_tensor.h> */
  TF_CAPI_EXPORT extern TF_Tensor* TF_AllocateTensor(TF_DataType,
                                                     const int64_t* dims,
                                                     int num_dims, size_t len);

=head2 TF_TensorMaybeMove

=over 2

  Deletes `tensor` and returns a new TF_Tensor with the same content if
  possible. Returns nullptr and leaves `tensor` untouched if not.

=back

  /* From <tensorflow/c/tf_tensor.h> */
  TF_CAPI_EXPORT extern TF_Tensor* TF_TensorMaybeMove(TF_Tensor* tensor);

=head2 TF_DeleteTensor

=over 2

  Destroy a tensor.

=back

  /* From <tensorflow/c/tf_tensor.h> */
  TF_CAPI_EXPORT extern void TF_DeleteTensor(TF_Tensor*);

=head2 TF_TensorType

=over 2

  Return the type of a tensor element.

=back

  /* From <tensorflow/c/tf_tensor.h> */
  TF_CAPI_EXPORT extern TF_DataType TF_TensorType(const TF_Tensor*);

=head2 TF_SetShape

=over 2

  Set a new shape for the Tensor.

=back

  /* From <tensorflow/c/tf_tensor.h> */
  TF_CAPI_EXPORT extern void TF_SetShape(TF_Tensor* tensor, const int64_t* dims,
                                         int num_dims);

=head2 TF_NumDims

=over 2

  Return the number of dimensions that the tensor has.

=back

  /* From <tensorflow/c/tf_tensor.h> */
  TF_CAPI_EXPORT extern int TF_NumDims(const TF_Tensor*);

=head2 TF_Dim

=over 2

  Return the length of the tensor in the "dim_index" dimension.
  REQUIRES: 0 <= dim_index < TF_NumDims(tensor)

=back

  /* From <tensorflow/c/tf_tensor.h> */
  TF_CAPI_EXPORT extern int64_t TF_Dim(const TF_Tensor* tensor, int dim_index);

=head2 TF_TensorByteSize

=over 2

  Return the size of the underlying data in bytes.

=back

  /* From <tensorflow/c/tf_tensor.h> */
  TF_CAPI_EXPORT extern size_t TF_TensorByteSize(const TF_Tensor*);

=head2 TF_TensorData

=over 2

  Return a pointer to the underlying data buffer.

=back

  /* From <tensorflow/c/tf_tensor.h> */
  TF_CAPI_EXPORT extern void* TF_TensorData(const TF_Tensor*);

=head2 TF_TensorElementCount

=over 2

  Returns the number of elements in the tensor.

=back

  /* From <tensorflow/c/tf_tensor.h> */
  TF_CAPI_EXPORT extern int64_t TF_TensorElementCount(const TF_Tensor* tensor);

=head2 TF_TensorBitcastFrom

=over 2

  Copy the internal data representation of `from` to `to`. `new_dims` and
  `num_new_dims` specify the new shape of the `to` tensor, `type` specifies its
  data type. On success, *status is set to TF_OK and the two tensors share the
  same data buffer.
  
  This call requires that the `from` tensor and the given type and shape (dims
  and num_dims) are "compatible" (i.e. they occupy the same number of bytes).
  Specifically, given from_type_size = TF_DataTypeSize(TF_TensorType(from)):
  
  ShapeElementCount(dims, num_dims) * TF_DataTypeSize(type)
  
  must equal
  
  TF_TensorElementCount(from) * from_type_size
  
  where TF_ShapeElementCount would be the number of elements in a tensor with
  the given shape.
  
  In addition, this function requires:
    * TF_DataTypeSize(TF_TensorType(from)) != 0
    * TF_DataTypeSize(type) != 0
  
  If any of the requirements are not met, *status is set to
  TF_INVALID_ARGUMENT.

=back

  /* From <tensorflow/c/tf_tensor.h> */
  TF_CAPI_EXPORT extern void TF_TensorBitcastFrom(const TF_Tensor* from,
                                                  TF_DataType type, TF_Tensor* to,
                                                  const int64_t* new_dims,
                                                  int num_new_dims,
                                                  TF_Status* status);

=head2 TF_TensorIsAligned

=over 2

  Returns bool iff this tensor is aligned.

=back

  /* From <tensorflow/c/tf_tensor.h> */
  TF_CAPI_EXPORT extern bool TF_TensorIsAligned(const TF_Tensor*);

=head2 TF_NewStatus

=over 2

  Return a new status object.

=back

  /* From <tensorflow/c/tf_status.h> */
  TF_CAPI_EXPORT extern TF_Status* TF_NewStatus(void);

=head2 TF_DeleteStatus

=over 2

  Delete a previously created status object.

=back

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  /* From <tensorflow/c/tf_tstring.h> */
  TF_CAPI_EXPORT extern void TF_StringAssignView(TF_TString *dst, const char *src,
                                                 size_t size);

=head2 TF_StringGetDataPointer

=over 2

=back

  /* From <tensorflow/c/tf_tstring.h> */
  TF_CAPI_EXPORT extern const char *TF_StringGetDataPointer(
      const TF_TString *tstr);

=head2 TF_StringGetType

=over 2

=back

  /* From <tensorflow/c/tf_tstring.h> */
  TF_CAPI_EXPORT extern TF_TString_Type TF_StringGetType(const TF_TString *str);

=head2 TF_StringGetSize

=over 2

=back

  /* From <tensorflow/c/tf_tstring.h> */
  TF_CAPI_EXPORT extern size_t TF_StringGetSize(const TF_TString *tstr);

=head2 TF_StringGetCapacity

=over 2

=back

  /* From <tensorflow/c/tf_tstring.h> */
  TF_CAPI_EXPORT extern size_t TF_StringGetCapacity(const TF_TString *str);

=head2 TF_StringDealloc

=over 2

=back

  /* From <tensorflow/c/tf_tstring.h> */
  TF_CAPI_EXPORT extern void TF_StringDealloc(TF_TString *tstr);

=head2 TF_DataTypeSize

=over 2

  TF_DataTypeSize returns the sizeof() for the underlying type corresponding
  to the given TF_DataType enum value. Returns 0 for variable length types
  (e.g. TF_STRING) or on failure.

=back

  /* From <tensorflow/c/tf_datatype.h> */
  TF_CAPI_EXPORT extern size_t TF_DataTypeSize(TF_DataType dt);

=head2 TF_NewOpDefinitionBuilder

=over 2

  Returns a newly allocated op definition builder for the given op name. The
  returned builder may be customized with the `TF_OpDefinitionBuilder...`
  functions and then registered with TensorFlow with TF_RegisterOpDefinition.
  
  The returned pointer is either freed by a call to TF_RegisterOpDefinition, or
  can be manually deleted by TF_DeleteOpDefinitionBuilder if it is never
  registered.

=back

  /* From <tensorflow/c/ops.h> */
  TF_CAPI_EXPORT extern TF_OpDefinitionBuilder* TF_NewOpDefinitionBuilder(
      const char* op_name);

=head2 TF_RegisterOpDefinition

=over 2

  Registers the given op builder with TensorFlow. Indicates success or
  otherwise in the given status.
  
  `builder` is freed whether the op was successfully registered or not. You
  must call either this function or TF_DeleteOpDefinitionBuilder to free the
  builder, but never both.

=back

  /* From <tensorflow/c/ops.h> */
  TF_CAPI_EXPORT extern void TF_RegisterOpDefinition(
      TF_OpDefinitionBuilder* builder, TF_Status* status);

=head2 TF_DeleteOpDefinitionBuilder

=over 2

  Frees the given op definition builder. You must call either this function or
  TF_RegisterOpDefinition to free the builder, but never both.

=back

  /* From <tensorflow/c/ops.h> */
  TF_CAPI_EXPORT extern void TF_DeleteOpDefinitionBuilder(
      TF_OpDefinitionBuilder* builder);

=head2 TF_OpDefinitionBuilderAddAttr

=over 2

  Adds an attr to the given TF_OpDefinitionBuilder. The spec has
  format "<name>:<type>" or "<name>:<type>=<default>"
  where <name> matches regexp [a-zA-Z][a-zA-Z0-9_]*.
  By convention, names containing only capital letters are reserved for
  attributes whose values can be inferred by the operator implementation if not
  supplied by the user. If the attribute name contains characters other than

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN


=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern void TF_DeleteDir(const char* dirname, TF_Status* status);

=head2 TF_DeleteRecursively

=over 2

  Deletes the specified directory and all subdirectories and files underneath
  it. This is accomplished by traversing the directory tree rooted at dirname
  and deleting entries as they are encountered.
  
  If dirname itself is not readable or does not exist, *undeleted_dir_count is
  set to 1, *undeleted_file_count is set to 0 and an appropriate status (e.g.
  TF_NOT_FOUND) is returned.
  
  If dirname and all its descendants were successfully deleted, TF_OK is
  returned and both error counters are set to zero.
  
  Otherwise, while traversing the tree, undeleted_file_count and
  undeleted_dir_count are updated if an entry of the corresponding type could
  not be deleted. The returned error status represents the reason that any one
  of these entries could not be deleted.
  
  Typical status codes:
   * TF_OK - dirname exists and we were able to delete everything underneath
   * TF_NOT_FOUND - dirname doesn't exist
   * TF_PERMISSION_DENIED - dirname or some descendant is not writable
   * TF_UNIMPLEMENTED - some underlying functions (like Delete) are not
     implemented

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern void TF_DeleteRecursively(const char* dirname,
                                                  int64_t* undeleted_file_count,
                                                  int64_t* undeleted_dir_count,
                                                  TF_Status* status);

=head2 TF_FileStat

=over 2

  Obtains statistics for the given path. If status is TF_OK, *stats is
  updated, otherwise it is not touched.

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern void TF_FileStat(const char* filename,
                                         TF_FileStatistics* stats,
                                         TF_Status* status);

=head2 TF_NewWritableFile

=over 2

  Creates or truncates the given filename and returns a handle to be used for
  appending data to the file. If status is TF_OK, *handle is updated and the
  caller is responsible for freeing it (see TF_CloseWritableFile).

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern void TF_NewWritableFile(const char* filename,
                                                TF_WritableFileHandle** handle,
                                                TF_Status* status);

=head2 TF_CloseWritableFile

=over 2

  Closes the given handle and frees its memory. If there was a problem closing
  the file, it is indicated by status. Memory is freed in any case.

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern void TF_CloseWritableFile(TF_WritableFileHandle* handle,
                                                  TF_Status* status);

=head2 TF_SyncWritableFile

=over 2

  Syncs content of the handle to the filesystem. Blocks waiting for the
  filesystem to indicate that the content has been persisted.

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern void TF_SyncWritableFile(TF_WritableFileHandle* handle,
                                                 TF_Status* status);

=head2 TF_FlushWritableFile

=over 2

  Flush local buffers to the filesystem. If the process terminates after a
  successful flush, the contents may still be persisted, since the underlying
  filesystem may eventually flush the contents.  If the OS or machine crashes
  after a successful flush, the contents may or may not be persisted, depending
  on the implementation.

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern void TF_FlushWritableFile(TF_WritableFileHandle* handle,
                                                  TF_Status* status);

=head2 TF_AppendWritableFile

=over 2

  Appends the given bytes to the file. Any failure to do so is indicated in
  status.

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern void TF_AppendWritableFile(TF_WritableFileHandle* handle,
                                                   const char* data,
                                                   size_t length,
                                                   TF_Status* status);

=head2 TF_DeleteFile

=over 2

  Deletes the named file and indicates whether successful in *status.

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern void TF_DeleteFile(const char* filename,
                                           TF_Status* status);

=head2 TF_StringStreamNext

=over 2

  Retrieves the next item from the given TF_StringStream and places a pointer
  to it in *result. If no more items are in the list, *result is set to NULL
  and false is returned.
  
  Ownership of the items retrieved with this function remains with the library.
  Item pointers are invalidated after a call to TF_StringStreamDone.

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern bool TF_StringStreamNext(TF_StringStream* list,
                                                 const char** result);

=head2 TF_StringStreamDone

=over 2

  Frees the resources associated with given string list. All pointers returned
  by TF_StringStreamNext are invalid after this call.

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern void TF_StringStreamDone(TF_StringStream* list);

=head2 TF_GetChildren

=over 2

  Retrieves the list of children of the given directory. You can iterate
  through the list with TF_StringStreamNext. The caller is responsible for
  freeing the list (see TF_StringStreamDone).

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern TF_StringStream* TF_GetChildren(const char* filename,
                                                        TF_Status* status);

=head2 TF_GetLocalTempDirectories

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  Creates a temporary file name with an extension.
  The caller is responsible for freeing the returned pointer.

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern char* TF_GetTempFileName(const char* extension);

=head2 TF_NowNanos

=over 2

  Returns the number of nanoseconds since the Unix epoch.

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern uint64_t TF_NowNanos(void);

=head2 TF_NowMicros

=over 2

  Returns the number of microseconds since the Unix epoch.

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern uint64_t TF_NowMicros(void);

=head2 TF_NowSeconds

=over 2

  Returns the number of seconds since the Unix epoch.

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern uint64_t TF_NowSeconds(void);

=head2 TF_DefaultThreadOptions

=over 2

  Populates a TF_ThreadOptions struct with system-default values.

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern void TF_DefaultThreadOptions(TF_ThreadOptions* options);

=head2 TF_StartThread

=over 2

  Returns a new thread that is running work_func and is identified
  (for debugging/performance-analysis) by thread_name.
  
  The given param (which may be null) is passed to work_func when the thread
  starts. In this way, data may be passed from the thread back to the caller.
  
  Caller takes ownership of the result and must call TF_JoinThread on it
  eventually.

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern TF_Thread* TF_StartThread(const TF_ThreadOptions* options,
                                                  const char* thread_name,
                                                  void (*work_func)(void*),
                                                  void* param);

=head2 TF_JoinThread

=over 2

  Waits for the given thread to finish execution, then deletes it.

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern void TF_JoinThread(TF_Thread* thread);

=head2 TF_LoadSharedLibrary

=over 2

  \brief Load a dynamic library.
  
  Pass "library_filename" to a platform-specific mechanism for dynamically
  loading a library. The rules for determining the exact location of the
  library are platform-specific and are not documented here.
  
  On success, place OK in status and return the newly created library handle.
  Otherwise returns nullptr and sets the error status.

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern void* TF_LoadSharedLibrary(const char* library_filename,
                                                   TF_Status* status);

=head2 TF_GetSymbolFromLibrary

=over 2

  \brief Get a pointer to a symbol from a dynamic library.
  
  "handle" should be a pointer returned from a previous call to
  TF_LoadLibraryFromEnv. On success, place OK in status and return a pointer to
  the located symbol. Otherwise returns nullptr and sets the error status.

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern void* TF_GetSymbolFromLibrary(void* handle,
                                                      const char* symbol_name,
                                                      TF_Status* status);

=head2 TF_Log

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN


  /* From <tensorflow/c/kernels.h> */
  TF_CAPI_EXPORT extern TF_Tensor* TF_GetMutableOutput(TF_OpKernelContext* ctx,
                                                       int i, TF_Status* status);

=head2 TF_GetSerializedFunctionDefLibrary

=over 2

  Retrieves a serialized FunctionDefLibrary. Status will be set.

=back

  /* From <tensorflow/c/kernels.h> */
  TF_CAPI_EXPORT extern void TF_GetSerializedFunctionDefLibrary(
      TF_OpKernelContext* ctx, TF_Buffer* serialized_function_def_library,
      TF_Status* status);

=head2 TF_GetSerializedConfigProto

=over 2

  Retrieves a serialized ConfigProto. Status will be set.

=back

  /* From <tensorflow/c/kernels.h> */
  TF_CAPI_EXPORT extern void TF_GetSerializedConfigProto(
      TF_OpKernelContext* ctx, TF_Buffer* serialized_config_proto,
      TF_Status* status);

=head2 TF_OpKernelConstruction_Failure

=over 2

  Notifies the given OpKernelConstruction that kernel construction has failed.

=back

  /* From <tensorflow/c/kernels.h> */
  TF_CAPI_EXPORT extern void TF_OpKernelConstruction_Failure(
      TF_OpKernelConstruction* ctx, TF_Status* status);

=head2 TF_OpKernelContext_Failure

=over 2

  Notifies the given OpKernelContext that the kernel's compute function has
  failed.

=back

  /* From <tensorflow/c/kernels.h> */
  TF_CAPI_EXPORT extern void TF_OpKernelContext_Failure(TF_OpKernelContext* ctx,
                                                        TF_Status* status);

=head2 TF_ExpectedOutputDataType

=over 2

  Returns the expected output data type of the ith output. If i < 0 or
  i >= TF_NumOutputs(ctx), the program aborts.

=back

  /* From <tensorflow/c/kernels.h> */
  TF_CAPI_EXPORT extern TF_DataType TF_ExpectedOutputDataType(
      TF_OpKernelContext* ctx, int i);

=head2 TF_IsHostMemoryInput

=over 2

  Returns true if the ith input is allocated in host memory. If i < 0 or i >=
  TF_NumInputs(ctx), the program aborts.

=back

  /* From <tensorflow/c/kernels.h> */
  TF_CAPI_EXPORT extern bool TF_IsHostMemoryInput(TF_OpKernelContext* ctx, int i,
                                                  TF_Status* status);

=head2 TF_IsHostMemoryOutput

=over 2

  Returns true if the ith output is allocated in host memory. If i < 0 or i >=
  TF_NumOutputs(ctx), the program aborts.

=back

  /* From <tensorflow/c/kernels.h> */
  TF_CAPI_EXPORT extern bool TF_IsHostMemoryOutput(TF_OpKernelContext* ctx, int i,
                                                   TF_Status* status);

=head2 TF_StepId

=over 2

  Returns the step ID of the given context.

=back

  /* From <tensorflow/c/kernels.h> */
  TF_CAPI_EXPORT extern int64_t TF_StepId(TF_OpKernelContext* ctx);

=head2 TF_OpKernelConstruction_GetNodeDef

=over 2

  Returns the serialized NodeDef protocol buffer for the kernel

=back

  /* From <tensorflow/c/kernels.h> */
  TF_CAPI_EXPORT extern TF_Buffer* TF_OpKernelConstruction_GetNodeDef(
      TF_OpKernelConstruction* ctx, TF_Status* status);

=head2 TF_GetFrameId

=over 2

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN


=head2 TF_ReleaseVariableInputLockHolder

=over 2

  The API releases the opaque lock handle returned with
  `TF_MaybeLockVariableInputMutexesInOrder` API

=back

  /* From <tensorflow/c/kernels_experimental.h> */
  TF_CAPI_EXPORT extern void TF_ReleaseVariableInputLockHolder(
      TF_VariableInputLockHolder* lockHolder);

=head2 TF_GetInputByName

=over 2

  Allows plugin to get TF_Tensor when passed its input_name

=back

  /* From <tensorflow/c/kernels_experimental.h> */
  TF_CAPI_EXPORT extern void TF_GetInputByName(TF_OpKernelContext* ctx,
                                               const char* inputName,
                                               TF_Tensor** tensor,
                                               TF_Status* status);

=head2 TF_OpKernelConstruction_GetAttrTensorShape

=over 2

  Interprets the named kernel construction attribute as a shape attribute and
  fills in `vals` with the size of each dimension. `vals` must point to an
  array of length at least `max_values` (ideally set to total_size from
  TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, &list_size,
  &total_size)).

=back

  /* From <tensorflow/c/kernels_experimental.h> */
  TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrTensorShape(
      TF_OpKernelConstruction* ctx, const char* attr_name, int64_t* dims,
      size_t num_dims, TF_Status* status);

=head2 TF_IsRefInput

=over 2

=back

  /* From <tensorflow/c/kernels_experimental.h> */
  TF_CAPI_EXPORT extern bool TF_IsRefInput(TF_OpKernelContext* ctx, int i,
                                           TF_Status* status);

=head2 TF_AddNVariant

=over 2

  Expose higher level AddN operation for Pluggable vendors to implement
  in the plugin for Variant data types. The API takes in the context and a
  callback provided by pluggable vendor to do a Binary Add operation on the
  tensors unwrapped from the Variant tensors. The caller takes ownership of the
  `a`, `b` and `out` tensors and is responsible for freeing them with
  TF_DeleteTensor.

=back

  /* From <tensorflow/c/kernels_experimental.h> */
  TF_CAPI_EXPORT extern void TF_AddNVariant(
      TF_OpKernelContext* ctx,
      void (*binary_add_func)(TF_OpKernelContext* ctx, TF_Tensor* a, TF_Tensor* b,
                              TF_Tensor* out),
      TF_Status* status);

=head2 TF_ZerosLikeVariant

=over 2

  Expose higher level ZerosLike operation for Pluggable vendors to implement
  in the plugin for Variant data types. The API takes in the context and a
  callback provided by pluggable vendor to do a ZerosLike operation on the
  tensors unwrapped from the Variant tensors. The caller takes ownership of the
  `input` and `out` tensors and is responsible for freeing them with
  TF_DeleteTensor.

=back

  /* From <tensorflow/c/kernels_experimental.h> */
  TF_CAPI_EXPORT extern void TF_ZerosLikeVariant(
      TF_OpKernelContext* ctx,
      void (*zeros_like_func)(TF_OpKernelContext* ctx, TF_Tensor* input,
                              TF_Tensor* out),
      TF_Status* status);

=head2 TFE_NewContextOptions

=over 2

  Return a new options object.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern TFE_ContextOptions* TFE_NewContextOptions(void);

=head2 TFE_ContextOptionsSetConfig

=over 2

  Set the config in TF_ContextOptions.options.
  config should be a serialized tensorflow.ConfigProto proto.
  If config was not parsed successfully as a ConfigProto, record the
  error information in *status.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_ContextOptionsSetConfig(
      TFE_ContextOptions* options, const void* proto, size_t proto_len,
      TF_Status* status);

=head2 TFE_ContextOptionsSetAsync

=over 2

  Sets the default execution mode (sync/async). Note that this can be
  overridden per thread using TFE_ContextSetExecutorForThread.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_ContextOptionsSetAsync(TFE_ContextOptions*,
                                                        unsigned char enable);

=head2 TFE_ContextOptionsSetDevicePlacementPolicy

=over 2

=back

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  return "non-ready" handle. Else, this function returns after the copy has
  been done.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_TensorHandleCopyToDevice(
      TFE_TensorHandle* h, TFE_Context* ctx, const char* device_name,
      TF_Status* status);

=head2 TFE_TensorHandleTensorDebugInfo

=over 2

  Retrieves TFE_TensorDebugInfo for `handle`.
  If TFE_TensorHandleTensorDebugInfo succeeds, `status` is set to OK and caller
  is responsible for deleting returned TFE_TensorDebugInfo.
  If TFE_TensorHandleTensorDebugInfo fails, `status` is set to appropriate
  error and nullptr is returned. This function can block till the operation
  that produces `handle` has completed.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern TFE_TensorDebugInfo* TFE_TensorHandleTensorDebugInfo(
      TFE_TensorHandle* h, TF_Status* status);

=head2 TFE_DeleteTensorDebugInfo

=over 2

  Deletes `debug_info`.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_DeleteTensorDebugInfo(
      TFE_TensorDebugInfo* debug_info);

=head2 TFE_TensorDebugInfoOnDeviceNumDims

=over 2

  Returns the number of dimensions used to represent the tensor on its device.
  The number of dimensions used to represent the tensor on device can be
  different from the number returned by TFE_TensorHandleNumDims.
  The return value was current at the time of TFE_TensorDebugInfo creation.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern int TFE_TensorDebugInfoOnDeviceNumDims(
      TFE_TensorDebugInfo* debug_info);

=head2 TFE_TensorDebugInfoOnDeviceDim

=over 2

  Returns the number of elements in dimension `dim_index`.
  Tensor representation on device can be transposed from its representation
  on host. The data contained in dimension `dim_index` on device
  can correspond to the data contained in another dimension in on-host
  representation. The dimensions are indexed using the standard TensorFlow
  major-to-minor order (slowest varying dimension first),
  not the XLA's minor-to-major order.
  On-device dimensions can be padded. TFE_TensorDebugInfoOnDeviceDim returns
  the number of elements in a dimension after padding.
  The return value was current at the time of TFE_TensorDebugInfo creation.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern int64_t TFE_TensorDebugInfoOnDeviceDim(
      TFE_TensorDebugInfo* debug_info, int dim_index);

=head2 TFE_NewOp

=over 2

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern TFE_Op* TFE_NewOp(TFE_Context* ctx,
                                          const char* op_or_function_name,
                                          TF_Status* status);

=head2 TFE_DeleteOp

=over 2

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_DeleteOp(TFE_Op* op);

=head2 TFE_OpGetName

=over 2

  Returns the op or function name `op` will execute.
  
  The returned string remains valid throughout the lifetime of 'op'.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern const char* TFE_OpGetName(const TFE_Op* op,
                                                  TF_Status* status);

=head2 TFE_OpGetContext

=over 2

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern TFE_Context* TFE_OpGetContext(const TFE_Op* op,
                                                      TF_Status* status);

=head2 TFE_OpSetDevice

=over 2

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

=head2 TFE_OpSetAttrBool

=over 2

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_OpSetAttrBool(TFE_Op* op, const char* attr_name,
                                               unsigned char value);

=head2 TFE_OpSetAttrType

=over 2

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_OpSetAttrType(TFE_Op* op, const char* attr_name,
                                               TF_DataType value);

=head2 TFE_OpSetAttrShape

=over 2

  If the number of dimensions is unknown, `num_dims` must be set to
  -1 and `dims` can be null.  If a dimension is unknown, the
  corresponding entry in the `dims` array must be -1.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_OpSetAttrShape(TFE_Op* op, const char* attr_name,
                                                const int64_t* dims,
                                                const int num_dims,
                                                TF_Status* out_status);

=head2 TFE_OpSetAttrFunction

=over 2

  Sets the attribute attr_name to be a function specified by 'function'.
  
  TODO(ashankar,iga): Add this functionality to the C API for graph
  construction. Perhaps we want an AttrValueMap equivalent in the C API?

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_OpSetAttrFunction(TFE_Op* op,
                                                   const char* attr_name,
                                                   const TFE_Op* value);

=head2 TFE_OpSetAttrFunctionName

=over 2

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT void TFE_OpSetAttrFunctionName(TFE_Op* op, const char* attr_name,
                                                const char* data, size_t length);

=head2 TFE_OpSetAttrTensor

=over 2

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_OpSetAttrTensor(TFE_Op* op,
                                                 const char* attr_name,
                                                 TF_Tensor* tensor,
                                                 TF_Status* status);

=head2 TFE_OpSetAttrStringList

=over 2

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_OpSetAttrStringList(TFE_Op* op,
                                                     const char* attr_name,
                                                     const void* const* values,
                                                     const size_t* lengths,
                                                     int num_values);

=head2 TFE_OpSetAttrIntList

=over 2

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_OpSetAttrIntList(TFE_Op* op,
                                                  const char* attr_name,
                                                  const int64_t* values,
                                                  int num_values);

=head2 TFE_OpSetAttrFloatList

=over 2

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_OpSetAttrFloatList(TFE_Op* op,
                                                    const char* attr_name,
                                                    const float* values,
                                                    int num_values);

=head2 TFE_OpSetAttrBoolList

=over 2

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_OpSetAttrBoolList(TFE_Op* op,
                                                   const char* attr_name,
                                                   const unsigned char* values,

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_Execute(TFE_Op* op, TFE_TensorHandle** retvals,
                                         int* num_retvals, TF_Status* status);

=head2 TFE_ContextAddFunctionDef

=over 2

  Add a function (serialized FunctionDef protocol buffer) to ctx so
  that it can be invoked using TFE_Execute.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_ContextAddFunctionDef(
      TFE_Context* ctx, const char* serialized_function_def, size_t size,
      TF_Status* status);

=head2 TFE_ContextAddFunction

=over 2

  Adds a function (created from TF_GraphToFunction or
  TF_FunctionImportFunctionDef) to the context, allowing it to be executed with
  TFE_Execute by creating an op with the same name as the function.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_ContextAddFunction(TFE_Context* ctx,
                                                    TF_Function* function,
                                                    TF_Status* status);

=head2 TFE_ContextRemoveFunction

=over 2

  Removes a function from the context. Once removed, you can no longer
  TFE_Execute it or TFE_Execute any TFE_Op which has it as an attribute or any
  other function which calls it as an attribute.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_ContextRemoveFunction(TFE_Context* ctx,
                                                       const char* name,
                                                       TF_Status* status);

=head2 TFE_ContextHasFunction

=over 2

  Checks whether a function is registered under `name`.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT unsigned char TFE_ContextHasFunction(TFE_Context* ctx,
                                                      const char* name);

=head2 TFE_ContextEnableRunMetadata

=over 2

  Enables tracing of RunMetadata on the ops executed from this context.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_ContextEnableRunMetadata(TFE_Context* ctx);

=head2 TFE_ContextDisableRunMetadata

=over 2

  Disables tracing of RunMetadata on the ops executed from this context.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_ContextDisableRunMetadata(TFE_Context* ctx);

=head2 TFE_ContextExportRunMetadata

=over 2

  Populates the passed-in buffer with a serialized RunMetadata protocol buffer
  containing any run metadata information accumulated so far and clears this
  information.
  If async mode is enabled, this call blocks till all currently pending ops are
  done.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_ContextExportRunMetadata(TFE_Context* ctx,
                                                          TF_Buffer* buf,
                                                          TF_Status* status);

=head2 TFE_ContextStartStep

=over 2

  Some TF ops need a step container to be set to limit the lifetime of some
  resources (mostly TensorArray and Stack, used in while loop gradients in
  graph mode). Calling this on a context tells it to start a step.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_ContextStartStep(TFE_Context* ctx);

=head2 TFE_ContextEndStep

=over 2

  Ends a step. When there is no active step (that is, every started step has
  been ended) step containers will be cleared. Note: it is not safe to call
  TFE_ContextEndStep while ops that rely on the step container may be running.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_ContextEndStep(TFE_Context* ctx);

=head2 TFE_HandleToDLPack

=over 2

  Converts an eager tensor handle to DLPack (DLManagedTensor*), and returns the
  void* for further PyCapsule construction.

=back

  /* From <tensorflow/c/eager/dlpack.h> */
  TF_CAPI_EXPORT extern void* TFE_HandleToDLPack(TFE_TensorHandle* h,
                                                 TF_Status* status);

=head2 TFE_HandleFromDLPack

=over 2

  Converts DLPack (DLManagedTensor*) to eager tensor handle.

=back

  /* From <tensorflow/c/eager/dlpack.h> */
  TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_HandleFromDLPack(void* dlm,
                                                               TF_Status* status,
                                                               TFE_Context* ctx);

=head2 TFE_CallDLManagedTensorDeleter

=over 2

  Calls the destructor of DLManagedTensor, used in the destructor of PyCapsule.

=back

  /* From <tensorflow/c/eager/dlpack.h> */
  TF_CAPI_EXPORT extern void TFE_CallDLManagedTensorDeleter(void* dlm_ptr);

=head2 TFE_OpReset

=over 2

  Resets `op_to_reset` with `op_or_function_name` and `raw_device_name`. This
  is for performance optimization by reusing an existing unused op rather than
  creating a new op every time. If `raw_device_name` is `NULL` or empty, it
  does not set the device name. If it's not `NULL`, then it attempts to parse
  and set the device name. It's effectively `TFE_OpSetDevice`, but it is faster
  than separately calling it because if the existing op has the same
  `raw_device_name`, it skips parsing and just leaves it as it is.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_OpReset(TFE_Op* op_to_reset,
                                         const char* op_or_function_name,
                                         const char* raw_device_name,
                                         TF_Status* status);

=head2 TFE_ContextEnableGraphCollection

=over 2

  Enables only graph collection in RunMetadata on the functions executed from
  this context.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_ContextEnableGraphCollection(TFE_Context* ctx);

=head2 TFE_ContextDisableGraphCollection

=over 2

  Disables only graph collection in RunMetadata on the functions executed from
  this context.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_ContextDisableGraphCollection(TFE_Context* ctx);

=head2 TFE_MonitoringCounterCellIncrementBy

=over 2

  Atomically increments the value of the cell. The value must be non-negative.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_MonitoringCounterCellIncrementBy(
      TFE_MonitoringCounterCell* cell, int64_t value);

=head2 TFE_MonitoringCounterCellValue

=over 2

  Retrieves the current value of the cell.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern int64_t TFE_MonitoringCounterCellValue(
      TFE_MonitoringCounterCell* cell);

=head2 TFE_MonitoringNewCounter0

=over 2

  Returns a new Counter metric object. The caller should manage lifetime of
  the object. Using a duplicate metric name will crash the program with a fatal
  error.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern TFE_MonitoringCounter0* TFE_MonitoringNewCounter0(
      const char* name, TF_Status* status, const char* description);

=head2 TFE_MonitoringDeleteCounter0

=over 2

  Deletes the Counter object.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_MonitoringDeleteCounter0(
      TFE_MonitoringCounter0* counter);

=head2 TFE_MonitoringGetCellCounter0

=over 2

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  TF_CAPI_EXPORT extern TFE_Executor* TFE_ContextGetExecutorForThread(
      TFE_Context*);

=head2 TFE_ContextUpdateServerDef

=over 2

  Update an existing context with a new set of servers defined in a ServerDef
  proto. Servers can be added to and removed from the list of remote workers
  in the context. A new set of servers identified by the ServerDef must be up
  when the context is updated.
  
  This API is for experimental usage and may be subject to change.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_ContextUpdateServerDef(TFE_Context* ctx,
                                                        int keep_alive_secs,
                                                        const void* proto,
                                                        size_t proto_len,
                                                        TF_Status* status);

=head2 TFE_ContextCheckAlive

=over 2

  Checks whether a remote worker is alive or not. This will return true even if
  the context doesn't exist on the remote worker.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern bool TFE_ContextCheckAlive(TFE_Context* ctx,
                                                   const char* worker_name,
                                                   TF_Status* status);

=head2 TFE_ContextAsyncWait

=over 2

  Sync pending nodes in local executors (including the context default executor
  and thread executors) and streaming requests to remote executors, and get the
  combined status.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_ContextAsyncWait(TFE_Context* ctx,
                                                  TF_Status* status);

=head2 TFE_TensorHandleDevicePointer

=over 2

  This function will block till the operation that produces `h` has
  completed. This is only valid on local TFE_TensorHandles. The pointer
  returned will be on the device in which the TFE_TensorHandle resides (so e.g.
  for a GPU tensor this will return a pointer to GPU memory). The pointer is
  only guaranteed to be valid until TFE_DeleteTensorHandle is called on this
  TensorHandle. Only supports POD data types.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void* TFE_TensorHandleDevicePointer(TFE_TensorHandle*,
                                                            TF_Status*);

=head2 TFE_TensorHandleDeviceMemorySize

=over 2

  This function will block till the operation that produces `h` has
  completed. This is only valid on local TFE_TensorHandles. Returns the size in
  bytes of the memory pointed to by the device pointer returned above.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern size_t TFE_TensorHandleDeviceMemorySize(TFE_TensorHandle*,
                                                                TF_Status*);

=head2 TFE_NewTensorHandleFromDeviceMemory

=over 2

  Creates a new TensorHandle from memory residing in the physical device
  device_name. Takes ownership of the memory, and will call deleter to release
  it after TF no longer needs it or in case of error.
  
  Custom devices must use TFE_NewCustomDeviceTensorHandle instead.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_NewTensorHandleFromDeviceMemory(
      TFE_Context* ctx, const char* device_name, TF_DataType, const int64_t* dims,
      int num_dims, void* data, size_t len,
      void (*deallocator)(void* data, size_t len, void* arg),
      void* deallocator_arg, TF_Status* status);

=head2 TFE_HostAddressSpace

=over 2

  Retrieves the address space (i.e. job, replica, task) of the local host and
  saves it in the buffer.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_HostAddressSpace(TFE_Context* ctx,
                                                  TF_Buffer* buf);

=head2 TFE_OpGetAttrs

=over 2

  Fetch a reference to `op`'s attributes. The returned reference is only valid
  while `op` is alive.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern const TFE_OpAttrs* TFE_OpGetAttrs(const TFE_Op* op);

=head2 TFE_OpAddAttrs

=over 2

  Add attributes in `attrs` to `op`.
  
  Does not overwrite or update existing attributes, but adds new ones.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_OpAddAttrs(TFE_Op* op, const TFE_OpAttrs* attrs);

=head2 TFE_OpAttrsSerialize

=over 2

  Serialize `attrs` as a tensorflow::NameAttrList protocol buffer (into `buf`),
  containing the op name and a map of its attributes.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_OpAttrsSerialize(const TFE_OpAttrs* attrs,
                                                  TF_Buffer* buf,
                                                  TF_Status* status);

=head2 TFE_OpSetAttrValueProto

=over 2

  Set an op's attribute from a serialized AttrValue protocol buffer.
  

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

=over 2

  Registers a custom device for use with eager execution.
  
  Eager operations may be placed on this device, e.g.  `with
  tf.device("CUSTOM"):` from Python if `device_name` for this call is
  "/job:localhost/replica:0/task:0/device:CUSTOM:0".
  
  The custom device defines copy operations for moving TensorHandles on and
  off, and an execution operation for named operations. Often execution will
  simply wrap op execution on one or more physical devices.
  
  device_info is an opaque caller-defined type stored with the custom device
  which is passed to the functions referenced in the TFE_CustomDevice struct
  `device` (execute, delete_device, etc.). It can for example contain the
  names of wrapped devices.
  
  There are currently no graph semantics implemented for registered custom
  devices, so executing tf.functions which contain operations placed on the
  custom devices will fail.
  
  `device_name` must not name an existing physical or custom device. It must
  follow the format:
  
     /job:<name>/replica:<replica>/task:<task>/device:<type>:<device_num>
  
  If the device is successfully registered, `status` is set to TF_OK. Otherwise
  the device is not usable. In case of a bad status, `device.delete_device` is
  still called on `device_info` (i.e. the caller does not retain ownership).
  
  This API is highly experimental, and in particular is expected to change when
  it starts supporting operations with attributes and when tf.function support
  is added.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_RegisterCustomDevice(TFE_Context* ctx,
                                                      TFE_CustomDevice device,
                                                      const char* device_name,
                                                      void* device_info,
                                                      TF_Status* status);

=head2 TFE_IsCustomDevice

=over 2

  Returns whether `device_name` maps to a registered custom device.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern bool TFE_IsCustomDevice(TFE_Context* ctx,
                                                const char* device_name);

=head2 TFE_NewCustomDeviceTensorHandle

=over 2

  Creates a new TensorHandle from memory residing in a custom device. Takes
  ownership of the memory pointed to by `tensor_handle_data`, and calls
  `methods.deallocator` to release it after TF no longer needs it or in case of
  an error.
  
  This call is similar to `TFE_NewTensorHandleFromDeviceMemory`, but supports
  custom devices instead of physical devices and does not require blocking
  waiting for exact shapes.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_NewCustomDeviceTensorHandle(
      TFE_Context*, const char* device_name, TF_DataType, void* data,
      TFE_CustomDeviceTensorHandle methods, TF_Status* status);

=head2 TFE_ContextGetFunctionDef

=over 2

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_ContextGetFunctionDef(TFE_Context* ctx,
                                                       const char* function_name,
                                                       TF_Buffer* buf,
                                                       TF_Status* status);

=head2 TFE_AllocateHostTensor

=over 2

  Allocate and return a new Tensor on the host.
  
  The caller must set the Tensor values by writing them to the pointer returned
  by TF_TensorData with length TF_TensorByteSize.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern TF_Tensor* TFE_AllocateHostTensor(TFE_Context* ctx,
                                                          TF_DataType dtype,
                                                          const int64_t* dims,
                                                          int num_dims,
                                                          TF_Status* status);

=head2 TFE_NewTensorHandleFromTensor

=over 2

  Given a Tensor, wrap it with a TensorHandle
  
  Similar to TFE_NewTensorHandle, but includes a pointer to the TFE_Context.
  The context should be identical to that of the Tensor.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT TFE_TensorHandle* TFE_NewTensorHandleFromTensor(
      TFE_Context* ctx, TF_Tensor* t, TF_Status* status);

=head2 TFE_CreatePackedTensorHandle

=over 2

  Create a packed TensorHandle with the given list of TensorHandles.
  If `handles` are on the same device, assign the same device to the packed
  handle; if `handles` are on different devices, assign a CompositeDevice to
  it.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_CreatePackedTensorHandle(

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern int TFE_TensorHandleDeviceID(TFE_TensorHandle* h,
                                                     TF_Status* status);

=head2 TFE_TensorHandleGetStatus

=over 2

  Returns the status for the tensor handle. In TFRT, a tensor handle can carry
  error info if error happens. If so, the status will be set with the error
  info. If not, status will be set as OK.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_TensorHandleGetStatus(TFE_TensorHandle* h,
                                                       TF_Status* status);

=head2 TFE_GetExecutedOpNames

=over 2

  Get a comma-separated list of op names executed in graph functions dispatched
  to `ctx`. This feature is currently only enabled for TFRT debug builds, for
  performance and simplicity reasons.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_GetExecutedOpNames(TFE_Context* ctx,
                                                    TF_Buffer* buf,
                                                    TF_Status* status);

=head2 TFE_SetLogicalCpuDevices

=over 2

  Set logical devices to the context's device manager.
  If logical devices are already configured at context initialization
  through TFE_ContextOptions, this method should not be called.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_SetLogicalCpuDevices(TFE_Context* ctx,
                                                      int num_cpus,
                                                      const char* prefix,
                                                      TF_Status* status);

=head2 TFE_InsertConfigKeyValue

=over 2

  Set configuration key and value using coordination service.
  If coordination service is enabled, the key-value will be stored on the
  leader and become accessible to all workers in the cluster.
  Currently, a config key can only be set with one value, and subsequently
  setting the same key will lead to errors.
  
  Note that the key-values are only expected to be used for cluster
  configuration data, and should not be used for storing a large amount of data
  or being accessed very frequently.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_InsertConfigKeyValue(TFE_Context* ctx,
                                                      const char* key,
                                                      const char* value,
                                                      TF_Status* status);

=head2 TFE_GetConfigKeyValue

=over 2

  Get configuration key and value using coordination service.
  The config key must be set before getting its value. Getting value of
  non-existing config keys will result in errors.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_GetConfigKeyValue(TFE_Context* ctx,
                                                   const char* key,
                                                   TF_Buffer* value_buf,
                                                   TF_Status* status);

=head2 TFE_DeleteConfigKeyValue

=over 2

  Delete configuration key-value. If `key` is a directory, recursively clean up
  all key-values under the path specified by `key`.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_DeleteConfigKeyValue(TFE_Context* ctx,
                                                      const char* key,
                                                      TF_Status* status);

=head2 TFE_ReportErrorToCluster

=over 2

  Report error (specified by error_code and error_message) to other tasks in
  the cluster.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_ReportErrorToCluster(TFE_Context* ctx,
                                                      int error_code,
                                                      const char* error_message,
                                                      TF_Status* status);

=head2 TFE_GetTaskStates

=over 2

  Get task states from the Coordination Service.

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

      TF_FunctionLibraryDefinition* fn_lib);

=head2 TF_LookUpOpDef

=over 2

  Shorthand for calling LookUp to get the OpDef from FunctionLibraryDefinition
  given op name. The returned OpDef is represented by TF_Buffer.

=back

  /* From <tensorflow/c/experimental/grappler/grappler.h> */
  TF_CAPI_EXPORT extern void TF_LookUpOpDef(TF_FunctionLibraryDefinition* fn_lib,
                                            const char* name, TF_Buffer* buf,
                                            TF_Status* s);

=head2 TF_TensorSpecDataType

=over 2

  Returns the dtype associated with the TensorSpec.

=back

  /* From <tensorflow/c/experimental/saved_model/public/tensor_spec.h> */
  TF_CAPI_EXPORT extern TF_DataType TF_TensorSpecDataType(
      const TF_TensorSpec* spec);

=head2 TF_TensorSpecShape

=over 2

  Returns the shape associated with the TensorSpec. The returned Shape is not
  owned by the caller. Caller must not call TF_DeleteShape on the returned
  shape.

=back

  /* From <tensorflow/c/experimental/saved_model/public/tensor_spec.h> */
  TF_CAPI_EXPORT extern const TF_Shape* TF_TensorSpecShape(
      const TF_TensorSpec* spec);

=head2 TF_InitPlugin

=over 2

  /// Initializes a TensorFlow plugin.
  ///
  /// Must be implemented by the plugin DSO. It is called by TensorFlow runtime.
  ///
  /// Filesystem plugins can be loaded on demand by users via
  /// `Env::LoadLibrary` or during TensorFlow's startup if they are on certain
  /// paths (although this has a security risk if two plugins register for the
  /// same filesystem and the malicious one loads before the legitimate one -
  /// but we consider this to be something that users should care about and
  /// manage themselves). In both of these cases, core TensorFlow looks for
  /// the `TF_InitPlugin` symbol and calls this function.
  ///
  /// For every filesystem URI scheme that this plugin supports, the plugin must
  /// add one `TF_FilesystemPluginInfo` entry in `plugin_info->ops` and call
  /// `TF_SetFilesystemVersionMetadata` for that entry.
  ///
  /// Plugins must also initialize `plugin_info->plugin_memory_allocate` and
  /// `plugin_info->plugin_memory_free` to ensure memory allocated by plugin is
  /// freed in a compatible way.

=back

  /* From <tensorflow/c/experimental/filesystem/filesystem_interface.h> */
  TF_CAPI_EXPORT extern void TF_InitPlugin(TF_FilesystemPluginInfo* plugin_info);

=head2 TF_LoadSavedModel

=over 2

  Load a SavedModel from `dirname`. We expect the SavedModel to contain a
  single Metagraph (as for those exported from TF2's `tf.saved_model.save`).
  
  Params:
   dirname - A directory filepath that the SavedModel is at.
   ctx - A TFE_Context containing optional load/TF runtime options.
         `ctx` must outlive the returned TF_SavedModel pointer.
   status - Set to OK on success and an appropriate error on failure.
  Returns:
   If status is not OK, returns nullptr. Otherwise, returns a newly created
   TF_SavedModel instance. It must be deleted by calling TF_DeleteSavedModel.

=back

  /* From <tensorflow/c/experimental/saved_model/public/saved_model_api.h> */
  TF_CAPI_EXPORT extern TF_SavedModel* TF_LoadSavedModel(const char* dirname,
                                                         TFE_Context* ctx,
                                                         TF_Status* status);

=head2 TF_LoadSavedModelWithTags

=over 2

  Load a SavedModel from `dirname`.
  
  Params:
   dirname - A directory filepath that the SavedModel is at.
   ctx - A TFE_Context containing optional load/TF runtime options.
         `ctx` must outlive the returned TF_SavedModel pointer.
   tags - char* array of SavedModel tags. We will load the metagraph matching
          the tags.
   tags_len - number of elements in the `tags` array.
   status - Set to OK on success and an appropriate error on failure.
  Returns:
   If status is not OK, returns nullptr. Otherwise, returns a newly created
   TF_SavedModel instance. It must be deleted by calling TF_DeleteSavedModel.

=back

  /* From <tensorflow/c/experimental/saved_model/public/saved_model_api.h> */
  TF_CAPI_EXPORT extern TF_SavedModel* TF_LoadSavedModelWithTags(
      const char* dirname, TFE_Context* ctx, const char* const* tags,
      int tags_len, TF_Status* status);

=head2 TF_DeleteSavedModel

=over 2

  Deletes a TF_SavedModel, and frees any resources owned by it.

=back

  /* From <tensorflow/c/experimental/saved_model/public/saved_model_api.h> */
  TF_CAPI_EXPORT extern void TF_DeleteSavedModel(TF_SavedModel* model);

=head2 TF_GetSavedModelConcreteFunction

=over 2

  Retrieve a function from the TF2 SavedModel via function path.
  
  Params:
   model - The TF2 SavedModel to load a function from.
   function_path - A string containing the path from the root saved python
                   object to a tf.function method.
                   TODO(bmzhao): Add a detailed example of this with a
                   python tf.module before moving this out of experimental.
   status - Set to OK on success and an appropriate error on failure.
  Returns:
   If status is not OK, returns nullptr. Otherwise, returns a
   TF_ConcreteFunction instance. The lifetime of this instance is
   "conceptually" bound to `model`. Once `model` is deleted, all
   `TF_ConcreteFunctions` retrieved from it are invalid, and have been deleted.

=back

  /* From <tensorflow/c/experimental/saved_model/public/saved_model_api.h> */
  TF_CAPI_EXPORT extern TF_ConcreteFunction* TF_GetSavedModelConcreteFunction(
      TF_SavedModel* model, const char* function_path, TF_Status* status);

=head2 TF_GetSavedModelSignatureDefFunction

=over 2

  Retrieve a function from the TF SavedModel via a SignatureDef key.
  
  Params:
   model - The SavedModel to load a function from.
   signature_def_key - The string key of the SignatureDef map of a SavedModel:
                       https://github.com/tensorflow/tensorflow/blob/69b08900b1e991d84bce31f3b404f5ed768f339f/tensorflow/core/protobuf/meta_graph.proto#L89
   status - Set to OK on success and an appropriate error on failure.
  Returns:
   If status is not OK, returns nullptr. Otherwise, returns a
   TF_SignatureDefFunction instance. Once `model` is deleted, all
   `TF_SignatureDefFunctions` retrieved from it are invalid, and have been
   deleted.

=back

  /* From <tensorflow/c/experimental/saved_model/public/saved_model_api.h> */
  TF_CAPI_EXPORT extern TF_SignatureDefFunction*
  TF_GetSavedModelSignatureDefFunction(TF_SavedModel* model,
                                       const char* signature_def_key,
                                       TF_Status* status);

=head2 TF_ConcreteFunctionGetMetadata

=over 2

  Returns FunctionMetadata associated with `func`. Metadata's lifetime is
  bound to `func`, which is bound to the TF_SavedModel it was loaded from.

=back

  /* From <tensorflow/c/experimental/saved_model/public/concrete_function.h> */
  TF_CAPI_EXPORT extern TF_FunctionMetadata* TF_ConcreteFunctionGetMetadata(
      TF_ConcreteFunction* func);

=head2 TF_ConcreteFunctionMakeCallOp

=over 2

  Returns a TFE_Op suitable for executing this function. Caller must provide
  all function inputs in `inputs`, and must not add any additional inputs on
  the returned op. (i.e. don't call TFE_OpAddInput or TFE_OpAddInputList).
  The caller is responsible for deleting the returned TFE_Op. If op
  construction fails, `status` will be non-OK and the returned pointer will be
  null.
  TODO(bmzhao): Remove this function in a subsequent change; Design + implement
  a Function Execution interface for ConcreteFunction that accepts a tagged
  union of types (tensorflow::Value). This effectively requires moving much of
  the implementation of function.py/def_function.py to C++, and exposing a
  high-level API here. A strawman for what this interface could look like:
  TF_Value* TF_ExecuteFunction(TFE_Context*, TF_ConcreteFunction*, TF_Value*
  inputs, int num_inputs, TF_Status* status);

=back

  /* From <tensorflow/c/experimental/saved_model/public/concrete_function.h> */
  TF_CAPI_EXPORT extern TFE_Op* TF_ConcreteFunctionMakeCallOp(
      TF_ConcreteFunction* func, TFE_TensorHandle** inputs, int num_inputs,
      TF_Status* status);

=head2 TF_SignatureDefParamName

=over 2

  Returns the name of the given parameter. The caller is not responsible for
  freeing the returned char*.

=back

  /* From <tensorflow/c/experimental/saved_model/public/signature_def_param.h> */
  TF_CAPI_EXPORT extern const char* TF_SignatureDefParamName(
      const TF_SignatureDefParam* param);

=head2 TF_SignatureDefParamTensorSpec

=over 2

  Returns the TensorSpec associated with the given parameter. The caller is
  not responsible for freeing the returned TF_TensorSpec*.

=back

  /* From <tensorflow/c/experimental/saved_model/public/signature_def_param.h> */
  TF_CAPI_EXPORT extern const TF_TensorSpec* TF_SignatureDefParamTensorSpec(
      const TF_SignatureDefParam* param);

=head2 TF_SignatureDefFunctionGetMetadata

=over 2

  Returns FunctionMetadata associated with `func`. Metadata's lifetime is
  bound to `func`, which is bound to the TF_SavedModel it was loaded from.

=back

  /* From <tensorflow/c/experimental/saved_model/public/signature_def_function.h> */
  TF_CAPI_EXPORT extern TF_SignatureDefFunctionMetadata*
  TF_SignatureDefFunctionGetMetadata(TF_SignatureDefFunction* func);

=head2 TF_SignatureDefFunctionMakeCallOp

=over 2

  Returns a TFE_Op suitable for executing this function. Caller must provide
  all function inputs in `inputs`, and must not add any additional inputs on
  the returned op. (i.e. don't call TFE_OpAddInput or TFE_OpAddInputList).
  The caller is responsible for deleting the returned TFE_Op. If op
  construction fails, `status` will be non-OK and the returned pointer will be
  null.

=back

  /* From <tensorflow/c/experimental/saved_model/public/signature_def_function.h> */
  TF_CAPI_EXPORT extern TFE_Op* TF_SignatureDefFunctionMakeCallOp(
      TF_SignatureDefFunction* func, TFE_TensorHandle** inputs, int num_inputs,
      TF_Status* status);

=head2 TF_ConcreteFunctionListSize

=over 2

  Returns the size of `list`.

=back

  /* From <tensorflow/c/experimental/saved_model/public/concrete_function_list.h> */
  TF_CAPI_EXPORT extern size_t TF_ConcreteFunctionListSize(
      TF_ConcreteFunctionList* list);

=head2 TF_ConcreteFunctionListGet

=over 2

  Returns the `i`th TF_ConcreteFunction in the list.

=back

  /* From <tensorflow/c/experimental/saved_model/public/concrete_function_list.h> */
  TF_CAPI_EXPORT extern TF_ConcreteFunction* TF_ConcreteFunctionListGet(
      TF_ConcreteFunctionList* list, int i);

=head2 TF_DeleteConcreteFunctionList

=over 2

  Deletes `list`.

=back

  /* From <tensorflow/c/experimental/saved_model/public/concrete_function_list.h> */
  TF_CAPI_EXPORT extern void TF_DeleteConcreteFunctionList(
      TF_ConcreteFunctionList* list);

=head2 TF_SignatureDefParamListSize

=over 2

  Returns the size of `list`.

=back

  /* From <tensorflow/c/experimental/saved_model/public/signature_def_param_list.h> */
  TF_CAPI_EXPORT extern size_t TF_SignatureDefParamListSize(
      const TF_SignatureDefParamList* list);

=head2 TF_SignatureDefParamListGet

=over 2

  Returns the `i`th TF_SignatureDefParam in the list.

=back

  /* From <tensorflow/c/experimental/saved_model/public/signature_def_param_list.h> */
  TF_CAPI_EXPORT extern const TF_SignatureDefParam* TF_SignatureDefParamListGet(
      const TF_SignatureDefParamList* list, int i);

=head2 TF_SignatureDefFunctionMetadataArgs

=over 2

  Retrieves the arguments of the SignatureDefFunction. The caller is not
  responsible for freeing the returned pointer.

=back

  /* From <tensorflow/c/experimental/saved_model/public/signature_def_function_metadata.h> */
  TF_CAPI_EXPORT extern const TF_SignatureDefParamList*
  TF_SignatureDefFunctionMetadataArgs(
      const TF_SignatureDefFunctionMetadata* list);

=head2 TF_SignatureDefFunctionMetadataReturns

=over 2

  Retrieves the returns of the SignatureDefFunction. The caller is not
  responsible for freeing the returned pointer.

=back

  /* From <tensorflow/c/experimental/saved_model/public/signature_def_function_metadata.h> */
  TF_CAPI_EXPORT extern const TF_SignatureDefParamList*
  TF_SignatureDefFunctionMetadataReturns(
      const TF_SignatureDefFunctionMetadata* list);

=head2 TF_EnableXLACompilation

=over 2

  When `enable` is true, set
  tensorflow.ConfigProto.OptimizerOptions.global_jit_level to ON_1, and also
  set XLA flag values to prepare for XLA compilation. Otherwise set
  global_jit_level to OFF.
  
  This and the next API are syntax sugar over TF_SetConfig(), and is used by
  clients that cannot read/write the tensorflow.ConfigProto proto.
  TODO: Migrate to TF_CreateConfig() below.

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TF_EnableXLACompilation(TF_SessionOptions* options,
                                                     unsigned char enable);

=head2 TF_SetXlaEnableLazyCompilation

=over 2

  Set XLA's internal BuildXlaOpsPassFlags.tf_xla_enable_lazy_compilation to the
  value of 'enabled'. Also returns the original value of that flag.
  
  Use in tests to allow XLA to fallback to TF classic. This has global effect.

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT unsigned char TF_SetXlaEnableLazyCompilation(
      unsigned char enable);

=head2 TF_SetTfXlaCpuGlobalJit

=over 2

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT unsigned char TF_SetTfXlaCpuGlobalJit(unsigned char enable);

=head2 TF_SetXlaAutoJitMode

=over 2

  Sets XLA's auto jit mode according to the specified string, which is parsed
  as if passed in XLA_FLAGS. This has global effect.

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT void TF_SetXlaAutoJitMode(const char* mode);

=head2 TF_GetXlaAutoJitEnabled

=over 2

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN


  For argument number input_index, fetch the corresponding number_attr that
  needs to be updated with the argument length of the input list.
  Returns nullptr if there is any problem like op_name is not found, or the
  argument does not support this attribute type.

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT extern const char* TF_GetNumberAttrForOpListInput(
      const char* op_name, int input_index, TF_Status* status);

=head2 TF_OpIsStateful

=over 2

  Returns 1 if the op is stateful, 0 otherwise. The return value is undefined
  if the status is not ok.

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT extern int TF_OpIsStateful(const char* op_type,
                                            TF_Status* status);

=head2 TF_InitMain

=over 2

  Platform specific initialization routine. Very few platforms actually require
  this to be called.

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT void TF_InitMain(const char* usage, int* argc, char*** argv);

=head2 TF_PickUnusedPortOrDie

=over 2

  Platform-specific implementation to return an unused port. (This should be
  used in tests only.)

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT int TF_PickUnusedPortOrDie(void);

=head2 TFE_NewTensorHandleFromScalar

=over 2

  Fast path method that makes constructing a single scalar tensor require less
  overhead and copies.

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_NewTensorHandleFromScalar(
      TF_DataType data_type, void* data, size_t len, TF_Status* status);

=head2 TFE_EnableCollectiveOps

=over 2

  Specify the server_def that enables collective ops.
  This is different from the above function in that it doesn't create remote
  contexts, and remotely executing ops is not possible. It just enables
  communication for collective ops.

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_EnableCollectiveOps(TFE_Context* ctx,
                                                     const void* proto,
                                                     size_t proto_len,
                                                     TF_Status* status);

=head2 TFE_AbortCollectiveOps

=over 2

  Aborts all ongoing collectives with the specified status. After abortion,
  subsequent collectives will error with this status immediately. To reset the
  collectives, create a new EagerContext.
  
  This is intended to be used when a peer failure is detected.

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_AbortCollectiveOps(TFE_Context* ctx,
                                                    TF_Status* status);

=head2 TFE_CollectiveOpsCheckPeerHealth

=over 2

  Checks the health of collective ops peers. Explicit health check is needed in
  multi worker collective ops to detect failures in the cluster.  If a peer is
  down, collective ops may hang.

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_CollectiveOpsCheckPeerHealth(
      TFE_Context* ctx, const char* task, int64_t timeout_in_ms,
      TF_Status* status);

=head2 TF_NewShapeAndTypeList

=over 2

  API for manipulating TF_ShapeAndTypeList objects.

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT extern TF_ShapeAndTypeList* TF_NewShapeAndTypeList(
      int num_shapes);



( run in 0.422 second using v1.01-cache-2.11-cpan-f6376fbd888 )