view release on metacpan or search on metacpan
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
=head2 TF_Version
=over 2
TF_Version returns a string describing version information of the
TensorFlow library. TensorFlow uses semantic versioning.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern const char* TF_Version(void);
=head2 TF_TensorFromProto
=over 2
Parses a serialized TensorProto into a TF_Tensor.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_TensorFromProto(const TF_Buffer* from,
TF_Tensor* to, TF_Status* status);
=head2 TF_NewSessionOptions
=over 2
Return a new options object.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_SessionOptions* TF_NewSessionOptions(void);
=head2 TF_SetTarget
=over 2
Set the target in TF_SessionOptions.options.
target can be empty, a single entry, or a comma separated list of entries.
Each entry is in one of the following formats:
"local"
ip:port
host:port
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_SetTarget(TF_SessionOptions* options,
const char* target);
=head2 TF_SetConfig
=over 2
Set the config in TF_SessionOptions.options.
config should be a serialized tensorflow.ConfigProto proto.
If config was not parsed successfully as a ConfigProto, record the
error information in *status.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_SetConfig(TF_SessionOptions* options,
const void* proto, size_t proto_len,
TF_Status* status);
=head2 TF_DeleteSessionOptions
=over 2
Destroy an options object.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_DeleteSessionOptions(TF_SessionOptions*);
=head2 TF_NewGraph
=over 2
Return a new graph object.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_Graph* TF_NewGraph(void);
=head2 TF_DeleteGraph
=over 2
Destroy a graph object. The graph will be deleted once no more
TF_Sessions are referencing it.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_DeleteGraph(TF_Graph*);
=head2 TF_GraphSetTensorShape
=over 2
Sets the shape of the Tensor referenced by `output` in `graph` to
the shape described by `dims` and `num_dims`.
If the number of dimensions is unknown, `num_dims` must be set to
-1 and `dims` can be null. If a dimension is unknown, the
corresponding entry in the `dims` array must be -1.
This does not overwrite the existing shape associated with `output`,
but merges the input shape with the existing shape. For example,
setting a shape of [-1, 2] with an existing shape [2, -1] would set
a final shape of [2, 2] based on shape merging semantics.
Returns an error into `status` if:
* `output` is not in `graph`.
* An invalid shape is being set (e.g., the shape being set
is incompatible with the existing shape).
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_GraphSetTensorShape(TF_Graph* graph,
TF_Output output,
const int64_t* dims,
const int num_dims,
TF_Status* status);
=head2 TF_GraphGetTensorNumDims
=over 2
Returns the number of dimensions of the Tensor referenced by `output`
in `graph`.
If the number of dimensions in the shape is unknown, returns -1.
Returns an error into `status` if:
* `output` is not in `graph`.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern int TF_GraphGetTensorNumDims(TF_Graph* graph,
TF_Output output,
TF_Status* status);
=head2 TF_GraphGetTensorShape
=over 2
Returns the shape of the Tensor referenced by `output` in `graph`
into `dims`. `dims` must be an array large enough to hold `num_dims`
entries (e.g., the return value of TF_GraphGetTensorNumDims).
If the number of dimensions in the shape is unknown or the shape is
a scalar, `dims` will remain untouched. Otherwise, each element of
`dims` will be set corresponding to the size of the dimension. An
unknown dimension is represented by `-1`.
Returns an error into `status` if:
* `output` is not in `graph`.
* `num_dims` does not match the actual number of dimensions.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_GraphGetTensorShape(TF_Graph* graph,
TF_Output output,
int64_t* dims, int num_dims,
TF_Status* status);
=head2 TF_NewOperationLocked
=over 2
Creates a new operation - see `TF_NewOperation` for more details.
The lock for `graph` must be held when calling this function.
Unless implementing advanced behavior, like custom gradient functions, you
most likely need to call `TF_NewOperation` instead.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_OperationDescription* TF_NewOperationLocked(
TF_Graph* graph, const char* op_type, const char* oper_name);
=head2 TF_NewOperation
=over 2
Operation will only be added to *graph when TF_FinishOperation() is
called (assuming TF_FinishOperation() does not return an error).
*graph must not be deleted until after TF_FinishOperation() is
called.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_OperationDescription* TF_NewOperation(
TF_Graph* graph, const char* op_type, const char* oper_name);
=head2 TF_SetDevice
=over 2
Specify the device for `desc`. Defaults to empty, meaning unconstrained.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_SetDevice(TF_OperationDescription* desc,
const char* device);
=head2 TF_AddInput
=over 2
For inputs that take a single tensor.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_AddInput(TF_OperationDescription* desc,
TF_Output input);
=head2 TF_AddInputList
=over 2
For inputs that take a list of tensors.
inputs must point to TF_Output[num_inputs].
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_AddInputList(TF_OperationDescription* desc,
const TF_Output* inputs,
int num_inputs);
=head2 TF_AddControlInput
=over 2
Call once per control input to `desc`.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_AddControlInput(TF_OperationDescription* desc,
TF_Operation* input);
=head2 TF_ColocateWith
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_GraphVersions(TF_Graph* graph,
TF_Buffer* output_version_def,
TF_Status* status);
=head2 TF_NewImportGraphDefOptions
=over 2
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_ImportGraphDefOptions* TF_NewImportGraphDefOptions(
void);
=head2 TF_DeleteImportGraphDefOptions
=over 2
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_DeleteImportGraphDefOptions(
TF_ImportGraphDefOptions* opts);
=head2 TF_ImportGraphDefOptionsSetPrefix
=over 2
Set the prefix to be prepended to the names of nodes in `graph_def` that will
be imported into `graph`. `prefix` is copied and has no lifetime
requirements.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsSetPrefix(
TF_ImportGraphDefOptions* opts, const char* prefix);
=head2 TF_ImportGraphDefOptionsSetDefaultDevice
=over 2
Set the execution device for nodes in `graph_def`.
Only applies to nodes where a device was not already explicitly specified.
`device` is copied and has no lifetime requirements.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsSetDefaultDevice(
TF_ImportGraphDefOptions* opts, const char* device);
=head2 TF_ImportGraphDefOptionsSetUniquifyNames
=over 2
Set whether to uniquify imported operation names. If true, imported operation
names will be modified if their name already exists in the graph. If false,
conflicting names will be treated as an error. Note that this option has no
effect if a prefix is set, since the prefix will guarantee all names are
unique. Defaults to false.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsSetUniquifyNames(
TF_ImportGraphDefOptions* opts, unsigned char uniquify_names);
=head2 TF_ImportGraphDefOptionsSetUniquifyPrefix
=over 2
If true, the specified prefix will be modified if it already exists as an
operation name or prefix in the graph. If false, a conflicting prefix will be
treated as an error. This option has no effect if no prefix is specified.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsSetUniquifyPrefix(
TF_ImportGraphDefOptions* opts, unsigned char uniquify_prefix);
=head2 TF_ImportGraphDefOptionsAddInputMapping
=over 2
Set any imported nodes with input `src_name:src_index` to have that input
replaced with `dst`. `src_name` refers to a node in the graph to be imported,
`dst` references a node already existing in the graph being imported into.
`src_name` is copied and has no lifetime requirements.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsAddInputMapping(
TF_ImportGraphDefOptions* opts, const char* src_name, int src_index,
TF_Output dst);
=head2 TF_ImportGraphDefOptionsRemapControlDependency
=over 2
Set any imported nodes with control input `src_name` to have that input
replaced with `dst`. `src_name` refers to a node in the graph to be imported,
`dst` references an operation already existing in the graph being imported
into. `src_name` is copied and has no lifetime requirements.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsRemapControlDependency(
TF_ImportGraphDefOptions* opts, const char* src_name, TF_Operation* dst);
=head2 TF_ImportGraphDefOptionsAddControlDependency
=over 2
Cause the imported graph to have a control dependency on `oper`. `oper`
should exist in the graph being imported into.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsAddControlDependency(
TF_ImportGraphDefOptions* opts, TF_Operation* oper);
=head2 TF_ImportGraphDefOptionsAddReturnOutput
=over 2
Add an output in `graph_def` to be returned via the `return_outputs` output
parameter of TF_GraphImportGraphDef(). If the output is remapped via an input
mapping, the corresponding existing tensor in `graph` will be returned.
`oper_name` is copied and has no lifetime requirements.
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_ImportGraphDefResultsReturnOutputs(
TF_ImportGraphDefResults* results, int* num_outputs, TF_Output** outputs);
=head2 TF_ImportGraphDefResultsReturnOperations
=over 2
Fetches the return operations requested via
TF_ImportGraphDefOptionsAddReturnOperation(). The number of fetched
operations is returned in `num_opers`. The array of return operations is
returned in `opers`. `*opers` is owned by and has the lifetime of `results`.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_ImportGraphDefResultsReturnOperations(
TF_ImportGraphDefResults* results, int* num_opers, TF_Operation*** opers);
=head2 TF_ImportGraphDefResultsMissingUnusedInputMappings
=over 2
Fetches any input mappings requested via
TF_ImportGraphDefOptionsAddInputMapping() that didn't appear in the GraphDef
and weren't used as input to any node in the imported graph def. The number
of fetched mappings is returned in `num_missing_unused_input_mappings`. The
array of each mapping's source node name is returned in `src_names`, and the
array of each mapping's source index is returned in `src_indexes`.
`*src_names`, `*src_indexes`, and the memory backing each string in
`src_names` are owned by and have the lifetime of `results`.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_ImportGraphDefResultsMissingUnusedInputMappings(
TF_ImportGraphDefResults* results, int* num_missing_unused_input_mappings,
const char*** src_names, int** src_indexes);
=head2 TF_DeleteImportGraphDefResults
=over 2
Deletes a results object returned by TF_GraphImportGraphDefWithResults().
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_DeleteImportGraphDefResults(
TF_ImportGraphDefResults* results);
=head2 TF_GraphImportGraphDefWithResults
=over 2
Import the graph serialized in `graph_def` into `graph`. Returns nullptr and
a bad status on error. Otherwise, returns a populated
TF_ImportGraphDefResults instance. The returned instance must be deleted via
TF_DeleteImportGraphDefResults().
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_ImportGraphDefResults*
TF_GraphImportGraphDefWithResults(TF_Graph* graph, const TF_Buffer* graph_def,
const TF_ImportGraphDefOptions* options,
TF_Status* status);
=head2 TF_GraphImportGraphDefWithReturnOutputs
=over 2
Import the graph serialized in `graph_def` into `graph`.
Convenience function for when only return outputs are needed.
`num_return_outputs` must be the number of return outputs added (i.e. the
result of TF_ImportGraphDefOptionsNumReturnOutputs()). If
`num_return_outputs` is non-zero, `return_outputs` must be of length
`num_return_outputs`. Otherwise it can be null.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_GraphImportGraphDefWithReturnOutputs(
TF_Graph* graph, const TF_Buffer* graph_def,
const TF_ImportGraphDefOptions* options, TF_Output* return_outputs,
int num_return_outputs, TF_Status* status);
=head2 TF_GraphImportGraphDef
=over 2
Import the graph serialized in `graph_def` into `graph`.
Convenience function for when no results are needed.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_GraphImportGraphDef(
TF_Graph* graph, const TF_Buffer* graph_def,
const TF_ImportGraphDefOptions* options, TF_Status* status);
=head2 TF_GraphCopyFunction
=over 2
Adds a copy of function `func` and optionally its gradient function `grad`
to `g`. Once `func`/`grad` is added to `g`, it can be called by creating
an operation using the function's name.
Any changes to `func`/`grad` (including deleting it) done after this method
returns, won't affect the copy of `func`/`grad` in `g`.
If `func` or `grad` are already in `g`, TF_GraphCopyFunction has no
effect on them, but can establish the function->gradient relationship
between them if `func` does not already have a gradient. If `func` already
has a gradient different from `grad`, an error is returned.
`func` must not be null.
If `grad` is null and `func` is not in `g`, `func` is added without a
gradient.
If `grad` is null and `func` is in `g`, TF_GraphCopyFunction is a noop.
`grad` must have appropriate signature as described in the doc of
GradientDef in tensorflow/core/framework/function.proto.
If successful, status is set to OK and `func` and `grad` are added to `g`.
Otherwise, status is set to the encountered error and `g` is unmodified.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_GraphCopyFunction(TF_Graph* g,
const TF_Function* func,
const TF_Function* grad,
TF_Status* status);
=head2 TF_GraphNumFunctions
=over 2
Returns the number of TF_Functions registered in `g`.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern int TF_GraphNumFunctions(TF_Graph* g);
=head2 TF_GraphGetFunctions
=over 2
Fills in `funcs` with the TF_Function* registered in `g`.
`funcs` must point to an array of TF_Function* of length at least
`max_func`. In usual usage, max_func should be set to the result of
TF_GraphNumFunctions(g). In this case, all the functions registered in
`g` will be returned. Else, an unspecified subset.
If successful, returns the number of TF_Function* successfully set in
`funcs` and sets status to OK. The caller takes ownership of
all the returned TF_Functions. They must be deleted with TF_DeleteFunction.
On error, returns 0, sets status to the encountered error, and the contents
of funcs will be undefined.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern int TF_GraphGetFunctions(TF_Graph* g, TF_Function** funcs,
int max_func, TF_Status* status);
=head2 TF_OperationToNodeDef
=over 2
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_OperationToNodeDef(TF_Operation* oper,
TF_Buffer* output_node_def,
TF_Status* status);
=head2 TF_NewWhile
=over 2
Creates a TF_WhileParams for creating a while loop in `g`. `inputs` are
outputs that already exist in `g` used as initial values for the loop
variables.
The returned TF_WhileParams will have all fields initialized except
`cond_output`, `body_outputs`, and `name`. The `body_outputs` buffer will be
allocated to size `ninputs`. The caller should build `cond_graph` and
`body_graph` starting from the inputs, and store the final outputs in
`cond_output` and `body_outputs`.
If `status` is OK, the caller must call either TF_FinishWhile or
TF_AbortWhile on the returned TF_WhileParams. If `status` isn't OK, the
returned TF_WhileParams is not valid, and the caller should not call
TF_FinishWhile() or TF_AbortWhile().
Missing functionality (TODO):
- Gradients
- Reference-type inputs
- Directly referencing external tensors from the cond/body graphs (this is
possible in the Python API)
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_WhileParams TF_NewWhile(TF_Graph* g, TF_Output* inputs,
int ninputs,
TF_Status* status);
=head2 TF_FinishWhile
=over 2
Builds the while loop specified by `params` and returns the output tensors of
the while loop in `outputs`. `outputs` should be allocated to size
`params.ninputs`.
`params` is no longer valid once this returns.
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
=head2 TF_GraphToFunction
=over 2
Create a TF_Function from a TF_Graph
Params:
fn_body - the graph whose operations (or subset of whose operations) will be
converted to TF_Function.
fn_name - the name of the new TF_Function. Should match the operation
name (OpDef.name) regexp [A-Z][A-Za-z0-9_.\\-/]*.
If `append_hash_to_fn_name` is false, `fn_name` must be distinct
from other function and operation names (at least those
registered in graphs where this function will be used).
append_hash_to_fn_name - Must be 0 or 1. If set to 1, the actual name
of the function will be `fn_name` appended with
'_<hash_of_this_function's_definition>'.
If set to 0, the function's name will be `fn_name`.
num_opers - `num_opers` contains the number of elements in the `opers` array
or a special value of -1 meaning that no array is given.
The distinction between an empty array of operations and no
array of operations is necessary to distinguish the case of
creating a function with no body (e.g. identity or permutation)
and the case of creating a function whose body contains all
the nodes in the graph (except for the automatic skipping, see
below).
opers - Array of operations to become the body of the function or null.
- If no array is given (`num_opers` = -1), all the
operations in `fn_body` will become part of the function
except operations referenced in `inputs`. These operations
must have a single output (these operations are typically
placeholders created for the sole purpose of representing
an input. We can relax this constraint if there are
compelling use cases).
- If an array is given (`num_opers` >= 0), all operations
in it will become part of the function. In particular, no
automatic skipping of dummy input operations is performed.
ninputs - number of elements in `inputs` array
inputs - array of TF_Outputs that specify the inputs to the function.
If `ninputs` is zero (the function takes no inputs), `inputs`
can be null. The names used for function inputs are normalized
names of the operations (usually placeholders) pointed to by
`inputs`. These operation names should start with a letter.
Normalization will convert all letters to lowercase and
non-alphanumeric characters to '_' to make resulting names match
the "[a-z][a-z0-9_]*" pattern for operation argument names.
`inputs` cannot contain the same tensor twice.
noutputs - number of elements in `outputs` array
outputs - array of TF_Outputs that specify the outputs of the function.
If `noutputs` is zero (the function returns no outputs), `outputs`
can be null. `outputs` can contain the same tensor more than once.
output_names - The names of the function's outputs. `output_names` array
must either have the same length as `outputs`
(i.e. `noutputs`) or be null. In the former case,
the names should match the regular expression for ArgDef
names - "[a-z][a-z0-9_]*". In the latter case,
names for outputs will be generated automatically.
opts - various options for the function, e.g. XLA's inlining control.
description - optional human-readable description of this function.
status - Set to OK on success and an appropriate error on failure.
Note that when the same TF_Output is listed as both an input and an output,
the corresponding function's output will be equal to this input,
instead of the original node's output.
Callers must also satisfy the following constraints:
- `inputs` cannot refer to TF_Outputs within a control flow context. For
example, one cannot use the output of "switch" node as input.
- `inputs` and `outputs` cannot have reference types. Reference types are
not exposed through C API and are being replaced with Resources. We support
reference types inside function's body to support legacy code. Do not
use them in new code.
- Every node in the function's body must have all of its inputs (including
control inputs). In other words, for every node in the body, each input
must be either listed in `inputs` or must come from another node in
the body. In particular, it is an error to have a control edge going from
a node outside of the body into a node in the body. This applies to control
edges going from nodes referenced in `inputs` to nodes in the body when
the former nodes are not in the body (automatically skipped or not
included in explicitly specified body).
Returns:
On success, a newly created TF_Function instance. It must be deleted by
calling TF_DeleteFunction.
On failure, null.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_Function* TF_GraphToFunction(
const TF_Graph* fn_body, const char* fn_name,
unsigned char append_hash_to_fn_name, int num_opers,
const TF_Operation* const* opers, int ninputs, const TF_Output* inputs,
int noutputs, const TF_Output* outputs, const char* const* output_names,
const TF_FunctionOptions* opts, const char* description, TF_Status* status);
=head2 TF_GraphToFunctionWithControlOutputs
=over 2
Similar to TF_GraphToFunction but allows specifying control outputs of the
function.
The arguments of TF_GraphToFunction have the same meaning, but the new
arguments are as follows:
ncontrol_outputs: Number of control outputs of the function.
control_outputs: vector of TF_Operation objects to be marked as control
outputs of the function. Operations marked as control outputs are
guaranteed to execute.
control_output_names: Optional. If not nullptr, vector of strings, one
per control output, with their names to be added to the function's
OpDef.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_Function* TF_GraphToFunctionWithControlOutputs(
const TF_Graph* fn_body, const char* fn_name,
unsigned char append_hash_to_fn_name, int num_opers,
const TF_Operation* const* opers, int ninputs, const TF_Output* inputs,
int noutputs, const TF_Output* outputs, const char* const* output_names,
int ncontrol_outputs, const TF_Operation* const* control_outputs,
const char* const* control_output_names, const TF_FunctionOptions* opts,
const char* description, TF_Status* status);
=head2 TF_FunctionName
=over 2
Returns the name of the graph function.
The return value points to memory that is only usable until the next
mutation to *func.
=back
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
=over 2
Write out a serialized representation of `func` (as a FunctionDef protocol
message) to `output_func_def` (allocated by TF_NewBuffer()).
`output_func_def`'s underlying buffer will be freed when TF_DeleteBuffer()
is called.
May fail on very large graphs in the future.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_FunctionToFunctionDef(TF_Function* func,
TF_Buffer* output_func_def,
TF_Status* status);
=head2 TF_FunctionImportFunctionDef
=over 2
Construct and return the function whose FunctionDef representation is
serialized in `proto`. `proto_len` must equal the number of bytes
pointed to by `proto`.
Returns:
On success, a newly created TF_Function instance. It must be deleted by
calling TF_DeleteFunction.
On failure, null.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_Function* TF_FunctionImportFunctionDef(
const void* proto, size_t proto_len, TF_Status* status);
=head2 TF_FunctionSetAttrValueProto
=over 2
Sets function attribute named `attr_name` to value stored in `proto`.
If this attribute is already set to another value, it is overridden.
`proto` should point to a sequence of bytes of length `proto_len`
representing a binary serialization of an AttrValue protocol
buffer.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_FunctionSetAttrValueProto(TF_Function* func,
const char* attr_name,
const void* proto,
size_t proto_len,
TF_Status* status);
=head2 TF_FunctionGetAttrValueProto
=over 2
Sets `output_attr_value` to the binary-serialized AttrValue proto
representation of the value of the `attr_name` attr of `func`.
If `attr_name` attribute is not present, status is set to an error.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_FunctionGetAttrValueProto(
TF_Function* func, const char* attr_name, TF_Buffer* output_attr_value,
TF_Status* status);
=head2 TF_DeleteFunction
=over 2
Frees the memory used by the `func` struct.
TF_DeleteFunction is a noop if `func` is null.
Deleting a function does not remove it from any graphs it was copied to.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_DeleteFunction(TF_Function* func);
=head2 TF_TryEvaluateConstant
=over 2
Attempts to evaluate `output`. This will only be possible if `output` doesn't
depend on any graph inputs (this function is safe to call if this isn't the
case though).
If the evaluation is successful, this function returns true and `output`'s
value is returned in `result`. Otherwise returns false. An error status is
returned if something is wrong with the graph or input. Note that this may
return false even if no error status is set.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern unsigned char TF_TryEvaluateConstant(TF_Graph* graph,
TF_Output output,
TF_Tensor** result,
TF_Status* status);
=head2 TF_NewSession
=over 2
Return a new execution session with the associated graph, or NULL on
error. Does not take ownership of any input parameters.
*`graph` must be a valid graph (not deleted or nullptr). `graph` will be
kept alive for the lifetime of the returned TF_Session. New nodes can still
be added to `graph` after this call.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_Session* TF_NewSession(TF_Graph* graph,
const TF_SessionOptions* opts,
TF_Status* status);
=head2 TF_LoadSessionFromSavedModel
=over 2
This function creates a new TF_Session (which is created on success) using
`session_options`, and then initializes state (restoring tensors and other
assets) using `run_options`.
Any NULL and non-NULL value combinations for (`run_options`, `meta_graph_def`)
are valid.
- `export_dir` must be set to the path of the exported SavedModel.
- `tags` must include the set of tags used to identify one MetaGraphDef in
the SavedModel.
- `graph` must be a graph newly allocated with TF_NewGraph().
If successful, populates `graph` with the contents of the Graph and
`meta_graph_def` with the MetaGraphDef of the loaded model.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_Session* TF_LoadSessionFromSavedModel(
const TF_SessionOptions* session_options, const TF_Buffer* run_options,
const char* export_dir, const char* const* tags, int tags_len,
TF_Graph* graph, TF_Buffer* meta_graph_def, TF_Status* status);
=head2 TF_CloseSession
=over 2
Close a session.
Contacts any other processes associated with the session, if applicable.
May not be called after TF_DeleteSession().
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_CloseSession(TF_Session*, TF_Status* status);
=head2 TF_DeleteSession
=over 2
Destroy a session object.
Even if error information is recorded in *status, this call discards all
local resources associated with the session. The session may not be used
during or after this call (and the session drops its reference to the
corresponding graph).
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_DeleteSession(TF_Session*, TF_Status* status);
=head2 TF_SessionRun
=over 2
Run the graph associated with the session starting with the supplied inputs
(inputs[0,ninputs-1] with corresponding values in input_values[0,ninputs-1]).
Any NULL and non-NULL value combinations for (`run_options`,
`run_metadata`) are valid.
- `run_options` may be NULL, in which case it will be ignored; or
non-NULL, in which case it must point to a `TF_Buffer` containing the
serialized representation of a `RunOptions` protocol buffer.
- `run_metadata` may be NULL, in which case it will be ignored; or
non-NULL, in which case it must point to an empty, freshly allocated
`TF_Buffer` that may be updated to contain the serialized representation
of a `RunMetadata` protocol buffer.
The caller retains ownership of `input_values` (which can be deleted using
TF_DeleteTensor). The caller also retains ownership of `run_options` and/or
`run_metadata` (when not NULL) and should manually call TF_DeleteBuffer on
them.
On success, the tensors corresponding to outputs[0,noutputs-1] are placed in
output_values[]. Ownership of the elements of output_values[] is transferred
to the caller, which must eventually call TF_DeleteTensor on them.
On failure, output_values[] contains NULLs.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_SessionRun(
TF_Session* session,
// RunOptions
const TF_Buffer* run_options,
// Input tensors
const TF_Output* inputs, TF_Tensor* const* input_values, int ninputs,
// Output tensors
const TF_Output* outputs, TF_Tensor** output_values, int noutputs,
// Target operations
const TF_Operation* const* target_opers, int ntargets,
// RunMetadata
TF_Buffer* run_metadata,
// Output status
TF_Status*);
=head2 TF_SessionPRunSetup
=over 2
Set up the graph with the intended feeds (inputs) and fetches (outputs) for a
sequence of partial run calls.
On success, returns a handle that is used for subsequent PRun calls. The
handle should be deleted with TF_DeletePRunHandle when it is no longer
needed.
On failure, out_status contains a tensorflow::Status with an error
message. *handle is set to nullptr.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_SessionPRunSetup(
TF_Session*,
// Input names
const TF_Output* inputs, int ninputs,
// Output names
const TF_Output* outputs, int noutputs,
// Target operations
const TF_Operation* const* target_opers, int ntargets,
// Output handle
const char** handle,
// Output status
TF_Status*);
=head2 TF_SessionPRun
=over 2
Continue to run the graph with additional feeds and fetches. The
execution state is uniquely identified by the handle.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_SessionPRun(
TF_Session*, const char* handle,
// Input tensors
const TF_Output* inputs, TF_Tensor* const* input_values, int ninputs,
// Output tensors
const TF_Output* outputs, TF_Tensor** output_values, int noutputs,
// Target operations
const TF_Operation* const* target_opers, int ntargets,
// Output status
TF_Status*);
=head2 TF_DeletePRunHandle
=over 2
Deletes a handle allocated by TF_SessionPRunSetup.
Once called, no more calls to TF_SessionPRun should be made.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_DeletePRunHandle(const char* handle);
=head2 TF_NewDeprecatedSession
=over 2
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_DeprecatedSession* TF_NewDeprecatedSession(
const TF_SessionOptions*, TF_Status* status);
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
=over 2
Lists all devices in a TF_Session.
Caller takes ownership of the returned TF_DeviceList* which must eventually
be freed with a call to TF_DeleteDeviceList.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_DeviceList* TF_SessionListDevices(TF_Session* session,
TF_Status* status);
=head2 TF_DeprecatedSessionListDevices
=over 2
Lists all devices in a TF_DeprecatedSession.
Caller takes ownership of the returned TF_DeviceList* which must eventually
be freed with a call to TF_DeleteDeviceList.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_DeviceList* TF_DeprecatedSessionListDevices(
TF_DeprecatedSession* session, TF_Status* status);
=head2 TF_DeleteDeviceList
=over 2
Deallocates the device list.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_DeleteDeviceList(TF_DeviceList* list);
=head2 TF_DeviceListCount
=over 2
Counts the number of elements in the device list.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern int TF_DeviceListCount(const TF_DeviceList* list);
=head2 TF_DeviceListName
=over 2
Retrieves the full name of the device (e.g. /job:worker/replica:0/...)
The return value will be a pointer to a null terminated string. The caller
must not modify or delete the string. It will be deallocated upon a call to
TF_DeleteDeviceList.
If index is out of bounds, an error code will be set in the status object,
and a null pointer will be returned.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern const char* TF_DeviceListName(const TF_DeviceList* list,
int index,
TF_Status* status);
=head2 TF_DeviceListType
=over 2
Retrieves the type of the device at the given index.
The caller must not modify or delete the string. It will be deallocated upon
a call to TF_DeleteDeviceList.
If index is out of bounds, an error code will be set in the status object,
and a null pointer will be returned.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern const char* TF_DeviceListType(const TF_DeviceList* list,
int index,
TF_Status* status);
=head2 TF_DeviceListMemoryBytes
=over 2
Retrieve the amount of memory associated with a given device.
If index is out of bounds, an error code will be set in the status object,
and -1 will be returned.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern int64_t TF_DeviceListMemoryBytes(
const TF_DeviceList* list, int index, TF_Status* status);
=head2 TF_DeviceListIncarnation
=over 2
Retrieve the incarnation number of a given device.
If index is out of bounds, an error code will be set in the status object,
and 0 will be returned.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern uint64_t TF_DeviceListIncarnation(
const TF_DeviceList* list, int index, TF_Status* status);
=head2 TF_LoadLibrary
=over 2
Load the library specified by library_filename and register the ops and
kernels present in that library.
Pass "library_filename" to a platform-specific mechanism for dynamically
loading a library. The rules for determining the exact location of the
library are platform-specific and are not documented here.
On success, place OK in status and return the newly created library handle.
The caller owns the library handle.
On failure, place an error status in status and return NULL.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_Library* TF_LoadLibrary(const char* library_filename,
TF_Status* status);
=head2 TF_GetOpList
=over 2
Get the OpList of OpDefs defined in the library pointed by lib_handle.
Returns a TF_Buffer. The memory pointed to by the result is owned by
lib_handle. The data in the buffer will be the serialized OpList proto for
ops defined in the library.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_Buffer TF_GetOpList(TF_Library* lib_handle);
=head2 TF_DeleteLibraryHandle
=over 2
Frees the memory associated with the library handle.
Does NOT unload the library.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_DeleteLibraryHandle(TF_Library* lib_handle);
=head2 TF_GetAllOpList
=over 2
Get the OpList of all OpDefs defined in this address space.
Returns a TF_Buffer, ownership of which is transferred to the caller
(and can be freed using TF_DeleteBuffer).
The data in the buffer will be the serialized OpList proto for ops registered
in this address space.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_Buffer* TF_GetAllOpList(void);
=head2 TF_NewApiDefMap
=over 2
Creates a new TF_ApiDefMap instance.
Params:
op_list_buffer - TF_Buffer instance containing serialized OpList
protocol buffer. (See
https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto
for the OpList proto definition).
status - Set to OK on success and an appropriate error on failure.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_ApiDefMap* TF_NewApiDefMap(TF_Buffer* op_list_buffer,
TF_Status* status);
=head2 TF_DeleteApiDefMap
=over 2
Deallocates a TF_ApiDefMap.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_DeleteApiDefMap(TF_ApiDefMap* apimap);
=head2 TF_ApiDefMapPut
=over 2
Add ApiDefs to the map.
`text` corresponds to a text representation of an ApiDefs protocol message.
(https://www.tensorflow.org/code/tensorflow/core/framework/api_def.proto).
The provided ApiDefs will be merged with existing ones in the map, with
precedence given to the newly added version in case of conflicts with
previous calls to TF_ApiDefMapPut.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_ApiDefMapPut(TF_ApiDefMap* api_def_map,
const char* text, size_t text_len,
TF_Status* status);
=head2 TF_ApiDefMapGet
=over 2
Returns a serialized ApiDef protocol buffer for the TensorFlow operation
named `name`.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_Buffer* TF_ApiDefMapGet(TF_ApiDefMap* api_def_map,
const char* name,
size_t name_len,
TF_Status* status);
=head2 TF_GetAllRegisteredKernels
=over 2
Returns a serialized KernelList protocol buffer containing KernelDefs for all
registered kernels.
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
=over 2
Blocks until the server has been successfully stopped (via TF_ServerStop or
TF_ServerClose).
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_ServerJoin(TF_Server* server, TF_Status* status);
=head2 TF_ServerTarget
=over 2
Returns the target string that can be provided to TF_SetTarget() to connect
a TF_Session to `server`.
The returned string is valid only until TF_DeleteServer is invoked.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern const char* TF_ServerTarget(TF_Server* server);
=head2 TF_DeleteServer
=over 2
Destroy an in-process TensorFlow server, frees memory. If server is running
it will be stopped and joined.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_DeleteServer(TF_Server* server);
=head2 TF_RegisterLogListener
=over 2
Register a listener method that processes printed messages.
If any listeners are registered, the print operator will call all listeners
with the printed messages and immediately return without writing to the
logs.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_RegisterLogListener(
void (*listener)(const char*));
=head2 TF_RegisterFilesystemPlugin
=over 2
Register a FileSystem plugin from filename `plugin_filename`.
On success, place OK in status.
On failure, place an error status in status.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_RegisterFilesystemPlugin(
const char* plugin_filename, TF_Status* status);
=head2 TF_NewShape
=over 2
Return a new, unknown rank shape object. The caller is responsible for
calling TF_DeleteShape to deallocate and destroy the returned shape.
=back
/* From <tensorflow/c/tf_shape.h> */
TF_CAPI_EXPORT extern TF_Shape* TF_NewShape();
=head2 TF_ShapeDims
=over 2
Returns the rank of `shape`. If `shape` has unknown rank, returns -1.
=back
/* From <tensorflow/c/tf_shape.h> */
TF_CAPI_EXPORT extern int TF_ShapeDims(const TF_Shape* shape);
=head2 TF_ShapeDimSize
=over 2
Returns the `d`th dimension of `shape`. If `shape` has unknown rank,
invoking this function is undefined behavior. Returns -1 if dimension is
unknown.
=back
/* From <tensorflow/c/tf_shape.h> */
TF_CAPI_EXPORT extern int64_t TF_ShapeDimSize(const TF_Shape* shape, int d);
=head2 TF_DeleteShape
=over 2
Deletes `shape`.
=back
/* From <tensorflow/c/tf_shape.h> */
TF_CAPI_EXPORT extern void TF_DeleteShape(TF_Shape* shape);
=head2 TF_NewTensor
=over 2
Return a new tensor that holds the bytes data[0,len-1].
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
Returns bool iff this tensor is aligned.
=back
/* From <tensorflow/c/tf_tensor.h> */
TF_CAPI_EXPORT extern bool TF_TensorIsAligned(const TF_Tensor*);
=head2 TF_NewStatus
=over 2
Return a new status object.
=back
/* From <tensorflow/c/tf_status.h> */
TF_CAPI_EXPORT extern TF_Status* TF_NewStatus(void);
=head2 TF_DeleteStatus
=over 2
Delete a previously created status object.
=back
/* From <tensorflow/c/tf_status.h> */
TF_CAPI_EXPORT extern void TF_DeleteStatus(TF_Status*);
=head2 TF_SetStatus
=over 2
Record <code, msg> in *s. Any previous information is lost.
A common use is to clear a status: TF_SetStatus(s, TF_OK, "");
=back
/* From <tensorflow/c/tf_status.h> */
TF_CAPI_EXPORT extern void TF_SetStatus(TF_Status* s, TF_Code code,
const char* msg);
=head2 TF_SetPayload
=over 2
Record <key, value> as a payload in *s. The previous payload having the
same key (if any) is overwritten. Payload will not be added if the Status
is OK.
=back
/* From <tensorflow/c/tf_status.h> */
TF_CAPI_EXPORT void TF_SetPayload(TF_Status* s, const char* key,
const char* value);
=head2 TF_SetStatusFromIOError
=over 2
Convert from an I/O error code (e.g., errno) to a TF_Status value.
Any previous information is lost. Prefer to use this instead of TF_SetStatus
when the error comes from I/O operations.
=back
/* From <tensorflow/c/tf_status.h> */
TF_CAPI_EXPORT extern void TF_SetStatusFromIOError(TF_Status* s, int error_code,
const char* context);
=head2 TF_GetCode
=over 2
Return the code recorded in *s.
=back
/* From <tensorflow/c/tf_status.h> */
TF_CAPI_EXPORT extern TF_Code TF_GetCode(const TF_Status* s);
=head2 TF_Message
=over 2
Return a pointer to the (null-terminated) error message in *s. The
return value points to memory that is only usable until the next
mutation to *s. Always returns an empty string if TF_GetCode(s) is
TF_OK.
=back
/* From <tensorflow/c/tf_status.h> */
TF_CAPI_EXPORT extern const char* TF_Message(const TF_Status* s);
=head2 TF_NewBufferFromString
=over 2
Makes a copy of the input and sets an appropriate deallocator. Useful for
passing in read-only, input protobufs.
=back
/* From <tensorflow/c/tf_buffer.h> */
TF_CAPI_EXPORT extern TF_Buffer* TF_NewBufferFromString(const void* proto,
size_t proto_len);
=head2 TF_NewBuffer
=over 2
Useful for passing *out* a protobuf.
=back
/* From <tensorflow/c/tf_buffer.h> */
TF_CAPI_EXPORT extern TF_Buffer* TF_NewBuffer(void);
=head2 TF_DeleteBuffer
=over 2
=back
/* From <tensorflow/c/tf_buffer.h> */
TF_CAPI_EXPORT extern void TF_DeleteBuffer(TF_Buffer*);
=head2 TF_GetBuffer
=over 2
=back
/* From <tensorflow/c/tf_buffer.h> */
TF_CAPI_EXPORT extern TF_Buffer TF_GetBuffer(TF_Buffer* buffer);
=head2 TF_StringInit
=over 2
=back
/* From <tensorflow/c/tf_tstring.h> */
TF_CAPI_EXPORT extern void TF_StringInit(TF_TString *t);
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
=over 2
Places the given shape handle into the `i`th output position of the given
context. Internally, the shape handle is copied; the caller may subsequently
delete `handle`.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT
extern void TF_ShapeInferenceContextSetOutput(TF_ShapeInferenceContext* ctx,
int i, TF_ShapeHandle* handle,
TF_Status* status);
=head2 TF_ShapeInferenceContextScalar
=over 2
Returns a newly-allocated scalar shape handle. The returned handle should
be freed with TF_DeleteShapeHandle.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern TF_ShapeHandle* TF_ShapeInferenceContextScalar(
TF_ShapeInferenceContext* ctx);
=head2 TF_ShapeInferenceContextVectorFromSize
=over 2
Returns a newly-allocated shape handle representing a vector of the given
size. The returned handle should be freed with TF_DeleteShapeHandle.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern TF_ShapeHandle* TF_ShapeInferenceContextVectorFromSize(
TF_ShapeInferenceContext* ctx, size_t size);
=head2 TF_NewDimensionHandle
=over 2
Returns a newly allocated dimension handle. It must be freed with
TF_DeleteDimensionHandle.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern TF_DimensionHandle* TF_NewDimensionHandle();
=head2 TF_ShapeInferenceContext_GetAttrType
=over 2
Interprets the named shape inference context attribute as a TF_DataType and
places it into *val. *status is set to TF_OK.
If the attribute could not be found or could not be interpreted as
TF_DataType, *status is populated with an error.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern void TF_ShapeInferenceContext_GetAttrType(
TF_ShapeInferenceContext* ctx, const char* attr_name, TF_DataType* val,
TF_Status* status);
=head2 TF_ShapeInferenceContextRank
=over 2
Returns the rank of the shape represented by the given handle.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern int64_t TF_ShapeInferenceContextRank(
TF_ShapeInferenceContext* ctx, TF_ShapeHandle* handle);
=head2 TF_ShapeInferenceContextRankKnown
=over 2
Returns 1 if `handle` has a known rank, 0 otherwise.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern int TF_ShapeInferenceContextRankKnown(
TF_ShapeInferenceContext* ctx, TF_ShapeHandle* handle);
=head2 TF_ShapeInferenceContextWithRank
=over 2
If <handle> has rank <rank>, or its rank is unknown, return OK and return the
shape with asserted rank in <*result>. Otherwise an error is placed into
`status`.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern void TF_ShapeInferenceContextWithRank(
TF_ShapeInferenceContext* ctx, TF_ShapeHandle* handle, int64_t rank,
TF_ShapeHandle* result, TF_Status* status);
=head2 TF_ShapeInferenceContextWithRankAtLeast
=over 2
If <handle> has rank at least <rank>, or its rank is unknown, return OK and
return the shape with asserted rank in <*result>. Otherwise an error is
placed into `status`.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern void TF_ShapeInferenceContextWithRankAtLeast(
TF_ShapeInferenceContext* ctx, TF_ShapeHandle* handle, int64_t rank,
TF_ShapeHandle* result, TF_Status* status);
=head2 TF_ShapeInferenceContextWithRankAtMost
=over 2
If <handle> has rank at most <rank>, or its rank is unknown, return OK and
return the shape with asserted rank in <*result>. Otherwise an error is
placed into `status`.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern void TF_ShapeInferenceContextWithRankAtMost(
TF_ShapeInferenceContext* ctx, TF_ShapeHandle* handle, int64_t rank,
TF_ShapeHandle* result, TF_Status* status);
=head2 TF_ShapeInferenceContextDim
=over 2
Places a handle to the ith dimension of the given shape into *result.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern void TF_ShapeInferenceContextDim(
TF_ShapeInferenceContext* ctx, TF_ShapeHandle* shape_handle, int64_t i,
TF_DimensionHandle* result);
=head2 TF_ShapeInferenceContextSubshape
=over 2
Returns in <*result> a sub-shape of <shape_handle>, with dimensions
[start:end]. <start> and <end> can be negative, to index from the end of the
shape. <start> and <end> are set to the rank of <shape_handle> if > rank of
<shape_handle>.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern void TF_ShapeInferenceContextSubshape(
TF_ShapeInferenceContext* ctx, TF_ShapeHandle* shape_handle, int64_t start,
int64_t end, TF_ShapeHandle* result, TF_Status* status);
=head2 TF_ShapeInferenceContextSetUnknownShape
=over 2
Places an unknown shape in all outputs for the given inference context. Used
for shape inference functions with ops whose output shapes are unknown.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern void TF_ShapeInferenceContextSetUnknownShape(
TF_ShapeInferenceContext* ctx, TF_Status* status);
=head2 TF_DimensionHandleValueKnown
=over 2
Returns whether the given handle represents a known dimension.
=back
/* From <tensorflow/c/ops.h> */
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
=over 2
Frees the given shape handle.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern void TF_DeleteShapeHandle(TF_ShapeHandle* handle);
=head2 TF_DeleteDimensionHandle
=over 2
Frees the given dimension handle.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern void TF_DeleteDimensionHandle(TF_DimensionHandle* handle);
=head2 TF_CreateDir
=over 2
Creates the specified directory. Typical status codes are:
* TF_OK - successfully created the directory
* TF_ALREADY_EXISTS - directory already exists
* TF_PERMISSION_DENIED - dirname is not writable
=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern void TF_CreateDir(const char* dirname, TF_Status* status);
=head2 TF_DeleteDir
=over 2
Deletes the specified directory. Typical status codes are:
* TF_OK - successfully deleted the directory
* TF_FAILED_PRECONDITION - the directory is not empty
=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern void TF_DeleteDir(const char* dirname, TF_Status* status);
=head2 TF_DeleteRecursively
=over 2
Deletes the specified directory and all subdirectories and files underneath
it. This is accomplished by traversing the directory tree rooted at dirname
and deleting entries as they are encountered.
If dirname itself is not readable or does not exist, *undeleted_dir_count is
set to 1, *undeleted_file_count is set to 0 and an appropriate status (e.g.
TF_NOT_FOUND) is returned.
If dirname and all its descendants were successfully deleted, TF_OK is
returned and both error counters are set to zero.
Otherwise, while traversing the tree, undeleted_file_count and
undeleted_dir_count are updated if an entry of the corresponding type could
not be deleted. The returned error status represents the reason that any one
of these entries could not be deleted.
Typical status codes:
* TF_OK - dirname exists and we were able to delete everything underneath
* TF_NOT_FOUND - dirname doesn't exist
* TF_PERMISSION_DENIED - dirname or some descendant is not writable
* TF_UNIMPLEMENTED - some underlying functions (like Delete) are not
implemented
=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern void TF_DeleteRecursively(const char* dirname,
int64_t* undeleted_file_count,
int64_t* undeleted_dir_count,
TF_Status* status);
=head2 TF_FileStat
=over 2
Obtains statistics for the given path. If status is TF_OK, *stats is
updated, otherwise it is not touched.
=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern void TF_FileStat(const char* filename,
TF_FileStatistics* stats,
TF_Status* status);
=head2 TF_NewWritableFile
=over 2
Creates or truncates the given filename and returns a handle to be used for
appending data to the file. If status is TF_OK, *handle is updated and the
caller is responsible for freeing it (see TF_CloseWritableFile).
=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern void TF_NewWritableFile(const char* filename,
TF_WritableFileHandle** handle,
TF_Status* status);
=head2 TF_CloseWritableFile
=over 2
Closes the given handle and frees its memory. If there was a problem closing
the file, it is indicated by status. Memory is freed in any case.
=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern void TF_CloseWritableFile(TF_WritableFileHandle* handle,
TF_Status* status);
=head2 TF_SyncWritableFile
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern uint64_t TF_NowSeconds(void);
=head2 TF_DefaultThreadOptions
=over 2
Populates a TF_ThreadOptions struct with system-default values.
=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern void TF_DefaultThreadOptions(TF_ThreadOptions* options);
=head2 TF_StartThread
=over 2
Returns a new thread that is running work_func and is identified
(for debugging/performance-analysis) by thread_name.
The given param (which may be null) is passed to work_func when the thread
starts. In this way, data may be passed from the thread back to the caller.
Caller takes ownership of the result and must call TF_JoinThread on it
eventually.
=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern TF_Thread* TF_StartThread(const TF_ThreadOptions* options,
const char* thread_name,
void (*work_func)(void*),
void* param);
=head2 TF_JoinThread
=over 2
Waits for the given thread to finish execution, then deletes it.
=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern void TF_JoinThread(TF_Thread* thread);
=head2 TF_LoadSharedLibrary
=over 2
\brief Load a dynamic library.
Pass "library_filename" to a platform-specific mechanism for dynamically
loading a library. The rules for determining the exact location of the
library are platform-specific and are not documented here.
On success, place OK in status and return the newly created library handle.
Otherwise returns nullptr and sets an error status.
=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern void* TF_LoadSharedLibrary(const char* library_filename,
TF_Status* status);
=head2 TF_GetSymbolFromLibrary
=over 2
\brief Get a pointer to a symbol from a dynamic library.
"handle" should be a pointer returned from a previous call to
TF_LoadLibraryFromEnv. On success, place OK in status and return a pointer to
the located symbol. Otherwise returns nullptr and set error status.
=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern void* TF_GetSymbolFromLibrary(void* handle,
const char* symbol_name,
TF_Status* status);
=head2 TF_Log
=over 2
=back
/* From <tensorflow/c/logging.h> */
TF_CAPI_EXPORT extern void TF_Log(TF_LogLevel level, const char* fmt, ...);
=head2 TF_VLog
=over 2
=back
/* From <tensorflow/c/logging.h> */
TF_CAPI_EXPORT extern void TF_VLog(int level, const char* fmt, ...);
=head2 TF_DVLog
=over 2
=back
/* From <tensorflow/c/logging.h> */
TF_CAPI_EXPORT extern void TF_DVLog(int level, const char* fmt, ...);
=head2 TF_NewKernelBuilder
=over 2
Allocates a new kernel builder and returns a pointer to it.
If non-null, TensorFlow will call create_func when it needs to instantiate
the kernel. The pointer returned by create_func will be passed to
compute_func and delete_func, thereby functioning as a "this" pointer for
referring to kernel instances.
The TF_OpKernelConstruction pointer passed to create_func is owned by
TensorFlow and will be deleted once create_func returns. It must not be used
after this.
When TensorFlow needs to perform a computation with this kernel, it will
call compute_func. This function will receive the pointer returned by
create_func (or null if no create_func was provided), along with the inputs
to the computation.
The TF_OpKernelContext pointer received by compute_func is owned by
TensorFlow and will be deleted once compute_func returns. It must not be used
after this.
Finally, when TensorFlow no longer needs the kernel, it will call
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_KernelBuilder_Label(
TF_KernelBuilder* kernel_builder, const char* label);
=head2 TF_RegisterKernelBuilder
=over 2
Register the given kernel builder with the TensorFlow runtime. If
registration fails, the given status will be populated.
This call takes ownership of the `builder` pointer.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_RegisterKernelBuilder(const char* kernel_name,
TF_KernelBuilder* builder,
TF_Status* status);
=head2 TF_RegisterKernelBuilderWithKernelDef
=over 2
Register the given kernel builder with the TensorFlow runtime. If
registration fails, the given status will be populated.
This method is the same as TF_RegisterKernelBuilder except it takes in a
serialized KernelDef, and uses it for registration, instead of building a new
one. Users can choose to not provide a serialized KernelDef and in that case
it's identical to TF_RegisterKernelBuilder.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_RegisterKernelBuilderWithKernelDef(
const char* serialized_kernel_def, const char* name,
TF_KernelBuilder* builder, TF_Status* status);
=head2 TF_DeleteKernelBuilder
=over 2
Deletes the given TF_KernelBuilder. This should be called only if the kernel
builder is not registered with TensorFlow via TF_RegisterKernelBuilder.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_DeleteKernelBuilder(TF_KernelBuilder* builder);
=head2 TF_GetStream
=over 2
TF_GetStream returns the SP_Stream available in ctx.
This function returns a stream only for devices registered using the
StreamExecutor C API
(tensorflow/c/experimental/stream_executor/stream_executor.h). It will return
nullptr and set error status in all other cases.
Experimental: this function doesn't have compatibility guarantees and subject
to change at any time.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern SP_Stream TF_GetStream(TF_OpKernelContext* ctx,
TF_Status* status);
=head2 TF_NumInputs
=over 2
TF_NumInputs returns the number of inputs available in ctx.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern int TF_NumInputs(TF_OpKernelContext* ctx);
=head2 TF_NumOutputs
=over 2
TF_NumOutputs returns the number of outputs to be placed in *ctx by the
kernel.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern int TF_NumOutputs(TF_OpKernelContext* ctx);
=head2 TF_GetInput
=over 2
Retrieves the ith input from ctx. If TF_GetCode(status) is TF_OK, *tensor is
populated and its ownership is passed to the caller. In any other case,
*tensor is not modified.
If i < 0 or i >= TF_NumInputs(ctx), *status is set to TF_OUT_OF_RANGE.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_GetInput(TF_OpKernelContext* ctx, int i,
TF_Tensor** tensor, TF_Status* status);
=head2 TF_InputRange
=over 2
Retrieves the start and stop indices, given the input name. Equivalent to
OpKernel::InputRange(). `args` will contain the result indices and status.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_InputRange(TF_OpKernelContext* ctx,
const char* name,
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
Returns the default container of the resource manager in OpKernelContext.
The returned TF_StringView's underlying string is owned by the OpKernel and
has the same lifetime as the OpKernel.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern TF_StringView TF_GetResourceMgrDefaultContainerName(
TF_OpKernelContext* ctx);
=head2 TF_GetOpKernelRequestedInput
=over 2
Returns the name of the requested input at `index` from the OpKernel.
The returned TF_StringView's underlying string is owned by the OpKernel and
has the same lifetime as the OpKernel.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern TF_StringView TF_GetOpKernelRequestedInput(
TF_OpKernelContext* ctx, size_t index);
=head2 TF_OpKernelConstruction_GetAttrSize
=over 2
Get the list_size and total_size of the attribute `attr_name` of `oper`.
list_size - the length of the list.
total_size - total size of the list.
(1) If attr_type == TF_ATTR_STRING
then total_size is the cumulative byte size
of all the strings in the list.
(2) If attr_type == TF_ATTR_SHAPE
then total_size is the number of dimensions
of the shape valued attribute, or -1
if its rank is unknown.
(3) If attr_type == TF_ATTR_SHAPE and the attribute is a list,
then total_size is the cumulative number
of dimensions of all shapes in the list.
(4) Otherwise, total_size is undefined.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrSize(
TF_OpKernelConstruction* ctx, const char* attr_name, int32_t* list_size,
int32_t* total_size, TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrType
=over 2
Interprets the named kernel construction attribute as a TF_DataType and
places it into *val. *status is set to TF_OK.
If the attribute could not be found or could not be interpreted as
TF_DataType, *status is populated with an error.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrType(
TF_OpKernelConstruction* ctx, const char* attr_name, TF_DataType* val,
TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrInt32
=over 2
Interprets the named kernel construction attribute as int32_t and
places it into *val. *status is set to TF_OK.
If the attribute could not be found or could not be interpreted as
int32, *status is populated with an error.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrInt32(
TF_OpKernelConstruction* ctx, const char* attr_name, int32_t* val,
TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrInt64
=over 2
Interprets the named kernel construction attribute as int64_t and
places it into *val. *status is set to TF_OK.
If the attribute could not be found or could not be interpreted as
int64, *status is populated with an error.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrInt64(
TF_OpKernelConstruction* ctx, const char* attr_name, int64_t* val,
TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrFloat
=over 2
Interprets the named kernel construction attribute as float and
places it into *val. *status is set to TF_OK.
If the attribute could not be found or could not be interpreted as
float, *status is populated with an error.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrFloat(
TF_OpKernelConstruction* ctx, const char* attr_name, float* val,
TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrBool
=over 2
Interprets the named kernel construction attribute as bool and
places it into *val. *status is set to TF_OK.
If the attribute could not be found or could not be interpreted as
bool, *status is populated with an error.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrBool(
TF_OpKernelConstruction* ctx, const char* attr_name, TF_Bool* val,
TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrString
=over 2
Interprets the named kernel construction attribute as string and
places it into *val. `val` must
point to an array of length at least `max_length` (ideally set to
total_size from TF_OpKernelConstruction_GetAttrSize(ctx,
attr_name, list_size, total_size)). *status is set to TF_OK.
If the attribute could not be found or could not be interpreted as
string, *status is populated with an error.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrString(
TF_OpKernelConstruction* ctx, const char* attr_name, char* val,
size_t max_length, TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrTensor
=over 2
Interprets the named kernel construction attribute as tensor and places it
into *val. Allocates a new TF_Tensor which the caller is expected to take
ownership of (and can deallocate using TF_DeleteTensor). *status is set to
TF_OK.
If the attribute could not be found or could not be interpreted as
tensor, *status is populated with an error.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrTensor(
TF_OpKernelConstruction* ctx, const char* attr_name, TF_Tensor** val,
TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrTypeList
=over 2
Interprets the named kernel construction attribute as a TF_DataType array and
places it into *vals. *status is set to TF_OK.
`vals` must point to an array of length at least `max_values` (ideally set
to list_size from
TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, list_size,
total_size)).
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrTypeList(
TF_OpKernelConstruction* ctx, const char* attr_name, TF_DataType* vals,
int max_vals, TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrInt32List
=over 2
Interprets the named kernel construction attribute as int32_t array and
places it into *vals. *status is set to TF_OK.
`vals` must point to an array of length at least `max_vals` (ideally set
to list_size from
TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, list_size,
total_size)).
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrInt32List(
TF_OpKernelConstruction* ctx, const char* attr_name, int32_t* vals,
int max_vals, TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrInt64List
=over 2
Interprets the named kernel construction attribute as int64_t array and
places it into *vals. *status is set to TF_OK.
`vals` must point to an array of length at least `max_vals` (ideally set
to list_size from
TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, list_size,
total_size)).
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrInt64List(
TF_OpKernelConstruction* ctx, const char* attr_name, int64_t* vals,
int max_vals, TF_Status* status);
Allocates Tensor for output at given index. Caller takes ownership of
returned TF_Tensor and should deallocate it using TF_DeleteTensor(tensor).
This function should be used to allocate outputs inside kernel
compute function.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT TF_Tensor* TF_AllocateOutput(TF_OpKernelContext* context,
int index, TF_DataType dtype,
const int64_t* dims, int num_dims,
size_t len, TF_Status* status);
=head2 TF_ForwardInputOrAllocateOutput
=over 2
Tries to forward one of the inputs given in input_indices to
output[output_index]. If none of the given inputs can be forwarded, calls
allocate_output() to allocate a new output buffer. The index of the
forwarded input will be assigned to output argument forwarded_input (if it's
not nullptr). If no inputs are forwarded, forwarded_input will be assigned
-1.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT TF_Tensor* TF_ForwardInputOrAllocateOutput(
TF_OpKernelContext* context, const int* candidate_input_indices,
int num_candidate_input_indices, int output_index,
const int64_t* output_dims, int output_num_dims, int* forwarded_input,
TF_Status* status);
=head2 TF_AllocateTemp
=over 2
Allocates a temporary Tensor of the specified type and shape. The
Tensor must not be used after kernel construction is
complete.
num_dims must equal the size of array dims
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern TF_Tensor* TF_AllocateTemp(
TF_OpKernelContext* context, TF_DataType dtype, const int64_t* dims,
int num_dims, TF_AllocatorAttributes* alloc_attrs, TF_Status* status);
=head2 TF_AssignVariable
=over 2
Expose higher level Assignment operation for Pluggable vendors to implement
in the plugin for Training. The API takes in the context with indices for
the input and value tensors. It also accepts the copy callback provided by
pluggable vendor to do the copying of the tensors. The caller takes ownership
of the `source` and `dest` tensors and is responsible for freeing them with
TF_DeleteTensor. This function will return an error when the following
conditions are met:
1. `validate_shape` is set to `true`
2. The variable is initialized
3. The shape of the value tensor doesn't match the shape of the variable
tensor.
=back
/* From <tensorflow/c/kernels_experimental.h> */
TF_CAPI_EXPORT extern void TF_AssignVariable(
TF_OpKernelContext* ctx, int input_index, int value_index,
bool validate_shape,
void (*copyFunc)(TF_OpKernelContext* ctx, TF_Tensor* source,
TF_Tensor* dest),
TF_Status* status);
=head2 TF_AssignRefVariable
=over 2
Expose higher level Assignment operation for Pluggable vendors to implement
in the plugin for Training on ref variables. The API takes in the context
with indices for the input and value tensors. It also accepts the copy
callback provided by pluggable vendor to do the copying of the tensors. The
caller takes ownership of the `source` and `dest` tensors and is responsible
for freeing them with TF_DeleteTensor.
=back
/* From <tensorflow/c/kernels_experimental.h> */
TF_CAPI_EXPORT extern void TF_AssignRefVariable(
TF_OpKernelContext* ctx, int input_ref_index, int output_ref_index,
int value_index, bool use_locking, bool validate_shape,
void (*copyFunc)(TF_OpKernelContext* ctx, TF_Tensor* source,
TF_Tensor* dest),
TF_Status* status);
=head2 TF_AssignUpdateVariable
=over 2
Expose higher level AssignUpdate operation for Pluggable vendors to implement
in the plugin for Training. The API takes in the context with indices for the
input and value tensors. It also accepts the copy callback provided by
pluggable vendor to do the copying of the tensors and the update callback to
apply the arithmetic operation. The caller takes ownership of the `source`,
`dest`, `tensor` and `value` tensors and is responsible for freeing them with
TF_DeleteTensor.
=back
/* From <tensorflow/c/kernels_experimental.h> */
TF_CAPI_EXPORT extern void TF_AssignUpdateVariable(
TF_OpKernelContext* ctx, int input_index, int value_index, int Op,
int isVariantType,
void (*copyFunc)(TF_OpKernelContext* ctx, TF_Tensor* source,
TF_Tensor* dest),
void (*updateFunc)(TF_OpKernelContext* ctx, TF_Tensor* tensor,
TF_Tensor* value, int Op),
TF_Status* status);
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
TF_Status* status);
=head2 TF_AddNVariant
=over 2
Expose higher level AddN operation for Pluggable vendors to implement
in the plugin for Variant data types. The API takes in the context and a
callback provided by pluggable vendor to do a Binary Add operation on the
tensors unwrapped from the Variant tensors. The caller takes ownership of the
`a`, `b` and `out` tensors and is responsible for freeing them with
TF_DeleteTensor.
=back
/* From <tensorflow/c/kernels_experimental.h> */
TF_CAPI_EXPORT extern void TF_AddNVariant(
TF_OpKernelContext* ctx,
void (*binary_add_func)(TF_OpKernelContext* ctx, TF_Tensor* a, TF_Tensor* b,
TF_Tensor* out),
TF_Status* status);
=head2 TF_ZerosLikeVariant
=over 2
Expose higher level ZerosLike operation for Pluggable vendors to implement
in the plugin for Variant data types. The API takes in the context and a
callback provided by pluggable vendor to do a ZerosLike operation on the
tensors unwrapped from the Variant tensors. The caller takes ownership of the
`input` and `out` tensors and is responsible for freeing them with
TF_DeleteTensor.
=back
/* From <tensorflow/c/kernels_experimental.h> */
TF_CAPI_EXPORT extern void TF_ZerosLikeVariant(
TF_OpKernelContext* ctx,
void (*zeros_like_func)(TF_OpKernelContext* ctx, TF_Tensor* input,
TF_Tensor* out),
TF_Status* status);
=head2 TFE_NewContextOptions
=over 2
Return a new options object.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern TFE_ContextOptions* TFE_NewContextOptions(void);
=head2 TFE_ContextOptionsSetConfig
=over 2
Set the config in TF_ContextOptions.options.
config should be a serialized tensorflow.ConfigProto proto.
If config was not parsed successfully as a ConfigProto, record the
error information in *status.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_ContextOptionsSetConfig(
TFE_ContextOptions* options, const void* proto, size_t proto_len,
TF_Status* status);
=head2 TFE_ContextOptionsSetAsync
=over 2
Sets the default execution mode (sync/async). Note that this can be
overridden per thread using TFE_ContextSetExecutorForThread.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_ContextOptionsSetAsync(TFE_ContextOptions*,
unsigned char enable);
=head2 TFE_ContextOptionsSetDevicePlacementPolicy
=over 2
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_ContextOptionsSetDevicePlacementPolicy(
TFE_ContextOptions*, TFE_ContextDevicePlacementPolicy);
=head2 TFE_DeleteContextOptions
=over 2
Destroy an options object.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_DeleteContextOptions(TFE_ContextOptions*);
=head2 TFE_NewContext
=over 2
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern TFE_Context* TFE_NewContext(
const TFE_ContextOptions* opts, TF_Status* status);
=head2 TFE_DeleteContext
=over 2
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_DeleteContext(TFE_Context* ctx);
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
=head2 TFE_TensorHandleNumElements
=over 2
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern int64_t TFE_TensorHandleNumElements(TFE_TensorHandle* h,
TF_Status* status);
=head2 TFE_TensorHandleDim
=over 2
This function will block till the operation that produces `h` has completed.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern int64_t TFE_TensorHandleDim(TFE_TensorHandle* h,
int dim_index,
TF_Status* status);
=head2 TFE_TensorHandleDeviceName
=over 2
Returns the device of the operation that produced `h`. If `h` was produced by
a copy, returns the destination device of the copy. Note that the returned
device name is not always the device holding the tensor handle's memory. If
you want the latter, use TFE_TensorHandleBackingDeviceName. This function
will block till the operation that produces `h` has completed.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern const char* TFE_TensorHandleDeviceName(
TFE_TensorHandle* h, TF_Status* status);
=head2 TFE_TensorHandleBackingDeviceName
=over 2
Returns the name of the device in whose memory `h` resides.
This function will block till the operation that produces `h` has completed.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern const char* TFE_TensorHandleBackingDeviceName(
TFE_TensorHandle* h, TF_Status* status);
=head2 TFE_TensorHandleCopySharingTensor
=over 2
Return a pointer to a new TFE_TensorHandle that shares the underlying tensor
with `h`. On success, `status` is set to OK. On failure, `status` reflects
the error and a nullptr is returned.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_TensorHandleCopySharingTensor(
TFE_TensorHandle* h, TF_Status* status);
=head2 TFE_TensorHandleResolve
=over 2
This function will block till the operation that produces `h` has
completed. The memory returned might alias the internal memory used by
TensorFlow. Hence, callers should not mutate this memory (for example by
modifying the memory region pointed to by TF_TensorData() on the returned
TF_Tensor).
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern TF_Tensor* TFE_TensorHandleResolve(TFE_TensorHandle* h,
TF_Status* status);
=head2 TFE_TensorHandleCopyToDevice
=over 2
Create a new TFE_TensorHandle with the same contents as 'h' but placed
in the memory of the device name 'device_name'.
If source and destination are the same device, then this creates a new handle
that shares the underlying buffer. Otherwise, it currently requires at least
one of the source or destination devices to be CPU (i.e., for the source or
destination tensor to be placed in host memory).
If async execution is enabled, the copy may be enqueued and the call will
return "non-ready" handle. Else, this function returns after the copy has
been done.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_TensorHandleCopyToDevice(
TFE_TensorHandle* h, TFE_Context* ctx, const char* device_name,
TF_Status* status);
=head2 TFE_TensorHandleTensorDebugInfo
=over 2
Retrieves TFE_TensorDebugInfo for `handle`.
If TFE_TensorHandleTensorDebugInfo succeeds, `status` is set to OK and caller
is responsible for deleting returned TFE_TensorDebugInfo.
If TFE_TensorHandleTensorDebugInfo fails, `status` is set to appropriate
error and nullptr is returned. This function can block till the operation
that produces `handle` has completed.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern TFE_TensorDebugInfo* TFE_TensorHandleTensorDebugInfo(
TFE_TensorHandle* h, TF_Status* status);
=head2 TFE_DeleteTensorDebugInfo
=over 2
Deletes `debug_info`.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_DeleteTensorDebugInfo(
TFE_TensorDebugInfo* debug_info);
=head2 TFE_TensorDebugInfoOnDeviceNumDims
=over 2
Returns the number of dimensions used to represent the tensor on its device.
The number of dimensions used to represent the tensor on device can be
different from the number returned by TFE_TensorHandleNumDims.
The return value was current at the time of TFE_TensorDebugInfo creation.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern int TFE_TensorDebugInfoOnDeviceNumDims(
TFE_TensorDebugInfo* debug_info);
=head2 TFE_TensorDebugInfoOnDeviceDim
=over 2
Returns the number of elements in dimension `dim_index`.
Tensor representation on device can be transposed from its representation
on host. The data contained in dimension `dim_index` on device
can correspond to the data contained in another dimension in on-host
representation. The dimensions are indexed using the standard TensorFlow
major-to-minor order (slowest varying dimension first),
not the XLA's minor-to-major order.
On-device dimensions can be padded. TFE_TensorDebugInfoOnDeviceDim returns
the number of elements in a dimension after padding.
The return value was current at the time of TFE_TensorDebugInfo creation.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern int64_t TFE_TensorDebugInfoOnDeviceDim(
TFE_TensorDebugInfo* debug_info, int dim_index);
=head2 TFE_NewOp
=over 2
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
=head2 TFE_OpSetAttrShapeList
=over 2
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_OpSetAttrShapeList(
TFE_Op* op, const char* attr_name, const int64_t** dims,
const int* num_dims, int num_values, TF_Status* out_status);
=head2 TFE_OpSetAttrFunctionList
=over 2
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_OpSetAttrFunctionList(TFE_Op* op,
const char* attr_name,
const TFE_Op** value,
int num_values);
=head2 TFE_OpGetInputLength
=over 2
Returns the length (number of tensors) of the input argument `input_name`
found in the provided `op`.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern int TFE_OpGetInputLength(TFE_Op* op,
const char* input_name,
TF_Status* status);
=head2 TFE_OpGetOutputLength
=over 2
Returns the length (number of tensors) of the output argument `output_name`
found in the provided `op`.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern int TFE_OpGetOutputLength(TFE_Op* op,
const char* output_name,
TF_Status* status);
=head2 TFE_Execute
=over 2
Execute the operation defined by 'op' and return handles to computed
tensors in `retvals`.
'retvals' must point to a pre-allocated array of TFE_TensorHandle* and
'*num_retvals' should be set to the size of this array. It is an error if
the size of 'retvals' is less than the number of outputs. This call sets
*num_retvals to the number of outputs.
If async execution is enabled, the call may simply enqueue the execution
and return "non-ready" handles in `retvals`. Note that any handles contained
in 'op' should not be mutated till the kernel execution actually finishes.
For sync execution, if any of the inputs to `op` are not ready, this call
will block till they become ready and then return when the kernel execution
is done.
TODO(agarwal): change num_retvals to int from int*.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_Execute(TFE_Op* op, TFE_TensorHandle** retvals,
int* num_retvals, TF_Status* status);
=head2 TFE_ContextAddFunctionDef
=over 2
Add a function (serialized FunctionDef protocol buffer) to ctx so
that it can be invoked using TFE_Execute.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_ContextAddFunctionDef(
TFE_Context* ctx, const char* serialized_function_def, size_t size,
TF_Status* status);
=head2 TFE_ContextAddFunction
=over 2
Adds a function (created from TF_GraphToFunction or
TF_FunctionImportFunctionDef) to the context, allowing it to be executed with
TFE_Execute by creating an op with the same name as the function.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_ContextAddFunction(TFE_Context* ctx,
TF_Function* function,
TF_Status* status);
=head2 TFE_ContextRemoveFunction
=over 2
Removes a function from the context. Once removed, you can no longer
TFE_Execute it or TFE_Execute any TFE_Op which has it as an attribute or any
other function which calls it as an attribute.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_ContextRemoveFunction(TFE_Context* ctx,
const char* name,
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_OpReset(TFE_Op* op_to_reset,
const char* op_or_function_name,
const char* raw_device_name,
TF_Status* status);
=head2 TFE_ContextEnableGraphCollection
=over 2
Enables only graph collection in RunMetadata on the functions executed from
this context.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_ContextEnableGraphCollection(TFE_Context* ctx);
=head2 TFE_ContextDisableGraphCollection
=over 2
Disables only graph collection in RunMetadata on the functions executed from
this context.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_ContextDisableGraphCollection(TFE_Context* ctx);
=head2 TFE_MonitoringCounterCellIncrementBy
=over 2
Atomically increments the value of the cell. The value must be non-negative.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_MonitoringCounterCellIncrementBy(
TFE_MonitoringCounterCell* cell, int64_t value);
=head2 TFE_MonitoringCounterCellValue
=over 2
Retrieves the current value of the cell.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern int64_t TFE_MonitoringCounterCellValue(
TFE_MonitoringCounterCell* cell);
=head2 TFE_MonitoringNewCounter0
=over 2
Returns a new Counter metric object. The caller should manage lifetime of
the object. Using duplicate metric name will crash the program with fatal
error.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern TFE_MonitoringCounter0* TFE_MonitoringNewCounter0(
const char* name, TF_Status* status, const char* description);
=head2 TFE_MonitoringDeleteCounter0
=over 2
Deletes the Counter object.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteCounter0(
TFE_MonitoringCounter0* counter);
=head2 TFE_MonitoringGetCellCounter0
=over 2
Retrieves the cell from the Counter object. The Counter object will manage
lifetime of the cell.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern TFE_MonitoringCounterCell* TFE_MonitoringGetCellCounter0(
TFE_MonitoringCounter0* counter);
=head2 TFE_MonitoringNewCounter1
=over 2
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern TFE_MonitoringCounter1* TFE_MonitoringNewCounter1(
const char* name, TF_Status* status, const char* description,
const char* label1);
=head2 TFE_MonitoringDeleteCounter1
=over 2
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteCounter1(
TFE_MonitoringCounter1* counter);
=head2 TFE_MonitoringGetCellCounter1
=over 2
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
maximum number of in flight async nodes. Enqueuing of additional async ops
after the limit is reached blocks until some inflight nodes finishes.
The effect is bounding the memory held by inflight TensorHandles that are
referenced by the inflight nodes.
A recommended value has not been established.
A value of 0 removes the limit, which is the behavior of TensorFlow 2.11.
When is_async is false, the value is ignored.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern TFE_Executor* TFE_NewExecutor(
bool is_async, bool enable_streaming_enqueue, int in_flight_nodes_limit);
=head2 TFE_DeleteExecutor
=over 2
Deletes the eager Executor without waiting for enqueued nodes. Please call
TFE_ExecutorWaitForAllPendingNodes before calling this API if you want to
make sure all nodes are finished.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_DeleteExecutor(TFE_Executor*);
=head2 TFE_ExecutorIsAsync
=over 2
Returns true if the executor is in async mode.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern bool TFE_ExecutorIsAsync(TFE_Executor*);
=head2 TFE_ExecutorWaitForAllPendingNodes
=over 2
Causes the calling thread to block till all ops dispatched in this executor
have been executed. Note that "execution" here refers to kernel execution /
scheduling of copies, etc. Similar to sync execution, it doesn't guarantee
that lower level device queues (like GPU streams) have been flushed.
This call may not block for execution of ops enqueued concurrently with this
call.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_ExecutorWaitForAllPendingNodes(
TFE_Executor*, TF_Status* status);
=head2 TFE_ExecutorClearError
=over 2
When an error happens, any pending operations are discarded, and newly issued
ops return an error. This call clears the error state and re-enables
execution of newly issued ops.
Note that outputs of discarded ops remain in a corrupt state and should not
be used for future calls.
TODO(agarwal): mark the affected handles and raise errors if they are used.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_ExecutorClearError(TFE_Executor*);
=head2 TFE_ContextSetExecutorForThread
=over 2
Sets a custom Executor for the current thread. All nodes created by this
thread will be added to this Executor. It will override the current executor.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_ContextSetExecutorForThread(TFE_Context*,
TFE_Executor*);
=head2 TFE_ContextGetExecutorForThread
=over 2
Returns the Executor for the current thread.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern TFE_Executor* TFE_ContextGetExecutorForThread(
TFE_Context*);
=head2 TFE_ContextUpdateServerDef
=over 2
Update an existing context with a new set of servers defined in a ServerDef
proto. Servers can be added to and removed from the list of remote workers
in the context. A new set of servers identified by the ServerDef must be up
when the context is updated.
This API is for experimental usage and may be subject to change.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_ContextUpdateServerDef(TFE_Context* ctx,
int keep_alive_secs,
const void* proto,
size_t proto_len,
TF_Status* status);
=head2 TFE_ContextCheckAlive
=over 2
Checks whether a remote worker is alive or not. This will return true even if
the context doesn't exist on the remote worker.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern bool TFE_ContextCheckAlive(TFE_Context* ctx,
const char* worker_name,
TF_Status* status);
=head2 TFE_ContextAsyncWait
=over 2
Sync pending nodes in local executors (including the context default executor
and thread executors) and streaming requests to remote executors, and get the
combined status.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_ContextAsyncWait(TFE_Context* ctx,
TF_Status* status);
=head2 TFE_TensorHandleDevicePointer
=over 2
This function will block till the operation that produces `h` has
completed. This is only valid on local TFE_TensorHandles. The pointer
returned will be on the device in which the TFE_TensorHandle resides (so e.g.
for a GPU tensor this will return a pointer to GPU memory). The pointer is
only guaranteed to be valid until TFE_DeleteTensorHandle is called on this
TensorHandle. Only supports POD data types.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void* TFE_TensorHandleDevicePointer(TFE_TensorHandle*,
TF_Status*);
=head2 TFE_TensorHandleDeviceMemorySize
=over 2
This function will block till the operation that produces `h` has
completed. This is only valid on local TFE_TensorHandles. Returns the size in
bytes of the memory pointed to by the device pointer returned above.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern size_t TFE_TensorHandleDeviceMemorySize(TFE_TensorHandle*,
TF_Status*);
=head2 TFE_NewTensorHandleFromDeviceMemory
=over 2
Creates a new TensorHandle from memory residing in the physical device
device_name. Takes ownership of the memory, and will call deleter to release
it after TF no longer needs it or in case of error.
Custom devices must use TFE_NewCustomDeviceTensorHandle instead.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_NewTensorHandleFromDeviceMemory(
TFE_Context* ctx, const char* device_name, TF_DataType, const int64_t* dims,
int num_dims, void* data, size_t len,
void (*deallocator)(void* data, size_t len, void* arg),
void* deallocator_arg, TF_Status* status);
=head2 TFE_HostAddressSpace
=over 2
Retrieves the address space (i.e. job, replica, task) of the local host and
saves it in the buffer.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_HostAddressSpace(TFE_Context* ctx,
TF_Buffer* buf);
=head2 TFE_OpGetAttrs
=over 2
Fetch a reference to `op`'s attributes. The returned reference is only valid
while `op` is alive.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern const TFE_OpAttrs* TFE_OpGetAttrs(const TFE_Op* op);
=head2 TFE_OpAddAttrs
=over 2
Add attributes in `attrs` to `op`.
Does not overwrite or update existing attributes, but adds new ones.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_OpAddAttrs(TFE_Op* op, const TFE_OpAttrs* attrs);
=head2 TFE_OpAttrsSerialize
=over 2
Serialize `attrs` as a tensorflow::NameAttrList protocol buffer (into `buf`),
containing the op name and a map of its attributes.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
Registers a custom device for use with eager execution.
Eager operations may be placed on this device, e.g. `with
tf.device("CUSTOM"):` from Python if `device_name` for this call is
"/job:localhost/replica:0/task:0/device:CUSTOM:0".
The custom device defines copy operations for moving TensorHandles on and
off, and an execution operation for named operations. Often execution will
simply wrap op execution on one or more physical devices.
device_info is an opaque caller-defined type stored with the custom device
which is passed to the functions referenced in the TFE_CustomDevice struct
`device` (execute, delete_device, etc.). It can for example contain the
names of wrapped devices.
There are currently no graph semantics implemented for registered custom
devices, so executing tf.functions which contain operations placed on the
custom devices will fail.
`device_name` must not name an existing physical or custom device. It must
follow the format:
/job:<name>/replica:<replica>/task:<task>/device:<type>:<device_num>
If the device is successfully registered, `status` is set to TF_OK. Otherwise
the device is not usable. In case of a bad status, `device.delete_device` is
still called on `device_info` (i.e. the caller does not retain ownership).
This API is highly experimental, and in particular is expected to change when
it starts supporting operations with attributes and when tf.function support
is added.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_RegisterCustomDevice(TFE_Context* ctx,
TFE_CustomDevice device,
const char* device_name,
void* device_info,
TF_Status* status);
=head2 TFE_IsCustomDevice
=over 2
Returns whether `device_name` maps to a registered custom device.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern bool TFE_IsCustomDevice(TFE_Context* ctx,
const char* device_name);
=head2 TFE_NewCustomDeviceTensorHandle
=over 2
Creates a new TensorHandle from memory residing in a custom device. Takes
ownership of the memory pointed to by `tensor_handle_data`, and calls
`methods.deallocator` to release it after TF no longer needs it or in case of
an error.
This call is similar to `TFE_NewTensorHandleFromDeviceMemory`, but supports
custom devices instead of physical devices and does not require blocking
waiting for exact shapes.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_NewCustomDeviceTensorHandle(
TFE_Context*, const char* device_name, TF_DataType, void* data,
TFE_CustomDeviceTensorHandle methods, TF_Status* status);
=head2 TFE_ContextGetFunctionDef
=over 2
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_ContextGetFunctionDef(TFE_Context* ctx,
const char* function_name,
TF_Buffer* buf,
TF_Status* status);
=head2 TFE_AllocateHostTensor
=over 2
Allocate and return a new Tensor on the host.
The caller must set the Tensor values by writing them to the pointer returned
by TF_TensorData with length TF_TensorByteSize.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern TF_Tensor* TFE_AllocateHostTensor(TFE_Context* ctx,
TF_DataType dtype,
const int64_t* dims,
int num_dims,
TF_Status* status);
=head2 TFE_NewTensorHandleFromTensor
=over 2
Given a Tensor, wrap it with a TensorHandle
Similar to TFE_NewTensorHandle, but includes a pointer to the TFE_Context.
The context should be identical to that of the Tensor.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT TFE_TensorHandle* TFE_NewTensorHandleFromTensor(
TFE_Context* ctx, TF_Tensor* t, TF_Status* status);
=head2 TFE_CreatePackedTensorHandle
=over 2
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT void TFE_ContextSetLogDevicePlacement(TFE_Context* ctx,
unsigned char enable,
TF_Status* status);
=head2 TFE_ContextSetRunEagerOpAsFunction
=over 2
Enables running eager ops as function.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT void TFE_ContextSetRunEagerOpAsFunction(TFE_Context* ctx,
unsigned char enable,
TF_Status* status);
=head2 TFE_ContextSetJitCompileRewrite
=over 2
Enables rewrite jit_compile functions.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT void TFE_ContextSetJitCompileRewrite(TFE_Context* ctx,
unsigned char enable,
TF_Status* status);
=head2 TFE_TensorHandleDeviceType
=over 2
Returns the device type of the operation that produced `h`.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern const char* TFE_TensorHandleDeviceType(
TFE_TensorHandle* h, TF_Status* status);
=head2 TFE_TensorHandleDeviceID
=over 2
Returns the device ID of the operation that produced `h`.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern int TFE_TensorHandleDeviceID(TFE_TensorHandle* h,
TF_Status* status);
=head2 TFE_TensorHandleGetStatus
=over 2
Returns the status for the tensor handle. In TFRT, a tensor handle can carry
error info if error happens. If so, the status will be set with the error
info. If not, status will be set as OK.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_TensorHandleGetStatus(TFE_TensorHandle* h,
TF_Status* status);
=head2 TFE_GetExecutedOpNames
=over 2
Get a comma-separated list of op names executed in graph functions dispatched
to `ctx`. This feature is currently only enabled for TFRT debug builds, for
performance and simplicity reasons.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_GetExecutedOpNames(TFE_Context* ctx,
TF_Buffer* buf,
TF_Status* status);
=head2 TFE_SetLogicalCpuDevices
=over 2
Set logical devices to the context's device manager.
If logical devices are already configured at context initialization
through TFE_ContextOptions, this method should not be called.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_SetLogicalCpuDevices(TFE_Context* ctx,
int num_cpus,
const char* prefix,
TF_Status* status);
=head2 TFE_InsertConfigKeyValue
=over 2
Set configuration key and value using coordination service.
If coordination service is enabled, the key-value will be stored on the
leader and become accessible to all workers in the cluster.
Currently, a config key can only be set with one value, and subsequently
setting the same key will lead to errors.
Note that the key-values are only expected to be used for cluster
configuration data, and should not be used for storing a large amount of data
or being accessed very frequently.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_InsertConfigKeyValue(TFE_Context* ctx,
const char* key,
const char* value,
TF_Status* status);
=head2 TFE_GetConfigKeyValue
=over 2
Get configuration key and value using coordination service.
The config key must be set before getting its value. Getting value of
non-existing config keys will result in errors.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_GetConfigKeyValue(TFE_Context* ctx,
const char* key,
TF_Buffer* value_buf,
TF_Status* status);
=head2 TFE_DeleteConfigKeyValue
=over 2
Delete configuration key-value. If `key` is a directory, recursively clean up
all key-values under the path specified by `key`.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_DeleteConfigKeyValue(TFE_Context* ctx,
const char* key,
TF_Status* status);
=head2 TFE_ReportErrorToCluster
=over 2
Report error (specified by error_code and error_message) to other tasks in
the cluster.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_ReportErrorToCluster(TFE_Context* ctx,
int error_code,
const char* error_message,
TF_Status* status);
=head2 TFE_GetTaskStates
=over 2
Get task states from the Coordination Service.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_GetTaskStates(TFE_Context* ctx,
const TF_Buffer& tasks,
void* states, TF_Status* status);
=head2 TFE_WaitAtBarrier
=over 2
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_WaitAtBarrier(TFE_Context* ctx,
const char* barrier_id,
int64_t barrier_timeout_in_ms,
TF_Status* status);
=head2 TF_GetNodesToPreserveListSize
=over 2
Get a set of node names that must be preserved. They can not be transformed
or removed during the graph transformation. This includes feed and fetch
nodes, keep_ops, init_ops. Fills in `num_values` and `storage_size`, they
will be used in `TF_GetNodesToPreserveList`.
=back
/* From <tensorflow/c/experimental/grappler/grappler.h> */
TF_CAPI_EXPORT extern void TF_GetNodesToPreserveListSize(
const TF_GrapplerItem* item, int* num_values, size_t* storage_size,
TF_Status* status);
=head2 TF_GetNodesToPreserveList
=over 2
Get a set of node names that must be preserved. They can not be transformed
or removed during the graph transformation. This includes feed and fetch
nodes, keep_ops, init_ops. Fills in `values` and `lengths`, each of which
must point to an array of length at least `num_values`.
The elements of values will point to addresses in `storage` which must be at
least `storage_size` bytes in length. `num_values` and `storage` can be
obtained from TF_GetNodesToPreserveListSize
Fails if storage_size is too small to hold the requested number of strings.
=back
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
=back
/* From <tensorflow/c/experimental/saved_model/public/tensor_spec.h> */
TF_CAPI_EXPORT extern TF_DataType TF_TensorSpecDataType(
const TF_TensorSpec* spec);
=head2 TF_TensorSpecShape
=over 2
Returns the shape associated with the TensorSpec. The returned Shape is not
owned by the caller. Caller must not call TF_DeleteShape on the returned
shape.
=back
/* From <tensorflow/c/experimental/saved_model/public/tensor_spec.h> */
TF_CAPI_EXPORT extern const TF_Shape* TF_TensorSpecShape(
const TF_TensorSpec* spec);
=head2 TF_InitPlugin
=over 2
/// Initializes a TensorFlow plugin.
///
/// Must be implemented by the plugin DSO. It is called by TensorFlow runtime.
///
/// Filesystem plugins can be loaded on demand by users via
/// `Env::LoadLibrary` or during TensorFlow's startup if they are on certain
/// paths (although this has a security risk if two plugins register for the
/// same filesystem and the malicious one loads before the legitimate one -
/// but we consider this to be something that users should care about and
/// manage themselves). In both of these cases, core TensorFlow looks for
/// the `TF_InitPlugin` symbol and calls this function.
///
/// For every filesystem URI scheme that this plugin supports, the plugin must
/// add one `TF_FilesystemPluginInfo` entry in `plugin_info->ops` and call
/// `TF_SetFilesystemVersionMetadata` for that entry.
///
/// Plugins must also initialize `plugin_info->plugin_memory_allocate` and
/// `plugin_info->plugin_memory_free` to ensure memory allocated by plugin is
/// freed in a compatible way.
=back
/* From <tensorflow/c/experimental/filesystem/filesystem_interface.h> */
TF_CAPI_EXPORT extern void TF_InitPlugin(TF_FilesystemPluginInfo* plugin_info);
=head2 TF_LoadSavedModel
=over 2
Load a SavedModel from `dirname`. We expect the SavedModel to contain a
single Metagraph (as for those exported from TF2's `tf.saved_model.save`).
Params:
dirname - A directory filepath that the SavedModel is at.
ctx - A TFE_Context containing optional load/TF runtime options.
`ctx` must outlive the returned TF_SavedModel pointer.
status - Set to OK on success and an appropriate error on failure.
Returns:
If status is not OK, returns nullptr. Otherwise, returns a newly created
TF_SavedModel instance. It must be deleted by calling TF_DeleteSavedModel.
=back
/* From <tensorflow/c/experimental/saved_model/public/saved_model_api.h> */
TF_CAPI_EXPORT extern TF_SavedModel* TF_LoadSavedModel(const char* dirname,
TFE_Context* ctx,
TF_Status* status);
=head2 TF_LoadSavedModelWithTags
=over 2
Load a SavedModel from `dirname`.
Params:
dirname - A directory filepath that the SavedModel is at.
ctx - A TFE_Context containing optional load/TF runtime options.
`ctx` must outlive the returned TF_SavedModel pointer.
tags - char* array of SavedModel tags. We will load the metagraph matching
the tags.
tags_len - number of elements in the `tags` array.
status - Set to OK on success and an appropriate error on failure.
Returns:
If status is not OK, returns nullptr. Otherwise, returns a newly created
TF_SavedModel instance. It must be deleted by calling TF_DeleteSavedModel.
=back
/* From <tensorflow/c/experimental/saved_model/public/saved_model_api.h> */
TF_CAPI_EXPORT extern TF_SavedModel* TF_LoadSavedModelWithTags(
const char* dirname, TFE_Context* ctx, const char* const* tags,
int tags_len, TF_Status* status);
=head2 TF_DeleteSavedModel
=over 2
Deletes a TF_SavedModel, and frees any resources owned by it.
=back
/* From <tensorflow/c/experimental/saved_model/public/saved_model_api.h> */
TF_CAPI_EXPORT extern void TF_DeleteSavedModel(TF_SavedModel* model);
=head2 TF_GetSavedModelConcreteFunction
=over 2
Retrieve a function from the TF2 SavedModel via function path.
Params:
model - The TF2 SavedModel to load a function from.
function_path - A string containing the path from the root saved python
object to a tf.function method.
TODO(bmzhao): Add a detailed example of this with a
python tf.module before moving this out of experimental.
status - Set to OK on success and an appropriate error on failure.
Returns:
If status is not OK, returns nullptr. Otherwise, returns a
TF_ConcreteFunction instance. The lifetime of this instance is
"conceptually" bound to `model`. Once `model` is deleted, all
`TF_ConcreteFunctions` retrieved from it are invalid, and have been deleted.
=back
/* From <tensorflow/c/experimental/saved_model/public/saved_model_api.h> */
TF_CAPI_EXPORT extern TF_ConcreteFunction* TF_GetSavedModelConcreteFunction(
TF_SavedModel* model, const char* function_path, TF_Status* status);
=head2 TF_GetSavedModelSignatureDefFunction
=over 2
Retrieve a function from the TF SavedModel via a SignatureDef key.
Params:
model - The SavedModel to load a function from.
signature_def_key - The string key of the SignatureDef map of a SavedModel:
https://github.com/tensorflow/tensorflow/blob/69b08900b1e991d84bce31f3b404f5ed768f339f/tensorflow/core/protobuf/meta_graph.proto#L89
status - Set to OK on success and an appropriate error on failure.
Returns:
If status is not OK, returns nullptr. Otherwise, returns a
TF_SignatureDefFunction instance. Once `model` is deleted, all
`TF_SignatureDefFunctions` retrieved from it are invalid, and have been
deleted.
=back
/* From <tensorflow/c/experimental/saved_model/public/saved_model_api.h> */
TF_CAPI_EXPORT extern TF_SignatureDefFunction*
TF_GetSavedModelSignatureDefFunction(TF_SavedModel* model,
const char* signature_def_key,
TF_Status* status);
=head2 TF_ConcreteFunctionGetMetadata
=over 2
Returns FunctionMetadata associated with `func`. Metadata's lifetime is
bound to `func`, which is bound to the TF_SavedModel it was loaded from.
=back
/* From <tensorflow/c/experimental/saved_model/public/concrete_function.h> */
TF_CAPI_EXPORT extern TF_FunctionMetadata* TF_ConcreteFunctionGetMetadata(
TF_ConcreteFunction* func);
=head2 TF_ConcreteFunctionMakeCallOp
=over 2
Returns a TFE_Op suitable for executing this function. Caller must provide
all function inputs in `inputs`, and must not add any additional inputs on
the returned op. (i.e. don't call TFE_OpAddInput or TFE_OpAddInputList).
The caller is responsible for deleting the returned TFE_Op. If op
construction fails, `status` will be non-OK and the returned pointer will be
null.
TODO(bmzhao): Remove this function in a subsequent change; Design + implement
a Function Execution interface for ConcreteFunction that accepts a tagged
union of types (tensorflow::Value). This effectively requires moving much of
the implementation of function.py/def_function.py to C++, and exposing a
high-level API here. A strawman for what this interface could look like:
TF_Value* TF_ExecuteFunction(TFE_Context*, TF_ConcreteFunction*, TF_Value*
inputs, int num_inputs, TF_Status* status);
=back
/* From <tensorflow/c/experimental/saved_model/public/concrete_function.h> */
TF_CAPI_EXPORT extern TFE_Op* TF_ConcreteFunctionMakeCallOp(
TF_ConcreteFunction* func, TFE_TensorHandle** inputs, int num_inputs,
TF_Status* status);
=head2 TF_SignatureDefParamName
=over 2
Returns the name of the given parameter. The caller is not responsible for
freeing the returned char*.
=back
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
=head2 TF_InitMain
=over 2
Platform specific initialization routine. Very few platforms actually require
this to be called.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT void TF_InitMain(const char* usage, int* argc, char*** argv);
=head2 TF_PickUnusedPortOrDie
=over 2
Platform-specific implementation to return an unused port. (This should be
used in tests only.)
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT int TF_PickUnusedPortOrDie(void);
=head2 TFE_NewTensorHandleFromScalar
=over 2
Fast path method that makes constructing a single scalar tensor require less
overhead and copies.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_NewTensorHandleFromScalar(
TF_DataType data_type, void* data, size_t len, TF_Status* status);
=head2 TFE_EnableCollectiveOps
=over 2
Specify the server_def that enables collective ops.
This is different to the above function in that it doesn't create remote
contexts, and remotely executing ops is not possible. It just enables
communication for collective ops.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_EnableCollectiveOps(TFE_Context* ctx,
const void* proto,
size_t proto_len,
TF_Status* status);
=head2 TFE_AbortCollectiveOps
=over 2
Aborts all ongoing collectives with the specified status. After abortion,
subsequent collectives will error with this status immediately. To reset the
collectives, create a new EagerContext.
This is intended to be used when a peer failure is detected.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_AbortCollectiveOps(TFE_Context* ctx,
TF_Status* status);
=head2 TFE_CollectiveOpsCheckPeerHealth
=over 2
Checks the health of collective ops peers. Explicit health check is needed in
multi worker collective ops to detect failures in the cluster. If a peer is
down, collective ops may hang.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_CollectiveOpsCheckPeerHealth(
TFE_Context* ctx, const char* task, int64_t timeout_in_ms,
TF_Status* status);
=head2 TF_NewShapeAndTypeList
=over 2
API for manipulating TF_ShapeAndTypeList objects.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern TF_ShapeAndTypeList* TF_NewShapeAndTypeList(
int num_shapes);
=head2 TF_ShapeAndTypeListSetShape
=over 2
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TF_ShapeAndTypeListSetShape(
TF_ShapeAndTypeList* shape_list, int index, const int64_t* dims,
int num_dims);
=head2 TF_ShapeAndTypeListSetUnknownShape
=over 2
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TF_ShapeAndTypeListSetUnknownShape(
TF_ShapeAndTypeList* shape_list, int index);
=head2 TF_ShapeAndTypeListSetDtype
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TF_DeleteShapeAndTypeListArray(
TF_ShapeAndTypeList** shape_list_array, int num_items);
=head2 TFE_InferShapes
=over 2
Infer shapes for the given `op`. The arguments mimic the arguments of the
`shape_inference::InferenceContext` constructor. Note the following:
- The inputs of the `op` are not used for shape inference. So, it is
OK to not have the inputs properly set in `op`. See `input_tensors`
if you want shape inference to consider the input tensors of the
op for shape inference.
- The types need not be set in `input_shapes` as it is not used.
- The number of `input_tensors` should be the same as the number of items
in `input_shapes`.
The results are returned in `output_shapes` and
`output_resource_shapes_and_types`. The caller is responsible for freeing the
memory in these buffers by calling `TF_DeleteShapeAndTypeList`.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_InferShapes(
TFE_Op* op, TF_ShapeAndTypeList* input_shapes, TF_Tensor** input_tensors,
TF_ShapeAndTypeList* input_tensor_as_shapes,
TF_ShapeAndTypeList** input_resource_shapes_and_types,
TF_ShapeAndTypeList** output_shapes,
TF_ShapeAndTypeList*** output_resource_shapes_and_types, TF_Status* status);
=head2 TF_ImportGraphDefOptionsSetValidateColocationConstraints
=over 2
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern void
TF_ImportGraphDefOptionsSetValidateColocationConstraints(
TF_ImportGraphDefOptions* opts, unsigned char enable);
=head2 TF_LoadPluggableDeviceLibrary
=over 2
Load the library specified by library_filename and register the pluggable
device and related kernels present in that library. This function is not
supported on mobile and embedded platforms and will fail if called.
Pass "library_filename" to a platform-specific mechanism for dynamically
loading a library. The rules for determining the exact location of the
library are platform-specific and are not documented here.
On success, returns the newly created library handle and places OK in status.
The caller owns the library handle.
On failure, returns nullptr and places an error status in status.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern TF_Library* TF_LoadPluggableDeviceLibrary(
const char* library_filename, TF_Status* status);
=head2 TF_DeletePluggableDeviceLibraryHandle
=over 2
Frees the memory associated with the library handle.
Does NOT unload the library.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TF_DeletePluggableDeviceLibraryHandle(
TF_Library* lib_handle);
=head1 SEE ALSO
L<https://github.com/tensorflow/tensorflow/tree/master/tensorflow/c>
=head1 AUTHOR
Zakariyya Mughal <zmughal@cpan.org>
=head1 COPYRIGHT AND LICENSE
This software is Copyright (c) 2022-2023 by Auto-Parallel Technologies, Inc.
This is free software, licensed under:
The Apache License, Version 2.0, January 2004
=cut