CONTRIBUTING
<<<=== RECOGNITION ===>>>
Once we have received your contribution under the terms of the APTech Family Copyright Assignment Agreement above, as well as any necessary Employer Copyright Disclaimer Agreement(s), then we will begin the process of reviewing any software pull requ...
<<<=== SUBMISSION ===>>>
When you are ready to submit the signed agreement(s), please answer the following 12 questions about yourself and your APTech Family contribution, then include your answers in the body of your e-mail or on a separate sheet of paper in snail mail, and...
1. Full Legal Name
2. Preferred Pseudonym (or "none")
3. Country of Citizenship
4. Date of Birth (spell full month name)
5. Snail Mail Address (include country)
6. E-Mail Address
7. Names of APTech Family Files Modified (or "none")
8. Names of APTech Family Files Created (or "none")
9. Current Employer(s) or Contractee(s) (or "none")
10. Does Your Job Involve Computer Programming? (or "not applicable")
11. Does Your Job Involve an IP Ownership Agreement? (or "not applicable")
12. Name(s) & Employer(s) of Additional Contributors (or "none")
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
-bundle = @Author::ZMUGHAL
-remove = License
GatherDir.exclude_filename = EMPLOYERS.pdf
;; Jupyter Notebook
GatherDir.exclude_match[0] = ^notebook/.*
GatherDir.exclude_match[1] = ^notebook/.*\.ipynb
GatherDir.exclude_match[2] = ^notebook/.ipynb_checkpoints
GatherDir.exclude_match[3] = ^docker/
; [PodWeaver]
; authordep Pod::Elemental::Transformer::List
; authordep Pod::Weaver::Section::AllowOverride
[RunExtraTests]
;; For xt/author/pod-linkcheck.t
; authordep Test::Pod::LinkCheck::Lite
;; For xt/author/pod-snippets.t
; authordep Test::Pod::Snippets
; authordep Pod::Simple::Search
; authordep With::Roles
[Test::Perl::Critic]
; authordep Perl::Critic::Community
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
TF_CAPI_EXPORT extern void TF_SetTarget(TF_SessionOptions* options,
const char* target);
=head2 TF_SetConfig
=over 2
Set the config in TF_SessionOptions.options.
config should be a serialized tensorflow.ConfigProto proto.
If config was not parsed successfully as a ConfigProto, record the
error information in *status.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_SetConfig(TF_SessionOptions* options,
const void* proto, size_t proto_len,
TF_Status* status);
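A minimal sketch of passing a serialized ConfigProto; the byte values below are an assumption based on the protobuf wire format (field 6, C<gpu_options>, nested field 4, C<allow_growth>), not taken from this document:

  /* Hand-serialized ConfigProto setting gpu_options.allow_growth = true.
     Assumed encoding: tag 0x32 (field 6, length 2), tag 0x20 (field 4), 1. */
  const unsigned char config[] = { 0x32, 0x02, 0x20, 0x01 };
  TF_SetConfig(options, config, sizeof(config), status);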
=head2 TF_DeleteSessionOptions
If the number of dimensions is unknown, `num_dims` must be set to
-1 and `dims` can be null. If a dimension is unknown, the
corresponding entry in the `dims` array must be -1.
This does not overwrite the existing shape associated with `output`,
but merges the input shape with the existing shape. For example,
setting a shape of [-1, 2] with an existing shape [2, -1] would set
a final shape of [2, 2] based on shape merging semantics.
Returns an error into `status` if:
* `output` is not in `graph`.
* An invalid shape is being set (e.g., the shape being set
is incompatible with the existing shape).
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_GraphSetTensorShape(TF_Graph* graph,
TF_Output output,
const int64_t* dims,
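A minimal sketch of the merge semantics described above, assuming the truncated declaration continues with a dimension count and a C<TF_Status*> argument, and that C<graph>, C<output>, and C<status> already exist:

  /* With an existing shape of [2, -1], setting [-1, 2] merges to a
     final shape of [2, 2]. */
  int64_t dims[] = { -1, 2 };
  TF_GraphSetTensorShape(graph, output, dims, 2, status);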
=head2 TF_GraphGetTensorNumDims
=over 2
Returns the number of dimensions of the Tensor referenced by `output`
in `graph`.
If the number of dimensions in the shape is unknown, returns -1.
Returns an error into `status` if:
* `output` is not in `graph`.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern int TF_GraphGetTensorNumDims(TF_Graph* graph,
TF_Output output,
TF_Status* status);
=head2 TF_GraphGetTensorShape
Returns the shape of the Tensor referenced by `output` in `graph`
into `dims`. `dims` must be an array large enough to hold `num_dims`
entries (e.g., the return value of TF_GraphGetTensorNumDims).
If the number of dimensions in the shape is unknown or the shape is
a scalar, `dims` will remain untouched. Otherwise, each element of
`dims` will be set corresponding to the size of the dimension. An
unknown dimension is represented by `-1`.
Returns an error into `status` if:
* `output` is not in `graph`.
* `num_dims` does not match the actual number of dimensions.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_GraphGetTensorShape(TF_Graph* graph,
TF_Output output,
int64_t* dims, int num_dims,
TF_Status* status);
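A minimal sketch combining the two calls above: query the rank first, then size the C<dims> array accordingly (error handling elided):

  /* Unknown rank is reported as -1; unknown dimensions come back
     as -1 entries in dims. */
  int num_dims = TF_GraphGetTensorNumDims(graph, output, status);
  if (num_dims >= 0) {
    int64_t* dims = malloc(num_dims * sizeof(int64_t));
    TF_GraphGetTensorShape(graph, output, dims, num_dims, status);
    free(dims);
  }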
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_OperationDescription* TF_NewOperationLocked(
TF_Graph* graph, const char* op_type, const char* oper_name);
=head2 TF_NewOperation
=over 2
Operation will only be added to *graph when TF_FinishOperation() is
called (assuming TF_FinishOperation() does not return an error).
*graph must not be deleted until after TF_FinishOperation() is
called.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_OperationDescription* TF_NewOperation(
TF_Graph* graph, const char* op_type, const char* oper_name);
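A minimal sketch of the two-step construction described above; C<TF_SetAttrType> and C<TF_FinishOperation> are assumed from the same header:

  /* The operation only joins the graph once TF_FinishOperation
     returns successfully. */
  TF_OperationDescription* desc =
      TF_NewOperation(graph, "Placeholder", "input");
  TF_SetAttrType(desc, "dtype", TF_FLOAT);
  TF_Operation* oper = TF_FinishOperation(desc, status);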
=head2 TF_SetDevice
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsSetDefaultDevice(
TF_ImportGraphDefOptions* opts, const char* device);
=head2 TF_ImportGraphDefOptionsSetUniquifyNames
=over 2
Set whether to uniquify imported operation names. If true, imported operation
names will be modified if their name already exists in the graph. If false,
conflicting names will be treated as an error. Note that this option has no
effect if a prefix is set, since the prefix will guarantee all names are
unique. Defaults to false.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsSetUniquifyNames(
TF_ImportGraphDefOptions* opts, unsigned char uniquify_names);
=head2 TF_ImportGraphDefOptionsSetUniquifyPrefix
=over 2
If true, the specified prefix will be modified if it already exists as an
operation name or prefix in the graph. If false, a conflicting prefix will be
treated as an error. This option has no effect if no prefix is specified.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsSetUniquifyPrefix(
TF_ImportGraphDefOptions* opts, unsigned char uniquify_prefix);
=head2 TF_ImportGraphDefOptionsAddInputMapping
=over 2
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_DeleteImportGraphDefResults(
TF_ImportGraphDefResults* results);
=head2 TF_GraphImportGraphDefWithResults
=over 2
Import the graph serialized in `graph_def` into `graph`. Returns nullptr and
a bad status on error. Otherwise, returns a populated
TF_ImportGraphDefResults instance. The returned instance must be deleted via
TF_DeleteImportGraphDefResults().
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_ImportGraphDefResults*
TF_GraphImportGraphDefWithResults(TF_Graph* graph, const TF_Buffer* graph_def,
const TF_ImportGraphDefOptions* options,
TF_Status* status);
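A minimal sketch of the ownership rule above; C<TF_NewImportGraphDefOptions> and C<TF_DeleteImportGraphDefOptions> are assumed from the same header, and C<graph_def> is an existing C<TF_Buffer*>:

  TF_ImportGraphDefOptions* opts = TF_NewImportGraphDefOptions();
  TF_ImportGraphDefResults* results =
      TF_GraphImportGraphDefWithResults(graph, graph_def, opts, status);
  if (TF_GetCode(status) == TF_OK)
    TF_DeleteImportGraphDefResults(results);  /* required cleanup */
  TF_DeleteImportGraphDefOptions(opts);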
=over 2
Adds a copy of function `func` and optionally its gradient function `grad`
to `g`. Once `func`/`grad` is added to `g`, it can be called by creating
an operation using the function's name.
Any changes to `func`/`grad` (including deleting it) done after this method
returns won't affect the copy of `func`/`grad` in `g`.
If `func` or `grad` are already in `g`, TF_GraphCopyFunction has no
effect on them, but can establish the function->gradient relationship
between them if `func` does not already have a gradient. If `func` already
has a gradient different from `grad`, an error is returned.
`func` must not be null.
If `grad` is null and `func` is not in `g`, `func` is added without a
gradient.
If `grad` is null and `func` is in `g`, TF_GraphCopyFunction is a noop.
`grad` must have appropriate signature as described in the doc of
GradientDef in tensorflow/core/framework/function.proto.
If successful, status is set to OK and `func` and `grad` are added to `g`.
Otherwise, status is set to the encountered error and `g` is unmodified.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_GraphCopyFunction(TF_Graph* g,
const TF_Function* func,
const TF_Function* grad,
TF_Status* status);
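A minimal sketch of the null-gradient case described above, assuming C<g>, C<func>, and C<status> already exist:

  /* Passing NULL for grad adds func without a gradient (or is a
     noop if func is already in g). */
  TF_GraphCopyFunction(g, func, NULL, status);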
=head2 TF_GraphNumFunctions
Fills in `funcs` with the TF_Function* registered in `g`.
`funcs` must point to an array of TF_Function* of length at least
`max_func`. In usual usage, max_func should be set to the result of
TF_GraphNumFunctions(g). In this case, all the functions registered in
`g` will be returned. Else, an unspecified subset.
If successful, returns the number of TF_Function* successfully set in
`funcs` and sets status to OK. The caller takes ownership of
all the returned TF_Functions. They must be deleted with TF_DeleteFunction.
On error, returns 0, sets status to the encountered error, and the contents
of funcs will be undefined.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern int TF_GraphGetFunctions(TF_Graph* g, TF_Function** funcs,
int max_func, TF_Status* status);
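A minimal sketch of the usual usage described above, assuming C<g> and C<status> exist (allocation checks elided):

  /* Ownership of each TF_Function* transfers to the caller. */
  int n = TF_GraphNumFunctions(g);
  TF_Function** funcs = malloc(n * sizeof(TF_Function*));
  int got = TF_GraphGetFunctions(g, funcs, n, status);
  for (int i = 0; i < got; i++) TF_DeleteFunction(funcs[i]);
  free(funcs);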
=head2 TF_OperationToNodeDef
If `noutputs` is zero (the function returns no outputs), `outputs`
can be null. `outputs` can contain the same tensor more than once.
output_names - The names of the function's outputs. `output_names` array
must either have the same length as `outputs`
(i.e. `noutputs`) or be null. In the former case,
the names should match the regular expression for ArgDef
names - "[a-z][a-z0-9_]*". In the latter case,
names for outputs will be generated automatically.
opts - various options for the function, e.g. XLA's inlining control.
description - optional human-readable description of this function.
status - Set to OK on success and an appropriate error on failure.
Note that when the same TF_Output is listed as both an input and an output,
the corresponding function's output will be equal to this input,
instead of the original node's output.
Callers must also satisfy the following constraints:
- `inputs` cannot refer to TF_Outputs within a control flow context. For
example, one cannot use the output of a "switch" node as input.
- `inputs` and `outputs` cannot have reference types. Reference types are
not exposed through C API and are being replaced with Resources. We support
reference types inside function's body to support legacy code. Do not
use them in new code.
- Every node in the function's body must have all of its inputs (including
control inputs). In other words, for every node in the body, each input
must be either listed in `inputs` or must come from another node in
the body. In particular, it is an error to have a control edge going from
a node outside of the body into a node in the body. This applies to control
edges going from nodes referenced in `inputs` to nodes in the body when
the former nodes are not in the body (automatically skipped or not
included in explicitly specified body).
Returns:
On success, a newly created TF_Function instance. It must be deleted by
calling TF_DeleteFunction.
On failure, null.
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_Function* TF_FunctionImportFunctionDef(
const void* proto, size_t proto_len, TF_Status* status);
=head2 TF_FunctionSetAttrValueProto
=over 2
Sets function attribute named `attr_name` to value stored in `proto`.
If this attribute is already set to another value, it is overridden.
`proto` should point to a sequence of bytes of length `proto_len`
representing a binary serialization of an AttrValue protocol
buffer.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_FunctionSetAttrValueProto(TF_Function* func,
const char* attr_name,
const void* proto,
size_t proto_len,
TF_Status* status);
=head2 TF_FunctionGetAttrValueProto
=over 2
Sets `output_attr_value` to the binary-serialized AttrValue proto
representation of the value of the `attr_name` attr of `func`.
If `attr_name` attribute is not present, status is set to an error.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_FunctionGetAttrValueProto(
TF_Function* func, const char* attr_name, TF_Buffer* output_attr_value,
TF_Status* status);
=head2 TF_DeleteFunction
=head2 TF_TryEvaluateConstant
=over 2
Attempts to evaluate `output`. This will only be possible if `output` doesn't
depend on any graph inputs (this function is safe to call if this isn't the
case though).
If the evaluation is successful, this function returns true and `output`'s
value is returned in `result`. Otherwise returns false. An error status is
returned if something is wrong with the graph or input. Note that this may
return false even if no error status is set.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern unsigned char TF_TryEvaluateConstant(TF_Graph* graph,
TF_Output output,
TF_Tensor** result,
TF_Status* status);
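A minimal sketch, assuming C<graph>, C<output>, and C<status> exist:

  /* result is only valid when the call returns true. */
  TF_Tensor* result = NULL;
  if (TF_TryEvaluateConstant(graph, output, &result, status)) {
    /* ... use result ... */
    TF_DeleteTensor(result);
  }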
=head2 TF_NewSession
=over 2
Return a new execution session with the associated graph, or NULL on
error. Does not take ownership of any input parameters.
*`graph` must be a valid graph (not deleted or nullptr). `graph` will be
kept alive for the lifetime of the returned TF_Session. New nodes can still
be added to `graph` after this call.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_Session* TF_NewSession(TF_Graph* graph,
const TF_SessionOptions* opts,
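A minimal sketch, assuming the truncated declaration above ends with a C<TF_Status*> argument; the options object may be deleted once the session exists:

  TF_SessionOptions* opts = TF_NewSessionOptions();
  TF_Session* session = TF_NewSession(graph, opts, status);
  TF_DeleteSessionOptions(opts);
  /* session is NULL on error; inspect status for details. */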
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_CloseSession(TF_Session*, TF_Status* status);
=head2 TF_DeleteSession
=over 2
Destroy a session object.
Even if error information is recorded in *status, this call discards all
local resources associated with the session. The session may not be used
during or after this call (and the session drops its reference to the
corresponding graph).
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_DeleteSession(TF_Session*, TF_Status* status);
=head2 TF_SessionRun
non-NULL, in which case it must point to an empty, freshly allocated
`TF_Buffer` that may be updated to contain the serialized representation
of a `RunMetadata` protocol buffer.
The caller retains ownership of `input_values` (which can be deleted using
TF_DeleteTensor). The caller also retains ownership of `run_options` and/or
`run_metadata` (when not NULL) and should manually call TF_DeleteBuffer on
them.
On success, the tensors corresponding to outputs[0,noutputs-1] are placed in
output_values[]. Ownership of the elements of output_values[] is transferred
to the caller, which must eventually call TF_DeleteTensor on them.
On failure, output_values[] contains NULLs.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_SessionRun(
TF_Session* session,
// RunOptions
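A minimal sketch of a single-feed, single-fetch run; the full parameter order is an assumption based on the truncated declaration above, and C<input>, C<output>, and C<input_value> are existing handles:

  TF_Output feeds[1]   = { input };
  TF_Output fetches[1] = { output };
  TF_Tensor* feed_values[1]  = { input_value };
  TF_Tensor* fetch_values[1] = { NULL };
  TF_SessionRun(session,
                NULL,                      /* run_options */
                feeds, feed_values, 1,     /* inputs */
                fetches, fetch_values, 1,  /* outputs */
                NULL, 0,                   /* target operations */
                NULL,                      /* run_metadata */
                status);
  if (TF_GetCode(status) == TF_OK)
    TF_DeleteTensor(fetch_values[0]);      /* caller-owned on success */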
=over 2
Set up the graph with the intended feeds (inputs) and fetches (outputs) for a
sequence of partial run calls.
On success, returns a handle that is used for subsequent PRun calls. The
handle should be deleted with TF_DeletePRunHandle when it is no longer
needed.
On failure, out_status contains a tensorflow::Status with an error
message. *handle is set to nullptr.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_SessionPRunSetup(
TF_Session*,
// Input names
const TF_Output* inputs, int ninputs,
// Output names
=head2 TF_DeviceListName
=over 2
Retrieves the full name of the device (e.g. /job:worker/replica:0/...)
The return value will be a pointer to a null terminated string. The caller
must not modify or delete the string. It will be deallocated upon a call to
TF_DeleteDeviceList.
If index is out of bounds, an error code will be set in the status object,
and a null pointer will be returned.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern const char* TF_DeviceListName(const TF_DeviceList* list,
int index,
TF_Status* status);
=head2 TF_DeviceListType
=over 2
Retrieves the type of the device at the given index.
The caller must not modify or delete the string. It will be deallocated upon
a call to TF_DeleteDeviceList.
If index is out of bounds, an error code will be set in the status object,
and a null pointer will be returned.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern const char* TF_DeviceListType(const TF_DeviceList* list,
int index,
TF_Status* status);
=head2 TF_DeviceListMemoryBytes
=over 2
Retrieve the amount of memory associated with a given device.
If index is out of bounds, an error code will be set in the status object,
and -1 will be returned.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern int64_t TF_DeviceListMemoryBytes(
const TF_DeviceList* list, int index, TF_Status* status);
=head2 TF_DeviceListIncarnation
=over 2
Retrieve the incarnation number of a given device.
If index is out of bounds, an error code will be set in the status object,
and 0 will be returned.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern uint64_t TF_DeviceListIncarnation(
const TF_DeviceList* list, int index, TF_Status* status);
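A minimal sketch tying the four accessors together; C<TF_SessionListDevices>, C<TF_DeviceListCount>, and C<TF_DeleteDeviceList> are assumed from the same header:

  TF_DeviceList* devices = TF_SessionListDevices(session, status);
  for (int i = 0; i < TF_DeviceListCount(devices); i++) {
    printf("%s (%s): %lld bytes\n",
           TF_DeviceListName(devices, i, status),
           TF_DeviceListType(devices, i, status),
           (long long)TF_DeviceListMemoryBytes(devices, i, status));
  }
  TF_DeleteDeviceList(devices);  /* invalidates the returned strings */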
=head2 TF_LoadLibrary
Load the library specified by library_filename and register the ops and
kernels present in that library.
Pass "library_filename" to a platform-specific mechanism for dynamically
loading a library. The rules for determining the exact location of the
library are platform-specific and are not documented here.
On success, place OK in status and return the newly created library handle.
The caller owns the library handle.
On failure, place an error status in status and return NULL.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_Library* TF_LoadLibrary(const char* library_filename,
TF_Status* status);
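A minimal sketch; C<"my_custom_ops.so"> is a hypothetical filename, and C<TF_DeleteLibraryHandle> (documented below) releases the caller-owned handle:

  TF_Library* lib = TF_LoadLibrary("my_custom_ops.so", status);
  if (lib != NULL) {
    /* ... ops from the library are now registered ... */
    TF_DeleteLibraryHandle(lib);
  }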
=head2 TF_GetOpList
=over 2
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_DeleteLibraryHandle(TF_Library* lib_handle);
=head2 TF_GetAllOpList
=over 2
Get the OpList of all OpDefs defined in this address space.
Returns a TF_Buffer, ownership of which is transferred to the caller
(and can be freed using TF_DeleteBuffer).
The data in the buffer will be the serialized OpList proto for ops registered
in this address space.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_Buffer* TF_GetAllOpList(void);
=over 2
Creates a new TF_ApiDefMap instance.
Params:
op_list_buffer - TF_Buffer instance containing serialized OpList
protocol buffer. (See
https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto
for the OpList proto definition).
status - Set to OK on success and an appropriate error on failure.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_ApiDefMap* TF_NewApiDefMap(TF_Buffer* op_list_buffer,
TF_Status* status);
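A minimal sketch seeding the map from C<TF_GetAllOpList> above; C<TF_DeleteApiDefMap> and C<TF_DeleteBuffer> are assumed from the same library:

  TF_Buffer* op_list = TF_GetAllOpList();
  TF_ApiDefMap* api_map = TF_NewApiDefMap(op_list, status);
  TF_DeleteBuffer(op_list);  /* assumed safe to free once the map is built */
  /* ... query api_map ... */
  TF_DeleteApiDefMap(api_map);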
=head2 TF_DeleteApiDefMap
=over 2
TF_CAPI_EXPORT extern void TF_RegisterLogListener(
void (*listener)(const char*));
=head2 TF_RegisterFilesystemPlugin
=over 2
Register a FileSystem plugin from filename `plugin_filename`.
On success, place OK in status.
On failure, place an error status in status.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_RegisterFilesystemPlugin(
const char* plugin_filename, TF_Status* status);
=head2 TF_NewShape
=over 2
void* deallocator_arg);
=head2 TF_AllocateTensor
=over 2
Allocate and return a new Tensor.
This function is an alternative to TF_NewTensor and should be used when
memory is allocated to pass the Tensor to the C API. The allocated memory
satisfies TensorFlow's memory alignment preferences and should be preferred
over calling malloc and free.
The caller must set the Tensor values by writing them to the pointer returned
by TF_TensorData with length TF_TensorByteSize.
=back
/* From <tensorflow/c/tf_tensor.h> */
TF_CAPI_EXPORT extern TF_Tensor* TF_AllocateTensor(TF_DataType,
const int64_t* dims,
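A minimal sketch, assuming the truncated declaration continues with a dimension count and a byte length:

  /* A 2x3 float tensor; values are written through TF_TensorData. */
  int64_t dims[2] = { 2, 3 };
  TF_Tensor* t = TF_AllocateTensor(TF_FLOAT, dims, 2,
                                   2 * 3 * sizeof(float));
  float* data = (float*)TF_TensorData(t);
  for (int i = 0; i < 6; i++) data[i] = 0.0f;
  /* ... hand t to the API, eventually TF_DeleteTensor(t) ... */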
=back
/* From <tensorflow/c/tf_status.h> */
TF_CAPI_EXPORT void TF_SetPayload(TF_Status* s, const char* key,
const char* value);
=head2 TF_SetStatusFromIOError
=over 2
Convert from an I/O error code (e.g., errno) to a TF_Status value.
Any previous information is lost. Prefer to use this instead of TF_SetStatus
when the error comes from I/O operations.
=back
/* From <tensorflow/c/tf_status.h> */
TF_CAPI_EXPORT extern void TF_SetStatusFromIOError(TF_Status* s, int error_code,
const char* context);
=head2 TF_GetCode
=over 2
Return the code recorded in *s.
=back
/* From <tensorflow/c/tf_status.h> */
TF_CAPI_EXPORT extern TF_Code TF_GetCode(const TF_Status* s);
=head2 TF_Message
=over 2
Return a pointer to the (null-terminated) error message in *s. The
return value points to memory that is only usable until the next
mutation to *s. Always returns an empty string if TF_GetCode(s) is
TF_OK.
=back
/* From <tensorflow/c/tf_status.h> */
TF_CAPI_EXPORT extern const char* TF_Message(const TF_Status* s);
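A minimal sketch of the status round-trip described in this header; C<TF_NewStatus> and C<TF_DeleteStatus> are assumed from the same header:

  TF_Status* s = TF_NewStatus();
  TF_SetStatusFromIOError(s, ENOENT, "opening checkpoint");
  if (TF_GetCode(s) != TF_OK)
    fprintf(stderr, "error: %s\n", TF_Message(s));  /* copy before reusing s */
  TF_DeleteStatus(s);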
=head2 TF_NewBufferFromString
TF_OpDefinitionBuilder* builder);
=head2 TF_OpDefinitionBuilderAddAttr
=over 2
Adds an attr to the given TF_OpDefinitionBuilder. The spec has
format "<name>:<type>" or "<name>:<type>=<default>"
where <name> matches regexp [a-zA-Z][a-zA-Z0-9_]*.
By convention, names containing only capital letters are reserved for
attributes whose values can be inferred by the operator implementation if not
supplied by the user. If the attribute name contains characters other than
capital letters, the operator expects the user to provide the attribute value
at operation runtime.
<type> can be:
"string", "int", "float", "bool", "type", "shape", or "tensor"
"numbertype", "realnumbertype", "quantizedtype"
(meaning "type" with a restriction on valid values)
"{int32,int64}" or "{realnumbertype,quantizedtype,string}"
(meaning "type" with a restriction containing unions of value types)
TF_CAPI_EXPORT extern TF_DimensionHandle* TF_NewDimensionHandle();
=head2 TF_ShapeInferenceContext_GetAttrType
=over 2
Interprets the named shape inference context attribute as a TF_DataType and
places it into *val. *status is set to TF_OK.
If the attribute could not be found or could not be interpreted as
TF_DataType, *status is populated with an error.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern void TF_ShapeInferenceContext_GetAttrType(
TF_ShapeInferenceContext* ctx, const char* attr_name, TF_DataType* val,
TF_Status* status);
=head2 TF_ShapeInferenceContextRank
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern int TF_ShapeInferenceContextRankKnown(
TF_ShapeInferenceContext* ctx, TF_ShapeHandle* handle);
=head2 TF_ShapeInferenceContextWithRank
=over 2
If <handle> has rank <rank>, or its rank is unknown, return OK and return the
shape with asserted rank in <*result>. Otherwise an error is placed into
`status`.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern void TF_ShapeInferenceContextWithRank(
TF_ShapeInferenceContext* ctx, TF_ShapeHandle* handle, int64_t rank,
TF_ShapeHandle* result, TF_Status* status);
=head2 TF_ShapeInferenceContextWithRankAtLeast
=over 2
If <handle> has rank at least <rank>, or its rank is unknown, return OK and
return the shape with asserted rank in <*result>. Otherwise an error is
placed into `status`.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern void TF_ShapeInferenceContextWithRankAtLeast(
TF_ShapeInferenceContext* ctx, TF_ShapeHandle* handle, int64_t rank,
TF_ShapeHandle* result, TF_Status* status);
=head2 TF_ShapeInferenceContextWithRankAtMost
=over 2
If <handle> has rank at most <rank>, or its rank is unknown, return OK and
return the shape with asserted rank in <*result>. Otherwise an error is
placed into `status`.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern void TF_ShapeInferenceContextWithRankAtMost(
TF_ShapeInferenceContext* ctx, TF_ShapeHandle* handle, int64_t rank,
TF_ShapeHandle* result, TF_Status* status);
=head2 TF_ShapeInferenceContextDim
Deletes the specified directory and all subdirectories and files underneath
it. This is accomplished by traversing the directory tree rooted at dirname
and deleting entries as they are encountered.
If dirname itself is not readable or does not exist, *undeleted_dir_count is
set to 1, *undeleted_file_count is set to 0 and an appropriate status (e.g.
TF_NOT_FOUND) is returned.
If dirname and all its descendants were successfully deleted, TF_OK is
returned and both error counters are set to zero.
Otherwise, while traversing the tree, undeleted_file_count and
undeleted_dir_count are updated if an entry of the corresponding type could
not be deleted. The returned error status represents the reason that any one
of these entries could not be deleted.
Typical status codes:
* TF_OK - dirname exists and we were able to delete everything underneath
* TF_NOT_FOUND - dirname doesn't exist
* TF_PERMISSION_DENIED - dirname or some descendant is not writable
* TF_UNIMPLEMENTED - some underlying functions (like Delete) are not
implemented
=back
=over 2
\brief Load a dynamic library.
Pass "library_filename" to a platform-specific mechanism for dynamically
loading a library. The rules for determining the exact location of the
library are platform-specific and are not documented here.
On success, place OK in status and return the newly created library handle.
Otherwise, returns nullptr and sets an error status.
=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern void* TF_LoadSharedLibrary(const char* library_filename,
TF_Status* status);
=head2 TF_GetSymbolFromLibrary
=over 2
\brief Get a pointer to a symbol from a dynamic library.
"handle" should be a pointer returned from a previous call to
TF_LoadLibraryFromEnv. On success, place OK in status and return a pointer to
the located symbol. Otherwise, returns nullptr and sets an error status.
=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern void* TF_GetSymbolFromLibrary(void* handle,
const char* symbol_name,
TF_Status* status);
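A minimal sketch pairing the two calls; C<"libplugin.so"> and C<"PluginInit"> are hypothetical names:

  void* handle = TF_LoadSharedLibrary("libplugin.so", status);
  if (TF_GetCode(status) == TF_OK) {
    void (*init)(void) = (void (*)(void))
        TF_GetSymbolFromLibrary(handle, "PluginInit", status);
    if (TF_GetCode(status) == TF_OK) init();
  }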
=head2 TF_Log
=head2 TF_NewKernelBuilder
=over 2
Allocates a new kernel builder and returns a pointer to it.
If non-null, TensorFlow will call create_func when it needs to instantiate
the kernel. The pointer returned by create_func will be passed to
compute_func and delete_func, thereby functioning as a "this" pointer for
referring to kernel instances.
The TF_OpKernelConstruction pointer passed to create_func is owned by
TensorFlow and will be deleted once create_func returns. It must not be used
after this.
When TensorFlow needs to perform a computation with this kernel, it will
call compute_func. This function will receive the pointer returned by
create_func (or null if no create_func was provided), along with the inputs
to the computation.
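A minimal sketch wiring the three callbacks together; the builder's parameter order, C<TF_RegisterKernelBuilder>, and the C<"MyOp">/C<"CPU"> names are assumptions for illustration:

  /* A stand-in state object serves as the "this" pointer. */
  void* my_create(TF_OpKernelConstruction* ctx) { return malloc(1); }
  void  my_compute(void* kernel, TF_OpKernelContext* ctx) { /* ... */ }
  void  my_delete(void* kernel) { free(kernel); }

  void register_my_kernel(TF_Status* status) {
    TF_KernelBuilder* b = TF_NewKernelBuilder(
        "MyOp", "CPU", my_create, my_compute, my_delete);
    TF_RegisterKernelBuilder("MyOpKernel", b, status);
  }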
TF_CAPI_EXPORT extern void TF_DeleteKernelBuilder(TF_KernelBuilder* builder);
=head2 TF_GetStream
=over 2
TF_GetStream returns the SP_Stream available in ctx.
This function returns a stream only for devices registered using the
StreamExecutor C API
(tensorflow/c/experimental/stream_executor/stream_executor.h). It will return
nullptr and set error status in all other cases.
Experimental: this function doesn't have compatibility guarantees and is subject
to change at any time.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern SP_Stream TF_GetStream(TF_OpKernelContext* ctx,
TF_Status* status);
=head2 TF_NumInputs
int32_t* total_size, TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrType
=over 2
Interprets the named kernel construction attribute as a TF_DataType and
places it into *val. *status is set to TF_OK.
If the attribute could not be found or could not be interpreted as
TF_DataType, *status is populated with an error.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrType(
TF_OpKernelConstruction* ctx, const char* attr_name, TF_DataType* val,
TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrInt32
=over 2
Interprets the named kernel construction attribute as int32_t and
places it into *val. *status is set to TF_OK.
If the attribute could not be found or could not be interpreted as
int32, *status is populated with an error.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrInt32(
TF_OpKernelConstruction* ctx, const char* attr_name, int32_t* val,
TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrInt64
=over 2
Interprets the named kernel construction attribute as int64_t and
places it into *val. *status is set to TF_OK.
If the attribute could not be found or could not be interpreted as
int64, *status is populated with an error.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrInt64(
TF_OpKernelConstruction* ctx, const char* attr_name, int64_t* val,
TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrFloat
=over 2
Interprets the named kernel construction attribute as float and
places it into *val. *status is set to TF_OK.
If the attribute could not be found or could not be interpreted as
float, *status is populated with an error.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrFloat(
TF_OpKernelConstruction* ctx, const char* attr_name, float* val,
TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrBool
=over 2
Interprets the named kernel construction attribute as bool and
places it into *val. *status is set to TF_OK.
If the attribute could not be found or could not be interpreted as
bool, *status is populated with an error.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrBool(
TF_OpKernelConstruction* ctx, const char* attr_name, TF_Bool* val,
TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrString
=over 2
Interprets the named kernel construction attribute as string and
places it into *val. `val` must
point to an array of length at least `max_length` (ideally set to
total_size from TF_OpKernelConstruction_GetAttrSize(ctx,
attr_name, list_size, total_size)). *status is set to TF_OK.
If the attribute could not be found or could not be interpreted as
string, *status is populated with an error.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrString(
TF_OpKernelConstruction* ctx, const char* attr_name, char* val,
size_t max_length, TF_Status* status);
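A minimal sketch using C<TF_OpKernelConstruction_GetAttrSize> (declared earlier in this header) to size the buffer; C<"label"> is a hypothetical attribute name:

  int32_t list_size = 0, total_size = 0;
  TF_OpKernelConstruction_GetAttrSize(ctx, "label", &list_size,
                                      &total_size, status);
  char* val = malloc(total_size + 1);
  TF_OpKernelConstruction_GetAttrString(ctx, "label", val,
                                        total_size, status);
  val[total_size] = '\0';  /* terminate defensively */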
=head2 TF_OpKernelConstruction_GetAttrTensor
=over 2
Interprets the named kernel construction attribute as tensor and places it
into *val. Allocates a new TF_Tensor which the caller is expected to take
ownership of (and can deallocate using TF_DeleteTensor). *status is set to
TF_OK.
If the attribute could not be found or could not be interpreted as
tensor, *status is populated with an error.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrTensor(
TF_OpKernelConstruction* ctx, const char* attr_name, TF_Tensor** val,
TF_Status* status);
=head2 TF_OpKernelConstruction_GetAttrTypeList
=head2 TF_AssignVariable
=over 2
Expose higher level Assignment operation for Pluggable vendors to implement
in the plugin for Training. The API takes in the context with indices for
the input and value tensors. It also accepts the copy callback provided by
pluggable vendor to do the copying of the tensors. The caller takes ownership
of the `source` and `dest` tensors and is responsible for freeing them with
TF_DeleteTensor. This function will return an error when the following
conditions are met:
1. `validate_shape` is set to `true`
2. The variable is initialized
3. The shape of the value tensor doesn't match the shape of the variable
tensor.
=back
/* From <tensorflow/c/kernels_experimental.h> */
TF_CAPI_EXPORT extern void TF_AssignVariable(
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern TFE_ContextOptions* TFE_NewContextOptions(void);
=head2 TFE_ContextOptionsSetConfig
=over 2
Set the config in TF_ContextOptions.options.
config should be a serialized tensorflow.ConfigProto proto.
If config was not parsed successfully as a ConfigProto, record the
error information in *status.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_ContextOptionsSetConfig(
TFE_ContextOptions* options, const void* proto, size_t proto_len,
TF_Status* status);
=head2 TFE_ContextOptionsSetAsync
=over 2
Sets the default execution mode (sync/async). Note that this can be
overridden per thread using TFE_ContextSetExecutorForThread.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_ContextOptionsSetAsync(TFE_ContextOptions*,
unsigned char enable);
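A minimal sketch; C<TFE_NewContext> and the delete call are assumed from the same header:

  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_ContextOptionsSetAsync(opts, 1);  /* enable async execution */
  TFE_Context* ctx = TFE_NewContext(opts, status);
  TFE_DeleteContextOptions(opts);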
=head2 TFE_ContextOptionsSetDevicePlacementPolicy
=over 2
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern const char* TFE_TensorHandleBackingDeviceName(
TFE_TensorHandle* h, TF_Status* status);
=head2 TFE_TensorHandleCopySharingTensor
=over 2
Return a pointer to a new TFE_TensorHandle that shares the underlying tensor
with `h`. On success, `status` is set to OK. On failure, `status` reflects
the error and a nullptr is returned.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_TensorHandleCopySharingTensor(
TFE_TensorHandle* h, TF_Status* status);
=head2 TFE_TensorHandleResolve
=over 2
TF_Status* status);
=head2 TFE_TensorHandleTensorDebugInfo
=over 2
Retrieves TFE_TensorDebugInfo for `handle`.
If TFE_TensorHandleTensorDebugInfo succeeds, `status` is set to OK and caller
is responsible for deleting returned TFE_TensorDebugInfo.
If TFE_TensorHandleTensorDebugInfo fails, `status` is set to appropriate
error and nullptr is returned. This function can block till the operation
that produces `handle` has completed.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern TFE_TensorDebugInfo* TFE_TensorHandleTensorDebugInfo(
TFE_TensorHandle* h, TF_Status* status);
=head2 TFE_DeleteTensorDebugInfo
TF_Status* status);
=head2 TFE_Execute
=over 2
Execute the operation defined by 'op' and return handles to computed
tensors in `retvals`.
'retvals' must point to a pre-allocated array of TFE_TensorHandle* and
'*num_retvals' should be set to the size of this array. It is an error if
the size of 'retvals' is less than the number of outputs. This call sets
*num_retvals to the number of outputs.
If async execution is enabled, the call may simply enqueue the execution
and return "non-ready" handles in `retvals`. Note that any handles contained
in 'op' should not be mutated till the kernel execution actually finishes.
For sync execution, if any of the inputs to `op` are not ready, this call
will block till they become ready and then return when the kernel execution
is done.
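A minimal sketch of a single eager call; C<TFE_NewOp>, C<TFE_OpAddInput>, and C<TFE_DeleteOp> are assumed from the same header, and C<a>/C<b> are existing C<TFE_TensorHandle*>:

  TFE_Op* op = TFE_NewOp(ctx, "MatMul", status);
  TFE_OpAddInput(op, a, status);
  TFE_OpAddInput(op, b, status);
  TFE_TensorHandle* retvals[1];
  int num_retvals = 1;               /* size of retvals */
  TFE_Execute(op, retvals, &num_retvals, status);
  TFE_DeleteOp(op);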
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern int64_t TFE_MonitoringCounterCellValue(
TFE_MonitoringCounterCell* cell);
=head2 TFE_MonitoringNewCounter0
=over 2
Returns a new Counter metric object. The caller should manage lifetime of
the object. Using a duplicate metric name will crash the program with a fatal
error.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern TFE_MonitoringCounter0* TFE_MonitoringNewCounter0(
const char* name, TF_Status* status, const char* description);
=head2 TFE_MonitoringDeleteCounter0
=over 2
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_ExecutorWaitForAllPendingNodes(
TFE_Executor*, TF_Status* status);
=head2 TFE_ExecutorClearError
=over 2
When an error happens, any pending operations are discarded, and newly issued
ops return an error. This call clears the error state and re-enables
execution of newly issued ops.
Note that outputs of discarded ops remain in a corrupt state and should not
be used for future calls.
TODO(agarwal): mark the affected handles and raise errors if they are used.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_ExecutorClearError(TFE_Executor*);
=head2 TFE_ContextSetExecutorForThread
=over 2
Sets a custom Executor for the current thread. All nodes created by this
thread will be added to this Executor. It will override the current executor.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_ContextSetExecutorForThread(TFE_Context*,
TFE_Executor*);
=head2 TFE_ContextGetExecutorForThread
=over 2
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern size_t TFE_TensorHandleDeviceMemorySize(TFE_TensorHandle*,
TF_Status*);
=head2 TFE_NewTensorHandleFromDeviceMemory
=over 2
Creates a new TensorHandle from memory residing in the physical device
device_name. Takes ownership of the memory, and will call deleter to release
it after TF no longer needs it or in case of error.
Custom devices must use TFE_NewCustomDeviceTensorHandle instead.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_NewTensorHandleFromDeviceMemory(
TFE_Context* ctx, const char* device_name, TF_DataType, const int64_t* dims,
int num_dims, void* data, size_t len,
void (*deallocator)(void* data, size_t len, void* arg),
TF_CAPI_EXPORT extern bool TFE_IsCustomDevice(TFE_Context* ctx,
const char* device_name);
=head2 TFE_NewCustomDeviceTensorHandle
=over 2
Creates a new TensorHandle from memory residing in a custom device. Takes
ownership of the memory pointed to by `tensor_handle_data`, and calls
`methods.deallocator` to release it after TF no longer needs it or in case of
an error.
This call is similar to `TFE_NewTensorHandleFromDeviceMemory`, but supports
custom devices instead of physical devices and does not require blocking
waiting for exact shapes.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_NewCustomDeviceTensorHandle(
TFE_Context*, const char* device_name, TF_DataType, void* data,
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern int TFE_TensorHandleDeviceID(TFE_TensorHandle* h,
TF_Status* status);
=head2 TFE_TensorHandleGetStatus
=over 2
Returns the status for the tensor handle. In TFRT, a tensor handle can carry
error info if an error happens. If so, the status will be set with the error
info. If not, status will be set as OK.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_TensorHandleGetStatus(TFE_TensorHandle* h,
TF_Status* status);
=head2 TFE_GetExecutedOpNames
TF_Status* status);
=head2 TFE_InsertConfigKeyValue
=over 2
Set configuration key and value using coordination service.
If coordination service is enabled, the key-value will be stored on the
leader and become accessible to all workers in the cluster.
Currently, a config key can only be set with one value, and subsequently
setting the same key will lead to errors.
Note that the key-values are only expected to be used for cluster
configuration data, and should not be used for storing large amounts of data
or accessed very frequently.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_InsertConfigKeyValue(TFE_Context* ctx,
const char* key,
const char* value,
TF_Status* status);
=head2 TFE_GetConfigKeyValue
=over 2
Get configuration key and value using coordination service.
The config key must be set before getting its value. Getting value of
non-existing config keys will result in errors.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_GetConfigKeyValue(TFE_Context* ctx,
const char* key,
TF_Buffer* value_buf,
TF_Status* status);
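A minimal sketch pairing the two calls; the key and value are hypothetical, and C<TF_NewBuffer>/C<TF_DeleteBuffer> are assumed from the same library:

  TFE_InsertConfigKeyValue(ctx, "cluster/leader", "worker0", status);
  TF_Buffer* value = TF_NewBuffer();
  TFE_GetConfigKeyValue(ctx, "cluster/leader", value, status);
  /* ... read value->data / value->length ... */
  TF_DeleteBuffer(value);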
=head2 TFE_DeleteConfigKeyValue
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_DeleteConfigKeyValue(TFE_Context* ctx,
const char* key,
TF_Status* status);
=head2 TFE_ReportErrorToCluster
=over 2
Report error (specified by error_code and error_message) to other tasks in
the cluster.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_ReportErrorToCluster(TFE_Context* ctx,
int error_code,
const char* error_message,
TF_Status* status);
=head2 TFE_GetTaskStates
=over 2
Get task states from the Coordination Service.
=back
=over 2
Load a SavedModel from `dirname`. We expect the SavedModel to contain a
single Metagraph (as for those exported from TF2's `tf.saved_model.save`).
Params:
dirname - A directory filepath that the SavedModel is at.
ctx - A TFE_Context containing optional load/TF runtime options.
`ctx` must outlive the returned TF_SavedModel pointer.
status - Set to OK on success and an appropriate error on failure.
Returns:
If status is not OK, returns nullptr. Otherwise, returns a newly created
TF_SavedModel instance. It must be deleted by calling TF_DeleteSavedModel.
=back
/* From <tensorflow/c/experimental/saved_model/public/saved_model_api.h> */
TF_CAPI_EXPORT extern TF_SavedModel* TF_LoadSavedModel(const char* dirname,
TFE_Context* ctx,
TF_Status* status);
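A minimal sketch; C<"path/to/saved_model"> is a placeholder directory, and C<TF_DeleteSavedModel> is assumed from the same header:

  /* ctx must outlive model, per the note above. */
  TF_SavedModel* model =
      TF_LoadSavedModel("path/to/saved_model", ctx, status);
  if (model != NULL) {
    /* ... fetch and run functions ... */
    TF_DeleteSavedModel(model);
  }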
Load a SavedModel from `dirname`.
Params:
dirname - A directory filepath that the SavedModel is at.
ctx - A TFE_Context containing optional load/TF runtime options.
`ctx` must outlive the returned TF_SavedModel pointer.
tags - char* array of SavedModel tags. We will load the metagraph matching
the tags.
tags_len - number of elements in the `tags` array.
status - Set to OK on success and an appropriate error on failure.
Returns:
If status is not OK, returns nullptr. Otherwise, returns a newly created
TF_SavedModel instance. It must be deleted by calling TF_DeleteSavedModel.
=back
/* From <tensorflow/c/experimental/saved_model/public/saved_model_api.h> */
TF_CAPI_EXPORT extern TF_SavedModel* TF_LoadSavedModelWithTags(
const char* dirname, TFE_Context* ctx, const char* const* tags,
int tags_len, TF_Status* status);
=over 2
Retrieve a function from the TF2 SavedModel via function path.
Params:
model - The TF2 SavedModel to load a function from.
function_path - A string containing the path from the root saved python
object to a tf.function method.
TODO(bmzhao): Add a detailed example of this with a
python tf.module before moving this out of experimental.
status - Set to OK on success and an appropriate error on failure.
Returns:
If status is not OK, returns nullptr. Otherwise, returns a
TF_ConcreteFunction instance. The lifetime of this instance is
"conceptually" bound to `model`. Once `model` is deleted, all
`TF_ConcreteFunctions` retrieved from it are invalid, and have been deleted.
=back
/* From <tensorflow/c/experimental/saved_model/public/saved_model_api.h> */
TF_CAPI_EXPORT extern TF_ConcreteFunction* TF_GetSavedModelConcreteFunction(
=head2 TF_GetSavedModelSignatureDefFunction
=over 2
Retrieve a function from the TF SavedModel via a SignatureDef key.
Params:
model - The SavedModel to load a function from.
signature_def_key - The string key of the SignatureDef map of a SavedModel:
https://github.com/tensorflow/tensorflow/blob/69b08900b1e991d84bce31f3b404f5ed768f339f/tensorflow/core/protobuf/meta_graph.proto#L89
status - Set to OK on success and an appropriate error on failure.
Returns:
If status is not OK, returns nullptr. Otherwise, returns a
TF_SignatureDefFunction instance. Once `model` is deleted, all
`TF_SignatureDefFunctions` retrieved from it are invalid, and have been
deleted.
=back
/* From <tensorflow/c/experimental/saved_model/public/saved_model_api.h> */
TF_CAPI_EXPORT extern TF_SignatureDefFunction*
TF_Status* status);
=head2 TF_MakeInternalErrorStatus
=over 2
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TF_MakeInternalErrorStatus(TF_Status* status,
const char* errMsg);
=head2 TF_NewCheckpointReader
=over 2
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern TF_CheckpointReader* TF_NewCheckpointReader(
const char* filename, TF_Status* status);
TF_CAPI_EXPORT extern void TFE_EnableCollectiveOps(TFE_Context* ctx,
const void* proto,
size_t proto_len,
TF_Status* status);
=head2 TFE_AbortCollectiveOps
=over 2
Aborts all ongoing collectives with the specified status. After the abort,
subsequent collectives will error with this status immediately. To reset the
collectives, create a new EagerContext.
This is intended to be used when a peer failure is detected.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_AbortCollectiveOps(TFE_Context* ctx,
TF_Status* status);
supported on mobile and embedded platforms and will fail if
called.
Pass "library_filename" to a platform-specific mechanism for dynamically
loading a library. The rules for determining the exact location of the
library are platform-specific and are not documented here.
On success, returns the newly created library handle and places OK in status.
The caller owns the library handle.
On failure, returns nullptr and places an error status in status.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern TF_Library* TF_LoadPluggableDeviceLibrary(
const char* library_filename, TF_Status* status);
=head2 TF_DeletePluggableDeviceLibraryHandle
=over 2
lib/AI/TensorFlow/Libtensorflow/Manual/GPU.pod
machine is to use C<libtensorflow> via a Docker container and the
NVIDIA Container Toolkit. See L<AI::TensorFlow::Libtensorflow::Manual::Quickstart/DOCKER IMAGES>
for more information.
=head1 RUNTIME
When running C<libtensorflow>, your program will attempt to acquire quite a bit
of GPU VRAM. You can check if you have enough free VRAM by using the
C<nvidia-smi> command which displays resource information as well as which
processes are currently using the GPU. If C<libtensorflow> is not able to
allocate enough memory, it will crash with an out-of-memory (OOM) error. This
is typical when running multiple programs that each use the GPU.
If you have multiple GPUs, you can control which GPUs your program can access
by using the
L<C<CUDA_VISIBLE_DEVICES> environment variable|https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#env-vars>
provided by the underlying CUDA library. This is typically
done by setting the variable in a C<BEGIN> block before loading
L<AI::TensorFlow::Libtensorflow>:
BEGIN {
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod
}
use URI ();
use HTTP::Tiny ();
use Path::Tiny qw(path);
use File::Which ();
use List::Util 1.56 qw(mesh);
use Data::Printer ( output => 'stderr', return_value => 'void', filters => ['PDL'] );
use Data::Printer::Filter::PDL ();
use Text::Table::Tiny qw(generate_table);
use Imager;
my $s = AI::TensorFlow::Libtensorflow::Status->New;
sub AssertOK {
die "Status $_[0]: " . $_[0]->Message
unless $_[0]->GetCode == AI::TensorFlow::Libtensorflow::Status::OK;
return;
my $response = $http->get( $uri );
die "Could not fetch image from $uri" unless $response->{success};
say "Downloaded $uri";
my $img = Imager->new;
$img->read( data => $response->{content} );
# Create PDL ndarray from Imager data in-memory.
my $data;
$img->write( data => \$data, type => 'raw' )
or die "could not write ". $img->errstr;
die "Image does not have 3 channels, it has @{[ $img->getchannels ]} channels"
if $img->getchannels != 3;
# $data is packed as PDL->dims == [w,h] with RGB pixels
my $pdl_raw = zeros(byte, $img->getchannels, $img->getwidth, $img->getheight);
${ $pdl_raw->get_dataref } = $data;
$pdl_raw->upd_data;
$pdl_raw;
C<detection_classes>: a C<tf.int> tensor of shape [N] containing detection class index from the label file.
=item -
C<detection_scores>: a C<tf.float32> tensor of shape [N] containing detection scores.
=back
=back
Note that the above documentation has two errors: both C<num_detections> and C<detection_classes> are not of type C<tf.int>, but are actually C<tf.float32>.
Now we can load the model from that folder with the tag set C<[ 'serve' ]> by using the C<LoadFromSavedModel> constructor to create a C<::Graph> and a C<::Session> for that graph.
my $opt = AI::TensorFlow::Libtensorflow::SessionOptions->New;
my $graph = AI::TensorFlow::Libtensorflow::Graph->New;
my $session = AI::TensorFlow::Libtensorflow::Session->LoadFromSavedModel(
$opt, undef, $model_base, \@tags, $graph, undef, $s
);
AssertOK($s);
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod
}
use URI ();
use HTTP::Tiny ();
use Path::Tiny qw(path);
use File::Which ();
use List::Util ();
use Data::Printer ( output => 'stderr', return_value => 'void', filters => ['PDL'] );
use Data::Printer::Filter::PDL ();
use Text::Table::Tiny qw(generate_table);
my $s = AI::TensorFlow::Libtensorflow::Status->New;
sub AssertOK {
die "Status $_[0]: " . $_[0]->Message
unless $_[0]->GetCode == AI::TensorFlow::Libtensorflow::Status::OK;
return;
}
AssertOK($s);
[ qw(bgzip -c) ], '>', $hg_bgz_path
);
}
use Bio::Tools::Run::Samtools;
my $hg_bgz_fai_path = "${hg_bgz_path}.fai";
if( ! -e $hg_bgz_fai_path ) {
my $faidx_tool = Bio::Tools::Run::Samtools->new( -command => 'faidx' );
$faidx_tool->run( -fas => $hg_bgz_path )
or die "Could not index FASTA file $hg_bgz_path: " . $faidx_tool->error_string;
}
=head2 Model input and output specification
Now we create a helper to call C<saved_model_cli>, then run C<saved_model_cli scan> to ensure that the model is I/O-free for security reasons.
sub saved_model_cli {
    my (@rest) = @_;
    if( File::Which::which('saved_model_cli')) {
        system(qw(saved_model_cli), @rest ) == 0
            or die "Could not run saved_model_cli";
    } else {
        warn "saved_model_cli(): Install the tensorflow Python package to get the `saved_model_cli` command.\n";
        return -1;
    }
}
$gp->end_multi;
$gp->close;
if( IN_IPERL ) {
IPerl->png( bytestream => path($plot_output_path)->slurp_raw );
}
B<DISPLAY>:
=for html <span style="display:inline-block;margin-left:1em;"><p><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA+gAAAMgCAIAAAA/et9qAAAgAElEQVR4nOzdd2AUVeIH8Ddb0jshBAIEpSo1GjoIpyAgCOqd3uGdoGBBUQQFRUVBRbkTf9gOBQucqFiwUhSSgJQYCCSBkJBAet1k...
=head2 Parts of the original notebook that fall outside the scope
In the original notebook, there are several more steps that have not been ported here:
=over
=item 1.
"Compute contribution scores":
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod
}
use URI ();
use HTTP::Tiny ();
use Path::Tiny qw(path);
use File::Which ();
use List::Util ();
use Data::Printer ( output => 'stderr', return_value => 'void', filters => ['PDL'] );
use Data::Printer::Filter::PDL ();
use Text::Table::Tiny qw(generate_table);
use Imager;
my $s = AI::TensorFlow::Libtensorflow::Status->New;
sub AssertOK {
die "Status $_[0]: " . $_[0]->Message
unless $_[0]->GetCode == AI::TensorFlow::Libtensorflow::Status::OK;
return;
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod view on Meta::CPAN
my $padded = imager_paste_center_pad($rescaled, $image_size,
# ARGB fits in 32-bits (uint32_t)
channels => 4
);
say sprintf "Padded to [ %d x %d ]", $padded->getwidth, $padded->getheight;
# Create PDL ndarray from Imager data in-memory.
my $data;
$padded->write( data => \$data, type => 'raw' )
or die "could not write ". $padded->errstr;
# $data is packed as PDL->dims == [w,h] with ARGB pixels;
# each ulong element is 4 bytes wide: PDL::howbig(ulong) == 4.
my $pdl_raw = zeros(ulong, $padded->getwidth, $padded->getheight);
${ $pdl_raw->get_dataref } = $data;
$pdl_raw->upd_data;
# Split uint32_t pixels into first dimension with 3 channels (R,G,B) with values 0-255.
my @shifts = map 8*$_, 0..2;
my $pdl_channels = $pdl_raw->dummy(0)
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod view on Meta::CPAN
$probabilities_batched->at($label_index,$batch_idx),
) ];
}
say generate_table( rows => [ $header, @rows ], header_row => 1 );
print "\n";
}
}
B<DISPLAY>:
=for html <span style="display:inline-block;margin-left:1em;"><p><table style="width: 100%"><tr><td><tt>apple</tt></td><td><a href="https://upload.wikimedia.org/wikipedia/commons/1/15/Red_Apple.jpg"><img alt="apple" src="https://upload.wikimedia.org/...
my $p_approx_batched = $probabilities_batched->sumover->approx(1, 1e-5);
p $p_approx_batched;
say "All probabilities sum up to approximately 1" if $p_approx_batched->all->sclr;
B<STREAM (STDOUT)>:
All probabilities sum up to approximately 1
B<STREAM (STDERR)>:
lib/AI/TensorFlow/Libtensorflow/Manual/Quickstart.pod view on Meta::CPAN
ok defined $status, 'Created new Status';
These C<libtensorflow> data structures use L<destructors|perlobj/Destructors> where necessary.
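For example, a lexically scoped object frees its underlying C structure when it goes out of scope (a minimal sketch; the bare block is only there to delimit the lifetime):
    {
        my $status = AI::TensorFlow::Libtensorflow::Status->New;
        # ... pass $status as the final argument to libtensorflow calls ...
    } # TF_DeleteStatus is invoked automatically by the destructor here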
=head1 OBJECT TYPES
=over 4
=item L<AI::TensorFlow::Libtensorflow::Status>
Used for error-handling. Many methods take this as the final argument, which
is then checked after the method call to ensure that the call completed successfully.
=item L<AI::TensorFlow::Libtensorflow::Tensor>, L<AI::TensorFlow::Libtensorflow::DataType>
A C<TFTensor> is a multi-dimensional data structure that stores the data for inputs and outputs.
Each element has the same data type,
which is defined by L<AI::TensorFlow::Libtensorflow::DataType>;
thus a C<TFTensor> is considered to be a "homogeneous data structure".
See L<Introduction to Tensors|https://www.tensorflow.org/guide/tensor> for more.
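For example, a C<TFTensor> can be constructed from a C<PDL> ndarray's packed data (a minimal sketch modeled on the notebook helpers; the C<reverse> accounts for the dimension-order difference between PDL's column-major and TF's row-major layout):
    use PDL;
    use AI::TensorFlow::Libtensorflow;
    use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);
    my $p = pdl([ [0.1, 0.2], [0.3, 0.4] ])->float;
    my $t = AI::TensorFlow::Libtensorflow::Tensor->New(
        FLOAT, [ reverse $p->dims ], $p->get_dataref,
        sub { undef $p } # deallocator closure keeps $p alive until TF is done
    );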
lib/AI/TensorFlow/Libtensorflow/Manual/Quickstart.pod view on Meta::CPAN
More information about NVIDIA Docker containers can be found in the
NVIDIA Container Toolkit
L<Installation Guide|https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html>
(specifically L<Setting up NVIDIA Container Toolkit|https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#setting-up-nvidia-container-toolkit>)
and
L<User Guide|https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html>.
=head3 Diagnostics
When using the Docker GPU image, you may come across the error
C<<
nvidia-container-cli: initialization error: load library failed: libnvidia-ml.so.1: cannot open shared object file: no such file or directory: unknown.
>>
Make sure that you have installed the NVIDIA Container Toolkit correctly
via the Installation Guide. Also make sure that you only have one Docker daemon
installed. The recommended approach is to install via the official Docker
releases at L<https://docs.docker.com/engine/install/>. Note that in some
cases, you may have other unofficial Docker installations such as the
C<docker.io> package or the Snap C<docker> package, which may conflict with
the official vendor-provided NVIDIA Container Runtime.
lib/AI/TensorFlow/Libtensorflow/Session.pm view on Meta::CPAN
=back
B<Returns>
=over 4
=item Maybe[TFSession]
A new execution session with the associated graph, or C<undef> on
error.
=back
B<C API>: L<< C<TF_NewSession>|AI::TensorFlow::Libtensorflow::Manual::CAPI/TF_NewSession >>
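A minimal construction sketch (assuming default session options; the status check follows the pattern described in the Quickstart):
    my $graph  = AI::TensorFlow::Libtensorflow::Graph->New;
    my $opts   = AI::TensorFlow::Libtensorflow::SessionOptions->New;
    my $status = AI::TensorFlow::Libtensorflow::Status->New;
    my $session = AI::TensorFlow::Libtensorflow::Session->New($graph, $opts, $status);
    die 'Could not create session: ' . $status->Message
        unless defined $session;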
=head2 LoadFromSavedModel
B<C API>: L<< C<TF_LoadSessionFromSavedModel>|AI::TensorFlow::Libtensorflow::Manual::CAPI/TF_LoadSessionFromSavedModel >>
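A loading sketch in the style of the notebooks (the C<$saved_model_dir> path and the C<serve> tag are assumptions that depend on how the model was exported):
    my $opts   = AI::TensorFlow::Libtensorflow::SessionOptions->New;
    my $graph  = AI::TensorFlow::Libtensorflow::Graph->New;
    my $status = AI::TensorFlow::Libtensorflow::Status->New;
    my $session = AI::TensorFlow::Libtensorflow::Session->LoadFromSavedModel(
        $opts, undef, $saved_model_dir, [ 'serve' ], $graph, undef, $status
    );
    die 'Could not load SavedModel: ' . $status->Message
        unless defined $session;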
=head1 METHODS
lib/AI/TensorFlow/Libtensorflow/Status.pm view on Meta::CPAN
package AI::TensorFlow::Libtensorflow::Status;
# ABSTRACT: Status used for error checking
$AI::TensorFlow::Libtensorflow::Status::VERSION = '0.0.7';
use strict;
use warnings;
use namespace::autoclean;
use AI::TensorFlow::Libtensorflow::Lib;
use FFI::C;
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
lib/AI/TensorFlow/Libtensorflow/Status.pm view on Meta::CPAN
1;
__END__
=pod
=encoding UTF-8
=head1 NAME
AI::TensorFlow::Libtensorflow::Status - Status used for error checking
=for TF_CAPI_EXPORT TF_CAPI_EXPORT extern TF_Status* TF_NewStatus(void);
=for TF_CAPI_EXPORT TF_CAPI_EXPORT extern void TF_DeleteStatus(TF_Status*);
=for TF_CAPI_EXPORT TF_CAPI_EXPORT extern void TF_SetStatus(TF_Status* s, TF_Code code,
const char* msg);
=begin TF_CAPI_EXPORT
TF_CAPI_EXPORT void TF_SetPayload(TF_Status* s, const char* key,
const char* value);
=end TF_CAPI_EXPORT
=begin TF_CAPI_EXPORT
TF_CAPI_EXPORT extern void TF_SetStatusFromIOError(TF_Status* s, int error_code,
const char* context);
=end TF_CAPI_EXPORT
=for TF_CAPI_EXPORT TF_CAPI_EXPORT extern TF_Code TF_GetCode(const TF_Status* s);
=for TF_CAPI_EXPORT TF_CAPI_EXPORT extern const char* TF_Message(const TF_Status* s);
maint/process-capi.pl view on Meta::CPAN
#!/usr/bin/env perl
# PODNAME: gen-capi-docs
# ABSTRACT: Generates POD for C API docs
use strict;
use warnings;
use FindBin;
use lib "$FindBin::Bin/../lib";
use Sub::Uplevel; # place early to override caller()
package TF::CAPI::Extract {
use Mu;
use CLI::Osprey;
use AI::TensorFlow::Libtensorflow::Lib;
use feature qw(say postderef);
use Syntax::Construct qw(heredoc-indent);
use Function::Parameters;
t/upstream/CAPI/015_Graph.t view on Meta::CPAN
TF_Utils::AssertStatusOK($s);
is $feed->NumInputs, 0, 'num inputs';
is $feed->OutputNumConsumers(
$TFOutput->coerce({oper => $feed, index => 0})
), 0, 'output 0 num consumers';
is $feed->NumControlInputs, 0, 'num control inputs';
is $feed->NumControlOutputs, 0, 'num control outputs';
};
subtest 'Test not found errors in TF_Operation*() query functions.' => sub {
is $feed->OutputListLength('bogus', $s), -1, 'bogus output';
note TF_Utils::AssertStatusNotOK($s);
};
note 'Make a constant oper with the scalar "3".';
my $three = TF_Utils::ScalarConst($graph, $s, 'scalar', INT32, 3);
TF_Utils::AssertStatusOK($s);
note 'Add oper.';
my $add = TF_Utils::Add($feed, $three, $graph, $s);
t/upstream/CAPI/027_ShapeInferenceError.t view on Meta::CPAN
#!/usr/bin/env perl
use Test2::V0;
use lib 't/lib';
use TF_TestQuiet;
use TF_Utils;
use aliased 'AI::TensorFlow::Libtensorflow';
subtest "(CAPI, ShapeInferenceError)" => sub {
note q|TF_FinishOperation should fail if the shape of the added operation cannot
be inferred.|;
my $status = AI::TensorFlow::Libtensorflow::Status->New;
my $graph = AI::TensorFlow::Libtensorflow::Graph->New;
note q|Create this failure by trying to add two nodes with incompatible shapes
(A tensor with shape [2] and a tensor with shape [3] cannot be added).|;
my @data = 1..3;
my $vec2_tensor = TF_Utils::Int8Tensor([ @data[0..1] ]);
my $vec2 = TF_Utils::Const($graph, $status, "vec2", $vec2_tensor );
TF_Utils::AssertStatusOK($status);
transformer = TF_CAPI
[-Transformer / TF_Sig]
; in inc/
transformer = TF_Sig
[-Transformer / List]
transformer = List
[Name]
[AllowOverride / NameOverride]
header_re = ^NAME$
; [Version]
[Region / prelude]
[Region / badges]
[Generic / SYNOPSIS]
[Generic / DESCRIPTION]
[Generic / OVERVIEW]
xt/author/pod-snippets.t view on Meta::CPAN
Test::Pod::Snippets::Role::PodLocatable {
use Moose::Role;
use Pod::Simple::Search;
around _parse => sub {
my $orig = shift;
my ($self, $type, $input) = @_;
my $output = eval { $orig->(@_) };
my $error  = $@;
# If the module is not installed in @INC, fall back to locating its
# standalone POD file on disk and parsing that instead.
if( $error && $error =~ /not found in \@INC/ && $type eq 'module' ) {
my $pod_file = Pod::Simple::Search->new->find($input);
if( defined $pod_file && -f $pod_file ) {
return $orig->( $self, 'file', $pod_file );
} else {
die "$error\nUnable to find POD file for $input\n";
}
}
die $error if $error; # rethrow unrelated parse errors instead of swallowing them
return $output;
};
}
package # hide from PAUSE
My::Test::Pod::Snippets::Parser {
use Moose;