AI-TensorFlow-Libtensorflow
view release on metacpan - search on metacpan
view release on metacpan or search on metacpan
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
=over 2
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern bool TFE_CancellationManagerIsCancelled(
TFE_CancellationManager*);
=head2 TFE_CancellationManagerStartCancel
=over 2
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_CancellationManagerStartCancel(
TFE_CancellationManager*);
=head2 TFE_DeleteCancellationManager
=over 2
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_DeleteCancellationManager(
TFE_CancellationManager*);
=head2 TFE_OpSetCancellationManager
=over 2
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_OpSetCancellationManager(
TFE_Op* op, TFE_CancellationManager* cancellation_manager,
TF_Status* status);
=head2 TFE_NewExecutor
=over 2
Creates a new eager Executor. Nodes in one executor are guaranteed to be
executed in sequence. Assigning nodes to different executors allows executing
nodes in parallel.
in_flight_nodes_limit: when is_async is true, this value controls the
maximum number of in-flight async nodes. Enqueuing additional async ops
after the limit is reached blocks until some in-flight nodes finish.
The effect is to bound the memory held by in-flight TensorHandles that are
referenced by the in-flight nodes.
A recommended value has not been established.
A value of 0 removes the limit, which is the behavior of TensorFlow 2.11.
When is_async is false, the value is ignored.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern TFE_Executor* TFE_NewExecutor(
bool is_async, bool enable_streaming_enqueue, int in_flight_nodes_limit);
=head2 TFE_DeleteExecutor
=over 2
Deletes the eager Executor without waiting for enqueued nodes. Please call
TFE_ExecutorWaitForAllPendingNodes before calling this API if you want to
make sure all nodes are finished.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_DeleteExecutor(TFE_Executor*);
=head2 TFE_ExecutorIsAsync
=over 2
Returns true if the executor is in async mode.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern bool TFE_ExecutorIsAsync(TFE_Executor*);
=head2 TFE_ExecutorWaitForAllPendingNodes
=over 2
Causes the calling thread to block until all ops dispatched in this executor
have been executed. Note that "execution" here refers to kernel execution /
scheduling of copies, etc. Similar to sync execution, it doesn't guarantee
that lower level device queues (like GPU streams) have been flushed.
This call may not block for execution of ops enqueued concurrently with this
call.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_ExecutorWaitForAllPendingNodes(
TFE_Executor*, TF_Status* status);
=head2 TFE_ExecutorClearError
=over 2
When an error happens, any pending operations are discarded, and newly issued
ops return an error. This call clears the error state and re-enables
execution of newly issued ops.
Note that outputs of discarded ops remain in a corrupt state and should not
be used for future calls.
TODO(agarwal): mark the affected handles and raise errors if they are used.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_ExecutorClearError(TFE_Executor*);
view all matches for this distributionview release on metacpan - search on metacpan
( run in 0.422 second using v1.00-cache-2.02-grep-82fe00e-cpan-dad7e4baca0 )