=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern void TF_DefaultThreadOptions(TF_ThreadOptions* options);
=head2 TF_StartThread
=over 2
Returns a new thread that is running work_func and is identified
(for debugging/performance-analysis) by thread_name.
The given param (which may be null) is passed to work_func when the thread
starts. In this way, data may be passed from the thread back to the caller.
Caller takes ownership of the result and must call TF_JoinThread on it
eventually.
=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern TF_Thread* TF_StartThread(const TF_ThreadOptions* options,
const char* thread_name,
void (*work_func)(void*),
void* param);
=head2 TF_JoinThread
=over 2
Waits for the given thread to finish execution, then deletes it.
=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern void TF_JoinThread(TF_Thread* thread);
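Taken together with TF_DefaultThreadOptions above, TF_StartThread and TF_JoinThread form a small portable threading facility. Below is a minimal sketch; the worker function, thread name, and shared counter are illustrative, not part of the API.

  #include <stdio.h>
  #include <tensorflow/c/env.h>

  /* Worker that receives the caller's data through the void* param. */
  static void work_func(void* param) {
    int* value = (int*)param;
    *value += 1;  /* shared memory lets the thread report back */
  }

  int main(void) {
    TF_ThreadOptions options;
    TF_DefaultThreadOptions(&options);

    int shared = 41;
    TF_Thread* thread =
        TF_StartThread(&options, "example-thread", work_func, &shared);

    /* The caller owns the TF_Thread* and must eventually join it. */
    TF_JoinThread(thread);
    printf("shared = %d\n", shared);  /* prints 42 */
    return 0;
  }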
=head2 TF_LoadSharedLibrary
=over 2
Load a dynamic library.
Pass "library_filename" to a platform-specific mechanism for dynamically
loading a library. The rules for determining the exact location of the
library are platform-specific and are not documented here.
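The declaration is not reproduced here. As a usage sketch only, assuming the signature found in recent tensorflow/c/env.h headers, void* TF_LoadSharedLibrary(const char* library_filename, TF_Status* status); the library name below is hypothetical:

  #include <stdio.h>
  #include <tensorflow/c/env.h>
  #include <tensorflow/c/tf_status.h>

  /* "libmy_custom_ops.so" is a hypothetical library name. */
  static void* load_ops_library(void) {
    TF_Status* status = TF_NewStatus();
    void* lib = TF_LoadSharedLibrary("libmy_custom_ops.so", status);
    if (TF_GetCode(status) != TF_OK) {
      fprintf(stderr, "load failed: %s\n", TF_Message(status));
      lib = NULL;
    }
    TF_DeleteStatus(status);
    return lib;
  }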
TF_Tensor* dest),
void (*updateFunc)(TF_OpKernelContext* ctx, TF_Tensor* tensor,
TF_Tensor* value, int Op),
TF_Status* status);
=head2 TF_MaybeLockVariableInputMutexesInOrder
=over 2
This is a helper function that acquires mutexes in order to provide a
thread-safe way of performing a weights update during the optimizer op. It
returns an opaque LockHolder handle back to the plugin. This handle is passed
to the Release API for releasing the locks when the weight update is done. The
caller takes ownership of the `source` and `dest` tensors and is responsible
for freeing them with TF_DeleteTensor.
=back
/* From <tensorflow/c/kernels_experimental.h> */
TF_CAPI_EXPORT extern void TF_MaybeLockVariableInputMutexesInOrder(
TF_OpKernelContext* ctx, bool do_lock, bool sparse, const int* const inputs,
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_ContextOptionsSetConfig(
TFE_ContextOptions* options, const void* proto, size_t proto_len,
TF_Status* status);
=head2 TFE_ContextOptionsSetAsync
=over 2
Sets the default execution mode (sync/async). Note that this can be
overridden per thread using TFE_ContextSetExecutorForThread.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_ContextOptionsSetAsync(TFE_ContextOptions*,
unsigned char enable);
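A minimal sketch of enabling async mode when constructing a context (the helper name is illustrative):

  #include <tensorflow/c/eager/c_api.h>

  /* Build a context whose default execution mode is asynchronous. */
  static TFE_Context* make_async_context(TF_Status* status) {
    TFE_ContextOptions* opts = TFE_NewContextOptions();
    TFE_ContextOptionsSetAsync(opts, 1);  /* nonzero enables async */
    TFE_Context* ctx = TFE_NewContext(opts, status);
    TFE_DeleteContextOptions(opts);  /* the context keeps its own copy */
    return ctx;
  }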
=head2 TFE_ContextOptionsSetDevicePlacementPolicy
=over 2
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_ContextClearCaches(TFE_Context* ctx);
=head2 TFE_ContextSetThreadLocalDevicePlacementPolicy
=over 2
Sets a thread-local device placement policy. After this call, other calls to
TFE_Execute in the same thread will use the device policy specified here
instead of the device policy used to construct the context. This has no
effect on the device policy used by other program threads.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_ContextSetThreadLocalDevicePlacementPolicy(
TFE_Context* ctx, TFE_ContextDevicePlacementPolicy policy);
=head2 TFE_ContextGetDevicePlacementPolicy
=over 2
Returns the device placement policy to be used by this context in the current
thread.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern TFE_ContextDevicePlacementPolicy
TFE_ContextGetDevicePlacementPolicy(TFE_Context* ctx);
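The thread-local setter pairs naturally with this getter. A minimal sketch (the helper name is illustrative):

  #include <tensorflow/c/eager/c_api.h>

  /* For this thread only: silently copy tensors to the right device;
     other threads keep the policy the context was built with. */
  static void use_silent_placement(TFE_Context* ctx) {
    TFE_ContextSetThreadLocalDevicePlacementPolicy(
        ctx, TFE_DEVICE_PLACEMENT_SILENT);

    TFE_ContextDevicePlacementPolicy policy =
        TFE_ContextGetDevicePlacementPolicy(ctx);
    /* policy == TFE_DEVICE_PLACEMENT_SILENT in this thread */
    (void)policy;
  }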
=head2 TFE_ContextSetServerDef
=over 2
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern bool TFE_ExecutorIsAsync(TFE_Executor*);
=head2 TFE_ExecutorWaitForAllPendingNodes
=over 2
Causes the calling thread to block until all ops dispatched in this executor
have been executed. Note that "execution" here refers to kernel execution /
scheduling of copies, etc. As with sync execution, it doesn't guarantee
that lower-level device queues (such as GPU streams) have been flushed.
This call may not block for execution of ops enqueued concurrently with this
call.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_ExecutorWaitForAllPendingNodes(
    TFE_Executor*, TF_Status* status);
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_ExecutorClearError(TFE_Executor*);
=head2 TFE_ContextSetExecutorForThread
=over 2
Sets a custom Executor for the current thread. All nodes created by this
thread will be added to this Executor. It will override the current executor.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_ContextSetExecutorForThread(TFE_Context*,
TFE_Executor*);
=head2 TFE_ContextGetExecutorForThread
=over 2
Returns the Executor for the current thread.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern TFE_Executor* TFE_ContextGetExecutorForThread(
TFE_Context*);
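A minimal sketch tying the executor calls together: fetch the current thread's executor and, if it is asynchronous, drain it (the helper name is illustrative):

  #include <tensorflow/c/eager/c_api.h>
  #include <tensorflow/c/eager/c_api_experimental.h>

  /* Drain whatever executor the current thread is using. */
  static void drain_thread_executor(TFE_Context* ctx, TF_Status* status) {
    TFE_Executor* exec = TFE_ContextGetExecutorForThread(ctx);
    if (TFE_ExecutorIsAsync(exec)) {
      /* Block until every node dispatched on this executor has run. */
      TFE_ExecutorWaitForAllPendingNodes(exec, status);
    }
  }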
=head2 TFE_ContextUpdateServerDef
=over 2
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern bool TFE_ContextCheckAlive(TFE_Context* ctx,
const char* worker_name,
TF_Status* status);
=head2 TFE_ContextAsyncWait
=over 2
Syncs pending nodes in local executors (including the context default executor
and thread executors) as well as streaming requests to remote executors, and
reports the combined status.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_ContextAsyncWait(TFE_Context* ctx,
TF_Status* status);
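A minimal sketch using this call as a synchronization barrier before reading results in async mode (the helper name is illustrative):

  #include <stdio.h>
  #include <tensorflow/c/eager/c_api_experimental.h>

  /* Wait for all local and remote pending work, then check the result. */
  static void sync_all(TFE_Context* ctx, TF_Status* status) {
    TFE_ContextAsyncWait(ctx, status);
    if (TF_GetCode(status) != TF_OK) {
      fprintf(stderr, "async wait failed: %s\n", TF_Message(status));
    }
  }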
=head2 TFE_TensorHandleDevicePointer