AI-TensorFlow-Libtensorflow
view release on metacpan or search on metacpan
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
=back
/* From <tensorflow/c/experimental/saved_model/public/signature_def_param_list.h> */
TF_CAPI_EXPORT extern const TF_SignatureDefParam* TF_SignatureDefParamListGet(
const TF_SignatureDefParamList* list, int i);
=head2 TF_SignatureDefFunctionMetadataArgs
=over 2
Retrieves the arguments of the SignatureDefFunction. The caller is not
responsible for freeing the returned pointer.
=back
/* From <tensorflow/c/experimental/saved_model/public/signature_def_function_metadata.h> */
TF_CAPI_EXPORT extern const TF_SignatureDefParamList*
TF_SignatureDefFunctionMetadataArgs(
const TF_SignatureDefFunctionMetadata* list);
=head2 TF_SignatureDefFunctionMetadataReturns
=over 2
Retrieves the returns of the SignatureDefFunction. The caller is not
responsible for freeing the returned pointer.
=back
/* From <tensorflow/c/experimental/saved_model/public/signature_def_function_metadata.h> */
TF_CAPI_EXPORT extern const TF_SignatureDefParamList*
TF_SignatureDefFunctionMetadataReturns(
const TF_SignatureDefFunctionMetadata* list);
=head2 TF_EnableXLACompilation
=over 2
When `enable` is true, set
tensorflow.ConfigProto.OptimizerOptions.global_jit_level to ON_1, and also
set XLA flag values to prepare for XLA compilation. Otherwise set
global_jit_level to OFF.
This and the next API are syntax sugar over TF_SetConfig(), and are used by
clients that cannot read/write the tensorflow.ConfigProto proto.
TODO: Migrate to TF_CreateConfig() below.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TF_EnableXLACompilation(TF_SessionOptions* options,
unsigned char enable);
=head2 TF_SetXlaEnableLazyCompilation
=over 2
Set XLA's internal BuildXlaOpsPassFlags.tf_xla_enable_lazy_compilation to the
value of `enable`. Also returns the original value of that flag.
Use in tests to allow XLA to fallback to TF classic. This has global effect.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT unsigned char TF_SetXlaEnableLazyCompilation(
unsigned char enable);
=head2 TF_SetTfXlaCpuGlobalJit
=over 2
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT unsigned char TF_SetTfXlaCpuGlobalJit(unsigned char enable);
=head2 TF_SetXlaAutoJitMode
=over 2
Sets XLA's auto jit mode according to the specified string, which is parsed
as if passed in XLA_FLAGS. This has global effect.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT void TF_SetXlaAutoJitMode(const char* mode);
=head2 TF_GetXlaAutoJitEnabled
=over 2
Returns whether the single GPU or general XLA auto jit optimizations are
enabled through MarkForCompilationPassFlags.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT unsigned char TF_GetXlaAutoJitEnabled();
=head2 TF_SetXlaMinClusterSize
=over 2
Sets XLA's minimum cluster size. This has global effect.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT void TF_SetXlaMinClusterSize(int size);
=head2 TF_GetXlaConstantFoldingDisabled
=over 2
Gets/Sets TF/XLA flag for whether (true) or not (false) to disable constant
folding. This is for testing to ensure that XLA is being tested rather than
Tensorflow's CPU implementation through constant folding.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT unsigned char TF_GetXlaConstantFoldingDisabled();
=head2 TF_SetXlaConstantFoldingDisabled
=over 2
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT void TF_SetXlaConstantFoldingDisabled(
unsigned char should_enable);
=head2 TF_CreateConfig
=over 2
Create a serialized tensorflow.ConfigProto proto, where:
a) ConfigProto.optimizer_options.global_jit_level is set to ON_1 if
`enable_xla_compilation` is non-zero, and OFF otherwise.
b) ConfigProto.gpu_options.allow_growth is set to `gpu_memory_allow_growth`.
c) ConfigProto.device_count is set to `num_cpu_devices`.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern TF_Buffer* TF_CreateConfig(
unsigned char enable_xla_compilation, unsigned char gpu_memory_allow_growth,
unsigned int num_cpu_devices);
=head2 TF_CreateRunOptions
=over 2
Create a serialized tensorflow.RunOptions proto, where RunOptions.trace_level
is set to FULL_TRACE if `enable_full_trace` is non-zero, and NO_TRACE
otherwise.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern TF_Buffer* TF_CreateRunOptions(
unsigned char enable_full_trace);
=head2 TF_GraphDebugString
=over 2
Returns the graph content in a human-readable format, with length set in
`len`. The format is subject to change in the future.
The returned string is heap-allocated, and caller should call free() on it.
=back
/* From <tensorflow/c/c_api_experimental.h> */
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod view on Meta::CPAN
int num_values);
=head2 TF_AttrBuilderCheckCanRunOnDevice
=over 2
Checks the tensorflow::NodeDef built via the methods above to see if it can
run on device_type.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TF_AttrBuilderCheckCanRunOnDevice(
TF_AttrBuilder* builder, const char* device_type, TF_Status* status);
=head2 TF_GetNumberAttrForOpListInput
=over 2
For argument number input_index, fetch the corresponding number_attr that
needs to be updated with the argument length of the input list.
Returns nullptr if there is any problem like op_name is not found, or the
argument does not support this attribute type.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern const char* TF_GetNumberAttrForOpListInput(
const char* op_name, int input_index, TF_Status* status);
=head2 TF_OpIsStateful
=over 2
Returns 1 if the op is stateful, 0 otherwise. The return value is undefined
if the status is not ok.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern int TF_OpIsStateful(const char* op_type,
TF_Status* status);
=head2 TF_InitMain
=over 2
Platform specific initialization routine. Very few platforms actually require
this to be called.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT void TF_InitMain(const char* usage, int* argc, char*** argv);
=head2 TF_PickUnusedPortOrDie
=over 2
Platform-specific implementation to return an unused port. (This should be
used in tests only.)
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT int TF_PickUnusedPortOrDie(void);
=head2 TFE_NewTensorHandleFromScalar
=over 2
Fast path method that makes constructing a single scalar tensor require less
overhead and copies.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_NewTensorHandleFromScalar(
TF_DataType data_type, void* data, size_t len, TF_Status* status);
=head2 TFE_EnableCollectiveOps
=over 2
Specify the server_def that enables collective ops.
This is different to the above function in that it doesn't create remote
contexts, and remotely executing ops is not possible. It just enables
communication for collective ops.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_EnableCollectiveOps(TFE_Context* ctx,
const void* proto,
size_t proto_len,
TF_Status* status);
=head2 TFE_AbortCollectiveOps
=over 2
Aborts all ongoing collectives with the specified status. After abortion,
subsequent collectives will error with this status immediately. To reset the
collectives, create a new EagerContext.
This is intended to be used when a peer failure is detected.
=back
/* From <tensorflow/c/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_AbortCollectiveOps(TFE_Context* ctx,
TF_Status* status);
=head2 TFE_CollectiveOpsCheckPeerHealth
=over 2
Checks the health of collective ops peers. Explicit health check is needed in
multi worker collective ops to detect failures in the cluster. If a peer is
down, collective ops may hang.
( run in 1.724 second using v1.01-cache-2.11-cpan-62a16548d74 )