AI-TensorFlow-Libtensorflow

 view release on metacpan or  search on metacpan

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

                                                          size_t proto_len,
                                                          TF_Status* status);

=head2 TF_FunctionGetAttrValueProto

=over 2

  Sets `output_attr_value` to the binary-serialized AttrValue proto
  representation of the value of the `attr_name` attr of `func`.
  If `attr_name` attribute is not present, status is set to an error.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_FunctionGetAttrValueProto(
      TF_Function* func, const char* attr_name, TF_Buffer* output_attr_value,
      TF_Status* status);

=head2 TF_DeleteFunction

=over 2

  Frees the memory used by the `func` struct.
  TF_DeleteFunction is a noop if `func` is null.
  Deleting a function does not remove it from any graphs it was copied to.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_DeleteFunction(TF_Function* func);

=head2 TF_TryEvaluateConstant

=over 2

  Attempts to evaluate `output`. This will only be possible if `output` doesn't
  depend on any graph inputs (this function is safe to call if this isn't the
  case though).
  
  If the evaluation is successful, this function returns true and `output`'s
  value is returned in `result`. Otherwise returns false. An error status is
  returned if something is wrong with the graph or input. Note that this may
  return false even if no error status is set.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern unsigned char TF_TryEvaluateConstant(TF_Graph* graph,
                                                             TF_Output output,
                                                             TF_Tensor** result,
                                                             TF_Status* status);

=head2 TF_NewSession

=over 2

  Return a new execution session with the associated graph, or NULL on
  error. Does not take ownership of any input parameters.
  
  *`graph` must be a valid graph (not deleted or nullptr). `graph` will be
  kept alive for the lifetime of the returned TF_Session. New nodes can still
  be added to `graph` after this call.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern TF_Session* TF_NewSession(TF_Graph* graph,
                                                  const TF_SessionOptions* opts,
                                                  TF_Status* status);

=head2 TF_LoadSessionFromSavedModel

=over 2

  This function creates a new TF_Session (which is created on success) using
  `session_options`, and then initializes state (restoring tensors and other
  assets) using `run_options`.
  
  Any NULL and non-NULL value combinations for (`run_options`, `meta_graph_def`)
  are valid.
  
  - `export_dir` must be set to the path of the exported SavedModel.
  - `tags` must include the set of tags used to identify one MetaGraphDef in
     the SavedModel.
  - `graph` must be a graph newly allocated with TF_NewGraph().
  
  If successful, populates `graph` with the contents of the Graph and
  `meta_graph_def` with the MetaGraphDef of the loaded model.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern TF_Session* TF_LoadSessionFromSavedModel(
      const TF_SessionOptions* session_options, const TF_Buffer* run_options,
      const char* export_dir, const char* const* tags, int tags_len,
      TF_Graph* graph, TF_Buffer* meta_graph_def, TF_Status* status);

=head2 TF_CloseSession

=over 2

  Close a session.
  
  Contacts any other processes associated with the session, if applicable.
  May not be called after TF_DeleteSession().

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_CloseSession(TF_Session*, TF_Status* status);

=head2 TF_DeleteSession

=over 2

  Destroy a session object.
  
  Even if error information is recorded in *status, this call discards all
  local resources associated with the session.  The session may not be used
  during or after this call (and the session drops its reference to the
  corresponding graph).

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN


  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern TF_DeviceList* TFE_ContextListDevices(TFE_Context* ctx,
                                                              TF_Status* status);

=head2 TFE_ContextClearCaches

=over 2

  Clears the internal caches in the TFE context. Useful when reseeding random
  ops.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_ContextClearCaches(TFE_Context* ctx);

=head2 TFE_ContextSetThreadLocalDevicePlacementPolicy

=over 2

  Sets a thread-local device placement policy. After this call, other calls to
  TFE_Execute in the same thread will use the device policy specified here
  instead of the device policy used to construct the context. This has no
  effect on the device policy used by other program threads.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_ContextSetThreadLocalDevicePlacementPolicy(
      TFE_Context* ctx, TFE_ContextDevicePlacementPolicy policy);

=head2 TFE_ContextGetDevicePlacementPolicy

=over 2

  Returns the device placement policy to be used by this context in the current
  thread.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern TFE_ContextDevicePlacementPolicy
  TFE_ContextGetDevicePlacementPolicy(TFE_Context* ctx);

=head2 TFE_ContextSetServerDef

=over 2

  A tensorflow.ServerDef specifies remote workers (in addition to the current
  worker's name). Operations created in this context can then be executed on
  any of these remote workers by setting an appropriate device.
  
  If the following is set, all servers identified by the
  ServerDef must be up when the context is created.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_ContextSetServerDef(TFE_Context* ctx,
                                                     int keep_alive_secs,
                                                     const void* proto,
                                                     size_t proto_len,
                                                     TF_Status* status);

=head2 TFE_NewTensorHandle

=over 2

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_NewTensorHandle(const TF_Tensor* t,
                                                              TF_Status* status);

=head2 TFE_DeleteTensorHandle

=over 2

  Indicates that the caller will not be using `h` any more.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_DeleteTensorHandle(TFE_TensorHandle* h);

=head2 TFE_TensorHandleDataType

=over 2

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern TF_DataType TFE_TensorHandleDataType(TFE_TensorHandle* h);

=head2 TFE_TensorHandleNumDims

=over 2

  This function will block till the operation that produces `h` has completed.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern int TFE_TensorHandleNumDims(TFE_TensorHandle* h,
                                                    TF_Status* status);

=head2 TFE_TensorHandleNumElements

=over 2

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern int64_t TFE_TensorHandleNumElements(TFE_TensorHandle* h,
                                                            TF_Status* status);

=head2 TFE_TensorHandleDim

=over 2

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  TF_CAPI_EXPORT extern void TFE_ExecutorWaitForAllPendingNodes(
      TFE_Executor*, TF_Status* status);

=head2 TFE_ExecutorClearError

=over 2

  When an error happens, any pending operations are discarded, and newly issued
  ops return an error. This call clears the error state and re-enables
  execution of newly issued ops.
  
  Note that outputs of discarded ops remain in a corrupt state and should not
  be used for future calls.
  TODO(agarwal): mark the affected handles and raise errors if they are used.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_ExecutorClearError(TFE_Executor*);

=head2 TFE_ContextSetExecutorForThread

=over 2

  Sets a custom Executor for the current thread. All nodes created by this
  thread will be added to this Executor. It will override the current executor.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_ContextSetExecutorForThread(TFE_Context*,
                                                             TFE_Executor*);

=head2 TFE_ContextGetExecutorForThread

=over 2

  Returns the Executor for the current thread.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern TFE_Executor* TFE_ContextGetExecutorForThread(
      TFE_Context*);

=head2 TFE_ContextUpdateServerDef

=over 2

  Update an existing context with a new set of servers defined in a ServerDef
  proto. Servers can be added to and removed from the list of remote workers
  in the context. A new set of servers identified by the ServerDef must be up
  when the context is updated.
  
  This API is for experimental usage and may be subject to change.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_ContextUpdateServerDef(TFE_Context* ctx,
                                                        int keep_alive_secs,
                                                        const void* proto,
                                                        size_t proto_len,
                                                        TF_Status* status);

=head2 TFE_ContextCheckAlive

=over 2

  Checks whether a remote worker is alive or not. This will return true even if
  the context doesn't exist on the remote worker.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern bool TFE_ContextCheckAlive(TFE_Context* ctx,
                                                   const char* worker_name,
                                                   TF_Status* status);

=head2 TFE_ContextAsyncWait

=over 2

  Sync pending nodes in local executors (including the context default executor
  and thread executors) and streaming requests to remote executors, and get the
  combined status.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_ContextAsyncWait(TFE_Context* ctx,
                                                  TF_Status* status);

=head2 TFE_TensorHandleDevicePointer

=over 2

  This function will block till the operation that produces `h` has
  completed. This is only valid on local TFE_TensorHandles. The pointer
  returned will be on the device in which the TFE_TensorHandle resides (so e.g.
  for a GPU tensor this will return a pointer to GPU memory). The pointer is
  only guaranteed to be valid until TFE_DeleteTensorHandle is called on this
  TensorHandle. Only supports POD data types.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void* TFE_TensorHandleDevicePointer(TFE_TensorHandle*,
                                                            TF_Status*);

=head2 TFE_TensorHandleDeviceMemorySize

=over 2

  This function will block till the operation that produces `h` has
  completed. This is only valid on local TFE_TensorHandles. Returns the size in
  bytes of the memory pointed to by the device pointer returned above.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern size_t TFE_TensorHandleDeviceMemorySize(TFE_TensorHandle*,
                                                                TF_Status*);

=head2 TFE_NewTensorHandleFromDeviceMemory

=over 2

  Creates a new TensorHandle from memory residing in the physical device
  device_name. Takes ownership of the memory, and will call deleter to release
  it after TF no longer needs it or in case of error.
  
  Custom devices must use TFE_NewCustomDeviceTensorHandle instead.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_NewTensorHandleFromDeviceMemory(
      TFE_Context* ctx, const char* device_name, TF_DataType, const int64_t* dims,
      int num_dims, void* data, size_t len,
      void (*deallocator)(void* data, size_t len, void* arg),
      void* deallocator_arg, TF_Status* status);

=head2 TFE_HostAddressSpace

=over 2

  Retrieves the address space (i.e. job, replica, task) of the local host and
  saves it in the buffer.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_HostAddressSpace(TFE_Context* ctx,
                                                  TF_Buffer* buf);

=head2 TFE_OpGetAttrs

=over 2

  Fetch a reference to `op`'s attributes. The returned reference is only valid
  while `op` is alive.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern const TFE_OpAttrs* TFE_OpGetAttrs(const TFE_Op* op);

=head2 TFE_OpAddAttrs

=over 2

  Add attributes in `attrs` to `op`.
  
  Does not overwrite or update existing attributes, but adds new ones.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_OpAddAttrs(TFE_Op* op, const TFE_OpAttrs* attrs);

=head2 TFE_OpAttrsSerialize

=over 2

  Serialize `attrs` as a tensorflow::NameAttrList protocol buffer (into `buf`),
  containing the op name and a map of its attributes.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_OpAttrsSerialize(const TFE_OpAttrs* attrs,
                                                  TF_Buffer* buf,
                                                  TF_Status* status);

=head2 TFE_OpSetAttrValueProto

=over 2

  Set an op's attribute from a serialized AttrValue protocol buffer.
  
  Analogous to TF_SetAttrValueProto for building graph operations.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_OpSetAttrValueProto(const TFE_Op* op,
                                                     const char* attr_name,
                                                     const void* proto,
                                                     size_t proto_len,
                                                     TF_Status* status);

=head2 TFE_RegisterCustomDevice

=over 2

  Registers a custom device for use with eager execution.
  
  Eager operations may be placed on this device, e.g.  `with
  tf.device("CUSTOM"):` from Python if `device_name` for this call is
  "/job:localhost/replica:0/task:0/device:CUSTOM:0".
  



( run in 0.297 second using v1.01-cache-2.11-cpan-754626df90b )