AI-TensorFlow-Libtensorflow

view release on metacpan or search on metacpan

CONTRIBUTING  view on Meta::CPAN

"You" or "Your" shall mean the copyright owner, or legal entity authorized by the copyright owner, that is making this Agreement.  For legal entities, the entity making a Contribution and all other entities that control, are controlled by, or are und...

"APTech" is defined as the Delaware corporation named Auto-Parallel Technologies, Inc. with a primary place of business in Cedar Park, Texas, USA.

The "APTech Family of software and documentation" (hereinafter the "APTech Family") is defined as all copyrightable works identified as "part of the APTech Family" immediately following their copyright notice, and includes but is not limited to this ...

"Team APTech" is defined as all duly-authorized contributors to the APTech Family, including You after making Your first Contribution to the APTech Family under the terms of this Agreement.

"Team APTech Leadership" is defined as all duly-authorized administrators and official representatives of the APTech Family, as listed publicly on the most up-to-date copy of the AutoParallel.com website.

"Contribution" shall mean any original work of authorship, including any changes or additions or enhancements to an existing work, that is intentionally submitted by You to this repository for inclusion in, or documentation of, any of the products or...

2. Assignment of Copyright.  Subject to the terms and conditions of this Agreement, and for good and valuable consideration, receipt of which You acknowledge, You hereby transfer to the Delaware corporation named Auto-Parallel Technologies, Inc. with...

You hereby agree that if You have or acquire hereafter any patent or interface copyright or other intellectual property interest dominating the software or documentation contributed to by the Work (or use of that software or documentation), such domi...

You hereby represent and warrant that You are the sole copyright holder for the Work and that You have the right and power to enter into this legally-binding contractual agreement.  You hereby indemnify and hold harmless APTech, its heirs, assignees,...

3. Grant of Patent License.  Subject to the terms and conditions of this Agreement, You hereby grant to APTech and to recipients of software distributed by APTech a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as ...

4. You represent that you are legally entitled to assign the above copyright and grant the above patent license.  If your employer(s) or contractee(s) have rights to intellectual property that you create that includes your Contributions, then you rep...

Changes  view on Meta::CPAN


  Refactoring

   - Add timer to the notebooks to time the inference steps. See <https://github.com/EntropyOrg/perl-AI-TensorFlow-Libtensorflow/pull/17>.

  Documentation

   - Add information about installing GPU version of `libtensorflow` either on
     the "bare metal" or with Docker GPU runtime support. See <https://github.com/EntropyOrg/perl-AI-TensorFlow-Libtensorflow/pull/18>.

  Build changes

   - Add Dockerfile that builds GPU version of the omnibus notebook image.
     Update the CI to additionally build the GPU Docker image. See <https://github.com/EntropyOrg/perl-AI-TensorFlow-Libtensorflow/pull/16>.

0.0.6 2023-01-30 15:22:04-0500

  - Documentation

      - Fix NAME for Notebook POD.

LICENSE  view on Meta::CPAN


   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained

lib/AI/TensorFlow/Libtensorflow/Input.pm  view on Meta::CPAN

package AI::TensorFlow::Libtensorflow::Input;
# ABSTRACT: Input of operation as (operation, index) pair
$AI::TensorFlow::Libtensorflow::Input::VERSION = '0.0.7';
# See L<AI::TensorFlow::Libtensorflow::Output> for similar.
# In fact, they are mostly the same, but keeping the classes separate for now
# in case the upstream API changes.

use strict;
use warnings;
use namespace::autoclean;
# FFI::Platypus::Record lets this class be laid out as a C struct
# (the libtensorflow TF_Input record: an operation pointer plus an index).
use FFI::Platypus::Record;
# Custom FFI type so arrays of these records can be passed as a single
# contiguous buffer across the C boundary.
use AI::TensorFlow::Libtensorflow::Lib::FFIType::Variant::RecordArrayRef;

use AI::TensorFlow::Libtensorflow::Lib;
# Shared FFI::Platypus instance for the whole distribution; record fields
# and attached C functions below (outside this view) are registered on it.
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
# NOTE(review): mangler_default presumably maps short Perl-side names onto
# the C library's prefixed symbol names (e.g. TF_*) — confirm in ::Lib.
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

                                                TF_Operation* input);

=head2 TF_ColocateWith

=over 2

  Request that `desc` be co-located on the device where `op`
  is placed.
  
  Use of this is discouraged since the implementation of device placement is
  subject to change. Primarily intended for internal libraries

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_ColocateWith(TF_OperationDescription* desc,
                                             TF_Operation* op);

=head2 TF_SetAttrString

=over 2

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern void TF_OperationAllInputs(TF_Operation* oper,
                                                   TF_Output* inputs,
                                                   int max_inputs);

=head2 TF_OperationOutputNumConsumers

=over 2

  Get the number of current consumers of a specific output of an
  operation.  Note that this number can change when new operations
  are added to the graph.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern int TF_OperationOutputNumConsumers(TF_Output oper_out);

=head2 TF_OperationOutputConsumers

=over 2

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN


  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern int TF_OperationGetControlInputs(
      TF_Operation* oper, TF_Operation** control_inputs, int max_control_inputs);

=head2 TF_OperationNumControlOutputs

=over 2

  Get the number of operations that have `*oper` as a control input.
  Note that this number can change when new operations are added to
  the graph.

=back

  /* From <tensorflow/c/c_api.h> */
  TF_CAPI_EXPORT extern int TF_OperationNumControlOutputs(TF_Operation* oper);

=head2 TF_OperationGetControlOutputs

=over 2

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

      TF_Graph* graph, const TF_Buffer* graph_def,
      const TF_ImportGraphDefOptions* options, TF_Status* status);

=head2 TF_GraphCopyFunction

=over 2

  Adds a copy of function `func` and optionally its gradient function `grad`
  to `g`. Once `func`/`grad` is added to `g`, it can be called by creating
  an operation using the function's name.
  Any changes to `func`/`grad` (including deleting it) done after this method
  returns, won't affect the copy of `func`/`grad` in `g`.
  If `func` or `grad` are already in `g`, TF_GraphCopyFunction has no
  effect on them, but can establish the function->gradient relationship
  between them if `func` does not already have a gradient. If `func` already
  has a gradient different from `grad`, an error is returned.
  
  `func` must not be null.
  If `grad` is null and `func` is not in `g`, `func` is added without a
  gradient.
  If `grad` is null and `func` is in `g`, TF_GraphCopyFunction is a noop.

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

=head2 TF_GetStream

=over 2

  TF_GetStream returns the SP_Stream available in ctx.
  This function returns a stream only for devices registered using the
  StreamExecutor C API
  (tensorflow/c/experimental/stream_executor/stream_executor.h). It will return
  nullptr and set error status in all other cases.
  Experimental: this function doesn't have compatibility guarantees and subject
  to change at any time.

=back

  /* From <tensorflow/c/kernels.h> */
  TF_CAPI_EXPORT extern SP_Stream TF_GetStream(TF_OpKernelContext* ctx,
                                               TF_Status* status);

=head2 TF_NumInputs

=over 2

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  the size of 'retvals' is less than the number of outputs. This call sets
  *num_retvals to the number of outputs.
  
  If async execution is enabled, the call may simply enqueue the execution
  and return "non-ready" handles in `retvals`. Note that any handles contained
  in 'op' should not be mutated till the kernel execution actually finishes.
  
  For sync execution, if any of the inputs to `op` are not ready, this call
  will block till they become ready and then return when the kernel execution
  is done.
  TODO(agarwal): change num_retvals to int from int*.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_Execute(TFE_Op* op, TFE_TensorHandle** retvals,
                                         int* num_retvals, TF_Status* status);

=head2 TFE_ContextAddFunctionDef

=over 2

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  TF_CAPI_EXPORT extern void TFE_ContextOptionsSetTfrtDistributedRuntime(
      TFE_ContextOptions* options, bool use_tfrt_distributed_runtime);

=head2 TFE_GetContextId

=over 2

  Returns the context_id from the EagerContext which is used by the
  EagerService to maintain consistency between client and worker. The
  context_id is initialized with a dummy value and is later set when the worker
  is initialized (either locally or remotely). The context_id can change during
  the process lifetime although this should cause the worker to be
  reinitialized (e.g. cleared caches) as well.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern uint64_t TFE_GetContextId(TFE_Context* ctx);

=head2 TFE_NewCancellationManager

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN


=head2 TFE_ContextUpdateServerDef

=over 2

  Update an existing context with a new set of servers defined in a ServerDef
  proto. Servers can be added to and removed from the list of remote workers
  in the context. A new set of servers identified by the ServerDef must be up
  when the context is updated.
  
  This API is for experimental usage and may be subject to change.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_ContextUpdateServerDef(TFE_Context* ctx,
                                                        int keep_alive_secs,
                                                        const void* proto,
                                                        size_t proto_len,
                                                        TF_Status* status);

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  
  `device_name` must not name an existing physical or custom device. It must
  follow the format:
  
     /job:<name>/replica:<replica>/task:<task>/device:<type>:<device_num>
  
  If the device is successfully registered, `status` is set to TF_OK. Otherwise
  the device is not usable. In case of a bad status, `device.delete_device` is
  still called on `device_info` (i.e. the caller does not retain ownership).
  
  This API is highly experimental, and in particular is expected to change when
  it starts supporting operations with attributes and when tf.function support
  is added.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_RegisterCustomDevice(TFE_Context* ctx,
                                                      TFE_CustomDevice device,
                                                      const char* device_name,
                                                      void* device_info,

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

=head2 TF_ConcreteFunctionMakeCallOp

=over 2

  Returns a TFE_Op suitable for executing this function. Caller must provide
  all function inputs in `inputs`, and must not add any additional inputs on
  the returned op. (i.e. don't call TFE_OpAddInput or TFE_OpAddInputList).
  The caller is responsible for deleting the returned TFE_Op. If op
  construction fails, `status` will be non-OK and the returned pointer will be
  null.
  TODO(bmzhao): Remove this function in a subsequent change; Design + implement
  a Function Execution interface for ConcreteFunction that accepts a tagged
  union of types (tensorflow::Value). This effectively requires moving much of
  the implementation of function.py/def_function.py to C++, and exposing a
  high-level API here. A strawman for what this interface could look like:
  TF_Value* TF_ExecuteFunction(TFE_Context*, TF_ConcreteFunction*, TF_Value*
  inputs, int num_inputs, TF_Status* status);

=back

  /* From <tensorflow/c/experimental/saved_model/public/concrete_function.h> */

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN


  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT extern TF_Buffer* TF_CreateRunOptions(
      unsigned char enable_full_trace);

=head2 TF_GraphDebugString

=over 2

  Returns the graph content in a human-readable format, with length set in
  `len`. The format is subject to change in the future.
  The returned string is heap-allocated, and caller should call free() on it.

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT extern const char* TF_GraphDebugString(TF_Graph* graph,
                                                        size_t* len);

=head2 TF_FunctionDebugString

=over 2

  Returns the function content in a human-readable format, with length set in
  `len`. The format is subject to change in the future.
  The returned string is heap-allocated, and caller should call free() on it.
  
  Do not return const char*, because some foreign language binding
  (e.g. swift) cannot then call free() on the returned pointer.

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT extern char* TF_FunctionDebugString(TF_Function* func,
                                                     size_t* len);

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod  view on Meta::CPAN

  Method name is: tensorflow/serving/predict

B<RESULT>:

  1

The above C<saved_model_cli> output shows that the model input is at C<serving_default_inputs:0> which means the operation named C<serving_default_inputs> at index C<0> and the output is at C<StatefulPartitionedCall:0> which means the operation named...

It also shows the type and shape of the C<TFTensor>s for those inputs and outputs. Together this is known as a signature.

For the C<input>, we have C<(-1, 224, 224, 3)> which is a L<common input image specification for TensorFlow Hub|https://www.tensorflow.org/hub/common_signatures/images#input>. This is known as C<channels_last> (or C<NHWC>) layout where the TensorFlow...

For the C<output>, we have C<(-1, 1001)> which is C<[batch_size, num_classes]> where the elements are scores that the image received for that ImageNet class.

Now we can load the model from that folder with the tag set C<[ 'serve' ]> by using the C<LoadFromSavedModel> constructor to create a C<::Graph> and a C<::Session> for that graph.

  my $opt = AI::TensorFlow::Libtensorflow::SessionOptions->New;
  
  my $graph = AI::TensorFlow::Libtensorflow::Graph->New;
  my $session = AI::TensorFlow::Libtensorflow::Session->LoadFromSavedModel(
      $opt, undef, $model_base, \@tags, $graph, undef, $s

lib/AI/TensorFlow/Libtensorflow/Manual/Quickstart.pod  view on Meta::CPAN

AI::TensorFlow::Libtensorflow::Manual::Quickstart - Start here for an overview of the library

=head1 DESCRIPTION

This provides a tour of C<libtensorflow> to help get started with using the
library.

=head1 CONVENTIONS

The library uses UpperCamelCase naming convention for method names in order to
match the underlying C library (for compatibility with future API changes) and
to make translating code from C easier as this is a low-level API.

As such, constructors for objects that correspond to C<libtensorflow> data
structures are typically called C<New>. For example, a new
L<AI::TensorFlow::Libtensorflow::Status> object can be created as follows

  use AI::TensorFlow::Libtensorflow::Status;
  my $status = AI::TensorFlow::Libtensorflow::Status->New;

  ok defined $status, 'Created new Status';

lib/AI/TensorFlow/Libtensorflow/Output.pm  view on Meta::CPAN

package AI::TensorFlow::Libtensorflow::Output;
# ABSTRACT: Output of operation as (operation, index) pair
$AI::TensorFlow::Libtensorflow::Output::VERSION = '0.0.7';
# See L<AI::TensorFlow::Libtensorflow::Input> for similar.
# In fact, they are mostly the same, but keeping the classes separate for now
# in case the upstream API changes.

use strict;
use warnings;
use namespace::autoclean;
# FFI::Platypus::Record lets this class be laid out as a C struct
# (the libtensorflow TF_Output record: an operation pointer plus an index).
use FFI::Platypus::Record;
# Custom FFI type so arrays of these records can be passed as a single
# contiguous buffer across the C boundary.
use AI::TensorFlow::Libtensorflow::Lib::FFIType::Variant::RecordArrayRef;

use AI::TensorFlow::Libtensorflow::Lib;
# Shared FFI::Platypus instance for the whole distribution; record fields
# and attached C functions below (outside this view) are registered on it.
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
# NOTE(review): mangler_default presumably maps short Perl-side names onto
# the C library's prefixed symbol names (e.g. TF_*) — confirm in ::Lib.
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);

maint/process-notebook.pl  view on Meta::CPAN

# Remove any stale generated POD; `|| true` keeps the script going when the
# target does not exist yet.
rm $DST || true;

#if grep -C5 -P '\s+\\n' $SRC -m 2; then
	#echo -e "Notebook $SRC has whitespace"
	#exit 1
#fi

## Run the notebook
#jupyter nbconvert --execute --inplace $SRC

## Clean up metadata (changed by the previous nbconvert --execute)
## See more at <https://timstaley.co.uk/posts/making-git-and-jupyter-notebooks-play-nice/>
# Strip per-cell execution metadata so re-running the notebook does not create
# spurious diffs; `sponge` (moreutils) lets jq write back to its own input file.
jq --indent 1     '
    del(.cells[].metadata | .execution)
    ' $SRC | sponge $SRC

### Notice about generated file
# `sponge -a` appends; the generated POD starts with its PODNAME and a
# do-not-edit banner pointing back at the notebook source and generator.
echo -e "# PODNAME: $PODNAME\n\n" | sponge -a $DST
echo -e "## DO NOT EDIT. Generated from $SRC using $GENERATOR.\n" | sponge -a $DST

## Add code to $DST

## Add code to $DST

t/upstream/CAPI/014_SetShape.t  view on Meta::CPAN


	# Wrap output 0 of the placeholder op as a TF_Output record for the
	# shape-inference calls below.
	my $feed_out_0 = Output->New({ oper => $feed, index => 0 });

	my $num_dims;

	note 'Fetch the shape, it should be completely unknown';
	# -1 is libtensorflow's sentinel for "unknown rank".
	$num_dims = $graph->GetTensorNumDims($feed_out_0, $s);
	TF_Utils::AssertStatusOK($s);
	is $num_dims, -1, 'Dims are unknown';

	note 'Set the shape to be unknown, expect no change';
	# undef dims here means "unknown shape"; setting unknown over unknown
	# must be a no-op.
	$graph->SetTensorShape($feed_out_0, undef, $s);
	$num_dims = $graph->GetTensorNumDims($feed_out_0, $s);
	TF_Utils::AssertStatusOK($s);
	is $num_dims, -1, 'Dims are still unknown';

	note 'Set the shape to be 2 x Unknown';
	# -1 in a dimension slot means that single dimension is unknown.
	my $dims = [2, -1];
	$graph->SetTensorShape( $feed_out_0, $dims, $s);
	TF_Utils::AssertStatusOK($s);

t/upstream/CAPI/014_SetShape.t  view on Meta::CPAN

	$graph->SetTensorShape( $feed_out_0, $dims, $s);
	TF_Utils::AssertStatusOK($s);

	note 'Fetch and see that the new value is returned.';
	$returned_dims = $graph->GetTensorShape( $feed_out_0, $s );
	TF_Utils::AssertStatusOK($s);
	# NOTE(review): `is` here performs a deep comparison of the arrayrefs,
	# which implies a Test2-style `is` rather than Test::More's — confirm
	# against the file's test-module imports (outside this view).
	is $returned_dims, $dims, "Got shape [ @$dims ]";

	note q{
		Try to set 'unknown' with unknown rank on the shape and see that
		it doesn't change.
	};
	# Shape refinement is monotonic: an unknown shape must not overwrite
	# a more specific one already recorded on the output.
	$graph->SetTensorShape($feed_out_0, undef, $s);
	TF_Utils::AssertStatusOK($s);
	$num_dims = $graph->GetTensorNumDims( $feed_out_0, $s );
	$returned_dims = $graph->GetTensorShape( $feed_out_0, $s );
	TF_Utils::AssertStatusOK($s);
	is $num_dims, 2, 'unchanged numdims';
	is $returned_dims, [2,3], 'dims still [2 3]';

	note q{
		Try to set 'unknown' with same rank on the shape and see that
		it doesn't change.
	};
	# Same-rank but all-unknown dims is also less specific, so the known
	# [2,3] shape must be retained.
	$graph->SetTensorShape($feed_out_0, [-1, -1], $s);
	TF_Utils::AssertStatusOK($s);
	$returned_dims = $graph->GetTensorShape( $feed_out_0, $s );
	TF_Utils::AssertStatusOK($s);
	is $returned_dims, [2,3], 'dims still [2 3]';

	note 'Try to fetch a shape with the wrong num_dims';
	pass 'This test not implemented for binding. Not possible to have invalid argument for num_dims.';

	note 'Try to set an invalid shape (cannot change 2x3 to a 2x5).';
	# Conflicting with the established shape must yield a non-OK status.
	$dims->[1] = 5;
	$graph->SetTensorShape( $feed_out_0, $dims, $s);
	note TF_Utils::AssertStatusNotOK($s);

	note 'Test for a scalar.';
	# A scalar constant has rank 0 (not unknown), exercising the boundary
	# between "no dimensions" and "unknown dimensions".
	my $three = TF_Utils::ScalarConst($graph, $s, 'scalar', INT32, 3);
	TF_Utils::AssertStatusOK($s);
	my $three_out_0 = Output->New({ oper => $three, index => 0 });

	$num_dims = $graph->GetTensorNumDims( $three_out_0, $s );



( run in 0.271 second using v1.01-cache-2.11-cpan-c333fce770f )