AI-TensorFlow-Libtensorflow


lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod

=head2 TF_DefaultThreadOptions

=over 2

  Initializes the given TF_ThreadOptions to default values.

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern void TF_DefaultThreadOptions(TF_ThreadOptions* options);

=head2 TF_StartThread

=over 2

  Returns a new thread that is running work_func and is identified
  (for debugging/performance-analysis) by thread_name.
  
  The given param (which may be null) is passed to work_func when the thread
  starts. In this way, data may be passed from the thread back to the caller.
  
  Caller takes ownership of the result and must call TF_JoinThread on it
  eventually.

=back

  /* From <tensorflow/c/env.h> */
  TF_CAPI_EXPORT extern TF_Thread* TF_StartThread(const TF_ThreadOptions* options,
                                                  const char* thread_name,
                                                  void (*work_func)(void*),
                                                  void* param);
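
The following is a minimal sketch of ours (not from the TensorFlow sources) showing the intended lifecycle: start a worker, pass data back through C<param>, then join. Error handling is omitted.

  #include <tensorflow/c/env.h>

  /* Worker: writes its result back through `param`. */
  static void work_func(void* param) {
    *(int*)param = 42;
  }

  int main(void) {
    TF_ThreadOptions options;
    TF_DefaultThreadOptions(&options);

    int result = 0;
    TF_Thread* thread = TF_StartThread(&options, "worker", work_func, &result);
    TF_JoinThread(thread);  /* caller owns the thread and must join it */
    return result == 42 ? 0 : 1;
  }
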
=head2 TFE_TensorHandleTensorDebugInfo

=over 2

  Retrieves TFE_TensorDebugInfo for `h`. If TFE_TensorHandleTensorDebugInfo
  succeeds, `status` is set to OK and the caller is responsible for deleting
  the returned TFE_TensorDebugInfo. If it fails, `status` is set to an
  appropriate error and `debug_info` is set to nullptr.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern TFE_TensorDebugInfo* TFE_TensorHandleTensorDebugInfo(
      TFE_TensorHandle* h, TF_Status* status);

=head2 TFE_DeleteTensorDebugInfo

=over 2

  Deletes `debug_info`.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern void TFE_DeleteTensorDebugInfo(
      TFE_TensorDebugInfo* debug_info);

=head2 TFE_TensorDebugInfoOnDeviceNumDims

=over 2

  Returns the number of dimensions used to represent the tensor on its device.
  The number of dimensions used to represent the tensor on device can be
  different from the number returned by TFE_TensorHandleNumDims.
  The return value was current at the time of TFE_TensorDebugInfo creation.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern int TFE_TensorDebugInfoOnDeviceNumDims(
      TFE_TensorDebugInfo* debug_info);

=head2 TFE_TensorDebugInfoOnDeviceDim

=over 2

  Returns the number of elements in dimension `dim_index`.
  Tensor representation on device can be transposed from its representation
  on host. The data contained in dimension `dim_index` on device
  can correspond to the data contained in another dimension in on-host
  representation. The dimensions are indexed using the standard TensorFlow
  major-to-minor order (slowest varying dimension first),
  not the XLA's minor-to-major order.
  On-device dimensions can be padded. TFE_TensorDebugInfoOnDeviceDim returns
  the number of elements in a dimension after padding.
  The return value was current at the time of TFE_TensorDebugInfo creation.

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern int64_t TFE_TensorDebugInfoOnDeviceDim(
      TFE_TensorDebugInfo* debug_info, int dim_index);
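
Taken together, these functions support an inspection pattern like the sketch below. The helper C<print_on_device_dims> is hypothetical, and C<h> is assumed to be a valid C<TFE_TensorHandle*>.

  #include <stdio.h>
  #include <tensorflow/c/c_api.h>
  #include <tensorflow/c/eager/c_api.h>

  void print_on_device_dims(TFE_TensorHandle* h) {
    TF_Status* status = TF_NewStatus();
    TFE_TensorDebugInfo* info = TFE_TensorHandleTensorDebugInfo(h, status);
    if (TF_GetCode(status) == TF_OK) {
      int ndims = TFE_TensorDebugInfoOnDeviceNumDims(info);
      for (int i = 0; i < ndims; ++i) {
        /* Major-to-minor order; sizes include any on-device padding. */
        printf("dim %d: %lld\n", i,
               (long long)TFE_TensorDebugInfoOnDeviceDim(info, i));
      }
      TFE_DeleteTensorDebugInfo(info);  /* caller deletes the debug info */
    }
    TF_DeleteStatus(status);
  }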

=head2 TFE_NewOp

=over 2

=back

  /* From <tensorflow/c/eager/c_api.h> */
  TF_CAPI_EXPORT extern TFE_Op* TFE_NewOp(TFE_Context* ctx,
                                          const char* op_or_function_name,
                                          TF_Status* status);
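
A hedged sketch of the call pattern; the wrapper C<run_identity_sketch> is hypothetical, C<ctx> is assumed to be a valid C<TFE_Context*>, and attribute setup and execution are elided.

  void run_identity_sketch(TFE_Context* ctx) {
    TF_Status* status = TF_NewStatus();
    TFE_Op* op = TFE_NewOp(ctx, "Identity", status);
    if (TF_GetCode(status) == TF_OK) {
      /* ... TFE_OpAddInput / TFE_OpSetAttr* / TFE_Execute would go here ... */
      TFE_DeleteOp(op);
    }
    TF_DeleteStatus(status);
  }
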
=head2 TFE_TensorHandleGetStatus

=over 2

  Returns the status of the tensor handle. If it is a tfrt::TensorHandle,
  the tensor handle can be an error and return non-OK status.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_TensorHandleGetStatus(TFE_TensorHandle* h,
                                                       TF_Status* status);
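
A short hypothetical sketch, assuming C<h> is a valid handle; C<TF_Message> is used to report the error text.

  #include <stdio.h>
  #include <tensorflow/c/eager/c_api_experimental.h>

  void check_handle(TFE_TensorHandle* h) {
    TF_Status* status = TF_NewStatus();
    TFE_TensorHandleGetStatus(h, status);
    if (TF_GetCode(status) != TF_OK) {
      fprintf(stderr, "handle error: %s\n", TF_Message(status));
    }
    TF_DeleteStatus(status);
  }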

=head2 TFE_GetExecutedOpNames

=over 2

  Get a comma-separated list of op names executed in graph functions dispatched
  to `ctx`. This feature is currently only enabled for TFRT debug builds, for
  performance and simplicity reasons.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_GetExecutedOpNames(TFE_Context* ctx,
                                                    TF_Buffer* buf,
                                                    TF_Status* status);
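
A hypothetical sketch of retrieving the list into a C<TF_Buffer>; the wrapper name is ours, and the list is only populated on TFRT debug builds.

  #include <stdio.h>
  #include <tensorflow/c/eager/c_api_experimental.h>

  void dump_executed_op_names(TFE_Context* ctx) {
    TF_Status* status = TF_NewStatus();
    TF_Buffer* buf = TF_NewBuffer();
    TFE_GetExecutedOpNames(ctx, buf, status);
    if (TF_GetCode(status) == TF_OK && buf->data != NULL) {
      /* buf->data holds the comma-separated op names. */
      printf("%.*s\n", (int)buf->length, (const char*)buf->data);
    }
    TF_DeleteBuffer(buf);
    TF_DeleteStatus(status);
  }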

=head2 TFE_SetLogicalCpuDevices

=over 2

  Set logical devices to the context's device manager. If logical devices
  are already configured at context initialization through TFE_ContextOptions,
  this method should not be called.

=back

  /* From <tensorflow/c/eager/c_api_experimental.h> */
  TF_CAPI_EXPORT extern void TFE_SetLogicalCpuDevices(TFE_Context* ctx,
                                                      int num_cpus,
                                                      const char* prefix,
                                                      TF_Status* status);

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod

    use overload '""' => \&_op_stringify;

    sub _op_stringify { sprintf "%s:%s", $_[0]->seq_id // "(no sequence)", $_[0]->to_FTstring }
}

#####

{

say "Testing interval resizing:\n";
sub _debug_resize {
    my ($interval, $to, $msg) = @_;

    my $resized_interval = $interval->resize($to);

    die "Wrong interval size for $interval --($to)--> $resized_interval"
        unless $resized_interval->length == $to;

    say sprintf "Interval: %s -> %s, length %2d : %s",
        $interval,
        $resized_interval, $resized_interval->length,

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN

}

for my $interval_spec ( [4, 8], [5, 8], [5, 9], [6, 9]) {
    my ($start, $end) = @$interval_spec;
    my $test_interval = Interval->new( -seq_id => 'chr11', -start => $start, -end => $end );
    say sprintf "Testing interval %s with length %d", $test_interval, $test_interval->length;
    say "-----";
    for(0..5) {
        my $base = $test_interval->length;
        my $to = $base + $_;
        _debug_resize $test_interval, $to, "$base -> $to (+ $_)";
    }
    say "";
}

}

undef;

use Bio::DB::HTS::Faidx;


lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod

                  map {
                      [
                          $h->td( $cb->($_, $h) )
                      ]
                  } @$data
              )
          ]
      )
  }

This helper displays images in Gnuplot for debugging; the lines that call it are commented out.

  sub show_in_gnuplot {
      my ($p) = @_;
      require PDL::Graphics::Gnuplot;
      PDL::Graphics::Gnuplot::image( square => 1, $p );
  }

=head2 Fetch the model and labels

We are going to use an L<image classification model|https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/classification/5> from TensorFlow Hub based on the MobileNet V2 architecture. We download both the model and ImageNet classification labels.


