AI-TensorFlow-Libtensorflow

Changes  view on Meta::CPAN

      - Add `::TFLibrary` class. Move `GetAllOpList()` method there.

  - Documentation

      - Add InferenceUsingTFHubMobileNetV2Model tutorial.

0.0.3 2022-12-15 10:46:52-0500

  - Features

      - Add more testing of basic API. Complete port of "(CAPI, *)" tests
        from upstream `tensorflow/c/c_api_test.cc`.

0.0.2 2022-11-28 14:33:33-0500

  - Features

      - Explicit support for minimum Perl v5.14.

0.0.1 2022-11-25 11:43:37-0500

  - Features

MANIFEST  view on Meta::CPAN

t/upstream/CAPI/029_SavedModel.t
t/upstream/CAPI/030_SavedModelNullArgsAreValid.t
t/upstream/CAPI/031_DeletingNullPointerIsSafe.t
t/upstream/CAPI/032_TestBitcastFrom_Reshape.t
t/upstream/CAPI/033_TestFromProto.t
t/upstream/CAPI/034_TestTensorAligned.t
t/upstream/CAPI/035_TestTensorIsNotAligned.t
t/upstream/CAPI/036_MessageBufferConversion.t
t/upstream/CAPI/037_TestTensorNonScalarBytesAllocateDelete.t
t/upstream/CAPI/TEMPLATE
t/upstream/tensorflow/cc/saved_model/testdata/half_plus_two/00000123/assets/foo.txt
t/upstream/tensorflow/cc/saved_model/testdata/half_plus_two/00000123/saved_model.pb
t/upstream/tensorflow/cc/saved_model/testdata/half_plus_two/00000123/variables/variables.data-00000-of-00001
t/upstream/tensorflow/cc/saved_model/testdata/half_plus_two/00000123/variables/variables.index
weaver.ini
xt/author/critic.t
xt/author/pod-linkcheck.t
xt/author/pod-snippets.t

META.json  view on Meta::CPAN

            "overload" : "0",
            "perl" : "5.014",
            "strict" : "0",
            "warnings" : "0"
         },
         "suggests" : {
            "Data::Printer" : "0",
            "PDL" : "0"
         }
      },
      "test" : {
         "requires" : {
            "Data::Dumper" : "0",
            "PDL" : "0",
            "PDL::Core" : "0",
            "Path::Tiny" : "0",
            "Test2::V0" : "0",
            "Test::More" : "0",
            "aliased" : "0",
            "lib" : "0",
            "perl" : "5.014"

Makefile.PL  view on Meta::CPAN

    "Data::Dumper" => 0,
    "PDL" => 0,
    "PDL::Core" => 0,
    "Path::Tiny" => 0,
    "Test2::V0" => 0,
    "Test::More" => 0,
    "aliased" => 0,
    "lib" => 0
  },
  "VERSION" => "0.0.7",
  "test" => {
    "TESTS" => "t/*.t t/AI/TensorFlow/*.t t/upstream/CAPI/*.t"
  }
);


my %FallbackPrereqs = (
  "Alien::Libtensorflow" => 0,
  "Class::Tiny" => 0,
  "Const::Exporter" => 0,
  "Const::Fast" => 0,

dist.ini  view on Meta::CPAN

Mu = 0
Path::Tiny = 0
Sort::Key::Multi = 0
Sub::Uplevel = 0
Syntax::Construct = 0
Types::Path::Tiny = 0

[Encoding / ModelData]
encoding = bytes
match    = \.pb$
match    = t/upstream/tensorflow/cc/saved_model/testdata/

[MetaNoIndex]
directory = maint

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

  TF_CAPI_EXPORT extern void TF_EnableXLACompilation(TF_SessionOptions* options,
                                                     unsigned char enable);

=head2 TF_SetXlaEnableLazyCompilation

=over 2

  Set XLA's internal BuildXlaOpsPassFlags.tf_xla_enable_lazy_compilation to the
  value of 'enable'. Also returns the original value of that flag.

  Use in tests to allow XLA to fall back to classic TensorFlow. This has a
  global effect.

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT unsigned char TF_SetXlaEnableLazyCompilation(
      unsigned char enable);
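
The distribution does not document a Perl wrapper for this experimental
function, but as a rough sketch, one could attach the symbol directly with
L<FFI::Platypus> against the library provided by L<Alien::Libtensorflow>
(an assumption; the symbol must be exported by your build of
C<libtensorflow>):

  use FFI::Platypus 1.00;
  use Alien::Libtensorflow;

  my $ffi = FFI::Platypus->new( api => 1 );
  $ffi->lib( Alien::Libtensorflow->dynamic_libs );

  # unsigned char TF_SetXlaEnableLazyCompilation(unsigned char enable);
  $ffi->attach( TF_SetXlaEnableLazyCompilation => ['uint8'] => 'uint8' );

  # Enable lazy compilation; the previous value of the flag is returned.
  my $was_enabled = TF_SetXlaEnableLazyCompilation(1);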

=head2 TF_SetTfXlaCpuGlobalJit

=over 2

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT void TF_SetXlaMinClusterSize(int size);

=head2 TF_GetXlaConstantFoldingDisabled

=over 2

  Gets/Sets the TF/XLA flag for whether constant folding is disabled (true) or
  not (false). This is for testing, to ensure that XLA itself is being
  exercised rather than TensorFlow's CPU implementation via constant folding.

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT unsigned char TF_GetXlaConstantFoldingDisabled();

=head2 TF_SetXlaConstantFoldingDisabled

=over 2

lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod  view on Meta::CPAN

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT void TF_InitMain(const char* usage, int* argc, char*** argv);

=head2 TF_PickUnusedPortOrDie

=over 2

  Platform-specific implementation to return an unused port. (This should be
  used in tests only.)

=back

  /* From <tensorflow/c/c_api_experimental.h> */
  TF_CAPI_EXPORT int TF_PickUnusedPortOrDie(void);

=head2 TFE_NewTensorHandleFromScalar

=over 2

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod  view on Meta::CPAN

                index => $index,
            });
        } keys %$port_dict
     }
} keys %ops;

p %outputs;

use HTML::Tiny;

my %images_for_test_to_uri = (
    "beach_scene" => 'https://github.com/tensorflow/models/blob/master/research/object_detection/test_images/image2.jpg?raw=true',
);

my @image_names = sort keys %images_for_test_to_uri;
my $h = HTML::Tiny->new;

my $image_name = 'beach_scene';
if( IN_IPERL ) {
    IPerl->html(
        $h->a( { href => $images_for_test_to_uri{$image_name} },
            $h->img({
                src => $images_for_test_to_uri{$image_name},
                alt => $image_name,
                width => '100%',
            })
        ),
    );
}

sub load_image_to_pdl {
    my ($uri, $image_size) = @_;

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod  view on Meta::CPAN

    # $data is packed as PDL->dims == [w,h] with RGB pixels
    my $pdl_raw = zeros(byte, $img->getchannels, $img->getwidth, $img->getheight);
    ${ $pdl_raw->get_dataref } = $data;
    $pdl_raw->upd_data;

    $pdl_raw;
}

my @pdl_images = map {
    load_image_to_pdl(
        $images_for_test_to_uri{$_},
        $model_name_to_params{$model_name}{image_size}
    );
} ($image_names[0]);

my $pdl_image_batched = cat(@pdl_images);
my $t = Uint8PDLTOTFTensor($pdl_image_batched);

die "There should be 4 dimensions" unless $pdl_image_batched->ndims == 4;

die "With the final dimension of length 1" unless $pdl_image_batched->dim(3) == 1;

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod  view on Meta::CPAN


$gp->plot(
    topcmds => q{set style textbox opaque fc "#505050f0" noborder},
    square => 1,
    yrange => [$pdl_images[0]->dim(2),0],
    with => 'image', $pdl_images[0],
);

$gp->close;

IPerl->png( bytestream => path($plot_output_path)->slurp_raw ) if IN_IPERL;

use Filesys::DiskUsage qw/du/;

my $total = du( { 'human-readable' => 1, dereference => 1 },
    $model_archive_path, $model_base );

say "Disk space usage: $total"; undef;

__END__

=pod

=encoding UTF-8

=head1 NAME

AI::TensorFlow::Libtensorflow::Manual::Notebook::InferenceUsingTFHubCenterNetObjDetect - Using TensorFlow to do object detection using a pre-trained model

=head1 SYNOPSIS

The following tutorial is based on the L<TensorFlow Hub Object Detection Colab notebook|https://www.tensorflow.org/hub/tutorials/tf2_object_detection>. It uses a pre-trained model based on the I<CenterNet> architecture trained on the I<COCO 2017> dat...

Some of this code is identical to that of the C<InferenceUsingTFHubMobileNetV2Model> notebook. Please look there for an explanation of that code. As stated there, this will later be wrapped up into a high-level library to hide the details behind an API.

=head1 COLOPHON

The following document is either a POD file which can additionally be run as a Perl script or a Jupyter Notebook which can be run in L<IPerl|https://p3rl.org/Devel::IPerl> (viewable online at L<nbviewer|https://nbviewer.org/github/EntropyOrg/perl-AI-...

=over

=item *

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod  view on Meta::CPAN

              $dict_key => AI::TensorFlow::Libtensorflow::Output->New( {
                  oper => $op,
                  index => $index,
              });
          } keys %$port_dict
       }
  } keys %ops;
  
  p %outputs;

Now we can fetch the following test image from GitHub.

  use HTML::Tiny;
  
  my %images_for_test_to_uri = (
      "beach_scene" => 'https://github.com/tensorflow/models/blob/master/research/object_detection/test_images/image2.jpg?raw=true',
  );
  
  my @image_names = sort keys %images_for_test_to_uri;
  my $h = HTML::Tiny->new;
  
  my $image_name = 'beach_scene';
  if( IN_IPERL ) {
      IPerl->html(
          $h->a( { href => $images_for_test_to_uri{$image_name} },
              $h->img({
                  src => $images_for_test_to_uri{$image_name},
                  alt => $image_name,
                  width => '100%',
              })
          ),
      );
  }

=head2 Download the test image and transform it into suitable input data

We now fetch the image and prepare it to be in the needed format by using C<Imager>. Note that this model does not need the input image to be of a certain size so no resizing or padding is required.

Then we turn the C<Imager> data into a C<PDL> ndarray. Since we just need the 3 channels of the image as they are, they can be stored directly in a C<PDL> ndarray of type C<byte>.

Even though the model takes only a single image at a time, we still concatenate the C<PDL> ndarrays here so that we get an ndarray with four (4) dimensions whose last C<PDL> dimension is of size one (1), that is, a batch of one.

  sub load_image_to_pdl {
      my ($uri, $image_size) = @_;
  

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod  view on Meta::CPAN

      # $data is packed as PDL->dims == [w,h] with RGB pixels
      my $pdl_raw = zeros(byte, $img->getchannels, $img->getwidth, $img->getheight);
      ${ $pdl_raw->get_dataref } = $data;
      $pdl_raw->upd_data;
  
      $pdl_raw;
  }
  
  my @pdl_images = map {
      load_image_to_pdl(
          $images_for_test_to_uri{$_},
          $model_name_to_params{$model_name}{image_size}
      );
  } ($image_names[0]);
  
  my $pdl_image_batched = cat(@pdl_images);
  my $t = Uint8PDLTOTFTensor($pdl_image_batched);
  
  die "There should be 4 dimensions" unless $pdl_image_batched->ndims == 4;
  
  die "With the final dimension of length 1" unless $pdl_image_batched->dim(3) == 1;

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod  view on Meta::CPAN

  
  $gp->plot(
      topcmds => q{set style textbox opaque fc "#505050f0" noborder},
      square => 1,
      yrange => [$pdl_images[0]->dim(2),0],
      with => 'image', $pdl_images[0],
  );
  
  $gp->close;
  
  IPerl->png( bytestream => path($plot_output_path)->slurp_raw ) if IN_IPERL;

=head1 RESOURCE USAGE

  use Filesys::DiskUsage qw/du/;
  
  my $total = du( { 'human-readable' => 1, dereference => 1 },
      $model_archive_path, $model_base );
  
  say "Disk space usage: $total"; undef;

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN


    return $encoded;
}

####

{

say "Testing one-hot encoding:\n";

my $onehot_test_seq = "ACGTNtgcan";
my $test_encoded = one_hot_dna( $onehot_test_seq );
$SHOW_ENCODER = 0;

say "One-hot encoding of sequence '$onehot_test_seq' is:";
say $test_encoded->info, $test_encoded;

}

package Interval {
    use Bio::Location::Simple ();

    use parent qw(Bio::Location::Simple);

    sub center {
        my $self = shift;

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN

        unless $resized_interval->length == $to;

    say sprintf "Interval: %s -> %s, length %2d : %s",
        $interval,
        $resized_interval, $resized_interval->length,
        $msg;
}

for my $interval_spec ( [4, 8], [5, 8], [5, 9], [6, 9]) {
    my ($start, $end) = @$interval_spec;
    my $test_interval = Interval->new( -seq_id => 'chr11', -start => $start, -end => $end );
    say sprintf "Testing interval %s with length %d", $test_interval, $test_interval->length;
    say "-----";
    for(0..5) {
        my $base = $test_interval->length;
        my $to = $base + $_;
        _debug_resize $test_interval, $to, "$base -> $to (+ $_)";
    }
    say "";
}

}

undef;

use Bio::DB::HTS::Faidx;

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN

        # $x scaled by 1e7; filled curve between $y and the x-axis
        $x / 1e7, $y, pdl(0)
    );
}

$gp->end_multi;

$gp->close;

if( IN_IPERL ) {
    IPerl->png( bytestream => path($plot_output_path)->slurp_raw );
}

# Some code that could be used for working with variants.
1 if <<'COMMENT';

use Bio::DB::HTS::VCF;

my $clinvar_tbi_path = "${clinvar_path}.tbi";
unless( -f $clinvar_tbi_path ) {
    system( qw(tabix), $clinvar_path );

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN

  
      return $encoded;
  }
  
  ####
  
  {
  
  say "Testing one-hot encoding:\n";
  
  my $onehot_test_seq = "ACGTNtgcan";
  my $test_encoded = one_hot_dna( $onehot_test_seq );
  $SHOW_ENCODER = 0;
  
  say "One-hot encoding of sequence '$onehot_test_seq' is:";
  say $test_encoded->info, $test_encoded;
  
  }

B<STREAM (STDOUT)>:

  Testing one-hot encoding:
  
  Encoder is
  PDL: Float D [5,4]
  [

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN

          unless $resized_interval->length == $to;
  
      say sprintf "Interval: %s -> %s, length %2d : %s",
          $interval,
          $resized_interval, $resized_interval->length,
          $msg;
  }
  
  for my $interval_spec ( [4, 8], [5, 8], [5, 9], [6, 9]) {
      my ($start, $end) = @$interval_spec;
      my $test_interval = Interval->new( -seq_id => 'chr11', -start => $start, -end => $end );
      say sprintf "Testing interval %s with length %d", $test_interval, $test_interval->length;
      say "-----";
      for(0..5) {
          my $base = $test_interval->length;
          my $to = $base + $_;
          _debug_resize $test_interval, $to, "$base -> $to (+ $_)";
      }
      say "";
  }
  
  }
  
  undef;

B<STREAM (STDOUT)>:

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod  view on Meta::CPAN

          # $x scaled by 1e7; filled curve between $y and the x-axis
          $x / 1e7, $y, pdl(0)
      );
  }
  
  $gp->end_multi;
  
  $gp->close;
  
  if( IN_IPERL ) {
      IPerl->png( bytestream => path($plot_output_path)->slurp_raw );
  }

B<DISPLAY>:

=for html <span style="display:inline-block;margin-left:1em;"><p><img						src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA+gAAAMgCAIAAAA/et9qAAAgAElEQVR4nOzdd2AUVeIH8Ddb0jshBAIEpSo1GjoIpyAgCOqd3uGdoGBBUQQFRUVBRbkTf9gOBQucqFiwUhSSgJQYCCSBkJBAet1k...

=head2 Parts of the original notebook that fall outside the scope

In the original notebook, there are several more steps that have not been ported here:

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod  view on Meta::CPAN

die "Could not get all operations" unless List::Util::all(sub { defined }, values %ops);

my %outputs = map { $_ => [ AI::TensorFlow::Libtensorflow::Output->New( { oper => $ops{$_}, index => 0 } ) ] }
    keys %ops;

p %outputs;

say "Input: " , $outputs{in}[0];
say "Output: ", $outputs{out}[0];

my %images_for_test_to_uri = (
    "tiger" => "https://upload.wikimedia.org/wikipedia/commons/b/b0/Bengal_tiger_%28Panthera_tigris_tigris%29_female_3_crop.jpg",
    #by Charles James Sharp, CC BY-SA 4.0 <https://creativecommons.org/licenses/by-sa/4.0>, via Wikimedia Commons
    "bus" => "https://upload.wikimedia.org/wikipedia/commons/6/63/LT_471_%28LTZ_1471%29_Arriva_London_New_Routemaster_%2819522859218%29.jpg",
    #by Martin49 from London, England, CC BY 2.0 <https://creativecommons.org/licenses/by/2.0>, via Wikimedia Commons
    "car" => "https://upload.wikimedia.org/wikipedia/commons/4/49/2013-2016_Toyota_Corolla_%28ZRE172R%29_SX_sedan_%282018-09-17%29_01.jpg",
    #by EurovisionNim, CC BY-SA 4.0 <https://creativecommons.org/licenses/by-sa/4.0>, via Wikimedia Commons
    "cat" => "https://upload.wikimedia.org/wikipedia/commons/4/4d/Cat_November_2010-1a.jpg",
    #by Alvesgaspar, CC BY-SA 3.0 <https://creativecommons.org/licenses/by-sa/3.0>, via Wikimedia Commons
    "dog" => "https://upload.wikimedia.org/wikipedia/commons/archive/a/a9/20090914031557%21Saluki_dog_breed.jpg",
    #by Craig Pemberton, CC BY-SA 3.0 <https://creativecommons.org/licenses/by-sa/3.0>, via Wikimedia Commons

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod  view on Meta::CPAN

    "flamingo" => "https://upload.wikimedia.org/wikipedia/commons/b/b8/James_Flamingos_MC.jpg",
    #by Christian Mehlführer, User:Chmehl, CC BY 3.0 <https://creativecommons.org/licenses/by/3.0>, via Wikimedia Commons
    "piano" => "https://upload.wikimedia.org/wikipedia/commons/d/da/Steinway_%26_Sons_upright_piano%2C_model_K-132%2C_manufactured_at_Steinway%27s_factory_in_Hamburg%2C_Germany.png",
    #by "Photo: © Copyright Steinway & Sons", CC BY-SA 3.0 <https://creativecommons.org/licenses/by-sa/3.0>, via Wikimedia Commons
    "honeycomb" => "https://upload.wikimedia.org/wikipedia/commons/f/f7/Honey_comb.jpg",
    #by Merdal, CC BY-SA 3.0 <http://creativecommons.org/licenses/by-sa/3.0/>, via Wikimedia Commons
    "teapot" => "https://upload.wikimedia.org/wikipedia/commons/4/44/Black_tea_pot_cropped.jpg",
    #by Mendhak, CC BY-SA 2.0 <https://creativecommons.org/licenses/by-sa/2.0>, via Wikimedia Commons
);

my @image_names = sort keys %images_for_test_to_uri;


if( IN_IPERL ) {
    IPerl->html(
        my_table( \@image_names, sub {
            my ($image_name, $h) = @_;
            (
                $h->tt($image_name),
                $h->a( { href => $images_for_test_to_uri{$image_name} },
                    $h->img({
                        src => $images_for_test_to_uri{$image_name},
                        alt => $image_name,
                        width => '50%',
                    })
                ),
            )
        })
    );
}

sub imager_paste_center_pad {

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod  view on Meta::CPAN


    ## flip vertically to see image right way up
    #show_in_gnuplot( $pdl_channels->slice(':,:,-1:0')         ); #DEBUG
    #show_in_gnuplot(   $pdl_scaled->slice(':,:,-1:0') * 255.0 ); #DEBUG

    $pdl_scaled;
}

my @pdl_images = map {
    load_image_to_pdl(
        $images_for_test_to_uri{$_},
        $model_name_to_params{$model_name}{image_size}
    );
} @image_names;

my $pdl_image_batched = cat(@pdl_images);
my $t = FloatPDLTOTFTensor($pdl_image_batched);

p $pdl_image_batched;
p $t;

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod  view on Meta::CPAN

my $includes_background_class = $probabilities_batched->dim(0) == IMAGENET_LABEL_COUNT_WITH_BG;

if( IN_IPERL ) {
    my $html = IPerl->html(
        my_table( [0..$#image_names], sub {
            my ($batch_idx, $h) = @_;
            my $image_name = $image_names[$batch_idx];
            my @top_for_image = $top_lists[$batch_idx]->list;
            (
                    $h->tt($image_name),
                    $h->a( { href => $images_for_test_to_uri{$image_name} },
                        $h->img({
                            src => $images_for_test_to_uri{$image_name},
                            alt => $image_name,
                            width => '50%',
                        })
                    ),
                    do {
                        my @tr;
                        push @tr, [ $h->th('Rank', 'Label No', 'Label', 'Prob') ];
                        while( my ($i, $label_index) = each @top_for_image ) {
                            my $class_index = $includes_background_class ? $label_index : $label_index + 1;
                            push @tr, [ $h->td(

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod  view on Meta::CPAN

=pod

=encoding UTF-8

=head1 NAME

AI::TensorFlow::Libtensorflow::Manual::Notebook::InferenceUsingTFHubMobileNetV2Model - Using TensorFlow to do image classification using a pre-trained model

=head1 SYNOPSIS

The following tutorial is based on the L<Image Classification with TensorFlow Hub notebook|https://github.com/tensorflow/docs/blob/master/site/en/hub/tutorials/image_classification.ipynb>. It uses a pre-trained model based on the I<MobileNet V2> arch...

Please look at the L<SECURITY note|https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md> regarding running models as models are programs. You can also used C<saved_model_cli scan> to check for L<security-sensitive "denylisted ops"|https:/...

If you would like to visualise a model, you can use L<Netron|https://github.com/lutzroeder/netron> on the C<.pb> file.

=head1 COLOPHON

The following document is either a POD file which can additionally be run as a Perl script or a Jupyter Notebook which can be run in L<IPerl|https://p3rl.org/Devel::IPerl> (viewable online at L<nbviewer|https://nbviewer.org/github/EntropyOrg/perl-AI-...

If you are running the code, you may optionally install the L<C<tensorflow> Python package|https://www.tensorflow.org/install/pip> in order to access the C<saved_model_cli> command, but this is only used for informational purposes.

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod  view on Meta::CPAN

  Method name is: tensorflow/serving/predict

B<RESULT>:

  1

The above C<saved_model_cli> output shows that the model input is at C<serving_default_inputs:0> which means the operation named C<serving_default_inputs> at index C<0> and the output is at C<StatefulPartitionedCall:0> which means the operation named...

It also shows the type and shape of the C<TFTensor>s for those inputs and outputs. Together this is known as a signature.

For the C<input>, we have C<(-1, 224, 224, 3)> which is a L<common input image specification for TensorFlow Hub|https://www.tensorflow.org/hub/common_signatures/images#input>. This is known as C<channels_last> (or C<NHWC>) layout where the TensorFlow...

For the C<output>, we have C<(-1, 1001)> which is C<[batch_size, num_classes]> where the elements are scores that the image received for that ImageNet class.
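
For illustration, here is a minimal sketch (assuming the
C<FloatPDLTOTFTensor> helper defined in this notebook) of how the
column-major C<PDL> dimension list maps onto the row-major C<TFTensor>
shape for this signature:

  use PDL;

  # PDL dims are [channels, width, height, batch] = [3, 224, 224, 1] ...
  my $dummy_batch = zeros(float, 3, 224, 224, 1);
  my $dummy_t = FloatPDLTOTFTensor($dummy_batch);

  # ... which the model sees reversed as NHWC: [1, 224, 224, 3].
  say join ',', map { $dummy_t->Dim($_) } 0 .. $dummy_t->NumDims - 1;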

Now we can load the model from that folder with the tag set C<[ 'serve' ]> by using the C<LoadFromSavedModel> constructor to create a C<::Graph> and a C<::Session> for that graph.

  my $opt = AI::TensorFlow::Libtensorflow::SessionOptions->New;
  
  my $graph = AI::TensorFlow::Libtensorflow::Graph->New;
  my $session = AI::TensorFlow::Libtensorflow::Session->LoadFromSavedModel(
      $opt, undef, $model_base, \@tags, $graph, undef, $s

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod  view on Meta::CPAN

                </span><span style="color: #33ccff;">}</span><span style="">
            </span><span style="color: #33ccff;">}</span><span style="">
    </span><span style="color: #33ccff;">]</span><span style="">
</span><span style="color: #33ccff;">}</span><span style="">
</span></code></pre></span>

B<RESULT>:

  1

Now we can fetch the following test images from Wikimedia.

  my %images_for_test_to_uri = (
      "tiger" => "https://upload.wikimedia.org/wikipedia/commons/b/b0/Bengal_tiger_%28Panthera_tigris_tigris%29_female_3_crop.jpg",
      #by Charles James Sharp, CC BY-SA 4.0 <https://creativecommons.org/licenses/by-sa/4.0>, via Wikimedia Commons
      "bus" => "https://upload.wikimedia.org/wikipedia/commons/6/63/LT_471_%28LTZ_1471%29_Arriva_London_New_Routemaster_%2819522859218%29.jpg",
      #by Martin49 from London, England, CC BY 2.0 <https://creativecommons.org/licenses/by/2.0>, via Wikimedia Commons
      "car" => "https://upload.wikimedia.org/wikipedia/commons/4/49/2013-2016_Toyota_Corolla_%28ZRE172R%29_SX_sedan_%282018-09-17%29_01.jpg",
      #by EurovisionNim, CC BY-SA 4.0 <https://creativecommons.org/licenses/by-sa/4.0>, via Wikimedia Commons
      "cat" => "https://upload.wikimedia.org/wikipedia/commons/4/4d/Cat_November_2010-1a.jpg",
      #by Alvesgaspar, CC BY-SA 3.0 <https://creativecommons.org/licenses/by-sa/3.0>, via Wikimedia Commons
      "dog" => "https://upload.wikimedia.org/wikipedia/commons/archive/a/a9/20090914031557%21Saluki_dog_breed.jpg",
      #by Craig Pemberton, CC BY-SA 3.0 <https://creativecommons.org/licenses/by-sa/3.0>, via Wikimedia Commons

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod  view on Meta::CPAN

      "flamingo" => "https://upload.wikimedia.org/wikipedia/commons/b/b8/James_Flamingos_MC.jpg",
      #by Christian Mehlführer, User:Chmehl, CC BY 3.0 <https://creativecommons.org/licenses/by/3.0>, via Wikimedia Commons
      "piano" => "https://upload.wikimedia.org/wikipedia/commons/d/da/Steinway_%26_Sons_upright_piano%2C_model_K-132%2C_manufactured_at_Steinway%27s_factory_in_Hamburg%2C_Germany.png",
      #by "Photo: © Copyright Steinway & Sons", CC BY-SA 3.0 <https://creativecommons.org/licenses/by-sa/3.0>, via Wikimedia Commons
      "honeycomb" => "https://upload.wikimedia.org/wikipedia/commons/f/f7/Honey_comb.jpg",
      #by Merdal, CC BY-SA 3.0 <http://creativecommons.org/licenses/by-sa/3.0/>, via Wikimedia Commons
      "teapot" => "https://upload.wikimedia.org/wikipedia/commons/4/44/Black_tea_pot_cropped.jpg",
      #by Mendhak, CC BY-SA 2.0 <https://creativecommons.org/licenses/by-sa/2.0>, via Wikimedia Commons
  );
  
  my @image_names = sort keys %images_for_test_to_uri;
  
  
  if( IN_IPERL ) {
      IPerl->html(
          my_table( \@image_names, sub {
              my ($image_name, $h) = @_;
              (
                  $h->tt($image_name),
                  $h->a( { href => $images_for_test_to_uri{$image_name} },
                      $h->img({
                          src => $images_for_test_to_uri{$image_name},
                          alt => $image_name,
                          width => '50%',
                      })
                  ),
              )
          })
      );
  }

B<DISPLAY>:

=for html <span style="display:inline-block;margin-left:1em;"><p><table style="width: 100%"><tr><td><tt>apple</tt></td><td><a href="https://upload.wikimedia.org/wikipedia/commons/1/15/Red_Apple.jpg"><img alt="apple" src="https://upload.wikimedia.org/...

=head2 Download the test images and transform them into suitable input data

We now fetch these images and prepare them to be in the needed format by using C<Imager> to resize and add padding. Then we turn the C<Imager> data into a C<PDL> ndarray. Since the C<Imager> data is stored as 32-bits with 4 channels in the order ...

We then take all the PDL ndarrays and concatenate them. Again, note that the dimension lists for the PDL ndarray and the TFTensor are reversed.

  sub imager_paste_center_pad {
      my ($inner, $padded_sz, @rest) = @_;
  
      my $outer = Imager->new( List::Util::mesh( [qw(xsize ysize)], $padded_sz ),
          @rest

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod  view on Meta::CPAN

  
      ## flip vertically to see image right way up
      #show_in_gnuplot( $pdl_channels->slice(':,:,-1:0')         ); #DEBUG
      #show_in_gnuplot(   $pdl_scaled->slice(':,:,-1:0') * 255.0 ); #DEBUG
  
      $pdl_scaled;
  }
  
  my @pdl_images = map {
      load_image_to_pdl(
          $images_for_test_to_uri{$_},
          $model_name_to_params{$model_name}{image_size}
      );
  } @image_names;
  
  my $pdl_image_batched = cat(@pdl_images);
  my $t = FloatPDLTOTFTensor($pdl_image_batched);
  
  p $pdl_image_batched;
  p $t;

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod  view on Meta::CPAN

  my $includes_background_class = $probabilities_batched->dim(0) == IMAGENET_LABEL_COUNT_WITH_BG;
  
  if( IN_IPERL ) {
      my $html = IPerl->html(
          my_table( [0..$#image_names], sub {
              my ($batch_idx, $h) = @_;
              my $image_name = $image_names[$batch_idx];
              my @top_for_image = $top_lists[$batch_idx]->list;
              (
                      $h->tt($image_name),
                      $h->a( { href => $images_for_test_to_uri{$image_name} },
                          $h->img({
                              src => $images_for_test_to_uri{$image_name},
                              alt => $image_name,
                              width => '50%',
                          })
                      ),
                      do {
                          my @tr;
                          push @tr, [ $h->th('Rank', 'Label No', 'Label', 'Prob') ];
                          while( my ($i, $label_index) = each @top_for_image ) {
                              my $class_index = $includes_background_class ? $label_index : $label_index + 1;
                              push @tr, [ $h->td(

lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod  view on Meta::CPAN

      $model_archive_path, $model_base, $labels_path );
  
  say "Disk space usage: $total"; undef;

B<STREAM (STDOUT)>:

  Disk space usage: 27.45M

=head1 DEBUGGING

The following images can be used to test the C<load_image_to_pdl> function.

  my @solid_channel_uris = (
      'https://upload.wikimedia.org/wikipedia/commons/thumb/6/62/Solid_red.svg/480px-Solid_red.svg.png',
      'https://upload.wikimedia.org/wikipedia/commons/thumb/1/1d/Green_00FF00_9x9.svg/480px-Green_00FF00_9x9.svg.png',
      'https://upload.wikimedia.org/wikipedia/commons/thumb/f/ff/Solid_blue.svg/480px-Solid_blue.svg.png',
  );
  undef;

=head1 CPANFILE

lib/AI/TensorFlow/Libtensorflow/Manual/Quickstart.pod  view on Meta::CPAN

=back

=head1 DOCKER IMAGES

Docker (or equivalent runtime) images for the library along with all the
dependencies to run the above tutorials are available at
L<Quay.io|https://quay.io/repository/entropyorg/perl-ai-tensorflow-libtensorflow>
under various tags which can be run as

C<<
  docker run --rm -it -p 8888:8888 quay.io/entropyorg/perl-ai-tensorflow-libtensorflow:latest-nb-omnibus
>>

and when the links come up on the terminal, click the link to
C<http://127.0.0.1:8888/> in order to connect to the Jupyter Notebook interface
via the web browser. In the browser, click on the C<notebook> folder to access
the notebooks.

=head2 GPU Docker support

If using the GPU Docker image for NVIDIA support, make sure that the

lib/AI/TensorFlow/Libtensorflow/Manual/Quickstart.pod  view on Meta::CPAN

installed.  The recommended approach is to install via the official Docker
releases at L<https://docs.docker.com/engine/install/>. Note that in some
cases, you may have other unofficial Docker installations such as the
C<docker.io> package or the Snap C<docker> package, which may conflict with
the official vendor-provided NVIDIA Container Runtime.

=head2 Docker Tags

=over 4

=item C<latest>: base image with only C<libtensorflow> installed.

=item C<latest-nb-image-class>: image containing dependencies needed to run

L<InferenceUsingTFHubMobileNetV2Model|AI::TensorFlow::Libtensorflow::Manual::Notebook::InferenceUsingTFHubMobileNetV2Model>.

=item C<latest-nb-gene-expr-pred>: image containing dependencies needed to run

L<InferenceUsingTFHubEnformerGeneExprPredModel|AI::TensorFlow::Libtensorflow::Manual::Notebook::InferenceUsingTFHubEnformerGeneExprPredModel>.

=item C<latest-nb-obj-detect>: image containing dependencies needed to run

L<InferenceUsingTFHubCenterNetObjDetect|AI::TensorFlow::Libtensorflow::Manual::Notebook::InferenceUsingTFHubCenterNetObjDetect>.

=item C<latest-nb-omnibus>: image containing dependencies for all of the above

notebooks.

=item C<latest-gpu-nb-omnibus>: same as C<latest-nb-omnibus> but with NVIDIA GPU support (see the example command after this list).

=back
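
For example, a GPU-enabled notebook server could be started with the
following (a sketch assuming the NVIDIA Container Runtime is set up as
described above):

  docker run --rm -it --gpus all -p 8888:8888 quay.io/entropyorg/perl-ai-tensorflow-libtensorflow:latest-gpu-nb-omnibus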

=head1 SEE ALSO

=over 4

=item L<TensorFlow|https://www.tensorflow.org/> home page

=back

t/01_hello_tf.t  view on Meta::CPAN

#!/usr/bin/env perl

use Test2::V0;

use lib 't/lib';

use TF_TestQuiet;
use AI::TensorFlow::Libtensorflow;

subtest "Get version of Tensorflow" => sub {
	my $version = AI::TensorFlow::Libtensorflow->Version;
	note $version;
	pass;
};

done_testing;

t/02_load_graph.t  view on Meta::CPAN

#!/usr/bin/env perl

use strict;
use warnings;

use Test::More tests => 1;
use lib 't/lib';

use TF_TestQuiet;
use AI::TensorFlow::Libtensorflow;
use Path::Tiny;

use FFI::Platypus;

subtest "Load graph" => sub {
	my $model_file = path("t/models/graph.pb");
	my $ffi = FFI::Platypus->new( api => 1 );

	my $data = $model_file->slurp_raw;
	my $buf = AI::TensorFlow::Libtensorflow::Buffer->NewFromString(\$data);
	ok $buf;

	my $graph = AI::TensorFlow::Libtensorflow::Graph->New;
	my $status = AI::TensorFlow::Libtensorflow::Status->New;
	my $opts = AI::TensorFlow::Libtensorflow::ImportGraphDefOptions->New;

t/02_load_graph.t  view on Meta::CPAN

	if( $status->GetCode == AI::TensorFlow::Libtensorflow::Status::OK ) {
		print "Load graph success\n";
		pass;
	} else {
		fail;
	}

	pass;
};

done_testing;

t/03_create_tftensor.t  view on Meta::CPAN

#!/usr/bin/env perl

use Test::More tests => 1;

use strict;
use warnings;

use lib 't/lib';
use TF_TestQuiet;
use TF_Utils;

use AI::TensorFlow::Libtensorflow;
use PDL;

subtest "Create a TFTensor" => sub {
	my $p_data = sequence(float, 1, 5, 12);
	my $t = TF_Utils::FloatPDLToTFTensor($p_data);

	is $t->NumDims, 3, '3D TFTensor';

	is $t->Dim(0), 1 , 'dim[0] = 1';
	is $t->Dim(1), 5 , 'dim[1] = 5';
	is $t->Dim(2), 12, 'dim[2] = 12';

	pass;
};

done_testing;

t/04_allocate_tftensor.t  view on Meta::CPAN

#!/usr/bin/env perl

use Test::More tests => 1;

use strict;
use warnings;
use lib 't/lib';

use TF_TestQuiet;
use AI::TensorFlow::Libtensorflow;
use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);
use List::Util qw(product);
use PDL;
use PDL::Core ':Internal';

use FFI::Platypus::Memory;
use FFI::Platypus::Buffer qw(scalar_to_pointer);

subtest "Allocate a TFTensor" => sub {
	my $ffi = FFI::Platypus->new( api => 1 );

	my @dims = ( 1, 5, 12 );
	my $ndims = scalar @dims;
	my $data_size_bytes = product(howbig(float), @dims);
	my $t = AI::TensorFlow::Libtensorflow::Tensor->Allocate(
		FLOAT, \@dims, $data_size_bytes,
	);

	ok $t && $t->Data, 'Allocated TFTensor';

t/04_allocate_tftensor.t  view on Meta::CPAN

	my $pdl = sequence(float, @dims );

	memcpy scalar_to_pointer(${ $t->Data }),
		scalar_to_pointer(${ $pdl->get_dataref }),
		List::Util::min( $t->ByteSize, $data_size_bytes );

	is $t->Type, AI::TensorFlow::Libtensorflow::DataType::FLOAT, 'Check Type is FLOAT';
	is $t->NumDims, $ndims, 'Check NumDims';
};

done_testing;

t/05_session_run.t  view on Meta::CPAN

use PDL::Core;
use AI::TensorFlow::Libtensorflow;
use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);

use aliased 'AI::TensorFlow::Libtensorflow::Output';
use aliased 'AI::TensorFlow::Libtensorflow::Tensor';

use FFI::Platypus::Buffer qw(scalar_to_pointer);
use FFI::Platypus::Memory qw(memcpy);

subtest "Session run" => sub {
	my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
	my $graph = TF_Utils::LoadGraph('t/models/graph.pb');
	ok $graph, 'graph';
	my $input_op = Output->New({
		oper => $graph->OperationByName( 'input_4' ),
		index => 0 });
	die "Can not init input op" unless $input_op;

	use PDL;
	my $p_data = float(

t/05_session_run.t  view on Meta::CPAN


	my $expected_pdl = float( -0.409784, -0.302862, 0.0152587, 0.690515 )->transpose;

	ok approx( $output_pdl, $expected_pdl )->all, 'got expected data';

	$session->Close($status);

	is $status->GetCode, AI::TensorFlow::Libtensorflow::Status::OK, 'status ok';
};

done_testing;

t/AI/TensorFlow/Libtensorflow.t  view on Meta::CPAN

#!/usr/bin/env perl

use Test::More tests => 1;
use strict;
use warnings;

use lib 't/lib';
use TF_TestQuiet;

use AI::TensorFlow::Libtensorflow;

pass;

done_testing;

t/upstream/CAPI/001_Version.t  view on Meta::CPAN

#!/usr/bin/env perl

use Test2::V0;
use lib 't/lib';
use TF_TestQuiet;
use aliased 'AI::TensorFlow::Libtensorflow';

subtest "(CAPI, Version)" => sub {
	note 'Version: ', Libtensorflow->Version;
	isnt Libtensorflow->Version, '';
};

done_testing;

t/upstream/CAPI/002_Status.t  view on Meta::CPAN

#!/usr/bin/env perl

use Test2::V0;
use lib 't/lib';
use TF_TestQuiet;
use aliased 'AI::TensorFlow::Libtensorflow';
use aliased 'AI::TensorFlow::Libtensorflow::Status';

subtest "(CAPI, Status)" => sub {
	my $s = Status->New;
	is $s->GetCode, AI::TensorFlow::Libtensorflow::Status::OK, 'OK code';
	is $s->Message, '', 'empty message';

	note 'Set status to CANCELLED';
	$s->SetStatus('CANCELLED', 'cancel');

	is $s->GetCode, AI::TensorFlow::Libtensorflow::Status::CANCELLED, 'CANCELLED code';
	is $s->Message, 'cancel', 'check set message';
};

done_testing;

t/upstream/CAPI/003_Tensor.t  view on Meta::CPAN

use lib 't/lib';
use TF_TestQuiet;
use aliased 'AI::TensorFlow::Libtensorflow';
use aliased 'AI::TensorFlow::Libtensorflow::Lib';
use aliased 'AI::TensorFlow::Libtensorflow::Tensor';
use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);
use FFI::Platypus::Buffer qw(window scalar_to_pointer);
use FFI::Platypus::Memory qw(memset free);
use AI::TensorFlow::Libtensorflow::Lib::_Alloc;

subtest "(CAPI, Tensor)" => sub {
	my $n = 6;
	my $num_bytes = $n * FLOAT->Size;
	my $values_ptr = AI::TensorFlow::Libtensorflow::Lib::_Alloc->_tf_aligned_alloc($num_bytes);
	window( my $values, $values_ptr, $num_bytes );
	my @dims = (2, 3);

	note "Creating tensor";
	my $deallocator_called = 0;
	my $t = Tensor->New(FLOAT, \@dims, \$values, sub {
			my ($pointer, $size, $arg) = @_;

t/upstream/CAPI/003_Tensor.t  view on Meta::CPAN

	is $t->NumDims, 2, '2D TF_Tensor';
	is $t->Dim(0), $dims[0], 'dim 0';
	is $t->Dim(1), $dims[1], 'dim 1';
	is $t->ByteSize, $num_bytes, 'bytes';
	is scalar_to_pointer(${$t->Data}), scalar_to_pointer($values),
		'data at same pointer address';
	undef $t;
	ok $deallocator_called, 'deallocated';
};

done_testing;

t/upstream/CAPI/004_MalformedTensor.t  view on Meta::CPAN

#!/usr/bin/env perl

use Test2::V0;
use lib 't/lib';
use TF_TestQuiet;
use aliased 'AI::TensorFlow::Libtensorflow';
use aliased 'AI::TensorFlow::Libtensorflow::Tensor';
use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);

subtest "(CAPI, MalformedTensor)" => sub {
	my $noop_dealloc = sub {};
	my $t = Tensor->New(FLOAT, [], \undef, $noop_dealloc);
	ok ! defined $t, 'No data passed in so no tensor created';
};

done_testing;

t/upstream/CAPI/005_AllocateTensor.t  view on Meta::CPAN

#!/usr/bin/env perl

use Test2::V0;
use lib 't/lib';
use TF_TestQuiet;
use aliased 'AI::TensorFlow::Libtensorflow';
use aliased 'AI::TensorFlow::Libtensorflow::Tensor';
use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);

subtest "(CAPI, AllocateTensor)" => sub {
	my $num_bytes = 6 * FLOAT->Size;
	my @dims = (2, 3);
	my $t = Tensor->Allocate(FLOAT, \@dims, $num_bytes);

	cmp_ok $t->Type, '==', FLOAT, 'a FLOAT TFTensor';
	is $t->NumDims, 2,  'with 2 dimensions';
	is $t->Dim(0), $dims[0], 'dim[0]';
	is $t->Dim(1), $dims[1], 'dim[1]';
	is $t->ByteSize, $num_bytes, 'size in bytes';
	is $t->ElementCount, 6, 'with 6 elements';
};

done_testing;

t/upstream/CAPI/006_MaybeMove.t  view on Meta::CPAN

use Test2::V0;
use lib 't/lib';
use TF_TestQuiet;
use aliased 'AI::TensorFlow::Libtensorflow';
use aliased 'AI::TensorFlow::Libtensorflow::Tensor';
use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT);
use FFI::Platypus::Buffer qw(window scalar_to_pointer);
use FFI::Platypus::Memory qw(memset free);
use AI::TensorFlow::Libtensorflow::Lib::_Alloc;

subtest "(CAPI, MaybeMove)" => sub {
	my $num_bytes = 6 * FLOAT->Size;
	window( my $values,
		AI::TensorFlow::Libtensorflow::Lib::_Alloc->_tf_aligned_alloc($num_bytes),
		$num_bytes
	);

	my @dims = (2,3);
	my $deallocator_called = 0;
	my $t = Tensor->New(FLOAT, \@dims, \$values, sub {
		my $pointer = shift;

t/upstream/CAPI/006_MaybeMove.t  view on Meta::CPAN

	ok !$deallocator_called, 'not deallocated';

	my $o = $t->MaybeMove;

	is $o, U(), 'it is unsafe to move memory TF might not own';

	undef $t;
	ok $deallocator_called, 'deallocated'
};

done_testing;

t/upstream/CAPI/007_LibraryLoadFunctions.t  view on Meta::CPAN

#!/usr/bin/env perl

use Test2::V0;
use lib 't/lib';
use TF_TestQuiet;
use aliased 'AI::TensorFlow::Libtensorflow';

subtest "(CAPI, LibraryLoadFunctions)" => sub {
	my $todo = todo 'Test not implemented at this time. No library built to load.';
	pass;
};

done_testing;

t/upstream/CAPI/008_TensorEncodeDecodeStrings.t  view on Meta::CPAN

#!/usr/bin/env perl

use Test2::V0;
use lib 't/lib';
use TF_TestQuiet;
use aliased 'AI::TensorFlow::Libtensorflow';

subtest "(CAPI, TensorEncodeDecodeStrings)" => sub {
	my $todo = todo 'Test not implemented at this time. Upstream test uses C++ tensorflow::Tensor.';
	pass;
};

done_testing;

t/upstream/CAPI/009_SessionOptions.t  view on Meta::CPAN

#!/usr/bin/env perl

use Test2::V0;
use lib 't/lib';
use TF_TestQuiet;
use aliased 'AI::TensorFlow::Libtensorflow';
use aliased 'AI::TensorFlow::Libtensorflow::SessionOptions';

subtest "(CAPI, SessionOptions)" => sub {
	my $opt = SessionOptions->New;
	ok $opt, 'created session options';
};

done_testing;

t/upstream/CAPI/010_DeprecatedSession.t  view on Meta::CPAN

#!/usr/bin/env perl

use Test2::V0;
use lib 't/lib';
use TF_TestQuiet;
use aliased 'AI::TensorFlow::Libtensorflow';

subtest "(CAPI, DeprecatedSession)" => sub {
	my $todo = todo 'DeprecatedSession not implemented.';
	pass;
};

done_testing;

t/upstream/CAPI/011_DataTypeEnum.t  view on Meta::CPAN

#!/usr/bin/env perl

use Test2::V0;
use lib 't/lib';
use TF_TestQuiet;
use aliased 'AI::TensorFlow::Libtensorflow';

subtest "(CAPI, DataTypeEnum)" => sub {
	my $todo = todo 'Test not implemented. Casting between C++ and C DataType enum is not needed.';
	pass;
};

done_testing;

t/upstream/CAPI/012_StatusEnum.t  view on Meta::CPAN

#!/usr/bin/env perl

use Test2::V0;
use lib 't/lib';
use TF_TestQuiet;
use aliased 'AI::TensorFlow::Libtensorflow';

subtest "(CAPI, StatusEnum)" => sub {
	my $todo = todo 'Test not implemented. Casting between C++ and C Status enum is not needed.';
	pass;
};

done_testing;

t/upstream/CAPI/013_GetAllOpList.t  view on Meta::CPAN

#!/usr/bin/env perl

use Test2::V0;
use lib 't/lib';
use TF_TestQuiet;
use aliased 'AI::TensorFlow::Libtensorflow';

subtest "(CAPI, GetAllOpList)" => sub {
	my $buf = AI::TensorFlow::Libtensorflow::TFLibrary->GetAllOpList();
	ok $buf;
};

done_testing;

t/upstream/CAPI/014_SetShape.t  view on Meta::CPAN

use Test2::V0;
use lib 't/lib';
use TF_TestQuiet;
use TF_Utils;
use aliased 'AI::TensorFlow::Libtensorflow';
use aliased 'AI::TensorFlow::Libtensorflow::Graph';
use aliased 'AI::TensorFlow::Libtensorflow::Status';
use aliased 'AI::TensorFlow::Libtensorflow::Output';
use AI::TensorFlow::Libtensorflow::DataType qw(INT32);

subtest "(CAPI, SetShape)" => sub {
	my $s     = Status->New;
	my $graph = Graph->New;

	my $feed = TF_Utils::Placeholder($graph, $s);
	TF_Utils::AssertStatusOK($s);

	my $feed_out_0 = Output->New({ oper => $feed, index => 0 });

	my $num_dims;

t/upstream/CAPI/014_SetShape.t  view on Meta::CPAN

		Try to set 'unknown' with same rank on the shape and see that
		it doesn't change.
	};
	$graph->SetTensorShape($feed_out_0, [-1, -1], $s);
	TF_Utils::AssertStatusOK($s);
	$returned_dims = $graph->GetTensorShape( $feed_out_0, $s );
	TF_Utils::AssertStatusOK($s);
	is $returned_dims, [2,3], 'dims still [2 3]';

	note 'Try to fetch a shape with the wrong num_dims';
	pass 'This test is not implemented for the binding. It is not possible to pass an invalid num_dims argument.';

	note 'Try to set an invalid shape (cannot change 2x3 to a 2x5).';
	$dims->[1] = 5;
	$graph->SetTensorShape( $feed_out_0, $dims, $s);
	note TF_Utils::AssertStatusNotOK($s);

	note 'Test for a scalar.';
	my $three = TF_Utils::ScalarConst($graph, $s, 'scalar', INT32, 3);
	TF_Utils::AssertStatusOK($s);
	my $three_out_0 = Output->New({ oper => $three, index => 0 });

	$num_dims = $graph->GetTensorNumDims( $three_out_0, $s );
	TF_Utils::AssertStatusOK($s);
	is $num_dims, 0, 'zero dims';
	$returned_dims = $graph->GetTensorShape( $three_out_0, $s );
	is $returned_dims, [], 'dims is empty ArrayRef';
};

done_testing;


