AI-TensorFlow-Libtensorflow
# PODNAME: AI::TensorFlow::Libtensorflow::Manual::Notebook::InferenceUsingTFHubCenterNetObjDetect
## DO NOT EDIT. Generated from notebook/InferenceUsingTFHubCenterNetObjDetect.ipynb using ./maint/process-notebook.pl.
use strict;
use warnings;
use utf8;
use constant IN_IPERL => !! $ENV{PERL_IPERL_RUNNING};
no if IN_IPERL, warnings => 'redefine'; # fewer messages when re-running cells
use feature qw(say state postderef);
use Syntax::Construct qw(each-array);
use lib::projectroot qw(lib);
BEGIN {
if( IN_IPERL ) {
$ENV{TF_CPP_MIN_LOG_LEVEL} = 3;  # quiet TensorFlow's C++ logging before the library loads
}
require AI::TensorFlow::Libtensorflow;
}
use URI ();
use HTTP::Tiny ();
use Path::Tiny qw(path);
use File::Which ();
use List::Util 1.56 qw(mesh);
use Data::Printer ( output => 'stderr', return_value => 'void', filters => ['PDL'] );
use Data::Printer::Filter::PDL ();
use Text::Table::Tiny qw(generate_table);
use Imager;
my $s = AI::TensorFlow::Libtensorflow::Status->New;
sub AssertOK {
die "Status $_[0]: " . $_[0]->Message
unless $_[0]->GetCode == AI::TensorFlow::Libtensorflow::Status::OK;
return;
}
AssertOK($s);
use PDL;
use AI::TensorFlow::Libtensorflow::DataType qw(FLOAT UINT8);
use FFI::Platypus::Memory qw(memcpy);
use FFI::Platypus::Buffer qw(scalar_to_pointer);
sub FloatPDLTOTFTensor {
my ($p) = @_;
return AI::TensorFlow::Libtensorflow::Tensor->New(
FLOAT, [ reverse $p->dims ], $p->get_dataref, sub { undef $p }
);
}
sub FloatTFTensorToPDL {
my ($t) = @_;
my $pdl = zeros(float,reverse( map $t->Dim($_), 0..$t->NumDims-1 ) );
memcpy scalar_to_pointer( ${$pdl->get_dataref} ),
  scalar_to_pointer( ${$t->Data} ), $t->ByteSize;
$pdl->upd_data;
$pdl;
}
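The C<UINT8> counterparts of these helpers (C<Uint8PDLTOTFTensor> is called further below) are defined in a portion of the notebook that is not shown on this page. Assuming they simply mirror the C<FLOAT> pair above, a sketch of them would be:
# Sketch only: assumed to mirror the FLOAT helpers above; the notebook's own
# definitions are in a portion not shown on this page.
sub Uint8PDLTOTFTensor {
  my ($p) = @_;
  return AI::TensorFlow::Libtensorflow::Tensor->New(
    UINT8, [ reverse $p->dims ], $p->get_dataref, sub { undef $p }
  );
}
sub Uint8TFTensorToPDL {
  my ($t) = @_;
  my $pdl = zeros(byte, reverse( map $t->Dim($_), 0..$t->NumDims-1 ) );
  memcpy scalar_to_pointer( ${$pdl->get_dataref} ),
    scalar_to_pointer( ${$t->Data} ), $t->ByteSize;
  $pdl->upd_data;
  $pdl;
}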
width => '100%',
})
),
);
}
=head2 Download the test image and transform it into suitable input data
We now fetch the image and use C<Imager> to prepare it in the format the model needs. Note that this model does not require the input image to be of a fixed size, so no resizing or padding is needed.
Then we turn the C<Imager> data into a C<PDL> ndarray. Since we just need the 3 channels of the image as they are, they can be stored directly in a C<PDL> ndarray of type C<byte>.
Even though the model only takes a single image at a time, we still concatenate the C<PDL> ndarrays so that we end up with a four-dimensional ndarray whose last C<PDL> dimension has size one; since C<PDL> dimension order is the reverse of the C<TFTensor> dimension order, that trailing dimension becomes the leading batch dimension of size one that the model expects.
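As a small illustration (not part of the original notebook), C<cat> applied to a single C<[channels, width, height]> ndarray adds a trailing dimension of size one, which becomes the leading batch dimension once the dimensions are reversed for the C<TFTensor>:
# Illustration only: a dummy 3-channel, 4x2 "image"
my $one_image = zeros( byte, 3, 4, 2 );
my $batched   = cat( $one_image );
say join ', ', $batched->dims;   # 3, 4, 2, 1
# reversed for the TFTensor => shape [1, 2, 4, 3] (batch, height, width, channels)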
sub load_image_to_pdl {
my ($uri, $image_size) = @_;
my $http = HTTP::Tiny->new;
my $response = $http->get( $uri );
die "Could not fetch image from $uri" unless $response->{success};
say "Downloaded $uri";
my $img = Imager->new;
$img->read( data => $response->{content} );
# Create PDL ndarray from Imager data in-memory.
my $data;
$img->write( data => \$data, type => 'raw' )
or die "could not write ". $img->errstr;
die "Image does not have 3 channels, it has @{[ $img->getchannels ]} channels"
if $img->getchannels != 3;
# $data is raw interleaved RGB bytes, i.e. PDL dims [channels, width, height]
my $pdl_raw = zeros(byte, $img->getchannels, $img->getwidth, $img->getheight);
${ $pdl_raw->get_dataref } = $data;
$pdl_raw->upd_data;
$pdl_raw;
}
my @pdl_images = map {
load_image_to_pdl(
$images_for_test_to_uri{$_},
$model_name_to_params{$model_name}{image_size}
);
} ($image_names[0]);
my $pdl_image_batched = cat(@pdl_images);
my $t = Uint8PDLTOTFTensor($pdl_image_batched);
die "There should be 4 dimensions" unless $pdl_image_batched->ndims == 4;
die "With the final dimension of length 1" unless $pdl_image_batched->dim(3) == 1;
p $pdl_image_batched;
p $t;
=head2 Run the model for inference
We can use the C<Run> method to run the session and get the multiple output C<TFTensor>s. The following uses the names in the C<%outputs> mapping so that the multiple outputs are easier to process by name.
my $RunSession = sub {
my ($session, $t) = @_;
my @outputs_t;
my @keys = keys %{ $outputs{out} };
my @values = $outputs{out}->@{ @keys };
$session->Run(
undef,
[ values %{$outputs{in} } ], [$t],
\@values, \@outputs_t,
undef,
undef,
$s
);
AssertOK($s);
return { mesh \@keys, \@outputs_t };
};
undef; # returning undef suppresses this cell's output when running under IPerl
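The C<%outputs> mapping itself is built in a part of the notebook that is not shown on this page: it maps short names to C<AI::TensorFlow::Libtensorflow::Output> handles for the graph's input and output operations. A rough sketch of the idea (the operation names, output indices, and the C<$graph> variable here are assumptions, not the notebook's exact code):
# Sketch only: $graph is assumed to be the loaded SavedModel graph; the
# operation names and output indices are illustrative assumptions.
my $call_op = $graph->OperationByName('StatefulPartitionedCall');
my %outputs = (
  in  => {
    input_tensor => AI::TensorFlow::Libtensorflow::Output->New({
      oper => $graph->OperationByName('serving_default_input_tensor'), index => 0 }),
  },
  out => {
    map {
      my ($name, $index) = @$_;
      ( $name => AI::TensorFlow::Libtensorflow::Output->New({ oper => $call_op, index => $index }) )
    } [ detection_boxes => 0 ], [ detection_classes => 1 ],
      [ detection_scores => 2 ], [ num_detections => 3 ]
  },
);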
my $tftensor_output_by_name = $RunSession->($session, $t);
my %pdl_output_by_name = map {
$_ => FloatTFTensorToPDL( $tftensor_output_by_name->{$_} )
} keys $tftensor_output_by_name->%*;
undef;
=head2 Results summary
Then we use a score threshold to select the objects of interest.
my $min_score_thresh = 0.30;
my $which_detect = which( $pdl_output_by_name{detection_scores} > $min_score_thresh );
my %subset;
$subset{detection_boxes} = $pdl_output_by_name{detection_boxes}->dice('X', $which_detect);
$subset{detection_classes} = $pdl_output_by_name{detection_classes}->dice($which_detect);
$subset{detection_scores} = $pdl_output_by_name{detection_scores}->dice($which_detect);
$subset{detection_class_labels}->@* = map { $labels_map{$_} } $subset{detection_classes}->list;
p %subset;
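As an aside (not part of the original notebook), C<Text::Table::Tiny>, which was loaded at the top, can print the selected detections as a small table:
# Aside (not in the original notebook): summarise the kept detections.
my $scores = $subset{detection_scores}->flat;
my @rows   = ( [ 'label', 'score' ] );
for my $i ( 0 .. $scores->nelem - 1 ) {
  push @rows, [
    $subset{detection_class_labels}[$i],
    sprintf( '%.2f', $scores->at($i) ),
  ];
}
say generate_table( rows => \@rows, header_row => 1 );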
The following uses the bounding boxes and class label information to draw boxes and labels on top of the image using Gnuplot.
use PDL::Graphics::Gnuplot;
my $plot_output_path = 'objects-detected.png';
my $gp = gpwin('pngcairo', font => ",12", output => $plot_output_path, aa => 2, size => [10] );
my @qual_cmap = ('#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6');
$gp->options(