lib/AI/TensorFlow/Libtensorflow.pm
use AI::TensorFlow::Libtensorflow::DeviceList;
use AI::TensorFlow::Libtensorflow::Eager::ContextOptions;
use AI::TensorFlow::Libtensorflow::Eager::Context;
use FFI::C;
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
FFI::C->ffi($ffi);
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
sub new {
my ($class) = @_;
bless {}, $class;
}
$ffi->attach( 'Version' => [], 'string' );
1;
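A minimal usage sketch for the binding above: `Version` is attached as a plain function, which the default mangler resolves to the C symbol `TF_Version`.

  use AI::TensorFlow::Libtensorflow;

  # Prints the version string reported by the libtensorflow C library
  # (the exact value depends on the installed library).
  print AI::TensorFlow::Libtensorflow::Version(), "\n";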
lib/AI/TensorFlow/Libtensorflow/ApiDefMap.pm
package AI::TensorFlow::Libtensorflow::ApiDefMap;
# ABSTRACT: Maps Operation to API description
$AI::TensorFlow::Libtensorflow::ApiDefMap::VERSION = '0.0.7';
use strict;
use warnings;
use namespace::autoclean;
use AI::TensorFlow::Libtensorflow::Lib qw(arg);
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
$ffi->attach( [ 'NewApiDefMap' => 'New' ] => [
arg 'TF_Buffer' => 'op_list_buffer',
arg 'TF_Status' => 'status',
] => 'TF_ApiDefMap' => sub {
my ($xs, $class, @rest) = @_;  # invoked as a class method,
$xs->(@rest);                  # so drop the class before calling the C function
});
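A hedged usage sketch for the constructor above; `$op_list_buf` and `$status` are hypothetical, standing in for a TF_Buffer holding a serialized OpList and a TF_Status:

  # Hypothetical objects constructed elsewhere:
  #   $op_list_buf - TF_Buffer containing a serialized OpList proto
  #   $status      - AI::TensorFlow::Libtensorflow::Status instance
  my $api_map = AI::TensorFlow::Libtensorflow::ApiDefMap->New($op_list_buf, $status);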
$ffi->attach( ['DeleteApiDefMap' => 'DESTROY'] => [
lib/AI/TensorFlow/Libtensorflow/Buffer.pm
package AI::TensorFlow::Libtensorflow::Buffer;
# ABSTRACT: Buffer that holds pointer to data with length
$AI::TensorFlow::Libtensorflow::Buffer::VERSION = '0.0.7';
use strict;
use warnings;
use namespace::autoclean;
use AI::TensorFlow::Libtensorflow::Lib qw(arg);
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
use FFI::C;
FFI::C->ffi($ffi);
$ffi->load_custom_type('AI::TensorFlow::Libtensorflow::Lib::FFIType::TFPtrSizeScalarRef'
=> 'tf_buffer_buffer'
);
use FFI::Platypus::Buffer;
use FFI::Platypus::Memory;
lib/AI/TensorFlow/Libtensorflow/DeviceList.pm
package AI::TensorFlow::Libtensorflow::DeviceList;
# ABSTRACT: A list of devices available for the session to run on
$AI::TensorFlow::Libtensorflow::DeviceList::VERSION = '0.0.7';
use strict;
use warnings;
use namespace::autoclean;
use AI::TensorFlow::Libtensorflow::Lib qw(arg);
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
$ffi->attach( [ 'DeleteDeviceList' => 'DESTROY' ] => [
arg TF_DeviceList => 'list',
] => 'void' );
$ffi->attach( [ 'DeviceListCount' => 'Count' ] => [
arg TF_DeviceList => 'list',
] => 'int' );
my %methods = (
lib/AI/TensorFlow/Libtensorflow/Eager/Context.pm
package AI::TensorFlow::Libtensorflow::Eager::Context;
# ABSTRACT: Eager context
$AI::TensorFlow::Libtensorflow::Eager::Context::VERSION = '0.0.7';
use strict;
use warnings;
use AI::TensorFlow::Libtensorflow::Lib qw(arg);
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
$ffi->attach( [ 'NewContext' => 'New' ] => [
arg TFE_ContextOptions => 'opts',
arg TF_Status => 'status'
] => 'TFE_Context' => sub {
my ($xs, $class, @rest) = @_;
$xs->(@rest);
} );
__END__
lib/AI/TensorFlow/Libtensorflow/Eager/ContextOptions.pm
package AI::TensorFlow::Libtensorflow::Eager::ContextOptions;
# ABSTRACT: Eager context options
$AI::TensorFlow::Libtensorflow::Eager::ContextOptions::VERSION = '0.0.7';
use strict;
use warnings;
use AI::TensorFlow::Libtensorflow::Lib qw(arg);
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
$ffi->attach( [ 'NewContextOptions' => 'New' ] => [
] => 'TFE_ContextOptions' );
$ffi->attach( [ 'DeleteContextOptions' => 'DESTROY' ] => [
arg TFE_ContextOptions => 'options'
] => 'void' );
1;
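Putting the two Eager classes together, a hedged sketch (this assumes AI::TensorFlow::Libtensorflow::Status->New for the status argument):

  use AI::TensorFlow::Libtensorflow::Eager::ContextOptions;
  use AI::TensorFlow::Libtensorflow::Eager::Context;
  use AI::TensorFlow::Libtensorflow::Status;

  my $status = AI::TensorFlow::Libtensorflow::Status->New;
  my $opts   = AI::TensorFlow::Libtensorflow::Eager::ContextOptions->New;
  my $ctx    = AI::TensorFlow::Libtensorflow::Eager::Context->New($opts, $status);
  # $opts is released by its DESTROY when it goes out of scope.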
lib/AI/TensorFlow/Libtensorflow/Graph.pm
package AI::TensorFlow::Libtensorflow::Graph;
# ABSTRACT: A TensorFlow computation, represented as a dataflow graph
$AI::TensorFlow::Libtensorflow::Graph::VERSION = '0.0.7';
use strict;
use warnings;
use namespace::autoclean;
use AI::TensorFlow::Libtensorflow::Lib qw(arg);
use AI::TensorFlow::Libtensorflow::Buffer;
use AI::TensorFlow::Libtensorflow::Output;
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
$ffi->attach( [ 'NewGraph' => 'New' ] => [] => 'TF_Graph' );
$ffi->attach( [ 'DeleteGraph' => 'DESTROY' ] => [ arg 'TF_Graph' => 'self' ], 'void' );
$ffi->attach( [ 'GraphImportGraphDef' => 'ImportGraphDef' ] => [
arg 'TF_Graph' => 'graph',
arg 'TF_Buffer' => 'graph_def',
arg 'TF_ImportGraphDefOptions' => 'options',
arg 'TF_Status' => 'status',
lib/AI/TensorFlow/Libtensorflow/ImportGraphDefOptions.pm
package AI::TensorFlow::Libtensorflow::ImportGraphDefOptions;
# ABSTRACT: Holds options that can be passed to ::Graph::ImportGraphDef
$AI::TensorFlow::Libtensorflow::ImportGraphDefOptions::VERSION = '0.0.7';
use strict;
use warnings;
use namespace::autoclean;
use AI::TensorFlow::Libtensorflow::Lib qw(arg);
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
$ffi->attach( [ 'NewImportGraphDefOptions' => 'New' ] => [] => 'TF_ImportGraphDefOptions' );
$ffi->attach( [ 'DeleteImportGraphDefOptions' => 'DESTROY' ] => [
arg 'TF_ImportGraphDefOptions' => 'self',
] => 'void' );
$ffi->attach( [ 'ImportGraphDefOptionsSetPrefix' => 'SetPrefix' ] => [
arg 'TF_ImportGraphDefOptions' => 'opts',
arg 'string' => 'prefix',
lib/AI/TensorFlow/Libtensorflow/ImportGraphDefResults.pm
package AI::TensorFlow::Libtensorflow::ImportGraphDefResults;
# ABSTRACT: Results from importing a graph definition
$AI::TensorFlow::Libtensorflow::ImportGraphDefResults::VERSION = '0.0.7';
use strict;
use warnings;
use namespace::autoclean;
use AI::TensorFlow::Libtensorflow::Lib qw(arg);
use FFI::Platypus::Buffer qw(buffer_to_scalar window);
use List::Util ();
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
$ffi->attach( [ 'DeleteImportGraphDefResults' => 'DESTROY' ] => [
arg TF_ImportGraphDefResults => 'results',
] => 'void' );
$ffi->attach( [ 'ImportGraphDefResultsReturnOutputs' => 'ReturnOutputs' ] => [
arg TF_ImportGraphDefResults => 'results',
arg 'int*' => 'num_outputs',
arg 'opaque*' => { id => 'outputs', type => 'TF_Output_struct_array*' },
] => 'void' => sub {
lib/AI/TensorFlow/Libtensorflow/Input.pm
# in case the upstream API changes.
use strict;
use warnings;
use namespace::autoclean;
use FFI::Platypus::Record;
use AI::TensorFlow::Libtensorflow::Lib::FFIType::Variant::RecordArrayRef;
use AI::TensorFlow::Libtensorflow::Lib;
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
record_layout_1($ffi,
'opaque' => '_oper', # 8 (on 64-bit)
'int' => '_index', # 4
$ffi->sizeof('opaque') == 8 ? (
'char[4]' => ':',
) : (),
);
$ffi->type('record(AI::TensorFlow::Libtensorflow::Input)', 'TF_Input');
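A quick sanity sketch of the padded layout above; my assumption is a 64-bit platform, where the char[4] padding brings the record to 16 bytes:

  # 8 (opaque) + 4 (int) + 4 (padding) == 16 on 64-bit
  print $ffi->sizeof('TF_Input'), "\n";   # expect 16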
lib/AI/TensorFlow/Libtensorflow/Lib.pm
## Callbacks for deallocation
# For TF_Buffer
$ffi->type('(opaque,size_t)->void' => 'data_deallocator_t');
# For TF_Tensor
$ffi->type('(opaque,size_t,opaque)->void' => 'tensor_deallocator_t');
$ffi;
};
}
sub mangler_default {
my $target = (caller)[0];
my $prefix = 'TF';
if( $target =~ /::Eager::/ ) {
$prefix = 'TFE';
}
sub {
my ($name) = @_;
"${prefix}_$name";
}
}
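For illustration, a sketch of the mapping mangler_default produces; the prefix depends on the calling package's name:

  use AI::TensorFlow::Libtensorflow::Lib;

  my $mangler = AI::TensorFlow::Libtensorflow::Lib->mangler_default;
  print $mangler->('NewGraph'), "\n";   # TF_NewGraph (caller not in ::Eager::)
  # From a package matching /::Eager::/, 'NewContext' would instead mangle
  # to 'TFE_NewContext'.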
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
`dx` are used as initial gradients (which represent the symbolic partial
derivatives of some loss function `L` w.r.t. `y`).
`dx` must be nullptr or have size `ny`.
If `dx` is nullptr, the implementation will use dx of `OnesLike` for all
shapes in `y`.
The partial derivatives are returned in `dy`. `dy` should be allocated to
size `nx`.
`prefix` names the scope into which all gradients operations are being added.
`prefix` must be unique within the provided graph otherwise this operation
will fail. If `prefix` is nullptr, the default prefixing behaviour takes
place, see TF_AddGradients for more details.
WARNING: This function does not yet support all the gradients that python
supports. See
https://www.tensorflow.org/code/tensorflow/cc/gradients/README.md
for instructions on how to add more C++ gradients.
=back
/* From <tensorflow/c/c_api.h> */
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern void TF_DeleteOpDefinitionBuilder(
TF_OpDefinitionBuilder* builder);
=head2 TF_OpDefinitionBuilderAddAttr
=over 2
Adds an attr to the given TF_OpDefinitionBuilder. The spec has
format "<name>:<type>" or "<name>:<type>=<default>"
where <name> matches regexp [a-zA-Z][a-zA-Z0-9_]*.
By convention, names containing only capital letters are reserved for
attributes whose values can be inferred by the operator implementation if not
supplied by the user. If the attribute name contains characters other than
capital letters, the operator expects the user to provide the attribute value
at operation runtime.
<type> can be:
"string", "int", "float", "bool", "type", "shape", or "tensor"
"numbertype", "realnumbertype", "quantizedtype"
(meaning "type" with a restriction on valid values)
"{int32,int64}" or {realnumbertype,quantizedtype,string}"
(meaning "type" with a restriction containing unions of value types)
"{\"foo\", \"bar\n baz\"}", or "{'foo', 'bar\n baz'}"
(meaning "string" with a restriction on valid values)
"list(string)", ..., "list(tensor)", "list(numbertype)", ...
(meaning lists of the above types)
"int >= 2" (meaning "int" with a restriction on valid values)
"list(string) >= 2", "list(int) >= 2"
(meaning "list(string)" / "list(int)" with length at least 2)
<default>, if included, should use the Proto text format
of <type>. For lists use [a, b, c] format.
Note that any attr specifying the length of an input or output will
get a default minimum of 1 unless the >= # syntax is used.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern void TF_OpDefinitionBuilderAddAttr(
TF_OpDefinitionBuilder* builder, const char* attr_spec);
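To make the spec grammar concrete, here are a few hypothetical attr-spec strings in the format documented above (shown only as Perl data; C<TF_OpDefinitionBuilderAddAttr> itself is a C-level call not shown attached in these bindings):

  # Hypothetical attr specs, one per documented form:
  my @attr_specs = (
      'T: {float, double}',         # "type" restricted to a union of values
      'N: int >= 2',                # "int" with a minimum value
      'strides: list(int)',         # a list of a base type
      'padding: string = "SAME"',   # "string" with a Proto-text default
  );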
=head2 TF_OpDefinitionBuilderAddInput
=over 2
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=over 2
Sets the is_stateful property of the builder to the given value.
The op built by this builder is stateful if its behavior depends on some
state beyond its input tensors (e.g. variable reading op) or if it has a
side-effect (e.g. printing or asserting ops). Equivalently, stateless ops
must always produce the same output for the same input and have no
side-effects.
By default Ops may be moved between devices. Stateful ops should either not
be moved, or should only be moved if that state can also be moved (e.g. via
some sort of save / restore). Stateful ops are guaranteed to never be
optimized away by Common Subexpression Elimination (CSE).
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern void TF_OpDefinitionBuilderSetIsStateful(
TF_OpDefinitionBuilder* builder, bool is_stateful);
=head2 TF_OpDefinitionBuilderSetAllowsUninitializedInput
=over 2
Sets the allows_uninitialized_input property of the operation built by this
builder.
By default, all inputs to an Op must be initialized Tensors. Ops that may
initialize tensors for the first time should set this field to true, to allow
the Op to take an uninitialized Tensor as input.
=back
/* From <tensorflow/c/ops.h> */
TF_CAPI_EXPORT extern void TF_OpDefinitionBuilderSetAllowsUninitializedInput(
TF_OpDefinitionBuilder* builder, bool allows_uninitialized_input);
=head2 TF_OpDefinitionBuilderDeprecated
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern uint64_t TF_NowSeconds(void);
=head2 TF_DefaultThreadOptions
=over 2
Populates a TF_ThreadOptions struct with system-default values.
=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern void TF_DefaultThreadOptions(TF_ThreadOptions* options);
=head2 TF_StartThread
=over 2
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_KernelBuilder_TypeConstraint(
TF_KernelBuilder* kernel_builder, const char* attr_name,
const TF_DataType type, TF_Status* status);
=head2 TF_KernelBuilder_HostMemory
=over 2
Specify that this kernel requires/provides an input/output arg
in host memory (instead of the default, device memory).
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern void TF_KernelBuilder_HostMemory(
TF_KernelBuilder* kernel_builder, const char* arg_name);
=head2 TF_KernelBuilder_Priority
=over 2
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern TF_StringView TF_GetOpKernelName(TF_OpKernelContext* ctx);
=head2 TF_GetResourceMgrDefaultContainerName
=over 2
Returns the default container of the resource manager in OpKernelContext.
The returned TF_StringView's underlying string is owned by the OpKernel and
has the same lifetime as the OpKernel.
=back
/* From <tensorflow/c/kernels.h> */
TF_CAPI_EXPORT extern TF_StringView TF_GetResourceMgrDefaultContainerName(
TF_OpKernelContext* ctx);
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_ContextOptionsSetConfig(
TFE_ContextOptions* options, const void* proto, size_t proto_len,
TF_Status* status);
=head2 TFE_ContextOptionsSetAsync
=over 2
Sets the default execution mode (sync/async). Note that this can be
overridden per thread using TFE_ContextSetExecutorForThread.
=back
/* From <tensorflow/c/eager/c_api.h> */
TF_CAPI_EXPORT extern void TFE_ContextOptionsSetAsync(TFE_ContextOptions*,
unsigned char enable);
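This setter is not shown attached in the Perl modules above; as a hedged sketch, it could be bound in the same style as the other C<TFE_> attaches (hypothetical; assumes the C<$ffi> and mangler set up in AI::TensorFlow::Libtensorflow::Eager::ContextOptions):

  # Hypothetical attach, mirroring the style used elsewhere in this dist:
  $ffi->attach( [ 'ContextOptionsSetAsync' => 'SetAsync' ] => [
      arg TFE_ContextOptions => 'options',
      arg 'uint8'            => 'enable',   # unsigned char in the C API
  ] => 'void' );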
=head2 TFE_ContextOptionsSetDevicePlacementPolicy
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern bool TFE_ContextCheckAlive(TFE_Context* ctx,
const char* worker_name,
TF_Status* status);
=head2 TFE_ContextAsyncWait
=over 2
Sync pending nodes in local executors (including the context default executor
and thread executors) and streaming requests to remote executors, and get the
combined status.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern void TFE_ContextAsyncWait(TFE_Context* ctx,
TF_Status* status);
=head2 TFE_TensorHandleDevicePointer
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod
say "We have a label count of $label_count. These labels include: ",
join ", ", List::Util::head( 5, @labels_map{ sort keys %labels_map } );
my @tags = ( 'serve' );
if( File::Which::which('saved_model_cli')) {
local $ENV{TF_CPP_MIN_LOG_LEVEL} = 3; # quiet the TensorFlow logger for the following command
system(qw(saved_model_cli show),
qw(--dir) => $model_base,
qw(--tag_set) => join(',', @tags),
qw(--signature_def) => 'serving_default'
) == 0 or die "Could not run saved_model_cli";
} else {
say "Install the tensorflow Python package to get the `saved_model_cli` command.";
}
my $opt = AI::TensorFlow::Libtensorflow::SessionOptions->New;
my $graph = AI::TensorFlow::Libtensorflow::Graph->New;
my $session = AI::TensorFlow::Libtensorflow::Session->LoadFromSavedModel(
$opt, undef, $model_base, \@tags, $graph, undef, $s
);
AssertOK($s);
my %ops = (
in => {
op => $graph->OperationByName('serving_default_input_tensor'),
dict => {
input_tensor => 0,
}
},
out => {
op => $graph->OperationByName('StatefulPartitionedCall'),
dict => {
detection_boxes => 0,
detection_classes => 1,
detection_scores => 2,
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod
my @tags = ( 'serve' );
We can examine the computations contained in the graph, in terms of the names of the inputs and outputs of its operations, by running C<saved_model_cli>.
if( File::Which::which('saved_model_cli')) {
local $ENV{TF_CPP_MIN_LOG_LEVEL} = 3; # quiet the TensorFlow logger for the following command
system(qw(saved_model_cli show),
qw(--dir) => $model_base,
qw(--tag_set) => join(',', @tags),
qw(--signature_def) => 'serving_default'
) == 0 or die "Could not run saved_model_cli";
} else {
say "Install the tensorflow Python package to get the `saved_model_cli` command.";
}
The above C<saved_model_cli> output shows that the model input is at C<serving_default_input_tensor:0>, which means the operation named C<serving_default_input_tensor> at index C<0>, and that there are multiple outputs with different shapes.
Per the L<model description|https://tfhub.dev/tensorflow/centernet/hourglass_512x512/1> on TensorFlow Hub:
=over 2
B<Inputs>
A three-channel image of variable size - the model does NOT support batching. The input tensor is a C<tf.uint8> tensor with shape [1, height, width, 3] with values in [0, 255].
B<Outputs>
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod
my $graph = AI::TensorFlow::Libtensorflow::Graph->New;
my $session = AI::TensorFlow::Libtensorflow::Session->LoadFromSavedModel(
$opt, undef, $model_base, \@tags, $graph, undef, $s
);
AssertOK($s);
So let's use the names from the C<saved_model_cli> output to create our C<::Output> C<ArrayRef>s.
my %ops = (
in => {
op => $graph->OperationByName('serving_default_input_tensor'),
dict => {
input_tensor => 0,
}
},
out => {
op => $graph->OperationByName('StatefulPartitionedCall'),
dict => {
detection_boxes => 0,
detection_classes => 1,
detection_scores => 2,
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod
my $graph = AI::TensorFlow::Libtensorflow::Graph->New;
my $session = AI::TensorFlow::Libtensorflow::Session->LoadFromSavedModel(
$opt, undef, $new_model_base, \@tags, $graph, undef, $s
);
AssertOK($s);
my %puts = (
## Inputs
inputs_args_0 =>
AI::TensorFlow::Libtensorflow::Output->New({
oper => $graph->OperationByName('serving_default_args_0'),
index => 0,
}),
## Outputs
outputs_human =>
AI::TensorFlow::Libtensorflow::Output->New({
oper => $graph->OperationByName('StatefulPartitionedCall'),
index => 0,
}),
outputs_mouse =>
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod
}
say "Checking with saved_model_cli scan:";
saved_model_cli( qw(scan),
qw(--dir) => $model_base,
);
B<STREAM (STDOUT)>:
Checking with saved_model_cli scan:
MetaGraph with tag set ['serve'] does not contain the default denylisted ops: {'ReadFile', 'PrintV2', 'WriteFile'}
B<RESULT>:
1
We need to see what the inputs and outputs of this model are; C<saved_model_cli show> can tell us:
saved_model_cli( qw(show),
qw(--dir) => $model_base,
qw(--all),
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod
signature_def['__saved_model_init_op']:
The given SavedModel SignatureDef contains the following input(s):
The given SavedModel SignatureDef contains the following output(s):
outputs['__saved_model_init_op'] tensor_info:
dtype: DT_INVALID
shape: unknown_rank
name: NoOp
Method name is:
signature_def['serving_default']:
The given SavedModel SignatureDef contains the following input(s):
inputs['args_0'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 393216, 4)
name: serving_default_args_0:0
The given SavedModel SignatureDef contains the following output(s):
outputs['human'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 896, 5313)
name: StatefulPartitionedCall:0
outputs['mouse'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 896, 1643)
name: StatefulPartitionedCall:1
Method name is: tensorflow/serving/predict
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod
B<RESULT>:
1
We want to use the C<serve> tag-set and
=over
=item *
the input C<args_0> which has the name C<serving_default_args_0:0> and
=item *
the output C<human> which has the name C<StatefulPartitionedCall:0>.
=back
all of which are C<DT_FLOAT>.
Make note of the shapes that those take. Per the L<model description|https://tfhub.dev/deepmind/enformer/1> at TensorFlow Hub:
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod
my $graph = AI::TensorFlow::Libtensorflow::Graph->New;
my $session = AI::TensorFlow::Libtensorflow::Session->LoadFromSavedModel(
$opt, undef, $new_model_base, \@tags, $graph, undef, $s
);
AssertOK($s);
my %puts = (
## Inputs
inputs_args_0 =>
AI::TensorFlow::Libtensorflow::Output->New({
oper => $graph->OperationByName('serving_default_args_0'),
index => 0,
}),
## Outputs
outputs_human =>
AI::TensorFlow::Libtensorflow::Output->New({
oper => $graph->OperationByName('StatefulPartitionedCall'),
index => 0,
}),
outputs_mouse =>
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod
);
p %puts;
B<STREAM (STDERR)>:
=for html <span style="display:inline-block;margin-left:1em;"><pre style="display: block"><code><span style="color: #33ccff;">{</span><span style="">
</span><span style="color: #6666cc;">inputs_args_0</span><span style="color: #33ccff;"> </span><span style="color: #cc66cc;">AI::TensorFlow::Libtensorflow::Output</span><span style=""> </span><span style="color: #33ccff;">{</span><span style=""...
</span><span style="color: #6666cc;">index</span><span style="color: #33ccff;"> </span><span style="color: #ff6633;">0</span><span style="color: #33ccff;">,</span><span style="">
</span><span style="color: #6666cc;">oper</span><span style=""> </span><span style="color: #33ccff;"> </span><span style="color: #cc66cc;">AI::TensorFlow::Libtensorflow::Operation</span><span style=""> </span><span style="color: #33ccff;">{...
</span><span style="color: #6666cc;">Name</span><span style=""> </span><span style="color: #33ccff;"> </span><span style="color: #33ccff;">"</span><span style="color: #669933;">serving_default_args_0</span><span style="color: ...
</span><span style="color: #6666cc;">NumInputs</span><span style=""> </span><span style="color: #33ccff;"> </span><span style="color: #ff6633;">0</span><span style="color: #33ccff;">,</span><span style="">
</span><span style="color: #6666cc;">NumOutputs</span><span style="color: #33ccff;"> </span><span style="color: #ff6633;">1</span><span style="color: #33ccff;">,</span><span style="">
</span><span style="color: #6666cc;">OpType</span><span style=""> </span><span style="color: #33ccff;"> </span><span style="color: #33ccff;">"</span><span style="color: #669933;">Placeholder</span><span style="color: #33ccff;">&...
</span><span style="color: #33ccff;">}</span><span style="">
</span><span style="color: #33ccff;">}</span><span style="color: #33ccff;">,</span><span style="">
</span><span style="color: #6666cc;">outputs_human</span><span style="color: #33ccff;"> </span><span style="color: #cc66cc;">AI::TensorFlow::Libtensorflow::Output</span><span style=""> </span><span style="color: #33ccff;">{</span><span style=""...
</span><span style="color: #6666cc;">index</span><span style="color: #33ccff;"> </span><span style="color: #ff6633;">0</span><span style="color: #33ccff;">,</span><span style="">
</span><span style="color: #6666cc;">oper</span><span style=""> </span><span style="color: #33ccff;"> </span><span style="color: #cc66cc;">AI::TensorFlow::Libtensorflow::Operation</span><span style=""> </span><span style="color: #33ccff;">{...
</span><span style="color: #6666cc;">Name</span><span style=""> </span><span style="color: #33ccff;"> </span><span style="color: #33ccff;">"</span><span style="color: #669933;">StatefulPartitionedCall</span><span style="color:...
</span><span style="color: #6666cc;">NumInputs</span><span style=""> </span><span style="color: #33ccff;"> </span><span style="color: #ff6633;">274</span><span style="color: #33ccff;">,</span><span style="">
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod
unless @labels == IMAGENET_LABEL_COUNT_WITH_BG;
say "Got labels: ", join( ", ", List::Util::head(5, @labels) ), ", etc.";
my @tags = ( 'serve' );
if( File::Which::which('saved_model_cli')) {
local $ENV{TF_CPP_MIN_LOG_LEVEL} = 3; # quiet the TensorFlow logger for the following command
system(qw(saved_model_cli show),
qw(--dir) => $model_base,
qw(--tag_set) => join(',', @tags),
qw(--signature_def) => 'serving_default'
) == 0 or die "Could not run saved_model_cli";
} else {
say "Install the tensorflow Python package to get the `saved_model_cli` command.";
}
my $opt = AI::TensorFlow::Libtensorflow::SessionOptions->New;
my $graph = AI::TensorFlow::Libtensorflow::Graph->New;
my $session = AI::TensorFlow::Libtensorflow::Session->LoadFromSavedModel(
$opt, undef, $model_base, \@tags, $graph, undef, $s
);
AssertOK($s);
my %ops = (
in => $graph->OperationByName('serving_default_inputs'),
out => $graph->OperationByName('StatefulPartitionedCall'),
);
die "Could not get all operations" unless List::Util::all(sub { defined }, values %ops);
my %outputs = map { $_ => [ AI::TensorFlow::Libtensorflow::Output->New( { oper => $ops{$_}, index => 0 } ) ] }
keys %ops;
p %outputs;
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod
undef,
$s
);
AssertOK($s);
return $outputs_t[0];
};
say "Warming up the model";
use PDL::GSL::RNG;
my $rng = PDL::GSL::RNG->new('default');
my $image_size = $model_name_to_params{$model_name}{image_size};
my $warmup_input = zeros(float, 3, @$image_size, 1 );
$rng->get_uniform($warmup_input);
p $RunSession->($session, FloatPDLTOTFTensor($warmup_input));
my $output_pdl_batched = FloatTFTensorToPDL($RunSession->($session, $t));
my $softmax = sub { ( map $_/sumover($_)->dummy(0), exp($_[0]) )[0] };
my $probabilities_batched = $softmax->($output_pdl_batched);
p $probabilities_batched;
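The $softmax above computes softmax(x)_i = exp(x_i) / sum_j exp(x_j) along the class dimension (dim 0 in PDL's column-major layout). A tiny self-contained sketch of the same idea:

  use PDL;

  my $logits = pdl([ [1, 2, 3] ]);  # one row of three class scores
  my $probs  = $logits->exp / $logits->exp->sumover->dummy(0);
  print $probs, "\n";               # ~[0.09 0.24 0.67]; the row sums to 1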
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod
=encoding UTF-8
=head1 NAME
AI::TensorFlow::Libtensorflow::Manual::Notebook::InferenceUsingTFHubMobileNetV2Model - Using TensorFlow to do image classification using a pre-trained model
=head1 SYNOPSIS
The following tutorial is based on the L<Image Classification with TensorFlow Hub notebook|https://github.com/tensorflow/docs/blob/master/site/en/hub/tutorials/image_classification.ipynb>. It uses a pre-trained model based on the I<MobileNet V2> arch...
Please look at the L<SECURITY note|https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md> regarding running models, as models are programs. You can also use C<saved_model_cli scan> to check for L<security-sensitive "denylisted ops"|https:/...
If you would like to visualise a model, you can use L<Netron|https://github.com/lutzroeder/netron> on the C<.pb> file.
=head1 COLOPHON
The following document is either a POD file which can additionally be run as a Perl script or a Jupyter Notebook which can be run in L<IPerl|https://p3rl.org/Devel::IPerl> (viewable online at L<nbviewer|https://nbviewer.org/github/EntropyOrg/perl-AI-...
If you are running the code, you may optionally install the L<C<tensorflow> Python package|https://www.tensorflow.org/install/pip> in order to access the C<saved_model_cli> command, but this is only used for informational purposes.
=head1 TUTORIAL
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod
serve
We can examine the computations contained in the graph, in terms of the names of the inputs and outputs of its operations, by running C<saved_model_cli>.
if( File::Which::which('saved_model_cli')) {
local $ENV{TF_CPP_MIN_LOG_LEVEL} = 3; # quiet the TensorFlow logger for the following command
system(qw(saved_model_cli show),
qw(--dir) => $model_base,
qw(--tag_set) => join(',', @tags),
qw(--signature_def) => 'serving_default'
) == 0 or die "Could not run saved_model_cli";
} else {
say "Install the tensorflow Python package to get the `saved_model_cli` command.";
}
B<STREAM (STDOUT)>:
The given SavedModel SignatureDef contains the following input(s):
inputs['inputs'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 224, 224, 3)
name: serving_default_inputs:0
The given SavedModel SignatureDef contains the following output(s):
outputs['logits'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1001)
name: StatefulPartitionedCall:0
Method name is: tensorflow/serving/predict
B<RESULT>:
1
The above C<saved_model_cli> output shows that the model input is at C<serving_default_inputs:0> which means the operation named C<serving_default_inputs> at index C<0> and the output is at C<StatefulPartitionedCall:0> which means the operation named...
It also shows the type and shape of the C<TFTensor>s for those inputs and outputs. Together this is known as a signature.
For the C<input>, we have C<(-1, 224, 224, 3)> which is a L<common input image specification for TensorFlow Hub|https://www.tensorflow.org/hub/common_signatures/images#input>. This is known as C<channels_last> (or C<NHWC>) layout where the TensorFlow...
For the C<output>, we have C<(-1, 1001)> which is C<[batch_size, num_classes]> where the elements are scores that the image received for that ImageNet class.
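As a hedged illustration of how that C<NHWC> shape maps to PDL, note that PDL's dimension order is the reverse of TensorFlow's, which is why the warm-up code later builds C<zeros(float, 3, @$image_size, 1)>:

  use PDL;

  # TF shape (1, 224, 224, 3) corresponds to PDL dims (3, 224, 224, 1):
  # channels first, batch last, in PDL's column-major ordering.
  my $input = zeros(float, 3, 224, 224, 1);
  print join(',', $input->dims), "\n";   # 3,224,224,1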
Now we can load the model from that folder with the tag set C<[ 'serve' ]> by using the C<LoadFromSavedModel> constructor to create a C<::Graph> and a C<::Session> for that graph.
my $opt = AI::TensorFlow::Libtensorflow::SessionOptions->New;
my $graph = AI::TensorFlow::Libtensorflow::Graph->New;
my $session = AI::TensorFlow::Libtensorflow::Session->LoadFromSavedModel(
$opt, undef, $model_base, \@tags, $graph, undef, $s
);
AssertOK($s);
So let's use the names from the C<saved_model_cli> output to create our C<::Output> C<ArrayRef>s.
my %ops = (
in => $graph->OperationByName('serving_default_inputs'),
out => $graph->OperationByName('StatefulPartitionedCall'),
);
die "Could not get all operations" unless List::Util::all(sub { defined }, values %ops);
my %outputs = map { $_ => [ AI::TensorFlow::Libtensorflow::Output->New( { oper => $ops{$_}, index => 0 } ) ] }
keys %ops;
p %outputs;
say "Input: " , $outputs{in}[0];
say "Output: ", $outputs{out}[0];
B<STREAM (STDOUT)>:
Input: serving_default_inputs:0
Output: StatefulPartitionedCall:0
B<STREAM (STDERR)>:
=for html <span style="display:inline-block;margin-left:1em;"><pre style="display: block"><code><span style="color: #33ccff;">{</span><span style="">
</span><span style="color: #6666cc;">in</span><span style=""> </span><span style="color: #33ccff;"> </span><span style="color: #33ccff;">[</span><span style="">
</span><span style="color: #9999cc;">[0] </span><span style="color: #cc66cc;">AI::TensorFlow::Libtensorflow::Output</span><span style=""> </span><span style="color: #33ccff;">{</span><span style="">
</span><span style="color: #6666cc;">index</span><span style="color: #33ccff;"> </span><span style="color: #ff6633;">0</span><span style="color: #33ccff;">,</span><span style="">
</span><span style="color: #6666cc;">oper</span><span style=""> </span><span style="color: #33ccff;"> </span><span style="color: #cc66cc;">AI::TensorFlow::Libtensorflow::Operation</span><span style=""> </span><span style="color: #33...
</span><span style="color: #6666cc;">Name</span><span style=""> </span><span style="color: #33ccff;"> </span><span style="color: #33ccff;">"</span><span style="color: #669933;">serving_default_inputs</span><span style=...
</span><span style="color: #6666cc;">NumInputs</span><span style=""> </span><span style="color: #33ccff;"> </span><span style="color: #ff6633;">0</span><span style="color: #33ccff;">,</span><span style="">
</span><span style="color: #6666cc;">NumOutputs</span><span style="color: #33ccff;"> </span><span style="color: #ff6633;">1</span><span style="color: #33ccff;">,</span><span style="">
</span><span style="color: #6666cc;">OpType</span><span style=""> </span><span style="color: #33ccff;"> </span><span style="color: #33ccff;">"</span><span style="color: #669933;">Placeholder</span><span style="color: #33...
</span><span style="color: #33ccff;">}</span><span style="">
</span><span style="color: #33ccff;">}</span><span style="">
</span><span style="color: #33ccff;">]</span><span style="color: #33ccff;">,</span><span style="">
</span><span style="color: #6666cc;">out</span><span style="color: #33ccff;"> </span><span style="color: #33ccff;">[</span><span style="">
</span><span style="color: #9999cc;">[0] </span><span style="color: #cc66cc;">AI::TensorFlow::Libtensorflow::Output</span><span style=""> </span><span style="color: #33ccff;">{</span><span style="">
</span><span style="color: #6666cc;">index</span><span style="color: #33ccff;"> </span><span style="color: #ff6633;">0</span><span style="color: #33ccff;">,</span><span style="">
</span><span style="color: #6666cc;">oper</span><span style=""> </span><span style="color: #33ccff;"> </span><span style="color: #cc66cc;">AI::TensorFlow::Libtensorflow::Operation</span><span style=""> </span><span style="color: #33...
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod
undef,
$s
);
AssertOK($s);
return $outputs_t[0];
};
say "Warming up the model";
use PDL::GSL::RNG;
my $rng = PDL::GSL::RNG->new('default');
my $image_size = $model_name_to_params{$model_name}{image_size};
my $warmup_input = zeros(float, 3, @$image_size, 1 );
$rng->get_uniform($warmup_input);
p $RunSession->($session, FloatPDLTOTFTensor($warmup_input));
B<STREAM (STDOUT)>:
Warming up the model
lib/AI/TensorFlow/Libtensorflow/Operation.pm
package AI::TensorFlow::Libtensorflow::Operation;
# ABSTRACT: An operation
$AI::TensorFlow::Libtensorflow::Operation::VERSION = '0.0.7';
use strict;
use warnings;
use namespace::autoclean;
use AI::TensorFlow::Libtensorflow::Lib qw(arg);
use AI::TensorFlow::Libtensorflow::Output;
use AI::TensorFlow::Libtensorflow::Input;
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
use FFI::C::ArrayDef;
my $adef = FFI::C::ArrayDef->new(
$ffi,
name => 'TF_Operation_array',
members => [
FFI::C::StructDef->new(
$ffi,
members => [
p => 'opaque'
lib/AI/TensorFlow/Libtensorflow/OperationDescription.pm
package AI::TensorFlow::Libtensorflow::OperationDescription;
# ABSTRACT: Operation being built
$AI::TensorFlow::Libtensorflow::OperationDescription::VERSION = '0.0.7';
use strict;
use warnings;
use namespace::autoclean;
use AI::TensorFlow::Libtensorflow::Lib qw(arg);
use AI::TensorFlow::Libtensorflow::Lib::FFIType::Variant::PackableArrayRef;
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
$ffi->load_custom_type('AI::TensorFlow::Libtensorflow::Lib::FFIType::TFPtrSizeScalarRef'
=> 'tf_attr_string_buffer'
);
$ffi->load_custom_type('AI::TensorFlow::Libtensorflow::Lib::FFIType::TFPtrPtrLenSizeArrayRefScalar'
=> 'tf_attr_string_list'
);
$ffi->load_custom_type(PackableArrayRef('Int64ArrayRef', pack_type => 'q')
=> 'tf_attr_int_list'
);
$ffi->load_custom_type(PackableArrayRef('Float32ArrayRef', pack_type => 'f')
lib/AI/TensorFlow/Libtensorflow/Output.pm
# in case the upstream API changes.
use strict;
use warnings;
use namespace::autoclean;
use FFI::Platypus::Record;
use AI::TensorFlow::Libtensorflow::Lib::FFIType::Variant::RecordArrayRef;
use AI::TensorFlow::Libtensorflow::Lib;
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
record_layout_1($ffi,
'opaque' => '_oper', # 8 (on 64-bit)
'int' => '_index', # 4
# padding to make sizeof(record) == 16
# but only on machines where sizeof(opaque) is 8 bytes
# See also:
# Convert::Binary::C->new( Alignment => 8 )
# ->parse( ... )
lib/AI/TensorFlow/Libtensorflow/Session.pm
use warnings;
use namespace::autoclean;
use AI::TensorFlow::Libtensorflow;
use AI::TensorFlow::Libtensorflow::Lib qw(arg);
use AI::TensorFlow::Libtensorflow::Tensor;
use AI::TensorFlow::Libtensorflow::Output;
use FFI::Platypus::Buffer qw(window scalar_to_pointer);
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
$ffi->attach( [ 'NewSession' => 'New' ] =>
[
arg 'TF_Graph' => 'graph',
arg 'TF_SessionOptions' => 'opt',
arg 'TF_Status' => 'status',
],
=> 'TF_Session' => sub {
my ($xs, $class, @rest) = @_;
return $xs->(@rest);
lib/AI/TensorFlow/Libtensorflow/SessionOptions.pm
package AI::TensorFlow::Libtensorflow::SessionOptions;
# ABSTRACT: Holds options that can be passed during session creation
$AI::TensorFlow::Libtensorflow::SessionOptions::VERSION = '0.0.7';
use strict;
use warnings;
use namespace::autoclean;
use AI::TensorFlow::Libtensorflow::Lib qw(arg);
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
$ffi->attach( [ 'NewSessionOptions' => 'New' ] =>
[] => 'TF_SessionOptions' );
$ffi->attach( [ 'DeleteSessionOptions' => 'DESTROY' ] => [
arg 'TF_SessionOptions' => 'self',
] => 'void');
$ffi->attach( 'SetTarget' => [
arg 'TF_SessionOptions' => 'options',
lib/AI/TensorFlow/Libtensorflow/Status.pm
package AI::TensorFlow::Libtensorflow::Status;
# ABSTRACT: Status used for error checking
$AI::TensorFlow::Libtensorflow::Status::VERSION = '0.0.7';
use strict;
use warnings;
use namespace::autoclean;
use AI::TensorFlow::Libtensorflow::Lib;
use FFI::C;
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
# enum TF_Code {{{
# From <tensorflow/c/tf_status.h>
my @_TF_CODE = (
[ OK => 0 ],
[ CANCELLED => 1 ],
[ UNKNOWN => 2 ],
[ INVALID_ARGUMENT => 3 ],
[ DEADLINE_EXCEEDED => 4 ],
lib/AI/TensorFlow/Libtensorflow/TString.pm
package AI::TensorFlow::Libtensorflow::TString;
# ABSTRACT: A variable-capacity string type
$AI::TensorFlow::Libtensorflow::TString::VERSION = '0.0.7';
use strict;
use warnings;
use namespace::autoclean;
use AI::TensorFlow::Libtensorflow::Lib qw(arg);
use FFI::Platypus::Memory qw(malloc free);
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
### From <tensorflow/tsl/platform/ctstring_internal.h>
#// _Static_assert(sizeof(TF_TString) == 24);
use constant SIZEOF_TF_TString => 24;
### From <tensorflow/tsl/platform/ctstring_internal.h>
# typedef enum TF_TString_Type { // NOLINT
# TF_TSTR_SMALL = 0x00,
# TF_TSTR_LARGE = 0x01,
lib/AI/TensorFlow/Libtensorflow/Tensor.pm
package AI::TensorFlow::Libtensorflow::Tensor;
$AI::TensorFlow::Libtensorflow::Tensor::VERSION = '0.0.7';
use strict;
use warnings;
use namespace::autoclean;
use AI::TensorFlow::Libtensorflow::Lib qw(arg);
use FFI::Platypus::Closure;
use FFI::Platypus::Buffer qw(window);
use List::Util qw(product);
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
$ffi->load_custom_type('AI::TensorFlow::Libtensorflow::Lib::FFIType::TFPtrSizeScalarRef'
=> 'tf_tensor_buffer'
);
$ffi->attach( [ 'NewTensor' => 'New' ] =>
[
arg 'TF_DataType' => 'dtype',
lib/AI/TensorFlow/Libtensorflow/Tensor.pm
Data buffer for the contents of the C<TFTensor>.
=item CodeRef $deallocator
A callback used to deallocate C<$data>, which is passed the
parameters C<<
$deallocator->( opaque $pointer, size_t $size, opaque $deallocator_arg)
>>.
=item Ref $deallocator_arg [optional, default: C<undef>]
Argument that is passed to the C<$deallocator> callback.
=back
B<Returns>
=over 4
=item L<TFTensor|AI::TensorFlow::Libtensorflow::Lib::Types/TFTensor>
lib/AI/TensorFlow/Libtensorflow/_Misc.pm
package AI::TensorFlow::Libtensorflow::_Misc;
# ABSTRACT: Private API
$AI::TensorFlow::Libtensorflow::_Misc::VERSION = '0.0.7';
use strict;
use warnings;
use namespace::autoclean;
use AI::TensorFlow::Libtensorflow::Lib qw(arg);
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
$ffi->mangler(AI::TensorFlow::Libtensorflow::Lib->mangler_default);
$ffi->attach( 'TensorFromProto' => [
arg 'TF_Buffer' => 'from',
arg 'TF_Tensor' => 'to',
arg 'TF_Status' => 'status',
]);
1;
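A hedged sketch of calling this private helper; all three arguments are hypothetical objects constructed elsewhere:

  # TensorFromProto is attached as a plain function taking
  # (TF_Buffer $from, TF_Tensor $to, TF_Status $status).
  AI::TensorFlow::Libtensorflow::_Misc::TensorFromProto(
      $proto_buffer,   # hypothetical TF_Buffer with a serialized TensorProto
      $target_tensor,  # hypothetical TF_Tensor to populate
      $status,         # hypothetical TF_Status for error checking
  );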
maint/process-capi.pl
use List::Util qw(uniq first);
use List::SomeUtils qw(firstidx part);
use Module::Runtime qw(module_notional_filename);
use Module::Load qw(load);
option 'root_path' => (
is => 'ro',
format => 's',
doc => 'Root for TensorFlow',
default => "$FindBin::Bin/../../tensorflow/tensorflow",
isa => Path,
coerce => 1,
);
option 'lib_path' => (
is => 'ro',
format => 's',
doc => 'Root for lib',
default => "$FindBin::Bin/../lib",
isa => Path,
coerce => 1,
);
lazy capi_path => method() {
$self->root_path->child(qw(tensorflow c));
};
lazy header_paths => method() {
maint/process-capi.pl
load 'AI::TensorFlow::Libtensorflow::_Misc';
}
}
package AttachedFunctionTrackable {
use Mu::Role;
use Sub::Uplevel qw(uplevel);
use Hook::LexWrap;
ro _attached_functions => ( default => sub { {} } );
around attach => sub {
my ($orig, $self, $name) = @_;
my $real_name;
# Capture the mangled C symbol name as dlsym() resolves it while the
# original attach() runs (the post hook sees dlsym's arguments and result).
wrap 'FFI::Platypus::DL::dlsym',
post => sub { $real_name = $_[1] if $_[-1] };
my $ret = uplevel 3, $orig, @_[1..$#_];
push $self->_attached_functions->{$real_name}->@*, {
c => $real_name,
package => (caller(2))[0],