CONTRIBUTING
<<<=== OTHER CONTRIBUTORS ===>>>
If anyone other than yourself has written software source code or documentation as part of your APTech Family contribution, then they must submit their contributions themselves under the terms of the APTech Family Copyright Assignment Agreement above...
Please be sure you DO NOT STUDY OR INCLUDE any 3rd-party or public-domain intellectual property as part of your APTech Family contribution, including but not limited to: source code; documentation; copyrighted, trademarked, or patented components; or...
<<<=== RECOGNITION ===>>>
Once we have received your contribution under the terms of the APTech Family Copyright Assignment Agreement above, as well as any necessary Employer Copyright Disclaimer Agreement(s), then we will begin the process of reviewing any software pull requ...
<<<=== SUBMISSION ===>>>
When you are ready to submit the signed agreement(s), please answer the following 12 questions about yourself and your APTech Family contribution, then include your answers in the body of your e-mail or on a separate sheet of paper in snail mail, and...
1. Full Legal Name
2. Preferred Pseudonym (or "none")
3. Country of Citizenship
4. Date of Birth (spell full month name)
lib/AI/TensorFlow/Libtensorflow/Status.pm
lib/AI/TensorFlow/Libtensorflow/TFLibrary.pm
lib/AI/TensorFlow/Libtensorflow/TString.pm
lib/AI/TensorFlow/Libtensorflow/Tensor.pm
lib/AI/TensorFlow/Libtensorflow/_Misc.pm
maint/cpanfile-git
maint/devops.yml
maint/inc/Pod/Elemental/Transformer/TF_CAPI.pm
maint/inc/Pod/Elemental/Transformer/TF_Sig.pm
maint/inc/PreloadPodWeaver.pm
maint/process-capi.pl
maint/process-notebook.pl
perlcritic.rc
t/01_hello_tf.t
t/02_load_graph.t
t/03_create_tftensor.t
t/04_allocate_tftensor.t
t/05_session_run.t
t/AI/TensorFlow/Libtensorflow.t
t/lib/TF_TestQuiet.pm
t/lib/TF_Utils.pm
t/models/README
FFI::C = 0.12
FFI::CheckLib = 0
FFI::Platypus::Type::Enum = 0
FFI::Platypus::Type::PtrObject = 0
[Prereqs / RuntimeSuggests]
PDL = 0
Data::Printer = 0
[Prereqs / ProcessCAPI]
; for maint/process-capi.pl
-phase = develop
-relationship = suggests
CLI::Osprey = 0
Data::Printer = 0
File::Find::Rule = 0
Function::Parameters = 0
Hook::LexWrap = 0
List::SomeUtils = 0
Module::Runtime = 0
Mu = 0
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
# PODNAME: AI::TensorFlow::Libtensorflow::Manual::CAPI
# ABSTRACT: List of functions exported by TensorFlow C API
# DO NOT EDIT: Generated by process-capi.pl
__END__
=pod
=encoding UTF-8
=head1 NAME
AI::TensorFlow::Libtensorflow::Manual::CAPI - List of functions exported by TensorFlow C API
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
const TF_SessionOptions* session_options, const TF_Buffer* run_options,
const char* export_dir, const char* const* tags, int tags_len,
TF_Graph* graph, TF_Buffer* meta_graph_def, TF_Status* status);
=head2 TF_CloseSession
=over 2
Close a session.
Contacts any other processes associated with the session, if applicable.
May not be called after TF_DeleteSession().
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_CloseSession(TF_Session*, TF_Status* status);
=head2 TF_DeleteSession
=over 2
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_UpdateEdge(TF_Graph* graph, TF_Output new_src,
TF_Input dst, TF_Status* status);
=head2 TF_NewServer
=over 2
Creates a new in-process TensorFlow server configured using a serialized
ServerDef protocol buffer provided via `proto` and `proto_len`.
The server will not serve any requests until TF_ServerStart is invoked.
The server will stop serving requests once TF_ServerStop or
TF_DeleteServer is invoked.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern TF_Server* TF_NewServer(const void* proto,
size_t proto_len,
TF_Status* status);
=head2 TF_ServerStart
=over 2
Starts an in-process TensorFlow server.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_ServerStart(TF_Server* server, TF_Status* status);
=head2 TF_ServerStop
=over 2
Stops an in-process TensorFlow server.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_ServerStop(TF_Server* server, TF_Status* status);
=head2 TF_ServerJoin
=over 2
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern const char* TF_ServerTarget(TF_Server* server);
=head2 TF_DeleteServer
=over 2
Destroys an in-process TensorFlow server and frees its memory. If the server is
running, it will be stopped and joined.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_DeleteServer(TF_Server* server);
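Putting the server calls above together: the following is a hedged L<FFI::Platypus> sketch of the lifecycle and is not part of the generated listing. It assumes C<libtensorflow> is discoverable via L<FFI::CheckLib> and that C<$server_def_path> is a hypothetical file holding a serialized C<ServerDef> protocol buffer obtained elsewhere.

  use FFI::Platypus 2.00;
  use FFI::CheckLib qw( find_lib_or_die );
  use Path::Tiny qw( path );

  my $ffi = FFI::Platypus->new( api => 2 );
  $ffi->lib( find_lib_or_die( lib => 'tensorflow' ) );

  # Status helpers from <tensorflow/c/tf_status.h>
  $ffi->attach( TF_NewStatus    => []         => 'opaque' );
  $ffi->attach( TF_GetCode      => ['opaque'] => 'int'    );
  $ffi->attach( TF_Message      => ['opaque'] => 'string' );
  $ffi->attach( TF_DeleteStatus => ['opaque'] => 'void'   );

  # Server functions from <tensorflow/c/c_api.h>
  $ffi->attach( TF_NewServer    => [ 'string', 'size_t', 'opaque' ] => 'opaque' );
  $ffi->attach( TF_ServerStart  => [ 'opaque', 'opaque' ] => 'void' );
  $ffi->attach( TF_ServerStop   => [ 'opaque', 'opaque' ] => 'void' );
  $ffi->attach( TF_DeleteServer => [ 'opaque' ] => 'void' );

  my $server_def_path = 'server_def.pb';   # hypothetical path
  my $proto  = path($server_def_path)->slurp_raw;
  my $status = TF_NewStatus();

  my $server = TF_NewServer( $proto, length($proto), $status );
  die TF_Message($status) if TF_GetCode($status) != 0;   # 0 is TF_OK

  TF_ServerStart( $server, $status );   # begin serving requests
  # ... run the cluster workload ...
  TF_ServerStop( $server, $status );    # stop serving
  TF_DeleteServer( $server );           # stops and joins if still running
  TF_DeleteStatus( $status );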
=head2 TF_RegisterLogListener
=over 2
Register a listener method that processes printed messages.
If any listeners are registered, the print operator will call all listeners
with the printed messages and immediately return without writing to the
logs.
=back
/* From <tensorflow/c/c_api.h> */
TF_CAPI_EXPORT extern void TF_RegisterLogListener(
void (*listener)(const char*));
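As a further hedged illustration (again outside the generated listing), the callback registration above could be exercised directly with an L<FFI::Platypus> closure; the type alias C<tf_log_listener_t> is made up for this sketch.

  use FFI::Platypus 2.00;
  use FFI::CheckLib qw( find_lib_or_die );

  my $ffi = FFI::Platypus->new( api => 2 );
  $ffi->lib( find_lib_or_die( lib => 'tensorflow' ) );

  # void TF_RegisterLogListener( void (*listener)(const char*) )
  $ffi->type( '(string)->void' => 'tf_log_listener_t' );
  $ffi->attach( TF_RegisterLogListener => [ 'tf_log_listener_t' ] => 'void' );

  my $listener = $ffi->closure( sub { warn "TF print: $_[0]" } );
  $listener->sticky;   # keep the closure alive as long as it may be called
  TF_RegisterLogListener( $listener );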
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern void TF_SyncWritableFile(TF_WritableFileHandle* handle,
TF_Status* status);
=head2 TF_FlushWritableFile
=over 2
Flush local buffers to the filesystem. If the process terminates after a
successful flush, the contents may still be persisted, since the underlying
filesystem may eventually flush the contents. If the OS or machine crashes
after a successful flush, the contents may or may not be persisted, depending
on the implementation.
=back
/* From <tensorflow/c/env.h> */
TF_CAPI_EXPORT extern void TF_FlushWritableFile(TF_WritableFileHandle* handle,
TF_Status* status);
lib/AI/TensorFlow/Libtensorflow/Manual/CAPI.pod
TFE_ContextOptions* options, bool use_tfrt_distributed_runtime);
=head2 TFE_GetContextId
=over 2
Returns the context_id from the EagerContext which is used by the
EagerService to maintain consistency between client and worker. The
context_id is initialized with a dummy value and is later set when the worker
is initialized (either locally or remotely). The context_id can change during
the process lifetime although this should cause the worker to be
reinitialized (e.g. cleared caches) as well.
=back
/* From <tensorflow/c/eager/c_api_experimental.h> */
TF_CAPI_EXPORT extern uint64_t TFE_GetContextId(TFE_Context* ctx);
=head2 TFE_NewCancellationManager
=over 2
lib/AI/TensorFlow/Libtensorflow/Manual/GPU.pod
An alternative to installing all the software listed on the "bare metal" host
machine is to use C<libtensorflow> via a Docker container and the
NVIDIA Container Toolkit. See L<AI::TensorFlow::Libtensorflow::Manual::Quickstart/DOCKER IMAGES>
for more information.
=head1 RUNTIME
When running C<libtensorflow>, your program will attempt to acquire quite a bit
of GPU VRAM. You can check if you have enough free VRAM by using the
C<nvidia-smi> command, which displays resource information as well as which
processes are currently using the GPU. If C<libtensorflow> is not able to
allocate enough memory, it will crash with an out-of-memory (OOM) error. This
is typical when running multiple programs that each use the GPU.
If you have multiple GPUs, you can control which GPUs your program can access
by using the
L<C<CUDA_VISIBLE_DEVICES> environment variable|https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#env-vars>
provided by the underlying CUDA library. This is typically
done by setting the variable in a C<BEGIN> block before loading
L<AI::TensorFlow::Libtensorflow>:
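(The sketch below is illustrative; the device index C<0> is an assumption, pick whichever GPU you want visible.)

  use strict;
  use warnings;

  BEGIN {
      # Must be set before libtensorflow is loaded so that CUDA sees it.
      $ENV{CUDA_VISIBLE_DEVICES} = '0';
  }

  use AI::TensorFlow::Libtensorflow;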
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod
# PODNAME: AI::TensorFlow::Libtensorflow::Manual::Notebook::InferenceUsingTFHubCenterNetObjDetect
## DO NOT EDIT. Generated from notebook/InferenceUsingTFHubCenterNetObjDetect.ipynb using ./maint/process-notebook.pl.
use strict;
use warnings;
use utf8;
use constant IN_IPERL => !! $ENV{PERL_IPERL_RUNNING};
no if IN_IPERL, warnings => 'redefine'; # fewer messages when re-running cells
use feature qw(say state postderef);
use Syntax::Construct qw(each-array);
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubCenterNetObjDetect.pod
die "There should be 4 dimensions" unless $pdl_image_batched->ndims == 4;
die "With the final dimension of length 1" unless $pdl_image_batched->dim(3) == 1;
p $pdl_image_batched;
p $t;
=head2 Run the model for inference
We can use the C<Run> method to run the session and get the multiple output C<TFTensor>s. The following uses the names in C<$outputs> mapping to help process the multiple outputs more easily.
my $RunSession = sub {
my ($session, $t) = @_;
my @outputs_t;
my @keys = keys %{ $outputs{out} };
my @values = $outputs{out}->@{ @keys };
$session->Run(
undef,
[ values %{$outputs{in} } ], [$t],
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod
# PODNAME: AI::TensorFlow::Libtensorflow::Manual::Notebook::InferenceUsingTFHubEnformerGeneExprPredModel
## DO NOT EDIT. Generated from notebook/InferenceUsingTFHubEnformerGeneExprPredModel.ipynb using ./maint/process-notebook.pl.
use strict;
use warnings;
use utf8;
use constant IN_IPERL => !! $ENV{PERL_IPERL_RUNNING};
no if IN_IPERL, warnings => 'redefine'; # fewer messages when re-running cells
use feature qw(say);
use Syntax::Construct qw( // );
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubEnformerGeneExprPredModel.pod
=item 1.
"Compute contribution scores":
This task requires implementing C<@tf.function> to compile gradients.
=item 2.
"Predict the effect of a genetic variant" and "Score multiple variants":
The first task is possible, but the second task requires loading a pre-processing pipeline for scikit-learn and unfortunately this pipeline is stored as a pickle file that is valid for an older version of scikit-learn (version 0.23.2) and as such its...
=back
# Some code that could be used for working with variants.
1 if <<'COMMENT';
use Bio::DB::HTS::VCF;
my $clinvar_tbi_path = "${clinvar_path}.tbi";
unless( -f $clinvar_tbi_path ) {
lib/AI/TensorFlow/Libtensorflow/Manual/Notebook/InferenceUsingTFHubMobileNetV2Model.pod
# PODNAME: AI::TensorFlow::Libtensorflow::Manual::Notebook::InferenceUsingTFHubMobileNetV2Model
## DO NOT EDIT. Generated from notebook/InferenceUsingTFHubMobileNetV2Model.ipynb using ./maint/process-notebook.pl.
use strict;
use warnings;
use utf8;
use constant IN_IPERL => !! $ENV{PERL_IPERL_RUNNING};
no if IN_IPERL, warnings => 'redefine'; # fewer messages when re-running cells
use feature qw(say state);
use Syntax::Construct qw(each-array);
lib/AI/TensorFlow/Libtensorflow/Session.pm
$outputs = AI::TensorFlow::Libtensorflow::Output->_as_array( @$outputs );
$xs->($self,
$run_options,
# Inputs
$inputs, $input_v_a , $input_v_a->count,
# Outputs
$outputs, $output_v_a, $output_v_a->count,
_process_target_opers_args($target_opers),
$run_metadata,
$status
);
@{$output_values} = @{ AI::TensorFlow::Libtensorflow::Tensor->_from_array( $output_v_a ) };
}
);
# Convert an optional arrayref of ::Operation objects into the
# (operation array, count) pair expected by the underlying XS call,
# or (undef, 0) when no target operations were given.
sub _process_target_opers_args {
my ($target_opers) = @_;
my @target_opers_args = defined $target_opers
? do {
my $target_opers_a = AI::TensorFlow::Libtensorflow::Operation->_as_array( @$target_opers );
( $target_opers_a, $target_opers_a->count )
}
: ( undef, 0 );
return @target_opers_args;
}
lib/AI/TensorFlow/Libtensorflow/Session.pm
] => 'void' => sub {
my ($xs, $session, $inputs, $outputs, $target_opers, $status) = @_;
$inputs = AI::TensorFlow::Libtensorflow::Output->_as_array( @$inputs );
$outputs = AI::TensorFlow::Libtensorflow::Output->_as_array( @$outputs );
my $handle;
$xs->($session,
$inputs, $inputs->count,
$outputs, $outputs->count,
_process_target_opers_args($target_opers),
\$handle,
$status,
);
return unless defined $handle;
window( my $handle_window, $handle );
my $handle_obj = bless \\$handle_window,
'AI::TensorFlow::Libtensorflow::Session::_PRHandle';
lib/AI/TensorFlow/Libtensorflow/Session.pm
$inputs = AI::TensorFlow::Libtensorflow::Output->_as_array( @$inputs );
$outputs = AI::TensorFlow::Libtensorflow::Output->_as_array( @$outputs );
my $handle = scalar_to_pointer( $$$handle_obj );
$xs->($session, $handle,
# Inputs
$inputs, $input_v_a , $input_v_a->count,
# Outputs
$outputs, $output_v_a, $output_v_a->count,
_process_target_opers_args($target_opers),
$status,
);
@{$output_values} = @{ AI::TensorFlow::Libtensorflow::Tensor->_from_array( $output_v_a ) };
} );
$ffi->attach( [ 'SessionListDevices' => 'ListDevices' ] => [
arg TF_Session => 'session',
arg TF_Status => 'status',
maint/inc/Pod/Elemental/Transformer/TF_Sig.pm
sub __paras_for_num_marker { die "only support definition lists" }
sub __paras_for_bul_marker { die "only support definition lists" }
# Rewrite the type name at the start of each definition-list term into a
# POD link when it is a constraint from the Libtensorflow type library;
# other known Type::Tiny or FFI::Platypus types pass through unchanged,
# and unknown types are a fatal error.
around __paras_for_def_marker => sub {
my ($orig, $self, $rest) = @_;
my $ffi = AI::TensorFlow::Libtensorflow::Lib->ffi;
my $type_library = 'AI::TensorFlow::Libtensorflow::Lib::Types';
my @types = ($rest);
my $process_type = sub {
my ($type) = @_;
my $new_type_text = $type;
my $info;
if( eval { $info->{TT} = t($type); 1 }
|| eval { $info->{FFI} = $ffi->type_meta($type); 1 } ) {
if( $info->{TT} && $info->{TT}->library eq $type_library ) {
$new_type_text = "L<$type|$type_library/$type>";
}
} else {
die "Could not find type constraint or FFI::Platypus type $type";
}
$new_type_text;
};
my $type_re = qr{
\A (?<ws>\s*) (?<type> \w+)
}xm;
$rest =~ s[$type_re]{$+{ws} . $process_type->($+{type}) }ge;
my @replacements = $orig->($self, $rest);
@replacements;
};
1;
maint/process-capi.pl
# Order the header paths by where they first match an entry in
# header_order, breaking ties by path length.
lazy sorted_header_paths => method() {
my @order = $self->header_order->@*;
my @sorted = iikeysort {
my $item = $_;
my $first = firstidx { $item =~ $_ } @order;
($first, length $_);
} $self->header_paths->@*;
\@sorted;
};
# Scan each header (in sorted order) for matches of $re, recording the
# named captures plus the source file (relative to the repository root)
# and the match position for every hit.
method _process_re($re) {
my @data;
my @sorted = $self->sorted_header_paths->@*;
for my $file (@sorted) {
my $txt = $file->slurp_utf8;
while( $txt =~ /$re/g ) {
push @data, {
%+,
file => $file->relative($self->root_path),
pos => pos($txt),
};
}
}
\@data;
}
lazy fdecl_data => method() {
my $re = $self->fdecl_re;
my $data = $self->_process_re($re);
# Used for defensive assertion:
# These are mostly constructors that return a value
# (i.e., not void) but also take a function pointer as a
# parameter.
my %TF_func_ptr = map { ($_ => 1) } qw(
TF_NewTensor
TF_StartThread
TF_NewKernelBuilder
TFE_NewTensorHandleFromDeviceMemory
maint/process-capi.pl
typedef \s+
struct \s+
(?<name>\w+) \s+
\{
[^\}]+
\} \s+
\k<name> \s*
;
)
}xm;
$self->_process_re($re);
};
# Partition the typedef'd struct names into those already registered as
# FFI types ("done") and those not yet bound ("todo"), then dump the result.
method check_types() {
my @data = $self->typedef_struct_data->@*;
my %types = map { $_ => 1 } AI::TensorFlow::Libtensorflow::Lib->ffi->types;
my %part;
@part{qw(todo done)} = part { exists $types{$_} } uniq map { $_->{name} } @data;
use DDP; p %part;
}
maint/process-notebook.pl
);
$ENV{SRC_BASENAME} = path($notebook)->basename('.ipynb');
$ENV{DOC_PREFIX} = "AI::TensorFlow::Libtensorflow::Manual::Notebook";
$ENV{PODNAME} = $ENV{DOC_PREFIX} . '::' . $ENV{SRC_BASENAME};
$ENV{GENERATOR} = $0;
system( qw(bash -c), <<'BASH' ) == 0 or die "Failed to process $notebook";
rm $DST || true;
#if grep -C5 -P '\s+\\n' $SRC -m 2; then
#echo -e "Notebook $SRC has whitespace"
#exit 1
#fi
## Run the notebook
#jupyter nbconvert --execute --inplace $SRC