(2) You may Distribute verbatim copies of the Source form of the
Standard Version of this Package in any medium without restriction,
either gratis or for a Distributor Fee, provided that you duplicate
all of the original copyright notices and associated disclaimers. At
your discretion, such verbatim copies may or may not include a
Compiled form of the Package.
(3) You may apply any bug fixes, portability changes, and other
modifications made available from the Copyright Holder. The resulting
Package will still be considered the Standard Version, and as such
will be subject to the Original License.
Distribution of Modified Versions of the Package as Source
(4) You may Distribute your Modified Version as Source (either gratis
or for a Distributor Fee, and with or without a Compiled form of the
Modified Version) provided that you clearly document how it differs
from the Standard Version, including, but not limited to, documenting
any non-standard features, executables, or modules, and provided that
you do at least ONE of the following:
build stand-alone binary or bytecode versions of applications that
include the Package, and Distribute the result without restriction,
provided the result does not expose a direct interface to the Package.
Items That are Not Considered Part of a Modified Version
(9) Works (including, but not limited to, modules and scripts) that
merely extend or make use of the Package, do not, by themselves, cause
the Package to be a Modified Version. In addition, such works are not
considered parts of the Package itself, and are not subject to the
terms of this license.
General Provisions
(10) Any use, modification, and distribution of the Standard or
Modified Versions is governed by this Artistic License. By using,
modifying or distributing the Package, you accept this license. Do not
use, modify, or distribute the Package, if you do not accept this
license.
Makefile.PL
dist => { COMPRESS => 'gzip -9f', SUFFIX => 'gz', },
clean => { FILES => "$distbase-*" },
test => { TESTS => join( ' ', @tests ) },
);
# This is so that we can do
# require 'Makefile.PL'
# and then call get_module_info
sub get_module_info { %module }
if( ! caller ) {
require File::ShareDir::Install;
File::ShareDir::Install::install_share( module => "$module\::Impl" => 'ollama');
{
package MY;
require File::ShareDir::Install;
File::ShareDir::Install->import( qw( postamble ));
}
# I should maybe use something like Shipwright...
my $mm = WriteMakefile1(get_module_info);
my $version = $mm->parse_version($main_file);
regen_README($main_file, $version);
regen_EXAMPLES() if -d 'examples';
};
1;
sub WriteMakefile1 { #Written by Alexandr Ciornii, version 0.21. Added by eumm-upgrade.
my %params=@_;
my $eumm_version=$ExtUtils::MakeMaker::VERSION;
$eumm_version=eval $eumm_version;
die "EXTRA_META is deprecated" if exists $params{EXTRA_META};
die "License not specified" if not exists $params{LICENSE};
if ($params{BUILD_REQUIRES} and $eumm_version < 6.5503) {
#EUMM 6.5502 has problems with BUILD_REQUIRES
$params{PREREQ_PM}={ %{$params{PREREQ_PM} || {}} , %{$params{BUILD_REQUIRES}} };
delete $params{BUILD_REQUIRES};
}
Makefile.PL
delete $params{META_MERGE} if $eumm_version < 6.46;
delete $params{META_ADD} if $eumm_version < 6.46;
delete $params{LICENSE} if $eumm_version < 6.31;
delete $params{AUTHOR} if $] < 5.005;
delete $params{ABSTRACT_FROM} if $] < 5.005;
delete $params{BINARY_LOCATION} if $] < 5.005;
WriteMakefile(%params);
}
sub regen_README {
# README is the short version that just tells people what this is
# and how to install it
my( $file, $version ) = @_;
eval {
# Get description
my $readme = join "\n",
pod_section($file, 'NAME', 'no heading' ),
pod_section($file, 'DESCRIPTION' ),
<<VERSION,
This document describes version $version.
Makefile.PL
[](https://github.com/Corion/$distbase/actions?query=workflow%3Awindows)
[](https://github.com/Corion/$distbase/actions?query=workflow%3Amacos)
[](https://github.com/Corion/$distbase/actions?query=workflow%3Alinux)
STATUS
update_file( 'README.mkdn', $readme_mkdn );
};
}
sub pod_section {
my( $filename, $section, $remove_heading ) = @_;
open my $fh, '<', $filename
or die "Couldn't read '$filename': $!";
my @section =
# Use the range (flip-flop) operator to collect lines from the matching
# =head1 heading up to (and including) the next POD directive
grep { /^=head1\s+$section/.../^=/ } <$fh>;
# Trim the section
if( @section ) {
pop @section if $section[-1] =~ /^=/;
Makefile.PL
pop @section
while @section and $section[-1] =~ /^\s*$/;
shift @section
while @section and $section[0] =~ /^\s*$/;
};
@section = map { $_ =~ s!^=\w+\s+!!; $_ } @section;
return join "", @section;
}
sub regen_EXAMPLES {
my $perl = $^X;
if ($perl =~/\s/) {
$perl = qq{"$perl"};
};
(my $example_file = $main_file) =~ s!\.pm$!/Examples.pm!;
my $examples = `$perl -w examples/gen_examples_pod.pl`;
if ($examples) {
warn "(Re)Creating $example_file\n";
$examples =~ s/\r\n/\n/g;
update_file( $example_file, $examples );
};
};
sub update_file {
my( $filename, $new_content ) = @_;
my $content;
if( -f $filename ) {
open my $fh, '<:raw:encoding(UTF-8)', $filename
or die "Couldn't read '$filename': $!";
local $/;
$content = <$fh>;
};
if( ($content // '') ne $new_content ) { # also create the file if it is missing
README.mkdn
use Future::Utils 'repeat';
my $responses = $client->generateCompletion()->get;
repeat {
my ($res) = $responses->shift->get;
if( $res ) {
print $res->response;
}
Future::Mojo->done( !defined $res || $res->done );
} until => sub($done) { $done->get };
Generate a response for a given prompt with a provided model.
Returns an [AI::Ollama::GenerateCompletionResponse](https://metacpan.org/pod/AI%3A%3AOllama%3A%3AGenerateCompletionResponse).
## `pullModel`
my $res = $client->pullModel(
name => 'llama',
)->get;
lib/AI/Ollama/Client.pm
=head1 METHODS
=head2 C<< checkBlob >>
my $res = $client->checkBlob(
    digest => $digest,
)->get;
Check whether a blob exists on the Ollama server; this is useful when creating models.
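For example (a minimal sketch; C<$digest> stands for a real C<sha256:...> digest and the server URL is an assumption):

    my $client = AI::Ollama::Client->new(
        server => 'http://127.0.0.1:11434/api',
    );
    if( $client->checkBlob( digest => $digest )->get ) {
        say "Blob is already present on the server";
    }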
=cut
around 'checkBlob' => sub ( $super, $self, %options ) {
$super->( $self, %options )->then( sub( $res ) {
if( $res->code =~ /^2\d\d$/ ) {
return Future->done( 1 )
} else {
return Future->done( 0 )
}
});
};
=head2 C<< createBlob >>
lib/AI/Ollama/Client.pm
use Future::Utils 'repeat';
my $responses = $client->generateCompletion()->get;
repeat {
my ($res) = $responses->shift->get;
if( $res ) {
print $res->response;
}
Future::Mojo->done( !defined $res || $res->done );
} until => sub($done) { $done->get };
Generate a response for a given prompt with a provided model.
Returns an L<< AI::Ollama::GenerateCompletionResponse >>.
=cut
around 'generateCompletion' => sub ( $super, $self, %options ) {
# Encode images as base64, if images exist:
# (but create a copy so we don't overwrite the input array)
if (my $images = $options{images}) {
# Allow { filename => '/etc/passwd' }
$options{images} = [
map {
my $item = $_;
if( ref($item) eq 'HASH' ) {
$item = Mojo::File->new($item->{filename})->slurp();
lib/AI/Ollama/Client/Impl.pm
The L<Mojo::UserAgent> to use
=head2 B<< server >>
The server to access
=cut
has 'schema_file' => (
is => 'lazy',
default => sub { require AI::Ollama::Client::Impl; module_file('AI::Ollama::Client::Impl', 'ollama-curated.yaml') },
);
has 'schema' => (
is => 'lazy',
default => sub {
if( my $fn = $_[0]->schema_file ) {
YAML::PP->new( boolean => 'JSON::PP' )->load_file($fn);
}
},
);
has 'validate_requests' => (
is => 'rw',
default => 1,
);
has 'validate_responses' => (
is => 'rw',
default => 1,
);
has 'openapi' => (
is => 'lazy',
default => sub {
if( my $schema = $_[0]->schema ) {
OpenAPI::Modern->new( openapi_schema => $schema, openapi_uri => '' )
}
},
);
# The HTTP stuff should go into a ::Role I guess
has 'ua' => (
is => 'lazy',
default => sub { Mojo::UserAgent->new },
);
has 'server' => (
is => 'ro',
);
=head1 METHODS
=head2 C<< build_checkBlob_request >>
lib/AI/Ollama/Client/Impl.pm
=item B<< digest >>
the SHA256 digest of the blob
=back
=cut
sub build_checkBlob_request( $self, %options ) {
croak "Missing required parameter 'digest'"
unless exists $options{ 'digest' };
my $method = 'HEAD';
my $template = URI::Template->new( '/blobs/{digest}' );
my $path = $template->process(
'digest' => delete $options{'digest'},
);
my $url = Mojo::URL->new( $self->server . $path );
lib/AI/Ollama/Client/Impl.pm
{
}
);
$self->validate_request( $tx );
return $tx
}
sub checkBlob( $self, %options ) {
my $tx = $self->build_checkBlob_request(%options);
my $res = Future::Mojo->new();
my $r1 = Future::Mojo->new();
$r1->then( sub( $tx ) {
my $resp = $tx->res;
$self->emit(response => $resp);
# Should we validate using OpenAPI::Modern here?!
if( $resp->code == 200 ) {
# Blob exists on the server
$res->done($resp);
} elsif( $resp->code == 404 ) {
# Blob was not found
$res->done($resp);
} else {
# An unknown/unhandled response, likely an error
$res->fail( sprintf( "unknown_unhandled code %d: %s", $resp->code, $resp->body ), $resp);
}
})->retain;
# Start our transaction
$self->emit(request => $tx);
$tx = $self->ua->start_p($tx)->then(sub($tx) {
$r1->resolve( $tx );
undef $r1;
})->catch(sub($err) {
$self->emit(response => $tx, $err);
$r1->fail( $err => $tx );
undef $r1;
});
return $res
}
=head2 C<< build_createBlob_request >>
lib/AI/Ollama/Client/Impl.pm
=item B<< digest >>
the SHA256 digest of the blob
=back
=cut
sub build_createBlob_request( $self, %options ) {
croak "Missing required parameter 'digest'"
unless exists $options{ 'digest' };
my $method = 'POST';
my $template = URI::Template->new( '/blobs/{digest}' );
my $path = $template->process(
'digest' => delete $options{'digest'},
);
my $url = Mojo::URL->new( $self->server . $path );
lib/AI/Ollama/Client/Impl.pm
}
=> $body,
);
$self->validate_request( $tx );
return $tx
}
sub createBlob( $self, %options ) {
my $tx = $self->build_createBlob_request(%options);
my $res = Future::Mojo->new();
my $r1 = Future::Mojo->new();
$r1->then( sub( $tx ) {
my $resp = $tx->res;
$self->emit(response => $resp);
# Should we validate using OpenAPI::Modern here?!
if( $resp->code == 201 ) {
# Blob was successfully created
$res->done($resp);
} else {
# An unknown/unhandled response, likely an error
$res->fail( sprintf( "unknown_unhandled code %d: %s", $resp->code, $resp->body ), $resp);
}
})->retain;
# Start our transaction
$self->emit(request => $tx);
$tx = $self->ua->start_p($tx)->then(sub($tx) {
$r1->resolve( $tx );
undef $r1;
})->catch(sub($err) {
$self->emit(response => $tx, $err);
$r1->fail( $err => $tx );
undef $r1;
});
return $res
}
=head2 C<< build_generateChatCompletion_request >>
lib/AI/Ollama/Client/Impl.pm
my $response = $client->generateChatCompletion();
my $streamed = $response->get();
repeat {
my ($res) = $streamed->shift->get;
if( $res ) {
print $res->message->{content};
}
Future::Mojo->done( !defined $res || $res->done );
} until => sub($done) { $done->get };
Generate the next message in a chat with a provided model.
This is a streaming endpoint, so there will be a series of responses. The final response object will include statistics and additional data from the request.
=head3 Options
=over 4
lib/AI/Ollama/Client/Impl.pm
=item C<< stream >>
If C<false>, the response will be returned as a single response object; otherwise the response will be streamed as a series of objects.
=back
Returns an L<< AI::Ollama::GenerateChatCompletionResponse >> on success.
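A minimal call could look like this (a sketch; the model name and message are assumptions, following the C<role>/C<content> message structure used by the scripts in this distribution):

    my $streamed = $client->generateChatCompletion(
        model    => 'llama2',
        messages => [
            { role => 'user', content => 'Why is the sky blue?' },
        ],
    )->get;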
=cut
sub build_generateChatCompletion_request( $self, %options ) {
my $method = 'POST';
my $path = '/chat';
my $url = Mojo::URL->new( $self->server . $path );
my $request = AI::Ollama::GenerateChatCompletionRequest->new( \%options )->as_hash;
my $tx = $self->ua->build_tx(
$method => $url,
{
'Accept' => 'application/x-ndjson',
"Content-Type" => 'application/json',
}
=> json => $request,
);
$self->validate_request( $tx );
return $tx
}
sub generateChatCompletion( $self, %options ) {
my $tx = $self->build_generateChatCompletion_request(%options);
my $res = Future::Mojo->new();
my $r1 = Future::Mojo->new();
our @store; # we should use ->retain() instead
push @store, $r1->then( sub( $tx ) {
my $resp = $tx->res;
$self->emit(response => $resp);
# Should we validate using OpenAPI::Modern here?!
if( $resp->code == 200 ) {
# Successful operation.
my $queue = Future::Queue->new( prototype => 'Future::Mojo' );
$res->done( $queue );
my $ct = $resp->headers->content_type;
return unless $ct;
$ct =~ s/;\s+.*//;
if( $ct eq 'application/x-ndjson' ) {
# we only handle ndjson currently
my $handled_offset = 0;
$resp->on(progress => sub($msg,@) {
my $body = $msg->body;
$body =~ s/[^\r\n]+\z//; # Strip any unfinished line
# Only hand out complete lines that we haven't processed yet
my $fresh = substr( $body, $handled_offset );
$handled_offset = length $body;
my @lines = grep { /\S/ } split /\n/, $fresh;
for (@lines) {
my $payload = decode_json( $_ );
$self->validate_response( $payload, $tx );
$queue->push(
AI::Ollama::GenerateChatCompletionResponse->new($payload),
lib/AI/Ollama/Client/Impl.pm
# Unknown/unhandled content type
$res->fail( sprintf("unknown_unhandled content type '%s'", $resp->content_type), $resp );
}
} else {
# An unknown/unhandled response, likely an error
$res->fail( sprintf( "unknown_unhandled code %d", $resp->code ), $resp);
}
});
my $_tx;
$tx->res->once( progress => sub($msg, @) {
$r1->resolve( $tx );
undef $_tx;
undef $r1;
});
$self->emit(request => $tx);
$_tx = $self->ua->start_p($tx);
return $res
}
lib/AI/Ollama/Client/Impl.pm
=item C<< source >>
Name of the model to copy.
=back
=cut
sub build_copyModel_request( $self, %options ) {
my $method = 'POST';
my $path = '/copy';
my $url = Mojo::URL->new( $self->server . $path );
my $request = AI::Ollama::CopyModelRequest->new( \%options )->as_hash;
my $tx = $self->ua->build_tx(
$method => $url,
{
"Content-Type" => 'application/json',
}
=> json => $request,
);
$self->validate_request( $tx );
return $tx
}
sub copyModel( $self, %options ) {
my $tx = $self->build_copyModel_request(%options);
my $res = Future::Mojo->new();
my $r1 = Future::Mojo->new();
$r1->then( sub( $tx ) {
my $resp = $tx->res;
$self->emit(response => $resp);
# Should we validate using OpenAPI::Modern here?!
if( $resp->code == 200 ) {
# Successful operation.
$res->done($resp);
} else {
# An unknown/unhandled response, likely an error
$res->fail( sprintf( "unknown_unhandled code %d: %s", $resp->code, $resp->body ), $resp);
}
})->retain;
# Start our transaction
$self->emit(request => $tx);
$tx = $self->ua->start_p($tx)->then(sub($tx) {
$r1->resolve( $tx );
undef $r1;
})->catch(sub($err) {
$self->emit(response => $tx, $err);
$r1->fail( $err => $tx );
undef $r1;
});
return $res
}
=head2 C<< build_createModel_request >>
lib/AI/Ollama/Client/Impl.pm
my $response = $client->createModel();
my $streamed = $response->get();
repeat {
my ($res) = $streamed->shift->get;
if( $res ) {
say $res->status;
}
Future::Mojo->done( !defined $res );
} until => sub($done) { $done->get };
Create a model from a Modelfile.
It is recommended to set C<modelfile> to the content of the Modelfile rather than just setting C<path>. This is a requirement for remote create. Remote model creation should also create any file blobs, fields such as C<FROM> and C<ADAPTER>, explicitly wi...
=head3 Options
=over 4
lib/AI/Ollama/Client/Impl.pm
=item C<< stream >>
If C<false>, the response will be returned as a single response object; otherwise the response will be streamed as a series of objects.
=back
Returns an L<< AI::Ollama::CreateModelResponse >> on success.
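For instance (a sketch; the model name and Modelfile content are assumptions):

    my $response = $client->createModel(
        name      => 'my-model',
        modelfile => "FROM llama2\n",
    );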
=cut
sub build_createModel_request( $self, %options ) {
my $method = 'POST';
my $path = '/create';
my $url = Mojo::URL->new( $self->server . $path );
my $request = AI::Ollama::CreateModelRequest->new( \%options )->as_hash;
my $tx = $self->ua->build_tx(
$method => $url,
{
'Accept' => 'application/x-ndjson',
"Content-Type" => 'application/json',
}
=> json => $request,
);
$self->validate_request( $tx );
return $tx
}
sub createModel( $self, %options ) {
my $tx = $self->build_createModel_request(%options);
my $res = Future::Mojo->new();
my $r1 = Future::Mojo->new();
our @store; # we should use ->retain() instead
push @store, $r1->then( sub( $tx ) {
my $resp = $tx->res;
$self->emit(response => $resp);
# Should we validate using OpenAPI::Modern here?!
if( $resp->code == 200 ) {
# Successful operation.
my $queue = Future::Queue->new( prototype => 'Future::Mojo' );
$res->done( $queue );
my $ct = $resp->headers->content_type;
return unless $ct;
$ct =~ s/;\s+.*//;
if( $ct eq 'application/x-ndjson' ) {
# we only handle ndjson currently
my $handled_offset = 0;
$resp->on(progress => sub($msg,@) {
my $body = $msg->body;
$body =~ s/[^\r\n]+\z//; # Strip any unfinished line
# Only hand out complete lines that we haven't processed yet
my $fresh = substr( $body, $handled_offset );
$handled_offset = length $body;
my @lines = grep { /\S/ } split /\n/, $fresh;
for (@lines) {
my $payload = decode_json( $_ );
$self->validate_response( $payload, $tx );
$queue->push(
AI::Ollama::CreateModelResponse->new($payload),
lib/AI/Ollama/Client/Impl.pm
# Unknown/unhandled content type
$res->fail( sprintf("unknown_unhandled content type '%s'", $resp->content_type), $resp );
}
} else {
# An unknown/unhandled response, likely an error
$res->fail( sprintf( "unknown_unhandled code %d", $resp->code ), $resp);
}
});
my $_tx;
$tx->res->once( progress => sub($msg, @) {
$r1->resolve( $tx );
undef $_tx;
undef $r1;
});
$self->emit(request => $tx);
$_tx = $self->ua->start_p($tx);
return $res
}
lib/AI/Ollama/Client/Impl.pm
The model name.
Model names follow a C<model:tag> format. Some examples are C<orca-mini:3b-q4_1> and C<llama2:70b>. The tag is optional and, if not provided, will default to C<latest>. The tag is used to identify a specific version.
=back
=cut
sub build_deleteModel_request( $self, %options ) {
my $method = 'DELETE';
my $path = '/delete';
my $url = Mojo::URL->new( $self->server . $path );
my $request = AI::Ollama::DeleteModelRequest->new( \%options )->as_hash;
my $tx = $self->ua->build_tx(
$method => $url,
{
"Content-Type" => 'application/json',
}
=> json => $request,
);
$self->validate_request( $tx );
return $tx
}
sub deleteModel( $self, %options ) {
my $tx = $self->build_deleteModel_request(%options);
my $res = Future::Mojo->new();
my $r1 = Future::Mojo->new();
$r1->then( sub( $tx ) {
my $resp = $tx->res;
$self->emit(response => $resp);
# Should we validate using OpenAPI::Modern here?!
if( $resp->code == 200 ) {
# Successful operation.
$res->done($resp);
} else {
# An unknown/unhandled response, likely an error
$res->fail( sprintf( "unknown_unhandled code %d: %s", $resp->code, $resp->body ), $resp);
}
})->retain;
# Start our transaction
$self->emit(request => $tx);
$tx = $self->ua->start_p($tx)->then(sub($tx) {
$r1->resolve( $tx );
undef $r1;
})->catch(sub($err) {
$self->emit(response => $tx, $err);
$r1->fail( $err => $tx );
undef $r1;
});
return $res
}
=head2 C<< build_generateEmbedding_request >>
lib/AI/Ollama/Client/Impl.pm
=item C<< prompt >>
Text to generate embeddings for.
=back
Returns an L<< AI::Ollama::GenerateEmbeddingResponse >> on success.
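For example (a sketch; the model name is an assumption):

    my $res = $client->generateEmbedding(
        model  => 'llama2',
        prompt => 'Here is an article about llamas...',
    )->get;
    my $vector = $res->embedding; # the embedding for the prompt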
=cut
sub build_generateEmbedding_request( $self, %options ) {
my $method = 'POST';
my $path = '/embeddings';
my $url = Mojo::URL->new( $self->server . $path );
my $request = AI::Ollama::GenerateEmbeddingRequest->new( \%options )->as_hash;
my $tx = $self->ua->build_tx(
$method => $url,
{
'Accept' => 'application/json',
"Content-Type" => 'application/json',
}
=> json => $request,
);
$self->validate_request( $tx );
return $tx
}
sub generateEmbedding( $self, %options ) {
my $tx = $self->build_generateEmbedding_request(%options);
my $res = Future::Mojo->new();
my $r1 = Future::Mojo->new();
$r1->then( sub( $tx ) {
my $resp = $tx->res;
$self->emit(response => $resp);
# Should we validate using OpenAPI::Modern here?!
if( $resp->code == 200 ) {
# Successful operation.
my $ct = $resp->headers->content_type;
$ct =~ s/;\s+.*//;
if( $ct eq 'application/json' ) {
my $payload = $resp->json();
$self->validate_response( $payload, $tx );
lib/AI/Ollama/Client/Impl.pm
$res->fail( sprintf("unknown_unhandled content type '%s'", $resp->content_type), $resp );
}
} else {
# An unknown/unhandled response, likely an error
$res->fail( sprintf( "unknown_unhandled code %d: %s", $resp->code, $resp->body ), $resp);
}
})->retain;
# Start our transaction
$self->emit(request => $tx);
$tx = $self->ua->start_p($tx)->then(sub($tx) {
$r1->resolve( $tx );
undef $r1;
})->catch(sub($err) {
$self->emit(response => $tx, $err);
$r1->fail( $err => $tx );
undef $r1;
});
return $res
}
=head2 C<< build_generateCompletion_request >>
lib/AI/Ollama/Client/Impl.pm
my $response = $client->generateCompletion();
my $streamed = $response->get();
repeat {
my ($res) = $streamed->shift->get;
if( $res ) {
print $res->response;
}
Future::Mojo->done( !defined $res || $res->done );
} until => sub($done) { $done->get };
Generate a response for a given prompt with a provided model.
The final response object will include statistics and additional data from the request.
=head3 Options
=over 4
lib/AI/Ollama/Client/Impl.pm
=item C<< template >>
The full prompt or prompt template (overrides what is defined in the Modelfile).
=back
Returns an L<< AI::Ollama::GenerateCompletionResponse >> on success.
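To keep a short conversational memory across calls, the C<context> of an earlier final response can be fed back in (a sketch; the model name is an assumption and C<$previous> stands for the final L<AI::Ollama::GenerateCompletionResponse> of a preceding call):

    my $streamed = $client->generateCompletion(
        model   => 'llama2',
        prompt  => 'What name did I give you?',
        context => $previous->context,
    )->get;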
=cut
sub build_generateCompletion_request( $self, %options ) {
my $method = 'POST';
my $path = '/generate';
my $url = Mojo::URL->new( $self->server . $path );
my $request = AI::Ollama::GenerateCompletionRequest->new( \%options )->as_hash;
my $tx = $self->ua->build_tx(
$method => $url,
{
'Accept' => 'application/x-ndjson',
"Content-Type" => 'application/json',
}
=> json => $request,
);
$self->validate_request( $tx );
return $tx
}
sub generateCompletion( $self, %options ) {
my $tx = $self->build_generateCompletion_request(%options);
my $res = Future::Mojo->new();
my $r1 = Future::Mojo->new();
our @store; # we should use ->retain() instead
push @store, $r1->then( sub( $tx ) {
my $resp = $tx->res;
$self->emit(response => $resp);
# Should we validate using OpenAPI::Modern here?!
if( $resp->code == 200 ) {
# Successful operation.
my $queue = Future::Queue->new( prototype => 'Future::Mojo' );
$res->done( $queue );
my $ct = $resp->headers->content_type;
return unless $ct;
$ct =~ s/;\s+.*//;
if( $ct eq 'application/x-ndjson' ) {
# we only handle ndjson currently
my $handled_offset = 0;
$resp->on(progress => sub($msg,@) {
my $body = $msg->body;
$body =~ s/[^\r\n]+\z//; # Strip any unfinished line
# Only hand out complete lines that we haven't processed yet
my $fresh = substr( $body, $handled_offset );
$handled_offset = length $body;
my @lines = grep { /\S/ } split /\n/, $fresh;
for (@lines) {
my $payload = decode_json( $_ );
$self->validate_response( $payload, $tx );
$queue->push(
AI::Ollama::GenerateCompletionResponse->new($payload),
lib/AI/Ollama/Client/Impl.pm
# Unknown/unhandled content type
$res->fail( sprintf("unknown_unhandled content type '%s'", $resp->content_type), $resp );
}
} else {
# An unknown/unhandled response, likely an error
$res->fail( sprintf( "unknown_unhandled code %d", $resp->code ), $resp);
}
});
my $_tx;
$tx->res->once( progress => sub($msg, @) {
$r1->resolve( $tx );
undef $_tx;
undef $r1;
});
$self->emit(request => $tx);
$_tx = $self->ua->start_p($tx);
return $res
}
lib/AI/Ollama/Client/Impl.pm
my $response = $client->pullModel();
my $streamed = $response->get();
repeat {
my ($res) = $streamed->shift->get;
if( $res ) {
say $res->status;
}
Future::Mojo->done( !defined $res );
} until => sub($done) { $done->get };
Download a model from the ollama library.
Cancelled pulls are resumed from where they left off, and multiple calls will share the same download progress.
=head3 Options
=over 4
lib/AI/Ollama/Client/Impl.pm
=item C<< stream >>
If C<false>, the response will be returned as a single response object; otherwise the response will be streamed as a series of objects.
=back
Returns an L<< AI::Ollama::PullModelResponse >> on success.
=cut
sub build_pullModel_request( $self, %options ) {
my $method = 'POST';
my $path = '/pull';
my $url = Mojo::URL->new( $self->server . $path );
my $request = AI::Ollama::PullModelRequest->new( \%options )->as_hash;
my $tx = $self->ua->build_tx(
$method => $url,
{
'Accept' => 'application/x-ndjson',
"Content-Type" => 'application/json',
}
=> json => $request,
);
$self->validate_request( $tx );
return $tx
}
sub pullModel( $self, %options ) {
my $tx = $self->build_pullModel_request(%options);
my $res = Future::Mojo->new();
my $r1 = Future::Mojo->new();
our @store; # we should use ->retain() instead
push @store, $r1->then( sub( $tx ) {
my $resp = $tx->res;
$self->emit(response => $resp);
# Should we validate using OpenAPI::Modern here?!
if( $resp->code == 200 ) {
# Successful operation.
my $queue = Future::Queue->new( prototype => 'Future::Mojo' );
$res->done( $queue );
my $ct = $resp->headers->content_type;
return unless $ct;
$ct =~ s/;\s+.*//;
if( $ct eq 'application/x-ndjson' ) {
# we only handle ndjson currently
my $handled_offset = 0;
$resp->on(progress => sub($msg,@) {
my $body = $msg->body;
$body =~ s/[^\r\n]+\z//; # Strip any unfinished line
# Only hand out complete lines that we haven't processed yet
my $fresh = substr( $body, $handled_offset );
$handled_offset = length $body;
my @lines = grep { /\S/ } split /\n/, $fresh;
for (@lines) {
my $payload = decode_json( $_ );
$self->validate_response( $payload, $tx );
$queue->push(
AI::Ollama::PullModelResponse->new($payload),
lib/AI/Ollama/Client/Impl.pm
# Unknown/unhandled content type
$res->fail( sprintf("unknown_unhandled content type '%s'", $resp->content_type), $resp );
}
} else {
# An unknown/unhandled response, likely an error
$res->fail( sprintf( "unknown_unhandled code %d", $resp->code ), $resp);
}
});
my $_tx;
$tx->res->once( progress => sub($msg, @) {
$r1->resolve( $tx );
undef $_tx;
undef $r1;
});
$self->emit(request => $tx);
$_tx = $self->ua->start_p($tx);
return $res
}
lib/AI/Ollama/Client/Impl.pm
=item C<< stream >>
If C<false>, the response will be returned as a single response object; otherwise the response will be streamed as a series of objects.
=back
Returns an L<< AI::Ollama::PushModelResponse >> on success.
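For example (a sketch; the destination model name is an assumption):

    my $res = $client->pushModel(
        name => 'mynamespace/mymodel:latest',
    )->get;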
=cut
sub build_pushModel_request( $self, %options ) {
my $method = 'POST';
my $path = '/push';
my $url = Mojo::URL->new( $self->server . $path );
my $request = AI::Ollama::PushModelRequest->new( \%options )->as_hash;
my $tx = $self->ua->build_tx(
$method => $url,
{
'Accept' => 'application/json',
"Content-Type" => 'application/json',
}
=> json => $request,
);
$self->validate_request( $tx );
return $tx
}
sub pushModel( $self, %options ) {
my $tx = $self->build_pushModel_request(%options);
my $res = Future::Mojo->new();
my $r1 = Future::Mojo->new();
$r1->then( sub( $tx ) {
my $resp = $tx->res;
$self->emit(response => $resp);
# Should we validate using OpenAPI::Modern here?!
if( $resp->code == 200 ) {
# Successful operation.
my $ct = $resp->headers->content_type;
$ct =~ s/;\s+.*//;
if( $ct eq 'application/json' ) {
my $payload = $resp->json();
$self->validate_response( $payload, $tx );
lib/AI/Ollama/Client/Impl.pm
$res->fail( sprintf("unknown_unhandled content type '%s'", $resp->content_type), $resp );
}
} else {
# An unknown/unhandled response, likely an error
$res->fail( sprintf( "unknown_unhandled code %d: %s", $resp->code, $resp->body ), $resp);
}
})->retain;
# Start our transaction
$self->emit(request => $tx);
$tx = $self->ua->start_p($tx)->then(sub($tx) {
$r1->resolve( $tx );
undef $r1;
})->catch(sub($err) {
$self->emit(response => $tx, $err);
$r1->fail( $err => $tx );
undef $r1;
});
return $res
}
=head2 C<< build_showModelInfo_request >>
lib/AI/Ollama/Client/Impl.pm
The model name.
Model names follow a C<model:tag> format. Some examples are C<orca-mini:3b-q4_1> and C<llama2:70b>. The tag is optional and, if not provided, will default to C<latest>. The tag is used to identify a specific version.
=back
Returns an L<< AI::Ollama::ModelInfo >> on success.
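For example (a sketch; the model name is an assumption):

    my $info = $client->showModelInfo(
        name => 'llama2',
    )->get;
    say $info->license; # the model's license text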
=cut
sub build_showModelInfo_request( $self, %options ) {
my $method = 'POST';
my $path = '/show';
my $url = Mojo::URL->new( $self->server . $path );
my $request = AI::Ollama::ModelInfoRequest->new( \%options )->as_hash;
my $tx = $self->ua->build_tx(
$method => $url,
{
'Accept' => 'application/json',
"Content-Type" => 'application/json',
}
=> json => $request,
);
$self->validate_request( $tx );
return $tx
}
sub showModelInfo( $self, %options ) {
my $tx = $self->build_showModelInfo_request(%options);
my $res = Future::Mojo->new();
my $r1 = Future::Mojo->new();
$r1->then( sub( $tx ) {
my $resp = $tx->res;
$self->emit(response => $resp);
# Should we validate using OpenAPI::Modern here?!
if( $resp->code == 200 ) {
# Successful operation.
my $ct = $resp->headers->content_type;
$ct =~ s/;\s+.*//;
if( $ct eq 'application/json' ) {
my $payload = $resp->json();
$self->validate_response( $payload, $tx );
lib/AI/Ollama/Client/Impl.pm
$res->fail( sprintf("unknown_unhandled content type '%s'", $resp->content_type), $resp );
}
} else {
# An unknown/unhandled response, likely an error
$res->fail( sprintf( "unknown_unhandled code %d: %s", $resp->code, $resp->body ), $resp);
}
})->retain;
# Start our transaction
$self->emit(request => $tx);
$tx = $self->ua->start_p($tx)->then(sub($tx) {
$r1->resolve( $tx );
undef $r1;
})->catch(sub($err) {
$self->emit(response => $tx, $err);
$r1->fail( $err => $tx );
undef $r1;
});
return $res
}
=head2 C<< build_listModels_request >>
lib/AI/Ollama/Client/Impl.pm
my $res = $client->listModels()->get;
List models that are available locally.
Returns an L<< AI::Ollama::ModelsResponse >> on success.
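For example, counting the locally installed models (a sketch):

    my $res = $client->listModels()->get;
    my @models = $res->models->@*;
    say scalar @models, " models installed locally";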
=cut
sub build_listModels_request( $self, %options ) {
my $method = 'GET';
my $path = '/tags';
my $url = Mojo::URL->new( $self->server . $path );
my $tx = $self->ua->build_tx(
$method => $url,
{
'Accept' => 'application/json',
}
);
$self->validate_request( $tx );
return $tx
}
sub listModels( $self, %options ) {
my $tx = $self->build_listModels_request(%options);
my $res = Future::Mojo->new();
my $r1 = Future::Mojo->new();
$r1->then( sub( $tx ) {
my $resp = $tx->res;
$self->emit(response => $resp);
# Should we validate using OpenAPI::Modern here?!
if( $resp->code == 200 ) {
# Successful operation.
my $ct = $resp->headers->content_type;
$ct =~ s/;\s+.*//;
if( $ct eq 'application/json' ) {
my $payload = $resp->json();
$self->validate_response( $payload, $tx );
lib/AI/Ollama/Client/Impl.pm
$res->fail( sprintf("unknown_unhandled content type '%s'", $resp->content_type), $resp );
}
} else {
# An unknown/unhandled response, likely an error
$res->fail( sprintf( "unknown_unhandled code %d: %s", $resp->code, $resp->body ), $resp);
}
})->retain;
# Start our transaction
$self->emit(request => $tx);
$tx = $self->ua->start_p($tx)->then(sub($tx) {
$r1->resolve( $tx );
undef $r1;
})->catch(sub($err) {
$self->emit(response => $tx, $err);
$r1->fail( $err => $tx );
undef $r1;
});
return $res
}
sub validate_response( $self, $payload, $tx ) {
if( $self->validate_responses
and my $openapi = $self->openapi ) {
my $results = $openapi->validate_response($payload, { request => $tx->req });
if( $results->{error}) {
say $results;
say $tx->res->to_string;
};
};
}
sub validate_request( $self, $tx ) {
if( $self->validate_requests
and my $openapi = $self->openapi ) {
my $results = $openapi->validate_request($tx->req);
if( $results->{error}) {
say $results;
say $tx->req->to_string;
};
};
}
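# Both flags are read-write attributes, so validation can be switched off
# per client instance, for example (a usage sketch; $client is a
# hypothetical AI::Ollama::Client instance):
#
#     $client->validate_requests(0);
#     $client->validate_responses(0);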
lib/AI/Ollama/CopyModelRequest.pm
AI::Ollama::CopyModelRequest -
=head1 SYNOPSIS
my $obj = AI::Ollama::CopyModelRequest->new();
...
=cut
sub as_hash( $self ) {
return { $self->%* }
}
=head1 PROPERTIES
=head2 C<< destination >>
Name of the new model.
=cut
lib/AI/Ollama/CreateModelRequest.pm
AI::Ollama::CreateModelRequest -
=head1 SYNOPSIS
my $obj = AI::Ollama::CreateModelRequest->new();
...
=cut
sub as_hash( $self ) {
return { $self->%* }
}
=head1 PROPERTIES
=head2 C<< modelfile >>
The contents of the Modelfile.
=cut
lib/AI/Ollama/CreateModelResponse.pm
AI::Ollama::CreateModelResponse -
=head1 SYNOPSIS
my $obj = AI::Ollama::CreateModelResponse->new();
...
=cut
sub as_hash( $self ) {
return { $self->%* }
}
=head1 PROPERTIES
=head2 C<< status >>
Status creating the model
=cut
lib/AI/Ollama/DeleteModelRequest.pm
AI::Ollama::DeleteModelRequest -
=head1 SYNOPSIS
my $obj = AI::Ollama::DeleteModelRequest->new();
...
=cut
sub as_hash( $self ) {
return { $self->%* }
}
=head1 PROPERTIES
=head2 C<< name >>
The model name.
Model names follow a C<model:tag> format. Some examples are C<orca-mini:3b-q4_1> and C<llama2:70b>. The tag is optional and, if not provided, will default to C<latest>. The tag is used to identify a specific version.
lib/AI/Ollama/GenerateChatCompletionRequest.pm
AI::Ollama::GenerateChatCompletionRequest -
=head1 SYNOPSIS
my $obj = AI::Ollama::GenerateChatCompletionRequest->new();
...
=cut
sub as_hash( $self ) {
return { $self->%* }
}
=head1 PROPERTIES
=head2 C<< format >>
The format to return a response in. Currently the only accepted value is json.
Enable JSON mode by setting the format parameter to json. This will structure the response as valid JSON.
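For example, enabling JSON mode through the client (a sketch; the model name is an assumption):

    my $streamed = $client->generateChatCompletion(
        model    => 'llama2',
        format   => 'json',
        messages => [
            { role => 'user', content => 'List three colors as a JSON array.' },
        ],
    )->get;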
lib/AI/Ollama/GenerateChatCompletionResponse.pm
AI::Ollama::GenerateChatCompletionResponse -
=head1 SYNOPSIS
my $obj = AI::Ollama::GenerateChatCompletionResponse->new();
...
=cut
sub as_hash( $self ) {
return { $self->%* }
}
=head1 PROPERTIES
=head2 C<< created_at >>
Date on which a model was created.
=cut
lib/AI/Ollama/GenerateCompletionRequest.pm
AI::Ollama::GenerateCompletionRequest -
=head1 SYNOPSIS
my $obj = AI::Ollama::GenerateCompletionRequest->new();
...
=cut
sub as_hash( $self ) {
return { $self->%* }
}
=head1 PROPERTIES
=head2 C<< context >>
The context parameter returned from a previous request to [generateCompletion]; it can be used to keep a short conversational memory.
=cut
lib/AI/Ollama/GenerateCompletionResponse.pm
AI::Ollama::GenerateCompletionResponse -
=head1 SYNOPSIS
my $obj = AI::Ollama::GenerateCompletionResponse->new();
...
=cut
sub as_hash( $self ) {
return { $self->%* }
}
=head1 PROPERTIES
=head2 C<< context >>
An encoding of the conversation used in this response; it can be sent in the next request to keep a conversational memory.
=cut
lib/AI/Ollama/GenerateEmbeddingRequest.pm
AI::Ollama::GenerateEmbeddingRequest -
=head1 SYNOPSIS
my $obj = AI::Ollama::GenerateEmbeddingRequest->new();
...
=cut
sub as_hash( $self ) {
return { $self->%* }
}
=head1 PROPERTIES
=head2 C<< model >>
The model name.
Model names follow a C<model:tag> format. Some examples are C<orca-mini:3b-q4_1> and C<llama2:70b>. The tag is optional and, if not provided, will default to C<latest>. The tag is used to identify a specific version.
lib/AI/Ollama/GenerateEmbeddingResponse.pm
AI::Ollama::GenerateEmbeddingResponse -
=head1 SYNOPSIS
my $obj = AI::Ollama::GenerateEmbeddingResponse->new();
...
=cut
sub as_hash( $self ) {
return { $self->%* }
}
=head1 PROPERTIES
=head2 C<< embedding >>
The embedding for the prompt.
=cut
lib/AI/Ollama/Message.pm
AI::Ollama::Message -
=head1 SYNOPSIS
my $obj = AI::Ollama::Message->new();
...
=cut
sub as_hash( $self ) {
return { $self->%* }
}
=head1 PROPERTIES
=head2 C<< content >>
The content of the message
=cut
lib/AI/Ollama/Model.pm
AI::Ollama::Model -
=head1 SYNOPSIS
my $obj = AI::Ollama::Model->new();
...
=cut
sub as_hash( $self ) {
return { $self->%* }
}
=head1 PROPERTIES
=head2 C<< modified_at >>
Model modification date.
=cut
lib/AI/Ollama/ModelInfo.pm
AI::Ollama::ModelInfo -
=head1 SYNOPSIS
my $obj = AI::Ollama::ModelInfo->new();
...
=cut
sub as_hash( $self ) {
return { $self->%* }
}
=head1 PROPERTIES
=head2 C<< license >>
The model's license.
=cut
lib/AI/Ollama/ModelInfoRequest.pm
AI::Ollama::ModelInfoRequest -
=head1 SYNOPSIS
my $obj = AI::Ollama::ModelInfoRequest->new();
...
=cut
sub as_hash( $self ) {
return { $self->%* }
}
=head1 PROPERTIES
=head2 C<< name >>
The model name.
Model names follow a C<model:tag> format. Some examples are C<orca-mini:3b-q4_1> and C<llama2:70b>. The tag is optional and, if not provided, will default to C<latest>. The tag is used to identify a specific version.
lib/AI/Ollama/ModelsResponse.pm
AI::Ollama::ModelsResponse -
=head1 SYNOPSIS
my $obj = AI::Ollama::ModelsResponse->new();
...
=cut
sub as_hash( $self ) {
return { $self->%* }
}
=head1 PROPERTIES
=head2 C<< models >>
List of models available locally.
=cut
lib/AI/Ollama/PullModelRequest.pm
AI::Ollama::PullModelRequest -
=head1 SYNOPSIS
my $obj = AI::Ollama::PullModelRequest->new();
...
=cut
sub as_hash( $self ) {
return { $self->%* }
}
=head1 PROPERTIES
=head2 C<< insecure >>
Allow insecure connections to the library.
Only use this if you are pulling from your own library during development.
lib/AI/Ollama/PullModelResponse.pm
AI::Ollama::PullModelResponse -
=head1 SYNOPSIS
my $obj = AI::Ollama::PullModelResponse->new();
...
=cut
sub as_hash( $self ) {
return { $self->%* }
}
=head1 PROPERTIES
=head2 C<< completed >>
Total bytes transferred.
=cut
lib/AI/Ollama/PushModelRequest.pm
AI::Ollama::PushModelRequest -
=head1 SYNOPSIS
my $obj = AI::Ollama::PushModelRequest->new();
...
=cut
sub as_hash( $self ) {
return { $self->%* }
}
=head1 PROPERTIES
=head2 C<< insecure >>
Allow insecure connections to the library.
Only use this if you are pushing to your library during development.
lib/AI/Ollama/PushModelResponse.pm
AI::Ollama::PushModelResponse -
=head1 SYNOPSIS
my $obj = AI::Ollama::PushModelResponse->new();
...
=cut
sub as_hash( $self ) {
return { $self->%* }
}
=head1 PROPERTIES
=head2 C<< digest >>
the model's digest
=cut
lib/AI/Ollama/RequestOptions.pm
AI::Ollama::RequestOptions -
=head1 SYNOPSIS
my $obj = AI::Ollama::RequestOptions->new();
...
=cut
sub as_hash( $self ) {
return { $self->%* }
}
=head1 PROPERTIES
=head2 C<< embedding_only >>
Enable embedding only. (Default: false)
=cut
scripts/code-completion.pl
my $ol = AI::Ollama::Client->new(
server => 'http://192.168.1.97:11434/api',
);
my $model = 'codellama:13b-code';
my $tx = $ol->pullModel(
name => $model,
)->get;
my @prompts = @ARGV ? @ARGV : (qq{fetch an url and print its content with Mojolicious; write concise code <PRE> sub fetch {\n <SUF> } <MID>});
for my $prompt (@prompts) {
my $response = $ol->generateCompletion(
model => $model,
prompt => $prompt,
system => 'You are a helpful concise coding assistant',
);
my $code;
my $responses = $response->get;
repeat {
my ($res) = $responses->shift;
my $info;
if( $res ) {
($info) = $res->get; # undef once the stream is exhausted
};
if( $info ) {
local $| = 1;
print $info->response;
$code .= $info->response;
};
Future::Mojo->done( !defined $res || !$info || $info->done );
} until => sub($done) { my $res = $done->get; return $res };
if( $code =~ /\A(.*?)<EOT>/s ) {
my $insert = $1;
my ($pre,$suf) = ($prompt =~ /<PRE>(.*?)<SUF>(.*?)<MID>/s);
print "$pre$insert$suf";
}
}
scripts/describe-image.pl
use 5.020;
use Mojo::JSON 'decode_json';
use experimental 'signatures';
use AI::Ollama::Client;
use Future::Utils 'repeat';
my $ol = AI::Ollama::Client->new(
server => 'http://192.168.1.97:11434/api',
);
$ol->on('request' => sub( $ol, $tx ) {
use Data::Dumper;
warn Dumper $tx->req;
});
$ol->on('response' => sub( $ol, $tx, $err='' ) {
if( $err ) {
warn $err;
} else {
#use Data::Dumper;
warn $tx->code;
}
});
my $tx = $ol->pullModel(
name => 'llava:latest',
)->catch(sub {
use Data::Dumper; warn Dumper \@_;
})->get;
my @images = @ARGV ? @ARGV : ('t/testdata/objectdetection.jpg');
for my $image (@images) {
my $response = $ol->generateCompletion(
model => 'llava:latest',
prompt => 'You are tagging images. Please list all the objects in this image as tags. Also list the location where it was taken.',
images => [
scripts/describe-image.pl
repeat {
my ($res) = $responses->shift;
my $info;
if( $res ) {
($info) = $res->get; # undef once the stream is exhausted
};
if( $info ) {
local $| = 1;
print $info->response;
};
Future::Mojo->done( !defined $res || !$info || $info->done );
} until => sub($done) { my $res = $done->get; return $res };
}
scripts/music-genre-json.pl
model => $model,
prompt => $prompt,
temperature => '0.0',
messages => [
{role => 'system',
content => join "\n",
'You are a music expert.',
'You are given an artist name and song title.',
'Please suggest three musical genres of that title and performer.',
'Only list the musical genres.',
#'Answer in JSON only with an array containing objects { "genre": "the genre", "sub-genre": "the sub genre" }.',
},
{ role => 'user', content => $prompt },
],
);
my $chat;
my $responses = $response->get;
repeat {
my $check = eval {
my ($res) = $responses->shift;
scripts/music-genre-json.pl
if( $res ) {
($info) = $res->get; # undef once the stream is exhausted
};
if( $info ) {
local $| = 1;
#print $info->message->{content};
$chat .= $info->message->{content};
};
Future::Mojo->done( !defined $res || !$info || $info->done );
}; warn $@ if $@;
$check
} until => sub($done) { my $res = $done->get; return $res };
# Try to extract from a text list
#my @genres = ($chat =~ /^\s*[\d]+\.\s*(.*?)$/mg);
# Try to extract from a JSON string
my @genres;
my ($json) = ($chat =~ /^(\[.*\])$/msg);
if( $json ) {
@genres = decode_json( $json )->@*;
};
xt/99-compile.t
};
plan 'no_plan';
require './Makefile.PL';
# Loaded from Makefile.PL
our %module = get_module_info();
my $last_version = undef;
sub check {
#return if (! m{(\.pm|\.pl) \z}xmsi);
my ($stdout, $stderr, $exit) = capture(sub {
system( $^X, '-Mblib', '-c', $_ );
});
s!\s*\z!!
for ($stdout, $stderr);
if( $exit ) {
diag $stderr;
diag "Exit code: ", $exit;
fail($_);
xt/99-compile.t
);
if( my $exe = $module{EXE_FILES}) {
push @files, @$exe;
};
for (@files) {
check($_)
}
sub wanted {
push @files, $File::Find::name if /\.p(l|m|od)$/;
}
xt/99-pod.t
if( my $exe = $module{EXE_FILES}) {
push @files, @$exe;
};
plan tests => scalar @files;
foreach my $file (@files) {
pod_file_ok($file);
}
}
sub wanted {
push @files, $File::Find::name if /\.p(l|m|od)$/;
}