AI-Ollama-Client
view release on metacpan or search on metacpan
lib/AI/Ollama/Client/Impl.pm view on Meta::CPAN
$self->validate_response( $payload, $tx );
$queue->push(
AI::Ollama::GenerateCompletionResponse->new($payload),
);
};
if( $msg->{state} eq 'finished' ) {
$queue->finish();
}
});
} else {
# Unknown/unhandled content type
$res->fail( sprintf("unknown_unhandled content type '%s'", $resp->content_type), $resp );
}
} else {
# An unknown/unhandled response, likely an error
$res->fail( sprintf( "unknown_unhandled code %d", $resp->code ), $resp);
}
});
my $_tx;
$tx->res->once( progress => sub($msg, @) {
$r1->resolve( $tx );
undef $_tx;
undef $r1;
});
$self->emit(request => $tx);
$_tx = $self->ua->start_p($tx);
return $res
}
=head2 C<< build_pullModel_request >>
Build an HTTP request as L<Mojo::Request> object. For the parameters see below.
=head2 C<< pullModel >>
use Future::Utils 'repeat';
my $response = $client->pullModel();
my $streamed = $response->get();
repeat {
my ($res) = $streamed->shift;
if( $res ) {
my $str = $res->get;
say $str;
}
Future::Mojo->done( defined $res );
} until => sub($done) { $done->get };
Download a model from the ollama library.
Cancelled pulls are resumed from where they left off, and multiple calls will share the same download progress.
=head3 Options
=over 4
=item C<< insecure >>
Allow insecure connections to the library.
Only use this if you are pulling from your own library during development.
=item C<< name >>
The model name.
Model names follow a C<model:tag> format. Some examples are C<orca-mini:3b-q4_1> and C<llama2:70b>. The tag is optional and, if not provided, will default to C<latest>. The tag is used to identify a specific version.
=item C<< stream >>
If C<false> the response will be returned as a single response object, otherwise the response will be streamed as a series of objects.
=back
Returns a L<< AI::Ollama::PullModelResponse >> on success.
=cut
sub build_pullModel_request( $self, %options ) {
    # Construct (but do not send) the transaction for POST /pull.
    # %options are validated/serialized by AI::Ollama::PullModelRequest;
    # the caller is expected to hand the returned Mojo::Transaction to
    # the user agent itself.
    my $payload  = AI::Ollama::PullModelRequest->new( \%options )->as_hash;
    my $endpoint = Mojo::URL->new( $self->server . '/pull' );
    my $tx = $self->ua->build_tx(
        'POST' => $endpoint,
        {
            'Accept' => 'application/x-ndjson',
            "Content-Type" => 'application/json',
        }
        => json => $payload,
    );
    # Check the outgoing request against the OpenAPI schema before returning.
    $self->validate_request( $tx );
    return $tx
}
sub pullModel( $self, %options ) {
    # Download a model from the Ollama library, streaming progress.
    # Returns a Future::Mojo that resolves to a Future::Queue; each item
    # shifted from the queue is an AI::Ollama::PullModelResponse built
    # from one NDJSON line of the streamed response body.
    my $tx = $self->build_pullModel_request(%options);
    my $res = Future::Mojo->new();
    my $r1 = Future::Mojo->new();
    our @store; # we should use ->retain() instead
    push @store, $r1->then( sub( $tx ) {
        my $resp = $tx->res;
        $self->emit(response => $resp);
        # Should we validate using OpenAPI::Modern here?!
        if( $resp->code == 200 ) {
            # Successful operation.
            my $queue = Future::Queue->new( prototype => 'Future::Mojo' );
            $res->done( $queue );
            my $ct = $resp->headers->content_type;
            return unless $ct;
            $ct =~ s/;\s+.*//; # drop any charset/parameter suffix
            if( $ct eq 'application/x-ndjson' ) {
                # we only handle ndjson currently
                my $handled_offset = 0;
                $resp->on(progress => sub($msg,@) {
                    # FIX: strip the unfinished trailing line BEFORE
                    # slicing off the fresh part. The previous code took
                    # $fresh from the raw body, so a partial final line
                    # was fed to decode_json(), which dies on incomplete
                    # JSON mid-stream.
                    my $body = $msg->body;
                    $body =~ s/[^\r\n]+\z//; # Strip any unfinished line
                    my $fresh = substr( $body, $handled_offset );
                    $handled_offset = length $body;
                    # Skip empty fragments (e.g. between \r\n pairs)
                    for my $line (grep { length } split /\n/, $fresh) {
                        my $payload = decode_json( $line );
                        $self->validate_response( $payload, $tx );
                        $queue->push(
                            AI::Ollama::PullModelResponse->new($payload),
                        );
                    }
                    if( $msg->{state} eq 'finished' ) {
                        $queue->finish();
                    }
                });
            } else {
                # Unknown/unhandled content type
                $res->fail( sprintf("unknown_unhandled content type '%s'", $resp->content_type), $resp );
            }
        } else {
            # An unknown/unhandled response, likely an error
            $res->fail( sprintf( "unknown_unhandled code %d", $resp->code ), $resp);
        }
    });
    # Keep the in-flight transaction alive until the first progress event,
    # then resolve $r1 so the handler above can start draining the body.
    my $_tx;
    $tx->res->once( progress => sub($msg, @) {
        $r1->resolve( $tx );
        undef $_tx;
        undef $r1;
    });
    $self->emit(request => $tx);
    $_tx = $self->ua->start_p($tx);
    return $res
}
=head2 C<< build_pushModel_request >>
Build an HTTP request as L<Mojo::Request> object. For the parameters see below.
=head2 C<< pushModel >>
my $res = $client->pushModel()->get;
Upload a model to a model library.
Requires registering for ollama.ai and adding a public key first.
=head3 Options
=over 4
=item C<< insecure >>
Allow insecure connections to the library.
Only use this if you are pushing to your library during development.
=item C<< name >>
The name of the model to push, in the form C<< <namespace>/<model>:<tag> >>.
=item C<< stream >>
If C<false> the response will be returned as a single response object, otherwise the response will be streamed as a series of objects.
=back
Returns a L<< AI::Ollama::PushModelResponse >> on success.
=cut
sub build_pushModel_request( $self, %options ) {
    # Construct (but do not send) the transaction for POST /push.
    # %options are validated/serialized by AI::Ollama::PushModelRequest;
    # the returned Mojo::Transaction is started by the caller.
    my $payload  = AI::Ollama::PushModelRequest->new( \%options )->as_hash;
    my $endpoint = Mojo::URL->new( $self->server . '/push' );
    my $tx = $self->ua->build_tx(
        'POST' => $endpoint,
        {
            'Accept' => 'application/json',
            "Content-Type" => 'application/json',
        }
        => json => $payload,
    );
    # Check the outgoing request against the OpenAPI schema before returning.
    $self->validate_request( $tx );
    return $tx
}
sub pushModel( $self, %options ) {
my $tx = $self->build_pushModel_request(%options);
my $res = Future::Mojo->new();
my $r1 = Future::Mojo->new();
$r1->then( sub( $tx ) {
my $resp = $tx->res;
$self->emit(response => $resp);
# Should we validate using OpenAPI::Modern here?!
if( $resp->code == 200 ) {
# Successful operation.
my $ct = $resp->headers->content_type;
$ct =~ s/;\s+.*//;
if( $ct eq 'application/json' ) {
my $payload = $resp->json();
$self->validate_response( $payload, $tx );
$res->done(
AI::Ollama::PushModelResponse->new($payload),
);
( run in 1.119 second using v1.01-cache-2.11-cpan-39bf76dae61 )