AI-Ollama-Client
view release on metacpan or search on metacpan
lib/AI/Ollama/Client.pm view on Meta::CPAN
Returns a L<< AI::Ollama::GenerateCompletionResponse >>.
=cut
around 'generateCompletion' => sub ( $super, $self, %options ) {
    # Ollama expects images as base64-encoded strings. Encode them here,
    # building a fresh arrayref so we don't overwrite the caller's input array.
    if( my $images = $options{images} ) {
        # Each entry may be a raw image string, or a hashref of the form
        # { filename => '/path/to/image' } whose file contents get slurped.
        $options{images} = [
            map {
                my $item = $_;
                if( ref($item) eq 'HASH' ) {
                    # Read the raw bytes of the named file before encoding.
                    # (Original scrape was garbled here; the HASH guard and the
                    # filename comment establish the intended key.)
                    $item = Mojo::File->new( $item->{filename} )->slurp();
                }
                encode_base64($item)
            } @$images
        ];
    }
    # Delegate to the original generateCompletion with the encoded images.
    return $super->($self, %options);
};
=head2 C<< pullModel >>
my $res = $client->pullModel(
name => 'llama',
)->get;
Download a model from the ollama library.
Returns a L<< AI::Ollama::PullModelResponse >>.
=cut
=head2 C<< pushModel >>
my $res = $client->pushModel()->get;
Upload a model to a model library.
Returns a L<< AI::Ollama::PushModelResponse >>.
=cut
=head2 C<< showModelInfo >>
my $info = $client->showModelInfo()->get;
say $info->modelfile;
Show details about a model including modelfile, template, parameters, license, and system prompt.
Returns a L<< AI::Ollama::ModelInfo >>.
=cut
=head2 C<< listModels >>
my $info = $client->listModels()->get;
for my $model ($info->models->@*) {
say $model->model; # llama2:latest
}
List models that are available locally.
Returns a L<< AI::Ollama::ModelsResponse >>.
=cut
1;
( run in 1.383 second using v1.01-cache-2.11-cpan-39bf76dae61 )