AI-Ollama-Client
view release on metacpan or search on metacpan
lib/AI/Ollama/PullModelRequest.pm view on Meta::CPAN
package AI::Ollama::PullModelRequest 0.05;
# DO NOT EDIT! This is an autogenerated file.
use 5.020;
use Moo 2;
use experimental 'signatures';
use stable 'postderef';
use Types::Standard qw(Enum Str Bool Num Int HashRef ArrayRef);
use MooX::TypeTiny;
use namespace::clean;

=encoding utf8

=head1 NAME

AI::Ollama::PullModelRequest - request body for pulling a model from the Ollama library

=head1 SYNOPSIS

  my $obj = AI::Ollama::PullModelRequest->new(
      # C<name> is required; see L</PROPERTIES>
      name => 'llama2:70b',
  );
  ...

=cut

# Return a shallow hash copy of the object's attributes (postfix deref of
# the underlying Moo hash), e.g. for serializing the request body to JSON.
sub as_hash( $self ) {
    return { $self->%* }
}

=head1 PROPERTIES

=head2 C<< insecure >>

Allow insecure connections to the library.

Only use this if you are pulling from your own library during development.

=cut

has 'insecure' => (
    is => 'ro',
    # Boolean flag per the Ollama API; typed for consistency with C<name>.
    isa => Bool,
);

=head2 C<< name >>

The model name.

Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama2:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version.

=cut

has 'name' => (
    is => 'ro',
    isa => Str,
    required => 1,
);

=head2 C<< stream >>

If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects.

=cut

has 'stream' => (
    is => 'ro',
    # Boolean flag per the Ollama API; typed for consistency with C<name>.
    isa => Bool,
);

1;
( run in 1.067 second using v1.01-cache-2.11-cpan-39bf76dae61 )