AI-Ollama-Client

 View release on MetaCPAN or search on MetaCPAN

lib/AI/Ollama/RequestOptions.pm  view on Meta::CPAN

package AI::Ollama::RequestOptions 0.05;
# DO NOT EDIT! This is an autogenerated file.

use 5.020;
use Moo 2;
use experimental 'signatures';
use stable 'postderef';
use Types::Standard qw(Enum Str Bool Num Int HashRef ArrayRef);
use MooX::TypeTiny;

use namespace::clean;

=encoding utf8

=head1 NAME

AI::Ollama::RequestOptions - additional model parameters for an Ollama request

=head1 SYNOPSIS

  my $obj = AI::Ollama::RequestOptions->new();
  ...

=cut

# Return the object's attributes as a plain (unblessed) hashref.
# Produces a shallow copy, so callers may mutate the result without
# affecting the object itself.
sub as_hash( $self ) {
    my %attributes = %{ $self };
    return \%attributes;
}

=head1 PROPERTIES

=head2 C<< embedding_only >>

Enable embedding only. (Default: false)

=cut

# Boolean-ish flag; read-only. No `isa` constraint — presumably so that
# JSON-style boolean values (e.g. JSON::PP::Boolean objects) pass through
# unvalidated. TODO(review): confirm against the generator's schema.
has embedding_only => (
    is => 'ro',
);

=head2 C<< f16_kv >>

Enable f16 key/value. (Default: false)

=cut

# Boolean-ish flag; read-only. No `isa` constraint — presumably so that
# JSON-style boolean values (e.g. JSON::PP::Boolean objects) pass through
# unvalidated. TODO(review): confirm against the generator's schema.
has f16_kv => (
    is => 'ro',
);

=head2 C<< frequency_penalty >>

Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.

=cut

# Numeric penalty factor; read-only, validated as a Num (Types::Standard).
has frequency_penalty => (
    is  => 'ro',
    isa => Num,
);

=head2 C<< logits_all >>

Enable logits all. (Default: false)

=cut

# Boolean-ish flag; read-only. No `isa` constraint — presumably so that
# JSON-style boolean values (e.g. JSON::PP::Boolean objects) pass through
# unvalidated. TODO(review): confirm against the generator's schema.
has logits_all => (
    is => 'ro',
);

=head2 C<< low_vram >>

Enable low VRAM mode. (Default: false)

=cut

# Boolean-ish flag; read-only. No `isa` constraint — presumably so that
# JSON-style boolean values (e.g. JSON::PP::Boolean objects) pass through
# unvalidated. TODO(review): confirm against the generator's schema.
has low_vram => (
    is => 'ro',
);

=head2 C<< main_gpu >>

The GPU to use for the main model. Default is 0.



( run in 0.634 second using v1.01-cache-2.11-cpan-39bf76dae61 )