AI-Ollama-Client

 view release on metacpan or  search on metacpan

lib/AI/Ollama/GenerateCompletionResponse.pm  view on Meta::CPAN

package AI::Ollama::GenerateCompletionResponse 0.03;
# DO NOT EDIT! This is an autogenerated file.
use 5.020;
use Moo 2;
use experimental 'signatures';
use Types::Standard qw(Str Bool Num Int Object ArrayRef);
use MooX::TypeTiny;

=head1 NAME

AI::Ollama::GenerateCompletionResponse - a completion response returned by the Ollama API

=head1 SYNOPSIS

  my $obj = AI::Ollama::GenerateCompletionResponse->new();
  ...

=cut

# Return the object's attributes as a plain (unblessed) hashref.
# The copy is shallow: nested references are shared with the object.
sub as_hash( $self ) {
    my %attrs = $self->%*;
    return \%attrs;
}

=head1 PROPERTIES

=head2 C<< context >>

An encoding of the conversation used in this response; this can be sent in the next request to keep a conversational memory.

=cut

# Read-only list of token ids encoding the conversation so far;
# pass it back to the API to continue the same conversation.
has context => (
    is  => 'ro',
    isa => ArrayRef[ Int ],
);

=head2 C<< created_at >>

Date on which a model was created.

=cut

# Read-only timestamp string reported by the API.
has created_at => (
    is  => 'ro',
    isa => Str,
);

=head2 C<< done >>

Whether the response has completed.

=cut

# Whether the response has completed.
# Fix: every sibling attribute declares a type constraint, but this one
# did not; Bool is already imported from Types::Standard at the top of
# the file, so constrain it for consistency. Ollama sends this field as
# a JSON boolean, which JSON decoders map to values Bool accepts.
has 'done' => (
    is       => 'ro',
    isa      => Bool,
);

=head2 C<< eval_count >>

Number of tokens in the response.

=cut

# Read-only count of tokens generated for the response.
has eval_count => (
    is  => 'ro',
    isa => Int,
);

=head2 C<< eval_duration >>

Time in nanoseconds spent generating the response.

=cut

# Read-only generation time, in nanoseconds.
has eval_duration => (
    is  => 'ro',
    isa => Int,
);

=head2 C<< load_duration >>

Time spent in nanoseconds loading the model.

=cut

has 'load_duration' => (
    is       => 'ro',
    isa      => Int,

 view all matches for this distribution
 view release on metacpan -  search on metacpan

( run in 0.505 second using v1.00-cache-2.02-grep-82fe00e-cpan-2c419f77a38b )