Makefile.PL view on Meta::CPAN
warn "(Re)Creating $example_file\n";
$examples =~ s/\r\n/\n/g;
update_file( $example_file, $examples );
};
};
sub update_file {
    my( $filename, $new_content ) = @_;

    # Slurp the existing file (if any) so we can compare it to the new content
    my $content;
    if( -f $filename ) {
        open my $fh, '<:raw:encoding(UTF-8)', $filename
            or die "Couldn't read '$filename': $!";
        local $/;
        $content = <$fh>;
    };

    # Only rewrite the file when it is missing or its content has changed
    if( !defined $content or $content ne $new_content ) {
        if( open my $fh, '>:raw:encoding(UTF-8)', $filename ) {
            print $fh $new_content;
        } else {
            warn "Couldn't (re)write '$filename': $!";
        };
    };
}
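A minimal usage sketch of the helper above; the path and content are illustrative only, not taken from the distribution:

    # First call (re)writes the file; an identical second call finds the same
    # content on disk and skips the write, so the mtime only changes when the
    # content does.
    update_file( 'generated/example.txt', "hello\n" );   # hypothetical path
    update_file( 'generated/example.txt', "hello\n" );   # no rewrite happens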
lib/AI/Ollama/Client/Impl.pm view on Meta::CPAN
use AI::Ollama::Model;
use AI::Ollama::ModelInfo;
use AI::Ollama::ModelInfoRequest;
use AI::Ollama::ModelsResponse;
use AI::Ollama::PullModelRequest;
use AI::Ollama::PullModelResponse;
use AI::Ollama::PushModelRequest;
use AI::Ollama::PushModelResponse;
use AI::Ollama::RequestOptions;
=encoding utf8
=head1 SYNOPSIS
    my $client = AI::Ollama::Client::Impl->new(
        schema_file => '...',
    );
=head1 PROPERTIES
=head2 B<< schema_file >>
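A minimal construction sketch; the path below is an assumption, pointing at the curated OpenAPI description that ships with this distribution rather than a documented default:

    # Point the client at the bundled Ollama OpenAPI schema (path assumed
    # from the ollama/ directory shown further down in this release).
    my $client = AI::Ollama::Client::Impl->new(
        schema_file => 'ollama/ollama-curated.yaml',
    );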
lib/AI/Ollama/CopyModelRequest.pm view on Meta::CPAN
use 5.020;
use Moo 2;
use experimental 'signatures';
use stable 'postderef';
use Types::Standard qw(Enum Str Bool Num Int HashRef ArrayRef);
use MooX::TypeTiny;
use namespace::clean;
=encoding utf8
=head1 NAME
AI::Ollama::CopyModelRequest -
=head1 SYNOPSIS
my $obj = AI::Ollama::CopyModelRequest->new();
...
lib/AI/Ollama/CreateModelRequest.pm view on Meta::CPAN
use 5.020;
use Moo 2;
use experimental 'signatures';
use stable 'postderef';
use Types::Standard qw(Enum Str Bool Num Int HashRef ArrayRef);
use MooX::TypeTiny;
use namespace::clean;
=encoding utf8
=head1 NAME
AI::Ollama::CreateModelRequest -
=head1 SYNOPSIS
my $obj = AI::Ollama::CreateModelRequest->new();
...
lib/AI/Ollama/CreateModelResponse.pm view on Meta::CPAN
use 5.020;
use Moo 2;
use experimental 'signatures';
use stable 'postderef';
use Types::Standard qw(Enum Str Bool Num Int HashRef ArrayRef);
use MooX::TypeTiny;
use namespace::clean;
=encoding utf8
=head1 NAME
AI::Ollama::CreateModelResponse -
=head1 SYNOPSIS
my $obj = AI::Ollama::CreateModelResponse->new();
...
lib/AI/Ollama/DeleteModelRequest.pm view on Meta::CPAN
use 5.020;
use Moo 2;
use experimental 'signatures';
use stable 'postderef';
use Types::Standard qw(Enum Str Bool Num Int HashRef ArrayRef);
use MooX::TypeTiny;
use namespace::clean;
=encoding utf8
=head1 NAME
AI::Ollama::DeleteModelRequest -
=head1 SYNOPSIS
my $obj = AI::Ollama::DeleteModelRequest->new();
...
lib/AI/Ollama/GenerateChatCompletionRequest.pm view on Meta::CPAN
use 5.020;
use Moo 2;
use experimental 'signatures';
use stable 'postderef';
use Types::Standard qw(Enum Str Bool Num Int HashRef ArrayRef);
use MooX::TypeTiny;
use namespace::clean;
=encoding utf8
=head1 NAME
AI::Ollama::GenerateChatCompletionRequest -
=head1 SYNOPSIS
my $obj = AI::Ollama::GenerateChatCompletionRequest->new();
...
lib/AI/Ollama/GenerateChatCompletionResponse.pm view on Meta::CPAN
use 5.020;
use Moo 2;
use experimental 'signatures';
use stable 'postderef';
use Types::Standard qw(Enum Str Bool Num Int HashRef ArrayRef);
use MooX::TypeTiny;
use namespace::clean;
=encoding utf8
=head1 NAME
AI::Ollama::GenerateChatCompletionResponse -
=head1 SYNOPSIS
my $obj = AI::Ollama::GenerateChatCompletionResponse->new();
...
lib/AI/Ollama/GenerateCompletionRequest.pm view on Meta::CPAN
use 5.020;
use Moo 2;
use experimental 'signatures';
use stable 'postderef';
use Types::Standard qw(Enum Str Bool Num Int HashRef ArrayRef);
use MooX::TypeTiny;
use namespace::clean;
=encoding utf8
=head1 NAME
AI::Ollama::GenerateCompletionRequest -
=head1 SYNOPSIS
my $obj = AI::Ollama::GenerateCompletionRequest->new();
...
lib/AI/Ollama/GenerateCompletionResponse.pm view on Meta::CPAN
use 5.020;
use Moo 2;
use experimental 'signatures';
use stable 'postderef';
use Types::Standard qw(Enum Str Bool Num Int HashRef ArrayRef);
use MooX::TypeTiny;
use namespace::clean;
=encoding utf8
=head1 NAME
AI::Ollama::GenerateCompletionResponse -
=head1 SYNOPSIS
my $obj = AI::Ollama::GenerateCompletionResponse->new();
...
=cut
sub as_hash( $self ) {
    return { $self->%* }
}
=head1 PROPERTIES
=head2 C<< context >>
An encoding of the conversation used in this response; this can be sent in the next request to keep a conversational memory.
=cut
has 'context' => (
    is => 'ro',
    isa => ArrayRef[Int],
);
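A sketch of how this field can be fed back into a follow-up request; the accessor and field names follow the bundled Ollama API description and are assumptions rather than verbatim documented usage:

    # $res is assumed to be an AI::Ollama::GenerateCompletionResponse from a
    # previous call; passing its context into the next request continues the
    # same conversation.
    my $next = AI::Ollama::GenerateCompletionRequest->new(
        model   => 'llama2',                  # illustrative model name
        prompt  => 'And why is the sunset red?',
        context => $res->context,             # reuse the conversational memory
    );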
=head2 C<< created_at >>
lib/AI/Ollama/GenerateEmbeddingRequest.pm view on Meta::CPAN
use 5.020;
use Moo 2;
use experimental 'signatures';
use stable 'postderef';
use Types::Standard qw(Enum Str Bool Num Int HashRef ArrayRef);
use MooX::TypeTiny;
use namespace::clean;
=encoding utf8
=head1 NAME
AI::Ollama::GenerateEmbeddingRequest -
=head1 SYNOPSIS
my $obj = AI::Ollama::GenerateEmbeddingRequest->new();
...
lib/AI/Ollama/GenerateEmbeddingResponse.pm view on Meta::CPAN
use 5.020;
use Moo 2;
use experimental 'signatures';
use stable 'postderef';
use Types::Standard qw(Enum Str Bool Num Int HashRef ArrayRef);
use MooX::TypeTiny;
use namespace::clean;
=encoding utf8
=head1 NAME
AI::Ollama::GenerateEmbeddingResponse -
=head1 SYNOPSIS
my $obj = AI::Ollama::GenerateEmbeddingResponse->new();
...
lib/AI/Ollama/Message.pm view on Meta::CPAN
use 5.020;
use Moo 2;
use experimental 'signatures';
use stable 'postderef';
use Types::Standard qw(Enum Str Bool Num Int HashRef ArrayRef);
use MooX::TypeTiny;
use namespace::clean;
=encoding utf8
=head1 NAME
AI::Ollama::Message -
=head1 SYNOPSIS
my $obj = AI::Ollama::Message->new();
...
lib/AI/Ollama/Model.pm view on Meta::CPAN
use 5.020;
use Moo 2;
use experimental 'signatures';
use stable 'postderef';
use Types::Standard qw(Enum Str Bool Num Int HashRef ArrayRef);
use MooX::TypeTiny;
use namespace::clean;
=encoding utf8
=head1 NAME
AI::Ollama::Model -
=head1 SYNOPSIS
my $obj = AI::Ollama::Model->new();
...
lib/AI/Ollama/ModelInfo.pm view on Meta::CPAN
use 5.020;
use Moo 2;
use experimental 'signatures';
use stable 'postderef';
use Types::Standard qw(Enum Str Bool Num Int HashRef ArrayRef);
use MooX::TypeTiny;
use namespace::clean;
=encoding utf8
=head1 NAME
AI::Ollama::ModelInfo -
=head1 SYNOPSIS
my $obj = AI::Ollama::ModelInfo->new();
...
lib/AI/Ollama/ModelInfoRequest.pm view on Meta::CPAN
use 5.020;
use Moo 2;
use experimental 'signatures';
use stable 'postderef';
use Types::Standard qw(Enum Str Bool Num Int HashRef ArrayRef);
use MooX::TypeTiny;
use namespace::clean;
=encoding utf8
=head1 NAME
AI::Ollama::ModelInfoRequest -
=head1 SYNOPSIS
my $obj = AI::Ollama::ModelInfoRequest->new();
...
lib/AI/Ollama/ModelsResponse.pm view on Meta::CPAN
use 5.020;
use Moo 2;
use experimental 'signatures';
use stable 'postderef';
use Types::Standard qw(Enum Str Bool Num Int HashRef ArrayRef);
use MooX::TypeTiny;
use namespace::clean;
=encoding utf8
=head1 NAME
AI::Ollama::ModelsResponse -
=head1 SYNOPSIS
my $obj = AI::Ollama::ModelsResponse->new();
...
lib/AI/Ollama/PullModelRequest.pm view on Meta::CPAN
use 5.020;
use Moo 2;
use experimental 'signatures';
use stable 'postderef';
use Types::Standard qw(Enum Str Bool Num Int HashRef ArrayRef);
use MooX::TypeTiny;
use namespace::clean;
=encoding utf8
=head1 NAME
AI::Ollama::PullModelRequest -
=head1 SYNOPSIS
my $obj = AI::Ollama::PullModelRequest->new();
...
lib/AI/Ollama/PullModelResponse.pm view on Meta::CPAN
use 5.020;
use Moo 2;
use experimental 'signatures';
use stable 'postderef';
use Types::Standard qw(Enum Str Bool Num Int HashRef ArrayRef);
use MooX::TypeTiny;
use namespace::clean;
=encoding utf8
=head1 NAME
AI::Ollama::PullModelResponse -
=head1 SYNOPSIS
my $obj = AI::Ollama::PullModelResponse->new();
...
lib/AI/Ollama/PushModelRequest.pm view on Meta::CPAN
use 5.020;
use Moo 2;
use experimental 'signatures';
use stable 'postderef';
use Types::Standard qw(Enum Str Bool Num Int HashRef ArrayRef);
use MooX::TypeTiny;
use namespace::clean;
=encoding utf8
=head1 NAME
AI::Ollama::PushModelRequest -
=head1 SYNOPSIS
my $obj = AI::Ollama::PushModelRequest->new();
...
lib/AI/Ollama/PushModelResponse.pm view on Meta::CPAN
use 5.020;
use Moo 2;
use experimental 'signatures';
use stable 'postderef';
use Types::Standard qw(Enum Str Bool Num Int HashRef ArrayRef);
use MooX::TypeTiny;
use namespace::clean;
=encoding utf8
=head1 NAME
AI::Ollama::PushModelResponse -
=head1 SYNOPSIS
my $obj = AI::Ollama::PushModelResponse->new();
...
lib/AI/Ollama/RequestOptions.pm view on Meta::CPAN
use 5.020;
use Moo 2;
use experimental 'signatures';
use stable 'postderef';
use Types::Standard qw(Enum Str Bool Num Int HashRef ArrayRef);
use MooX::TypeTiny;
use namespace::clean;
=encoding utf8
=head1 NAME
AI::Ollama::RequestOptions -
=head1 SYNOPSIS
my $obj = AI::Ollama::RequestOptions->new();
...
ollama/ollama-curated.json view on Meta::CPAN
{"openapi":"3.0.3","components":{"schemas":{"PushModelResponse":{"properties":{"total":{"type":"integer","description":"total size of the model","example":"2142590208"},"status":{"$ref":"#/components/schemas/PushModelStatus"},"digest":{"example":"sha...
ollama/ollama-curated.yaml view on Meta::CPAN
          type: string
          description: The response for a given prompt with a provided model.
          example: The sky appears blue because of a phenomenon called Rayleigh scattering.
        done:
          type: boolean
          description: Whether the response has completed.
          example: true
        context:
          type: array
          description: |
            An encoding of the conversation used in this response, this can be sent in the next request to keep a conversational memory.
          items:
            type: integer
          example: [ 1, 2, 3 ]
        total_duration:
          type: integer
          description: Time spent generating the response.
          example: 5589157167
        load_duration:
          type: integer
          description: Time spent in nanoseconds loading the model.
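Assembled from the example values in the excerpt above, a decoded (non-streaming) response would look roughly like the following Perl structure; this is a sketch, and load_duration is omitted because its example value is not shown here:

    my $response = {
        response       => 'The sky appears blue because of a phenomenon called Rayleigh scattering.',
        done           => 1,                  # JSON true
        context        => [ 1, 2, 3 ],
        total_duration => 5589157167,
    };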