AI-Ollama-Client


lib/AI/Ollama/Client/Impl.pm

=item B<< digest >>

The SHA256 digest of the blob.

=back
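
A minimal usage sketch, assuming a constructed C<$client>; the digest value below is a placeholder:

  my $res = $client->checkBlob(
      digest => 'sha256:...',   # placeholder SHA256 digest
  )->get;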



=cut

sub build_checkBlob_request( $self, %options ) {
    croak "Missing required parameter 'digest'"
        unless exists $options{ 'digest' };

    my $method = 'HEAD';
    my $template = URI::Template->new( '/blobs/{digest}' );
    my $path = $template->process(
              'digest' => delete $options{'digest'},
    );
    my $url = Mojo::URL->new( $self->server . $path );

lib/AI/Ollama/Client/Impl.pm

        {
        }
    );

    $self->validate_request( $tx );

    return $tx
}


sub checkBlob( $self, %options ) {
    my $tx = $self->build_checkBlob_request(%options);


    my $res = Future::Mojo->new();

    my $r1 = Future::Mojo->new();
    $r1->then( sub( $tx ) {
        my $resp = $tx->res;
        $self->emit(response => $resp);
        # Should we validate using OpenAPI::Modern here?!

lib/AI/Ollama/Client/Impl.pm

=item B<< digest >>

The SHA256 digest of the blob.

=back
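
A hedged sketch. How the blob payload itself is supplied is not shown in this excerpt (the request builder sends a raw body), so only the documented C<digest> parameter appears here:

  $client->createBlob(
      digest => 'sha256:...',   # placeholder SHA256 digest
  )->get;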



=cut

sub build_createBlob_request( $self, %options ) {
    croak "Missing required parameter 'digest'"
        unless exists $options{ 'digest' };

    my $method = 'POST';
    my $template = URI::Template->new( '/blobs/{digest}' );
    my $path = $template->process(
              'digest' => delete $options{'digest'},
    );
    my $url = Mojo::URL->new( $self->server . $path );

lib/AI/Ollama/Client/Impl.pm

        }
        => $body,
    );

    $self->validate_request( $tx );

    return $tx
}


sub createBlob( $self, %options ) {
    my $tx = $self->build_createBlob_request(%options);


    my $res = Future::Mojo->new();

    my $r1 = Future::Mojo->new();
    $r1->then( sub( $tx ) {
        my $resp = $tx->res;
        $self->emit(response => $resp);
        # Should we validate using OpenAPI::Modern here?!

lib/AI/Ollama/Client/Impl.pm

=item C<< stream >>

If C<false>, the response will be returned as a single response object; otherwise, the response will be streamed as a series of objects.

=back

Returns an L<< AI::Ollama::GenerateChatCompletionResponse >> on success.
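
A hedged sketch of a chat call; the C<model> and C<messages> parameters are assumed from the Ollama chat API and are not shown in this excerpt:

  my $response = $client->generateChatCompletion(
      model    => 'llama2',
      messages => [
          { role => 'user', content => 'Why is the sky blue?' },
      ],
  )->get;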

=cut

sub build_generateChatCompletion_request( $self, %options ) {
    my $method = 'POST';
    my $path = '/chat';
    my $url = Mojo::URL->new( $self->server . $path );

    my $request = AI::Ollama::GenerateChatCompletionRequest->new( \%options )->as_hash;
    my $tx = $self->ua->build_tx(
        $method => $url,
        {
            'Accept' => 'application/x-ndjson',
            "Content-Type" => 'application/json',
        }
        => json => $request,
    );

    $self->validate_request( $tx );

    return $tx
}


sub generateChatCompletion( $self, %options ) {
    my $tx = $self->build_generateChatCompletion_request(%options);


    my $res = Future::Mojo->new();

    my $r1 = Future::Mojo->new();
    our @store; # we should use ->retain() instead
    push @store, $r1->then( sub( $tx ) {
        my $resp = $tx->res;
        $self->emit(response => $resp);

lib/AI/Ollama/Client/Impl.pm


=item C<< source >>

Name of the model to copy.

=back
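
A minimal sketch; C<destination> is documented in L<AI::Ollama::CopyModelRequest>:

  $client->copyModel(
      source      => 'llama2:latest',
      destination => 'llama2-backup',
  )->get;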


=cut

sub build_copyModel_request( $self, %options ) {
    my $method = 'POST';
    my $path = '/copy';
    my $url = Mojo::URL->new( $self->server . $path );

    my $request = AI::Ollama::CopyModelRequest->new( \%options )->as_hash;
    my $tx = $self->ua->build_tx(
        $method => $url,
        {
            "Content-Type" => 'application/json',
        }
        => json => $request,
    );

    $self->validate_request( $tx );

    return $tx
}


sub copyModel( $self, %options ) {
    my $tx = $self->build_copyModel_request(%options);


    my $res = Future::Mojo->new();

    my $r1 = Future::Mojo->new();
    $r1->then( sub( $tx ) {
        my $resp = $tx->res;
        $self->emit(response => $resp);
        # Should we validate using OpenAPI::Modern here?!

lib/AI/Ollama/Client/Impl.pm

=item C<< stream >>

If C<false>, the response will be returned as a single response object; otherwise, the response will be streamed as a series of objects.

=back

Returns an L<< AI::Ollama::CreateModelResponse >> on success.
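
A hedged sketch; the C<name> parameter is assumed from the Ollama create API, and C<modelfile> is documented in L<AI::Ollama::CreateModelRequest>:

  my $res = $client->createModel(
      name      => 'mario',
      modelfile => "FROM llama2\nSYSTEM You are Mario from Super Mario Bros.",
  )->get;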

=cut

sub build_createModel_request( $self, %options ) {
    my $method = 'POST';
    my $path = '/create';
    my $url = Mojo::URL->new( $self->server . $path );

    my $request = AI::Ollama::CreateModelRequest->new( \%options )->as_hash;
    my $tx = $self->ua->build_tx(
        $method => $url,
        {
            'Accept' => 'application/x-ndjson',
            "Content-Type" => 'application/json',
        }
        => json => $request,
    );

    $self->validate_request( $tx );

    return $tx
}


sub createModel( $self, %options ) {
    my $tx = $self->build_createModel_request(%options);


    my $res = Future::Mojo->new();

    my $r1 = Future::Mojo->new();
    our @store; # we should use ->retain() instead
    push @store, $r1->then( sub( $tx ) {
        my $resp = $tx->res;
        $self->emit(response => $resp);

lib/AI/Ollama/Client/Impl.pm


The model name.

Model names follow a C<model:tag> format. Some examples are C<orca-mini:3b-q4_1> and C<llama2:70b>. The tag is optional and, if not provided, will default to C<latest>. The tag is used to identify a specific version.

=back
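
A minimal sketch using the documented C<name> parameter:

  $client->deleteModel(
      name => 'llama2:70b',
  )->get;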


=cut

sub build_deleteModel_request( $self, %options ) {
    my $method = 'DELETE';
    my $path = '/delete';
    my $url = Mojo::URL->new( $self->server . $path );

    my $request = AI::Ollama::DeleteModelRequest->new( \%options )->as_hash;
    my $tx = $self->ua->build_tx(
        $method => $url,
        {
            "Content-Type" => 'application/json',
        }
        => json => $request,
    );

    $self->validate_request( $tx );

    return $tx
}


sub deleteModel( $self, %options ) {
    my $tx = $self->build_deleteModel_request(%options);


    my $res = Future::Mojo->new();

    my $r1 = Future::Mojo->new();
    $r1->then( sub( $tx ) {
        my $resp = $tx->res;
        $self->emit(response => $resp);
        # Should we validate using OpenAPI::Modern here?!

lib/AI/Ollama/Client/Impl.pm

=item C<< prompt >>

Text to generate embeddings for.

=back

Returns an L<< AI::Ollama::GenerateEmbeddingResponse >> on success.
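
A minimal sketch; C<prompt> is documented above and C<model> in L<AI::Ollama::GenerateEmbeddingRequest>. The C<embedding> accessor is assumed from the documented property of the response class:

  my $res = $client->generateEmbedding(
      model  => 'llama2',
      prompt => 'Here is an article about llamas...',
  )->get;
  my $embedding = $res->embedding;   # the embedding for the prompt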

=cut

sub build_generateEmbedding_request( $self, %options ) {
    my $method = 'POST';
    my $path = '/embeddings';
    my $url = Mojo::URL->new( $self->server . $path );

    my $request = AI::Ollama::GenerateEmbeddingRequest->new( \%options )->as_hash;
    my $tx = $self->ua->build_tx(
        $method => $url,
        {
            'Accept' => 'application/json',
            "Content-Type" => 'application/json',
        }
        => json => $request,
    );

    $self->validate_request( $tx );

    return $tx
}


sub generateEmbedding( $self, %options ) {
    my $tx = $self->build_generateEmbedding_request(%options);


    my $res = Future::Mojo->new();

    my $r1 = Future::Mojo->new();
    $r1->then( sub( $tx ) {
        my $resp = $tx->res;
        $self->emit(response => $resp);
        # Should we validate using OpenAPI::Modern here?!

lib/AI/Ollama/Client/Impl.pm

=item C<< template >>

The full prompt or prompt template (overrides what is defined in the Modelfile).

=back

Returns an L<< AI::Ollama::GenerateCompletionResponse >> on success.
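
A hedged sketch; the C<model> and C<prompt> parameters are assumed from the Ollama generate API (C<template>, documented above, would override the Modelfile template):

  my $res = $client->generateCompletion(
      model  => 'llama2',
      prompt => 'Why is the sky blue?',
  )->get;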

=cut

sub build_generateCompletion_request( $self, %options ) {
    my $method = 'POST';
    my $path = '/generate';
    my $url = Mojo::URL->new( $self->server . $path );

    my $request = AI::Ollama::GenerateCompletionRequest->new( \%options )->as_hash;
    my $tx = $self->ua->build_tx(
        $method => $url,
        {
            'Accept' => 'application/x-ndjson',
            "Content-Type" => 'application/json',
        }
        => json => $request,
    );

    $self->validate_request( $tx );

    return $tx
}


sub generateCompletion( $self, %options ) {
    my $tx = $self->build_generateCompletion_request(%options);


    my $res = Future::Mojo->new();

    my $r1 = Future::Mojo->new();
    our @store; # we should use ->retain() instead
    push @store, $r1->then( sub( $tx ) {
        my $resp = $tx->res;
        $self->emit(response => $resp);

lib/AI/Ollama/Client/Impl.pm

=item C<< stream >>

If C<false>, the response will be returned as a single response object; otherwise, the response will be streamed as a series of objects.

=back

Returns an L<< AI::Ollama::PullModelResponse >> on success.
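
A hedged sketch; the C<name> parameter is assumed from the Ollama pull API (see also C<insecure> in L<AI::Ollama::PullModelRequest>):

  my $res = $client->pullModel(
      name => 'llama2:latest',
  )->get;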

=cut

sub build_pullModel_request( $self, %options ) {
    my $method = 'POST';
    my $path = '/pull';
    my $url = Mojo::URL->new( $self->server . $path );

    my $request = AI::Ollama::PullModelRequest->new( \%options )->as_hash;
    my $tx = $self->ua->build_tx(
        $method => $url,
        {
            'Accept' => 'application/x-ndjson',
            "Content-Type" => 'application/json',
        }
        => json => $request,
    );

    $self->validate_request( $tx );

    return $tx
}


sub pullModel( $self, %options ) {
    my $tx = $self->build_pullModel_request(%options);


    my $res = Future::Mojo->new();

    my $r1 = Future::Mojo->new();
    our @store; # we should use ->retain() instead
    push @store, $r1->then( sub( $tx ) {
        my $resp = $tx->res;
        $self->emit(response => $resp);

lib/AI/Ollama/Client/Impl.pm

=item C<< stream >>

If C<false>, the response will be returned as a single response object; otherwise, the response will be streamed as a series of objects.

=back

Returns an L<< AI::Ollama::PushModelResponse >> on success.
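
A hedged sketch; the C<name> parameter is assumed from the Ollama push API:

  my $res = $client->pushModel(
      name => 'namespace/model:latest',   # placeholder model name
  )->get;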

=cut

sub build_pushModel_request( $self, %options ) {
    my $method = 'POST';
    my $path = '/push';
    my $url = Mojo::URL->new( $self->server . $path );

    my $request = AI::Ollama::PushModelRequest->new( \%options )->as_hash;
    my $tx = $self->ua->build_tx(
        $method => $url,
        {
            'Accept' => 'application/json',
            "Content-Type" => 'application/json',
        }
        => json => $request,
    );

    $self->validate_request( $tx );

    return $tx
}


sub pushModel( $self, %options ) {
    my $tx = $self->build_pushModel_request(%options);


    my $res = Future::Mojo->new();

    my $r1 = Future::Mojo->new();
    $r1->then( sub( $tx ) {
        my $resp = $tx->res;
        $self->emit(response => $resp);
        # Should we validate using OpenAPI::Modern here?!

lib/AI/Ollama/Client/Impl.pm

The model name.

Model names follow a C<model:tag> format. Some examples are C<orca-mini:3b-q4_1> and C<llama2:70b>. The tag is optional and, if not provided, will default to C<latest>. The tag is used to identify a specific version.

=back

Returns an L<< AI::Ollama::ModelInfo >> on success.
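
A minimal sketch using the documented C<name> parameter; the C<license> accessor is assumed from the documented property of L<AI::Ollama::ModelInfo>:

  my $info = $client->showModelInfo(
      name => 'llama2:70b',
  )->get;
  say $info->license;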

=cut

sub build_showModelInfo_request( $self, %options ) {
    my $method = 'POST';
    my $path = '/show';
    my $url = Mojo::URL->new( $self->server . $path );

    my $request = AI::Ollama::ModelInfoRequest->new( \%options )->as_hash;
    my $tx = $self->ua->build_tx(
        $method => $url,
        {
            'Accept' => 'application/json',
            "Content-Type" => 'application/json',
        }
        => json => $request,
    );

    $self->validate_request( $tx );

    return $tx
}


sub showModelInfo( $self, %options ) {
    my $tx = $self->build_showModelInfo_request(%options);


    my $res = Future::Mojo->new();

    my $r1 = Future::Mojo->new();
    $r1->then( sub( $tx ) {
        my $resp = $tx->res;
        $self->emit(response => $resp);
        # Should we validate using OpenAPI::Modern here?!

lib/AI/Ollama/Client/Impl.pm


  my $res = $client->listModels()->get;

List models that are available locally.


Returns an L<< AI::Ollama::ModelsResponse >> on success.
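
A short sketch iterating the result; the C<models> and C<modified_at> accessors are assumed from the documented properties of L<AI::Ollama::ModelsResponse> and L<AI::Ollama::Model>:

  my $res = $client->listModels()->get;
  for my $model ( $res->models->@* ) {
      say $model->modified_at;
  }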

=cut

sub build_listModels_request( $self, %options ) {
    my $method = 'GET';
    my $path = '/tags';
    my $url = Mojo::URL->new( $self->server . $path );

    my $tx = $self->ua->build_tx(
        $method => $url,
        {
            'Accept' => 'application/json',
        }
    );

    $self->validate_request( $tx );

    return $tx
}


sub listModels( $self, %options ) {
    my $tx = $self->build_listModels_request(%options);


    my $res = Future::Mojo->new();

    my $r1 = Future::Mojo->new();
    $r1->then( sub( $tx ) {
        my $resp = $tx->res;
        $self->emit(response => $resp);
        # Should we validate using OpenAPI::Modern here?!

lib/AI/Ollama/Client/Impl.pm

    })->catch(sub($err) {
        $self->emit(response => $tx, $err);
        $r1->fail( $err => $tx );
        undef $r1;
    });

    return $res
}


sub validate_response( $self, $payload, $tx ) {
    # If response validation is enabled and an OpenAPI schema object is
    # available, validate the decoded payload against the schema and print
    # any validation errors together with the raw response.
    if(     $self->validate_responses
        and my $openapi = $self->openapi ) {
        my $results = $openapi->validate_response($payload, { request => $tx->req });
        if( $results->{error}) {
            say $results;
            say $tx->res->to_string;
        };
    };
}

sub validate_request( $self, $tx ) {
    # If request validation is enabled and an OpenAPI schema object is
    # available, validate the outgoing request against the schema and print
    # any validation errors together with the raw request.
    if(        $self->validate_requests
        and my $openapi = $self->openapi ) {
        my $results = $openapi->validate_request($tx->req);
        if( $results->{error}) {
            say $results;
            say $tx->req->to_string;
        };
    };
}

lib/AI/Ollama/CopyModelRequest.pm


AI::Ollama::CopyModelRequest -

=head1 SYNOPSIS

  my $obj = AI::Ollama::CopyModelRequest->new();
  ...

=cut

sub as_hash( $self ) {
    # Return the object's attributes as a plain hash reference
    return { $self->%* }
}

=head1 PROPERTIES

=head2 C<< destination >>

Name of the new model.

=cut

lib/AI/Ollama/CreateModelRequest.pm


AI::Ollama::CreateModelRequest -

=head1 SYNOPSIS

  my $obj = AI::Ollama::CreateModelRequest->new();
  ...

=cut

sub as_hash( $self ) {
    return { $self->%* }
}

=head1 PROPERTIES

=head2 C<< modelfile >>

The contents of the Modelfile.

=cut

lib/AI/Ollama/CreateModelResponse.pm


AI::Ollama::CreateModelResponse -

=head1 SYNOPSIS

  my $obj = AI::Ollama::CreateModelResponse->new();
  ...

=cut

sub as_hash( $self ) {
    return { $self->%* }
}

=head1 PROPERTIES

=head2 C<< status >>

The status of creating the model.

=cut

lib/AI/Ollama/DeleteModelRequest.pm


AI::Ollama::DeleteModelRequest -

=head1 SYNOPSIS

  my $obj = AI::Ollama::DeleteModelRequest->new();
  ...

=cut

sub as_hash( $self ) {
    return { $self->%* }
}

=head1 PROPERTIES

=head2 C<< name >>

The model name.

Model names follow a C<model:tag> format. Some examples are C<orca-mini:3b-q4_1> and C<llama2:70b>. The tag is optional and, if not provided, will default to C<latest>. The tag is used to identify a specific version.

lib/AI/Ollama/GenerateChatCompletionRequest.pm


AI::Ollama::GenerateChatCompletionRequest -

=head1 SYNOPSIS

  my $obj = AI::Ollama::GenerateChatCompletionRequest->new();
  ...

=cut

sub as_hash( $self ) {
    return { $self->%* }
}

=head1 PROPERTIES

=head2 C<< format >>

The format to return a response in. Currently the only accepted value is C<json>.

Enable JSON mode by setting the C<format> parameter to C<json>. This will structure the response as valid JSON.

lib/AI/Ollama/GenerateChatCompletionResponse.pm


AI::Ollama::GenerateChatCompletionResponse -

=head1 SYNOPSIS

  my $obj = AI::Ollama::GenerateChatCompletionResponse->new();
  ...

=cut

sub as_hash( $self ) {
    return { $self->%* }
}

=head1 PROPERTIES

=head2 C<< created_at >>

Date on which a model was created.

=cut

lib/AI/Ollama/GenerateCompletionRequest.pm


AI::Ollama::GenerateCompletionRequest -

=head1 SYNOPSIS

  my $obj = AI::Ollama::GenerateCompletionRequest->new();
  ...

=cut

sub as_hash( $self ) {
    return { $self->%* }
}

=head1 PROPERTIES

=head2 C<< context >>

The context parameter returned from a previous request to [generateCompletion]; this can be used to keep a short conversational memory.

=cut

lib/AI/Ollama/GenerateCompletionResponse.pm


AI::Ollama::GenerateCompletionResponse -

=head1 SYNOPSIS

  my $obj = AI::Ollama::GenerateCompletionResponse->new();
  ...

=cut

sub as_hash( $self ) {
    return { $self->%* }
}

=head1 PROPERTIES

=head2 C<< context >>

An encoding of the conversation used in this response; this can be sent in the next request to keep a conversational memory.

=cut

lib/AI/Ollama/GenerateEmbeddingRequest.pm


AI::Ollama::GenerateEmbeddingRequest -

=head1 SYNOPSIS

  my $obj = AI::Ollama::GenerateEmbeddingRequest->new();
  ...

=cut

sub as_hash( $self ) {
    return { $self->%* }
}

=head1 PROPERTIES

=head2 C<< model >>

The model name.

Model names follow a C<model:tag> format. Some examples are C<orca-mini:3b-q4_1> and C<llama2:70b>. The tag is optional and, if not provided, will default to C<latest>. The tag is used to identify a specific version.

lib/AI/Ollama/GenerateEmbeddingResponse.pm


AI::Ollama::GenerateEmbeddingResponse -

=head1 SYNOPSIS

  my $obj = AI::Ollama::GenerateEmbeddingResponse->new();
  ...

=cut

sub as_hash( $self ) {
    return { $self->%* }
}

=head1 PROPERTIES

=head2 C<< embedding >>

The embedding for the prompt.

=cut

lib/AI/Ollama/Message.pm


AI::Ollama::Message -

=head1 SYNOPSIS

  my $obj = AI::Ollama::Message->new();
  ...

=cut

sub as_hash( $self ) {
    return { $self->%* }
}

=head1 PROPERTIES

=head2 C<< content >>

The content of the message

=cut

lib/AI/Ollama/Model.pm


AI::Ollama::Model -

=head1 SYNOPSIS

  my $obj = AI::Ollama::Model->new();
  ...

=cut

sub as_hash( $self ) {
    return { $self->%* }
}

=head1 PROPERTIES

=head2 C<< modified_at >>

Model modification date.

=cut

lib/AI/Ollama/ModelInfo.pm


AI::Ollama::ModelInfo -

=head1 SYNOPSIS

  my $obj = AI::Ollama::ModelInfo->new();
  ...

=cut

sub as_hash( $self ) {
    return { $self->%* }
}

=head1 PROPERTIES

=head2 C<< license >>

The model's license.

=cut

lib/AI/Ollama/ModelInfoRequest.pm


AI::Ollama::ModelInfoRequest -

=head1 SYNOPSIS

  my $obj = AI::Ollama::ModelInfoRequest->new();
  ...

=cut

sub as_hash( $self ) {
    return { $self->%* }
}

=head1 PROPERTIES

=head2 C<< name >>

The model name.

Model names follow a C<model:tag> format. Some examples are C<orca-mini:3b-q4_1> and C<llama2:70b>. The tag is optional and, if not provided, will default to C<latest>. The tag is used to identify a specific version.

lib/AI/Ollama/ModelsResponse.pm


AI::Ollama::ModelsResponse -

=head1 SYNOPSIS

  my $obj = AI::Ollama::ModelsResponse->new();
  ...

=cut

sub as_hash( $self ) {
    return { $self->%* }
}

=head1 PROPERTIES

=head2 C<< models >>

List of models available locally.

=cut

lib/AI/Ollama/PullModelRequest.pm


AI::Ollama::PullModelRequest -

=head1 SYNOPSIS

  my $obj = AI::Ollama::PullModelRequest->new();
  ...

=cut

sub as_hash( $self ) {
    return { $self->%* }
}

=head1 PROPERTIES

=head2 C<< insecure >>

Allow insecure connections to the library.

Only use this if you are pulling from your own library during development.

lib/AI/Ollama/PullModelResponse.pm


AI::Ollama::PullModelResponse -

=head1 SYNOPSIS

  my $obj = AI::Ollama::PullModelResponse->new();
  ...

=cut

sub as_hash( $self ) {
    return { $self->%* }
}

=head1 PROPERTIES

=head2 C<< completed >>

Total bytes transferred.

=cut

lib/AI/Ollama/PushModelRequest.pm


AI::Ollama::PushModelRequest -

=head1 SYNOPSIS

  my $obj = AI::Ollama::PushModelRequest->new();
  ...

=cut

sub as_hash( $self ) {
    return { $self->%* }
}

=head1 PROPERTIES

=head2 C<< insecure >>

Allow insecure connections to the library.

Only use this if you are pushing to your library during development.

lib/AI/Ollama/PushModelResponse.pm


AI::Ollama::PushModelResponse -

=head1 SYNOPSIS

  my $obj = AI::Ollama::PushModelResponse->new();
  ...

=cut

sub as_hash( $self ) {
    return { $self->%* }
}

=head1 PROPERTIES

=head2 C<< digest >>

The model's digest.

=cut

lib/AI/Ollama/RequestOptions.pm


AI::Ollama::RequestOptions -

=head1 SYNOPSIS

  my $obj = AI::Ollama::RequestOptions->new();
  ...

=cut

sub as_hash( $self ) {
    return { $self->%* }
}

=head1 PROPERTIES

=head2 C<< embedding_only >>

Enable embedding only. (Default: false)

=cut


