AI-Ollama-Client

 view release on metacpan or  search on metacpan

Makefile.PL  view on Meta::CPAN

    WriteMakefile(%params);
}

sub regen_README {
    # README is the short version that just tells people what this is
    # and how to install it
    my( $file, $version ) = @_;
    eval {
        # Get description
        my $readme = join "\n",
            pod_section($file, 'NAME', 'no heading' ),
            pod_section($file, 'DESCRIPTION' ),
            <<VERSION,
This document describes version $version.
VERSION
            <<INSTALL,

INSTALLATION

This is a Perl module distribution. It should be installed with whichever
tool you use to manage your installation of Perl, e.g. any of

Makefile.PL  view on Meta::CPAN


Consult https://www.cpan.org/modules/INSTALL.html for further instruction.
Should you wish to install this module manually, the procedure is

  perl Makefile.PL
  make
  make test
  make install

INSTALL
            pod_section($file, 'REPOSITORY'),
            pod_section($file, 'SUPPORT'),
            pod_section($file, 'TALKS'),
            pod_section($file, 'KNOWN ISSUES'),
            pod_section($file, 'BUG TRACKER'),
            pod_section($file, 'CONTRIBUTING'),
            pod_section($file, 'SEE ALSO'),
            pod_section($file, 'AUTHOR'),
            pod_section($file, 'LICENSE' ),
            pod_section($file, 'COPYRIGHT' ),
    ;
        update_file( 'README', $readme );
    };
    # README.mkdn is the documentation that will be shown as the main
    # page of the repository on Github. Hence we recreate the POD here
    # as Markdown
    eval {
        require Pod::Markdown;

        my $parser = Pod::Markdown->new();

Makefile.PL  view on Meta::CPAN


[![Windows](https://github.com/Corion/$distbase/workflows/windows/badge.svg)](https://github.com/Corion/$distbase/actions?query=workflow%3Awindows)
[![MacOS](https://github.com/Corion/$distbase/workflows/macos/badge.svg)](https://github.com/Corion/$distbase/actions?query=workflow%3Amacos)
[![Linux](https://github.com/Corion/$distbase/workflows/linux/badge.svg)](https://github.com/Corion/$distbase/actions?query=workflow%3Alinux)

STATUS
        update_file( 'README.mkdn', $readme_mkdn );
    };
}

sub pod_section {
    # Extract the body of a single =head1 section from a POD file.
    #
    # $filename       - file to read POD from
    # $section        - name of the =head1 section, matched literally
    # $remove_heading - if true, strip the heading line from the result
    #
    # Returns the section text as one string ('' if the section is absent).
    my( $filename, $section, $remove_heading ) = @_;
    open my $fh, '<', $filename
        or die "Couldn't read '$filename': $!";

    # Flip-flop operator: true from the matching =head1 line up to (and
    # including) the next POD directive. \Q...\E so that metacharacters
    # in the section name (if any) are matched literally.
    my @section =
        grep { /^=head1\s+\Q$section\E/.../^=/ } <$fh>;
    close $fh;

    # Trim the section
    if( @section ) {
        # The flip-flop range includes the terminating directive; drop it
        pop @section if $section[-1] =~ /^=/;
        shift @section if $remove_heading;

        # Strip trailing and leading blank lines
        pop @section
            while @section and $section[-1] =~ /^\s*$/;
        shift @section
            while @section and $section[0] =~ /^\s*$/;
    };

    # Remove remaining POD directive markup (e.g. the '=head1 ' prefix
    # on a retained heading line)
    @section = map { $_ =~ s!^=\w+\s+!!; $_ } @section;
    return join "", @section;
}

sub regen_EXAMPLES {
    my $perl = $^X;
    if ($perl =~/\s/) {
        $perl = qq{"$perl"};
    };
    (my $example_file = $main_file) =~ s!\.pm$!/Examples.pm!;
    my $examples = `$perl -w examples/gen_examples_pod.pl`;
    if ($examples) {

lib/AI/Ollama/Client/Impl.pm  view on Meta::CPAN


Download a model from the ollama library.

Cancelled pulls are resumed from where they left off, and multiple calls will share the same download progress.


=head3 Options

=over 4

=item C<< insecure >>

Allow insecure connections to the library.

Only use this if you are pulling from your own library during development.

=item C<< name >>

The model name.

Model names follow a C<model:tag> format. Some examples are C<orca-mini:3b-q4_1> and C<llama2:70b>. The tag is optional and, if not provided, will default to C<latest>. The tag is used to identify a specific version.

=item C<< stream >>

lib/AI/Ollama/Client/Impl.pm  view on Meta::CPAN


Upload a model to a model library.

Requires registering for ollama.ai and adding a public key first.


=head3 Options

=over 4

=item C<< insecure >>

Allow insecure connections to the library.

Only use this if you are pushing to your library during development.

=item C<< name >>

The name of the model to push in the form of <namespace>/<model>:<tag>.

=item C<< stream >>

If C<false> the response will be returned as a single response object, otherwise the response will be streamed as a series of objects.

lib/AI/Ollama/GenerateChatCompletionResponse.pm  view on Meta::CPAN


=cut

has 'eval_count' => (
    is       => 'ro',   # read-only attribute
    isa      => Int,    # per the API schema: number of tokens in the response
);

=head2 C<< eval_duration >>

Time in nanoseconds spent generating the response.

=cut

has 'eval_duration' => (
    is       => 'ro',   # read-only attribute
    isa      => Int,    # time in nanoseconds spent generating the response
);

=head2 C<< load_duration >>

Time spent in nanoseconds loading the model.

=cut

has 'load_duration' => (
    is       => 'ro',   # read-only attribute
    isa      => Int,    # time in nanoseconds spent loading the model
);

=head2 C<< message >>

lib/AI/Ollama/GenerateChatCompletionResponse.pm  view on Meta::CPAN


=cut

has 'prompt_eval_count' => (
    is       => 'ro',   # read-only attribute
    isa      => Int,    # per the API schema: number of tokens in the prompt
);

=head2 C<< prompt_eval_duration >>

Time spent in nanoseconds evaluating the prompt.

=cut

has 'prompt_eval_duration' => (
    is       => 'ro',   # read-only attribute
    isa      => Int,    # time in nanoseconds spent evaluating the prompt
);

=head2 C<< total_duration >>

lib/AI/Ollama/GenerateCompletionResponse.pm  view on Meta::CPAN


=cut

has 'eval_count' => (
    is       => 'ro',   # read-only attribute
    isa      => Int,    # per the API schema: number of tokens in the response
);

=head2 C<< eval_duration >>

Time in nanoseconds spent generating the response.

=cut

has 'eval_duration' => (
    is       => 'ro',   # read-only attribute
    isa      => Int,    # time in nanoseconds spent generating the response
);

=head2 C<< load_duration >>

Time spent in nanoseconds loading the model.

=cut

has 'load_duration' => (
    is       => 'ro',   # read-only attribute
    isa      => Int,    # time in nanoseconds spent loading the model
);

=head2 C<< model >>

lib/AI/Ollama/GenerateCompletionResponse.pm  view on Meta::CPAN


=cut

has 'prompt_eval_count' => (
    is       => 'ro',   # read-only attribute
    isa      => Int,    # per the API schema: number of tokens in the prompt
);

=head2 C<< prompt_eval_duration >>

Time spent in nanoseconds evaluating the prompt.

=cut

has 'prompt_eval_duration' => (
    is       => 'ro',   # read-only attribute
    isa      => Int,    # time in nanoseconds spent evaluating the prompt
);

=head2 C<< response >>

lib/AI/Ollama/PullModelRequest.pm  view on Meta::CPAN

  ...

=cut

# Return a shallow, unblessed hashref copy of this object's attributes.
sub as_hash( $self ) {
    my %copy = %$self;
    return \%copy;
}

=head1 PROPERTIES

=head2 C<< insecure >>

Allow insecure connections to the library.

Only use this if you are pulling from your own library during development.

=cut

has 'insecure' => (
    is       => 'ro',   # read-only; allow insecure connections to the library
                        # (intended only for pulls from your own library
                        # during development — see POD above)
);

=head2 C<< name >>

The model name.

Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama2:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version.

=cut

lib/AI/Ollama/PushModelRequest.pm  view on Meta::CPAN

  ...

=cut

# Return a shallow, unblessed hashref copy of this object's attributes.
sub as_hash( $self ) {
    my %copy = %$self;
    return \%copy;
}

=head1 PROPERTIES

=head2 C<< insecure >>

Allow insecure connections to the library.

Only use this if you are pushing to your library during development.

=cut

has 'insecure' => (
    is       => 'ro',   # read-only; allow insecure connections to the library
                        # (intended only for pushes to your own library
                        # during development — see POD above)
);

=head2 C<< name >>

The name of the model to push in the form of <namespace>/<model>:<tag>.

=cut

has 'name' => (

ollama/ollama-curated.json  view on Meta::CPAN

{"openapi":"3.0.3","components":{"schemas":{"PushModelResponse":{"properties":{"total":{"type":"integer","description":"total size of the model","example":"2142590208"},"status":{"$ref":"#/components/schemas/PushModelStatus"},"digest":{"example":"sha...

ollama/ollama-curated.yaml  view on Meta::CPAN

            An encoding of the conversation used in this response, this can be sent in the next request to keep a conversational memory.
          items:
            type: integer
          example: [ 1, 2, 3 ]
        total_duration:
          type: integer
          description: Time spent generating the response.
          example: 5589157167
        load_duration:
          type: integer
          description: Time spent in nanoseconds loading the model.
          example: 3013701500
        prompt_eval_count:
          type: integer
          description: Number of tokens in the prompt.
          example: 46
        prompt_eval_duration:
          type: integer
          description: Time spent in nanoseconds evaluating the prompt.
          example: 1160282000
        eval_count:
          type: integer
          description: Number of tokens in the response.
          example: 113
        eval_duration:
          type: integer
          description: Time in nanoseconds spent generating the response.
          example: 1325948000
    GenerateChatCompletionRequest:
      type: object
      description: Request class for the chat endpoint.
      properties:
        model:
          type: string
          description: *model_name
          example: llama2:7b
        messages:

ollama/ollama-curated.yaml  view on Meta::CPAN

        done:
          type: boolean
          description: Whether the response has completed.
          example: true
        total_duration:
          type: integer
          description: Time spent generating the response.
          example: 5589157167
        load_duration:
          type: integer
          description: Time spent in nanoseconds loading the model.
          example: 3013701500
        prompt_eval_count:
          type: integer
          description: Number of tokens in the prompt.
          example: 46
        prompt_eval_duration:
          type: integer
          description: Time spent in nanoseconds evaluating the prompt.
          example: 1160282000
        eval_count:
          type: integer
          description: Number of tokens in the response.
          example: 113
        eval_duration:
          type: integer
          description: Time in nanoseconds spent generating the response.
          example: 1325948000
    Message:
      type: object
      description: A message in the chat endpoint
      properties:
        role:
          type: string
          description: The role of the message
          enum: [ "system", "user", "assistant" ]
        content:

ollama/ollama-curated.yaml  view on Meta::CPAN

      required:
        - name
    PullModelRequest:
      description: Request class for pulling a model.
      type: object
      properties:
        name:
          type: string
          description: *model_name
          example: llama2:7b
        insecure:
          type: boolean
          description: |
            Allow insecure connections to the library.

            Only use this if you are pulling from your own library during development.
          default: false
        stream:
          type: boolean
          description: *stream
          default: false
      required:
        - name
    PullModelResponse:

ollama/ollama-curated.yaml  view on Meta::CPAN

        - success
      example: pulling manifest
    PushModelRequest:
      description: Request class for pushing a model.
      type: object
      properties:
        name:
          type: string
          description: The name of the model to push in the form of <namespace>/<model>:<tag>.
          example: 'mattw/pygmalion:latest'
        insecure:
          type: boolean
          description: |
            Allow insecure connections to the library.

            Only use this if you are pushing to your library during development.
          default: false
        stream:
          type: boolean
          description: *stream
          default: false
      required:
        - name
    PushModelResponse:

openapi/petstore-expanded.yaml  view on Meta::CPAN

  license:
    name: Apache 2.0
    url: https://www.apache.org/licenses/LICENSE-2.0.html
servers:
  - url: https://petstore.swagger.io/v2
paths:
  /pets:
    get:
      description: |
        Returns all pets from the system that the user has access to
        Nam sed condimentum est. Maecenas tempor sagittis sapien, nec rhoncus sem sagittis sit amet. Aenean at gravida augue, ac iaculis sem. Curabitur odio lorem, ornare eget elementum nec, cursus id lectus. Duis mi turpis, pulvinar ac eros ac, tinc...

        Sed tempus felis lobortis leo pulvinar rutrum. Nam mattis velit nisl, eu condimentum ligula luctus nec. Phasellus semper velit eget aliquet faucibus. In a mattis elit. Phasellus vel urna viverra, condimentum lorem id, rhoncus nibh. Ut pellent...
      operationId: findPets
      parameters:
        - name: tags
          in: query
          description: tags to filter by
          required: false
          style: form
          schema:

xt/99-synopsis.t  view on Meta::CPAN

            pass $name;
        } else {
            fail $name;
            diag $output;
            diag $_ for @synopsis;
        };
        unlink $tempname
            or warn "Couldn't clean up $tempname: $!";
    } else {
        SKIP: {
            skip "$file has no SYNOPSIS section", 1;
        };
    };

}



( run in 0.722 second using v1.01-cache-2.11-cpan-39bf76dae61 )