Result:
found more than 672 distributions - search limited to the first 2001 files matching your query ( run in 1.119 seconds )


Compress-DSRC

 view release on metacpan or  search on metacpan

t/dsrc.t  view on Meta::CPAN

        $rec->set_quality( $orig_qual );
        $rec->set_plus('+');
        $w->write_record( $rec );
    }
    ok( $read_count > 0 && $read_count*4 == $passed,
        "streaming records identical" );
    $r->finish();
    $w->finish();

    ok( $m->decompress('bee.dsrc' => 'bee.fastq', 1),
        "one-shot decompression 4" );

 view all matches for this distribution


Compress-LZ4

 view release on metacpan or  search on metacpan

src/lz4.c  view on Meta::CPAN

{
    return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)(dest - 64 KB), NULL, 64 KB);
}


/*===== streaming decompression functions =====*/

/*
 * If you prefer dynamic allocation methods,
 * LZ4_createStreamDecode()
 * provides a pointer (void*) towards an initialized LZ4_streamDecode_t structure.

src/lz4.c  view on Meta::CPAN

    return 1;
}

/*
*_continue() :
    These decoding functions allow decompression of multiple blocks in "streaming" mode.
    Previously decoded blocks must still be available at the memory position where they were decoded.
    If it's not possible, save the relevant part of decoded data into a safe buffer,
    and indicate where it stands using LZ4_setStreamDecode()
*/
int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)

src/lz4.c  view on Meta::CPAN

    LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)LZ4_Data)->internal_donotuse;
    int dictSize = LZ4_saveDict((LZ4_stream_t*)LZ4_Data, (char*)ctx->bufferStart, 64 KB);
    return (char*)(ctx->bufferStart + dictSize);
}

/* Obsolete streaming decompression functions */

int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
{
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
}
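The C excerpts above are lz4's low-level block and streaming decoder as bundled with the distribution; the streaming entry points do not appear to be exposed at the Perl level. A minimal round-trip sketch using the module's one-shot exports (function names assumed to be the default exports):

    use strict;
    use warnings;
    use Compress::LZ4;    # assumed default exports: compress(), decompress()

    my $data       = "hello, streaming world\n" x 1000;
    my $compressed = compress($data);            # one-shot block compression
    my $restored   = decompress($compressed);
    die "round-trip failed\n" unless $restored eq $data;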

 view all matches for this distribution


Compress-LZ4Frame

 view release on metacpan or  search on metacpan

lz4.c  view on Meta::CPAN

{
    return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)(dest - 64 KB), NULL, 64 KB);
}


/*===== streaming decompression functions =====*/

LZ4_streamDecode_t* LZ4_createStreamDecode(void)
{
    LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOCATOR(1, sizeof(LZ4_streamDecode_t));
    return lz4s;

lz4.c  view on Meta::CPAN

    return 1;
}

/*
*_continue() :
    These decoding functions allow decompression of multiple blocks in "streaming" mode.
    Previously decoded blocks must still be available at the memory position where they were decoded.
    If it's not possible, save the relevant part of decoded data into a safe buffer,
    and indicate where it stands using LZ4_setStreamDecode()
*/
LZ4_FORCE_O2_GCC_PPC64LE

lz4.c  view on Meta::CPAN

    LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)LZ4_Data)->internal_donotuse;
    int dictSize = LZ4_saveDict((LZ4_stream_t*)LZ4_Data, (char*)ctx->bufferStart, 64 KB);
    return (char*)(ctx->bufferStart + dictSize);
}

/* Obsolete streaming decompression functions */

int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
{
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
}

 view all matches for this distribution


Compress-Stream-Zstd

 view release on metacpan or  search on metacpan

lib/Compress/Stream/Zstd.pm  view on Meta::CPAN

Compress::Stream::Zstd - Perl interface to the Zstd (Zstandard) (de)compressor

=head1 NOTE

This module is a fork of L<Compress-Zstd|https://github.com/spiritloose/Compress-Zstd>.
It contains a few changes to make streaming compression/decompression more robust.
The only reason for this fork is to allow the module to work with `IO-Compress-Zstd`.
The hope is that the changes made here can be merged back upstream and this module can be retired.

=head1 SYNOPSIS

lib/Compress/Stream/Zstd.pm  view on Meta::CPAN


=head1 AUTHOR

Jiro Nishiguchi E<lt>jiro@cpan.orgE<gt>

Some streaming enhancement by Paul Marquess  E<lt>pmqs@cpan.orgE<gt>

Zstandard by Facebook, Inc.

=cut
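As a quick orientation for the module above, a hedged round-trip sketch; it assumes this fork keeps Compress::Zstd's exported compress() and decompress() functions, which the excerpt does not show:

    use strict;
    use warnings;
    use Compress::Stream::Zstd;    # assumed exports: compress(), decompress()

    my $data       = "example payload\n" x 100;
    my $compressed = compress($data);        # an optional second argument selects the level
    my $restored   = decompress($compressed);
    die "round-trip failed\n" unless $restored eq $data;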

 view all matches for this distribution


Compress-Zopfli

 view release on metacpan or  search on metacpan

lib/Compress/Zopfli.pm  view on Meta::CPAN

deflate libraries can decompress the data, e.g. I<IO::Compress>.

=head2 B<($compressed) = compress( $input, I<ZOPFLI_FORMAT>, [OPTIONS] )>

This is the only function provided by I<Compress::Zopfli>. The input must
be a string. The underlying function does not seem to support any streaming
interface.
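A short usage sketch; the format constant name below is an assumption (the signature above only shows a ZOPFLI_FORMAT placeholder), and the optional OPTIONS hash described below is omitted:

    use strict;
    use warnings;
    use Compress::Zopfli;

    my $input = "some text worth squeezing\n" x 200;
    my $gz    = compress($input, ZOPFLI_FORMAT_GZIP);   # constant name assumed

    open my $out, '>:raw', 'output.gz' or die $!;
    print {$out} $gz;
    close $out;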

=head1 OPTIONS

Options map directly to the I<zopfli> low-level function. Must be a hash

 view all matches for this distribution


Compress-Zstd

 view release on metacpan or  search on metacpan

lib/Compress/Zstd/Compressor.pm  view on Meta::CPAN


=encoding utf-8

=head1 NAME

Compress::Zstd::Compressor - Zstd streaming compression

=head1 SYNOPSIS

    use Compress::Zstd::Compressor qw(ZSTD_CSTREAM_IN_SIZE);

lib/Compress/Zstd/Compressor.pm  view on Meta::CPAN

    }
    print $compressor->end;

=head1 DESCRIPTION

The Compress::Zstd::Compressor module provides a streaming interface to the Zstd compressor.
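A minimal streaming-compression sketch assembled from the SYNOPSIS fragments above; the compress() method name and the read loop are assumptions, since the middle of the SYNOPSIS is not shown in this excerpt:

    use strict;
    use warnings;
    use Compress::Zstd::Compressor qw(ZSTD_CSTREAM_IN_SIZE);

    my $compressor = Compress::Zstd::Compressor->new(3);    # optional compression level
    open my $in, '<:raw', 'input.dat' or die $!;
    binmode STDOUT;
    while (read($in, my $buf, ZSTD_CSTREAM_IN_SIZE)) {
        print $compressor->compress($buf);   # assumed method: feed a chunk, emit ready output
    }
    print $compressor->end;                  # flush and finish the zstd frame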

=head1 METHODS

=head2 Compress::Zstd::Compressor->new([$level]) :Compress::Zstd::Compressor

 view all matches for this distribution


Config-UCL

 view release on metacpan or  search on metacpan

libucl-0.8.1/doc/api.md  view on Meta::CPAN

# Description

Libucl is a parser and `C` API to parse and generate `ucl` objects. Libucl consist of several groups of functions:

### Parser functions
Used to parse `ucl` files and provide interface to extract `ucl` object. Currently, `libucl` can parse only full `ucl` documents, for instance, it is impossible to parse a part of document and therefore it is impossible to use `libucl` as a streaming...

### Emitting functions
Convert `ucl` objects to some textual or binary representation. Currently, libucl supports the following exports:

- `JSON` - valid json format (can possibly lose some original data, such as implicit arrays)

 view all matches for this distribution


Continuity

 view release on metacpan or  search on metacpan

lib/Continuity/Adapt/PSGI.pm  view on Meta::CPAN

  # comes in

  my $app = sub {
    my $env = shift;

    unless ($env->{'psgi.streaming'}) {
      die 'This application needs psgi.streaming support!';
    }

    # stuff $env onto a queue that get_request above pulls from; get_request is
    # called from Continuity's main execution context/loop. Continuity's main
    # execution loop invokes the Mapper to send the request across a queue to
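For reference, the psgi.streaming capability checked above enables the standard delayed/streaming PSGI response; a minimal self-contained sketch, independent of Continuity's internals:

    # A plain PSGI app that streams its body through the psgi.streaming writer.
    my $app = sub {
        my $env = shift;
        die "This application needs psgi.streaming support!\n"
            unless $env->{'psgi.streaming'};
        return sub {
            my $respond = shift;
            my $writer  = $respond->([ 200, [ 'Content-Type' => 'text/plain' ] ]);
            $writer->write("chunk $_\n") for 1 .. 3;    # send the body incrementally
            $writer->close;
        };
    };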

 view all matches for this distribution


Corona

 view release on metacpan or  search on metacpan

lib/Corona/Server.pm  view on Meta::CPAN

        'psgi.url_scheme' => 'http', # SSL support?
        'psgi.nonblocking'  => Plack::Util::TRUE,
        'psgi.run_once'     => Plack::Util::FALSE,
        'psgi.multithread'  => Plack::Util::TRUE,
        'psgi.multiprocess' => Plack::Util::FALSE,
        'psgi.streaming'    => Plack::Util::TRUE,
        'psgix.io'          => $fh->fh,
    };

    my $res = [ 400, [ 'Content-Type' => 'text/plain' ], [ 'Bad Request' ] ];

lib/Corona/Server.pm  view on Meta::CPAN

    push @lines, "\015\012";

    $fh->syswrite(join '', @lines);

    if (!defined $res->[2]) {
        # streaming write
        return Plack::Util::inline_object
            write => sub { $fh->syswrite(join '', @_) },
            close => $rouse_cb;
    } elsif (HAS_AIO && Plack::Util::is_real_fh($res->[2])) {
        my $length = -s $res->[2];

 view all matches for this distribution


Courriel

 view release on metacpan or  search on metacpan

lib/Courriel.pm  view on Meta::CPAN

This method will send the stringified email to the specified output. The output
can be a subroutine reference, a filehandle, or an object with a C<print()>
method. The output may be sent as a single string, as a list of strings, or via
multiple calls to the output.

For large emails, streaming can be much more memory efficient than generating a
single string in memory.
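The heading that names this method is not visible in the excerpt; assuming it is Courriel's stream_to() with an output parameter, the output forms described above look roughly like this:

    use Courriel;

    my $raw   = do { local $/; <STDIN> };        # raw RFC 2822 message text
    my $email = Courriel->parse( text => $raw );

    # stream_to()/output are assumed names, not shown in this excerpt.
    open my $fh, '>', 'copy.eml' or die $!;
    $email->stream_to( output => $fh );                # filehandle
    $email->stream_to( output => sub { print @_ } );   # subroutine reference
    # an object with a print() method (e.g. an IO::Handle) works the same way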

=head2 $email->as_string()

Returns the email as a string, along with its headers. Lines will be terminated

 view all matches for this distribution


Crypt-Bear

 view release on metacpan or  search on metacpan

lib/Crypt/Bear/HMAC.pm  view on Meta::CPAN

 }
 say unpack 'H*', $digester->out;

=head1 DESCRIPTION

This represents a streaming implementation of HMAC on top of common hash functions.

=head1 METHODS

=head2 new($key)

 view all matches for this distribution


Crypt-HSM

 view release on metacpan or  search on metacpan

lib/Crypt/HSM/Stream.pm  view on Meta::CPAN

 }
 $ciphertext .= $stream->finish;

=head1 DESCRIPTION

This is a base-class for streaming actions.

=head1 METHODS

=head2 get_state()

 view all matches for this distribution


Crypt-NaCl-Sodium

 view release on metacpan or  search on metacpan

lib/Crypt/NaCl/Sodium/auth.pod  view on Meta::CPAN


Method returns true if message has been verified, false otherwise.

=head1 ADVANCED USAGE

Single pass and streaming API keyed message authentication using I<HMAC-SHA-256>,
I<HMAC-SHA-512> and I<HMAC-SHA-512/256> are described below.

=head2 HMAC-SHA-256

=head3 hmacsha256_keygen

 view all matches for this distribution


Crypt-Rijndael

 view release on metacpan or  search on metacpan

Changes  view on Meta::CPAN

Revision history for Perl module Crypt::Rijndael

1.16 - 2021-04-01
   * Add non-blocksized encoding support to streaming modes
   * Explicitly use downgraded strings

1.15 - 2020-10-10
   * Use standard uint8_t and uint32_t from stdint.h

 view all matches for this distribution



Crypt-xxHash

 view release on metacpan or  search on metacpan

ext/xxHash/CHANGELOG  view on Meta::CPAN

v0.8.1
- perf : much improved performance for XXH3 streaming variants, notably on gcc and msvc
- perf : improved XXH64 speed and latency on small inputs
- perf : small XXH32 speed and latency improvement on small inputs of random size
- perf : minor stack usage improvement for XXH32 and XXH64
- api  : new experimental variants XXH3_*_withSecretandSeed()
- api  : update XXH3_generateSecret(), can now generate a secret of any size (>= XXH3_SECRET_SIZE_MIN)

ext/xxHash/CHANGELOG  view on Meta::CPAN

- perf: automatic vector detection and selection at runtime (`xxh_x86dispatch.h`), initiated by @easyaspi314
- perf: added AVX512 support, by @gzm55
- api : new: secret generator `XXH_generateSecret()`, suggested by @koraa
- api : fix: XXH3_state_t is movable, identified by @koraa
- api : fix: state is correctly aligned in AVX mode (unlike `malloc()`), by @easyaspi314
- api : fix: streaming generated wrong values in some combination of random ingestion lengths, reported by @WayneD
- cli : fix unicode print on Windows, by @easyaspi314
- cli : can `-c` check file generated by sfv
- build: `make DISPATCH=1` generates `xxhsum` and `libxxhash` with runtime vector detection (x86/x64 only)
- install: cygwin installation support
- doc : Cryptol specification of XXH32 and XXH64, by @weaversa

 view all matches for this distribution


DBD-CSV

 view release on metacpan or  search on metacpan

ChangeLog  view on Meta::CPAN

0.41	- 2013-07-29, H.Merijn Brand
    * Use File::Spec->tmpdir () for universal existing folder
      Note that huge $TMP folders may cause the test to run slow
    * Use File::Spec::rel2abs () instead of abs_path and hoops
    * Guard against parallel testing, which is unsupported
    * Guard against streaming tests (they PASS on the DBI-devel)

0.40	- 2013-07-23, H.Merijn Brand
    * Fix version references in doc
    * Fix tests for Windows

 view all matches for this distribution


DBD-JDBC

 view release on metacpan or  search on metacpan

lib/DBD/JDBC.pod  view on Meta::CPAN


DBI metadata methods, cancel, row cache.

=item *

Better handling of long fields via some sort of streaming interface.

=item *

JDBC 2.0 support.

 view all matches for this distribution


DBD-Oracle

 view release on metacpan or  search on metacpan

lib/DBD/Oracle.pm  view on Meta::CPAN

contain, starting at position 100234 in the bin_data column,
the string "This string will overwrite a portion of the blob".

=head3 Example: Streaming character data from the database

In this example, we demonstrate a technique for streaming
data from the database to a file handle, in this case
STDOUT.  This allows more data to be read in and written out
than could be stored in memory at a given time.

   my $lob_id = 17;   # Arbitrary row identifier, for example
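The body of the original example is elided in this excerpt; below is a hedged sketch of the technique it describes, fetching a LOB locator (ora_auto_lob set to 0) and copying it out in chunks with ora_lob_read(). Table and column names are hypothetical:

   my $sth = $dbh->prepare(
       'SELECT chr_data FROM lob_example WHERE lob_id = ?',
       { ora_auto_lob => 0 },          # fetch the LOB locator, not the whole value
   );
   $sth->execute($lob_id);
   my ($locator) = $sth->fetchrow_array;

   my $offset     = 1;                 # Oracle LOB offsets are 1-based
   my $chunk_size = 65536;
   while (1) {
       my $chunk = $dbh->ora_lob_read($locator, $offset, $chunk_size);
       last unless defined $chunk and length $chunk;
       print STDOUT $chunk;
       $offset += length $chunk;
   }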

 view all matches for this distribution


DBD-SQLcipher

 view release on metacpan or  search on metacpan

Changes  view on Meta::CPAN

    - Aliases for perl_call_*
    - Updated to sqlite 2.8.6
    - use sqlite_freemem everywhere

0.27
    - Changed API to use sqlite streaming API. This makes things slightly
      slower for large result sets, with the benefit of being more "sane"
      internally.

0.26
    - Update to sqlite 2.8.5

 view all matches for this distribution


DBD-SQLeet

 view release on metacpan or  search on metacpan

sqlite3.c  view on Meta::CPAN

** this function, all changes that relate to a single table are visited 
** consecutively. There is no chance that the iterator will visit a change 
** that applies to table X, then one for table Y, and then later on visit 
** another change for table X.
**
** The behavior of sqlite3changeset_start_v2() and its streaming equivalent
** may be modified by passing a combination of
** [SQLITE_CHANGESETSTART_INVERT | supported flags] as the 4th parameter.
**
** Note that the sqlite3changeset_start_v2() API is still <b>experimental</b>
** and therefore subject to change.

sqlite3.c  view on Meta::CPAN

**
** Any number of calls to add() and output() may be made between the calls to
** new() and delete(), and in any order.
**
** As well as the regular sqlite3changegroup_add() and 
** sqlite3changegroup_output() functions, also available are the streaming
** versions sqlite3changegroup_add_strm() and sqlite3changegroup_output_strm().
*/
SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp);

/*

sqlite3.c  view on Meta::CPAN

** caller to eventually free any such buffer using sqlite3_free(). The buffer
** is only allocated and populated if one or more conflicts were encountered
** while applying the patchset. See comments surrounding the sqlite3_rebaser
** APIs for further details.
**
** The behavior of sqlite3changeset_apply_v2() and its streaming equivalent
** may be modified by passing a combination of
** [SQLITE_CHANGESETAPPLY_NOSAVEPOINT | supported flags] as the 9th parameter.
**
** Note that the sqlite3changeset_apply_v2() API is still <b>experimental</b>
** and therefore subject to change.

sqlite3.c  view on Meta::CPAN

SQLITE_API void sqlite3rebaser_delete(sqlite3_rebaser *p); 

/*
** CAPI3REF: Streaming Versions of API functions.
**
** The six streaming API xxx_strm() functions serve similar purposes to the 
** corresponding non-streaming API functions:
**
** <table border=1 style="margin-left:8ex;margin-right:8ex">
**   <tr><th>Streaming function<th>Non-streaming equivalent</th>
**   <tr><td>sqlite3changeset_apply_strm<td>[sqlite3changeset_apply] 
**   <tr><td>sqlite3changeset_apply_strm_v2<td>[sqlite3changeset_apply_v2] 
**   <tr><td>sqlite3changeset_concat_strm<td>[sqlite3changeset_concat] 
**   <tr><td>sqlite3changeset_invert_strm<td>[sqlite3changeset_invert] 
**   <tr><td>sqlite3changeset_start_strm<td>[sqlite3changeset_start] 
**   <tr><td>sqlite3session_changeset_strm<td>[sqlite3session_changeset] 
**   <tr><td>sqlite3session_patchset_strm<td>[sqlite3session_patchset] 
** </table>
**
** Non-streaming functions that accept changesets (or patchsets) as input
** require that the entire changeset be stored in a single buffer in memory. 
** Similarly, those that return a changeset or patchset do so by returning 
** a pointer to a single large buffer allocated using sqlite3_malloc(). 
** Normally this is convenient. However, if an application running in a 
** low-memory environment is required to handle very large changesets, the
** large contiguous memory allocations required can become onerous.
**
** In order to avoid this problem, instead of a single large buffer, input
** is passed to a streaming API functions by way of a callback function that
** the sessions module invokes to incrementally request input data as it is
** required. In all cases, a pair of API function parameters such as
**
**  <pre>
**  &nbsp;     int nChangeset,

sqlite3.c  view on Meta::CPAN

** error occurs the xInput method should copy up to (*pnData) bytes of data 
** into the buffer and set (*pnData) to the actual number of bytes copied 
** before returning SQLITE_OK. If the input is completely exhausted, (*pnData) 
** should be set to zero to indicate this. Or, if an error occurs, an SQLite 
** error code should be returned. In all cases, if an xInput callback returns
** an error, all processing is abandoned and the streaming API function
** returns a copy of the error code to the caller.
**
** In the case of sqlite3changeset_start_strm(), the xInput callback may be
** invoked by the sessions module at any point during the lifetime of the
** iterator. If such an xInput callback returns an error, the iterator enters
** an error state, whereby all subsequent calls to iterator functions 
** immediately fail with the same error code as returned by xInput.
**
** Similarly, streaming API functions that return changesets (or patchsets)
** return them in chunks by way of a callback function instead of via a
** pointer to a single large buffer. In this case, a pair of parameters such
** as:
**
**  <pre>

sqlite3.c  view on Meta::CPAN

** pOut pointer supplied by the application. The second parameter, pData,
** points to a buffer nData bytes in size containing the chunk of output
** data being returned. If the xOutput callback successfully processes the
** supplied data, it should return SQLITE_OK to indicate success. Otherwise,
** it should return some other SQLite error code. In this case processing
** is immediately abandoned and the streaming API function returns a copy
** of the xOutput error code to the application.
**
** The sessions module never invokes an xOutput callback with the third 
** parameter set to a value less than or equal to zero. Other than this,
** no guarantees are made as to the size of the chunks of data returned.

sqlite3.c  view on Meta::CPAN

** the effect of calling this function depends on the value of the first
** parameter.
**
** <dl>
** <dt>SQLITE_SESSION_CONFIG_STRMSIZE<dd>
**    By default, the sessions module streaming interfaces attempt to input
**    and output data in approximately 1 KiB chunks. This operand may be used
**    to set and query the value of this configuration setting. The pointer
**    passed as the second argument must point to a value of type (int).
**    If this value is greater than 0, it is used as the new streaming data
**    chunk size for both input and output. Before returning, the (int) value
**    pointed to by pArg is set to the final value of the streaming interface
**    chunk size.
** </dl>
**
** This function returns SQLITE_OK if successful, or an SQLite error code
** otherwise.

sqlite3.c  view on Meta::CPAN

typedef struct SessionChange SessionChange;
typedef struct SessionBuffer SessionBuffer;
typedef struct SessionInput SessionInput;

/*
** Minimum chunk size used by streaming versions of functions.
*/
#ifndef SESSIONS_STRM_CHUNK_SIZE
# ifdef SQLITE_TEST
#   define SESSIONS_STRM_CHUNK_SIZE 64
# else

sqlite3.c  view on Meta::CPAN

** change to contain a field set to "undefined".
**
** REBASE BLOB FORMAT:
**
** A rebase blob may be output by sqlite3changeset_apply_v2() and its 
** streaming equivalent for use with the sqlite3_rebaser APIs to rebase
** existing changesets. A rebase blob contains one entry for each conflict
** resolved using either the OMIT or REPLACE strategies within the apply_v2()
** call.
**
** The format used for a rebase blob is very similar to that used for

sqlite3.c  view on Meta::CPAN

  int bInvert = !!(flags & SQLITE_CHANGESETSTART_INVERT);
  return sessionChangesetStart(pp, xInput, pIn, 0, 0, bInvert);
}

/*
** If the SessionInput object passed as the only argument is a streaming
** object and the buffer is full, discard some data to free up space.
*/
static void sessionDiscardData(SessionInput *pIn){
  if( pIn->xInput && pIn->iNext>=sessions_strm_chunk_size ){
    int nMove = pIn->buf.nBuf - pIn->iNext;

sqlite3.c  view on Meta::CPAN

/*
** Serialize a changeset (or patchset) based on all changesets (or patchsets)
** added to the changegroup object passed as the first argument.
**
** If xOutput is not NULL, then the changeset/patchset is returned to the
** user via one or more calls to xOutput, as with the other streaming
** interfaces. 
**
** Or, if xOutput is NULL, then (*ppOut) is populated with a pointer to a
** buffer containing the output changeset before this function returns. In
** this case (*pnOut) is set to the size of the output buffer in bytes. It
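The comments above describe a chunked callback protocol: xInput supplies input a piece at a time and xOutput receives output a piece at a time, instead of one large buffer in each direction. That C API is not exposed by the Perl driver excerpted here; purely as an illustration of the input-side contract, a small Perl model:

    # Illustrative model only: the consumer repeatedly asks a callback for up
    # to $n bytes; an empty chunk means the input is exhausted (the analogue
    # of the xInput callback setting *pnData to zero).
    sub make_input_cb {
        my ($fh) = @_;
        return sub {
            my ($n) = @_;
            my $got = read($fh, my $buf, $n);
            die "read error: $!" unless defined $got;
            return $buf;                 # '' signals end of input
        };
    }

    open my $fh, '<:raw', 'changeset.bin' or die $!;
    my $next_chunk = make_input_cb($fh);
    while (1) {
        my $chunk = $next_chunk->(1024);    # roughly the 1 KiB default chunk size noted above
        last unless length $chunk;
        # feed $chunk to whatever consumes the changeset stream
    }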

 view all matches for this distribution


DBD-SQLite-Amalgamation

 view release on metacpan or  search on metacpan

sqlite-amalgamation.c  view on Meta::CPAN

** leaf layer of the tree.
*/
typedef struct LeavesReader {
  int idx;                  /* Index within the segment. */

  sqlite3_stmt *pStmt;      /* Statement we're streaming leaves from. */
  int eof;                  /* we've seen SQLITE_DONE from pStmt. */

  LeafReader leafReader;    /* reader for the current leaf. */
  DataBuffer rootData;      /* root data for inline. */
} LeavesReader;

 view all matches for this distribution


DBD-SQLite

 view release on metacpan or  search on metacpan

Changes  view on Meta::CPAN

    - Aliases for perl_call_*
    - Updated to sqlite 2.8.6
    - use sqlite_freemem everywhere

0.27
    - Changed API to use sqlite streaming API. This makes things slightly
      slower for large result sets, with the benefit of being more "sane"
      internally.

0.26
    - Update to sqlite 2.8.5

 view all matches for this distribution


DBD-SQLite2

 view release on metacpan or  search on metacpan

Changes  view on Meta::CPAN

    - Aliases for perl_call_*
    - Updated to sqlite 2.8.6
    - use sqlite_freemem everywhere

0.27
    - Changed API to use sqlite streaming API. This makes things slightly
      slower for large result sets, with the benefit of being more "sane"
      internally.

0.26
    - Update to sqlite 2.8.5

 view all matches for this distribution


DBD-Wire10

 view release on metacpan or  search on metacpan

lib/DBD/Wire10.pm  view on Meta::CPAN

}

sub take_imp_data {
	my $dbh = shift;

	# Finish any active statements (important if streaming enabled).
	for my $sth (@{$dbh->{ChildHandles} || []}) {
		next unless $sth;
		$sth->finish if $sth->{Active};
	}

lib/DBD/Wire10.pm  view on Meta::CPAN

		}
	}

	my $rowcount = eval {
		$sth->finish;
		my $stream_results = $sth->FETCH('wire10_streaming') || 0;
		my $res = $stream_results ? $ps->stream : $ps->query;

		die if $wire->get_error_info;

		$sth->STORE('wire10_warning_count', $res->get_warning_count);

lib/DBD/Wire10.pm  view on Meta::CPAN

}

sub finish {
	my $sth = shift;
	my $dbh = $sth->{Database};
	# If in streaming mode, flush remaining results.
	my $iterator = $sth->{wire10_iterator};
	$iterator->spool if defined $iterator;
	$sth->{wire10_iterator} = undef;
	$sth->STORE('Active', 0);
	$sth->SUPER::finish;

lib/DBD/Wire10.pm  view on Meta::CPAN


Contains the auto_increment value for the last row inserted.

  my $id = $sth->{wire10_insertid};

=head4 wire10_streaming

If this is set to 1 (or any value that evaluates to true), results will be streamed from the server rather than downloaded all at once, when the statement is executed.

  $sth->{wire10_streaming} = 1;

Notice that the underlying protocol has a limitation: when a streaming statement is active, no other statements can execute on the same connection.
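Putting the attribute to use, a small hedged sketch; the DSN attribute names (host, database) are assumptions, while wire10_streaming and the one-active-statement limitation are as described above:

    use strict;
    use warnings;
    use DBI;

    my $dbh = DBI->connect('dbi:Wire10:host=localhost;database=test',
                           'user', 'password', { RaiseError => 1 });

    my $sth = $dbh->prepare('SELECT id, payload FROM big_table');
    $sth->{wire10_streaming} = 1;    # stream rows instead of buffering the whole result set
    $sth->execute;
    while (my $row = $sth->fetchrow_arrayref) {
        # process one row at a time; no other statement may run on $dbh meanwhile
    }
    $sth->finish;                    # flushes any remaining streamed rows (see finish() above)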

=head4 wire10_warning_count

Contains the number of warnings produced by the last query.

lib/DBD/Wire10.pm  view on Meta::CPAN


=head3 Supported C<mysql_> attributes

All of the C<mysql_> attributes are unavailable.  DBI requires that each driver uses a unique prefix, therefore this driver supports only attributes named C<wire10_>.

Not all C<mysql_> attributes have equivalently named C<wire10_> attributes.  For example, there is no C<mysql_use_result> attribute, but one called C<wire10_streaming> does exactly the same.

=head2 Dependencies

This module requires these other modules and libraries:

 view all matches for this distribution


DBI

 view release on metacpan or  search on metacpan

lib/DBD/Gofer/Transport/stream.pm  view on Meta::CPAN


__END__

=head1 NAME

DBD::Gofer::Transport::stream - DBD::Gofer transport for stdio streaming

=head1 SYNOPSIS

  DBI->connect('dbi:Gofer:transport=stream;url=ssh:username@host.example.com;dsn=dbi:...',...)

 view all matches for this distribution


DJabberd

 view release on metacpan or  search on metacpan

doc/rfc3920-notes.txt  view on Meta::CPAN

   Copyright (C) The Internet Society (2004).

Abstract

   This memo defines the core features of the Extensible Messaging and
   Presence Protocol (XMPP), a protocol for streaming Extensible Markup
   Language (XML) elements in order to exchange structured information
   in close to real time between any two network endpoints.  While XMPP
   provides a generalized, extensible framework for exchanging XML data,
   it is used mainly for the purpose of building instant messaging and
   presence applications that meet the requirements of RFC 2779.

doc/rfc3920-notes.txt  view on Meta::CPAN


11.  XML Usage within XMPP

11.1.  Restrictions

   XMPP is a simplified and specialized protocol for streaming XML
   elements in order to exchange structured information in close to real
   time.  Because XMPP does not require the parsing of arbitrary and
   complete XML documents, there is no requirement that XMPP needs to
   support the full feature set of [XML].  In particular, the following
   restrictions apply.

 view all matches for this distribution


DTA-CAB

 view release on metacpan or  search on metacpan

Changes  view on Meta::CPAN

	* all type keys are inherited by default
	* new dta-cab-analysis -analyzer-class=CLASS option
	* new Chain::Multi analyzer option 'chain=C1,C2,...' parses user-defined sub-chains

v1.04 2010-09-22 09:38  moocow
	* added -block-size=NLINES option to dta-cab-analyze.perl for pseudo-streaming TT analysis
	* updated MorphSafe: first- and geonames are now 'safe'

v1.03 2010-05-19 10:36  moocow
	* require Unicode::CharName
	* updated system/resources using CAB v1.x on uhura (no complete re-build yet)

 view all matches for this distribution


DTOne-Crypt

 view release on metacpan or  search on metacpan

lib/DTOne/Crypt.pm  view on Meta::CPAN

Master key is expected to be exactly 256 bits in length, encoded in base64.
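For illustration, one way to produce a key in the expected format: 256 bits is 32 random bytes, which are then base64-encoded. Reading /dev/urandom directly is an assumption about the platform:

    use strict;
    use warnings;
    use MIME::Base64 qw(encode_base64);

    open my $ur, '<:raw', '/dev/urandom' or die $!;
    read($ur, my $key_bytes, 32) == 32 or die "short read from urandom";
    close $ur;

    my $master_key = encode_base64($key_bytes, '');   # 32 bytes = 256 bits, base64, no newline
    print "$master_key\n";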

=head2 Performance

Random byte generation on Linux might run slow over time unless L<haveged(8)>
is running. In this scenario, the streaming facility of AES-GCM will be more
memory efficient.

=head1 AUTHOR

Arnold Tan Casis E<lt>atancasis@cpan.orgE<gt>

 view all matches for this distribution


Dancer

 view release on metacpan or  search on metacpan

lib/Dancer.pm  view on Meta::CPAN

    my $env = Dancer::SharedData->request->env;

    my $request = Dancer::Request->new_for_request('GET' => $path);
    Dancer::SharedData->request($request);

    # if you asked for streaming but it's not supported in PSGI
    if ( $options{'streaming'} && ! $env->{'psgi.streaming'} ) {
        # TODO: throw a fit (AKA "exception") or a Dancer::Error?
        raise core => 'Sorry, streaming is not supported on this server.';
    }

    if (exists($options{content_type})) {
        $request->content_type($options{content_type});
    }

lib/Dancer.pm  view on Meta::CPAN

            $resp->push_header('Content-Disposition' => 
                "attachment; filename=\"$options{filename}\""
            );
        }

        if ( $options{'streaming'} ) {
            # handle streaming
            $resp->streamed( sub {
                my ( $status, $headers ) = @_;
                my %callbacks = defined $options{'callbacks'} ?
                                %{ $options{'callbacks'} }    :
                                ();

lib/Dancer.pm  view on Meta::CPAN

            # This code will be ignored
            do_stuff();
        }
    };

C<send_file> supports streaming using PSGI streaming. The server must support
PSGI streaming, but most, if not all, servers do.

    get '/download/:file' => sub {
        send_file( params->{file}, streaming => 1 );
    }

You can control what happens using callbacks.

First, C<around_content> allows you to get the writer object and the chunk of
content read, and then decide what to do with each chunk:

    get '/download/:file' => sub {
        send_file(
            params->{file},
            streaming => 1,
            callbacks => {
                around_content => sub {
                    my ( $writer, $chunk ) = @_;
                    $writer->write("* $chunk");
                },

lib/Dancer.pm  view on Meta::CPAN

it:

    get '/download/:file' => sub {
        send_file(
            params->{file},
            streaming => 1,
            callbacks => {
                around => sub {
                    my ( $writer, $content ) = @_;

                    # we know it's a text file, so we'll just stream

lib/Dancer.pm  view on Meta::CPAN

                },
            },
        );
    }

Or you could use C<override> to control the entire streaming callback request:

    get '/download/:file' => sub {
        send_file(
            params->{file},
            streaming => 1,
            callbacks => {
                override => sub {
                    my ( $respond, $response ) = @_;

                    my $writer = $respond->( [ $newstatus, $newheaders ] );

lib/Dancer.pm  view on Meta::CPAN

42K bytes) using C<bytes>:

    get '/download/:file' => sub {
        send_file(
            params->{file},
            streaming => 1,
            bytes     => 524288, # 512K
        );
    };
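These options combine; for example, a hedged sketch that streams in 512K chunks and logs each chunk size from C<around_content> (the route and file parameter follow the examples above):

    get '/download/:file' => sub {
        send_file(
            params->{file},
            streaming => 1,
            bytes     => 524288,    # 512K chunks
            callbacks => {
                around_content => sub {
                    my ( $writer, $chunk ) = @_;
                    $writer->write($chunk);                        # pass the chunk through
                    warning 'sent ' . length($chunk) . ' bytes';   # Dancer's warning keyword
                },
            },
        );
    };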

The content-type will be set depending on the current MIME types definition

 view all matches for this distribution


( run in 1.119 seconds using v1.01-cache-2.11-cpan-a5abf4f5562 )