libuv/ChangeLog
* unix: unconditionally stop handle on close (Ben Noordhuis)
* freebsd: don't enable dtrace if it's not available (Brian White)
* build: make HAVE_DTRACE=0 should disable dtrace (Timothy J. Fontaine)
* unix: remove overzealous assert (Ben Noordhuis)
* unix: clear UV_STREAM_SHUTTING after shutdown() (Ben Noordhuis)
* unix: fix busy loop, write if POLLERR or POLLHUP (Ben Noordhuis)
2013.06.05, Version 0.10.10 (Stable), 0d95a88bd35fce93863c57a460be613aea34d2c5
Changes since version 0.10.9:
* include: document uv_update_time() and uv_now() (Ben Noordhuis)
* linux: fix cpu model parsing on newer arm kernels (Ben Noordhuis)
libuv/docs/src/errors.rst
.. c:macro:: UV_EALREADY
connection already in progress
.. c:macro:: UV_EBADF
bad file descriptor
.. c:macro:: UV_EBUSY
resource busy or locked
.. c:macro:: UV_ECANCELED
operation canceled
.. c:macro:: UV_ECHARSET
invalid Unicode character
.. c:macro:: UV_ECONNABORTED
software caused connection abort
libuv/docs/src/errors.rst
.. c:macro:: UV_ESRCH
no such process
.. c:macro:: UV_ETIMEDOUT
connection timed out
.. c:macro:: UV_ETXTBSY
text file is busy
.. c:macro:: UV_EXDEV
cross-device link not permitted
.. c:macro:: UV_UNKNOWN
unknown error
.. c:macro:: UV_EOF
end of file
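These UV_E* constants map to human-readable messages at runtime through libuv's uv_err_name() and uv_strerror() helpers. A minimal sketch (the wrapper function name is illustrative):

#include <stdio.h>
#include <uv.h>

/* Print the symbolic name and message for a libuv error code,
 * e.g. "EBUSY: resource busy or locked".
 */
static void print_uv_error(int err) {
  fprintf(stderr, "%s: %s\n", uv_err_name(err), uv_strerror(err));
}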
libuv/docs/src/poll.rst
:c:type:`uv_tcp_t`, :c:type:`uv_udp_t`, etc. provide an implementation that is faster and
more scalable than what can be achieved with :c:type:`uv_poll_t`, especially on
Windows.
It is possible that poll handles occasionally signal that a file descriptor is
readable or writable even when it isn't. The user should therefore always
be prepared to handle EAGAIN (or its platform equivalent) when attempting to
read from or write to the fd.
It is not okay to have multiple active poll handles for the same socket, as
this can cause libuv to busy-loop or otherwise malfunction.
The user should not close a file descriptor while it is being polled by an
active poll handle. This can cause the handle to report an error,
but it might also start polling another socket. However, the fd can be safely
closed immediately after a call to :c:func:`uv_poll_stop` or :c:func:`uv_close`.
.. note::
On Windows, only sockets can be polled with poll handles. On Unix, any file
descriptor that would be accepted by :man:`poll(2)` can be used.
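A minimal sketch of a callback that tolerates the spurious wakeups described above. The fd is assumed to be non-blocking; watched_fd and the setup comment are illustrative, while uv_poll_init(), uv_poll_start() and the UV_READABLE flag are libuv's real API:

#include <errno.h>
#include <unistd.h>
#include <uv.h>

static int watched_fd;  /* Set before the handle is started. */

static void poll_cb(uv_poll_t* handle, int status, int events) {
  char buf[4096];
  ssize_t n;

  if (status < 0)
    return;  /* Poll error (e.g. UV_EBADF); close the handle in real code. */

  if (events & UV_READABLE) {
    n = read(watched_fd, buf, sizeof(buf));
    if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
      return;  /* Spurious readability; just wait for the next event. */
    /* ... consume n bytes of buf ... */
  }
}

/* Setup: uv_poll_init(loop, &handle, watched_fd);
 *        uv_poll_start(&handle, UV_READABLE, poll_cb);
 */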
libuv/include/uv.h
XX(EAI_FAMILY, "ai_family not supported") \
XX(EAI_MEMORY, "out of memory") \
XX(EAI_NODATA, "no address") \
XX(EAI_NONAME, "unknown node or service") \
XX(EAI_OVERFLOW, "argument buffer overflow") \
XX(EAI_PROTOCOL, "resolved protocol is unknown") \
XX(EAI_SERVICE, "service not available for socket type") \
XX(EAI_SOCKTYPE, "socket type not supported") \
XX(EALREADY, "connection already in progress") \
XX(EBADF, "bad file descriptor") \
XX(EBUSY, "resource busy or locked") \
XX(ECANCELED, "operation canceled") \
XX(ECHARSET, "invalid Unicode character") \
XX(ECONNABORTED, "software caused connection abort") \
XX(ECONNREFUSED, "connection refused") \
XX(ECONNRESET, "connection reset by peer") \
XX(EDESTADDRREQ, "destination address required") \
XX(EEXIST, "file already exists") \
XX(EFAULT, "bad address in system call argument") \
XX(EFBIG, "file too large") \
XX(EHOSTUNREACH, "host is unreachable") \
libuv/include/uv.h
XX(EPIPE, "broken pipe") \
XX(EPROTO, "protocol error") \
XX(EPROTONOSUPPORT, "protocol not supported") \
XX(EPROTOTYPE, "protocol wrong type for socket") \
XX(ERANGE, "result too large") \
XX(EROFS, "read-only file system") \
XX(ESHUTDOWN, "cannot send after transport endpoint shutdown") \
XX(ESPIPE, "invalid seek") \
XX(ESRCH, "no such process") \
XX(ETIMEDOUT, "connection timed out") \
XX(ETXTBSY, "text file is busy") \
XX(EXDEV, "cross-device link not permitted") \
XX(UNKNOWN, "unknown error") \
XX(EOF, "end of file") \
XX(ENXIO, "no such device or address") \
XX(EMLINK, "too many links") \
XX(EHOSTDOWN, "host is down") \
XX(EREMOTEIO, "remote I/O error") \
XX(ENOTTY, "inappropriate ioctl for device") \
XX(EFTYPE, "inappropriate file type or format") \
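Each XX(code, message) line above is one entry in an X-macro list: consumers define XX to extract what they need, expand the list, then undefine it. A minimal sketch of the pattern, using a hypothetical three-entry list rather than libuv's full map:

/* The list: each entry is XX(name, message). */
#define MY_ERRNO_MAP(XX)                              \
  XX(EBADF, "bad file descriptor")                    \
  XX(EBUSY, "resource busy or locked")                \
  XX(EOF, "end of file")

/* Expansion 1: an enum of error codes. */
#define XX(code, _) MY_ ## code,
enum my_errno { MY_ERRNO_MAP(XX) };
#undef XX

/* Expansion 2: a code-to-message lookup. */
static const char* my_strerror(int err) {
#define XX(code, msg) if (err == MY_ ## code) return msg;
  MY_ERRNO_MAP(XX)
#undef XX
  return "unknown error";
}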
libuv/samples/socks5-proxy/client.c
* IN THE SOFTWARE.
*/
#include "defs.h"
#include <errno.h>
#include <stdlib.h>
#include <string.h>
/* A connection is modeled as an abstraction on top of two simple state
* machines, one for reading and one for writing. Either state machine
* is, when active, in one of three states: busy, done or stop; the fourth
* and final state, dead, is an end state and only relevant when shutting
* down the connection. A short overview:
*
* busy done stop
* ----------|---------------------------|--------------------|------|
* readable | waiting for incoming data | have incoming data | idle |
* writable | busy writing out data | completed write | idle |
*
* We could remove the done state from the writable state machine. For our
* purposes, it's functionally equivalent to the stop state.
*
* When the connection with upstream has been established, the client_ctx
* moves into a state where incoming data from the client is sent upstream
* and vice versa, incoming data from upstream is sent to the client. In
* other words, we're just piping data back and forth. See conn_cycle()
* for details.
*
libuv/samples/socks5-proxy/client.c
* completes, the connection stops reading until further notice.
*
* The rationale for this approach is that we have to wait until the data
* has been sent out again before we can reuse the read buffer.
*
* It also pleasingly unifies with the request model that libuv uses for
* writes and everything else; libuv may switch to a request model for
* reads in the future.
*/
enum conn_state {
c_busy, /* Busy; waiting for incoming data or for a write to complete. */
c_done, /* Done; read incoming data or write finished. */
c_stop, /* Stopped. */
c_dead
};
/* Session states. */
enum sess_state {
s_handshake, /* Wait for client handshake. */
s_handshake_auth, /* Wait for client authentication data. */
s_req_start, /* Start waiting for request data. */
libuv/samples/socks5-proxy/client.c
}
c = CONTAINER_OF(req, conn, t.connect_req);
c->result = status;
do_next(c->client);
}
static void conn_read(conn *c) {
ASSERT(c->rdstate == c_stop);
CHECK(0 == uv_read_start(&c->handle.stream, conn_alloc, conn_read_done));
c->rdstate = c_busy;
conn_timer_reset(c);
}
static void conn_read_done(uv_stream_t *handle,
ssize_t nread,
const uv_buf_t *buf) {
conn *c;
c = CONTAINER_OF(handle, conn, handle);
ASSERT(c->t.buf == buf->base);
ASSERT(c->rdstate == c_busy);
c->rdstate = c_done;
c->result = nread;
uv_read_stop(&c->handle.stream);
do_next(c->client);
}
static void conn_alloc(uv_handle_t *handle, size_t size, uv_buf_t *buf) {
conn *c;
c = CONTAINER_OF(handle, conn, handle);
ASSERT(c->rdstate == c_busy);
buf->base = c->t.buf;
buf->len = sizeof(c->t.buf);
}
static void conn_write(conn *c, const void *data, unsigned int len) {
uv_buf_t buf;
ASSERT(c->wrstate == c_stop || c->wrstate == c_done);
c->wrstate = c_busy;
/* It's okay to cast away constness here: uv_write() won't modify the
* memory.
*/
buf.base = (char *) data;
buf.len = len;
CHECK(0 == uv_write(&c->write_req,
&c->handle.stream,
&buf,
libuv/samples/socks5-proxy/client.c
}
static void conn_write_done(uv_write_t *req, int status) {
conn *c;
if (status == UV_ECANCELED) {
return; /* Handle has been closed. */
}
c = CONTAINER_OF(req, conn, write_req);
ASSERT(c->wrstate == c_busy);
c->wrstate = c_done;
c->result = status;
do_next(c->client);
}
static void conn_close(conn *c) {
ASSERT(c->rdstate != c_dead);
ASSERT(c->wrstate != c_dead);
c->rdstate = c_dead;
c->wrstate = c_dead;
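The comment at the top of this file refers to conn_cycle(), which falls outside this excerpt. A hedged sketch of the general shape such a pump could take, given the states above; the function name and details here are illustrative, not the sample's actual code:

/* Illustrative only: pump data from conn a to conn b.  A read buffer can
 * be reused only after the peer's write of it has completed.
 */
static int conn_pump(conn* a, conn* b) {
  if (a->result < 0 || b->result < 0)
    return -1;  /* Error on either side: tear the session down. */

  if (a->rdstate == c_done) {
    conn_write(b, a->t.buf, (unsigned int) a->result);  /* Forward data. */
    a->rdstate = c_stop;  /* Buffer is in flight; pause reading. */
  }

  if (b->wrstate == c_done && a->rdstate == c_stop) {
    b->wrstate = c_stop;
    conn_read(a);  /* Write finished; the buffer is free again. */
  }

  return 0;
}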
libuv/src/fs-poll.c
#else
#include "unix/internal.h"
#endif
#include <assert.h>
#include <stdlib.h>
#include <string.h>
struct poll_ctx {
uv_fs_poll_t* parent_handle;
int busy_polling;
unsigned int interval;
uint64_t start_time;
uv_loop_t* loop;
uv_fs_poll_cb poll_cb;
uv_timer_t timer_handle;
uv_fs_t fs_req; /* TODO(bnoordhuis) mark fs_req internal */
uv_stat_t statbuf;
struct poll_ctx* previous; /* context from previous start()..stop() period */
char path[1]; /* variable length */
};
libuv/src/fs-poll.c
uint64_t interval;
uv_fs_poll_t* handle;
ctx = container_of(req, struct poll_ctx, fs_req);
handle = ctx->parent_handle;
if (!uv_is_active((uv_handle_t*)handle) || uv__is_closing(handle))
goto out;
  /* The stat failed: report the error once, remembering it in busy_polling
   * so that repeated failures with the same code are not reported again.
   */
  if (req->result != 0) {
if (ctx->busy_polling != req->result) {
ctx->poll_cb(ctx->parent_handle,
req->result,
&ctx->statbuf,
&zero_statbuf);
ctx->busy_polling = req->result;
}
goto out;
}
statbuf = &req->statbuf;
  /* The stat succeeded. The very first result (busy_polling == 0) only
   * establishes a baseline; after that, report whenever the previous poll
   * failed or the stat data changed, then cache the new result.
   */
  if (ctx->busy_polling != 0)
    if (ctx->busy_polling < 0 || !statbuf_eq(&ctx->statbuf, statbuf))
      ctx->poll_cb(ctx->parent_handle, 0, &ctx->statbuf, statbuf);
  ctx->statbuf = *statbuf;
  ctx->busy_polling = 1;
out:
uv_fs_req_cleanup(req);
if (!uv_is_active((uv_handle_t*)handle) || uv__is_closing(handle)) {
uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
return;
}
/* Reschedule timer, subtract the delay from doing the stat(). */
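statbuf_eq() is referenced above but is outside this excerpt. A minimal sketch consistent with how it is used here; the exact set of compared fields is an assumption:

/* Sketch: two stat results are considered equal when the metadata that
 * matters for change detection is identical.  Field choice is illustrative.
 */
static int statbuf_eq(const uv_stat_t* a, const uv_stat_t* b) {
  return a->st_ctim.tv_sec == b->st_ctim.tv_sec &&
         a->st_ctim.tv_nsec == b->st_ctim.tv_nsec &&
         a->st_mtim.tv_sec == b->st_mtim.tv_sec &&
         a->st_mtim.tv_nsec == b->st_mtim.tv_nsec &&
         a->st_size == b->st_size &&
         a->st_ino == b->st_ino;
}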
libuv/src/win/core.c
uv_fatal_error(GetLastError(), "GetQueuedCompletionStatus");
} else if (timeout > 0) {
/* GetQueuedCompletionStatus can occasionally return a little early.
* Make sure that the desired timeout target time is reached.
*/
uv_update_time(loop);
if (timeout_time > loop->time) {
timeout = (DWORD)(timeout_time - loop->time);
/* The first call to GetQueuedCompletionStatus should return very
* close to the target time and the second should reach it, but
* this is not stated in the documentation. To make sure a busy
* loop cannot happen, the timeout is increased exponentially
* starting on the third round.
*/
timeout += repeat ? (1 << (repeat - 1)) : 0;
continue;
}
}
break;
}
}
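To make the backoff concrete, assume repeat starts at 0 and is incremented once per loop iteration (the loop header is outside this excerpt): the second GetQueuedCompletionStatus call gets only the recomputed remainder (the ternary yields 0), while the third, fourth and fifth calls get an extra 1, 2 and 4 ms (1 << (repeat - 1) for repeat = 1, 2, 3). The padding doubles each round, so the wait eventually covers the target time and the loop is guaranteed to terminate.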
libuv/src/win/core.c
uv_fatal_error(GetLastError(), "GetQueuedCompletionStatusEx");
} else if (timeout > 0) {
/* GetQueuedCompletionStatus can occasionally return a little early.
* Make sure that the desired timeout target time is reached.
*/
uv_update_time(loop);
if (timeout_time > loop->time) {
timeout = (DWORD)(timeout_time - loop->time);
/* The first call to GetQueuedCompletionStatus should return very
* close to the target time and the second should reach it, but
* this is not stated in the documentation. To make sure a busy
* loop cannot happen, the timeout is increased exponentially
* starting on the third round.
*/
timeout += repeat ? (1 << (repeat - 1)) : 0;
continue;
}
}
break;
}
}
libuv/test/test-async.c
ASSERT(r == 0);
/* Work around a bug in Valgrind.
*
* Valgrind runs threads not in parallel but sequentially, i.e. one after
* the other. It also doesn't preempt them, instead it depends on threads
* yielding voluntarily by making a syscall.
*
* That never happens here: the pipe that is associated with the async
* handle is written to once but that's too early for Valgrind's scheduler
* to kick in. Afterwards, the thread busy-loops, starving the main thread.
* Therefore, we yield.
*
* This behavior has been observed with Valgrind 3.7.0 and 3.9.0.
*/
uv_sleep(0);
}
}
static void close_cb(uv_handle_t* handle) {
libuv/test/test-tcp-unexpected-read.c
ASSERT(0 == uv_tcp_init(loop, &peer_handle));
ASSERT(0 == uv_tcp_bind(&server_handle, (const struct sockaddr*) &addr, 0));
ASSERT(0 == uv_listen((uv_stream_t*) &server_handle, 1, connection_cb));
ASSERT(0 == uv_tcp_connect(&connect_req,
&client_handle,
(const struct sockaddr*) &addr,
connect_cb));
ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));
/* This is somewhat inexact, but the idea is that the event loop should not
* start busy looping when the server sends a message and the client isn't
* reading.
*/
ASSERT(ticks <= 20);
MAKE_VALGRIND_HAPPY();
return 0;
}
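The ticks counter is incremented outside this excerpt. One plausible way to wire up such a counter, sketched here as an assumption rather than the test's actual code, is a check handle whose callback runs once per loop iteration:

static int ticks;

/* Runs after I/O polling on every loop iteration; a busy-looping event
 * loop drives this counter up very quickly.
 */
static void check_cb(uv_check_t* handle) {
  ticks++;
}

/* Setup, before uv_run(); the handle must later be closed (e.g. from a
 * timer callback) so that uv_run() can return:
 *   uv_check_init(loop, &check_handle);
 *   uv_check_start(&check_handle, check_cb);
 */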