Alien-uv
view release on metacpan or search on metacpan
libuv/src/unix/aix.c view on Meta::CPAN
abort();
}
pc.cmd = PS_ADD;
if (pollset_ctl(loop->backend_fd, &pc, 1)) {
assert(0 && "Failed to add file descriptor (pc.fd) to pollset");
abort();
}
}
w->events = w->pevents;
}
assert(timeout >= -1);
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
for (;;) {
nfds = pollset_poll(loop->backend_fd,
events,
ARRAY_SIZE(events),
timeout);
/* Update loop->time unconditionally. It's tempting to skip the update when
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
* operating system didn't reschedule our process while in the syscall.
*/
SAVE_ERRNO(uv__update_time(loop));
if (nfds == 0) {
assert(timeout != -1);
return;
}
if (nfds == -1) {
if (errno != EINTR) {
abort();
}
if (timeout == -1)
continue;
if (timeout == 0)
return;
/* Interrupted by a signal. Update timeout and poll again. */
goto update_timeout;
}
have_signals = 0;
nevents = 0;
assert(loop->watchers != NULL);
loop->watchers[loop->nwatchers] = (void*) events;
loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
for (i = 0; i < nfds; i++) {
pe = events + i;
pc.cmd = PS_DELETE;
pc.fd = pe->fd;
/* Skip invalidated events, see uv__platform_invalidate_fd */
if (pc.fd == -1)
continue;
assert(pc.fd >= 0);
assert((unsigned) pc.fd < loop->nwatchers);
w = loop->watchers[pc.fd];
if (w == NULL) {
/* File descriptor that we've stopped watching, disarm it.
*
* Ignore all errors because we may be racing with another thread
* when the file descriptor is closed.
*/
pollset_ctl(loop->backend_fd, &pc, 1);
continue;
}
/* Run signal watchers last. This also affects child process watchers
* because those are implemented in terms of signal watchers.
*/
if (w == &loop->signal_io_watcher)
have_signals = 1;
else
w->cb(loop, w, pe->revents);
nevents++;
}
if (have_signals != 0)
loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
loop->watchers[loop->nwatchers] = NULL;
loop->watchers[loop->nwatchers + 1] = NULL;
if (have_signals != 0)
return; /* Event loop should cycle now so don't poll again. */
if (nevents != 0) {
if (nfds == ARRAY_SIZE(events) && --count != 0) {
/* Poll for more events but don't block this time. */
timeout = 0;
continue;
}
return;
}
if (timeout == 0)
return;
if (timeout == -1)
continue;
update_timeout:
assert(timeout > 0);
diff = loop->time - base;
if (diff >= (uint64_t) timeout)
return;
libuv/src/unix/aix.c view on Meta::CPAN
return 0;
}
/* Populate *cpu_infos with one entry per logical CPU, reporting the
 * machine-wide clock speed/model (AIX perfstat only exposes totals) and
 * per-CPU tick counters. Returns 0 on success, UV_ENOSYS if the perfstat
 * queries fail, UV_ENOMEM on allocation failure. On success the caller
 * owns the array and each entry's `model` string (free with
 * uv_free_cpu_info); on failure nothing is leaked.
 */
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  uv_cpu_info_t* cpu_info;
  perfstat_cpu_total_t ps_total;
  perfstat_cpu_t* ps_cpus;
  perfstat_id_t cpu_id;
  int result, ncpus, idx = 0;

  /* Machine-wide totals: clock frequency and CPU description string. */
  result = perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1);
  if (result == -1) {
    return UV_ENOSYS;
  }

  /* With a NULL buffer, perfstat_cpu() returns the number of logical CPUs. */
  ncpus = result = perfstat_cpu(NULL, NULL, sizeof(perfstat_cpu_t), 0);
  if (result == -1 || ncpus <= 0) {
    /* A non-positive CPU count would make the allocations below degenerate;
     * treat it as a query failure.
     */
    return UV_ENOSYS;
  }

  ps_cpus = (perfstat_cpu_t*) uv__malloc(ncpus * sizeof(perfstat_cpu_t));
  if (!ps_cpus) {
    return UV_ENOMEM;
  }

  /* TODO(bnoordhuis) Check uv__strscpy() return value. */
  uv__strscpy(cpu_id.name, FIRST_CPU, sizeof(cpu_id.name));
  result = perfstat_cpu(&cpu_id, ps_cpus, sizeof(perfstat_cpu_t), ncpus);
  if (result == -1) {
    uv__free(ps_cpus);
    return UV_ENOSYS;
  }

  *cpu_infos = (uv_cpu_info_t*) uv__malloc(ncpus * sizeof(uv_cpu_info_t));
  if (!*cpu_infos) {
    uv__free(ps_cpus);
    return UV_ENOMEM;
  }

  cpu_info = *cpu_infos;
  while (idx < ncpus) {
    cpu_info->speed = (int)(ps_total.processorHZ / 1000000);
    cpu_info->model = uv__strdup(ps_total.description);
    if (cpu_info->model == NULL) {
      /* Allocation failed mid-way: free the model strings already
       * duplicated, then both arrays, so the caller sees no partial state.
       */
      while (idx-- > 0) {
        cpu_info--;
        uv__free(cpu_info->model);
      }
      uv__free(*cpu_infos);
      *cpu_infos = NULL;
      uv__free(ps_cpus);
      return UV_ENOMEM;
    }
    cpu_info->cpu_times.user = ps_cpus[idx].user;
    cpu_info->cpu_times.sys = ps_cpus[idx].sys;
    cpu_info->cpu_times.idle = ps_cpus[idx].idle;
    /* perfstat has no irq counter; I/O wait is the closest analogue. */
    cpu_info->cpu_times.irq = ps_cpus[idx].wait;
    cpu_info->cpu_times.nice = 0;
    cpu_info++;
    idx++;
  }
  /* Only report the count once every entry is fully initialized. */
  *count = ncpus;

  uv__free(ps_cpus);
  return 0;
}
/* Forget about `fd` entirely: mark any pending poll results for it as
 * invalid and remove it from the kernel pollset. Safe to call whether or
 * not a poll pass is currently being dispatched.
 */
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  struct poll_ctl pc;
  struct pollfd* pending;
  uintptr_t n;
  uintptr_t idx;

  assert(loop->watchers != NULL);
  assert(fd >= 0);

  /* uv__io_poll stashes the in-flight results array and its length in the
   * two slots past the end of loop->watchers; fd == -1 entries there are
   * skipped during dispatch (see "Skip invalidated events" above).
   */
  pending = (struct pollfd*) loop->watchers[loop->nwatchers];
  n = (uintptr_t) loop->watchers[loop->nwatchers + 1];
  if (pending != NULL) {
    for (idx = 0; idx < n; idx++) {
      if ((int) pending[idx].fd == fd)
        pending[idx].fd = -1;
    }
  }

  /* Drop the descriptor from the kernel pollset as well. */
  pc.events = 0;
  pc.cmd = PS_DELETE;
  pc.fd = fd;
  if (loop->backend_fd >= 0)
    pollset_ctl(loop->backend_fd, &pc, 1);
}
( run in 0.325 second using v1.01-cache-2.11-cpan-119454b85a5 )