Data-Heap-Shared


heap.h

    int         notify_fd;
    int         backing_fd;
} HeapHandle;

/* ================================================================
 * Mutex (PID-based, stale-recoverable)
 * ================================================================ */
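/*
 * Lock word layout (as used by heap_mutex_lock() below): the owner stores
 * HEAP_MUTEX_BIT OR-ed with its PID (masked by HEAP_MUTEX_PID), so a waiter
 * that times out can check whether the owning process is still alive and
 * recover the lock if it is not.  HEAP_MUTEX_BIT and HEAP_MUTEX_PID are
 * defined earlier in heap.h (not shown in this excerpt).
 */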

static const struct timespec heap_lock_timeout = { 2, 0 };

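/* Liveness probe: kill(pid, 0) fails with ESRCH only when no such process
 * exists.  A pid of 0 (no recorded owner) is treated as alive, which keeps
 * recovery conservative. */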
static inline int heap_pid_alive(uint32_t pid) {
    if (pid == 0) return 1;
    return !(kill((pid_t)pid, 0) == -1 && errno == ESRCH);
}

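/* Acquire: try to CAS the lock word from 0 to (HEAP_MUTEX_BIT | our pid);
 * on contention, register as a waiter and sleep in FUTEX_WAIT with the
 * 2-second timeout above, recovering the lock if its owner has died. */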
static inline void heap_mutex_lock(HeapHeader *hdr) {
    uint32_t mypid = HEAP_MUTEX_BIT | ((uint32_t)getpid() & HEAP_MUTEX_PID);
    for (int spin = 0; ; spin++) {
        uint32_t expected = 0;
        if (__atomic_compare_exchange_n(&hdr->mutex, &expected, mypid,
                1, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))

heap.h

#endif
            continue;
        }
        __atomic_add_fetch(&hdr->mutex_waiters, 1, __ATOMIC_RELAXED);
        uint32_t cur = __atomic_load_n(&hdr->mutex, __ATOMIC_RELAXED);
        if (cur != 0) {
            long rc = syscall(SYS_futex, &hdr->mutex, FUTEX_WAIT, cur,
                              &heap_lock_timeout, NULL, 0);
            if (rc == -1 && errno == ETIMEDOUT && cur >= HEAP_MUTEX_BIT) {
                uint32_t pid = cur & HEAP_MUTEX_PID;
                if (!heap_pid_alive(pid)) {
                    if (__atomic_compare_exchange_n(&hdr->mutex, &cur, 0,
                            0, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)) {
                        __atomic_add_fetch(&hdr->stat_recoveries, 1, __ATOMIC_RELAXED);
                        /* Wake one waiter so recovery latency is not bounded by the 2s timeout. */
                        if (__atomic_load_n(&hdr->mutex_waiters, __ATOMIC_RELAXED) > 0)
                            syscall(SYS_futex, &hdr->mutex, FUTEX_WAKE, 1, NULL, NULL, 0);
                    }
                }
            }
        }


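The release path is not included in this excerpt; the following is a minimal sketch of what the matching unlock could look like, assuming the heap_mutex_unlock name and the mutex/mutex_waiters fields shown above (not taken from heap.h itself).

/* Sketch only: clear the owner word with release semantics, then wake a
 * single futex waiter if any process has registered itself. */
static inline void heap_mutex_unlock(HeapHeader *hdr) {
    __atomic_store_n(&hdr->mutex, 0, __ATOMIC_RELEASE);
    if (__atomic_load_n(&hdr->mutex_waiters, __ATOMIC_RELAXED) > 0)
        syscall(SYS_futex, &hdr->mutex, FUTEX_WAKE, 1, NULL, NULL, 0);
}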
