Data-Buffer-Shared

 view release on MetaCPAN or search on MetaCPAN

Shared.xs  view on Meta::CPAN

#include "buf_u32.h"
#include "buf_i64.h"
#include "buf_u64.h"
#include "buf_f32.h"
#include "buf_f64.h"
#include "buf_str.h"

#include "XSParseKeyword.h"

/* ---- as_scalar magic: prevent use-after-free by preventing buffer DESTROY
 * while the returned scalar ref is alive. We attach magic to the inner SV
 * that holds a reference to the buffer object. When the inner SV is freed,
 * the magic destructor releases the reference. ---- */

/* Magic destructor for the inner SV returned by as_scalar: drop the
 * reference to the buffer object so the buffer can finally be destroyed
 * once the scalar ref goes away. Returns 0 as required by the MGVTBL
 * svt_free contract. */
static int buf_scalar_magic_free(pTHX_ SV *sv, MAGIC *mg) {
    SV *held = mg->mg_obj;
    PERL_UNUSED_ARG(sv);
    /* mg_obj may be NULL if the magic was attached without an object. */
    if (held != NULL)
        SvREFCNT_dec(held);
    return 0;
}

static const MGVTBL buf_scalar_magic_vtbl = {

buf_generic.h  view on Meta::CPAN

    __asm__ volatile("yield" ::: "memory");
#else
    __asm__ volatile("" ::: "memory");
#endif
}

#define BUF_RWLOCK_WRITER_BIT 0x80000000U
#define BUF_RWLOCK_PID_MASK   0x7FFFFFFFU
#define BUF_RWLOCK_WR(pid)    (BUF_RWLOCK_WRITER_BIT | ((uint32_t)(pid) & BUF_RWLOCK_PID_MASK))

/* Return non-zero if the process identified by pid appears to be alive.
 * pid 0 means "owner unknown" and is conservatively treated as alive.
 * Uses kill(pid, 0), which performs permission/existence checking only:
 * a process counts as dead solely when ESRCH is reported (EPERM still
 * implies the process exists). */
static inline int buf_pid_alive(uint32_t pid) {
    if (pid == 0)
        return 1;
    if (kill((pid_t)pid, 0) == 0)
        return 1;
    return errno != ESRCH;
}

static inline void buf_recover_stale_lock(BufHeader *hdr, uint32_t observed_rwlock) {
    uint32_t mypid = BUF_RWLOCK_WR((uint32_t)getpid());
    if (!__atomic_compare_exchange_n(&hdr->rwlock, &observed_rwlock,
            mypid, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
        return;
    uint32_t seq = __atomic_load_n(&hdr->seq, __ATOMIC_ACQUIRE);

buf_generic.h  view on Meta::CPAN

        cur = __atomic_load_n(lock, __ATOMIC_RELAXED);
        if (cur >= BUF_RWLOCK_WRITER_BIT || cur == 0) {
            long rc = syscall(SYS_futex, lock, FUTEX_WAIT, cur,
                              &buf_lock_timeout, NULL, 0);
            if (rc == -1 && errno == ETIMEDOUT) {
                __atomic_sub_fetch(waiters, 1, __ATOMIC_RELAXED);
                if (cur >= BUF_RWLOCK_WRITER_BIT) {
                    uint32_t val = __atomic_load_n(lock, __ATOMIC_RELAXED);
                    if (val >= BUF_RWLOCK_WRITER_BIT) {
                        uint32_t pid = val & BUF_RWLOCK_PID_MASK;
                        if (!buf_pid_alive(pid))
                            buf_recover_stale_lock(hdr, val);
                    }
                } else {
                    /* Yielding to writer timed out — drop one writers_waiting
                     * to recover from a potentially-crashed parked writer. */
                    uint32_t wc = __atomic_load_n(writers_waiting, __ATOMIC_RELAXED);
                    while (wc > 0 && !__atomic_compare_exchange_n(
                            writers_waiting, &wc, wc - 1,
                            1, __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {}
                }

buf_generic.h  view on Meta::CPAN

        uint32_t cur = __atomic_load_n(lock, __ATOMIC_RELAXED);
        if (cur != 0) {
            long rc = syscall(SYS_futex, lock, FUTEX_WAIT, cur,
                              &buf_lock_timeout, NULL, 0);
            if (rc == -1 && errno == ETIMEDOUT) {
                __atomic_sub_fetch(waiters, 1, __ATOMIC_RELAXED);
                __atomic_sub_fetch(writers_waiting, 1, __ATOMIC_RELAXED);
                uint32_t val = __atomic_load_n(lock, __ATOMIC_RELAXED);
                if (val >= BUF_RWLOCK_WRITER_BIT) {
                    uint32_t pid = val & BUF_RWLOCK_PID_MASK;
                    if (!buf_pid_alive(pid))
                        buf_recover_stale_lock(hdr, val);
                }
                spin = 0;
                continue;
            }
        }
        __atomic_sub_fetch(waiters, 1, __ATOMIC_RELAXED);
        __atomic_sub_fetch(writers_waiting, 1, __ATOMIC_RELAXED);
        spin = 0;
    }

buf_generic.h  view on Meta::CPAN

        uint32_t s = __atomic_load_n(&hdr->seq, __ATOMIC_ACQUIRE);
        if (__builtin_expect((s & 1) == 0, 1)) return s;
        if (__builtin_expect(spin < 100000, 1)) {
            buf_spin_pause();
            spin++;
            continue;
        }
        uint32_t val = __atomic_load_n(&hdr->rwlock, __ATOMIC_RELAXED);
        if (val >= BUF_RWLOCK_WRITER_BIT) {
            uint32_t pid = val & BUF_RWLOCK_PID_MASK;
            if (!buf_pid_alive(pid)) {
                buf_recover_stale_lock(hdr, val);
                spin = 0;
                continue;
            }
        }
        struct timespec ts = {0, 1000000};
        nanosleep(&ts, NULL);
        spin = 0;
    }
}

t/09-review-fixes.t  view on Meta::CPAN

    my $buf = Data::Buffer::Shared::Str->new_anon(5, 16);
    $buf->set(0, "hello");

    $buf->lock_wr;
    is($buf->get(0), "hello", 'str get under lock_wr');
    my @vals = $buf->slice(0, 1);
    is($vals[0], "hello", 'str slice under lock_wr');
    $buf->unlock_wr;
}

# === as_scalar keeps buffer alive (prevents use-after-free) ===
{
    my $scalar_ref;
    {
        my $buf = Data::Buffer::Shared::I64->new_anon(10);
        $buf->set(0, 12345);
        $scalar_ref = $buf->as_scalar;
        # $buf leaves scope here; the magic attached by as_scalar holds
        # a reference to the buffer object, so DESTROY must not run yet.
    }
    # Dereferencing must still read live buffer memory.
    my ($first) = unpack("q<", $$scalar_ref);
    is($first, 12345, 'as_scalar keeps buffer alive after scope exit');
}

done_testing;



( run in 2.478 seconds using v1.01-cache-2.11-cpan-39bf76dae61 )