Data-HashMap-Shared
view release on metacpan or search on metacpan
print "permanent=$pv ttl_remaining=$pr\n";
}
print "\nSleeping 3 seconds...\n";
sleep 3;
my $v = shm_si_get $map, "counter";
printf "counter=%s (expired: %s)\n", $v // 'undef', defined $v ? 'no' : 'yes';
my $p = shm_si_get $map, "permanent";
printf "permanent=%s (still alive)\n", $p;
$map->unlink;
shm_generic.h view on Meta::CPAN
#else
__asm__ volatile("" ::: "memory");
#endif
}
/* rwlock word layout: bit 31 set means the lock is write-held; the low
 * 31 bits then carry the writer's PID (0 = no owner recorded). */
#define SHM_RWLOCK_WRITER_BIT 0x80000000U
/* Mask selecting the PID field (lower 31 bits) of a write-locked value. */
#define SHM_RWLOCK_PID_MASK 0x7FFFFFFFU
/* Build (not extract) a write-locked rwlock value owned by `pid`. */
#define SHM_RWLOCK_WR(pid) (SHM_RWLOCK_WRITER_BIT | ((uint32_t)(pid) & SHM_RWLOCK_PID_MASK))
/* Liveness probe for a lock-owner PID.
 * Returns 1 when the process is alive or its state is unknowable
 * (pid 0 = no owner recorded, or kill failed with EPERM etc.);
 * returns 0 only when the process is definitely gone (ESRCH). */
static inline int shm_pid_alive(uint32_t pid) {
    if (pid == 0)
        return 1; /* no owner recorded, assume alive */
    if (kill((pid_t)pid, 0) == 0)
        return 1; /* signal 0 delivered: process exists */
    /* kill failed: only ESRCH proves death; EPERM means it exists
     * but belongs to another user. */
    return errno == ESRCH ? 0 : 1;
}
/* Force-recover a stale write lock left by a dead process.
* CAS to OUR pid to hold the lock while fixing seqlock, then release.
* Using our pid (not a bare WRITER_BIT sentinel) means a subsequent
* recovering process can detect and re-recover if we crash mid-recovery. */
static inline void shm_recover_stale_lock(ShmHeader *hdr, uint32_t observed_rwlock) {
uint32_t mypid = SHM_RWLOCK_WR((uint32_t)getpid());
if (!__atomic_compare_exchange_n(&hdr->rwlock, &observed_rwlock,
shm_generic.h view on Meta::CPAN
/* Sleep when write-locked OR when yielding to waiting writers */
if (cur >= SHM_RWLOCK_WRITER_BIT || cur == 0) {
long rc = syscall(SYS_futex, lock, FUTEX_WAIT, cur,
&shm_lock_timeout, NULL, 0);
if (rc == -1 && errno == ETIMEDOUT) {
__atomic_sub_fetch(waiters, 1, __ATOMIC_RELAXED);
if (cur >= SHM_RWLOCK_WRITER_BIT) {
uint32_t val = __atomic_load_n(lock, __ATOMIC_RELAXED);
if (val >= SHM_RWLOCK_WRITER_BIT) {
uint32_t pid = val & SHM_RWLOCK_PID_MASK;
if (!shm_pid_alive(pid))
shm_recover_stale_lock(hdr, val);
}
} else {
/* Yielding to writer timed out -- drop one writers_waiting
* to recover from a potentially-crashed parked writer. */
uint32_t wc = __atomic_load_n(writers_waiting, __ATOMIC_RELAXED);
while (wc > 0 && !__atomic_compare_exchange_n(
writers_waiting, &wc, wc - 1,
1, __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {}
}
shm_generic.h view on Meta::CPAN
uint32_t cur = __atomic_load_n(lock, __ATOMIC_RELAXED);
if (cur != 0) {
long rc = syscall(SYS_futex, lock, FUTEX_WAIT, cur,
&shm_lock_timeout, NULL, 0);
if (rc == -1 && errno == ETIMEDOUT) {
__atomic_sub_fetch(waiters, 1, __ATOMIC_RELAXED);
__atomic_sub_fetch(writers_waiting, 1, __ATOMIC_RELAXED);
uint32_t val = __atomic_load_n(lock, __ATOMIC_RELAXED);
if (val >= SHM_RWLOCK_WRITER_BIT) {
uint32_t pid = val & SHM_RWLOCK_PID_MASK;
if (!shm_pid_alive(pid))
shm_recover_stale_lock(hdr, val);
}
spin = 0;
continue;
}
}
__atomic_sub_fetch(waiters, 1, __ATOMIC_RELAXED);
__atomic_sub_fetch(writers_waiting, 1, __ATOMIC_RELAXED);
spin = 0;
}
shm_generic.h view on Meta::CPAN
if (__builtin_expect((s & 1) == 0, 1)) return s;
if (__builtin_expect(spin < 100000, 1)) {
shm_rwlock_spin_pause();
spin++;
continue;
}
/* Prolonged odd seq -- check for dead writer */
uint32_t val = __atomic_load_n(&hdr->rwlock, __ATOMIC_RELAXED);
if (val >= SHM_RWLOCK_WRITER_BIT) {
uint32_t pid = val & SHM_RWLOCK_PID_MASK;
if (!shm_pid_alive(pid)) {
shm_recover_stale_lock(hdr, val);
spin = 0;
continue;
}
}
/* Writer is alive, yield CPU */
struct timespec ts = {0, 1000000}; /* 1ms */
nanosleep(&ts, NULL);
spin = 0;
}
}
/* Seqlock read-side validation: after the reader has copied its data,
 * confirm the sequence counter still holds the value sampled at the
 * start of the read. Nonzero return means the read raced a writer and
 * must be retried. */
static inline int shm_seqlock_read_retry(uint32_t *seq, uint32_t start) {
    /* Acquire fence orders the preceding data loads before the re-check. */
    __atomic_thread_fence(__ATOMIC_ACQUIRE);
    uint32_t now = __atomic_load_n(seq, __ATOMIC_RELAXED);
    return now != start;
}
t/04-lru-ttl.t view on Meta::CPAN
shm_ii_put $map, 1, 10;
shm_ii_put_ttl $map, 2, 20, 100; # long TTL
sleep 4;
my @k = shm_ii_keys $map;
# key 1 should be expired during iteration, key 2 should survive
# Note: keys iteration may or may not lazily expire
# But get should definitely expire
ok(!defined(shm_ii_get $map, 1), 'key 1 expired');
# FIX: parenthesize the inner call. Without parens, the list operator
# shm_ii_get swallowed every following argument, so is() received a
# single argument instead of (got, expected, name).
is(shm_ii_get($map, 2), 20, 'key 2 still alive');
unlink $path;
}
# clear resets LRU state
{
my $path = tmpfile();
my $map = Data::HashMap::Shared::II->new($path, 1000, 3);
shm_ii_put $map, 1, 10;
t/10-edge-cases.t view on Meta::CPAN
}
# unlink: instance method
{
my $backing = tmpfile();
my $hash = Data::HashMap::Shared::II->new($backing, 100);
shm_ii_put $hash, 1, 42;
ok(-f $backing, 'backing file exists');
ok($hash->unlink, 'instance unlink returns true');
ok(!-f $backing, 'backing file removed after unlink');
# map still works (mmap stays alive after unlink)
my $got = shm_ii_get $hash, 1;
is($got, 42, 'map still readable after unlink');
}
# unlink: class method
{
my $path = tmpfile();
my $map = Data::HashMap::Shared::II->new($path, 100);
undef $map;
ok(-f $path, 'backing file exists before class unlink');
xt/stale_recovery_crash.t view on Meta::CPAN
use warnings;
use Test::More;
use POSIX qw(_exit);
use Time::HiRes qw(time);
use Data::HashMap::Shared::II;
# Regression: Pass 14 -- if the process recovering a stale lock itself
# crashes mid-recovery, the lock must remain recoverable.
# Pre-fix: shm_recover_stale_lock held lock as bare WRITER_BIT (PID=0),
# which shm_pid_alive treated as always alive, causing permanent hang.
# This regression is hard to trigger deterministically -- it requires a
# crash in the ~5 instruction window between CAS and seqlock-fix + release.
# Best we can do in a portable test: verify that basic operations succeed
# after a forced SIGKILL during writes, which is the common trigger path.
use File::Temp qw(tmpnam);
my $path = tmpnam() . ".$$";
my $m = Data::HashMap::Shared::II->new($path, 1024);
( run in 1.444 second using v1.01-cache-2.11-cpan-39bf76dae61 )