/* ================================================================
 * Data-Graph-Shared — shared-memory graph file layout
*
* Header (128 bytes)
* Node data: max_nodes * sizeof(int64_t) — node values
* Node heads: max_nodes * sizeof(uint32_t) — first-edge index per node
* Node bitmap: ceil(max_nodes/64) * 8 — allocation bitmap
* Edges: max_edges * sizeof(GraphEdge) — edge pool
* Edge bitmap: ceil(max_edges/64) * 8 — edge allocation bitmap
* ================================================================ */
/* One directed, weighted edge in the shared edge pool.
 * Edges are threaded into per-node singly linked adjacency lists via
 * `next`; each node's list head lives in the node-heads array. */
typedef struct {
uint32_t dst; /* destination node index */
uint32_t next; /* index of next edge in adjacency list, GRAPH_NONE = end */
int64_t weight; /* edge weight (semantics are application-defined) */
} GraphEdge;
/* 128-byte header at offset 0 of the shared mapping.  The *_off fields
 * record byte offsets of the region arrays described in the layout comment
 * above; capacities are fixed at creation time.  The numeric comments on
 * the mutable fields are their byte offsets within the header. */
typedef struct {
uint32_t magic; /* format magic; expected value defined elsewhere — TODO confirm */
uint32_t version; /* layout version */
uint32_t max_nodes; /* node pool capacity */
uint32_t max_edges; /* edge pool capacity */
uint64_t total_size; /* total size of the mapping in bytes */
uint64_t node_data_off; /* offset of int64_t node-value array */
uint64_t node_heads_off; /* offset of per-node first-edge indices */
uint64_t node_bitmap_off; /* offset of node allocation bitmap */
uint64_t edge_data_off; /* offset of GraphEdge pool */
uint64_t edge_bitmap_off; /* offset of edge allocation bitmap */
uint32_t node_count; /* live node count, offset 64 */
uint32_t edge_count; /* live edge count, offset 68 */
uint32_t mutex; /* futex word: 0 = free, else GRAPH_MUTEX_BIT|owner-pid; offset 72 */
uint32_t mutex_waiters; /* count of waiters parked on the futex; offset 76 */
uint64_t stat_ops; /* presumably a total-operations counter — TODO confirm; offset 80 */
uint8_t _pad1[40]; /* pad header to exactly 128 bytes; offsets 88-127 */
} GraphHeader;
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
_Static_assert(sizeof(GraphHeader) == 128, "GraphHeader must be 128 bytes");
#endif
/* Per-process view of an open graph: typed pointers resolved from the
 * header offsets after mmap, plus bookkeeping needed to unmap/close. */
typedef struct {
GraphHeader *hdr; /* mapped header (start of the mapping) */
int64_t *node_data; /* node-value array */
uint32_t *node_heads; /* first-edge index per node */
uint64_t *node_bitmap; /* node allocation bitmap */
GraphEdge *edges; /* edge pool */
uint64_t *edge_bitmap; /* edge allocation bitmap */
uint32_t node_bwords; /* number of 64-bit words in node bitmap */
uint32_t edge_bwords; /* number of 64-bit words in edge bitmap */
size_t mmap_size; /* length of the mapping passed to munmap */
char *path; /* backing file path — presumably owned/freed on close; TODO confirm */
int notify_fd; /* NOTE(review): purpose not visible in this chunk — confirm */
int backing_fd; /* fd of the backing file */
} GraphHandle;
/* ================================================================
* Mutex (same pattern as Heap)
* ================================================================ */
/* Upper bound on one FUTEX_WAIT; on timeout the waiter probes whether the
 * recorded lock-owner PID is still alive (dead-owner recovery). */
static const struct timespec graph_lock_timeout = { 2, 0 };
/* Return nonzero when `pid` appears to refer to a live process.
 * pid 0 means "owner unknown" and is conservatively treated as alive.
 * Probes with kill(pid, 0): only ESRCH proves the process is gone
 * (EPERM etc. still mean it exists). */
static inline int graph_pid_alive(uint32_t pid) {
    if (pid == 0)
        return 1;
    if (kill((pid_t)pid, 0) == 0)
        return 1;
    return errno != ESRCH;
}
/* Acquire the cross-process mutex stored in the shared header.
 *
 * Lock word protocol: 0 when free; when held it stores
 * GRAPH_MUTEX_BIT | (owner pid & GRAPH_MUTEX_PID) so a crashed owner can be
 * detected and the lock recovered.  (GRAPH_MUTEX_BIT is presumably the top
 * bit — the `cur >= GRAPH_MUTEX_BIT` test below relies on that; confirm.)
 *
 * Strategy:
 *   1. Spin up to 32 CAS attempts (0 -> mypid) with a cpu-relax hint.
 *   2. Register as a waiter and FUTEX_WAIT on the observed value with a
 *      2-second timeout (graph_lock_timeout).
 *   3. On timeout, if the recorded owner PID is no longer alive, CAS the
 *      lock back to 0 and wake one waiter so the lock is not stranded. */
static inline void graph_mutex_lock(GraphHeader *hdr) {
uint32_t mypid = GRAPH_MUTEX_BIT | ((uint32_t)getpid() & GRAPH_MUTEX_PID);
for (int spin = 0; ; spin++) {
uint32_t expected = 0;
/* Fast path: uncontended acquire. */
if (__atomic_compare_exchange_n(&hdr->mutex, &expected, mypid,
1, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
return;
if (spin < 32) {
/* Short busy-wait before falling back to the futex. */
#if defined(__x86_64__) || defined(__i386__)
__asm__ volatile("pause" ::: "memory");
#elif defined(__aarch64__)
__asm__ volatile("yield" ::: "memory");
#endif
continue;
}
__atomic_add_fetch(&hdr->mutex_waiters, 1, __ATOMIC_RELAXED);
/* Re-read so FUTEX_WAIT only sleeps if the word still holds this exact
 * value (the kernel re-checks it atomically before sleeping). */
uint32_t cur = __atomic_load_n(&hdr->mutex, __ATOMIC_RELAXED);
if (cur != 0) {
long rc = syscall(SYS_futex, &hdr->mutex, FUTEX_WAIT, cur,
&graph_lock_timeout, NULL, 0);
/* Timed out while a pid-tagged owner was recorded: probe the owner. */
if (rc == -1 && errno == ETIMEDOUT && cur >= GRAPH_MUTEX_BIT) {
uint32_t pid = cur & GRAPH_MUTEX_PID;
if (!graph_pid_alive(pid) &&
__atomic_compare_exchange_n(&hdr->mutex, &cur, 0,
0, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)) {
/* Recovered — wake one waiter so it can proceed. */
syscall(SYS_futex, &hdr->mutex, FUTEX_WAKE, 1, NULL, NULL, 0);
}
}
}
__atomic_sub_fetch(&hdr->mutex_waiters, 1, __ATOMIC_RELAXED);
/* Retry from the spin phase after every wakeup/timeout. */
spin = 0;
}
}
/* Release the shared mutex: publish 0 with release ordering, then wake one
 * futex waiter if any process has registered itself as waiting. */
static inline void graph_mutex_unlock(GraphHeader *hdr) {
    __atomic_store_n(&hdr->mutex, 0, __ATOMIC_RELEASE);
    uint32_t waiting = __atomic_load_n(&hdr->mutex_waiters, __ATOMIC_RELAXED);
    if (waiting > 0)
        syscall(SYS_futex, &hdr->mutex, FUTEX_WAKE, 1, NULL, NULL, 0);
}
/* ================================================================
* Bitmap helpers
* ================================================================ */
/* Test (not set!) bit `idx` of bitmap `bm`; returns 1 if the slot is
 * allocated, 0 otherwise.  The name reads as the question "is the bit
 * set?".  Acquire load pairs with the release modifications made by the
 * alloc/free helpers. */
static inline int graph_bit_set(uint64_t *bm, uint32_t idx) {
    uint64_t w = __atomic_load_n(&bm[idx >> 6], __ATOMIC_ACQUIRE);
    return (int)((w >> (idx & 63)) & (uint64_t)1);
}
/* Claim the lowest clear bit across `bwords` 64-bit words and return its
 * index, refusing indices >= max (handles a final partial word: any free
 * bit past `max` implies no usable bit remains).  Returns -1 when full.
 * The load/claim pair is not a CAS loop, so allocations must already be
 * serialized by the graph mutex. */
static inline int32_t graph_bit_alloc(uint64_t *bm, uint32_t bwords, uint32_t max) {
    uint32_t wi = 0;
    while (wi < bwords) {
        uint64_t occupied = __atomic_load_n(&bm[wi], __ATOMIC_RELAXED);
        uint64_t vacant = ~occupied;
        if (vacant != 0) {
            int bit = __builtin_ctzll(vacant);
            uint32_t slot = wi * 64u + (uint32_t)bit;
            if (slot >= max)
                return -1;
            __atomic_fetch_or(&bm[wi], (uint64_t)1 << bit, __ATOMIC_RELEASE);
            return (int32_t)slot;
        }
        wi++;
    }
    return -1;
}
/* Clear bit `idx` of bitmap `bm`, returning the slot to the free pool.
 * Release ordering publishes prior writes to the slot's data before the
 * bit becomes visible as free.  Idempotent on an already-clear bit. */
static inline void graph_bit_free(uint64_t *bm, uint32_t idx) {
    uint64_t mask = (uint64_t)1 << (idx & 63);
    __atomic_fetch_and(&bm[idx >> 6], ~mask, __ATOMIC_RELEASE);
}
/* ================================================================
* Graph operations (must hold mutex)
* ================================================================ */
/* Allocate a node slot, store its payload, and initialize it with an
 * empty adjacency list.  Caller must hold the graph mutex.
 * Returns the new node index, or -1 when the node pool is exhausted. */
static inline int32_t graph_add_node_locked(GraphHandle *h, int64_t data) {
    int32_t slot = graph_bit_alloc(h->node_bitmap, h->node_bwords, h->hdr->max_nodes);
    if (slot < 0)
        return -1;
    h->node_heads[slot] = GRAPH_NONE;  /* no edges yet */
    h->node_data[slot] = data;
    __atomic_fetch_add(&h->hdr->node_count, 1, __ATOMIC_RELAXED);
    return slot;
}
static inline int graph_add_edge_locked(GraphHandle *h, uint32_t src, uint32_t dst, int64_t weight) {
if (src >= h->hdr->max_nodes || dst >= h->hdr->max_nodes) return 0;
if (!graph_bit_set(h->node_bitmap, src) || !graph_bit_set(h->node_bitmap, dst))
/* NOTE(review): source truncated here — the remainder of
 * graph_add_edge_locked is missing from this copy. */