Affix
view release on metacpan or search on metacpan
infix/src/jit/executor.c view on Meta::CPAN
*p++ = 16; // def_cfa_offset 16
*p++ = 0x9d;
*p++ = 2; // offset r29 (x29), 2 (CFA - 16)
*p++ = 0x9e;
*p++ = 1; // offset r30 (x30/lr), 1 (CFA - 8)
*p++ = 0x41; // loc +1 (4 bytes, after mov)
*p++ = 0x0d;
*p++ = 29; // def_cfa_register r29
}
else {
// stp x29, x30, [sp, #-16]!; stp x19, x20, ...; stp x21, x22, ...; mov x29, sp
*p++ = 0x41; // after stp x29, x30
*p++ = 0x0e;
*p++ = 16;
*p++ = 0x9d;
*p++ = 2; // x29 at CFA - 16
*p++ = 0x9e;
*p++ = 1; // x30 at CFA - 8
*p++ = 0x41; // after stp x19, x20
*p++ = 0x0e;
*p++ = 32;
*p++ = 0x93;
*p++ = 4; // x19 at CFA - 32
*p++ = 0x94;
*p++ = 3; // x20 at CFA - 24
*p++ = 0x41; // after stp x21, x22
*p++ = 0x0e;
*p++ = 48;
*p++ = 0x95;
*p++ = 6; // x21 at CFA - 48
*p++ = 0x96;
*p++ = 5; // x22 at CFA - 40
*p++ = 0x41; // after mov x29, sp
*p++ = 0x0d;
*p++ = 29; // def_cfa_register x29 (offset remains 48)
}
while ((size_t)(p - eh) < (cie_size + fde_size))
*p++ = 0;
*(uint32_t *)p = 0; // Terminator
// Register the frame with the runtime.
extern void __register_frame(void *);
pthread_mutex_lock(&g_dwarf_mutex);
__register_frame(eh);
pthread_mutex_unlock(&g_dwarf_mutex);
exec->eh_frame_ptr = eh;
INFIX_DEBUG_PRINTF("Registered ARM64 DWARF .eh_frame at %p for JIT code at %p", (void *)eh, exec->rx_ptr);
}
#endif
/**
 * @internal
 * @brief Frees a block of executable memory with use-after-free hardening.
 *
 * @details Before freeing the memory, this function first attempts to change the
 * memory protection to be inaccessible (`PROT_NONE` or `PAGE_NOACCESS`). This
 * creates a "guard page" that will cause an immediate, safe crash if a dangling
 * pointer to the freed trampoline is ever used, making use-after-free bugs
 * much easier to detect and debug.
 *
 * Platform strategy:
 *  - Windows: delete the SEH unwind registration, then guard-page and release.
 *  - macOS / Android / OpenBSD / DragonFly: single mapping; guard-page + munmap.
 *  - Other POSIX: dual mapping (separate RW and RX views backed by a shared fd);
 *    deregister the DWARF frame, unmap both views, and close the fd.
 *
 * @param exec The executable memory block to free.
 */
void infix_executable_free(infix_executable_t exec) {
    // A zero size means nothing was ever allocated for this block; nothing to do.
    if (exec.size == 0)
        return;
#if defined(INFIX_OS_WINDOWS)
#if defined(INFIX_ARCH_X64) || defined(INFIX_ARCH_AARCH64)
    // Remove the SEH unwind info before the code it describes goes away, so the
    // OS unwinder never walks a table pointing at freed memory.
    if (exec.seh_registration)
        RtlDeleteFunctionTable((PRUNTIME_FUNCTION)exec.seh_registration);
#endif
    if (exec.rw_ptr) {
        // Change protection to NOACCESS to catch use-after-free bugs immediately.
        if (!VirtualProtect(exec.rw_ptr, exec.size, PAGE_NOACCESS, &(DWORD){0}))
            INFIX_DEBUG_PRINTF("WARNING: VirtualProtect failed to set PAGE_NOACCESS guard page.");
        // MEM_RELEASE requires a size of 0 and frees the entire reservation.
        VirtualFree(exec.rw_ptr, 0, MEM_RELEASE);
    }
#elif defined(INFIX_OS_MACOS)
    // On macOS with MAP_JIT, the memory is managed with special thread-local permissions.
    // We only need to unmap the single mapping.
    if (exec.rw_ptr) {
        // Creating a guard page before unmapping is good practice.
        mprotect(exec.rw_ptr, exec.size, PROT_NONE);
        munmap(exec.rw_ptr, exec.size);
    }
#elif defined(INFIX_OS_ANDROID) || defined(INFIX_OS_OPENBSD) || defined(INFIX_OS_DRAGONFLY)
    // Other single-mapping POSIX systems: same guard-then-unmap pattern as macOS.
    if (exec.rw_ptr) {
        mprotect(exec.rw_ptr, exec.size, PROT_NONE);
        munmap(exec.rw_ptr, exec.size);
    }
#else
    // Dual-mapping POSIX: protect and unmap both views.
    if (exec.eh_frame_ptr) {
        // Deregister the DWARF unwind frame (under the same mutex used when it
        // was registered) before freeing its backing storage.
        extern void __deregister_frame(void *);
        pthread_mutex_lock(&g_dwarf_mutex);
        __deregister_frame(exec.eh_frame_ptr);
        pthread_mutex_unlock(&g_dwarf_mutex);
        infix_free(exec.eh_frame_ptr);
    }
    // Revoke execute permission first so any stale trampoline pointer faults
    // instead of executing freed code.
    if (exec.rx_ptr)
        mprotect(exec.rx_ptr, exec.size, PROT_NONE);
    if (exec.rw_ptr)
        munmap(exec.rw_ptr, exec.size);
    if (exec.rx_ptr && exec.rx_ptr != exec.rw_ptr)  // rw_ptr might be same as rx_ptr on some platforms
        munmap(exec.rx_ptr, exec.size);
    // The shared-memory fd backing the dual mapping is no longer needed.
    if (exec.shm_fd >= 0)
        close(exec.shm_fd);
#endif
}
/**
* @internal
* @brief Makes a block of JIT memory executable and flushes instruction caches.
*
* @details This function completes the W^X process.
* - On single-mapping platforms, it changes the memory protection from RW to RX.
* - On dual-mapping platforms, this is a no-op as the RX mapping already exists.
*
* Crucially, it also handles flushing the CPU's instruction cache on architectures
* that require it (like AArch64). This is necessary because the CPU may have cached
( run in 0.732 second using v1.01-cache-2.11-cpan-39bf76dae61 )