Alien-Judy
view release on metacpan or search on metacpan
src/judy-1.0.5/test/malloc-pre2.8a.c view on Meta::CPAN
/*
MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
It must be a power of two at least 2 * SIZE_SZ, even on machines
for which smaller alignments would suffice. It may be defined as
larger than this though. Note however that code and data structures
are optimized for the case of 8-byte alignment.
*/
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT (2 * SIZE_SZ)
#endif /* MALLOC_ALIGNMENT */
/* The corresponding bit mask value */
#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
/*
REALLOC_ZERO_BYTES_FREES should be set if a call to
realloc with zero bytes should be the same as a call to free.
Some people think it should. Otherwise, since this malloc
returns a unique pointer for malloc(0), so does realloc(p, 0).
*/
/* #define REALLOC_ZERO_BYTES_FREES */
/*
TRIM_FASTBINS controls whether free() of a very small chunk can
immediately lead to trimming. Setting to true (1) can reduce memory
footprint, but will almost always slow down programs that use a lot
of small chunks.
Define this only if you are willing to give up some speed to more
aggressively reduce system-level memory footprint when releasing
memory in programs that use many small chunks. You can get
essentially the same effect by setting MXFAST to 0, but this can
lead to even greater slowdowns in programs using many small chunks.
TRIM_FASTBINS is an in-between compile-time option, that disables
only those chunks bordering topmost memory from being placed in
fastbins.
*/
#ifndef TRIM_FASTBINS
#define TRIM_FASTBINS 0
#endif /* TRIM_FASTBINS */
/*
USE_DL_PREFIX will prefix all public routines with the string 'dl'.
This is necessary when you only want to use this malloc in one part
of a program, using your regular system malloc elsewhere.
*/
/* #define USE_DL_PREFIX */
/*
USE_MALLOC_LOCK causes wrapper functions to surround each
callable routine with pthread mutex lock/unlock.
USE_MALLOC_LOCK forces USE_PUBLIC_MALLOC_WRAPPERS to be defined
*/
/* #define USE_MALLOC_LOCK */
/*
If USE_PUBLIC_MALLOC_WRAPPERS is defined, every public routine is
actually a wrapper function that first calls MALLOC_PREACTION, then
calls the internal routine, and follows it with
MALLOC_POSTACTION. This is needed for locking, but you can also use
this, without USE_MALLOC_LOCK, for purposes of interception,
instrumentation, etc. It is a sad fact that using wrappers often
noticeably degrades performance of malloc-intensive programs.
*/
/* USE_MALLOC_LOCK requires the wrapper layer (the wrappers are where the
   lock/unlock calls live), so it forces USE_PUBLIC_MALLOC_WRAPPERS on. */
#ifdef USE_MALLOC_LOCK
#define USE_PUBLIC_MALLOC_WRAPPERS
#else
/* #define USE_PUBLIC_MALLOC_WRAPPERS */
#endif /* USE_MALLOC_LOCK */
/*
Two-phase name translation.
All of the actual routines are given mangled names.
When wrappers are used, they become the public callable versions.
When DL_PREFIX is used, the callable names are prefixed.
*/
/* When the wrapper layer is NOT in use, there is nothing between callers
   and the internal routines, so each internal (mangled) name is mapped
   directly onto its public_* name: the internal routine IS the public
   entry point.  (With wrappers enabled, the public_* functions defined
   below call the internal routines instead.) */
#ifndef USE_PUBLIC_MALLOC_WRAPPERS
#define cALLOc public_cALLOc
#define fREe public_fREe
#define cFREe public_cFREe
#define mALLOc public_mALLOc
#define mEMALIGn public_mEMALIGn
#define rEALLOc public_rEALLOc
#define vALLOc public_vALLOc
#define pVALLOc public_pVALLOc
#define mALLINFo public_mALLINFo
#define mALLOPt public_mALLOPt
#define mTRIm public_mTRIm
#define mSTATs public_mSTATs
#define mUSABLe public_mUSABLe
#define iCALLOc public_iCALLOc
#define iCOMALLOc public_iCOMALLOc
#endif /* !USE_PUBLIC_MALLOC_WRAPPERS */
#ifdef USE_DL_PREFIX
#define public_cALLOc dlcalloc
#define public_fREe dlfree
#define public_cFREe dlcfree
#define public_mALLOc dlmalloc
#define public_mEMALIGn dlmemalign
#define public_rEALLOc dlrealloc
#define public_vALLOc dlvalloc
#define public_pVALLOc dlpvalloc
#define public_mALLINFo dlmallinfo
#define public_mALLOPt dlmallopt
src/judy-1.0.5/test/malloc-pre2.8a.c view on Meta::CPAN
#define DEFAULT_MMAP_MAX (65536)
#else
#define DEFAULT_MMAP_MAX (0)
#endif
#endif
#ifdef __cplusplus
}; /* end of extern "C" */
#endif
/*
========================================================================
To make a fully customizable malloc.h header file, cut everything
above this line, put into file malloc.h, edit to suit, and #include it
on the next line, as well as in programs that use this malloc.
========================================================================
*/
/* #include "malloc.h" */
/* --------------------- public wrappers ---------------------- */
#ifdef USE_PUBLIC_MALLOC_WRAPPERS
/* Declare all routines as internal (file-static); the public_* wrappers
   below are the only externally visible entry points in this mode. */
static Void_t* mALLOc(size_t);
static void fREe(Void_t*);
static Void_t* rEALLOc(Void_t*, size_t);
static Void_t* mEMALIGn(size_t, size_t);
static Void_t* vALLOc(size_t);
static Void_t* pVALLOc(size_t);
static Void_t* cALLOc(size_t, size_t);
static Void_t** iCALLOc(size_t, size_t, Void_t**);
static Void_t** iCOMALLOc(size_t, size_t*, Void_t**);
static void cFREe(Void_t*);
static int mTRIm(size_t);
static size_t mUSABLe(Void_t*);
/* was 'mSTATs()': empty parentheses declare an unprototyped (K&R)
   function; use (void) for a proper prototype, consistent with
   mALLINFo(void) below. */
static void mSTATs(void);
static int mALLOPt(int, int);
static struct mallinfo mALLINFo(void);
/*
MALLOC_PREACTION and MALLOC_POSTACTION should be
defined to return 0 on success, and nonzero on failure.
The return value of MALLOC_POSTACTION is currently ignored
in wrapper functions since there is no reasonable default
action to take on failure.
*/
/* Select the lock/unlock actions used by the public wrappers:
   a spin lock on WIN32, a pthread mutex elsewhere, and no-ops (0)
   when locking is disabled. */
#ifdef USE_MALLOC_LOCK
#ifdef WIN32
static int mALLOC_MUTEx;
#define MALLOC_PREACTION slwait(&mALLOC_MUTEx)
#define MALLOC_POSTACTION slrelease(&mALLOC_MUTEx)
#else
#include <pthread.h>
static pthread_mutex_t mALLOC_MUTEx = PTHREAD_MUTEX_INITIALIZER;
#define MALLOC_PREACTION pthread_mutex_lock(&mALLOC_MUTEx)
#define MALLOC_POSTACTION pthread_mutex_unlock(&mALLOC_MUTEx)
#endif /* WIN32 */ /* NOTE(review): original label said USE_MALLOC_LOCK, but this #endif closes the WIN32 branch */
#else
/* Substitute anything you like for these */
#define MALLOC_PREACTION (0)
#define MALLOC_POSTACTION (0)
#endif /* USE_MALLOC_LOCK */
/* Public malloc wrapper: take the malloc lock (MALLOC_PREACTION),
   delegate to the internal mALLOc, then drop the lock
   (MALLOC_POSTACTION).  Returns 0 when the lock cannot be acquired;
   a failing unlock is ignored since there is no reasonable recovery. */
Void_t* public_mALLOc(size_t bytes) {
  Void_t* mem = 0;
  if (MALLOC_PREACTION == 0) {
    mem = mALLOc(bytes);
    (void)(MALLOC_POSTACTION);
  }
  return mem;
}
/* Public free wrapper: acquire the malloc lock, hand the chunk to the
   internal fREe, then release the lock.  Silently returns if the lock
   cannot be acquired; unlock failure is ignored. */
void public_fREe(Void_t* m) {
  if (MALLOC_PREACTION == 0) {
    fREe(m);
    (void)(MALLOC_POSTACTION);
  }
}
/* Public realloc wrapper: lock, delegate to the internal rEALLOc,
   unlock.  Returns 0 (leaving the original block untouched) when the
   lock cannot be acquired; unlock failure is ignored. */
Void_t* public_rEALLOc(Void_t* m, size_t bytes) {
  if (MALLOC_PREACTION != 0)
    return 0;
  {
    Void_t* mem = rEALLOc(m, bytes);
    (void)(MALLOC_POSTACTION);
    return mem;
  }
}
/* Public memalign wrapper: lock, delegate to the internal mEMALIGn,
   unlock.  Returns 0 when the lock cannot be acquired; unlock failure
   is ignored. */
Void_t* public_mEMALIGn(size_t alignment, size_t bytes) {
  Void_t* mem = 0;
  if (MALLOC_PREACTION == 0) {
    mem = mEMALIGn(alignment, bytes);
    (void)(MALLOC_POSTACTION);
  }
  return mem;
}
Void_t* public_vALLOc(size_t bytes) {
Void_t* m;
if (MALLOC_PREACTION != 0) {
return 0;
}
m = vALLOc(bytes);
if (MALLOC_POSTACTION != 0) {
src/judy-1.0.5/test/malloc-pre2.8a.c view on Meta::CPAN
brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
if (brk != (char*)(MORECORE_FAILURE)) {
/* We do not need, and cannot use, another sbrk call to find end */
snd_brk = brk + size;
/*
Record that we no longer have a contiguous sbrk region.
After the first time mmap is used as backup, we do not
ever rely on contiguous space since this could incorrectly
bridge regions.
*/
set_noncontiguous(av);
}
}
}
#endif
if (brk != (char*)(MORECORE_FAILURE)) {
av->sbrked_mem += size;
/*
If MORECORE extends previous space, we can likewise extend top size.
*/
if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
set_head(old_top, (size + old_size) | PREV_INUSE);
}
/*
Otherwise, make adjustments:
* If the first time through or noncontiguous, we need to call sbrk
just to find out where the end of memory lies.
* We need to ensure that all returned chunks from malloc will meet
MALLOC_ALIGNMENT
* If there was an intervening foreign sbrk, we need to adjust sbrk
request size to account for fact that we will not be able to
combine new space with existing space in old_top.
* Almost all systems internally allocate whole pages at a time, in
which case we might as well use the whole last page of request.
So we allocate enough more memory to hit a page boundary now,
which in turn causes future contiguous calls to page-align.
*/
else {
front_misalign = 0;
end_misalign = 0;
correction = 0;
aligned_brk = brk;
/*
If MORECORE returns an address lower than we have seen before,
we know it isn't really contiguous. This and some subsequent
checks help cope with non-conforming MORECORE functions and
the presence of "foreign" calls to MORECORE from outside of
malloc or by other threads. We cannot guarantee to detect
these in all cases, but cope with the ones we do detect.
*/
if (contiguous(av) && old_size != 0 && brk < old_end) {
set_noncontiguous(av);
}
/* handle contiguous cases */
if (contiguous(av)) {
/*
We can tolerate forward non-contiguities here (usually due
to foreign calls) but treat them as part of our space for
stats reporting.
*/
if (old_size != 0)
av->sbrked_mem += brk - old_end;
/* Guarantee alignment of first new chunk made from this space */
front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
if (front_misalign > 0) {
/*
Skip over some bytes to arrive at an aligned position.
We don't need to specially mark these wasted front bytes.
They will never be accessed anyway because
prev_inuse of av->top (and any chunk created from its start)
is always true after initialization.
*/
correction = MALLOC_ALIGNMENT - front_misalign;
aligned_brk += correction;
}
/*
If this isn't adjacent to existing space, then we will not
be able to merge with old_top space, so must add to 2nd request.
*/
correction += old_size;
/* Extend the end address to hit a page boundary */
end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
assert(correction >= 0);
snd_brk = (char*)(MORECORE(correction));
if (snd_brk == (char*)(MORECORE_FAILURE)) {
/*
If can't allocate correction, try to at least find out current
brk. It might be enough to proceed without failing.
*/
correction = 0;
snd_brk = (char*)(MORECORE(0));
}
else if (snd_brk < brk) {
/*
If the second call gives noncontiguous space even though
it says it won't, the only course of action is to ignore
src/judy-1.0.5/test/malloc-pre2.8a.c view on Meta::CPAN
If MORECORE_CONTIGUOUS is false:
* MORECORE must allocate in multiples of pagesize. It will
only be called with arguments that are multiples of pagesize.
* MORECORE(0) must return an address that is at least
MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
else (i.e. If MORECORE_CONTIGUOUS is true):
* Consecutive calls to MORECORE with positive arguments
return increasing addresses, indicating that space has been
contiguously extended.
* MORECORE need not allocate in multiples of pagesize.
Calls to MORECORE need not have args of multiples of pagesize.
* MORECORE need not page-align.
In either case:
* MORECORE may allocate more memory than requested. (Or even less,
but this will generally result in a malloc failure.)
* MORECORE must not allocate memory when given argument zero, but
instead return one past the end address of memory from previous
nonzero call. This malloc does NOT call MORECORE(0)
until at least one call with positive arguments is made, so
the initial value returned is not important.
* Even though consecutive calls to MORECORE need not return contiguous
addresses, it must be OK for malloc'ed chunks to span multiple
regions in those cases where they do happen to be contiguous.
* MORECORE need not handle negative arguments -- it may instead
just return MORECORE_FAILURE when given negative arguments.
Negative arguments are always multiples of pagesize. MORECORE
must not misinterpret negative args as large positive unsigned
args. You can suppress all such calls from even occurring by defining
MORECORE_CANNOT_TRIM.
There is some variation across systems about the type of the
argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
actually be size_t, because sbrk supports negative args, so it is
normally the signed type of the same width as size_t (sometimes
declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
matter though. Internally, we use "long" as arguments, which should
work across all reasonable possibilities.
Additionally, if MORECORE ever returns failure for a positive
request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
system allocator. This is a useful backup strategy for systems with
holes in address spaces -- in this case sbrk cannot contiguously
expand the heap, but mmap may be able to map noncontiguous space.
If you'd like mmap to ALWAYS be used, you can define MORECORE to be
a function that always returns MORECORE_FAILURE.
Malloc only has limited ability to detect failures of MORECORE
to supply contiguous space when it says it can. In particular,
multithreaded programs that do not use locks may result in
race conditions across calls to MORECORE that result in gaps
that cannot be detected as such, and subsequent corruption.
If you are using this malloc with something other than sbrk (or its
emulation) to supply memory regions, you probably want to set
MORECORE_CONTIGUOUS as false. As an example, here is a custom
allocator kindly contributed for pre-OSX macOS. It uses virtually
but not necessarily physically contiguous non-paged memory (locked
in, present and won't get swapped out). You can use it by
uncommenting this section, adding some #includes, and setting up the
appropriate defines above:
#define MORECORE osMoreCore
#define MORECORE_CONTIGUOUS 0
There is also a shutdown routine that should somehow be called for
cleanup upon program exit.
#define MAX_POOL_ENTRIES 100
#define MINIMUM_MORECORE_SIZE (64 * 1024)
static int next_os_pool;
void *our_os_pools[MAX_POOL_ENTRIES];
void *osMoreCore(int size)
{
void *ptr = 0;
static void *sbrk_top = 0;
if (size > 0)
{
if (size < MINIMUM_MORECORE_SIZE)
size = MINIMUM_MORECORE_SIZE;
if (CurrentExecutionLevel() == kTaskLevel)
ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
if (ptr == 0)
{
return (void *) MORECORE_FAILURE;
}
// save ptrs so they can be freed during cleanup
our_os_pools[next_os_pool] = ptr;
next_os_pool++;
ptr = (void *) ((((CHUNK_SIZE_T) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
sbrk_top = (char *) ptr + size;
return ptr;
}
else if (size < 0)
{
// we don't currently support shrink behavior
return (void *) MORECORE_FAILURE;
}
else
{
return sbrk_top;
}
}
// cleanup any allocated memory pools
// called as last thing before shutting down driver
void osCleanupMem(void)
src/judy-1.0.5/test/malloc-pre2.8a.c view on Meta::CPAN
#endif
/* Adjust the regions commit top */
g_last->top_committed = (char *) base_committed + remaining_commit_size;
}
} {
/* Now we are going to search and reserve. */
int contiguous = -1;
int found = FALSE;
MEMORY_BASIC_INFORMATION memory_info;
void *base_reserved;
long reserve_size;
do {
/* Assume contiguous memory */
contiguous = TRUE;
/* Round size to reserve */
reserve_size = CEIL (to_reserve, g_my_regionsize);
/* Start with the current region's top */
memory_info.BaseAddress = g_last->top_reserved;
/* Assert preconditions */
assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
assert (0 < reserve_size && reserve_size % g_regionsize == 0);
while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) {
/* Assert postconditions */
assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
#ifdef TRACE
printf ("Query %p %d %s\n", memory_info.BaseAddress, memory_info.RegionSize,
memory_info.State == MEM_FREE ? "FREE":
(memory_info.State == MEM_RESERVE ? "RESERVED":
(memory_info.State == MEM_COMMIT ? "COMMITTED": "?")));
#endif
/* Region is free, well aligned and big enough: we are done */
if (memory_info.State == MEM_FREE &&
(unsigned) memory_info.BaseAddress % g_regionsize == 0 &&
memory_info.RegionSize >= (unsigned) reserve_size) {
found = TRUE;
break;
}
/* From now on we can't get contiguous memory! */
contiguous = FALSE;
/* Recompute size to reserve */
reserve_size = CEIL (allocate_size, g_my_regionsize);
memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize;
/* Assert preconditions */
assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
assert (0 < reserve_size && reserve_size % g_regionsize == 0);
}
/* Search failed? */
if (! found)
goto sbrk_exit;
/* Assert preconditions */
assert ((unsigned) memory_info.BaseAddress % g_regionsize == 0);
assert (0 < reserve_size && reserve_size % g_regionsize == 0);
/* Try to reserve this */
base_reserved = VirtualAlloc (memory_info.BaseAddress, reserve_size,
MEM_RESERVE, PAGE_NOACCESS);
if (! base_reserved) {
int rc = GetLastError ();
if (rc != ERROR_INVALID_ADDRESS)
goto sbrk_exit;
}
/* A null pointer signals (hopefully) a race condition with another thread. */
/* In this case, we try again. */
} while (! base_reserved);
/* Check returned pointer for consistency */
if (memory_info.BaseAddress && base_reserved != memory_info.BaseAddress)
goto sbrk_exit;
/* Assert postconditions */
assert ((unsigned) base_reserved % g_regionsize == 0);
#ifdef TRACE
printf ("Reserve %p %d\n", base_reserved, reserve_size);
#endif
/* Did we get contiguous memory? */
if (contiguous) {
long start_size = (char *) g_last->top_committed - (char *) g_last->top_allocated;
/* Adjust allocation size */
allocate_size -= start_size;
/* Adjust the regions allocation top */
g_last->top_allocated = g_last->top_committed;
/* Recompute the size to commit */
to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
/* Round size to commit */
commit_size = CEIL (to_commit, g_my_pagesize);
}
/* Append the new region to the list */
if (! region_list_append (&g_last, base_reserved, reserve_size))
goto sbrk_exit;
/* Didn't we get contiguous memory? */
if (! contiguous) {
/* Recompute the size to commit */
to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
/* Round size to commit */
commit_size = CEIL (to_commit, g_my_pagesize);
}
}
}
/* Assert preconditions */
assert ((unsigned) g_last->top_committed % g_pagesize == 0);
assert (0 < commit_size && commit_size % g_pagesize == 0); {
/* Commit this */
void *base_committed = VirtualAlloc (g_last->top_committed, commit_size,
MEM_COMMIT, PAGE_READWRITE);
/* Check returned pointer for consistency */
if (base_committed != g_last->top_committed)
goto sbrk_exit;
/* Assert postconditions */
assert ((unsigned) base_committed % g_pagesize == 0);
#ifdef TRACE
printf ("Commit %p %d\n", base_committed, commit_size);
#endif
/* Adjust the regions commit top */
g_last->top_committed = (char *) base_committed + commit_size;
}
}
/* Adjust the regions allocation top */
g_last->top_allocated = (char *) g_last->top_allocated + allocate_size;
result = (char *) g_last->top_allocated - size;
/* Deallocation requested? */
} else if (size < 0) {
long deallocate_size = - size;
/* As long as we have a region to release */
while ((char *) g_last->top_allocated - deallocate_size < (char *) g_last->top_reserved - g_last->reserve_size) {
( run in 0.965 second using v1.01-cache-2.11-cpan-d0baa829c65 )