Alien-Judy

 view release on metacpan or  search on metacpan

src/judy-1.0.5/test/malloc-pre2.8a.c  view on Meta::CPAN

  Equivalent to valloc(minimum-page-that-holds(n)), that is,
  round up n to nearest pagesize.
 */
Void_t*  public_pVALLOc(size_t);

/*
  cfree(Void_t* p);
  Equivalent to free(p).

  cfree is needed/defined on some systems that pair it with calloc,
  for odd historical reasons (such as: cfree is used in example 
  code in the first edition of K&R).
*/
void     public_cFREe(Void_t*);

/*
  malloc_trim(size_t pad);

  If possible, gives memory back to the system (via negative
  arguments to sbrk) if there is unused memory at the `high' end of
  the malloc pool. You can call this after freeing large blocks of
  memory to potentially reduce the system-level memory requirements
  of a program. However, it cannot guarantee to reduce memory. Under
  some allocation patterns, some large free blocks of memory will be
  locked between two used chunks, so they cannot be given back to
  the system.
  
  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed. If this argument is zero,
  only the minimum amount of memory to maintain internal data
  structures will be left (one page or less). Non-zero arguments
  can be supplied to maintain enough trailing space to service
  future expected allocations without having to re-obtain memory
  from the system.
  
  Malloc_trim returns 1 if it actually released any memory, else 0.
  On systems that do not support "negative sbrks", it will always
  return 0.
*/
int      public_mTRIm(size_t);

/*
  malloc_usable_size(Void_t* p);

  Returns the number of bytes you can actually use in
  an allocated chunk, which may be more than you requested (although
  often not) due to alignment and minimum size constraints.
  You can use this many bytes without worrying about
  overwriting other allocated objects. This is not a particularly great
  programming practice. malloc_usable_size can be more useful in
  debugging and assertions, for example:

  p = malloc(n);
  assert(malloc_usable_size(p) >= 256);

*/
size_t   public_mUSABLe(Void_t*);

/*
  malloc_stats();
  Prints on stderr the amount of space obtained from the system (both
  via sbrk and mmap), the maximum amount (which may be more than
  current if malloc_trim and/or munmap got called), and the current
  number of bytes allocated via malloc (or realloc, etc) but not yet
  freed. Note that this is the number of bytes allocated, not the
  number requested. It will be larger than the number requested
  because of alignment and bookkeeping overhead. Because it includes
  alignment wastage as being in use, this figure may be greater than
  zero even when no user-level chunks are allocated.

  The reported current and maximum system memory can be inaccurate if
  a program makes other calls to system memory allocation functions
  (normally sbrk) outside of malloc.

  malloc_stats prints only the most commonly interesting statistics.
  More information can be obtained by calling mallinfo.

*/
void     public_mSTATs();

/* mallopt tuning options */

/*
  M_MXFAST is the maximum request size used for "fastbins", special bins
  that hold returned chunks without consolidating their spaces. This
  enables future requests for chunks of the same size to be handled
  very quickly, but can increase fragmentation, and thus increase the
  overall memory footprint of a program.

  This malloc manages fastbins very conservatively yet still
  efficiently, so fragmentation is rarely a problem for values less
  than or equal to the default.  The maximum supported value of MXFAST
  is 64 (also the default). You wouldn't want it any higher than this
  anyway.  Fastbins are designed especially for use with many small
  structs, objects or strings -- the default handles
  structs/objects/arrays with sizes up to 16 4byte fields, or small
  strings representing words, tokens, etc. Using fastbins for larger
  objects normally worsens fragmentation without improving speed.

  M_MXFAST is set in REQUEST size units. It is internally used in
  chunksize units, which adds padding and alignment.  You can reduce
  M_MXFAST to 0 to disable all use of fastbins.  This causes the malloc
  algorithm to be a closer approximation of fifo-best-fit in all cases,
  not just for larger requests, but will generally cause it to be
  slower.
*/


/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
/* Parameter number passed to mallopt() to tune the fastbin limit;
   value 1 matches the SVID/XPG convention for M_MXFAST. */
#ifndef M_MXFAST
#define M_MXFAST            1    
#endif

/* Default maximum request size (in bytes) served from fastbins;
   64 is also the maximum supported value (see commentary above). */
#ifndef DEFAULT_MXFAST
#define DEFAULT_MXFAST     64
#endif


/*
  M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
  to keep before releasing via malloc_trim in free().

src/judy-1.0.5/test/malloc-pre2.8a.c  view on Meta::CPAN

    topsize = chunksize(av->top);
    avail = topsize;
    nblocks = 1;  /* top always exists */

    /* traverse fastbins */
    nfastblocks = 0;
    fastavail = 0;
    
    for (i = 0; i < NFASTBINS; ++i) {
      for (p = av->fastbins[i]; p != 0; p = p->fd) {
        ++nfastblocks;
        fastavail += chunksize(p);
      }
    }
    
    avail += fastavail;
    
    /* traverse small bins */
    for (i = 2; i < NBINS; ++i) {
      mbinptr b = bin_at(av, i);
      mchunkptr p;
      for (p = b->bk; p != b; p = p->bk) {
        ++nblocks;
        avail += chunksize(p);
      }
    }
    
    /* traverse tree bins */
    for (i = 0; i < NBINS; ++i) {
      tchunkptr t = *(tbin_at(av, i));
      if (t != 0)
        count_tree_blocks(t, &nblocks, &avail);
    }
  }

  mi.smblks = nfastblocks;
  mi.smblks = 0;
  mi.ordblks = nblocks;
  mi.fordblks = avail;
  mi.uordblks = av->sbrked_mem - avail;
  mi.arena = av->sbrked_mem;
  mi.hblks = av->n_mmaps;
  mi.hblkhd = av->mmapped_mem;
  mi.fsmblks = 0;
  mi.keepcost = topsize;
  mi.usmblks = av->max_total_mem;
  return mi;
}

/*
  ------------------------------ malloc_stats ------------------------------
*/

/*
  mSTATs():
  Print summary allocation statistics on stderr, as gathered by
  mALLINFo(): the maximum system bytes ever obtained, the current
  system bytes (sbrk arena plus mmapped space), and the bytes
  currently in use.  On WIN32 it additionally reports raw VM
  counters from vminfo() and, when available, kernel/user CPU
  times from cpuinfo().  No return value; output only.
*/
void mSTATs() {
  struct mallinfo mi = mALLINFo();

#ifdef WIN32
  {
    /* Renamed from `free`: the original shadowed the standard
       free() function inside this scope. */
    CHUNK_SIZE_T  free_bytes, reserved, committed;
    vminfo (&free_bytes, &reserved, &committed);
    fprintf(stderr, "free bytes       = %10lu\n", 
            free_bytes);
    fprintf(stderr, "reserved bytes   = %10lu\n", 
            reserved);
    fprintf(stderr, "committed bytes  = %10lu\n", 
            committed);
  }
#endif


  fprintf(stderr, "max system bytes = %10lu\n",
          (CHUNK_SIZE_T)(mi.usmblks));
  fprintf(stderr, "system bytes     = %10lu\n",
          (CHUNK_SIZE_T)(mi.arena + mi.hblkhd));
  fprintf(stderr, "in use bytes     = %10lu\n",
          (CHUNK_SIZE_T)(mi.uordblks + mi.hblkhd));

#if 0
  fprintf(stderr, "n0     = %10u\n", n0);
  fprintf(stderr, "n1     = %10u\n", n1);
  fprintf(stderr, "n2     = %10u\n", n2);
  fprintf(stderr, "n3     = %10u\n", n3);
  fprintf(stderr, "n4     = %10u\n", n4);
  fprintf(stderr, "n5     = %10u\n", n5);
  fprintf(stderr, "n6     = %10u\n", n6);
  fprintf(stderr, "n7     = %10u\n", n7);
  fprintf(stderr, "n8     = %10u\n", n8);
#endif


#ifdef WIN32 
  {
    CHUNK_SIZE_T  kernel, user;
    if (cpuinfo (TRUE, &kernel, &user)) {
      fprintf(stderr, "kernel ms        = %10lu\n", 
              kernel);
      fprintf(stderr, "user ms          = %10lu\n", 
              user);
    }
  }
#endif
}


/*
  ------------------------------ mallopt ------------------------------
*/

/*
  mALLOPt(param_number, value):
  Adjust one malloc tuning parameter.  Returns 1 if the parameter
  was set, 0 if the parameter number is unrecognized or the value
  is unacceptable for that parameter.
*/
int mALLOPt(int param_number, int value) {
  mstate av = get_malloc_state();

  ensure_initialization(av);

  if (param_number == M_MXFAST) {
    /* Consolidate existing fastbin chunks before changing the
       limit, regardless of whether the new value is accepted. */
    malloc_consolidate(av);
    if (value < 0 || value > MAX_FAST_SIZE)
      return 0;
    set_max_fast(av, value);
    return 1;
  }

  if (param_number == M_TRIM_THRESHOLD) {
    av->trim_threshold = value;
    return 1;
  }

  if (param_number == M_TOP_PAD) {
    av->top_pad = value;
    return 1;
  }

  if (param_number == M_MMAP_THRESHOLD) {
    av->mmap_threshold = value;
    return 1;
  }

  if (param_number == M_MMAP_MAX) {
#if !HAVE_MMAP
    /* Without mmap support, only a limit of zero is meaningful. */
    if (value != 0)
      return 0;
#endif
    av->n_mmaps_max = value;
    return 1;
  }

  /* Unknown parameter number. */
  return 0;
}

/* ----------- Routines dealing with system allocation -------------- */

#if HAVE_MMAP
static mchunkptr mmap_malloc(mstate av, INTERNAL_SIZE_T nb) {
  char* mm;                       /* return value from mmap call*/
  CHUNK_SIZE_T    sum;            /* for updating stats */
  mchunkptr       p;              /* the allocated/returned chunk */
  long            size;           
  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */



( run in 0.509 second using v1.01-cache-2.11-cpan-39bf76dae61 )