Convert-Binary-C
ctlib/y_parser.c
#define YYRECOVERING() (!!yyerrstatus)
#define YYBACKUP(Token, Value)                                    \
  do                                                              \
    if (yychar == YYEMPTY)                                        \
      {                                                           \
        yychar = (Token);                                         \
        yylval = (Value);                                         \
        YYPOPSTACK (yylen);                                       \
        yystate = *yyssp;                                         \
        goto yybackup;                                            \
      }                                                           \
    else                                                          \
      {                                                           \
        yyerror (pState, YY_("syntax error: cannot back up"));    \
        YYERROR;                                                  \
      }                                                           \
  while (0)
/* Backward compatibility with an undocumented macro.
Use YYerror or YYUNDEF. */
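
The YYBACKUP macro above relies on the classic do ... while (0) wrapper so that its multi-statement body expands as a single C statement. A minimal stand-alone illustration of that idiom follows; the REPORT_BAD macro and the surrounding program are hypothetical and not taken from the generated parser:

#include <stdio.h>

/* Without the do/while(0) wrapper, a multi-statement macro used under an
   unbraced `if` would split into two statements and break the `else`. */
#define REPORT_BAD(msg) \
  do \
    { \
      fputs("error: ", stderr); \
      fputs(msg, stderr); \
    } \
  while (0)

int main(void)
{
    int failed = 1;
    if (failed)
        REPORT_BAD("cannot back up\n");   /* expands to exactly one statement */
    else
        puts("ok");
    return 0;
}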
ctlib/y_parser.c
      if (yyss + yystacksize - 1 <= yyssp)
        YYABORT;
    }
#endif /* !defined yyoverflow && !defined YYSTACK_RELOCATE */
  if (yystate == YYFINAL)
    YYACCEPT;
  goto yybackup;

/*-----------.
| yybackup.  |
`-----------*/
yybackup:
  /* Do appropriate processing given the current state.  Read a
     lookahead token if we need one and don't already have one.  */

  /* First try to decide what to do without reference to lookahead token.  */
  yyn = yypact[yystate];
  if (yypact_value_is_default (yyn))
    goto yydefault;
  /* Not known => get a lookahead token if don't already have one.  */
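
For orientation, the decision made right after the yybackup label can be pictured with a toy stand-alone sketch: a state whose yypact entry is the special default marker reduces without reading a lookahead token, while any other state asks the lexer for one first. The tables and names below are illustrative assumptions, not the generated ones:

#include <stdio.h>

#define PACT_DEFAULT (-32768)                 /* plays the role of YYPACT_NINF */

static const int pact[3] = { 0, PACT_DEFAULT, 4 };   /* assumed 3-state toy automaton */

static int next_token(void) { return 42; }    /* stand-in for the real yylex() */

int main(void)
{
    for (int state = 0; state < 3; state++) {
        if (pact[state] == PACT_DEFAULT) {
            printf("state %d: default reduction, no lookahead needed\n", state);
        } else {
            int lookahead = next_token();
            printf("state %d: read lookahead token %d first\n", state, lookahead);
        }
    }
    return 0;
}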
ctlib/y_pragma.c
#define YYRECOVERING() (!!yyerrstatus)
#define YYBACKUP(Token, Value)                                    \
  do                                                              \
    if (yychar == YYEMPTY)                                        \
      {                                                           \
        yychar = (Token);                                         \
        yylval = (Value);                                         \
        YYPOPSTACK (yylen);                                       \
        yystate = *yyssp;                                         \
        goto yybackup;                                            \
      }                                                           \
    else                                                          \
      {                                                           \
        yyerror (pState, YY_("syntax error: cannot back up"));    \
        YYERROR;                                                  \
      }                                                           \
  while (0)
/* Backward compatibility with an undocumented macro.
Use YYerror or YYUNDEF. */
ctlib/y_pragma.c
      if (yyss + yystacksize - 1 <= yyssp)
        YYABORT;
    }
#endif /* !defined yyoverflow && !defined YYSTACK_RELOCATE */
  if (yystate == YYFINAL)
    YYACCEPT;
  goto yybackup;

/*-----------.
| yybackup.  |
`-----------*/
yybackup:
  /* Do appropriate processing given the current state.  Read a
     lookahead token if we need one and don't already have one.  */

  /* First try to decide what to do without reference to lookahead token.  */
  yyn = yypact[yystate];
  if (yypact_value_is_default (yyn))
    goto yydefault;
  /* Not known => get a lookahead token if don't already have one.  */
av_shift|5.003007|5.003007|
av_store|5.003007|5.003007|
av_store_simple|5.035002||cV
av_tindex|5.017009|5.003007|p
av_tindex_skip_len_mg|5.025010||Viu
av_top_index|5.017009|5.003007|p
av_top_index_skip_len_mg|5.025010||Viu
av_undef|5.003007|5.003007|
av_unshift|5.003007|5.003007|
ax|5.003007|5.003007|
backup_one_GCB|5.025003||Viu
backup_one_LB|5.023007||Viu
backup_one_SB|5.021009||Viu
backup_one_WB|5.021009||Viu
bad_type_gv|5.019002||Viu
bad_type_pv|5.016000||Viu
BADVERSION|5.011004||Viu
BASEOP|5.003007||Viu
BhkDISABLE|5.013003||xV
BhkENABLE|5.013003||xV
BhkENTRY|5.013003||xVi
BhkENTRY_set|5.013003||xV
BHKf_bhk_eval|5.013006||Viu
BHKf_bhk_post_end|5.013006||Viu
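
The pipe-delimited lines above follow the embedded Devel::PPPort API table format: the API name, the Perl version that introduced it, the oldest version it can be used with (a 'p' flag indicates that ppport.h provides a backport), and flag letters. A hedged XS-level sketch using one of those entries, av_top_index(), is shown below; the helper name last_element is an illustration and not part of this distribution:

#include "EXTERN.h"
#include "perl.h"
#include "XSUB.h"
#include "ppport.h"

/* Return the last element of a Perl array, or undef if it is empty. */
static SV *last_element(pTHX_ AV *av)
{
    SSize_t top = av_top_index(av);              /* highest index, -1 if the array is empty */
    SV **slot = (top < 0) ? NULL : av_fetch(av, top, 0);
    return slot ? *slot : &PL_sv_undef;
}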
tests/include/pdclib/functions/_dlmalloc/malloc.c
If non-zero, suppresses traversals of memory segments
returned by either MORECORE or CALL_MMAP. This disables
merging of segments that are contiguous, and selectively
releasing them to the OS if unused, but bounds execution times.
HAVE_MMAP default: 1 (true)
True if this system supports mmap or an emulation of it. If so, and
HAVE_MORECORE is not true, MMAP is used for all system
allocation. If set and HAVE_MORECORE is true as well, MMAP is
primarily used to directly allocate very large blocks. It is also
used as a backup strategy in cases where MORECORE fails to provide
space from system. Note: A single call to MUNMAP is assumed to be
able to unmap memory that may have been allocated using multiple calls
to MMAP, so long as they are adjacent.
HAVE_MREMAP default: 1 on linux, else 0
If true, realloc() uses mremap() to re-allocate large blocks and
extend or shrink allocation spaces.
MMAP_CLEARS default: 1 except on WINCE.
True if mmap clears memory so calloc doesn't need to. This is true
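
The excerpt is cut off above, but all of these knobs (HAVE_MMAP, HAVE_MREMAP, MMAP_CLEARS) are plain compile-time macros. A purely illustrative configuration sketch, not the settings used by this bundled test copy of dlmalloc, would look like this:

/* Illustrative only: equivalent to -DHAVE_MMAP=1 -DHAVE_MREMAP=0 -DMMAP_CLEARS=1
   on the compiler command line when building malloc.c. */
#define HAVE_MMAP    1   /* mmap available: large blocks and backup when MORECORE fails */
#define HAVE_MREMAP  0   /* no mremap(): realloc of large blocks copies instead         */
#define MMAP_CLEARS  1   /* mmap'd pages arrive zeroed, so calloc can skip the memset   */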
tests/include/pdclib/functions/_dlmalloc/malloc.c
/*
Try getting memory in any of three ways (in most-preferred to
least-preferred order):
1. A call to MORECORE that can normally contiguously extend memory.
(disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
main space is mmapped or a previous contiguous call failed)
2. A call to MMAP new space (disabled if not HAVE_MMAP).
Note that under the default settings, if MORECORE is unable to
fulfill a request, and HAVE_MMAP is true, then mmap is
used as a noncontiguous system allocator. This is a useful backup
strategy for systems with holes in address spaces -- in this case
sbrk cannot contiguously expand the heap, but mmap may be able to
find space.
3. A call to MORECORE that cannot usually contiguously extend memory.
(disabled if not HAVE_MORECORE)
In all cases, we need to request enough bytes from system to ensure
we can malloc nb bytes upon success, so pad with enough space for
top_foot, plus alignment-pad to make sure we don't lose bytes if
not on boundary, and round this up to a granularity unit.
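
The three-step preference order can be pictured with a small stand-alone POSIX sketch. This is not the actual sys_alloc code, which also pads the request, tracks segments, and retries MORECORE non-contiguously:

#define _DEFAULT_SOURCE              /* for sbrk() and MAP_ANONYMOUS on glibc */
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

/* Sketch of the preference order only. */
static void *get_system_memory(size_t nbytes)
{
    void *mem = sbrk((intptr_t) nbytes);             /* 1. contiguous MORECORE/sbrk extension */
    if (mem != (void *) -1)
        return mem;

    mem = mmap(NULL, nbytes, PROT_READ | PROT_WRITE, /* 2. noncontiguous mmap fallback */
               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (mem != MAP_FAILED)
        return mem;

    return NULL;                                     /* 3. last-resort MORECORE call omitted here */
}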
tests/include/pdclib/functions/_dlmalloc/malloc.c
* new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
and Anonymous.
* Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
helping test this.)
* memalign: check alignment arg
* realloc: don't try to shift chunks backwards, since this
leads to more fragmentation in some programs and doesn't
seem to help in any others.
* Collect all cases in malloc requiring system memory into sysmalloc
* Use mmap as backup to sbrk
* Place all internal state in malloc_state
* Introduce fastbins (although similar to 2.5.1)
* Many minor tunings and cosmetic improvements
* Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
* Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
* Include errno.h to support default failure action.
V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
* return null for negative arguments
ucpp/hash.c
                /* Tail of the table-destruction loop: free each chained item
                   (through the optional per-item destructor dd), then release
                   the bucket array and the table itself. */
                n = t->next;
                if (dd) (*dd)(t->data);
                freemem(t);
                t = n;
        }
        freemem(ht->lists);
        freemem(ht);
}
/*
 * This function stores a backup of the hash table, for context stacking.
 */
void saveHT(struct HT *ht, void **buffer)
{
        struct hash_item **b = (struct hash_item **)buffer;

        mmv(b, ht->lists, ht->nb_lists * sizeof(struct hash_item *));
}

/*
 * This function restores the saved state of the hash table.
 * Do NOT use if some of the entries that were present before the backup
 * have been removed (even temporarily).
 */
void restoreHT(struct HT *ht, void **buffer)
{
        struct hash_item **b = (struct hash_item **)buffer;
        int i;

        for (i = 0; i < ht->nb_lists; i ++) {
                struct hash_item *t = ht->lists[i], *n;
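
A hedged usage sketch of this save/restore pair follows. The wrapper function, the nb_lists parameter, and the getmem() allocator are assumptions made for illustration; only saveHT(), restoreHT() and freemem() appear in the code above:

/* Assumed usage pattern, not code from ucpp: snapshot the bucket heads,
   work in a nested context that only ADDS entries, then roll back.
   Assumes ucpp's hash.h has been included for struct HT. */
void with_temporary_context(struct HT *ht, size_t nb_lists)
{
        void **backup = getmem(nb_lists * sizeof *backup);  /* assumed allocator */

        saveHT(ht, backup);       /* copy the current bucket heads */
        /* ... register temporary entries; never remove pre-existing ones ... */
        restoreHT(ht, backup);    /* roll the table back to the saved state */

        freemem(backup);
}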