Devel-LeakTrace-Fast
view release on metacpan or search on metacpan
/* NOTE(review): fragment — the opening of hash_new() (allocation of the
 * handle and the bucket array, and the loop header for `s`) is outside
 * this view; only the tail of the function is visible here.
 */
/* -1 presumably marks a bucket as empty / end-of-chain — TODO confirm
 * against the lookup code. */
h->slot[s] = -1;
}
/* Initialise the key/slot storage; only publish the handle to the caller
 * on success, so *hh is never left pointing at a half-built hash. */
if ( err = buffer_init( &h->buf, 0, 256 ), ERR_None == err ) {
*hh = h;
}
return err;
}
/* Install (or replace) the value-lifecycle callbacks for a hash.
 *
 * cbd    - opaque context pointer handed back to every callback.
 * cb_add - presumably invoked when a value is added — TODO confirm; not
 *          visible in this view.
 * cb_del - presumably invoked when a value is removed — TODO confirm.
 * cb_upd - invoked when an existing key's value is replaced (see the
 *          update path in hash_put, which checks cb_upd for NULL first,
 *          so a NULL callback is simply skipped).
 *
 * Always returns ERR_None.
 */
int hash_set_callbacks( hash * h, void *cbd,
int ( *cb_add ) ( hash * h, void *d, void **v ),
int ( *cb_del ) ( hash * h, void *d, void *v ),
int ( *cb_upd ) ( hash * h, void *d, void *ov,
void **nv ) ) {
h->cbd = cbd;
h->cb_add = cb_add;
h->cb_del = cb_del;
h->cb_upd = cb_upd;
return ERR_None;
/* NOTE(review): the function's closing brace appears to have been lost
 * in extraction immediately after this line. */
/* NOTE(review): fragment — the _rehash() signature and its local
 * declarations (err, nh, ncap, key, key_len, i) are outside this view,
 * and the tail of this span appears interleaved with code from another
 * function (see note below). Comments only; code untouched.
 */
INST_I( rehash );
/* All we do is make a new hash and copy the old one into it...
*/
/* Build the replacement hash at the new capacity; bail out early if
 * allocation fails, leaving the original hash untouched. */
if ( err = hash_new( ncap, &nh ), ERR_None != err ) {
return err;
}
/* Iterate through the keys copying entries one at a time. This has the
* happy side effect of clearing out the garbage left by any deleted keys.
* Any callbacks that are installed for the original hash won't be in
* effect on the new hash so there's no need to worry about any side
* effects they might have. Once the new hash data is moved back into the
* original hash any callbacks will automatically take effect again.
*/
key = hash_get_first_key( h, &i, &key_len );
while ( key ) {
if ( err =
hash_put( nh, key, key_len, hash_get( h, key, key_len ) ),
ERR_None != err ) {
/* Copy failed: discard the partially built hash before bailing out. */
hash_delete( nh );
return err;
}
key = hash_get_next_key( h, &i, &key_len );
/* NOTE(review): the three lines below look like hash_put()'s
 * load-factor check (size > cap * 5 triggers a rehash) spliced into
 * this loop by the extraction — they presumably do not belong here.
 * Verify against the original source before editing. */
if ( ( int )h->size > h->cap * 5 ) {
return _rehash( h );
}
}
/* NOTE(review): fragment of hash_put()'s "key already present" branch;
 * the matching if, the function signature, and the declarations of
 * err / val / s are all outside this view.
 */
else {
/* Replace an existing entry.
*/
/* s is presumably a byte offset into the key/slot buffer — TODO
 * confirm against the lookup code that computed it. */
sl = ( hash_slot * ) ( ( char * )h->buf.buf + s );
/* If the value is actually changing inform any callbacks */
if ( sl->v != val ) {
if ( h->cb_upd ) {
/* The update callback may veto the replacement by returning an
 * error, and may rewrite the incoming value through &val. */
if ( err =
h->cb_upd( h, h->cbd, sl->v, &val ),
ERR_None != err ) {
/* NULL out the pointer on the assumption that the update function
* at least managed to free the old value. If this turns out to be
* untrue we'll have leaked a little.
*/
sl->v = NULL;
/* NOTE(review): fragment — the opening "typedef struct _hash {" of this
 * definition is outside this view; only the trailing fields are visible.
 */
buffer buf; /* Buffer for keys */
long *slot; /* Array of buckets */
long cap; /* Size of bucket array */
long state; /* Incremented every time the hash's state changes so that we
* can spot the case where an iterator gets out of sync with
* the hash it's iterating over.
*/
size_t size; /* entry count — presumably live entries; confirm */
size_t deleted; /* presumably the count of deleted-but-not-yet-purged
* slots (rehashing clears them) — TODO confirm */
/* Optional callbacks for value addition, deletion */
void *cbd;
int ( *cb_add ) ( struct _hash * h, void *d, void **v );
int ( *cb_del ) ( struct _hash * h, void *d, void *v );
int ( *cb_upd ) ( struct _hash * h, void *d, void *ov, void **nv );
} hash;
/* Cursor for walking a hash's keys via hash_get_first_key /
 * hash_get_next_key. Field layout is part of the public interface. */
typedef struct {
long state; /* Snapshot of hash->state, so a mutation of the hash during
* iteration can be detected (see the state comment on the
* hash struct). */
long bucket; /* presumably the current bucket index — TODO confirm */
long sl; /* presumably the current position within the bucket's
* chain — TODO confirm */
} hash_iter;
/* Sentinel used so that callers may legitimately map a key to NULL:
 * NULL values are stored as hash_NULL internally and translated back on
 * retrieval. Defined in the implementation file. */
extern void *hash_NULL;
/* Encode a value for storage: NULL becomes the hash_NULL sentinel. */
#define hash_PUTNULL(p) ((p) ? (p) : hash_NULL)
/* Decode a stored value: the hash_NULL sentinel maps back to NULL.
 * Fix: the final expansion of the argument is now parenthesized, so an
 * argument containing low-precedence operators (e.g. a ternary) cannot
 * change the meaning of the conditional expression. */
#define hash_GETNULL(p) ((p) == hash_NULL ? NULL : (p))
/* Create a hash with the given initial bucket capacity; on success the
 * new handle is stored in *hh. Returns an ERR_* code. */
int hash_new( long capacity, hash ** hh );

/* Install value-lifecycle callbacks (see the definition for details);
 * cbd is an opaque context pointer passed back to each callback.
 * Consistency fix: cb_upd's first parameter is declared as "hash *" to
 * match the function definition and the other callback prototypes
 * ("struct _hash" and "hash" name the same type, so callers are
 * unaffected). */
int hash_set_callbacks( hash * h, void *cbd,
                        int ( *cb_add ) ( hash * h, void *d, void **v ),
                        int ( *cb_del ) ( hash * h, void *d, void *v ),
                        int ( *cb_upd ) ( hash * h, void *d,
                                          void *ov, void **nv ) );

/* Destroy a hash and release its storage. */
int hash_delete( hash * h );

/* Insert, or replace, the value stored under key/key_len. */
int hash_put( hash * h, const void *key, size_t key_len, void *val );

/* Remove key/key_len from the hash. */
int hash_delete_key( hash * h, const void *key, size_t key_len );

/* Look up key/key_len; presumably returns NULL when absent — confirm. */
void *hash_get( hash * h, const void *key, size_t key_len );

/* Number of entries currently in the hash. */
size_t hash_size( hash * h );
const void *hash_get_first_key( hash * h, hash_iter * i,
( run in 0.412 second using v1.01-cache-2.11-cpan-9b1e4054eb1 )