JavaScript-Duktape-XS
/* Fatal error handling, called e.g. when a longjmp() is needed but
* lj.jmpbuf_ptr is NULL. fatal_func must never return; it's not
* declared as "noreturn" because doing that for typedefs is a bit
* challenging portability-wise.
*/
duk_fatal_function fatal_func;
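/* Usage note (standalone sketch, not part of this struct): embedders install
 * the fatal handler through the public duk_create_heap() API.  Handler name
 * and message text below are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>
#include "duktape.h"

/* Must never return; aborting is the usual choice. */
static void my_fatal(void *udata, const char *msg) {
	(void) udata;
	fprintf(stderr, "*** FATAL ERROR: %s\n", msg ? msg : "no message");
	abort();
}

int main(void) {
	duk_context *ctx = duk_create_heap(NULL, NULL, NULL, NULL, my_fatal);
	if (ctx == NULL) {
		return 1;
	}
	duk_eval_string_noresult(ctx, "1 + 2");
	duk_destroy_heap(ctx);
	return 0;
}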
/* Main list of allocated heap objects. Objects are either here,
* in finalize_list waiting for processing, or in refzero_list
* temporarily while a DECREF refzero cascade finishes.
*/
duk_heaphdr *heap_allocated;
/* Temporary work list for freeing a cascade of objects when a DECREF
* (or DECREF_NORZ) encounters a zero refcount. Using a work list
* allows fixed C stack size when refcounts go to zero for a chain of
* objects. Outside of DECREF this is always a NULL because DECREF is
* processed without side effects (only memory free calls).
*/
#if defined(DUK_USE_REFERENCE_COUNTING)
duk_heaphdr *refzero_list;
#endif
#if defined(DUK_USE_FINALIZER_SUPPORT)
/* Work list for objects waiting to be finalized.  The refcount of objects
 * placed here by refzero handling is zero; the refcount of objects placed
 * by mark-and-sweep is > 0.  In both
* cases the refcount is bumped by 1 artificially so that a REFZERO event
* can never happen while an object is waiting for finalization. Without
* this bump a REFZERO could now happen because user code may call
* duk_push_heapptr() and then pop a value even when it's on finalize_list.
*
* List processing assumes refcounts are kept up-to-date at all times, so
* that once the finalizer returns, a zero refcount is a reliable reason to
* free the object immediately rather than place it back to the heap. This
* is the case because we run outside of refzero_list processing so that
* DECREF cascades are handled fully inline.
*
* For mark-and-sweep queued objects (had_zero_refcount false) the object
* may be freed immediately if its refcount is zero after the finalizer call
* (i.e. finalizer removed the reference loop for the object). If not, the
* next mark-and-sweep will collect the object unless it has become reachable
* (i.e. rescued) by that time and its refcount hasn't fallen to zero before
* that. Mark-and-sweep detects these objects because their FINALIZED flag
* is set.
*
 * There's an inherent limitation for mark-and-sweep finalizer rescuing.
 */
duk_heaphdr *finalize_list;
#endif

/*
 * Heap object refcount finalization.
 *
* When an object is about to be freed, all other objects it refers to must
* be decref'd. Refcount finalization does NOT free the object or its inner
* allocations (mark-and-sweep shares these helpers), it just manipulates
* the refcounts.
*
* Note that any of the DECREFs may cause a refcount to drop to zero. If so,
* the object won't be refzero processed inline, but will just be queued to
* refzero_list and processed by an earlier caller working on refzero_list,
* eliminating C recursion from even long refzero cascades. If refzero
* finalization is triggered by mark-and-sweep, refzero conditions are ignored
* (objects are not even queued to refzero_list) because mark-and-sweep deals
* with them; refcounts are still updated so that they remain in sync with
* actual references.
*/
DUK_LOCAL void duk__decref_tvals_norz(duk_hthread *thr, duk_tval *tv, duk_idx_t count) {
	DUK_ASSERT(count == 0 || tv != NULL);

	while (count-- > 0) {
		DUK_TVAL_DECREF_NORZ(thr, tv);
		tv++;
	}
}
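/* Illustration (standalone sketch, not Duktape code): the work-list idea used
 * for refzero cascades, with hypothetical simplified types.  A nested decref
 * only queues objects ('pending' plays the role of heap->refzero_list), and a
 * single loop in the outermost caller frees them, so C stack usage stays
 * bounded no matter how long the freed chain is.
 */
#include <stdlib.h>

struct node {
	int refcount;
	struct node *child;   /* one refcounted reference, for simplicity */
	struct node *prev;    /* link for the pending work list */
};

static struct node *pending = NULL;   /* plays the role of heap->refzero_list */

static void decref(struct node *n);

static void free_pending(void) {
	/* 'pending' stays non-NULL for the whole cascade, so nested decref()
	 * calls only queue objects and never start a second processing loop.
	 */
	while (pending != NULL) {
		struct node *tail = pending;          /* newest queued object */
		if (tail->child != NULL) {
			struct node *child = tail->child;
			tail->child = NULL;
			decref(child);                /* may grow the list; never recurses deeper */
			continue;                     /* keep 'tail' queued until its refs are handled */
		}
		pending = tail->prev;                 /* done with this object */
		free(tail);
	}
}

static void decref(struct node *n) {
	if (--n->refcount != 0) {
		return;
	}
	{
		int list_was_empty = (pending == NULL);
		n->prev = pending;                    /* just queue on the work list */
		pending = n;
		if (list_was_empty) {
			free_pending();               /* only the outermost refzero processes the list */
		}
	}
}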
/* Process the refzero_list work list.
 *
 * - Objects are queued so that 'prev' pointers are set correctly, with the element at refzero_list
* having a NULL 'prev' pointer. The fact that refzero_list is non-NULL
* is used to reject (1) recursive duk__refcount_free_pending() and
* (2) finalize_list processing calls.
*
* - When we're done with the current object, read its 'prev' pointer and
* free the object. If 'prev' is NULL, we've reached head of list and are
* done: set refzero_list to NULL and process pending finalizers. Otherwise
* continue processing the list.
*
* A refzero cascade is free of side effects because it only involves
* queueing more objects and freeing memory; finalizer execution is blocked
* in the code path queueing objects to finalize_list. As a result the
* initial refzero call (which triggers duk__refcount_free_pending()) must
* check finalize_list so that finalizers are executed snappily.
*
* If finalize_list processing starts first, refzero may occur while we're
* processing finalizers. That's fine: that particular refzero cascade is
* handled to completion without side effects. Once the cascade is complete,
* we'll run pending finalizers but notice that we're already doing that and
* return.
*
* This could be expanded to allow incremental freeing: just bail out
* early and resume at a future alloc/decref/refzero. However, if that
* were done, the list structure would need to be kept consistent at all
* times, mark-and-sweep would need to handle refzero_list, etc.
*/
DUK_LOCAL void duk__refcount_free_pending(duk_heap *heap) {
/* Bump the refcount artificially so that a REFZERO event cannot happen
 * while the object is queued and waiting
 * for its finalizer call.  Refzero might otherwise
* now happen because we allow duk_push_heapptr() for
* objects pending finalization.
*/
DUK_HEAPHDR_PREINC_REFCOUNT(hdr);
#endif
DUK_HEAP_INSERT_INTO_FINALIZE_LIST(heap, hdr);
/* Process finalizers unless skipping is explicitly
* requested (NORZ) or refzero_list is being processed
* (avoids side effects during a refzero cascade).
* If refzero_list is processed, the initial refzero
* call will run pending finalizers when refzero_list
* is done.
*/
if (!skip_free_pending && heap->refzero_list == NULL) {
duk_heap_process_finalize_list(heap);
}
return;
}
}
root = heap->refzero_list;
DUK_HEAPHDR_SET_PREV(heap, hdr, NULL);
/* 'next' is left as GARBAGE. */
heap->refzero_list = hdr;
if (root == NULL) {
/* Object is now queued. Refzero_list was NULL so
* no-one is currently processing it; do it here.
* With refzero processing just doing a cascade of
* free calls, we can process it directly even when
* NORZ macros are used: there are no side effects.
*/
duk__refcount_free_pending(heap);
DUK_ASSERT(heap->refzero_list == NULL);
/* Process finalizers only after the entire cascade
* is finished. In most cases there's nothing to
* finalize, so fast path check to avoid a call.
*/
#if defined(DUK_USE_FINALIZER_SUPPORT)
if (!skip_free_pending && DUK_UNLIKELY(heap->finalize_list != NULL)) {
duk_heap_process_finalize_list(heap);
}
#endif
} else {
DUK_ASSERT(DUK_HEAPHDR_GET_PREV(heap, root) == NULL);
DUK_ASSERT(DUK_HEAPHDR_GET_TYPE((duk_heaphdr *) buf) == DUK_HTYPE_BUFFER);
DUK_HEAP_REMOVE_FROM_HEAP_ALLOCATED(heap, (duk_heaphdr *) buf);
duk_free_hbuffer(heap, buf);
}
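/* At the public API level, the finalize_list machinery described above is
 * what makes object finalizers run.  A standalone sketch using documented
 * calls only; with reference counting enabled the finalizer typically runs
 * already at the duk_pop() below (the refzero path), otherwise the forced
 * mark-and-sweep picks the object up.
 */
#include <stdio.h>
#include "duktape.h"

/* The finalizer is called with the object at index 0 and a boolean at
 * index 1 telling whether finalization is due to heap destruction. */
static duk_ret_t my_finalizer(duk_context *ctx) {
	(void) ctx;
	printf("object finalized\n");
	return 0;
}

int main(void) {
	duk_context *ctx = duk_create_heap_default();
	if (ctx == NULL) {
		return 1;
	}
	duk_push_object(ctx);                       /* [ obj ] */
	duk_push_c_function(ctx, my_finalizer, 2);  /* [ obj fin ] */
	duk_set_finalizer(ctx, -2);                 /* [ obj ], finalizer attached */
	duk_pop(ctx);                               /* drop the only reference */
	duk_gc(ctx, 0);                             /* force a mark-and-sweep pass */
	duk_destroy_heap(ctx);
	return 0;
}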
/*
* Incref and decref functions.
*
* Decref may trigger immediate refzero handling, which may free and finalize
* an arbitrary number of objects (a "DECREF cascade").
*
* Refzero handling is skipped entirely if (1) mark-and-sweep is running or
* (2) execution is paused in the debugger. The objects are left in the heap,
* and will be freed by mark-and-sweep or eventual heap destruction.
*
* This is necessary during mark-and-sweep because refcounts are also updated
* during the sweep phase (otherwise objects referenced by a swept object
* would have incorrect refcounts) which then calls here. This could be
* avoided by using separate decref macros in mark-and-sweep; however,
 * mark-and-sweep also calls finalizers which would use the ordinary decref
 * macros anyway.
 */
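/* A hedged sketch of the rule above, using hypothetical names rather than
 * Duktape's actual macros: the refcount is always updated so it stays in
 * sync with actual references, but refzero side effects are suppressed while
 * mark-and-sweep runs or the debugger has paused execution; such objects are
 * simply left for mark-and-sweep or heap destruction to free.
 */
#include <stdlib.h>

struct obj { int refcount; };

struct heap {
	int ms_running;       /* mark-and-sweep currently running */
	int debugger_paused;  /* execution paused in the debugger */
};

/* Stand-in for the refzero work-list handling sketched earlier. */
static void queue_refzero(struct heap *h, struct obj *o) {
	(void) h;
	free(o);
}

static void heap_decref(struct heap *h, struct obj *o) {
	/* Refcount always updated, even when refzero handling is skipped. */
	if (--o->refcount != 0) {
		return;
	}
	if (h->ms_running || h->debugger_paused) {
		/* Leave the object in the heap; mark-and-sweep or heap
		 * destruction frees it later. */
		return;
	}
	queue_refzero(h, o);
}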
/* duk_hobject_realloc_props(): reallocate an object's property storage.
 *
 * The implementation is relatively straightforward, except for the array
* abandonment process. Array abandonment requires that new string keys
* are interned, which may trigger GC. All keys interned so far must be
* reachable for GC at all times and correctly refcounted for; valstack is
* used for that now.
*
* Also, a GC triggered during this reallocation process must not interfere
* with the object being resized. This is currently controlled by preventing
* finalizers (as they may affect ANY object) and object compaction in
* mark-and-sweep. It would suffice to protect only this particular object
* from compaction, however. DECREF refzero cascades are side effect free
* and OK.
*
* Note: because we need to potentially resize the valstack (as part
* of abandoning the array part), any tval pointers to the valstack
* will become invalid after this call.
*/
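/* The same reachability principle shows up at the public API level: values
 * that must survive an operation which may trigger GC are simply kept on the
 * value stack, which is a GC root.  Standalone sketch (assumes 'ctx' is a
 * valid duk_context; key names are illustrative).
 */
#include "duktape.h"

static void build_keys_safely(duk_context *ctx) {
	duk_idx_t top = duk_get_top(ctx);

	duk_push_string(ctx, "first_key");    /* reachable: the value stack is a GC root */
	duk_push_string(ctx, "second_key");

	/* ... work that may allocate and trigger a GC pass; the two strings
	 * cannot be collected while they remain on the value stack ...
	 */

	duk_set_top(ctx, top);                /* drop them once no longer needed */
}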
DUK_INTERNAL void duk_hobject_realloc_props(duk_hthread *thr,
duk_hobject *obj,
duk_uint32_t new_e_size,
retval = DUK__LONGJMP_RETHROW;
goto just_return;
}
duk_hthread_activation_unwind_norz(thr);
}
DUK_DD(DUK_DDPRINT("-> throw not caught by current thread, yield error to resumer and recheck longjmp"));
/* Not caught by current thread, thread terminates (yield error to resumer);
* note that this may cause a cascade if the resumer terminates with an uncaught
* exception etc (this is OK, but needs careful testing).
*/
DUK_ASSERT(thr->resumer != NULL);
DUK_ASSERT(thr->resumer->callstack_top >= 2); /* ECMAScript activation + Duktape.Thread.resume() activation */
DUK_ASSERT(thr->resumer->callstack_curr != NULL);
DUK_ASSERT(thr->resumer->callstack_curr->parent != NULL);
DUK_ASSERT(
DUK_ACT_GET_FUNC(thr->resumer->callstack_curr->parent) != NULL &&
DUK_HOBJECT_IS_COMPFUNC(DUK_ACT_GET_FUNC(thr->resumer->callstack_curr->parent))); /* an ECMAScript function */
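/* The "yield error to resumer" path can be observed through the public API:
 * an uncaught error inside a coroutine terminates the coroutine and is
 * re-thrown from the Duktape.Thread.resume() call in the resumer.  A hedged
 * standalone sketch (assumes coroutine support is enabled in the build):
 */
#include <stdio.h>
#include "duktape.h"

int main(void) {
	duk_context *ctx = duk_create_heap_default();
	if (ctx == NULL) {
		return 1;
	}
	if (duk_peval_string(ctx,
	        "var t = new Duktape.Thread(function () { throw new Error('boom'); });\n"
	        "Duktape.Thread.resume(t);  /* error surfaces here, in the resumer */") != 0) {
		printf("resumer saw error: %s\n", duk_safe_to_string(ctx, -1));
	}
	duk_pop(ctx);
	duk_destroy_heap(ctx);
	return 0;
}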