JavaScript-Embedded

 view release on metacpan or  search on metacpan

lib/JavaScript/Embedded/C/lib/duktape.c  view on Meta::CPAN

	do { \
		DUK_ASSERT(heap != NULL); \
		DUK_ASSERT(heap->lj.type == DUK_LJ_TYPE_UNKNOWN); \
		DUK_ASSERT(heap->lj.iserror == 0); \
		DUK_ASSERT(DUK_TVAL_IS_UNDEFINED(&heap->lj.value1)); \
		DUK_ASSERT(DUK_TVAL_IS_UNDEFINED(&heap->lj.value2)); \
	} while (0)
/* Assert that the longjmp state is set, i.e. a longjmp type is pending. */
#define DUK_ASSERT_LJSTATE_SET(heap) \
	do { \
		DUK_ASSERT(heap != NULL); \
		DUK_ASSERT(heap->lj.type != DUK_LJ_TYPE_UNKNOWN); \
	} while (0)

/*
 *  Literal intern cache
 */

struct duk_litcache_entry {
	const duk_uint8_t *addr; /* Literal address; presumably the cache lookup key -- confirm against cache code. */
	duk_hstring *h;          /* Interned string corresponding to the literal. */
};

/*
 *  Main heap structure
 */

#if defined(DUK_USE_ASSERTIONS)
DUK_INTERNAL_DECL void duk_heap_assert_valid(duk_heap *heap);
/* Check heap validity via a helper call; compiles to a no-op when
 * assertions are disabled.
 */
#define DUK_HEAP_ASSERT_VALID(heap) \
	do { \
		duk_heap_assert_valid((heap)); \
	} while (0)
#else
#define DUK_HEAP_ASSERT_VALID(heap) \
	do { \
	} while (0)
#endif

struct duk_heap {
	duk_small_uint_t flags;

	/* Allocator functions. */
	duk_alloc_function alloc_func;
	duk_realloc_function realloc_func;
	duk_free_function free_func;

	/* Heap udata, used for allocator functions but also for other heap
	 * level callbacks like fatal function, pointer compression, etc.
	 */
	void *heap_udata;

	/* Fatal error handling, called e.g. when a longjmp() is needed but
	 * lj.jmpbuf_ptr is NULL.  fatal_func must never return; it's not
	 * declared as "noreturn" because doing that for typedefs is a bit
	 * challenging portability-wise.
	 */
	duk_fatal_function fatal_func;

	/* Main list of allocated heap objects.  Objects are either here,
	 * in finalize_list waiting for processing, or in refzero_list
	 * temporarily while a DECREF refzero cascade finishes.
	 */
	duk_heaphdr *heap_allocated;

	/* Temporary work list for freeing a cascade of objects when a DECREF
	 * (or DECREF_NORZ) encounters a zero refcount.  Using a work list
	 * allows fixed C stack size when refcounts go to zero for a chain of
	 * objects.  Outside of DECREF this is always a NULL because DECREF is
	 * processed without side effects (only memory free calls).
	 */
#if defined(DUK_USE_REFERENCE_COUNTING)
	duk_heaphdr *refzero_list;
#endif

#if defined(DUK_USE_FINALIZER_SUPPORT)
	/* Work list for objects to be finalized. */
	duk_heaphdr *finalize_list;
#if defined(DUK_USE_ASSERTIONS)
	/* Object whose finalizer is executing right now (no nesting). */
	duk_heaphdr *currently_finalizing;
#endif
#endif

	/* Freelist for duk_activations and duk_catchers. */
#if defined(DUK_USE_CACHE_ACTIVATION)
	duk_activation *activation_free;
#endif
#if defined(DUK_USE_CACHE_CATCHER)
	duk_catcher *catcher_free;
#endif

	/* Voluntary mark-and-sweep trigger counter.  Intentionally signed
	 * because we continue decreasing the value when voluntary GC cannot
	 * run.
	 */
#if defined(DUK_USE_VOLUNTARY_GC)
	duk_int_t ms_trigger_counter;
#endif

	/* Mark-and-sweep recursion control: too deep recursion causes
	 * multi-pass processing to avoid growing C stack without bound.
	 */
	duk_uint_t ms_recursion_depth;

	/* Mark-and-sweep flags automatically active (used for critical sections). */
	duk_small_uint_t ms_base_flags;

	/* Mark-and-sweep running flag.  Prevents re-entry, and also causes
	 * refzero events to be ignored (= objects won't be queued to refzero_list).
	 *
	 * 0: mark-and-sweep not running
	 * 1: mark-and-sweep is running
	 * 2: heap destruction active or debugger active, prevent mark-and-sweep
	 *    and refzero processing (but mark-and-sweep not itself running)
	 */
	duk_uint_t ms_running;

	/* Mark-and-sweep prevent count, stacking.  Used to avoid M&S side
	 * effects (besides finalizers which are controlled separately) such
	 * as compacting the string table or object property tables.  This
	 * is also bumped when ms_running is set to prevent recursive re-entry.
	 * Can also be bumped when mark-and-sweep is not running.
	 */
	duk_uint_t ms_prevent_count;

lib/JavaScript/Embedded/C/lib/duktape.c  view on Meta::CPAN

	duk_hthread_valstack_torture_realloc(thr);

	/* Inner function call, error throw. */
	duk_eval_string_noresult(thr,
	                         "(function dummy() {\n"
	                         "    dummy.prototype = null;  /* break reference loop */\n"
	                         "    try {\n"
	                         "        throw 'fake-finalizer-dummy-error';\n"
	                         "    } catch (e) {\n"
	                         "        void e;\n"
	                         "    }\n"
	                         "})()");

	/* The above creates garbage (e.g. a function instance).  Because
	 * the function/prototype reference loop is broken, it gets collected
	 * immediately by DECREF.  If Function.prototype has a _Finalizer
	 * property (happens in some test cases), the garbage gets queued to
	 * finalize_list.  This still won't cause an infinite loop because
	 * the torture finalizer is called once per finalize_list run and
	 * the garbage gets handled in the same run.  (If the garbage needs
	 * mark-and-sweep collection, an infinite loop might ensue.)
	 */
	return 0;
}

/* Run the global (fake) torture finalizer unless there is too little call
 * recursion or call stack headroom left.  The fake finalizer is invoked
 * via duk_pcall() so any error it throws is swallowed here.
 */
DUK_LOCAL void duk__run_global_torture_finalizer(duk_hthread *thr) {
	DUK_ASSERT(thr != NULL);

	/* Avoid fake finalization when callstack limit is near.  Otherwise
	 * a callstack limit error will be created, then refzero'ed.  The
	 * +5 headroom is conservative.
	 */
	if (thr->heap->call_recursion_depth + 5 >= thr->heap->call_recursion_limit ||
	    thr->callstack_top + 5 >= DUK_USE_CALLSTACK_LIMIT) {
		DUK_D(DUK_DPRINT("skip global torture finalizer, too little headroom for call recursion or call stack size"));
		return;
	}

	/* Run fake finalizer.  Avoid creating unnecessary garbage. */
	duk_push_c_function(thr, duk__fake_global_finalizer, 0 /*nargs*/);
	(void) duk_pcall(thr, 0 /*nargs*/);
	duk_pop(thr); /* Pop pcall result (or error); net value stack effect is zero. */
}
#endif /* DUK_USE_FINALIZER_TORTURE */

/*
 *  Process the finalize_list to completion.
 *
 *  An object may be placed on finalize_list by either refcounting or
 *  mark-and-sweep.  The refcount of objects placed by refcounting will be
 *  zero; the refcount of objects placed by mark-and-sweep is > 0.  In both
 *  cases the refcount is bumped by 1 artificially so that a REFZERO event
 *  can never happen while an object is waiting for finalization.  Without
 *  this bump a REFZERO could now happen because user code may call
 *  duk_push_heapptr() and then pop a value even when it's on finalize_list.
 *
 *  List processing assumes refcounts are kept up-to-date at all times, so
 *  that once the finalizer returns, a zero refcount is a reliable reason to
 *  free the object immediately rather than place it back to the heap.  This
 *  is the case because we run outside of refzero_list processing so that
 *  DECREF cascades are handled fully inline.
 *
 *  For mark-and-sweep queued objects (had_zero_refcount false) the object
 *  may be freed immediately if its refcount is zero after the finalizer call
 *  (i.e. finalizer removed the reference loop for the object).  If not, the
 *  next mark-and-sweep will collect the object unless it has become reachable
 *  (i.e. rescued) by that time and its refcount hasn't fallen to zero before
 *  that.  Mark-and-sweep detects these objects because their FINALIZED flag
 *  is set.
 *
 *  There's an inherent limitation for mark-and-sweep finalizer rescuing: an
 *  object won't get refinalized if (1) it's rescued, but (2) becomes
 *  unreachable before mark-and-sweep has had time to notice it.  The next
 *  mark-and-sweep round simply doesn't have any information of whether the
 *  object has been unreachable the whole time or not (the only way to get
 *  that information would be a mark-and-sweep pass for *every finalized
 *  object*).  This is awkward for the application because the mark-and-sweep
 *  round is not generally visible or under full application control.
 *
 *  For refcount queued objects (had_zero_refcount true) the object is either
 *  immediately freed or rescued, and waiting for a mark-and-sweep round is not
 *  necessary (or desirable); FINALIZED is cleared when a rescued object is
 *  queued back to heap_allocated.  The object is eligible for finalization
 *  again (either via refcounting or mark-and-sweep) immediately after being
 *  rescued.  If a refcount finalized object is placed into an unreachable
 *  reference loop by its finalizer, it will get collected by mark-and-sweep
 *  and currently the finalizer will execute again.
 *
 *  There's a special case where:
 *
 *    - Mark-and-sweep queues an object to finalize_list for finalization.
 *    - The finalizer is executed, FINALIZED is set, and object is queued
 *      back to heap_allocated, waiting for a new mark-and-sweep round.
 *    - The object's refcount drops to zero before mark-and-sweep has a
 *      chance to run another round and make a rescue/free decision.
 *
 *  This is now handled by refzero code: if an object has a finalizer but
 *  FINALIZED is already set, the object is freed without finalizer processing.
 *  The outcome is the same as if mark-and-sweep was executed at that point;
 *  mark-and-sweep would also free the object without another finalizer run.
 *  This could also be changed so that the refzero-triggered finalizer *IS*
 *  executed: being refzero collected implies someone has operated on the
 *  object so it hasn't been totally unreachable the whole time.  This would
 *  risk a finalizer loop however.
 */

DUK_INTERNAL void duk_heap_process_finalize_list(duk_heap *heap) {
	duk_heaphdr *curr;
#if defined(DUK_USE_DEBUG)
	duk_size_t count = 0;
#endif

	DUK_DDD(DUK_DDDPRINT("duk_heap_process_finalize_list: %p", (void *) heap));

	if (heap->pf_prevent_count != 0) {
		DUK_DDD(DUK_DDDPRINT("skip finalize_list processing: pf_prevent_count != 0"));
		return;
	}

	/* Heap alloc prevents mark-and-sweep before heap_thread is ready. */
	DUK_ASSERT(heap != NULL);

lib/JavaScript/Embedded/C/lib/duktape.c  view on Meta::CPAN

		if (curr_thr == NULL) {
			/* For initial entry use default value; zero forces an
			 * interrupt before executing the first instruction.
			 */
			DUK_DD(DUK_DDPRINT("switch thread, initial entry, init default interrupt counter"));
			new_thr->interrupt_counter = 0;
			new_thr->interrupt_init = 0;
		} else {
			/* Copy interrupt counter/init value state to new thread (if any).
			 * It's OK for new_thr to be the same as curr_thr.
			 */
#if defined(DUK_USE_DEBUG)
			if (new_thr != curr_thr) {
				DUK_DD(DUK_DDPRINT("switch thread, not initial entry, copy interrupt counter"));
			}
#endif
			new_thr->interrupt_counter = curr_thr->interrupt_counter;
			new_thr->interrupt_init = curr_thr->interrupt_init;
		}
	} else {
		DUK_DD(DUK_DDPRINT("switch thread, new thread is NULL, no interrupt counter changes"));
	}

	heap->curr_thread = new_thr; /* may be NULL */
}
#endif /* DUK_USE_INTERRUPT_COUNTER */

#if defined(DUK_USE_ASSERTIONS)
/* Validate heap state.  Currently only checks the heap pointer itself;
 * a hook for deeper invariant checks behind DUK_HEAP_ASSERT_VALID().
 */
DUK_INTERNAL void duk_heap_assert_valid(duk_heap *heap) {
	DUK_ASSERT(heap != NULL);
}
#endif
#line 1 "duk_heap_refcount.c"
/*
 *  Reference counting implementation.
 *
 *  INCREF/DECREF, finalization and freeing of objects whose refcount reaches
 *  zero (refzero).  These operations are very performance sensitive, so
 *  various small tricks are used in an attempt to maximize speed.
 */

/* #include duk_internal.h -> already included */

#if defined(DUK_USE_REFERENCE_COUNTING)

#if !defined(DUK_USE_DOUBLE_LINKED_HEAP)
#error internal error, reference counting requires a double linked heap
#endif

/*
 *  Heap object refcount finalization.
 *
 *  When an object is about to be freed, all other objects it refers to must
 *  be decref'd.  Refcount finalization does NOT free the object or its inner
 *  allocations (mark-and-sweep shares these helpers), it just manipulates
 *  the refcounts.
 *
 *  Note that any of the DECREFs may cause a refcount to drop to zero.  If so,
 *  the object won't be refzero processed inline, but will just be queued to
 *  refzero_list and processed by an earlier caller working on refzero_list,
 *  eliminating C recursion from even long refzero cascades.  If refzero
 *  finalization is triggered by mark-and-sweep, refzero conditions are ignored
 *  (objects are not even queued to refzero_list) because mark-and-sweep deals
 *  with them; refcounts are still updated so that they remain in sync with
 *  actual references.
 */

/* DECREF (without refzero processing) a contiguous range of tagged values,
 * in ascending address order.
 */
DUK_LOCAL void duk__decref_tvals_norz(duk_hthread *thr, duk_tval *tv, duk_idx_t count) {
	duk_idx_t i;

	DUK_ASSERT(count == 0 || tv != NULL);

	for (i = 0; i < count; i++) {
		DUK_TVAL_DECREF_NORZ(thr, &tv[i]);
	}
}

DUK_INTERNAL void duk_hobject_refcount_finalize_norz(duk_heap *heap, duk_hobject *h) {
	duk_hthread *thr;
	duk_uint_fast32_t i;
	duk_uint_fast32_t n;
	duk_propvalue *p_val;
	duk_tval *p_tv;
	duk_hstring **p_key;
	duk_uint8_t *p_flag;
	duk_hobject *h_proto;

	DUK_ASSERT(heap != NULL);
	DUK_ASSERT(heap->heap_thread != NULL);
	DUK_ASSERT(h);
	DUK_ASSERT(DUK_HEAPHDR_GET_TYPE((duk_heaphdr *) h) == DUK_HTYPE_OBJECT);

	thr = heap->heap_thread;
	DUK_ASSERT(thr != NULL);

	p_key = DUK_HOBJECT_E_GET_KEY_BASE(heap, h);
	p_val = DUK_HOBJECT_E_GET_VALUE_BASE(heap, h);
	p_flag = DUK_HOBJECT_E_GET_FLAGS_BASE(heap, h);
	n = DUK_HOBJECT_GET_ENEXT(h);
	while (n-- > 0) {
		duk_hstring *key;

		key = p_key[n];
		if (DUK_UNLIKELY(key == NULL)) {
			continue;
		}
		DUK_HSTRING_DECREF_NORZ(thr, key);
		if (DUK_UNLIKELY(p_flag[n] & DUK_PROPDESC_FLAG_ACCESSOR)) {
			duk_hobject *h_getset;
			h_getset = p_val[n].a.get;
			DUK_ASSERT(h_getset == NULL || DUK_HEAPHDR_IS_OBJECT((duk_heaphdr *) h_getset));
			DUK_HOBJECT_DECREF_NORZ_ALLOWNULL(thr, h_getset);
			h_getset = p_val[n].a.set;
			DUK_ASSERT(h_getset == NULL || DUK_HEAPHDR_IS_OBJECT((duk_heaphdr *) h_getset));
			DUK_HOBJECT_DECREF_NORZ_ALLOWNULL(thr, h_getset);
		} else {
			duk_tval *tv_val;
			tv_val = &p_val[n].v;
			DUK_TVAL_DECREF_NORZ(thr, tv_val);
		}
	}

lib/JavaScript/Embedded/C/lib/duktape.c  view on Meta::CPAN

			}
#endif
		}

		for (i = 0; i < DUK_NUM_BUILTINS; i++) {
			DUK_HOBJECT_DECREF_NORZ_ALLOWNULL(thr, (duk_hobject *) t->builtins[i]);
		}

		DUK_HTHREAD_DECREF_NORZ_ALLOWNULL(thr, (duk_hthread *) t->resumer);
	} else {
		/* We may come here if the object should have a FASTREFS flag
		 * but it's missing for some reason.  Assert for never getting
		 * here; however, other than performance, this is harmless.
		 */
		DUK_D(DUK_DPRINT("missing FASTREFS flag for: %!iO", h));
		DUK_ASSERT(0);
	}
}

/* Refcount finalize an arbitrary heap header.  Only objects carry internal
 * refcounted references; strings and buffers need no refcount finalization.
 */
DUK_INTERNAL void duk_heaphdr_refcount_finalize_norz(duk_heap *heap, duk_heaphdr *hdr) {
	DUK_ASSERT(heap != NULL);
	DUK_ASSERT(heap->heap_thread != NULL);
	DUK_ASSERT(hdr != NULL);

	if (!DUK_HEAPHDR_IS_OBJECT(hdr)) {
		/* DUK_HTYPE_BUFFER and DUK_HTYPE_STRING: nothing to finalize. */
		return;
	}
	duk_hobject_refcount_finalize_norz(heap, (duk_hobject *) hdr);
}

/*
 *  Refzero processing for duk_hobject: queue a refzero'ed object to either
 *  finalize_list or refzero_list and process the relevant list(s) if
 *  necessary.
 *
 *  Refzero_list is single linked, with only 'prev' pointers set and valid.
 *  All 'next' pointers are intentionally left as garbage.  This doesn't
 *  matter because refzero_list is processed to completion before any other
 *  code (like mark-and-sweep) might walk the list.
 *
 *  In more detail:
 *
 *  - On first insert refzero_list is NULL and the new object becomes the
 *    first and only element on the list; duk__refcount_free_pending() is
 *    called and it starts processing the list from the initial element,
 *    i.e. the list tail.
 *
 *  - As each object is refcount finalized, new objects may be queued to
 *    refzero_list head.  Their 'next' pointers are left as garbage, but
 *    'prev' points are set correctly, with the element at refzero_list
 *    having a NULL 'prev' pointer.  The fact that refzero_list is non-NULL
 *    is used to reject (1) recursive duk__refcount_free_pending() and
 *    (2) finalize_list processing calls.
 *
 *  - When we're done with the current object, read its 'prev' pointer and
 *    free the object.  If 'prev' is NULL, we've reached head of list and are
 *    done: set refzero_list to NULL and process pending finalizers.  Otherwise
 *    continue processing the list.
 *
 *  A refzero cascade is free of side effects because it only involves
 *  queueing more objects and freeing memory; finalizer execution is blocked
 *  in the code path queueing objects to finalize_list.  As a result the
 *  initial refzero call (which triggers duk__refcount_free_pending()) must
 *  check finalize_list so that finalizers are executed snappily.
 *
 *  If finalize_list processing starts first, refzero may occur while we're
 *  processing finalizers.  That's fine: that particular refzero cascade is
 *  handled to completion without side effects.  Once the cascade is complete,
 *  we'll run pending finalizers but notice that we're already doing that and
 *  return.
 *
 *  This could be expanded to allow incremental freeing: just bail out
 *  early and resume at a future alloc/decref/refzero.  However, if that
 *  were done, the list structure would need to be kept consistent at all
 *  times, mark-and-sweep would need to handle refzero_list, etc.
 */

/* Process refzero_list to completion: refcount finalize and free each
 * queued object.  Called only when the caller has just inserted the first
 * (and at that point only) element; objects queued during the cascade are
 * inserted at the list head and reached by walking 'prev' pointers.  Note
 * that 'next' pointers on the list are intentionally garbage (see the
 * comment block above duk__refcount_free_pending's caller section).
 */
DUK_LOCAL void duk__refcount_free_pending(duk_heap *heap) {
	duk_heaphdr *curr;
#if defined(DUK_USE_DEBUG)
	duk_int_t count = 0;
#endif

	DUK_ASSERT(heap != NULL);

	curr = heap->refzero_list;
	DUK_ASSERT(curr != NULL);
	DUK_ASSERT(DUK_HEAPHDR_GET_PREV(heap, curr) == NULL); /* We're called on initial insert only. */
	/* curr->next is GARBAGE. */

	do {
		duk_heaphdr *prev;

		DUK_DDD(DUK_DDDPRINT("refzero processing %p: %!O", (void *) curr, (duk_heaphdr *) curr));

#if defined(DUK_USE_DEBUG)
		count++;
#endif

		DUK_ASSERT(curr != NULL);
		DUK_ASSERT(DUK_HEAPHDR_GET_TYPE(curr) == DUK_HTYPE_OBJECT); /* currently, always the case */
		/* FINALIZED may be set; don't care about flags here. */

		/* Refcount finalize 'curr'.  Refzero_list must be non-NULL
		 * here to prevent recursive entry to duk__refcount_free_pending().
		 */
		DUK_ASSERT(heap->refzero_list != NULL);
		duk_hobject_refcount_finalize_norz(heap, (duk_hobject *) curr);

		prev = DUK_HEAPHDR_GET_PREV(heap, curr);
		DUK_ASSERT((prev == NULL && heap->refzero_list == curr) || (prev != NULL && heap->refzero_list != curr));
		/* prev->next is intentionally not updated and is garbage. */

		duk_free_hobject(heap, (duk_hobject *) curr); /* Invalidates 'curr'. */

		curr = prev;
	} while (curr != NULL);

	heap->refzero_list = NULL;

	DUK_DD(DUK_DDPRINT("refzero processed %ld objects", (long) count));
}

/* Handle a refzero'd duk_hobject: remove it from heap_allocated, then
 * either queue it to finalize_list (object has a finalizer that hasn't run
 * yet) or to refzero_list for freeing.  When this insert creates the first
 * refzero_list entry, the whole DECREF cascade is processed inline here.
 * 'skip_free_pending' (set by NORZ call sites) suppresses finalize_list
 * processing, not the freeing cascade itself.
 */
DUK_LOCAL DUK_INLINE void duk__refcount_refzero_hobject(duk_heap *heap, duk_hobject *obj, duk_bool_t skip_free_pending) {
	duk_heaphdr *hdr;
	duk_heaphdr *root;

	DUK_ASSERT(heap != NULL);
	DUK_ASSERT(heap->heap_thread != NULL);
	DUK_ASSERT(obj != NULL);
	DUK_ASSERT(DUK_HEAPHDR_GET_TYPE((duk_heaphdr *) obj) == DUK_HTYPE_OBJECT);

	hdr = (duk_heaphdr *) obj;

	/* Refzero'd objects must be in heap_allocated.  They can't be in
	 * finalize_list because all objects on finalize_list have an
	 * artificial +1 refcount bump.
	 */
#if defined(DUK_USE_ASSERTIONS)
	DUK_ASSERT(duk_heap_in_heap_allocated(heap, (duk_heaphdr *) obj));
#endif

	DUK_HEAP_REMOVE_FROM_HEAP_ALLOCATED(heap, hdr);

#if defined(DUK_USE_FINALIZER_SUPPORT)
	/* This finalizer check MUST BE side effect free.  It should also be
	 * as fast as possible because it's applied to every object freed.
	 */
	if (DUK_UNLIKELY(DUK_HOBJECT_HAS_FINALIZER_FAST(heap, (duk_hobject *) hdr) != 0U)) {
		/* Special case: FINALIZED may be set if mark-and-sweep queued
		 * object for finalization, the finalizer was executed (and
		 * FINALIZED set), mark-and-sweep hasn't yet processed the
		 * object again, but its refcount drops to zero.  Free without
		 * running the finalizer again.
		 */
		if (DUK_HEAPHDR_HAS_FINALIZED(hdr)) {
			DUK_D(DUK_DPRINT("refzero'd object has finalizer and FINALIZED is set -> free"));
		} else {
			/* Set FINALIZABLE flag so that all objects on finalize_list
			 * will have it set and are thus detectable based on the
			 * flag alone.
			 */
			DUK_HEAPHDR_SET_FINALIZABLE(hdr);
			DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZED(hdr));

#if defined(DUK_USE_REFERENCE_COUNTING)
			/* Bump refcount on finalize_list insert so that a
			 * refzero can never occur when an object is waiting
			 * for its finalizer call.  Refzero might otherwise
			 * now happen because we allow duk_push_heapptr() for
			 * objects pending finalization.
			 */
			DUK_HEAPHDR_PREINC_REFCOUNT(hdr);
#endif
			DUK_HEAP_INSERT_INTO_FINALIZE_LIST(heap, hdr);

			/* Process finalizers unless skipping is explicitly
			 * requested (NORZ) or refzero_list is being processed
			 * (avoids side effects during a refzero cascade).
			 * If refzero_list is processed, the initial refzero
			 * call will run pending finalizers when refzero_list
			 * is done.
			 */
			if (!skip_free_pending && heap->refzero_list == NULL) {
				duk_heap_process_finalize_list(heap);
			}
			return;
		}
	}
#endif /* DUK_USE_FINALIZER_SUPPORT */

	/* No need to finalize, free object via refzero_list. */

	root = heap->refzero_list;

	DUK_HEAPHDR_SET_PREV(heap, hdr, NULL);
	/* 'next' is left as GARBAGE. */
	heap->refzero_list = hdr;

	if (root == NULL) {
		/* Object is now queued.  Refzero_list was NULL so
		 * no-one is currently processing it; do it here.
		 * With refzero processing just doing a cascade of
		 * free calls, we can process it directly even when
		 * NORZ macros are used: there are no side effects.
		 */
		duk__refcount_free_pending(heap);
		DUK_ASSERT(heap->refzero_list == NULL);

		/* Process finalizers only after the entire cascade
		 * is finished.  In most cases there's nothing to
		 * finalize, so fast path check to avoid a call.
		 */
#if defined(DUK_USE_FINALIZER_SUPPORT)
		if (!skip_free_pending && DUK_UNLIKELY(heap->finalize_list != NULL)) {
			duk_heap_process_finalize_list(heap);
		}
#endif
	} else {
		DUK_ASSERT(DUK_HEAPHDR_GET_PREV(heap, root) == NULL);
		DUK_HEAPHDR_SET_PREV(heap, root, hdr);

		/* Object is now queued.  Because refzero_list was
		 * non-NULL, it's already being processed by someone
		 * in the C call stack, so we're done.
		 */
	}
}

#if defined(DUK_USE_FINALIZER_SUPPORT)
/* Fast-path check: run pending finalizers if finalize_list is non-empty.
 * Always-inlined variant used on hot paths.
 */
DUK_INTERNAL DUK_ALWAYS_INLINE void duk_refzero_check_fast(duk_hthread *thr) {
	duk_heap *heap;

	DUK_ASSERT(thr != NULL);
	DUK_ASSERT(thr->heap != NULL);
	DUK_ASSERT(thr->heap->refzero_list == NULL); /* Processed to completion inline. */

	heap = thr->heap;
	if (DUK_UNLIKELY(heap->finalize_list != NULL)) {
		duk_heap_process_finalize_list(heap);
	}
}

/* Out-of-line variant of duk_refzero_check_fast(): run pending finalizers
 * if finalize_list is non-empty.
 */
DUK_INTERNAL void duk_refzero_check_slow(duk_hthread *thr) {
	duk_heap *heap;

	DUK_ASSERT(thr != NULL);
	DUK_ASSERT(thr->heap != NULL);
	DUK_ASSERT(thr->heap->refzero_list == NULL); /* Processed to completion inline. */

	heap = thr->heap;
	if (DUK_UNLIKELY(heap->finalize_list != NULL)) {
		duk_heap_process_finalize_list(heap);
	}
}
#endif /* DUK_USE_FINALIZER_SUPPORT */

/*
 *  Refzero processing for duk_hstring.
 */

/* Refzero processing for a duk_hstring: remove the string from the string
 * cache and unlink it from the string table, then free it.  (Unlike the
 * hbuffer variant there is no heap_allocated unlink here -- presumably
 * strings are tracked via the string table instead; confirm against
 * string table code.)
 */
DUK_LOCAL DUK_INLINE void duk__refcount_refzero_hstring(duk_heap *heap, duk_hstring *str) {
	DUK_ASSERT(heap != NULL);
	DUK_ASSERT(heap->heap_thread != NULL);
	DUK_ASSERT(str != NULL);
	DUK_ASSERT(DUK_HEAPHDR_GET_TYPE((duk_heaphdr *) str) == DUK_HTYPE_STRING);

	duk_heap_strcache_string_remove(heap, str);
	duk_heap_strtable_unlink(heap, str);
	duk_free_hstring(heap, str);
}

/*
 *  Refzero processing for duk_hbuffer.
 */

/* Refzero processing for a duk_hbuffer: unlink from heap_allocated and
 * free immediately (buffers hold no refcounted internal references).
 */
DUK_LOCAL DUK_INLINE void duk__refcount_refzero_hbuffer(duk_heap *heap, duk_hbuffer *buf) {
	DUK_ASSERT(heap != NULL);
	DUK_ASSERT(heap->heap_thread != NULL);
	DUK_ASSERT(buf != NULL);
	DUK_ASSERT(DUK_HEAPHDR_GET_TYPE((duk_heaphdr *) buf) == DUK_HTYPE_BUFFER);

	DUK_HEAP_REMOVE_FROM_HEAP_ALLOCATED(heap, (duk_heaphdr *) buf);
	duk_free_hbuffer(heap, buf);
}

/*
 *  Incref and decref functions.
 *
 *  Decref may trigger immediate refzero handling, which may free and finalize
 *  an arbitrary number of objects (a "DECREF cascade").
 *
 *  Refzero handling is skipped entirely if (1) mark-and-sweep is running or
 *  (2) execution is paused in the debugger.  The objects are left in the heap,
 *  and will be freed by mark-and-sweep or eventual heap destruction.
 *
 *  This is necessary during mark-and-sweep because refcounts are also updated
 *  during the sweep phase (otherwise objects referenced by a swept object
 *  would have incorrect refcounts) which then calls here.  This could be
 *  avoided by using separate decref macros in mark-and-sweep; however,
 *  mark-and-sweep also calls finalizers which would use the ordinary decref
 *  macros anyway.
 *
 *  We can't process refzeros (= free objects) when the debugger is running
 *  as the debugger might make an object unreachable but still continue
 *  inspecting it (or even cause it to be pushed back).  So we must rely on
 *  mark-and-sweep to collect them.
 *
 *  The DUK__RZ_SUPPRESS_CHECK() condition is also used in heap destruction
 *  when running finalizers for remaining objects: the flag prevents objects
 *  from being moved around in heap linked lists while that's being done.
 *
 *  The suppress condition is important to performance.
 */

/* Assert invariants required by the refzero suppression check; expects a
 * 'thr' variable in scope at the expansion site.
 */
#define DUK__RZ_SUPPRESS_ASSERT1() \
	do { \
		DUK_ASSERT(thr != NULL); \
		DUK_ASSERT(thr->heap != NULL); \
		/* When mark-and-sweep runs, heap_thread must exist. */ \
		DUK_ASSERT(thr->heap->ms_running == 0 || thr->heap->heap_thread != NULL); \
		/* In normal operation finalizers are executed with ms_running == 0 \
		 * so we should never see ms_running == 1 and thr != heap_thread. \
		 * In heap destruction finalizers are executed with ms_running != 0 \
		 * to e.g. prevent refzero; a special value ms_running == 2 is used \
		 * in that case so it can be distinguished from the normal runtime \
		 * case, and allows a stronger assertion here (GH-2030). \
		 */ \
		DUK_ASSERT(!(thr->heap->ms_running == 1 && thr != thr->heap->heap_thread)); \
		/* We may be called when the heap is initializing and we process \
		 * refzeros normally, but mark-and-sweep and finalizers are prevented \
		 * if that's the case. \
		 */ \
		DUK_ASSERT(thr->heap->heap_initializing == 0 || thr->heap->ms_prevent_count > 0); \
		DUK_ASSERT(thr->heap->heap_initializing == 0 || thr->heap->pf_prevent_count > 0); \
	} while (0)

/* NOTE(review): DUK__RZ_SUPPRESS_COND() is intentionally identical in both
 * branches: while the debugger is paused ms_running is set (asserted below),
 * so the single ms_running check also covers the debugger-paused case.
 */
#if defined(DUK_USE_DEBUGGER_SUPPORT)
#define DUK__RZ_SUPPRESS_ASSERT2() \
	do { \
		/* When debugger is paused, ms_running is set. */ \
		DUK_ASSERT(!DUK_HEAP_HAS_DEBUGGER_PAUSED(thr->heap) || thr->heap->ms_running != 0); \
	} while (0)
#define DUK__RZ_SUPPRESS_COND() (heap->ms_running != 0)
#else
#define DUK__RZ_SUPPRESS_ASSERT2() \
	do { \
	} while (0)
#define DUK__RZ_SUPPRESS_COND() (heap->ms_running != 0)
#endif /* DUK_USE_DEBUGGER_SUPPORT */

lib/JavaScript/Embedded/C/lib/duktape.c  view on Meta::CPAN

		DUK_ASSERT(h_key != NULL);
		if (DUK_HSTRING_HAS_HIDDEN(h_key)) {
			/* Symbol accesses must go through proxy lookup in ES2015.
			 * Hidden symbols behave like Duktape 1.x internal keys
			 * and currently won't.
			 */
			DUK_DDD(DUK_DDDPRINT("hidden key, skip proxy handler and apply to target"));
			return 0;
		}
	}

	/* The handler is looked up with a normal property lookup; it may be an
	 * accessor or the handler object itself may be a proxy object.  If the
	 * handler is a proxy, we need to extend the valstack as we make a
	 * recursive proxy check without a function call in between (in fact
	 * there is no limit to the potential recursion here).
	 *
	 * (For sanity, proxy creation rejects another proxy object as either
	 * the handler or the target at the moment so recursive proxy cases
	 * are not realized now.)
	 */

	/* XXX: C recursion limit if proxies are allowed as handler/target values */

	duk_require_stack(thr, DUK__VALSTACK_PROXY_LOOKUP);
	duk_push_hobject(thr, h_handler);
	if (duk_get_prop_stridx_short(thr, -1, stridx_trap)) {
		/* -> [ ... handler trap ] */
		duk_insert(thr, -2); /* -> [ ... trap handler ] */

		/* stack prepped for func call: [ ... trap handler ] */
		return 1;
	} else {
		duk_pop_2_unsafe(thr);
		return 0;
	}
}
#endif /* DUK_USE_ES6_PROXY */

/*
 *  Reallocate property allocation, moving properties to the new allocation.
 *
 *  Includes key compaction, rehashing, and can also optionally abandon
 *  the array part, 'migrating' array entries into the beginning of the
 *  new entry part.
 *
 *  There is no support for in-place reallocation or just compacting keys
 *  without resizing the property allocation.  This is intentional to keep
 *  code size minimal, but would be useful future work.
 *
 *  The implementation is relatively straightforward, except for the array
 *  abandonment process.  Array abandonment requires that new string keys
 *  are interned, which may trigger GC.  All keys interned so far must be
 *  reachable for GC at all times and correctly refcounted for; valstack is
 *  used for that now.
 *
 *  Also, a GC triggered during this reallocation process must not interfere
 *  with the object being resized.  This is currently controlled by preventing
 *  finalizers (as they may affect ANY object) and object compaction in
 *  mark-and-sweep.  It would suffice to protect only this particular object
 *  from compaction, however.  DECREF refzero cascades are side effect free
 *  and OK.
 *
 *  Note: because we need to potentially resize the valstack (as part
 *  of abandoning the array part), any tval pointers to the valstack
 *  will become invalid after this call.
 */

DUK_INTERNAL void duk_hobject_realloc_props(duk_hthread *thr,
                                            duk_hobject *obj,
                                            duk_uint32_t new_e_size,
                                            duk_uint32_t new_a_size,
                                            duk_uint32_t new_h_size,
                                            duk_bool_t abandon_array) {
	duk_small_uint_t prev_ms_base_flags;
	duk_uint32_t new_alloc_size;
	duk_uint32_t new_e_size_adjusted;
	duk_uint8_t *new_p;
	duk_hstring **new_e_k;
	duk_propvalue *new_e_pv;
	duk_uint8_t *new_e_f;
	duk_tval *new_a;
	duk_uint32_t *new_h;
	duk_uint32_t new_e_next;
	duk_uint_fast32_t i;
	duk_size_t array_copy_size;
#if defined(DUK_USE_ASSERTIONS)
	duk_bool_t prev_error_not_allowed;
#endif

	DUK_ASSERT(thr != NULL);
	DUK_ASSERT(obj != NULL);
	DUK_ASSERT(!abandon_array || new_a_size == 0); /* if abandon_array, new_a_size must be 0 */
	DUK_ASSERT(DUK_HOBJECT_GET_PROPS(thr->heap, obj) != NULL ||
	           (DUK_HOBJECT_GET_ESIZE(obj) == 0 && DUK_HOBJECT_GET_ASIZE(obj) == 0));
	DUK_ASSERT(new_h_size == 0 || new_h_size >= new_e_size); /* required to guarantee success of rehashing,
	                                                          * intentionally use unadjusted new_e_size
	                                                          */
	DUK_ASSERT(!DUK_HEAPHDR_HAS_READONLY((duk_heaphdr *) obj));
	DUK_ASSERT_VALSTACK_SPACE(thr, DUK__VALSTACK_SPACE);

	DUK_STATS_INC(thr->heap, stats_object_realloc_props);

	/*
	 *  Pre resize assertions.
	 */

#if defined(DUK_USE_ASSERTIONS)
	/* XXX: pre-checks (such as no duplicate keys) */
#endif

	/*
	 *  For property layout 1, tweak e_size to ensure that the whole entry
	 *  part (key + val + flags) is a suitable multiple for alignment
	 *  (platform specific).
	 *
	 *  Property layout 2 does not require this tweaking and is preferred
	 *  on low RAM platforms requiring alignment.
	 */

#if defined(DUK_USE_HOBJECT_LAYOUT_2) || defined(DUK_USE_HOBJECT_LAYOUT_3)

lib/JavaScript/Embedded/C/lib/duktape.c  view on Meta::CPAN

		duk_hthread *resumer;

		for (;;) {
			act = thr->callstack_curr;
			if (act == NULL) {
				break;
			}

			for (;;) {
				cat = act->cat;
				if (cat == NULL) {
					break;
				}

				if (DUK_CAT_HAS_CATCH_ENABLED(cat)) {
					DUK_ASSERT(DUK_CAT_GET_TYPE(cat) == DUK_CAT_TYPE_TCF);

					DUK_DDD(DUK_DDDPRINT("before catch part 1: thr=%p, act=%p, cat=%p",
					                     (void *) thr,
					                     (void *) act,
					                     (void *) act->cat));
					duk__handle_catch_part1(thr,
					                        &thr->heap->lj.value1,
					                        DUK_LJ_TYPE_THROW,
					                        out_delayed_catch_setup);

					DUK_DD(DUK_DDPRINT("-> throw caught by a 'catch' clause, restart execution"));
					retval = DUK__LONGJMP_RESTART;
					goto wipe_and_return;
				}

				if (DUK_CAT_HAS_FINALLY_ENABLED(cat)) {
					DUK_ASSERT(DUK_CAT_GET_TYPE(cat) == DUK_CAT_TYPE_TCF);
					DUK_ASSERT(!DUK_CAT_HAS_CATCH_ENABLED(cat));

					duk__handle_finally(thr, &thr->heap->lj.value1, DUK_LJ_TYPE_THROW);

					DUK_DD(DUK_DDPRINT("-> throw caught by a 'finally' clause, restart execution"));
					retval = DUK__LONGJMP_RESTART;
					goto wipe_and_return;
				}

				duk_hthread_catcher_unwind_norz(thr, act);
			}

			if (act == entry_act) {
				/* Not caught by anything before entry level; rethrow and let the
				 * final catcher finish unwinding (esp. value stack).
				 */
				DUK_D(DUK_DPRINT("-> throw propagated up to entry level, rethrow and exit bytecode executor"));
				retval = DUK__LONGJMP_RETHROW;
				goto just_return;
			}

			duk_hthread_activation_unwind_norz(thr);
		}

		DUK_DD(DUK_DDPRINT("-> throw not caught by current thread, yield error to resumer and recheck longjmp"));

		/* Not caught by current thread, thread terminates (yield error to resumer);
		 * note that this may cause a cascade if the resumer terminates with an uncaught
		 * exception etc (this is OK, but needs careful testing).
		 */

		DUK_ASSERT(thr->resumer != NULL);
		DUK_ASSERT(thr->resumer->callstack_top >= 2); /* ECMAScript activation + Duktape.Thread.resume() activation */
		DUK_ASSERT(thr->resumer->callstack_curr != NULL);
		DUK_ASSERT(thr->resumer->callstack_curr->parent != NULL);
		DUK_ASSERT(
		    DUK_ACT_GET_FUNC(thr->resumer->callstack_curr->parent) != NULL &&
		    DUK_HOBJECT_IS_COMPFUNC(DUK_ACT_GET_FUNC(thr->resumer->callstack_curr->parent))); /* an ECMAScript function */

		resumer = thr->resumer;

		/* reset longjmp */

		DUK_ASSERT(thr->heap->lj.type == DUK_LJ_TYPE_THROW); /* already set */
		/* lj.value1 already set */

		duk_hthread_terminate(thr); /* updates thread state, minimizes its allocations */
		DUK_ASSERT(thr->state == DUK_HTHREAD_STATE_TERMINATED);

		thr->resumer = NULL;
		DUK_HTHREAD_DECREF_NORZ(thr, resumer);
		resumer->state = DUK_HTHREAD_STATE_RUNNING;
		DUK_HEAP_SWITCH_THREAD(thr->heap, resumer);
		thr = resumer;
		goto check_longjmp;
	}

	case DUK_LJ_TYPE_BREAK: /* pseudotypes, not used in actual longjmps */
	case DUK_LJ_TYPE_CONTINUE:
	case DUK_LJ_TYPE_RETURN:
	case DUK_LJ_TYPE_NORMAL:
	default: {
		/* should never happen, but be robust */
		DUK_D(DUK_DPRINT("caught unknown longjmp type %ld, treat as internal error", (long) thr->heap->lj.type));
		goto convert_to_internal_error;
	}

	} /* end switch */

	DUK_UNREACHABLE();

wipe_and_return:
	DUK_DD(DUK_DDPRINT("handling longjmp done, wipe-and-return, top=%ld", (long) duk_get_top(thr)));
	thr->heap->lj.type = DUK_LJ_TYPE_UNKNOWN;
	thr->heap->lj.iserror = 0;

	DUK_TVAL_SET_UNDEFINED_UPDREF(thr, &thr->heap->lj.value1); /* side effects */
	DUK_TVAL_SET_UNDEFINED_UPDREF(thr, &thr->heap->lj.value2); /* side effects */

	DUK_GC_TORTURE(thr->heap);

just_return:
	return retval;

convert_to_internal_error:
	/* This could also be thrown internally (set the error, goto check_longjmp),
	 * but it's better for internal errors to bubble outwards so that we won't
	 * infinite loop in this catchpoint.



( run in 1.013 second using v1.01-cache-2.11-cpan-acebb50784d )