Devel-StatProfiler
view release on metacpan or search on metacpan
src/runloop.cpp view on Meta::CPAN
strcpy(dst, "Time::HiRes::");
strcat(dst, name);
CV *src_cv = get_cv(src, 0);
if (!src_cv)
croak("Unable to get source XSUB for '%s'", src);
CV *dst_cv = get_cv(dst, 0);
if (!dst_cv)
croak("Unable to get source XSUB for '%s'", dst);
CvXSUB(dst_cv) = CvXSUB(src_cv);
}
void
devel::statprofiler::test_enable()
{
    // Switch the profiler into test mode: install the deterministic
    // counter thread body and replace the timing primitives so the
    // tests do not depend on real wall-clock time.
    increment_counter_function = &test_increment_counter;

    // Redirect the Time::HiRes XSUBs to the fake implementations.
    fake_hires_function("usleep");
    fake_hires_function("sleep");
    fake_hires_function("time");

    // Hook a handful of opcodes, saving each original handler before
    // installing its test replacement.
    orig_ftdir = PL_ppaddr[OP_FTDIR];
    PL_ppaddr[OP_FTDIR] = test_ftdir;

    orig_unstack = PL_ppaddr[OP_UNSTACK];
    PL_ppaddr[OP_UNSTACK] = test_unstack;

    orig_subst = PL_ppaddr[OP_SUBST];
    PL_ppaddr[OP_SUBST] = test_subst;

    orig_nextstate = PL_ppaddr[OP_NEXTSTATE];
    PL_ppaddr[OP_NEXTSTATE] = test_nextstate;
}
double
devel::statprofiler::test_hires_usleep(unsigned int usec)
{
    // Fake Time::HiRes::usleep: advance the simulated sample clock by
    // the requested number of microseconds instead of actually
    // sleeping, then report back the interval that was "slept".
    test_force_sample(usec);
    return static_cast<double>(usec);
}
double
devel::statprofiler::test_hires_sleep(double sleep)
{
    // Fake Time::HiRes::sleep: convert seconds to microseconds
    // (truncating toward zero) and delegate to the usleep fake.
    const double usec = sleep * 1000000;
    test_hires_usleep(static_cast<unsigned int>(usec));
    return sleep;
}
double
devel::statprofiler::test_hires_time()
{
    // Fake Time::HiRes::time: a fixed epoch plus the simulated elapsed
    // time — one sampling interval (expressed in seconds) per counter
    // tick.
    const double interval_seconds = sampling_interval / 1000000.0;
    return 1234567890 + counter * interval_seconds;
}
#if defined(_WIN32)
// Busy-wait for approximately nsec nanoseconds using the Windows
// high-resolution performance counter (Windows has no nanosleep()).
// Fix: the original read "QueryPerformanceCounter((LARGE_INTEGER *) ¤t)"
// — an HTML-entity mangling of "&current" ("&curren" is the ¤ sign) —
// which does not compile; restored the address-of expression.
static void
win32_nanosleep_busywait(unsigned nsec) {
    LONGLONG current, wanted;

    QueryPerformanceCounter((LARGE_INTEGER *) &wanted);
    // Convert the nanosecond delay into performance-counter ticks.
    wanted += nsec * performance_counter_frequency / 1000000000;
    do {
        QueryPerformanceCounter((LARGE_INTEGER *) &current);
    } while (current < wanted);
}
#endif
void
devel::statprofiler::test_force_sample(unsigned int increment)
{
if (increment_counter_function != &test_increment_counter)
return;
dTHX;
dMY_CXT;
static unsigned int seed = rand_seed();
// we could just increment the counter by increment and be done
// with it, but this way the test is more realistic (the counter
// is incremented by a separate thread, and we test the thread is
// running)
rand(&seed);
test_counter_increment_mutex.lock();
test_counter_increment += increment + (seed % increment) / 5;
if (test_counter_increment < sampling_interval) {
test_counter_increment_mutex.unlock();
return;
}
test_counter_increment_mutex.unlock();
// ugly and inefficient, but good enough for testing
for (;MY_CXT.outer_runloop;) {
test_counter_increment_mutex.lock();
if (test_counter_increment < sampling_interval) {
test_counter_increment_mutex.unlock();
break;
}
test_counter_increment_mutex.unlock();
#if defined(_WIN32)
win32_nanosleep_busywait(100000);
#else
timespec sleep = {0, 100000};
while (nanosleep(&sleep, &sleep) == EINTR)
;
#endif
}
if (!MY_CXT.outer_runloop) {
test_counter_increment_mutex.lock();
counter += test_counter_increment / sampling_interval;
test_counter_increment %= sampling_interval;
test_counter_increment_mutex.unlock();
}
}
static void
test_increment_counter(CounterCxt *cxt)
{
    // Body of the test-mode counter thread: periodically folds the
    // pending (test-driven) time increment into the global sample
    // counter. The context carries no state we need, so free it now.
    delete cxt;
    for (;;) {
#if defined(_WIN32)
        win32_nanosleep_busywait(100000);
#else
        timespec sleep = {0, 100000};
        // nanosleep() signals interruption by returning -1 with errno
        // set to EINTR; the original compared the return value against
        // EINTR, which never matches, so interrupted sleeps were not
        // retried.
        while (nanosleep(&sleep, &sleep) == -1 && errno == EINTR)
            ;
#endif
        test_counter_increment_mutex.lock();
        if (test_counter_increment >= sampling_interval) {
            counter += test_counter_increment / sampling_interval;
            test_counter_increment %= sampling_interval;
        }
        test_counter_increment_mutex.unlock();
        // When we hold the last reference, drop it and exit.
        // NOTE(review): the outer refcount read is unlocked (classic
        // double-checked pattern); presumably benign since it is
        // re-checked under refcount_mutex, but confirm the unlocked
        // read of refcount is safe on the targeted platforms.
        if (refcount == 1) {
            refcount_mutex.lock();
            // avoid the complex termination logic in the non-test function
            if (refcount == 1) {
                --refcount;
                refcount_mutex.unlock();
                return;
            } else {
                refcount_mutex.unlock();
            }
        }
    }
}
// Perl magic vtable attached to SVs whose eval text must be saved:
// only the free hook is populated, so write_eval_if_needed() fires
// when the magic-carrying SV is destroyed. The remaining MGVTBL
// slots (if any beyond these five) are zero-initialized.
MGVTBL Devel_StatProfiler_save_eval_txt_vtbl = {
    NULL, // get
    NULL, // set
    NULL, // len
    NULL, // clear
    write_eval_if_needed, // free
};
( run in 1.700 second using v1.01-cache-2.11-cpan-39bf76dae61 )