# Devel-StatProfiler — lib/Devel/StatProfiler/EvalSource.pm
# (MetaCPAN listing header: "view release on metacpan or search on metacpan")
use strict;
use warnings;
use Devel::StatProfiler::EvalSourceStorage;
use Devel::StatProfiler::Utils qw(
check_serializer
read_data
read_file
state_dir
utf8_sha1
write_data_any
write_file
);
use File::Path;
use File::Spec::Functions;
# Construct an EvalSource aggregator.
#
# Named options:
#   serializer     - serializer name, validated by check_serializer()
#                    (defaults to 'storable')
#   root_directory - profile root; when given, eval source bodies are kept
#                    in an EvalSourceStorage rooted at <root>/__source__
#   shard          - shard identifier, stored as-is
#   genealogy      - shared process-genealogy hash, stored as-is
#
# Returns the blessed instance; dies (via check_serializer) on an
# unsupported serializer.
sub new {
    my ($class, %opts) = @_;
    my $self = bless {
        all             => {},   # per-process/per-ordinal eval source hashes
        seen_in_process => {},   # dedup map: process id -> eval name -> hash
        hashed          => {},   # content hash -> source text
        serializer      => $opts{serializer} || 'storable',
        root_dir        => $opts{root_directory},
        shard           => $opts{shard},
        genealogy       => $opts{genealogy},
        storage         => undef,
    }, $class;

    if ($self->{root_dir}) {
        # catdir is imported by default from File::Spec::Functions (L17);
        # no need for the fully-qualified name.
        my $storage_base = catdir($self->{root_dir}, '__source__');
        $self->{storage} = Devel::StatProfiler::EvalSourceStorage->new(
            base_dir => $storage_base,
        );  # was terminated with a stray ',' (comma operator); now ';'
    }

    check_serializer($self->{serializer});
    return $self;
}
# 20 NUL bytes — the same width as a SHA-1 digest (cf. the 20-byte hashes
# produced by utf8_sha1 and the `length(...)/20` arithmetic in _pack_data).
# Presumably a placeholder for a missing eval source in a packed hash
# string; its uses are outside this chunk — TODO confirm.
my $HOLE = "\x00" x 20;
# Pull all eval source code out of a trace-file reader and record it.
#
# For each eval name reported by the reader, the source text is hashed
# (utf8_sha1) and indexed three ways: per-process dedup map, the sparse
# per-(process, ordinal) map consumed later by _pack_data, and the global
# hash -> source-text table. Warns if the same eval name reappears within
# one process with different source code.
sub add_sources_from_reader {
    my ($self, $reader) = @_;

    my ($process_id, $process_ordinal) = @{$reader->get_genealogy_info};
    my $sources = $reader->get_source_code;
    my $seen    = $self->{seen_in_process}{$process_id} //= {};

    for my $eval_name (keys %$sources) {
        my $code = $sources->{$eval_name};
        my $hash = utf8_sha1($code);

        warn "Duplicate eval STRING source code for eval '$eval_name'"
            if exists $seen->{$eval_name} && $seen->{$eval_name} ne $hash;

        $seen->{$eval_name} = $hash;
        $self->{all}{$process_id}{$process_ordinal}{sparse}{$eval_name} = $hash;
        $self->{hashed}{$hash} = $code;
    }
}
# Record the parent (process id, ordinal) pair for one incarnation of a
# process. Overwrites any previous entry for the same (id, ordinal) key.
sub update_genealogy {
    my ($self, $process_id, $process_ordinal, $parent_id, $parent_ordinal) = @_;

    my $by_ordinal = $self->{genealogy}{$process_id} //= {};
    $by_ordinal->{$process_ordinal} = [$parent_id, $parent_ordinal];
}
# this tries to optimize for the case where we dumped all evals, the number
# of evals is unlikely to be an issue when we only dump traced ones
#
# _pack_data: compact per-(process, ordinal) eval-source hashes.
# Each ->{all}{$pid}{$ordinal} entry holds:
#   sparse => { "(eval N)" => 20-byte hash, ... }   not-yet-packed entries
#   packed => concatenation of 20-byte hashes for a contiguous run of eval
#             indices starting at ->{first}
# When the sparse keys form a contiguous run, their hashes are appended to
# `packed` and removed from `sparse`; otherwise the entry is left sparse.
sub _pack_data {
my ($self) = @_;
my $all = $self->{all};
for my $process_id (keys %$all) {
ORDINAL: for my $ordinal (keys %{$all->{$process_id}}) {
my $first = $all->{$process_id}{$ordinal}{first};
# $next = index of the first eval not yet packed: each hash is exactly
# 20 bytes, so length(packed)/20 counts the already-packed evals.
# ($next stays false when no packed run exists yet.)
my $next = $first && $first + length($all->{$process_id}{$ordinal}{packed}) / 20;
# files are processed in sequential order, and either we have all the
# evals handed to us in order, or we have holes in the sequence
# (depending on save_source mode)
next if $first && !exists $all->{$process_id}{$ordinal}{sparse}{"(eval $next)"};
# numeric eval indices present in the sparse map, ascending
my @indices = sort { $a <=> $b }
map { /^\(eval ([0-9]+)\)$/ ? ($1) : () }
keys %{$all->{$process_id}{$ordinal}{sparse}};
my $curr;
if (!$first) {
# no packed run yet: scan the sorted indices and bail out to the
# next ordinal at the first gap; otherwise remember the run start
for my $index (@indices) {
if (!$first) {
$first = $index;
$next = $first + 1;
} elsif ($next == $index) {
++$next
} else {
# not contiguous, bail out
next ORDINAL;
}
}
$all->{$process_id}{$ordinal}{first} = $curr = $first;
} else {
$curr = $next;
}
# Append hashes for $curr, $curr+1, ...; the loop variable $name is
# deliberately unused — @indices only supplies the count while the key
# is rebuilt from $curr. NOTE(review): this assumes the sparse entries
# form one contiguous run starting at $curr; a hole would concatenate
# undef (and warn) — TODO confirm callers guarantee contiguity here.
for my $name (@indices) {
my $hash = delete $all->{$process_id}{$ordinal}{sparse}{"(eval $curr)"};
$all->{$process_id}{$ordinal}{packed} .= $hash;
++$curr;
}
}
}
}
sub _save {
my ($self, $state_dir, $is_part) = @_;
$state_dir //= state_dir($self);
$self->_pack_data;
# $self->{seen_in_process} can be reconstructed from $self->{all}
write_data_any($is_part, $self, $state_dir, 'source', $self->{all})
if %{$self->{all}};
# (MetaCPAN page footer: "run in 1.735 second using v1.01-cache-2.11-cpan-39bf76dae61")