AC-Yenta
view release on metacpan or search on metacpan
--- #YAML:1.0
name: AC-Yenta
version: 1.1
abstract: eventually-consistent distributed key/value data store. et al.
author:
- AdCopy <http://www.adcopy.com>
license: perl
distribution_type: module
configure_requires:
ExtUtils::MakeMaker: 0
requires:
AC::DC: 0
BerkeleyDB: 0
Crypt::Rijndael: 0
Digest::SHA: 0
Google::ProtocolBuffers: 0
JSON: 0
POSIX: 0
Sys::Hostname: 0
lib/AC/Yenta.pm view on Meta::CPAN
=head2 Kibitzing
Each yenta kibitzes (gossips) with the other yentas in the network
to exchange status information, distribute key-value data, and
detect and correct inconsistent data.
=head2 Eventual Consistency
Key-value data is versioned with timestamps. By default, newest wins.
Maps can be configured to keep and return multiple versions and client
code can use other conflict resolution mechanisms.
Lost, missing or otherwise inconsistent data is detected
by kibitzing merkle tree hash values.
=head2 Topological awareness
Yentas can take network topology into account when transferring
data around to minimize long-distance transfers. You will need to
write a custom C<MySelf> class with a C<my_datacenter> function.
lib/AC/Yenta.pm view on Meta::CPAN
syslog local5
=item debug
enable debugging for a particular section
debug map
=item map
configure a map (a collection of key-value data). you do not need
to configure the same set of maps on all servers. maps should be
configured similarly on all servers that they are on.
map users {
backend bdb
dbfile /home/acdata/users.ydb
history 4
}
=back
=head1 BUGS
lib/AC/Yenta/D.pm view on Meta::CPAN
return bless \$class, $class;
}
sub daemon {
my $me = shift;
my $cfile = shift;
my $opt = shift; # foreground, debugall, persistent_id, argv
die "no config file specified\n" unless $cfile;
# configure
$AC::Yenta::CONF = AC::Yenta::Config->new(
$cfile, onreload => sub {
AC::Yenta::Store::configure();
});
initlog( 'yenta', (conf_value('syslog') || 'local5'), $opt->{debugall} );
AC::Yenta::Debug->init( $opt->{debugall}, $AC::Yenta::CONF);
daemonize(5, 'yentad', $opt->{argv}) unless $opt->{foreground};
verbose("starting.");
$SIG{CHLD} = $SIG{PIPE} = sub{}; # ignore
$SIG{INT} = $SIG{TERM} = $SIG{QUIT} = \&AC::DC::IO::request_exit; # abort
# initialize subsystems
my $port = $opt->{port} || conf_value('port');
AC::Yenta::MySelf->init( $port, $opt->{persistent_id} );
AC::Yenta::Store::configure();
AC::Yenta::Status::init( $port );
AC::Yenta::Monitor::init();
AC::Yenta::NetMon::init();
AC::DC::IO::TCP::Server->new( $port, 'AC::Yenta::Server' );
verbose("server started on tcp/$port");
# start "cronjobs"
AC::DC::Sched->new(
info => 'check config files',
lib/AC/Yenta/Status.pm view on Meta::CPAN
if( $DATA->{allpeer}{$id} ){
# already known
delete $DATA->{sceptical}{$id};
return;
}
debug("rcvd update (sceptical) about $id from $io->{peerip}");
# only accept updates from the server itself
# no 3rd party updates. no misconfigured servers.
problem("server misconfigured $id != $io->{peerip}")
unless grep { inet_atoi($io->{peerip}) == $_->{ipv4} } @{$up->{ip}};
$up->{id} = $id;
delete $up->{lastup};
$DATA->{sceptical}{$id} = $up;
}
sub update {
my $class = shift;
my $id = shift; # -> server_id
lib/AC/Yenta/Store.pm view on Meta::CPAN
use AC::Yenta::Store::AE;
use AC::Yenta::Store::Expire;
use strict;
our @EXPORT = qw(store_get store_put store_want store_get_merkle store_get_internal store_set_internal store_expire store_remove);
my %STORE;
# create maps from config
sub configure {
my $maps = $AC::Yenta::CONF->{config}{map};
my %remove = %STORE;
for my $map (keys %{$maps}){
debug("configuring map $map");
my $conf = $maps->{$map};
my $sharded = $conf->{sharded};
my $c = $sharded ? 'AC::Yenta::Store::Sharded' : 'AC::Yenta::Store::Map';
( run in 0.313 second using v1.01-cache-2.11-cpan-3989ada0592 )