AI-NeuralNet-SOM
examples/eigenvector_initialization.pl
use AI::NeuralNet::SOM::Rect;

my @vs  = ([ 3, 2, 4 ], [ -1, -1, -1 ], [ 0, 4, -3 ]);   # training data
my $dim = 3;

my $nn = new AI::NeuralNet::SOM::Rect (output_dim => "5x6",
                                       input_dim  => $dim);

my @training_vectors;                                     # find these training vectors
{                                                         # and prime them with this eigenvector stuff;
    use PDL;
    my $A = pdl \@vs;
    while ($A->getdim(0) < $A->getdim(1)) {               # make the beast quadratic
        $A = append ($A, zeroes (1));                     # by padding zeroes
    }
    my ($E, $e) = eigens_sym $A;                          # $E: eigenvectors, $e: eigenvalues
#   print $E;
#   print $e;

    my @es = list $e;                                     # eigenvalues
#   warn "es : ".Dumper \@es;
    my @es_desc = sort { $b <=> $a } @es;                 # eigenvalues sorted desc
#   warn "desc: ".Dumper \@es_desc;
    my @es_idx = map { _find_num ($_, \@es) } @es_desc;   # eigenvalue indices sorted by eigenvalue (desc)
#   warn "idx: ".Dumper \@es_idx;

    sub _find_num {                                       # index of a value in a list (undef if absent)
        my $v = shift;
        my $l = shift;
        for my $i (0 .. $#$l) {
            return $i if $v == $l->[$i];
        }
        return undef;
    }
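    # Hedged sketch of how the example presumably continues: take the
    # eigenvectors belonging to the largest eigenvalues and prime the map
    # with them. That $E holds one eigenvector per dice()-able index is an
    # assumption of this sketch, not verbatim from the file.
    for my $i (@es_idx) {
        push @training_vectors, [ list $E->dice($i) ];
    }
}

$nn->initialize ($training_vectors[0]);                   # prime with the most significant eigenvector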
examples/load_save.pl

(This excerpt is identical to the eigenvector priming code shown above in
examples/eigenvector_initialization.pl.)
lib/AI/NeuralNet/SOM.pm
use AI::NeuralNet::SOM::Rect;
my $nn = new AI::NeuralNet::SOM::Rect (output_dim => "5x6",
                                       input_dim  => 3);
$nn->initialize;
$nn->train (30,
            [  3,  2,  4 ],
            [ -1, -1, -1 ],
            [  0,  4, -3 ]);
my @mes = $nn->train (30, ...); # learn about the smallest errors
# during training
print $nn->as_data; # dump the raw data
print $nn->as_string; # produce a pretty-printed string
use AI::NeuralNet::SOM::Torus;
# similar to above
use AI::NeuralNet::SOM::Hexa;
my $nn = new AI::NeuralNet::SOM::Hexa (output_dim => 6,
input_dim => 4);
$nn->initialize ( [ 0, 0, 0, 0 ] ); # all get this value
$nn->value (3, 2, [ 1, 1, 1, 1 ]); # change value for a neuron
print $nn->value (3, 2);
$nn->label (3, 2, 'Danger'); # add a label to the neuron
print $nn->label (3, 2);
=head1 DESCRIPTION
This package is a stripped-down implementation of Kohonen maps
(self-organizing maps). It is B<NOT> meant as a demonstration or for use
together with visualisation software. And while it is not (yet)
optimized for speed, some consideration has been given to keeping it
from being overly slow.
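For orientation, here is a minimal self-contained session. It assumes the
C<bmu> method for looking up the best-matching unit; treat the exact call
as an assumption of this sketch rather than a verbatim quote of the API:

use AI::NeuralNet::SOM::Rect;

my $nn = new AI::NeuralNet::SOM::Rect (output_dim => "5x6",
                                       input_dim  => 3);
$nn->initialize;                                   # random vectors in [-0.5, 0.5]
$nn->train (400, [ 3, 2, 4 ], [ -1, -1, -1 ], [ 0, 4, -3 ]);

# look up the best-matching unit (BMU) for one sample vector
my ($x, $y, $distance) = $nn->bmu ([ 3, 2, 4 ]);
print "closest neuron: ($x, $y) at distance $distance\n";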
lib/AI/NeuralNet/SOM.pm
sub label {
    my $self = shift;
    my ($x, $y) = (shift, shift);
    my $l = shift;
    return defined $l ? $self->{labels}->[$x]->[$y] = $l    # setter when a label is passed
                      : $self->{labels}->[$x]->[$y];        # getter otherwise
}
=pod
=item I<as_string>
print I<$nn>->as_string
This method creates a pretty-printed version of the current vectors.
=cut
sub as_string { die; }   # abstract; implemented by subclasses
=pod
=item I<as_data>
print I<$nn>->as_data
This method creates a string containing the raw vector data, row by
row. This can be fed into gnuplot, for instance.
=cut
sub as_data { die; }     # abstract; implemented by subclasses
=pod
lib/AI/NeuralNet/SOM/Hexa.pm
[POD diagram of the hexagonal coordinate layout; one row reads:  <0,3> <1,2> <2,1> <3,0>]
=item I<as_string>
Not implemented.
=cut
## TODO: pretty printing of this as hexagon ?
sub as_string { die "not implemented"; }
=pod
=item I<as_data>
Not implemented.
=cut
lib/AI/NeuralNet/SOM/Rect.pm
use AI::NeuralNet::SOM::Rect;
my $nn = new AI::NeuralNet::SOM::Rect (output_dim => "5x6",
input_dim => 3);
$nn->initialize;
$nn->train (30,
            [  3,  2,  4 ],
            [ -1, -1, -1 ],
            [  0,  4, -3 ]);
print $nn->as_data;
=head1 INTERFACE
=head2 Constructor
The constructor takes the following arguments (in addition to those in the base class):

=over

=item C<output_dim> : (mandatory, no default)

A string of the form C<"5x6"> specifying the map's X and Y dimensions.
lib/AI/NeuralNet/SOM/Rect.pm
}
=cut
sub as_string {
    my $self = shift;
    my $s = '';

    $s .= " ";
    for my $y (0 .. $self->{_Y}-1) {
        $s .= sprintf (" %02d ", $y);                             # column header
    }
    $s .= "\n" . "-" x 107 . "\n";                                # separator line

    my $dim = scalar @{ $self->{map}->[0]->[0] };                 # vector dimensionality
    for my $x (0 .. $self->{_X}-1) {
        for my $w (0 .. $dim-1) {                                 # one text line per vector component
            $s .= sprintf ("%02d | ", $x);                        # row header
            for my $y (0 .. $self->{_Y}-1) {
                $s .= sprintf ("% 2.2f ", $self->{map}->[$x]->[$y]->[$w]);
            }
            $s .= "\n";
        }
        $s .= "\n";
    }
    return $s;
}
=pod
=item I<as_data>
print I<$nn>->as_data
This method creates a string containing the raw vector data, row by
row. This can be fed into gnuplot, for instance.
=cut
sub as_data {
    my $self = shift;
    my $s = '';

    my $dim = scalar @{ $self->{map}->[0]->[0] };                 # vector dimensionality
    for my $x (0 .. $self->{_X}-1) {
        for my $y (0 .. $self->{_Y}-1) {
            for my $w (0 .. $dim-1) {
                $s .= sprintf ("\t%f", $self->{map}->[$x]->[$y]->[$w]);   # tab-separated components
            }
            $s .= "\n";                                           # one neuron per row
        }
    }
    return $s;
}
=pod
=back
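Because each neuron ends up as one tab-separated row, the output of
as_data can be written to a file and plotted directly; a small usage
sketch (the file name and the gnuplot command are just illustrations):

open my $fh, '>', 'som.dat' or die "cannot write som.dat: $!";
print $fh $nn->as_data;          # one tab-separated weight vector per line
close $fh;

# then, for instance, in gnuplot:
#   plot 'som.dat' using 1:2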
lib/AI/NeuralNet/SOM/Torus.pm
use AI::NeuralNet::SOM::Torus;
my $nn = new AI::NeuralNet::SOM::Torus (output_dim => "5x6",
input_dim => 3);
$nn->initialize;
$nn->train (30,
            [  3,  2,  4 ],
            [ -1, -1, -1 ],
            [  0,  4, -3 ]);
print $nn->as_data;
=head1 DESCRIPTION
This SOM is very similar to the one with a rectangular topology, except
that the rectangle's top and bottom edges are connected to form a
cylinder, and the cylinder's left and right borders are then connected
to form a torus (L<http://en.wikipedia.org/wiki/Torus>).
=head1 INTERFACE
It exposes the same interface as the base class.
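In practice the only difference is that grid distances wrap around the
edges. The following is a minimal sketch of such a wrap-around (ring)
distance, not the module's actual internals:

# shortest distance between two positions on a ring of $size cells
sub _ring_delta {
    my ($p, $q, $size) = @_;
    my $d = abs ($p - $q) % $size;
    return $d > $size / 2 ? $size - $d : $d;
}

# on a 5-column map, columns 0 and 4 are direct neighbours:
# _ring_delta (0, 4, 5) == 1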
#########################
# change 'tests => 1' to 'tests => last_test_to_print';
use Test::More qw(no_plan);
BEGIN { use_ok('AI::NeuralNet::SOM::Hexa') };
######
use Data::Dumper;
{
my $nn = new AI::NeuralNet::SOM::Hexa (output_dim => 6,
#########################
# change 'tests => 1' to 'tests => last_test_to_print';
use Test::More qw(no_plan);
BEGIN { use_ok('AI::NeuralNet::SOM::Rect') };
######
use Data::Dumper;
{
my $nn = new AI::NeuralNet::SOM::Rect (output_dim => "5x6",
input_dim => 3);
is ($nn->{_Y}, 6, 'Y');
is ($nn->{_Z}, 3, 'Z');
is ($nn->radius, 2.5, 'radius');
is ($nn->output_dim, "5x6", 'output dim');
}
{
my $nn = new AI::NeuralNet::SOM::Rect (output_dim => "5x6",
input_dim => 3);
$nn->initialize;
# print Dumper $nn;
# exit;
my @vs = ([ 3, 2, 4 ], [ -1, -1, -1 ], [ 0, 4, -3]);
$nn->train (400, @vs);
foreach my $v (@vs) {
ok (_find ($v, $nn->map), 'found learned vector '. join (",", @$v));
}
sub _find {                                          # is vector $v stored anywhere in map $m?
    my $v = shift;
    my $m = shift;
    use AI::NeuralNet::SOM::Utils;
foreach my $x ( 0 .. 4 ) {
foreach my $y ( 0 .. 5 ) {
return 1 if AI::NeuralNet::SOM::Utils::vector_distance ($m->[$x]->[$y], $v) < 0.01;
}
}
return 0;
}
ok ($nn->as_string, 'pretty print');
ok ($nn->as_data, 'raw format');
# print $nn->as_string;
}
{
my $nn = new AI::NeuralNet::SOM::Rect (output_dim => "5x6",
input_dim => 3);
$nn->initialize;
foreach my $x (0 .. 5 - 1) {
    foreach my $y (0 .. 6 - 1) {
        ok ((!grep { $_ > 0.5 || $_ < -0.5 } @{ $nn->value ($x, $y) }),
            "$x, $y: random vectors in [-0.5, 0.5]");
    }
}
}
#########################
# Change 'tests => 1' to 'tests => last_test_to_print';
use Test::More qw(no_plan);
BEGIN { use_ok('AI::NeuralNet::SOM') };
######
use Data::Dumper;
{
use AI::NeuralNet::SOM::Rect; # any non-abstract subclass should do
my $nn = new AI::NeuralNet::SOM::Rect (output_dim => "5x6",
                                       input_dim  => 3);
#########################
# change 'tests => 1' to 'tests => last_test_to_print';
use Test::More qw(no_plan);
BEGIN { use_ok('AI::NeuralNet::SOM::Torus') };
######
use Data::Dumper;
{
my $nn = new AI::NeuralNet::SOM::Torus (output_dim => "5x6",
input_dim => 3);
[ 3, 2, '0' ],
[ 3, 3, '1' ],
[ 4, 2, '1' ]
]), 'neighbors 4+1');
}
{
my $nn = new AI::NeuralNet::SOM::Torus (output_dim => "5x6",
input_dim => 3);
$nn->initialize;
# print Dumper $nn;
# exit;
my @vs = ([ 3, 2, 4 ], [ -1, -1, -1 ], [ 0, 4, -3]);
$nn->train (400, @vs);
foreach my $v (@vs) {
ok (_find ($v, $nn->map), 'found learned vector '. join (",", @$v));
}
sub _find {                                          # is vector $v stored anywhere in map $m?
    my $v = shift;
    my $m = shift;
    use AI::NeuralNet::SOM::Utils;
foreach my $x ( 0 .. 4 ) {
foreach my $y ( 0 .. 5 ) {
return 1 if AI::NeuralNet::SOM::Utils::vector_distance ($m->[$x]->[$y], $v) < 0.01;
}
}
return 0;
}
ok ($nn->as_string, 'pretty print');
ok ($nn->as_data, 'raw format');
# print $nn->as_string;
}
__END__
# randomized pick
@vectors = ...;
my $get = sub {                                      # closure returning one random training vector
    return $vectors[ int (rand (scalar @vectors)) ];
};