AI-NeuralNet-SOM


Changes

0.06  Fri May 23 10:23:29 CEST 2008
	- fix: label '0' in label method (tom fawcett)
	- fix: value '0' in value method (rho)

0.05  Wed Jan 16 20:58:19 CET 2008
	- improved documentation
	- training now holds sigma and l constant during an epoch, but applies ALL vectors (exactly once)

0.04  Sun Jun 17 CEST 2007
	- added labels get/set
	- added mean_error function

0.03  Thu Jun 14 21:07:54 CEST 2007
	- added output_dim method
	- added ::Torus subclass of ::Rect

0.02  Sat Jun 9 17:55:23 CEST 2007
	- split ::SOM.pm into ::SOM::Rect and ::SOM::Hexa
	- added more features for initialization
	- factored out vector computation into ::SOM::Utils

examples/eigenvector_initialization.pl


my $epsilon = 0.001;
my $epochs  = 400;

{ # random initialisation
    my $nn = new AI::NeuralNet::SOM::Rect (output_dim => "5x6",
					   input_dim  => $dim);

    $nn->initialize;                                      # random
    my @mes = $nn->train ($epochs, @vs);
    warn "random:   length until error is < $epsilon ". scalar (grep { $_ >= $epsilon } @mes);
}

{ # constant initialisation
    my $nn = new AI::NeuralNet::SOM::Rect (output_dim => "5x6",
					   input_dim  => $dim);

    $nn->initialize ($vs[-1]);
    my @mes = $nn->train ($epochs, @vs);
    warn "constant: length until error is < $epsilon ". scalar (grep { $_ >= $epsilon } @mes);
}

{ # eigenvector initialisation
    my $nn = new AI::NeuralNet::SOM::Rect (output_dim => "5x6",
					   input_dim  => $dim);
    my @training_vectors;                                                # training vectors to prime the map with,
    {                                                                    # derived from the eigenvectors of the data
	use PDL;
	my $A = pdl \@vs;

	# ... (the eigen-decomposition itself is elided in this excerpt) ...
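	# The elided part has to fill $E with the eigenvectors and @es_idx with
	# their indices, sorted by descending eigenvalue. A possible shape (a
	# sketch using PDL::MatrixOps' eigens_sym on the Gram matrix; an
	# assumption, not the distribution's exact code):
	my ($E, $e) = eigens_sym ($A->transpose x $A);                   # eigenvectors and eigenvalues
	my @es      = list $e;                                           # eigenvalues as a Perl list
	my @es_idx  = sort { $es[$b] <=> $es[$a] } 0 .. $#es;            # indices, largest eigenvalue first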

	for (@es_idx) {                                                  # from the highest values downwards, take the index
	    push @training_vectors, [ list $E->dice($_) ] ;              # get the corresponding vector
	}
    }

    $nn->initialize (@training_vectors[0..0]);                           # prime only with the eigenvector of the largest eigenvalue
#warn $nn->as_string;
    my @mes = $nn->train ($epochs, @vs);
    warn "eigen:    length until error is < $epsilon ". scalar (grep { $_ >= $epsilon } @mes);
}

__END__

examples/load_save.pl



#my @vs  = ([1,-0.5],    [0,1]);
#my $dim = 2;

my $epsilon = 0.001;
my $epochs  = 400;

{ # random initialisation
    my $nn = new AI::NeuralNet::SOM::Rect (output_dim => "5x6",
					   input_dim  => $dim);

    $nn->initialize;                                      # random
    my @mes = $nn->train ($epochs, @vs);
    warn "random:   length until error is < $epsilon ". scalar (grep { $_ >= $epsilon } @mes);
}

{ # constant initialisation
    my $nn = new AI::NeuralNet::SOM::Rect (output_dim => "5x6",
					   input_dim  => $dim);

    $nn->initialize ($vs[-1]);
    my @mes = $nn->train ($epochs, @vs);
    warn "constant: length until error is < $epsilon ". scalar (grep { $_ >= $epsilon } @mes);
}

{ # eigenvector initialisation
    my $nn = new AI::NeuralNet::SOM::Rect (output_dim => "5x6",
					   input_dim  => $dim);
    my @training_vectors;                                                # training vectors to prime the map with,
    {                                                                    # derived from the eigenvectors of the data
	use PDL;
	my $A = pdl \@vs;

	# ... (same eigen-decomposition as in eigenvector_initialization.pl above,
	# elided in this excerpt as well; it fills $E and @es_idx) ...

	for (@es_idx) {                                                  # from the highest values downwards, take the index
	    push @training_vectors, [ list $E->dice($_) ] ;              # get the corresponding vector
	}
    }

    $nn->initialize (@training_vectors[0..0]);                           # prime only with the eigenvector of the largest eigenvalue
#warn $nn->as_string;
    my @mes = $nn->train ($epochs, @vs);
    warn "eigen:    length until error is < $epsilon ". scalar (grep { $_ >= $epsilon } @mes);
}

__END__

lib/AI/NeuralNet/SOM.pm


  use AI::NeuralNet::SOM::Rect;
  my $nn = new AI::NeuralNet::SOM::Rect (output_dim => "5x6",
                                         input_dim  => 3);
  $nn->initialize;
  $nn->train (30, 
    [ 3, 2, 4 ], 
    [ -1, -1, -1 ],
    [ 0, 4, -3]);

  my @mes = $nn->train (30, ...);      # learn about the smallest errors
                                       # during training

  print $nn->as_data;                  # dump the raw data
  print $nn->as_string;                # prepare a formatted string representation

  use AI::NeuralNet::SOM::Torus;
  # similar to above

  use AI::NeuralNet::SOM::Hexa;
  my $nn = new AI::NeuralNet::SOM::Hexa (output_dim => 6,
                                         input_dim  => 3);

lib/AI/NeuralNet/SOM.pm


=cut

sub train {
    my $self   = shift;
    my $epochs = shift || 1;
    die "no data to learn" unless @_;

    $self->{LAMBDA} = $epochs / log ($self->{_Sigma0});                                 # educated guess?

    my @mes    = ();                                                                    # this will contain the errors during the epochs
    for my $epoch (1..$epochs)  {
	$self->{T} = $epoch;
	my $sigma = $self->{_Sigma0} * exp ( - $self->{T} / $self->{LAMBDA} );          # compute current radius
	my $l     = $self->{_L0}     * exp ( - $self->{T} / $epochs );                  # current learning rate

	my @veggies = @_;                                                               # local copy of the training vectors, consumed in the loop below
	while (@veggies) {
	    my $sample = splice @veggies, int (rand (scalar @veggies) ), 1;             # pick a random vector and remove it

	    my @bmu = $self->bmu ($sample);                                             # find the best matching unit
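
The cooling schedule above shrinks both the neighborhood radius and the learning
rate exponentially; with LAMBDA = epochs / log (sigma_0) the radius decays to
exactly 1 in the final epoch. A standalone sketch of the schedule (the initial
values here are assumptions; the module keeps its own in C<_Sigma0> and C<_L0>):

    my ($sigma0, $l0, $epochs) = (3, 0.1, 400);        # assumed initial radius, rate, epoch count
    my $lambda = $epochs / log ($sigma0);              # same time constant as in train
    for my $t (1 .. $epochs) {
	my $sigma = $sigma0 * exp (- $t / $lambda);    # current neighborhood radius
	my $l     = $l0     * exp (- $t / $epochs);    # current learning rate
	printf "epoch %3d: sigma = %.3f, l = %.4f\n", $t, $sigma, $l
	    if $t % 100 == 0;                          # sample the decay every 100 epochs
    }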

lib/AI/NeuralNet/SOM.pm


This method finds the I<best matching unit>, i.e. that neuron which is closest to the vector passed
in. The method returns the coordinates and the actual distance.
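
For example (a sketch; the map is assumed to be initialized, and C<bmu> itself is
only implemented by the concrete topology subclasses):

  my ($x, $y, $distance) = $nn->bmu ([ 3, 2, 4 ]);      # closest neuron and its distance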

=cut

sub bmu { die; }    # abstract; must be overridden by a concrete topology subclass (::Rect, ::Hexa)

=pod

=item I<mean_error>

I<$me> = I<$nn>->mean_error (I<@vectors>)

This method takes a number of vectors and produces the I<mean distance>, i.e. the average I<error>
which the SOM makes when finding the C<bmu>s for the vectors. At least one vector must be passed in.

Obviously, the longer you train your SOM, the smaller the error should become.
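
For example (a sketch, reusing the training vectors from the SYNOPSIS):

  my $me = $nn->mean_error ([ 3, 2, 4 ], [ -1, -1, -1 ], [ 0, 4, -3 ]);   # average bmu distance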

=cut
    
sub mean_error {
    my $self = shift;
    my $error = 0;
    map { $error += $_ }                    # then add them all up
        map { ( $self->bmu($_) )[2] }       # then find the distance
           @_;                              # take all data vectors
    return ($error / scalar @_);            # return the mean value
}

=pod

=item I<neighbors>

I<$ns> = I<$nn>->neighbors (I<$sigma>, I<$x>, I<$y>)

Finds all neighbors of (X, Y) with a distance smaller than SIGMA. Returns a list reference of (X, Y,
distance) triples.
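
A usage sketch (hypothetical values; which units qualify depends on the grid of
the concrete subclass):

  my $ns = $nn->neighbors (2, 0, 0);          # all units closer than sigma = 2 to neuron (0, 0)
  foreach my $triple (@$ns) {
      my ($x, $y, $d) = @$triple;             # coordinates and distance of one neighbor
  }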

t/som.t

    is ($nn->label ( 1, 0), undef, 'label set/get');
}

{
    my $nn = new AI::NeuralNet::SOM::Rect (output_dim => "5x6",
					   input_dim  => 3);
    $nn->initialize;

    my @vs = ([ 3, 2, 4 ], [ -1, -1, -1 ], [ 0, 4, -3]);

    my $me = $nn->mean_error (@vs);
    for (1 .. 40) {
	$nn->train (50, @vs);
	ok ($me >= $nn->mean_error (@vs), 'mean error getting smaller');
	$me = $nn->mean_error (@vs);
#	warn $me;
    }

    foreach (1..3) {
	my @mes = $nn->train (20, @vs);
	is (scalar @mes, 3 * 20, 'errors while training, nr');
	ok ((!grep { $_ > 10 * $me } @mes), 'errors while training, none significantly bigger');
    }
}

__END__

# randomized pick
my @vectors = ( ... );
my $get = sub {
    return $vectors[ int (rand (scalar @vectors) ) ];
};


