AI-NeuralNet-Simple


META.yml

--- #YAML:1.0
name:                AI-NeuralNet-Simple
version:             0.11
abstract:            An easy to use backprop neural net.
license:             perl
generated_by:        ExtUtils::MakeMaker version 6.31
distribution_type:   module
requires:    
    Log::Agent:                    0.208
    Sub::Uplevel:                  0
    Test::Exception:               0.15
    Test::More:                    0.48_01
meta-spec:
    version: 1.2
author:
    - Curtis "Ovid" Poe <ovid@cpan.org>

README

To install this module type the following:
 
   perl Makefile.PL
   make
   make test
   make install
 
DEPENDENCIES
 
This module requires these other modules and libraries:
 
  Inline::C
 
COPYRIGHT AND LICENCE
 
Copyright (C) 2003 Curtis "Ovid" Poe <eop_divo_sitruc@yahoo.com>
 
Reverse "eop_divo_sitruc" to send me email.
 
This library is free software; you can redistribute it and/or modify

Simple.xs

        av_store(av, i, build_rv(av2));
    }
 
    return build_rv(av);
}
 
#define EXPORT_VERSION    1
#define EXPORTED_ITEMS    9
 
/*
 * Exports the C data structures to the Perl world for serialization
 * by Storable.  We don't want to duplicate the logic of Storable here
 * even though we have to do some low-level Perl object construction.
 *
 * The structure we return is an array reference, which contains the
 * following items:
 *
 *  0    the export version number, in case the format changes later
 *  1    the number of neurons in the input layer
 *  2    the number of neurons in the hidden layer
 *  3    the number of neurons in the output layer

Simple.xs

    free(input);
    free(output);
 
    return max_error;
}
 
SV* c_infer(int handle, SV *array_ref)
{
    NEURAL_NETWORK *n = c_get_network(handle);
    int    i;
    AV     *perl_array, *result = newAV();
 
    /* feed the data */
    perl_array = get_array(array_ref);
 
    for (i = 0; i < n->size.input; i++)
        n->tmp[i] = get_float_element(perl_array, i);
 
    c_feed(n, n->tmp, NULL, 0);
 
    /* read the results */
    for (i = 0; i < n->size.output; i++) {
        av_push(result, newSVnv(n->neuron.output[i]));
    }
    return newRV_noinc((SV*) result);
}
 
void c_feed(NEURAL_NETWORK *n, double *input, double *output, int learn)
{
    int i;
 
    for (i=0; i < n->size.input; i++) {
        n->neuron.input[i]  = input[i];
    }

Simple.xs

    c_feed_forward(n);
 
    if (learn) c_back_propagate(n);
}
 
/*
 *  The original author of this code is M. Tim Jones <mtj@cogitollc.com>, who
 *  wrote it for the book "AI Application Programming" (Charles River Media).
 *
 *  It's been so heavily modified that it bears little resemblance to the
 *  original, but credit should be given where credit is due.  Therefore ...
 *
 *  Copyright (c) 2003 Charles River Media.  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, is hereby granted without fee provided that the following
 *  conditions are met:
 *
 *    1.  Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.  2.
 *    Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.  3.

examples/game_ai.pl

    [POOR,     NO,   NO, 1],  HIDE,
    [POOR,     NO,   NO, 0],  WANDER,
    [POOR,    YES,   NO, 0],  WANDER,
]);
 
 
my $format = "%8s %5s %3s %7s %6s\n";
my @actions = qw/attack run wander hide/;
 
printf $format, qw/Health Knife Gun Enemies Action/;
display_result($net,2,1,1,1);
display_result($net,2,0,0,2);
display_result($net,2,0,1,2);
display_result($net,2,0,1,3);
display_result($net,1,1,0,0);
display_result($net,1,0,1,2);
display_result($net,0,1,0,3);
 
while (1) {
    print "Type 'quit' to exit\n";
    my $health  = prompt("Am I in poor, average, or good health? ", qr/^(?i:[pag])/);
    my $knife   = prompt("Do I have a knife? ", qr/^(?i:[yn])/);
    my $gun     = prompt("Do I have a gun? ", qr/^(?i:[yn])/);
    my $enemies = prompt("How many enemies can I see? ", qr/^\d+$/);
     
    $health = substr $health, 0, 1;
    $health =~ tr/pag/012/;

examples/game_ai.pl

    printf "I think I will %s!\n\n", $actions[$net->winner([
        $health,
        $knife,
        $gun,
        $enemies])];
}
 
sub prompt
{
    my ($message,$domain) = @_;
    my $valid_response = 0;
    my $response;
    do {
        print $message;
        chomp($response = <STDIN>);
        exit if substr(lc $response, 0, 1) eq 'q';
        $valid_response = $response =~ /$domain/;
    } until $valid_response;
    return $response;
}
 
sub display_result
{
    my ($net,@data) = @_;
    my $result      = $net->winner(\@data);
    my @health      = qw/Poor Average Good/;
    my @knife       = qw/No Yes/;
    my @gun         = qw/No Yes/;
    # @_[1..4] are still the raw inputs: health, knife, gun, enemies
    printf $format,
        $health[$_[1]],
        $knife[$_[2]],
        $gun[$_[3]],
        $_[4],             # number of enemies
        $actions[$result];
}

lib/AI/NeuralNet/Simple.pm

      $net->train([0,1],[0,1]);
      $net->train([0,0],[1,0]);
  }
  printf "Answer: %d\n",   $net->winner([1,1]);
  printf "Answer: %d\n",   $net->winner([1,0]);
  printf "Answer: %d\n",   $net->winner([0,1]);
  printf "Answer: %d\n\n", $net->winner([0,0]);
 
=head1 ABSTRACT
 
  This module is a simple neural net designed for those who have an interest
  in artificial intelligence but need a "gentle" introduction.  This is not
  intended to replace any of the neural net modules currently available on the
  CPAN.
 
=head1 DESCRIPTION
 
=head2 The Disclaimer
 
Please note that the following information is terribly incomplete.  That's
deliberate.  Anyone familiar with neural networks is going to laugh themselves
silly at how simplistic the following information is, and the astute reader will
notice that I've raised far more questions than I've answered.
 
So why am I doing this?  Because I'm giving I<just enough> information for
someone new to neural networks to have enough of an idea of what's going on so
they can actually use this module and then move on to something more powerful,
if interested.
 
=head2 The Biology
 
A neural network, at its simplest, is merely an attempt to mimic nature's
"design" of a brain.  Like many successful ventures in the field of artificial
intelligence, we find that blatantly ripping off natural designs has allowed us
to solve many problems that otherwise might prove intractable.  Fortunately,
Mother Nature has not chosen to apply for patents.
 
Our brains are composed of neurons connected to one another by axons.  The
axon makes the actual connection to a neuron via a synapse.  When neurons
receive information, they process it and feed this information to other neurons
who in turn process the information and send it further until eventually
commands are sent to various parts of the body and muscles twitch, emotions are
felt and we start eyeing our neighbor's popcorn in the movie theater, wondering

lib/AI/NeuralNet/Simple.pm

cannot be used with the type of network that C<AI::NeuralNet::Simple> employs.
This module uses the sigmoid activation function.  (More information about
these can be found by reading the information in the L<SEE ALSO> section or by
just searching with Google.)
 
Once the activation function is applied, the output is then sent through the
next synapse, where it will be multiplied by w4 and the process will continue.
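
To make that step concrete, here is a tiny illustrative sketch (plain Perl, not
code from this module) of a single neuron's contribution: a weighted sum of its
inputs pushed through the sigmoid activation function.  The input values and
weights are made up for the example.

  my @inputs  = (1, 0);          # values arriving at the neuron
  my @weights = (0.4, -0.2);     # one weight per incoming synapse

  # weighted sum of everything arriving at the neuron
  my $sum = 0;
  $sum += $inputs[$_] * $weights[$_] for 0 .. $#inputs;

  # the sigmoid activation squashes the sum into the range (0, 1)
  my $activation = 1 / (1 + exp(-$sum));
  printf "activation = %.4f\n", $activation;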
 
=head2 C<AI::NeuralNet::Simple> architecture
 
The architecture used by this module has (at present) 3 fixed layers of
neurons: an input, hidden, and output layer.  In practice, a 3 layer network is
applicable to many problems for which a neural network is appropriate, but this
is not always the case.  In this module, we've settled on a fixed 3 layer
network for simplicity.
 
Here's how a three layer network might learn "logical or".  First, we need to
determine how many inputs and outputs we'll have.  The inputs are simple: we'll
choose two inputs as this is the minimum necessary to teach a network this
concept.  For the outputs, we'll also choose two neurons, with the neuron with
the highest output value being the "true" or "false" response that we are
looking for.  We'll only have one neuron for the hidden layer.  Thus, we get a
network that resembles the following:
 
           Input   Hidden   Output
 
 input1  ----> n1 -+    +----> n4 --->  output1
                    \  /
                     n3
                    /  \
 input2  ----> n2 -+    +----> n5 --->  output2
 
Let's say that output 1 will correspond to "false" and output 2 will correspond
to "true".  If we feed a 1 (or true) to either or both of input 1 and input 2,
we hope that output 2 will be true and output 1 will be false.  The following
table should illustrate
the expected results:
 
 input   output
 1   2   1    2
 -----   ------
 1   1   0    1
 1   0   0    1
 0   1   0    1
 0   0   1    0
 
The type of network we use is a feed-forward, back error propagation network,
referred to as a back-propagation network for short.  The way it works is
simple.  When we feed in our input, it travels from the input to hidden layers
and then to the output layers.  This is the "feed forward" part.  We then
compare the output to the expected results and measure how far off we are.  We
then adjust the weights on the "output to hidden" synapses, measure the error
on the hidden nodes and then adjust the weights on the "hidden to input"
synapses.  This is what is referred to as "back error propagation".
 
We continue this process until the amount of error is small enough that we are
satisfied.  In reality, we will rarely if ever get precise results from the
network, but we learn various strategies to interpret the results.  In the
example above, we use a "winner takes all" strategy.  Whichever of the output
nodes has the greatest value will be the "winner", and thus the answer.
 
In the examples directory, you will find a program named "logical_or.pl" which
demonstrates the above process.
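
A condensed sketch of that program, using only the methods documented below
(C<new()>, C<train()> and C<winner()>):

  use AI::NeuralNet::Simple;

  # 2 inputs, 1 hidden neuron, 2 outputs (the network drawn above)
  my $net = AI::NeuralNet::Simple->new(2, 1, 2);

  # repeat the four "logical or" cases until the weights settle
  for (1 .. 10000) {
      $net->train([1,1] => [0,1]);
      $net->train([1,0] => [0,1]);
      $net->train([0,1] => [0,1]);
      $net->train([0,0] => [1,0]);
  }

  # winner() returns the index of the strongest output neuron:
  # index 1 means "true", index 0 means "false"
  printf "1 or 1 = %d\n", $net->winner([1,1]);
  printf "0 or 0 = %d\n", $net->winner([0,0]);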
 
=head2 Building a network
 
In creating a new neural network, there are three basic steps:

lib/AI/NeuralNet/Simple.pm

With more complete neural net packages, you can also pick which activation
functions you wish to use and the "learn rate" of the neurons.
 
=item 2 Training
 
This involves feeding the neural network enough data until the error rate is
low enough to be acceptable.  Often we have a large data set and merely keep
iterating until the desired error rate is achieved.
 
=item 3 Measuring results
 
One frequent mistake made with neural networks is failing to test the network
with data different from the training data.  It's quite possible for a
backpropagation network to settle into what is known as a "local minimum"
rather than the true best solution, which will cause false results.  To check
for this, after training we often feed in other known good data for
verification (a short sketch follows this list).  If the results are not
satisfactory, perhaps a different number of neurons per layer should be tried
or a different set of training data should be supplied.
 
=back
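
A rough sketch of step 3, assuming you have split your data by hand into a
training portion and a verification portion.  The split, and the layout of
C<@verification_data> below, are illustrative choices rather than something
this module provides; C<train_set()> and C<winner()> are documented below.

  # train on one portion of the data ...
  $net->train_set(\@training_data, 10000, 0.01);

  # ... then check the network against cases it has never seen
  my $correct = 0;
  for my $case (@verification_data) {
      my ($inputs, $expected) = @$case;    # e.g. [ [1,0], 1 ]
      $correct++ if $net->winner($inputs) == $expected;
  }
  printf "%d of %d verification cases correct\n",
      $correct, scalar @verification_data;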
 
=head1 Programming C<AI::NeuralNet::Simple>
 
=head2 C<new($input, $hidden, $output)>
 
C<new()> accepts three integers.  These numbers represent the number of nodes in
the input, hidden, and output layers, respectively.  To create the "logical or"
network described earlier:
 
  my $net = AI::NeuralNet::Simple->new(2,1,2);
 
By default, the activation function for the neurons is the sigmoid function
S() with delta = 1:
 
        S(x) = 1 / (1 + exp(-delta * x))
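
For illustration, here is the same function written out in plain Perl (the
module computes it internally, so you never need to write this yourself); note
how a larger delta makes the curve steeper:

  sub sigmoid {
      my ($x, $delta) = @_;
      return 1 / (1 + exp(-$delta * $x));
  }

  printf "delta=1: S(2) = %.3f\n", sigmoid(2, 1);   # about 0.881
  printf "delta=2: S(2) = %.3f\n", sigmoid(2, 2);   # about 0.982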
 
but you can change the delta after creation.  You can also use a bipolar

lib/AI/NeuralNet/Simple.pm

Returns whether the network currently uses a bipolar activation function.
If an argument is supplied, it instructs the network whether or not to use a
bipolar activation function.

You should not change the activation function during training.
 
=head2 C<train(\@input, \@output)>
 
This method trains the network to associate the input data set with the output
data set.  Training it on "logical or" looks like this:
 
  $net->train([1,1] => [0,1]);
  $net->train([1,0] => [0,1]);
  $net->train([0,1] => [0,1]);
  $net->train([0,0] => [1,0]);
 
Note that one pass through the data is seldom sufficient to train a network.
In the example "logical or" program, we actually run this data through the
network ten thousand times.
 
  for (1 .. 10000) {
    $net->train([1,1] => [0,1]);
    $net->train([1,0] => [0,1]);
    $net->train([0,1] => [0,1]);
    $net->train([0,0] => [1,0]);
  }
 
The routine returns the Mean Squared Error (MSE), a measure of how far off the
network's answers were.
 
It is far preferable to use C<train_set()>, as this lets you control the MSE
over the training set and it is more efficient because there are fewer memory
copies back and forth.
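
For example, the "logical or" training above can be collapsed into a single
call.  This is a sketch; the data layout matches the one used in the
distribution's own tests, and C<train_set()> itself is documented next:

  my $mse = $net->train_set([
      [1,1] => [0,1],
      [1,0] => [0,1],
      [0,1] => [0,1],
      [0,0] => [1,0],
  ], 10000, 0.01);   # at most 10,000 iterations, aiming for an MSE of 0.01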
 
=head2 C<train_set(\@dataset, [$iterations, $mse])>
 
Similar to train, this method allows us to train an entire data set at once.
It is typically faster than calling individual "train" methods.  The first

lib/AI/NeuralNet/Simple.pm

      ->train_set(\@training_data);
 
If you choose a lower learning rate, you will train the network more slowly,
but you may get better accuracy.  A higher learning rate will train the network
faster, but it has a tendency to "overshoot" the answer while learning and may
not learn as accurately.
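
As a sketch of the trade-off (the numbers are made up, and this assumes
C<learn_rate()> doubles as a setter, as the chained call above suggests):

  # a smaller rate: slower, but usually steadier convergence
  $net->learn_rate(0.1);
  my $mse = $net->train_set(\@training_data, 20000, 0.01);

  # a larger rate: faster, but watch the returned MSE for signs of
  # overshooting
  $net->learn_rate(0.8);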
 
=head2 C<infer(\@input)>
 
This method, if provided with an input array reference, will return an array
reference corresponding to the output values that it is guessing.  Note that
these values will generally be close, but not exact.  For example, with the
"logical or" program, you might expect results similar to:
 
  use Data::Dumper;
  print Dumper $net->infer([1,1]);
   
  $VAR1 = [
          '0.00993729281477686',
          '0.990100297418451'
        ];
 
The second output item is clearly close to 1, so as a helper method for use
with a "winner takes all" strategy, we have ...
 
=head2 C<winner(\@input)>
 
This method returns the index of the highest value from inferred results:
 
  print $net->winner([1,1]); # will likely print "1"
 
For a more comprehensive example of how this is used, see the
"examples/game_ai.pl" program.
 
=head1 EXPORT
 
None by default.

lib/AI/NeuralNet/Simple.pm

copyright (c) 1990 by Massachusetts Institute of Technology.
 
This book is a decent introduction to neural networks in general.  Feed-forward
back error propagation is but one of many types.
 
=head1 AUTHORS
 
Curtis "Ovid" Poe, C<ovid [at] cpan [dot] org>
 
Multiple network support, persistence, export of MSE (mean squared error),
training until MSE below a given threshold and customization of the
activation function added by Raphael Manfredi C<Raphael_Manfredi@pobox.com>.
 
=head1 COPYRIGHT AND LICENSE
 
Copyright (c) 2003-2005 by Curtis "Ovid" Poe
 
Copyright (c) 2006 by Raphael Manfredi
 
This library is free software; you can redistribute it and/or modify
it under the same terms as Perl itself.

t/10nn_simple.t

ok($net->train([1,1], [0,1]), 'Calling train() with valid data should succeed');
for (1 .. 10000) {
    $net->train([1,1],[0,1]);
    $net->train([1,0],[0,1]);
    $net->train([0,1],[0,1]);
    $net->train([0,0],[1,0]);
}
 
can_ok($net, 'winner');
is($net->winner([1,1]), 1, '... and it should return the index of the highest valued result');
is($net->winner([1,0]), 1, '... and it should return the index of the highest valued result');
is($net->winner([0,1]), 1, '... and it should return the index of the highest valued result');
is($net->winner([0,0]), 0, '... and it should return the index of the highest valued result');
 
# teach the network logical 'and' using the tanh() activation with delta=2
$net = $CLASS->new(2,1,2);
$net->delta(2);
$net->use_bipolar(1);
my $mse = $net->train_set([
        [1,1] => [0,1],
        [1,0] => [1,0],
        [0,1] => [1,0],
        [0,0] => [1,0],

t/30nn_storable.t

my $net = $CLASS->new(2,1,2);
$net->delta(2);
$net->use_bipolar(5);
for (1 .. 10000) {
    $net->train([1,1],[0,1]);
    $net->train([1,0],[0,1]);
    $net->train([0,1],[0,1]);
    $net->train([0,0],[1,0]);
}
 
is($net->winner([1,1]), 1, '... and it should return the index of the highest valued result');
is($net->winner([1,0]), 1, '... and it should return the index of the highest valued result');
is($net->winner([0,1]), 1, '... and it should return the index of the highest valued result');
is($net->winner([0,0]), 0, '... and it should return the index of the highest valued result');
 
ok(store($net, "t/store"), "store() succeeds");
$net = undef;
 
$net = retrieve("t/store");
ok($net, "retrieve() succeeds");
unlink 't/store';
can_ok($net, 'learn_rate');
is($net->delta, 2, 'properly restored value of delta');
is($net->use_bipolar, 5, 'properly restored value of use_bipolar');
 
is($net->winner([1,1]), 1, '... and it should return the index of the highest valued result');
is($net->winner([1,0]), 1, '... and it should return the index of the highest valued result');
is($net->winner([0,1]), 1, '... and it should return the index of the highest valued result');
is($net->winner([0,0]), 0, '... and it should return the index of the highest valued result');


