AI-ExpertSystem-Simple

lib/AI/ExpertSystem/Simple.pm  view on Meta::CPAN

	$self->{_filename} = undef;

	$self->{_ask_about} = undef;
	$self->{_told_about} = undef;

	$self->{_log} = [];

	$self->{_number_of_rules} = 0;
	$self->{_number_of_attributes} = 0;
	$self->{_number_of_questions} = 0;

lib/AI/ExpertSystem/Simple.pm  view on Meta::CPAN

		$self->{_knowledge}->{$name}->reset();
	}

	$self->{_ask_about} = undef;
	$self->{_told_about} = undef;
	$self->{_log} = [];
}

sub load {
	my ($self, $filename) = @_;

lib/AI/ExpertSystem/Simple.pm  view on Meta::CPAN


		die "Simple->load() XML parse failed: $@" if $@;

		$self->{_filename} = $filename;

		$self->_add_to_log( "Read in $filename" );
		$self->_add_to_log( "There are " . $self->{_number_of_rules} . " rules" );
		$self->_add_to_log( "There are " . $self->{_number_of_attributes} . " attributes" );
		$self->_add_to_log( "There are " . $self->{_number_of_questions} . " questions" );
		$self->_add_to_log( "The goal attibutes is " . $self->{_goal}->name() );
		return 1;
	} else {
		die "Simple->load() unable to use file";
	}
}

lib/AI/ExpertSystem/Simple.pm  view on Meta::CPAN

			foreach my $answer (keys(%old_answers)) {
				my $n = $answer;
				my $v = $old_answers{$answer}->{value};
				my $s = $old_answers{$answer}->{setter};

				$self->_add_to_log( "Setting '$n' to '$v'" );

				$self->{_knowledge}->{$n}->set_value($v,$s);

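				# Forward chaining: offer each newly set fact to every still-active
				# rule; a rule whose conditions are all met fires its actions, which
				# are queued as the next round of answers to propagate.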
				foreach my $key (keys(%{$self->{_rules}})) {
					if($self->{_rules}->{$key}->state() eq 'active') {
						my $state = $self->{_rules}->{$key}->given($n, $v);
						if($state eq 'completed') {
							$self->_add_to_log( "Rule '$key' has completed" );
							my %y = $self->{_rules}->{$key}->actions();
							foreach my $k (keys(%y)) {
								$self->_add_to_log( "Rule '$key' is setting '$k' to '$y{$k}'" );
								$answers{$k}->{value} = $y{$k};
								$answers{$k}->{setter} = $key;
							}
						} elsif($state eq 'invalid') {
							$self->_add_to_log( "Rule '$key' is now inactive" );
						}
					}
				}
			}
		}

lib/AI/ExpertSystem/Simple.pm  view on Meta::CPAN

	my $n = $self->{_goal}->name();

	return $self->{_goal}->answer($self->{_knowledge}->{$n}->get_value());
}

sub log {
	my ($self) = @_;

	die "Simple->log() takes no arguments" if scalar(@_) != 1;

	my @return = ();
	@return = @{ $self->{_log} } if defined $self->{_log};

	$self->{_log} = [];

	return @return;
}

sub _add_to_log {
	my ($self, $message) = @_;

	push( @{$self->{_log}}, $message );
}

sub explain {
	my ($self) = @_;

lib/AI/ExpertSystem/Simple.pm  view on Meta::CPAN

	my $name  = $self->{_goal}->name();
	my $rule  = $self->{_knowledge}->{$name}->get_setter();
	my $value = $self->{_knowledge}->{$name}->get_value();

	my $x = "The goal '$name' was set to '$value' by " . ($rule ? "rule '$rule'" : 'asking a question' );
	$self->_add_to_log( $x );

	my @processed_rules;
	push( @processed_rules, $rule ) if $rule;

	$self->_explain_this( $rule, '', @processed_rules );
}

sub _explain_this {
	my ($self, $rule, $depth, @processed_rules) = @_;

	$self->_add_to_log( "${depth}Explaining rule '$rule'" );

	my %dont_do_these = map{ $_ => 1 } @processed_rules;
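	# %dont_do_these is a visited set: it prevents a rule from being
	# explained twice and stops the recursion looping on rule cycles.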

	my @check_these_rules = ();

lib/AI/ExpertSystem/Simple.pm  view on Meta::CPAN

	foreach my $name (sort keys %conditions) {
		my $value = $conditions{$name};
		my $setter = $self->{_knowledge}->{$name}->get_setter();

		my $x = "$depth Condition '$name' was set to '$value' by " . ($setter ? "rule '$setter'" : 'asking a question' );
		$self->_add_to_log( $x );

		if($setter) {
			unless($dont_do_these{$setter}) {
				$dont_do_these{$setter} = 1;
				push( @check_these_rules, $setter );

lib/AI/ExpertSystem/Simple.pm  view on Meta::CPAN

	my %actions = $self->{_rules}->{$rule}->actions();
	foreach my $name (sort keys %actions) {
		my $value = $actions{$name};

		my $x = "$depth Action set '$name' to '$value'";
		$self->_add_to_log( $x );
	}

	@processed_rules = keys %dont_do_these;

	foreach my $x ( @check_these_rules ) {

lib/AI/ExpertSystem/Simple.pm  view on Meta::CPAN


=head2 Overview

This class is where all the work is done; the other three classes are only
there for support. At present there is little you can do with it other than run it. Future
versions will make subclassing of this class feasible, and features like logging will be introduced.

To see how to use this class there is a simple shell in the bin directory which allows you
to consult the example knowledge bases, and there is more extensive documentation in the docs directory.

There is a Ruby version that reads the same XML knowledge bases, if you are interested.

lib/AI/ExpertSystem/Simple.pm  view on Meta::CPAN

=item get_answer( )

If the process( ) method has returned "finished" then the answer to the user's query will be
returned by this method.

=item log( )

Returns a list of the actions undertaken so far and clears the log.

=item explain( )

Explain how the given answer was arrived at. The explanation is added to the log.

=back
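
Drawing these methods together, a minimal consultation loop might look like
the sketch below. The knowledge-base file name is made up, and the
question-handling calls (C<get_question()> and C<answer()>) come from the
module's wider interface rather than the excerpt above, so treat this as a
sketch rather than a definitive recipe:

	use AI::ExpertSystem::Simple;

	my $s = AI::ExpertSystem::Simple->new();
	$s->load('examples/cars.xml');

	while ( (my $state = $s->process()) ne 'finished' ) {
		if ( $state eq 'question' ) {
			my ($text, @responses) = $s->get_question();
			# ...display $text, let the user pick one of @responses...
			$s->answer( $responses[0] );
		}
	}

	print $s->get_answer(), "\n";
	$s->explain();
	print "$_\n" for $s->log();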

=head2 Private methods

lib/AI/ExpertSystem/Simple.pm  view on Meta::CPAN


=item _question

A private method to get the question data from the knowledge base.

=item _add_to_log

A private method to add a message to the log.

=item _explain_this

A private method to explain how a single attribute was set.

lib/AI/ExpertSystem/Simple.pm  view on Meta::CPAN

=item Simple->get_answer() takes no arguments

When the method is called it requires no arguments. This message is given if 
some arguments were supplied.

=item Simple->log() takes no arguments

When the method is called it requires no arguments. This message is given if 
some arguments were supplied.

=item Simple->explain() takes no arguments



AI-FANN-Evolving

lib/AI/FANN/Evolving.pm  view on Meta::CPAN

use Algorithm::Genetic::Diploid;
use base qw'Algorithm::Genetic::Diploid::Base';

our $VERSION = '0.4';
our $AUTOLOAD;
my $log = __PACKAGE__->logger;

my %enum = (
	'train' => {
#		'FANN_TRAIN_INCREMENTAL' => FANN_TRAIN_INCREMENTAL, # only want batch training
		'FANN_TRAIN_BATCH'       => FANN_TRAIN_BATCH,

lib/AI/FANN/Evolving.pm  view on Meta::CPAN

=over

=item new

Constructor requires either a 'file' argument, or 'data' and 'neurons' arguments. It optionally
takes a 'connection_rate' argument for sparse topologies. Returns a wrapper around L<AI::FANN>.
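
For example, a sketch of both construction modes (the file name and argument
values are made up for illustration):

	use AI::FANN::Evolving;

	# de-serialize a previously saved network
	my $ann = AI::FANN::Evolving->new( 'file' => 'network.ann' );

	# or build a new (optionally sparse) topology from training data,
	# where $traindata is an AI::FANN::Evolving::TrainData object
	my $sparse = AI::FANN::Evolving->new(
		'data'            => $traindata,
		'neurons'         => 5,
		'connection_rate' => 0.5,
	);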

=cut

sub new {
	my $class = shift;

lib/AI/FANN/Evolving.pm  view on Meta::CPAN

	$self->_init(%args);
	
	# de-serialize from a file
	if ( my $file = $args{'file'} ) {
		$self->{'ann'} = AI::FANN->new_from_file($file);
		$log->debug("instantiating from file $file");
		return $self;
	}
	
	# build new topology from input data
	elsif ( my $data = $args{'data'} ) {
		$log->debug("instantiating from data $data");
		$data = $data->to_fann if $data->isa('AI::FANN::Evolving::TrainData');
		
		# prepare arguments
		my $neurons = $args{'neurons'} || ( $data->num_inputs + 1 );
		my @sizes = ( 
			$data->num_inputs, 
			$neurons,
			$data->num_outputs
		);
		
		# build topology
		if ( $args{'connection_rate'} ) {
			$self->{'ann'} = AI::FANN->new_sparse( $args{'connection_rate'}, @sizes );
		}
		else {
			$self->{'ann'} = AI::FANN->new_standard( @sizes );

lib/AI/FANN/Evolving.pm  view on Meta::CPAN

		return $self;
	}
	
	# build new ANN using argument as a template
	elsif ( my $ann = $args{'ann'} ) {
		$log->debug("instantiating from template $ann");
		
		# copy the wrapper properties
		%{ $self } = %{ $ann };
		
		# instantiate the network dimensions

lib/AI/FANN/Evolving.pm  view on Meta::CPAN


sub template {
	my ( $self, $other ) = @_;
	
	# copy over the simple properties
	$log->debug("copying over simple properties");
	my %scalar_properties = __PACKAGE__->_scalar_properties;
	for my $prop ( keys %scalar_properties ) {
		my $val = $self->$prop;
		$other->$prop($val);
	}
	
	# copy over the list properties
	$log->debug("copying over list properties");
	my %list_properties = __PACKAGE__->_list_properties;
	for my $prop ( keys %list_properties ) {
		my @values = $self->$prop;
		$other->$prop(@values);
	}
	
	# copy over the layer properties
	$log->debug("copying over layer properties");
	my %layer_properties = __PACKAGE__->_layer_properties;
	for my $prop ( keys %layer_properties ) {
		for my $i ( 0 .. $self->num_layers - 1 ) {
			for my $j ( 0 .. $self->layer_num_neurons($i) - 1 ) {
				my $val = $self->$prop($i,$j);

lib/AI/FANN/Evolving.pm  view on Meta::CPAN


=cut

sub mutate {
	my ( $self, $mu ) = @_;
	$log->debug("going to mutate at rate $mu");
	
	# mutate the simple properties
	$log->debug("mutating scalar properties");
	my %scalar_properties = __PACKAGE__->_scalar_properties;
	for my $prop ( keys %scalar_properties ) {
		my $handler = $scalar_properties{$prop};
		my $val = $self->$prop;
		if ( ref $handler ) {

lib/AI/FANN/Evolving.pm  view on Meta::CPAN

			$self->$prop( _mutate_enum($handler,$val,$mu) );
		}
	}	
	
	# mutate the list properties
	$log->debug("mutating list properties");
	my %list_properties = __PACKAGE__->_list_properties;
	for my $prop ( keys %list_properties ) {
		my $handler = $list_properties{$prop};		
		my @values = $self->$prop;
		if ( ref $handler ) {

lib/AI/FANN/Evolving.pm  view on Meta::CPAN

			$self->$prop( map { _mutate_enum($handler,$_,$mu) } @values );
		}		
	}	
	
	# mutate the layer properties
	$log->debug("mutating layer properties");
	my %layer_properties = __PACKAGE__->_layer_properties;
	for my $prop ( keys %layer_properties ) {
		my $handler = $layer_properties{$prop};
		for my $i ( 1 .. $self->num_layers ) {
			for my $j ( 1 .. $self->layer_num_neurons($i) ) {

lib/AI/FANN/Evolving.pm  view on Meta::CPAN


sub defaults {
	my $self = shift;
	my %args = @_;
	for my $key ( keys %args ) {
		$log->info("setting $key to $args{$key}");
		if ( $key eq 'activation_function' ) {
			$args{$key} = $constant{$args{$key}};
		}
		$default{$key} = $args{$key};
	}

lib/AI/FANN/Evolving.pm  view on Meta::CPAN


=cut

sub clone {
	my $self = shift;
	$log->debug("cloning...");
	
	# we delete the reference here so we can use 
	# Algorithm::Genetic::Diploid::Base's cloning method, which
	# dumps and loads from YAML. This wouldn't work if the 
	# reference is still attached because it cannot be 

lib/AI/FANN/Evolving.pm  view on Meta::CPAN

=cut

sub train {
	my ( $self, $data ) = @_;
	if ( $self->train_type eq 'cascade' ) {
		$log->debug("cascade training");
	
		# set learning curve
		$self->cascade_activation_functions( $self->activation_function );
		
		# train

lib/AI/FANN/Evolving.pm  view on Meta::CPAN

			$self->neuron_printfreq,
			$self->error,
		);
	}
	else {
		$log->debug("normal training");
	
		# set learning curves
		$self->hidden_activation_function( $self->activation_function );
		$self->output_activation_function( $self->activation_function );
		

lib/AI/FANN/Evolving.pm  view on Meta::CPAN


sub error {
	my $self = shift;
	if ( @_ ) {
		my $value = shift;
		$log->debug("setting error threshold to $value");
		return $self->{'error'} = $value;
	}
	else {
		$log->debug("getting error threshold");
		return $self->{'error'};
	}
}

=item epochs

lib/AI/FANN/Evolving.pm  view on Meta::CPAN


sub epochs {
	my $self = shift;
	if ( @_ ) {
		my $value = shift;
		$log->debug("setting training epochs to $value");
		return $self->{'epochs'} = $value;
	}
	else {
		$log->debug("getting training epochs");
		return $self->{'epochs'};
	}
}

=item epoch_printfreq

lib/AI/FANN/Evolving.pm  view on Meta::CPAN


sub epoch_printfreq {
	my $self = shift;
	if ( @_ ) {
		my $value = shift;
		$log->debug("setting epoch printfreq to $value");
		return $self->{'epoch_printfreq'} = $value;
	}
	else {
		$log->debug("getting epoch printfreq");
		return $self->{'epoch_printfreq'}
	}
}

=item neurons

lib/AI/FANN/Evolving.pm  view on Meta::CPAN


sub neurons {
	my $self = shift;
	if ( @_ ) {
		my $value = shift;
		$log->debug("setting neurons to $value");
		return $self->{'neurons'} = $value;
	}
	else {
		$log->debug("getting neurons");
		return $self->{'neurons'};
	}
}

=item neuron_printfreq

lib/AI/FANN/Evolving.pm  view on Meta::CPAN


sub neuron_printfreq {
	my $self = shift;
	if ( @_ ) {
		my $value = shift;
		$log->debug("setting neuron printfreq to $value");
		return $self->{'neuron_printfreq'} = $value;
	}
	else {	
		$log->debug("getting neuron printfreq");
		return $self->{'neuron_printfreq'};
	}
}

=item train_type

lib/AI/FANN/Evolving.pm  view on Meta::CPAN


sub train_type {
	my $self = shift;
	if ( @_ ) {
		my $value = lc shift;
		$log->debug("setting train type to $value"); 
		return $self->{'train_type'} = $value;
	}
	else {
		$log->debug("getting train type");
		return $self->{'train_type'};
	}
}

=item activation_function

lib/AI/FANN/Evolving.pm  view on Meta::CPAN


sub activation_function {
	my $self = shift;
	if ( @_ ) {
		my $value = shift;
		$log->debug("setting activation function to $value");
		return $self->{'activation_function'} = $value;
	}
	else {
		$log->debug("getting activation function");
		return $self->{'activation_function'};
	}
}

# this is here so that we can trap method calls that need to be 



AI-FANN

FANN.xs  view on Meta::CPAN

MODULE = AI::FANN		PACKAGE = AI::FANN		PREFIX = fann_

PROTOTYPES: DISABLE

BOOT:
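    /* passing NULLs here disables libfann's default error logging to
       stderr as soon as the module is loaded */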
    fann_set_error_log(0, 0);

void
_constants()
  PREINIT:
    unsigned int i;



AI-FuzzyEngine

lib/AI/FuzzyEngine.pm  view on Meta::CPAN


=head2 Regular Perl - without PDL

    use AI::FuzzyEngine;

    # Engine (or factory) provides fuzzy logical arithmetic
    my $fe = AI::FuzzyEngine->new();

    # Disjunction:
    my $a = $fe->or ( 0.2, 0.5, 0.8, 0.7 ); # 0.8
    # Conjunction:
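    my $b = $fe->and( 0.2, 0.5, 0.8, 0.7 ); # 0.2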



AI-FuzzyInference

FuzzyInference.pm  view on Meta::CPAN

# First some global vars.
#
############################################

# this hash defines the possible interpretations of the
# standard fuzzy logic operations.
my %_operations = (
		   '&' => {
		       min     => sub { (sort {$a <=> $b} @_)[0] },
		       product => sub { my $p = 1; $p *= $_ for @_; $p },
		       default => 'min',

FuzzyInference.pm  view on Meta::CPAN

}

# sub _init() - private method.
#
# no arguments. Initializes the data structures we will need.
# It also defines the default logic operations we might need.

sub _init {
    my $self = shift;

    $self->{SET}     = new AI::FuzzyInference::Set;

FuzzyInference.pm  view on Meta::CPAN

    return $self->{DEFUZZIFICATION};
}

# sub operation() - public method.
#
# two arguments: first one mandatory and specifies the logic operation
# in question. Second one is optional and has to match one of the keys
# of the %{$_operations{$first_arg}} hash.
# Used to query/set the logic operations method.

sub operation {
    my ($self,
	$op,
	$new,

FuzzyInference.pm  view on Meta::CPAN

    $self->{FUZZIFY} = \%terms;
}

# sub _infer() - private method.
#
# no arguments. This method applies the logic operations to combine
# multiple parts of the antecedent of a rule to get one crisp value 
# that is the degree of support of that rule.
# Rules with positive support "fire".

sub _infer {

FuzzyInference.pm  view on Meta::CPAN


For a more detailed explanation of fuzzy inference, you can check out
the tutorial by Jerry Mendel at
S<http://sipi.usc.edu/~mendel/publications/FLS_Engr_Tutorial_Errata.pdf>.

Note: The terminology used in this module might differ from that used
in the above tutorial.

=head1 PUBLIC METHODS

The module has the following public methods:

FuzzyInference.pm  view on Meta::CPAN

initialized AI::FuzzyInference object.

=item operation()

This method is used to set/query the fuzzy operations. It takes at least
one argument and at most two. The first argument specifies the logic
operation in question, and can be either C<&> for logical I<AND>,
C<|> for logical I<OR>, or C<!> for logical I<NOT>. The second
argument is used to set what method to use for the given operator.
The following values are possible:

=item &



AI-Genetic-Pro

lib/AI/Genetic/Pro.pm  view on Meta::CPAN

        values_vertical     => 1,
        values_format       => ($params{-format} || '%.2f'),

        zero_axis           => 1,
        #interlaced          => 1,
        logo_position       => 'BR',
        legend_placement    => 'RT',

        bgclr               => 'white',
        boxclr              => '#FFFFAA',
        transparent         => 0,

        title       		=> ($params{'-title'}   || q/Evolution/ ),
        x_label     		=> ($params{'-x_label'} || q/Generation/),
        y_label     		=> ($params{'-y_label'} || q/Value/     ),
        
        ( $params{-logo} && -f $params{-logo} ? ( logo => $params{-logo} ) : ( ) )
    );
	
	
    my $gd = $graph->plot( [ [ 0..$#{$data->[0]} ], @$data ] ) or croak($@);
    open(my $fh, '>', $params{-filename}) or croak($!);

lib/AI/Genetic/Pro.pm  view on Meta::CPAN


=item -font

Path to font (in *.ttf format) to be used (default: none).

=item -logo

Path to logo (png/jpg image) to embed in a chart (default: none).

=item For example:

	$ga->chart(-width => 480, -height => 320, -filename => 'chart.png');



AI-LibNeural

LibNeural.pm  view on Meta::CPAN


  use AI::LibNeural;

  my $nn = AI::LibNeural->new( 2, 4, 1 );

  # teach it the logical AND
  $nn->train( [ 0, 0 ], [ 0.05 ], 0.0000000005, 0.2 );
  $nn->train( [ 0, 1 ], [ 0.05 ], 0.0000000005, 0.2 );
  $nn->train( [ 1, 0 ], [ 0.05 ], 0.0000000005, 0.2 );
  $nn->train( [ 1, 1 ], [ 0.95 ], 0.0000000005, 0.2 );
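
  # a sketch of querying the trained network afterwards; run() returns
  # the output neuron values, so this should come out near 0.95
  my ($out) = $nn->run( [ 1, 1 ] );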



AI-Logic-AnswerSet

lib/AI/Logic/AnswerSet.pm  view on Meta::CPAN

Now the function returns the answer sets that satisfy the condition, i.e., an answer set
is returned only if the number of occurrences of the predicate "node" is higher than 4 and the number of occurrences of the predicate "edge" is less than 15.

=head3 getFacts

Get the logic program facts from a file or a string.

	my @facts = AI::Logic::AnswerSet::getFacts($inputFile);

or



AI-ML

lib/AI/ML/LogisticRegression.pm  view on Meta::CPAN

    $self->{alpha} 	= exists $opts{alpha} ? $opts{alpha} : 0.1;

	return $self;
}

=head2 logistic_regression

    assuming x is an [m,n] matrix
    assuming y is an [m,1] vector

=cut

lib/AI/ML/LogisticRegression.pm  view on Meta::CPAN

    if ( exists $self->{reg} ) {
        $lambda = $self->{reg};
        for my $i (1 .. $iters) {
            $h    = sigmoid($x x $thetas);
            $reg  = ($lambda / (2 * $m)) * sum( $thetas->slice(x0 => 1) ** 2 );
            $cost = (-1 / $m) * sum($y * log($h) + (1 - $y) * log(1 - $h)) + $reg;

            push @cost_values, $cost->get_element(0,0) if exists $self->{cost};

            $reg_thetas = ($lambda / $m) * $thetas;
            $reg_thetas->set_element(0,0,0);

lib/AI/ML/LogisticRegression.pm  view on Meta::CPAN

        }
    }
    else {
        for my $i (1 .. $iters) {
            $h    = sigmoid($x x $thetas);
            $cost = (-1 / $m) * sum(($y * log($h)) + ((1 - $y) * log(1 - $h)));

            push @cost_values, $cost->get_element(0,0) if exists $self->{cost};

            $grad   = ($x->T x ($h - $y)) / $m;
            $thetas = $thetas - $alpha * $grad;
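
            # Restating the loop in standard notation: it minimizes the cost
            #   J(theta) = -(1/m) * sum( y*log(h) + (1-y)*log(1-h) )  [ + (lambda/(2m)) * sum_{j>=1} theta_j^2 ]
            # with h = sigmoid(X x theta); the regularized branch adds
            # $reg_thetas = (lambda/m)*theta (bias component zeroed) to the
            # plain gradient X'(h - y)/m before each descent step.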



AI-MXNet

examples/calculator.pl  view on Meta::CPAN


## the network model
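# A note on nn_fc below: a FullyConnected layer applied to log(data) and then
# exponentiated computes exp(sum_i w_i * log(x_i)) = prod_i x_i**w_i, i.e. a
# learnable product/quotient of the inputs. Concatenating that with the raw
# data lets the final FullyConnected layer mix additive and multiplicative
# features, which is exactly what a calculator network needs.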
sub nn_fc {
    my $data = mx->sym->Variable('data');
    my $ln = mx->sym->exp(mx->sym->FullyConnected(
        data => mx->sym->log($data),
        num_hidden => 1,
    ));
    my $wide = mx->sym->Concat($data, $ln);
    my $fc = mx->sym->FullyConnected(
	$wide,



AI-MaxEntropy

inc/Test/Builder.pm  view on Meta::CPAN

        $fh = $file_or_fh;
    }
    else {
        $fh = do { local *FH };
        open $fh, ">$file_or_fh" or
            $self->croak("Can't open test output log $file_or_fh: $!");
	_autoflush($fh);
    }

    return $fh;
}



AI-MegaHAL

lib/AI/MegaHAL.pm  view on Meta::CPAN


POE::Component::AI::MegaHAL - IRC bot,

L<http://teaandbiscuits.org.uk/drupal/node/65> - Irssi IRC bot,

L<Hailo> - A pluggable Markov engine analogous to MegaHAL

=head1 AUTHOR

The Perl AI::MegaHAL interface was written by Cory Spencer <cspencer[at]sprocket.org>.



AI-MicroStructure

lib/AI/MicroStructure.pm  view on Meta::CPAN

  @{$data->{rows}->{"search"}}=split("#",join("",@d));
  if($line>0){
   __PACKAGE__->save_new($StructureName,$data,$line);
   exit 0;
  }else{
   printf "your logic is today impaired !!!\n";
   exit 0;
  }
  }
  if($write == 1) {
   __PACKAGE__->save_default();



AI-NNEasy

lib/AI/NNEasy.pm  view on Meta::CPAN


The secret of a NN is the number of hidden layers and nodes/neurons for each layer.
Basically, the best way to define the hidden layers is 1 layer of (INPUT_NODES+OUTPUT_NODES) nodes.
So, a network with 2 input nodes and 1 output node should have 3 nodes in the hidden layer.
This definition exists because the number of inputs defines the maximal variability of
the inputs (2**N for boolean inputs), and the output defines whether the variability is reduced by some logic restriction, as
in the XOR example, where we have 2 inputs and 1 output, so hidden is 3. And as we can see in the
logic we have 3 groups of inputs:

  0 0 => 0 # false
  0 1 => 1 # or
  1 0 => 1 # or
  1 1 => 0 # and



AI-NNFlex

lib/AI/NNFlex/Backprop.pm  view on Meta::CPAN


AI::NNFlex::Backprop is a class to generate feedforward, backpropagation neural nets. It inherits various constructs from AI::NNFlex & AI::NNFlex::Feedforward, but is documented here as a standalone.

The code should be simple enough to use for teaching purposes, but a simpler implementation of a simple backprop network is included in the example file bp.pl. This is derived from Phil Brierley's freely available Java code at www.philbrierley.com.

AI::NNFlex::Backprop leans towards teaching NN and cognitive modelling applications. Future modules are likely to include more biologically plausible nets like DeVries & Principe's Gamma model.

Full documentation for AI::NNFlex::Dataset can be found in the modules own perldoc. It's documented here for convenience only.

=head1 CONSTRUCTOR 



AI-NaiveBayes

lib/AI/NaiveBayes.pm  view on Meta::CPAN

    my ($self, $newattrs) = @_;
    $newattrs or die "Missing parameter for classify()";

    my $m = $self->model;

    # Note that we're using the log(prob) here.  That's why we add instead of multiply.

    my %scores = %{$m->{prior_probs}};
    my %features;
    while (my ($feature, $value) = each %$newattrs) {
        next unless exists $m->{attributes}{$feature};  # Ignore totally unseen features

lib/AI/NaiveBayes.pm  view on Meta::CPAN

}

sub rescale {
    my ($scores) = @_;

    # Scale everything back to a reasonable area in logspace (near zero), un-loggify, and normalize
    my $total = 0;
    my $max = max(values %$scores);
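    # Subtracting the max before exponentiating is the usual numerical
    # stability trick: the largest log-score becomes exp(0) = 1 and the
    # rest fall in (0, 1], so nothing underflows prematurely.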
    foreach (values %$scores) {
        $_ = exp($_ - $max);
        $total += $_**2;



AI-NaiveBayes1

NaiveBayes1.pm  view on Meta::CPAN


=head1 HISTORY

L<Algorithm::NaiveBayes> by Ken Williams was not what I needed, so I
wrote this one.  L<Algorithm::NaiveBayes> is oriented towards text
categorization; it includes smoothing and log probabilities.  This
module is a generic, basic Naive Bayes algorithm.

=head1 THANKS

I would like to thank Daniel Bohmer for documentation corrections,



AI-Nerl

examples/digits/digits.pl  view on Meta::CPAN

sub sigmoid{
   my $foo = shift;
   return 1/(1+E**-$foo);   # E is Euler's number, presumably defined elsewhere in this script
}

sub logistic{
   # Derivative of the sigmoid, written in terms of the sigmoid's output:
   # if $foo = sigmoid($x), then d/dx sigmoid($x) = $foo * (1 - $foo);
   # hence "find sigmoid before calling this": grad = logistic(sigmoid($x)).
   my $foo = shift;
   return $foo * (1-$foo);
}



AI-NeuralNet-BackProp

BackProp.pm  view on Meta::CPAN

well as automatic crunching of run() and learn*() inputs. Included in the examples directory
are seven new practical-use example scripts. Also implemented in this version is a much cleaner 
learning function for individual neurons which is more accurate than previous versions and is 
based on the LMS rule. See range() for information on output range limits. I have also updated 
the load() and save() methods so that they do not depend on Storable anymore. In this version 
you also have the choice between three network topologies, two not as stable, and the third is 
the default which has been in use for the previous four versions.


=head1 DESCRIPTION

BackProp.pm  view on Meta::CPAN


=head2 METHODS

=over 4

=item new AI::NeuralNet::BackProp($layers, $size [, $outputs, $topology_flag])

Returns a newly created neural network from an C<AI::NeuralNet::BackProp>
object. The network will have C<$layers> layers in it
and each layer will have C<$size> neurons in that layer.

There is an optional parameter of $outputs, which specifies the number
of output neurons to provide. If $outputs is not specified, $outputs
defaults to equal $size. $outputs may not exceed $size. If $outputs
exceeds $size, the new() constructor will return undef.
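
For example, this sketch (values chosen arbitrarily) creates a network of
two layers with three neurons per layer and a single output neuron:

	my $net = new AI::NeuralNet::BackProp(2, 3, 1);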

The optional parameter, $topology_flag, defaults to 0 when not used. There are
three valid topology flag values:

B<0> I<default>
My feed-forward style: Each neuron in layer X is connected to one input of every
neuron in layer Y. The best and most proven flag style.



AI-NeuralNet-FastSOM

lib/AI/NeuralNet/FastSOM/Hexa.pm  view on Meta::CPAN


=pod

=head1 NAME

AI::NeuralNet::FastSOM::Hexa - Perl extension for Kohonen Maps (hexagonal topology)

=head1 SYNOPSIS

  use AI::NeuralNet::FastSOM::Hexa;
  my $nn = new AI::NeuralNet::FastSOM::Hexa (output_dim => 6,



AI-NeuralNet-Hopfield

lib/AI/NeuralNet/Hopfield.pm  view on Meta::CPAN

If your Modified Version has been derived from a Modified Version made
by someone other than you, you are nevertheless required to ensure that
your Modified Version complies with the requirements of this license.

This license does not grant you the right to use any trademark, service
mark, tradename, or logo of the Copyright Holder.

This license includes the non-exclusive, worldwide, free-of-charge
patent license to make, have made, use, offer to sell, sell, import and
otherwise transfer the Package with respect to any patent claims
licensable by the Copyright Holder that are necessarily infringed by the



AI-NeuralNet-Kohonen

lib/AI/NeuralNet/Kohonen.pm  view on Meta::CPAN


=over 4

=item time_constant

The number of iterations (epochs) to be completed, over the log of the map radius.

=item t

The current epoch, or moment in time.

lib/AI/NeuralNet/Kohonen.pm  view on Meta::CPAN

		$self->{map_dim_a} 		= $self->{map_dim_x} + (($self->{map_dim_y}-$self->{map_dim_x})/2)
	}
	$self->{neighbour_factor}	= 2.5 unless $self->{neighbour_factor};
	$self->{epochs}				= 99 unless defined $self->{epochs};
	$self->{epochs}				= 1 if $self->{epochs}<1;
	$self->{time_constant}		= $self->{epochs} / log($self->{map_dim_a}) unless $self->{time_constant};	# to base 10?
	$self->{learning_rate}		= 0.5 unless $self->{learning_rate};
	$self->{l}					= $self->{learning_rate};
	if (not $self->{weight_dim}){
		cluck "{weight_dim} not set";
		return undef;

lib/AI/NeuralNet/Kohonen.pm  view on Meta::CPAN

		warn "Could not open file for writing <$path>: $!";
		return undef;
	}
	#- Dimensionality of the vectors (integer, compulsory).
	print OUT $self->{weight_dim}+1, " ";	# Perl indexing (no parens, to avoid the print(...) parsing gotcha)
	#- Topology type, either hexa or rect (string, optional, case-sensitive).
	if (not defined $self->{display}){
		print OUT "rect ";
	} else { # $self->{display} eq 'hex'
		print OUT "hexa ";
	}

lib/AI/NeuralNet/Kohonen.pm  view on Meta::CPAN

	chomp @_;
	my @specs = split/\s+/,(shift @_);
	#- Dimensionality of the vectors (integer, compulsory).
	$self->{weight_dim} = shift @specs;
	$self->{weight_dim}--; # Perl indexing
	#- Topology type, either hexa or rect (string, optional, case-sensitive).
	my $display		    = shift @specs;
	if (not defined $display and exists $self->{display}){
		# Intentionally blank
	} elsif (not defined $display){
		$self->{display} = undef;

lib/AI/NeuralNet/Kohonen.pm  view on Meta::CPAN


=head1 FILE FORMAT

This module has begun to attempt the I<SOM_PAK> format:
I<SOM_PAK> file format version 3.1 (April 7, 1995),
Helsinki University of Technology, Espoo:

=over 4

The input data is stored in ASCII-form as a list of entries, one line
...for each vectorial sample.

lib/AI/NeuralNet/Kohonen.pm  view on Meta::CPAN

The first line of the file is reserved for status knowledge of the
entries; in the present version it is used to define the following
items (these items MUST occur in the indicated order):

   - Dimensionality of the vectors (integer, compulsory).
   - Topology type, either hexa or rect (string, optional, case-sensitive).
   - Map dimension in x-direction (integer, optional).
   - Map dimension in y-direction (integer, optional).
   - Neighborhood type, either bubble or gaussian (string, optional, case-sensitive).



AI-NeuralNet-Mesh

Mesh.pm  view on Meta::CPAN

Neural networks are poor at precise calculations and serial processing. They
are also unable to predict or recognize anything that does not inherently
contain some sort of pattern.  For example, they cannot predict the lottery,
since this is a random process.  It is unlikely that a neural network could
be built which has the capacity to think as well as a person does, for two
reasons: neural networks are terrible at deduction, or logical thinking, and
the human brain is just too complex to completely simulate.  Also, some
problems are too difficult for present technology.  Real vision, for
example, is a long way off.

In short, Neural Networks are poor at precise calculations, but good at
association, evaluation, and pattern recognition.



AI-NeuralNet-SOM

lib/AI/NeuralNet/SOM.pm  view on Meta::CPAN

from the sample data, which is a set of N-dimensional vectors.

Slowly, the vectors in the network will try to approximate the sample
vectors fed in. If in the sample vectors there were clusters, then
these clusters will be neighbourhoods within the rectangle (or
whatever topology you are using).

Technically, you have reduced your dimension from N to 2.

=head1 INTERFACE

lib/AI/NeuralNet/SOM.pm  view on Meta::CPAN


A non-negative number representing the start value for the learning radius. Practically, the value
should be chosen in such a way as to cover a larger part of the map. During the learning process this
value will be narrowed down, so that the learning radius impacts fewer and fewer neurons.

B<NOTE>: Do not choose C<1> as the C<log> function is used on this value.

=back

Subclasses will (re)define some of these parameters and add others:

lib/AI/NeuralNet/SOM.pm  view on Meta::CPAN

sub train {
    my $self   = shift;
    my $epochs = shift || 1;
    die "no data to learn" unless @_;

    $self->{LAMBDA} = $epochs / log ($self->{_Sigma0});                                 # educated guess?

    my @mes    = ();                                                                    # this will contain the errors during the epochs
    for my $epoch (1..$epochs)  {
	$self->{T} = $epoch;
	my $sigma = $self->{_Sigma0} * exp ( - $self->{T} / $self->{LAMBDA} );          # compute current radius
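
	# Restating the two lines above: sigma(t) = Sigma0 * exp(-t / LAMBDA)
	# with LAMBDA = epochs / ln(Sigma0), so the radius decays exponentially
	# and ends at exactly 1 after the last epoch. This is also why the POD
	# warns against a sigma0 of 1: ln(1) = 0 would make LAMBDA zero.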

lib/AI/NeuralNet/SOM.pm  view on Meta::CPAN


=item I<radius> (read-only)

I<$radius> = I<$nn>->radius

Returns the I<radius> of the map. Different topologies interpret this differently.

=item I<map>

I<$m> = I<$nn>->map

lib/AI/NeuralNet/SOM.pm  view on Meta::CPAN


=item implement different window forms (bubble/gaussian), linear/random

=item implement the format mentioned in the original AI::NeuralNet::SOM

=item add methods as_html to individual topologies

=item add iterators through vector lists for I<initialize> and I<train>

=back



AI-NeuralNet-Simple

lib/AI/NeuralNet/Simple.pm  view on Meta::CPAN


sub handle { $_[0]->{handle} }

sub new {
    my ( $class, @args ) = @_;
    logdie "you must supply three positive integers to new()"
      unless 3 == @args;
    foreach (@args) {
        logdie "arguments to new() must be positive integers"
          unless defined $_ && /^\d+$/;
    }
    my $seed = rand(1);    # Perl invokes srand() on first call to rand()
    my $handle = c_new_network(@args);
    logdie "could not create new network" unless $handle >= 0;
    my $self = bless {
        input  => $args[0],
        hidden => $args[1],
        output => $args[2],
        handle => $handle,

lib/AI/NeuralNet/Simple.pm  view on Meta::CPAN

}

sub iterations {
    my ( $self, $iterations ) = @_;
    if ( defined $iterations ) {
        logdie "iterations() value must be a positive integer."
          unless $iterations
          and $iterations =~ /^\d+$/;
        $self->{iterations} = $iterations;
        return $self;
    }

lib/AI/NeuralNet/Simple.pm  view on Meta::CPAN

}

sub delta {
    my ( $self, $delta ) = @_;
    return c_get_delta( $self->handle )              unless defined $delta;
    logdie "delta() value must be a positive number" unless $delta > 0.0;
    c_set_delta( $self->handle, $delta );
    return $self;
}

sub use_bipolar {

lib/AI/NeuralNet/Simple.pm  view on Meta::CPAN

}

sub learn_rate {
    my ( $self, $rate ) = @_;
    return c_get_learn_rate( $self->handle ) unless defined $rate;
    logdie "learn rate must be between 0 and 1, exclusive"
      unless $rate > 0 && $rate < 1;
    c_set_learn_rate( $self->handle, $rate );
    return $self;
}

lib/AI/NeuralNet/Simple.pm  view on Meta::CPAN


=head1 SYNOPSIS

  use AI::NeuralNet::Simple;
  my $net = AI::NeuralNet::Simple->new(2,1,2);
  # teach it logical 'or'
  for (1 .. 10000) {
      $net->train([1,1],[0,1]);
      $net->train([1,0],[0,1]);
      $net->train([0,1],[0,1]);
      $net->train([0,0],[1,0]);

lib/AI/NeuralNet/Simple.pm  view on Meta::CPAN

So why am I doing this?  Because I'm giving I<just enough> information for
someone new to neural networks to have enough of an idea of what's going on so
they can actually use this module and then move on to something more powerful,
if interested.

=head2 The Biology

A neural network, at its simplest, is merely an attempt to mimic nature's
"design" of a brain.  Like many successful ventures in the field of artificial
intelligence, we find that blatantly ripping off natural designs has allowed us
to solve many problems that otherwise might prove intractable.  Fortunately,

lib/AI/NeuralNet/Simple.pm  view on Meta::CPAN

felt and we start eyeing our neighbor's popcorn in the movie theater, wondering
if they'll notice if we snatch some while they're watching the movie.

=head2 A simple example of a neuron

Now that you have a solid biology background (uh, no), how does this work when
we're trying to simulate a neural network?  The simplest part of the network is
the neuron (also known as a node or, sometimes, a neurode).  We might think
of a neuron as follows (OK, so I won't make a living as an ASCII artist):

Input neurons   Synapses   Neuron   Output

lib/AI/NeuralNet/Simple.pm  view on Meta::CPAN

neurons: an input, hidden, and output layer.  In practice, a 3 layer network is
applicable to many problems for which a neural network is appropriate, but this
is not always the case.  In this module, we've settled on a fixed 3 layer
network for simplicity.

Here's how a three layer network might learn "logical or".  First, we need to
determine how many inputs and outputs we'll have.  The inputs are simple, we'll
choose two inputs as this is the minimum necessary to teach a network this
concept.  For the outputs, we'll also choose two neurons, with the neuron with
the highest output value being the "true" or "false" response that we are
looking for.  We'll only have one neuron for the hidden layer.  Thus, we get a

lib/AI/NeuralNet/Simple.pm  view on Meta::CPAN

satisfied.  In reality, we will rarely if ever get precise results from the
network, but we learn various strategies to interpret the results.  In the
example above, we use a "winner takes all" strategy.  Which ever of the output
nodes has the greatest value will be the "winner", and thus the answer.

In the examples directory, you will find a program named "logical_or.pl" which
demonstrates the above process.
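
Reading off the winner is then just a comparison over the returned outputs;
a sketch (not code from the module):

  my $results = $net->infer( [1, 1] );
  print +( $results->[1] > $results->[0] ? "true" : "false" ), "\n";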

=head2 Building a network

In creating a new neural network, there are three basic steps:

lib/AI/NeuralNet/Simple.pm  view on Meta::CPAN

=head1 Programming C<AI::NeuralNet::Simple>

=head2 C<new($input, $hidden, $output)>

C<new()> accepts three integers.  These numbers represent the number of nodes in
the input, hidden, and output layers, respectively.  To create the "logical or"
network described earlier:

  my $net = AI::NeuralNet::Simple->new(2,1,2);

By default, the activation function for the neurons is the sigmoid function

lib/AI/NeuralNet/Simple.pm  view on Meta::CPAN

You should not change the activation function during training.

=head2 C<train(\@input, \@output)>

This method trains the network to associate the input data set with the output
data set.  Representing the "logical or" is as follows:

  $net->train([1,1] => [0,1]);
  $net->train([1,0] => [0,1]);
  $net->train([0,1] => [0,1]);
  $net->train([0,0] => [1,0]);

Note that one pass through the data is seldom sufficient to train a network.
In the example "logical or" program, we actually run this data through the
network ten thousand times.

  for (1 .. 10000) {
    $net->train([1,1] => [0,1]);
    $net->train([1,0] => [0,1]);

lib/AI/NeuralNet/Simple.pm  view on Meta::CPAN

=head2 C<infer(\@input)>

This method, if provided with an input array reference, will return an array
reference corresponding to the output values that it is guessing.  Note that
these values will generally be close, but not exact.  For example, with the 
"logical or" program, you might expect results similar to:

  use Data::Dumper;
  print Dumper $net->infer([1,1]);
  
  $VAR1 = [

lib/AI/NeuralNet/Simple.pm  view on Meta::CPAN

network in the book.  The "game ai" example in the examples directory is based
upon an example he has graciously allowed me to use.  I I<had> to use it
because it's more fun than many of the dry examples out there :)

"Naturally Intelligent Systems", by Maureen Caudill and Charles Butler,
copyright (c) 1990 by Massachusetts Institute of Technology.

This book is a decent introduction to neural networks in general.  The forward
feed back error propagation is but one of many types.

=head1 AUTHORS



AI-Ollama-Client

lib/AI/Ollama/RequestOptions.pm  view on Meta::CPAN

has 'frequency_penalty' => (
    is       => 'ro',
    isa      => Num,
);

=head2 C<< logits_all >>

Enable logits all. (Default: false)

=cut

has 'logits_all' => (
    is       => 'ro',
);

=head2 C<< low_vram >>

lib/AI/Ollama/RequestOptions.pm  view on Meta::CPAN

    isa      => Int,
);

=head2 C<< num_thread >>

Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores).

=cut

has 'num_thread' => (
    is       => 'ro',



AI-PSO

lib/AI/PSO.pm  view on Meta::CPAN


#-#-# search parameters #-#-#
my $numParticles  = 'null';            # This is the number of particles that actually search the problem hyperspace
my $numNeighbors  = 'null';            # This is the number of neighboring particles that each particle shares information with
                                       # which must obviously be less than the number of particles and greater than 0.
                                         # TODO: write code to preconstruct different topologies.  Such as fully connected, ring, star etc.
                                         #       Currently, neighbors are chosen by a simple hash function.  
                                         #       It would be fun (no theoretical benefit that I know of) to play with different topologies.
my $maxIterations = 'null';            # This is the maximum number of optimization iterations before exiting if the fitness goal is never reached.
my $exitFitness   = 'null';            # this is the exit criteria.  It must be a value between 0 and 1.
my $dimensions    = 'null';            # this is the number of variables the user is optimizing


lib/AI/PSO.pm  view on Meta::CPAN




#
# initialize_neighbors
# NOTE: I made this a separate subroutine so that different topologies of neighbors can be created and used instead of this.
# NOTE: This subroutine is currently not used because we access neighbors by index to the particle array rather than storing their references
# 
#  - adds a neighbor array to the particle hash data structure
#  - sets the neighbor based on the default neighbor hash function
#

lib/AI/PSO.pm  view on Meta::CPAN


    If you have a large search space, increasing deltaMin and deltaMax
    can help cover more area. Conversely, if you have a small search
    space, then decreasing them will fine-tune the search.

=item 3. Swarm Topology

    I've personally found that using a global (fully connected) topology 
    where each particle is neighbors with all other particles 
    (numNeighbors == numParticles - 1) converges more quickly.  However, 
    this will drastically increase the number of calls to your fitness 
    function.  So, if your fitness function is the bottleneck, then you 
    should tune this value for the appropriate time/accuracy trade-off.  



AI-ParticleSwarmOptimization-MCE

lib/AI/ParticleSwarmOptimization/MCE.pm  view on Meta::CPAN

=item I<-verbose>: flags, optional

If set to a non-zero value I<-verbose> determines the level of diagnostic print
reporting that is generated during optimization.

The following constants may be bitwise ored together to set logging options:

=over 4

=item * kLogBetter

lib/AI/ParticleSwarmOptimization/MCE.pm  view on Meta::CPAN


Shows the current iteration number.

=item * kLogDetail

Shows additional details for some of the other logging options.

=item * kLogIterDetail

Shorthand for C<kLogIter | kLogIterDetail>



AI-ParticleSwarmOptimization-Pmap

lib/AI/ParticleSwarmOptimization/Pmap.pm  view on Meta::CPAN

=item I<-verbose>: flags, optional

If set to a non-zero value I<-verbose> determines the level of diagnostic print
reporting that is generated during optimization.

The following constants may be bitwise ored together to set logging options:

=over 4

=item * kLogBetter

lib/AI/ParticleSwarmOptimization/Pmap.pm  view on Meta::CPAN


Shows the current iteration number.

=item * kLogDetail

Shows additional details for some of the other logging options.

=item * kLogIterDetail

Shorthand for C<kLogIter | kLogIterDetail>



AI-ParticleSwarmOptimization

lib/AI/ParticleSwarmOptimization.pm  view on Meta::CPAN


    die "-dimensions must be greater than 0\n"
        if exists $params{-dimensions} && $params{-dimensions} <= 0;

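    # -verbose may also be supplied as an array ref of log-type names;
    # map those names onto the kLog* bit flags described in the POD.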
    if (defined $self->{verbose} and 'ARRAY' eq ref $self->{verbose}) {
        my @log = map {lc} @{$self->{verbose}};
        my %logTypes = (
            better => kLogBetter,
            stall  => kLogStall,
            iter   => kLogIter,
            detail => kLogDetail,
        );

        $self->{verbose} = 0;
        exists $logTypes{$_} and $self->{verbose} |= $logTypes{$_} for @log;
    }

    $self->{numParticles} ||= $self->{dimensions} * 10
        if defined $self->{dimensions};
    $self->{numNeighbors} ||= int sqrt $self->{numParticles}

lib/AI/ParticleSwarmOptimization.pm  view on Meta::CPAN

=item I<-verbose>: flags, optional

If set to a non-zero value I<-verbose> determines the level of diagnostic print
reporting that is generated during optimization.

The following constants may be bitwise ored together to set logging options:

=over 4

=item * kLogBetter

lib/AI/ParticleSwarmOptimization.pm  view on Meta::CPAN


Shows the current iteration number.

=item * kLogDetail

Shows additional details for some of the other logging options.

=item * kLogIterDetail

Shorthand for C<kLogIter | kLogIterDetail>
