AI-ActivationFunctions
AI-ActivationFunctions-0.01/AI-ActivationFunctions-0.01/lib/AI/ActivationFunctions.pm
package AI::ActivationFunctions;
use strict;
use warnings;
use Exporter 'import';
our $VERSION = '0.01';
our @EXPORT_OK = qw(
relu prelu leaky_relu sigmoid tanh softmax
elu swish gelu relu_derivative sigmoid_derivative
);
our %EXPORT_TAGS = (
all => \@EXPORT_OK,
basic => [qw(relu prelu leaky_relu sigmoid tanh softmax)],
advanced => [qw(elu swish gelu)],
derivatives => [qw(relu_derivative sigmoid_derivative)],
);
# ReLU
sub relu {
my ($x) = @_;
return $x > 0 ? $x : 0;
}
# PReLU
sub prelu {
my ($x, $alpha) = @_;
$alpha //= 0.01;
return $x > 0 ? $x : $alpha * $x;
}
# Leaky ReLU
sub leaky_relu {
my ($x) = @_;
return prelu($x, 0.01);
}
# Sigmoid
sub sigmoid {
my ($x) = @_;
return 1 / (1 + exp(-$x));
}
# Tanh
sub tanh {
my ($x) = @_;
my $e2x = exp(2 * $x);
return ($e2x - 1) / ($e2x + 1);
}
# Softmax for an array reference
sub softmax {
my ($array) = @_;
return undef unless ref($array) eq 'ARRAY';
# Find the maximum (for numerical stability)
my $max = $array->[0];
foreach my $val (@$array) {
$max = $val if $val > $max;
}
# Compute the exponentials of the shifted values
my @exp_vals;
my $sum = 0;
foreach my $val (@$array) {
my $exp_val = exp($val - $max);
push @exp_vals, $exp_val;
$sum += $exp_val;
}
# Normalize
return [map { $_ / $sum } @exp_vals];
}
# ELU (Exponential Linear Unit)
sub elu {
my ($x, $alpha) = @_;
$alpha //= 1.0;
return $x > 0 ? $x : $alpha * (exp($x) - 1);
}
# Swish (Google)
sub swish {
my ($x) = @_;
return $x * sigmoid($x);
}
# GELU (Gaussian Error Linear Unit)
sub gelu {
my ($x) = @_;
return 0.5 * $x * (1 + tanh(sqrt(2/3.141592653589793) *
($x + 0.044715 * $x**3)));
}
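# Note: this is the standard tanh approximation
#   GELU(x) ~ 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
# of the exact form x * Phi(x), where Phi is the standard normal CDF.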
# ReLU derivative
sub relu_derivative {
my ($x) = @_;
return $x > 0 ? 1 : 0;
}
# Sigmoid derivative
sub sigmoid_derivative {
my ($x) = @_;
my $s = sigmoid($x);
return $s * (1 - $s);
}
1;
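A short usage sketch of the exported functions; the input values are arbitrary, and the module is assumed to be reachable on @INC (for example via use lib 'lib'):

#!/usr/bin/perl
use strict;
use warnings;
use lib 'lib';
use AI::ActivationFunctions qw(:basic :advanced :derivatives);

# Scalar activations
printf "relu(2.5)      = %.4f\n", relu(2.5);        # 2.5
printf "leaky_relu(-2) = %.4f\n", leaky_relu(-2);   # -0.02
printf "sigmoid(0)     = %.4f\n", sigmoid(0);       # 0.5
printf "gelu(1)        = %.4f\n", gelu(1);          # ~0.8412

# Softmax takes an array reference and returns one
my $probs = softmax([1, 2, 3]);
printf "softmax([1,2,3]) = [%s]\n",
    join(", ", map { sprintf "%.4f", $_ } @$probs);

# Derivatives for backpropagation
printf "sigmoid_derivative(0) = %.4f\n", sigmoid_derivative(0);   # 0.25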
examples/neural_network.pl
#!/usr/bin/perl
use strict;
use warnings;
use AI::ActivationFunctions qw(relu sigmoid softmax relu_derivative sigmoid_derivative);
print "=== Simple Neural Network Demo ===\n\n";
# Simple neural network layer simulation
sub neural_layer {
my ($inputs, $weights, $biases, $activation) = @_;
# Linear transformation: Wx + b
my @output;
for my $i (0..$#$weights) {
my $sum = $biases->[$i];
for my $j (0..$#$inputs) {
$sum += $weights->[$i][$j] * $inputs->[$j];
}
push @output, $sum;
}
# Apply the activation element-wise (assuming $activation is a code reference)
return [ map { $activation->($_) } @output ];
}
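One possible way to drive the layer above, assuming the activation is passed as a code reference and neural_layer returns the activated outputs as an array reference; the inputs, weights, and biases are made-up illustration values:

my $inputs  = [0.5, -1.2];
my $weights = [ [0.1, 0.4], [-0.3, 0.8] ];   # 2 neurons x 2 inputs
my $biases  = [0.0, 0.1];

my $hidden = neural_layer($inputs, $weights, $biases, \&relu);
my $out    = softmax($hidden);

printf "hidden: [%s]\n", join(", ", map { sprintf "%.4f", $_ } @$hidden);
printf "output: [%s]\n", join(", ", map { sprintf "%.4f", $_ } @$out);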
lib/AI/ActivationFunctions/PDL.pm
package AI::ActivationFunctions;
use strict;
use warnings;
use Exporter 'import';
our $VERSION = '0.01';
our @EXPORT_OK = qw(relu prelu leaky_relu sigmoid tanh softmax elu swish gelu relu_derivative sigmoid_derivative);
our %EXPORT_TAGS = (all => \@EXPORT_OK);
# ReLU - very simple
sub relu {
my ($x) = @_;
return $x > 0 ? $x : 0;
}
# PReLU
sub prelu {
my ($x, $alpha) = @_;
$alpha //= 0.01;
return $x > 0 ? $x : $alpha * $x;
}
# Leaky ReLU
sub leaky_relu {
my ($x) = @_;
return $x > 0 ? $x : 0.01 * $x;
}
# Sigmoid
sub sigmoid {
my ($x) = @_;
return 1 / (1 + exp(-$x));
}
# Tanh - correct version (without CORE::tanh)
sub tanh {
my ($x) = @_;
my $e2x = exp(2 * $x);
return ($e2x - 1) / ($e2x + 1);
}
# Softmax for an array reference
sub softmax {
my ($array) = @_;
# Find the maximum for numerical stability
my $max = $array->[0];
foreach my $val (@$array) {
$max = $val if $val > $max;
}
# Compute the exponentials
my @exp_vals;
my $sum = 0;
foreach my $val (@$array) {
my $exp_val = exp($val - $max);
push @exp_vals, $exp_val;
$sum += $exp_val;
}
# Normalize
return [map { $_ / $sum } @exp_vals];
}
# ELU (Exponential Linear Unit)
sub elu {
my ($x, $alpha) = @_;
$alpha //= 1.0;
return $x > 0 ? $x : $alpha * (exp($x) - 1);
}
# Swish (Google)
sub swish {
my ($x) = @_;
return $x * sigmoid($x);
}
# GELU (Gaussian Error Linear Unit) - used in BERT/GPT
sub gelu {
my ($x) = @_;
return 0.5 * $x * (1 + tanh(sqrt(2/3.141592653589793) *
($x + 0.044715 * $x**3)));
}
# ReLU derivative (for backpropagation)
sub relu_derivative {
my ($x) = @_;
return $x > 0 ? 1 : 0;
}
# Sigmoid derivative (for backpropagation)
sub sigmoid_derivative {
my ($x) = @_;
my $s = sigmoid($x);
return $s * (1 - $s);
}
1;
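A small check of the max-subtraction trick used in softmax above: shifting every input by a constant does not change the output, so even very large logits stay inside exp()'s safe range. A minimal sketch, assuming softmax is imported from the module:

use AI::ActivationFunctions qw(softmax);

# Without the shift, exp(1001) would overflow to infinity;
# subtracting the maximum keeps every exponent at or below 0.
my $small = softmax([1, 2, 3]);
my $large = softmax([1001, 1002, 1003]);   # same relative spacing

printf "softmax([1,2,3])          = [%s]\n",
    join(", ", map { sprintf "%.4f", $_ } @$small);
printf "softmax([1001,1002,1003]) = [%s]\n",
    join(", ", map { sprintf "%.4f", $_ } @$large);
# Both print [0.0900, 0.2447, 0.6652]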
test_minimal.pl
#!/usr/bin/perl
use strict;
use warnings;
# MINIMAL test - not using the module yet
print "1. Testing basic functions...\n";
# Define the functions locally first
sub relu {
my ($x) = @_;
return $x > 0 ? $x : 0;
}
sub tanh_simple {
my ($x) = @_;
my $e2x = exp(2 * $x);
return ($e2x - 1) / ($e2x + 1);
}
# Test
print " relu(5) = " . relu(5) . " (expected: 5)\n";
print " relu(-3) = " . relu(-3) . " (expected: 0)\n";
print " tanh(0) = " . tanh_simple(0) . " (expected: ~0)\n";