Algorithm-ExpectationMaximization
lib/Algorithm/ExpectationMaximization.pm
# In general, the smaller the value of the MDL quality measure calculated below, the
# better the clustering of the data.
sub clustering_quality_mdl {
my $self = shift;
# Calculate the inverses of all of the covariance matrices in order to avoid
# having to calculate them repeatedly inside the inner 'foreach' loop in the
# main part of this method. Here we go:
my @covar_inverses;
foreach my $cluster_index (0..$self->{_K}-1) {
my $covar = $self->{_cluster_covariances}->[$cluster_index];
push @covar_inverses, $covar->inverse();
}
# For the clustering quality, first calculate the log-likelihood of all the
# observed data:
my $log_likelihood = 0;
foreach my $tag (@{$self->{_data_id_tags}}) {
my $likelihood_for_each_tag = 0;
foreach my $cluster_index (0..$self->{_K}-1) {
my $mean_vec = $self->{_cluster_means}->[$cluster_index];
my $covar = $self->{_cluster_covariances}->[$cluster_index];
my $data_vec = Math::GSL::Matrix->new($self->{_data_dimensions},1);
$data_vec->set_col( 0, $self->{_data}->{$tag});
my $datavec_minus_mean = $data_vec - $mean_vec;
my $exponent = undef;
if ($self->{_data_dimensions} > 1) {
$exponent = -0.5 * vector_matrix_multiply(
transpose($datavec_minus_mean),
matrix_vector_multiply($covar_inverses[$cluster_index],
$datavec_minus_mean ) );
} else {
my @var_inverse = $covar_inverses[$cluster_index]->as_list;
my $var_inverse_val = $var_inverse[0];
my @data_minus_mean = $datavec_minus_mean->as_list;
my $data_minus_mean_val = $data_minus_mean[0];
$exponent = -0.5 * ($data_minus_mean_val ** 2) * $var_inverse_val;
}
next if $covar->det() < 0;
my $coefficient = 1.0 /
( (2 * $Math::GSL::Const::M_PI)**$self->{_data_dimensions}
* sqrt($covar->det()) );
my $prob = $coefficient * exp($exponent);
$likelihood_for_each_tag +=
$prob * $self->{_class_priors}->[$cluster_index];
}
$log_likelihood += log( $likelihood_for_each_tag );
}
# Now calculate the model complexity penalty. $L is the total number of
# parameters it takes to specify a mixture of K Gaussians. If d is the
# dimensionality of the data space, the covariance matrix of each Gaussian takes
# (d**2 -d)/2 + d = d(d+1)/2 parameters since this matrix must be symmetric. And
# then you need d mean value parameters, and one prior probability parameter
# for the Gaussian. So $L = K[1 + d + d(d+1)/2] - 1 where the final '1' that
# is subtracted is to account for the normalization on the class priors.
my $L = (0.5 * $self->{_K} *
($self->{_data_dimensions}**2 + 3*$self->{_data_dimensions} + 2) ) - 1;
my $model_complexity_penalty = 0.5 * $L * log( $self->{_N} );
my $mdl_criterion = -1 * $log_likelihood + $model_complexity_penalty;
return $mdl_criterion;
}
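# As a worked example of the model-complexity penalty described above (the numbers
# are purely illustrative): for K = 3 Gaussians in d = 2 dimensions fitted to N = 100
# data points,
#
#     L       = K * (1 + d + d*(d+1)/2) - 1  =  3 * 6 - 1      =  17
#     penalty = 0.5 * L * log(N)             =  8.5 * log(100)  ~=  39.1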
# For our second measure of clustering quality, we use `trace( SW^-1 . SB)' where SW
# is the within-class scatter matrix, more commonly denoted S_w, and SB the
# between-class scatter matrix, more commonly denoted S_b (the underscore means
# subscript). This measure can be thought of as the normalized average distance
# between the clusters, the normalization being provided by the average cluster
# covariance SW^-1. Therefore, the larger the value of this quality measure, the
# better the separation between the clusters. Since this measure has its roots in
# the Fisher linear discriminant function, we incorporate the word 'fisher' in the
# name of the quality measure. Note that this measure is good only when the clusters
# are disjoint. When the clusters exhibit significant overlap, the numbers produced
# by this quality measure tend to be generally meaningless. As an extreme case,
# let's say your data was produced by a set of Gaussians, all with the same mean
# vector, but each with a distinct covariance. For this extreme case, this measure
# will produce a value close to zero --- depending on the accuracy with which the
# means are estimated --- even when your clusterer is doing a good job of identifying
# the individual clusters.
sub clustering_quality_fisher {
my $self = shift;
my @cluster_quality_indices;
my $fisher_trace = 0;
my $S_w =
Math::GSL::Matrix->new($self->{_data_dimensions}, $self->{_data_dimensions});
$S_w->zero;
my $S_b =
Math::GSL::Matrix->new($self->{_data_dimensions}, $self->{_data_dimensions});
$S_b->zero;
my $global_mean = Math::GSL::Matrix->new($self->{_data_dimensions},1);
$global_mean->zero;
foreach my $cluster_index(0..$self->{_K}-1) {
# accumulate the prior-weighted sum of the cluster means to get the global mean
$global_mean += $self->{_class_priors}->[$cluster_index] *
$self->{_cluster_means}->[$cluster_index];
}
foreach my $cluster_index(0..$self->{_K}-1) {
$S_w += $self->{_cluster_covariances}->[$cluster_index] *
$self->{_class_priors}->[$cluster_index];
my $class_mean_minus_global_mean = $self->{_cluster_means}->[$cluster_index]
- $global_mean;
my $outer_product = outer_product( $class_mean_minus_global_mean,
$class_mean_minus_global_mean );
$S_b += $self->{_class_priors}->[$cluster_index] * $outer_product;
}
my $fisher = matrix_multiply($S_w->inverse, $S_b);
return $fisher unless defined blessed($fisher);
return matrix_trace($fisher);
}
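# A minimal usage sketch (the file name, mask, and parameter values below are purely
# illustrative) showing how the two quality measures defined above might be compared
# after a run of EM -- remember that a smaller MDL value and a larger Fisher value
# both indicate better clustering:
#
#     my $clusterer = Algorithm::ExpectationMaximization->new(
#                         datafile          => 'mydatafile.csv',
#                         mask              => 'N111',
#                         K                 => 3,
#                         max_em_iterations => 300,
#                         seeding           => 'random',
#                         terminal_output   => 1,
#     );
#     $clusterer->read_data_from_file();
#     $clusterer->seed_the_clusters();
#     $clusterer->EM();
#     my $mdl_value    = $clusterer->clustering_quality_mdl();
#     my $fisher_value = $clusterer->clustering_quality_fisher();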
sub display_seeding_stats {
my $self = shift;
foreach my $cluster_index(0..$self->{_K}-1) {
print "\nSeeding for cluster $cluster_index:\n";
my $mean = $self->{_cluster_means}->[$cluster_index];
display_matrix("The mean is: ", $mean);
my $covariance = $self->{_cluster_covariances}->[$cluster_index];
display_matrix("The covariance is: ", $covariance);
}
}
sub display_fisher_quality_vs_iterations {
my $self = shift;
print "\n\nFisher Quality vs. Iterations: " .
"@{$self->{_fisher_quality_vs_iteration}}\n\n";
...
open FILEHANDLE, "| sort > $filename" or die "Unable to open file: $!";
foreach my $ele (@{$class_distributions[$i-1]}) {
print FILEHANDLE "$ele\n";
}
close FILEHANDLE;
}
}
sub DESTROY {
unlink "__temp_" . basename($_[0]->{_datafile});
unlink "__temp_data_" . basename($_[0]->{_datafile});
unlink "__temp2_" . basename($_[0]->{_datafile});
unlink glob "__temp1dhist*";
unlink glob "__contour*";
}
############################# Visualization Code ###############################
# The visualize_clusters() implementation displays as a plot in your terminal window
# the clusters constructed by the EM algorithm. It can show either 2D plots or
# 3D plots that you can rotate interactively for better visualization. For
# multidimensional data, which 2D or 3D subset of the dimensions is used for the
# visualization is controlled by the mask you must supply as an argument to the method.
# Should it happen that only one 'on' bit is specified in the mask, visualize_clusters()
# aborts.
#
# The visualization code consists of first accessing each of the clusters created by the
# EM() subroutine. Note that the clusters contain only the symbolic names for the
# individual records in the source data file. We therefore next reach into the
# $self->{_data} hash and get the data coordinates associated with each symbolic
# label in a cluster. The numerical data thus generated is then written out to a
# temp file. When doing so we must remember to insert TWO BLANK LINES between the
# data blocks corresponding to the different clusters. This constraint is imposed
# on us by Gnuplot when plotting data from the same file since we want to use
# different point styles for the data points in different cluster files.
# Subsequently, we call upon the Perl interface provided by the Graphics::GnuplotIF
# module to plot the data clusters.
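# For instance, the temp-file writing step described above amounts to something like
# the following sketch (the variable names here are illustrative, not necessarily the
# ones used in the code below):
#
#     open my $fh, '>', $temp_file or die "Cannot open $temp_file: $!";
#     foreach my $cluster (@{$self->{_clusters}}) {
#         print $fh "@{$visualization_data{$_}}\n" for @$cluster;
#         print $fh "\n\n";    # two blank lines separate the per-cluster data blocks
#     }
#     close $fh;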
sub visualize_clusters {
my $self = shift;
my $v_mask;
my $pause_time;
if (@_ == 1) {
$v_mask = shift || die "visualization mask missing";
} elsif (@_ == 2) {
$v_mask = shift || die "visualization mask missing";
$pause_time = shift;
} else {
die "visualize_clusters() called with wrong args";
}
my $master_datafile = $self->{_datafile};
my @v_mask = split //, $v_mask;
my $visualization_mask_width = @v_mask;
my $original_data_mask = $self->{_mask};
my @mask = split //, $original_data_mask;
my $data_field_width = scalar grep {$_ eq '1'} @mask;
die "\n\nABORTED: The width of the visualization mask (including " .
"all its 1s and 0s) must equal the width of the original mask " .
"used for reading the data file (counting only the 1's)"
if $visualization_mask_width != $data_field_width;
my $visualization_data_field_width = scalar grep {$_ eq '1'} @v_mask;
# The following section is for the superimposed one-Mahalanobis-distance-unit
# ellipses that are shown only for 2D plots:
if ($visualization_data_field_width == 2) {
foreach my $cluster_index (0..$self->{_K}-1) {
my $contour_filename = "__contour_" . $cluster_index . ".dat";
my $mean = $self->{_cluster_means}->[$cluster_index];
my $covariance = $self->{_cluster_covariances}->[$cluster_index];
my ($mux,$muy) = $mean->as_list();
my ($varx,$sigmaxy) = $covariance->row(0)->as_list();
my ($sigmayx,$vary) = $covariance->row(1)->as_list();
die "Your covariance matrix does not look right"
unless $sigmaxy == $sigmayx;
my ($sigmax,$sigmay) = (sqrt($varx),sqrt($vary));
my $argstring = <<"END";
set contour
mux = $mux
muy = $muy
sigmax = $sigmax
sigmay = $sigmay
sigmaxy = $sigmaxy
determinant = (sigmax**2)*(sigmay**2) - sigmaxy**2
exponent(x,y) = -0.5 * (1.0 / determinant) * ( ((x-mux)**2)*sigmay**2 + ((y-muy)**2)*sigmax**2 - 2*sigmaxy*(x-mux)*(y-muy) )
f(x,y) = exp( exponent(x,y) ) - 0.2
xmax = mux + 2 * sigmax
xmin = mux - 2 * sigmax
ymax = muy + 2 * sigmay
ymin = muy - 2 * sigmay
set xrange [ xmin : xmax ]
set yrange [ ymin : ymax ]
set isosamples 200
unset surface
set cntrparam levels discrete 0
set table \"$contour_filename\"
splot f(x,y)
unset table
END
my $plot = Graphics::GnuplotIF->new();
$plot->gnuplot_cmd( $argstring );
}
}
my %visualization_data;
while ( my ($record_id, $data) = each %{$self->{_data}} ) {
my @fields = @$data;
die "\nABORTED: Visualization mask size exceeds data record size"
if $#v_mask > $#fields;
my @data_fields;
foreach my $i (0..@fields-1) {
if ($v_mask[$i] eq '0') {
next;
} elsif ($v_mask[$i] eq '1') {
push @data_fields, $fields[$i];
} else {
die "Misformed visualization mask. It can only have 1s and 0s";
}
}
$visualization_data{ $record_id } = \@data_fields;
}
my $K = scalar @{$self->{_clusters}};
my $filename = basename($master_datafile);
my $temp_file = "__temp_" . $filename;
unlink $temp_file if -e $temp_file;
...
sub matrix_vector_multiply {
my $matrix1 = shift;
my $matrix2 = shift;
my ($nrows1, $ncols1) = ($matrix1->rows(), $matrix1->cols());
my ($nrows2, $ncols2) = ($matrix2->rows(), $matrix2->cols());
die "matrix multiplication called with non-matching matrix arguments"
unless $ncols1 == $nrows2 && $ncols2 == 1;
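# Special case: a 1xN row times an Nx1 column is a scalar, so return the dot product directly.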
if ($nrows1 == 1) {
my @row = $matrix1->row(0)->as_list;
my @col = $matrix2->col(0)->as_list;
my $result = 0;
foreach my $j (0..$ncols1-1) {
$result += $row[$j] * $col[$j];
}
return $result;
}
my $product = Math::GSL::Matrix->new($nrows1, 1);
my $col = $matrix2->col(0);
my @product_col;
foreach my $i (0..$nrows1-1) {
my $row = $matrix1->row($i);
my $row_times_col = matrix_vector_multiply($row, $col);
push @product_col, $row_times_col;
}
$product->set_col(0, \@product_col);
return $product;
}
sub matrix_trace {
my $matrix = shift;
my ($nrows, $ncols) = ($matrix->rows(), $matrix->cols());
die "trace can only be calculated for a square matrix"
unless $ncols == $nrows;
my @elements = $matrix->as_list;
my $trace = 0;
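# as_list() returns the matrix elements in row-major order, so the i-th diagonal
# element is at offset $i * $ncols + $i: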
foreach my $i (0..$nrows-1) {
$trace += $elements[$i + $i * $ncols];
}
return $trace;
}
1;
=pod
=head1 NAME
Algorithm::ExpectationMaximization -- A Perl module for clustering numerical
multi-dimensional data with the Expectation-Maximization algorithm.
=head1 SYNOPSIS
use Algorithm::ExpectationMaximization;
# First name the data file:
my $datafile = "mydatafile.csv";
# Next, set the mask to indicate which columns of the datafile to use for
# clustering and which column contains a symbolic ID for each data record. For
# example, if the symbolic name is in the first column, you want the second column
# to be ignored, and you want the next three columns to be used for 3D clustering:
my $mask = "N0111";
# Now construct an instance of the clusterer. The parameter `K' controls the
# number of clusters. Here is an example call to the constructor for instance
# creation:
my $clusterer = Algorithm::ExpectationMaximization->new(
datafile => $datafile,
mask => $mask,
K => 3,
max_em_iterations => 300,
seeding => 'random',
terminal_output => 1,
);
# Note the choice for `seeding'. The choice `random' means that the clusterer will
# randomly select `K' data points to serve as initial cluster centers. Other
# possible choices for the constructor parameter `seeding' are `kmeans' and
# `manual'. With the `kmeans' option for `seeding', the output of a K-means
# clusterer is used for the cluster seeds and the initial cluster covariances. If
# you use the `manual' option for seeding, you must also specify the data elements
# to use for seeding the clusters.
# Here is an example of a call to the constructor when we choose the `manual'
# option for seeding the clusters and for specifying the data elements for
# seeding. The data elements are specified by their tag names. In this case,
# these names are `a26', `b53', and `c49':
my $clusterer = Algorithm::ExpectationMaximization->new(
datafile => $datafile,
mask => $mask,
class_priors => [0.6, 0.2, 0.2],
K => 3,
max_em_iterations => 300,
seeding => 'manual',
seed_tags => ['a26', 'b53', 'c49'],
terminal_output => 1,
);
# This example call to the constructor also illustrates how you can inject class
# priors into the clustering process. The class priors are the prior probabilities
# of the class distributions in your dataset. As explained later, injecting class
# priors in the manner shown above makes statistical sense only for the case of
# manual seeding. When you do inject class priors, the order in which the priors
# are expressed must correspond to the manually specified seeds for the clusters.
# After the invocation of the constructor, the following calls are mandatory
# for reasons that should be obvious from the names of the methods:
$clusterer->read_data_from_file();
srand(time);
$clusterer->seed_the_clusters();
$clusterer->EM();
$clusterer->run_bayes_classifier();
my $clusters = $clusterer->return_disjoint_clusters();
# where the call to `EM()' is the invocation of the expectation-maximization
# algorithm. The call to `srand(time)' is to seed the pseudo random number
...
module. The basic implementation code remains unchanged.
=head1 DESCRIPTION
B<Algorithm::ExpectationMaximization> is a I<perl5> module for the
Expectation-Maximization (EM) method of clustering numerical data that lends itself
to modeling as a Gaussian mixture. Since the module is entirely in Perl (in the
sense that it is not a Perl wrapper around a C library that actually does the
clustering), the code in the module can easily be modified to experiment with several
aspects of EM.
Gaussian Mixture Modeling (GMM) is based on the assumption that the data consists of
C<K> Gaussian components, each characterized by its own mean vector and its own
covariance matrix. Obviously, given observed data for clustering, we do not know
which of the C<K> Gaussian components was responsible for any of the data elements.
GMM also associates a prior probability with each Gaussian component. In general,
these priors will also be unknown. So the problem of clustering consists of
estimating the posterior class probability at each data element and also estimating
the class priors. Once these posterior class probabilities and the priors are
estimated with EM, we can use the naive Bayes' classifier to partition the data into
disjoint clusters. Or, for "soft" clustering, we can find all the data elements that
belong to a Gaussian component on the basis of the posterior class probabilities at
the data elements exceeding a prescribed threshold.
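For example, after a run of C<EM()>, the hard partition produced by the naive Bayes'
classifier can be retrieved and printed with something along the lines of the sketch
below (it assumes, as in the SYNOPSIS, that C<return_disjoint_clusters()> hands back a
reference to an array of clusters, each cluster being a reference to an array of the
symbolic record tags):

    $clusterer->run_bayes_classifier();
    my $clusters = $clusterer->return_disjoint_clusters();
    foreach my $i (0..@$clusters-1) {
        print "Cluster $i: @{$clusters->[$i]}\n";
    }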
If you do not mind the fact that it is possible for the EM algorithm to occasionally
get stuck in a local maximum and to, therefore, produce a wrong answer even when you
know the data to be perfectly multimodal Gaussian, EM is probably the most magical
approach to clustering multidimensional data. Consider the case of clustering
three-dimensional data. Each Gaussian cluster in 3D space is characterized by the
following 10 variables: the 6 unique elements of the C<3x3> covariance matrix (which
must be symmetric positive-definite), the 3 unique elements of the mean, and the
prior associated with the Gaussian. Now let's say you expect to see six Gaussians in
your data. What that means is that you would want the values for 59 variables
(remember the unit-summation constraint on the class priors which reduces the overall
number of variables by one) to be estimated by the algorithm that seeks to discover
the clusters in your data. What's amazing is that, despite the large number of
variables that must be optimized simultaneously, the EM algorithm will very likely
give you a good approximation to the right answer.
At its core, EM depends on the notion of unobserved data and the averaging of the
log-likelihood of the data actually observed over all admissible probabilities for
the unobserved data. But what is unobserved data? While in some cases where EM is
used, the unobserved data is literally the missing data, in others, it is something
that cannot be seen directly but that nonetheless is relevant to the data actually
observed. For the case of clustering multidimensional numerical data that can be
modeled as a Gaussian mixture, it turns out that the best way to think of the
unobserved data is in terms of a sequence of random variables, one for each observed
data point, whose values dictate the selection of the Gaussian for that data point.
This point is explained in great detail in my on-line tutorial at
L<https://engineering.purdue.edu/kak/Tutorials/ExpectationMaximization.pdf>.
The EM algorithm in our context reduces to an iterative invocation of the following
steps: (1) Given the current guess for the means and the covariances of the different
Gaussians in our mixture model, use Bayes' Rule to update the posterior class
probabilities at each of the data points; (2) Using the updated posterior class
probabilities, first update the class priors; (3) Using the updated class priors,
update the class means and the class covariances; and go back to Step (1). Ideally,
the iterations should terminate when the expected log-likelihood of the observed data
has reached a maximum and does not change with any further iterations. The stopping
rule used in this module is the detection of no change over three consecutive
iterations in the values calculated for the priors.
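As a concrete illustration of Steps (1) through (3), here is a minimal, self-contained
sketch of one EM iteration for a two-component 1-D Gaussian mixture (this is not code
from the module; the data values and variable names are made up for the illustration,
and the covariance update is omitted to keep the sketch short):

    use strict; use warnings;
    use List::Util qw(sum);
    my @data  = (1.0, 1.2, 0.8, 5.1, 4.9, 5.3);      # observed 1-D data points
    my @prior = (0.5, 0.5);                          # current class priors
    my @mean  = (1.0, 5.0);                          # current class means
    my @var   = (1.0, 1.0);                          # current class variances
    my $gauss = sub {                                # 1-D Gaussian density
        my ($x, $m, $v) = @_;
        return exp( -(($x - $m) ** 2) / (2 * $v) ) / sqrt(2 * 3.14159265358979 * $v);
    };
    # Step (1), the E-step: posterior class probabilities at each point via Bayes' Rule
    my @posterior;
    foreach my $x (@data) {
        my @unnormalized = map { $prior[$_] * $gauss->($x, $mean[$_], $var[$_]) } 0, 1;
        my $total = $unnormalized[0] + $unnormalized[1];
        push @posterior, [ map { $_ / $total } @unnormalized ];
    }
    # Steps (2) and (3), the M-step: update the priors and then the means
    foreach my $k (0, 1) {
        my $responsibility = sum( map { $_->[$k] } @posterior );
        $prior[$k] = $responsibility / scalar @data;
        $mean[$k]  = sum( map { $posterior[$_][$k] * $data[$_] } 0..$#data ) / $responsibility;
    }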
This module provides three different choices for seeding the clusters: (1) random,
(2) kmeans, and (3) manual. When random seeding is chosen, the algorithm randomly
selects C<K> data elements as cluster seeds. That is, the data vectors associated
with these seeds are treated as initial guesses for the means of the Gaussian
distributions. The covariances are then set to the values calculated from the entire
dataset with respect to the means corresponding to the seeds. With kmeans seeding, on
the other hand, the means and the covariances are set to whatever values are returned
by the kmeans algorithm. And, when seeding is set to manual, you are allowed to
choose C<K> data elements --- by specifying their tag names --- for the seeds. The
rest of the EM initialization for the manual mode is the same as for the random mode.
The algorithm allows for the initial priors to be specified for the manual mode of
seeding.
Much of code for the kmeans based seeding of EM was drawn from the
C<Algorithm::KMeans> module by me. The code from that module used here corresponds to
the case when the C<cluster_seeding> option in the C<Algorithm::KMeans> module is set
to C<smart>. The C<smart> option for KMeans consists of subjecting the data to a
principal components analysis (PCA) to discover the direction of maximum variance in
the data space. The data points are then projected on to this direction and a
histogram constructed from the projections. Centers of the C<K> largest peaks in
this smoothed histogram are used to seed the KMeans based clusterer. As you'd
expect, the output of this KMeans clusterer is then used to seed the EM algorithm.
This module uses two different criteria to measure the quality of the clustering
achieved. The first is the Minimum Description Length (MDL) proposed originally by
Rissanen (J. Rissanen: "Modeling by Shortest Data Description," Automatica, 1978, and
"A Universal Prior for Integers and Estimation by Minimum Description Length," Annals
of Statistics, 1983.) The MDL criterion is a difference of a log-likelihood term for
all of the observed data and a model-complexity penalty term. In general, both the
log-likelihood and the model-complexity terms increase as the number of clusters
increases. The form of the MDL criterion in this module uses for the penalty term
the Bayesian Information Criterion (BIC) of G. Schwarz, "Estimating the Dimension
of a Model," The Annals of Statistics, 1978. In general, the smaller the value of the
MDL quality measure, the better the clustering of the data.
For our second measure of clustering quality, we use `trace( SW^-1 . SB)' where SW is
the within-class scatter matrix, more commonly denoted S_w, and SB the between-class
scatter matrix, more commonly denoted S_b (the underscore means subscript). This
measure can be thought of as the normalized average distance between the clusters,
the normalization being provided by the average cluster covariance SW^-1. Therefore, the
larger the value of this quality measure, the better the separation between the
clusters. Since this measure has its roots in the Fisher linear discriminant
function, we incorporate the word C<fisher> in the name of the quality measure.
I<Note that this measure is good only when the clusters are disjoint.> When the
clusters exhibit significant overlap, the numbers produced by this quality measure
tend to be generally meaningless.
=head1 METHODS
The module provides the following methods for EM based
clustering, for cluster visualization, for data
visualization, and for the generation of data for testing a
clustering algorithm:
=over
=item B<new():>
A call to C<new()> constructs a new instance of the
C<Algorithm::ExpectationMaximization> class. A typical form
of this call when you want to use the random option for seeding
the algorithm is:
my $clusterer = Algorithm::ExpectationMaximization->new(
datafile => $datafile,
mask => $mask,
K => 3,
max_em_iterations => 300,
seeding => 'random',
terminal_output => 1,
);
where C<K> is the expected number of clusters and
C<max_em_iterations> the maximum number of EM iterations
that you want to allow until convergence is achieved.
Depending on your dataset and on the choice of the initial
seeds, the actual number of iterations used could be as few
as 10 or as many as the full 300 allowed. The output produced by
the algorithm shows the actual number of iterations used to
arrive at convergence.
The data file supplied through the C<datafile> option is
expected to contain entries in the following format
c20 0 10.7087017086940 9.63528386251712 10.9512155258108 ...
c7 0 12.8025925026787 10.6126270065785 10.5228482095349 ...
b9 0 7.60118206283120 5.05889245193079 5.82841781759102 ...
....
....
where the first column contains the symbolic ID tag for each
data record and the rest of the columns the numerical
information. Which columns are actually used for
clustering is decided by the string value of the mask. For
example, if we wanted to cluster on the basis of the entries
in just the 3rd, the 4th, and the 5th columns above, the
mask value would be C<N0111> where the character C<N>
indicates that the ID tag is in the first column, the
character C<0> that the second column is to be ignored, and
the C<1>'s that follow that the 3rd, the 4th, and the 5th
columns are to be used for clustering.
If instead of random seeding, you wish to use the kmeans
based seeding, just replace the option C<random> supplied
for C<seeding> by C<kmeans>. You can also do manual seeding
by designating a specified set of data elements to serve as
cluster seeds. The call to the constructor in this case
looks like
my $clusterer = Algorithm::ExpectationMaximization->new(
datafile => $datafile,
mask => $mask,
K => 3,
max_em_iterations => 300,
seeding => 'manual',
seed_tags => ['a26', 'b53', 'c49'],
terminal_output => 1,
);
where the option C<seed_tags> is set to an anonymous array
of symbolic names associated with the data elements.
If you know the class priors, you can supply them through an
additional option to the constructor that looks like
class_priors => [0.6, 0.2, 0.2],
for the case of C<K> equal to 3. B<In general, this would
be a useful thing to do only for the case of manual
seeding.> If you go for manual seeding, the order in which
the priors are expressed should correspond to the order of
the manually chosen tags supplied through the C<seed_tags>
option.
Note that the parameter C<terminal_output> is boolean; when
not supplied in the call to C<new()> it defaults to 0. When
set, this parameter displays useful information in the
window of the terminal screen in which you invoke the
algorithm.
=item B<read_data_from_file():>
$clusterer->read_data_from_file()
This is a required call after the constructor is invoked. As
you would expect, this call reads in the data for
clustering.
=item B<seed_the_clusters():>
$clusterer->seed_the_clusters();
This is also a required call. It processes the option you
supplied for C<seeding> in the constructor call to choose
the data elements for seeding the C<K> clusters.
=item B<EM():>
$clusterer->EM();
...
highly anisotropic. The EM derived clustering for this data is shown in the files
C<save_example_5_cluster_plot.png> and C<save_example_5_posterior_prob_plot.png>, the
former displaying the hard clusters obtained by using the naive Bayes' classifier and
the latter showing the soft clusters obtained through the posterior class
probabilities at the data points.
=item I<canned_example6.pl>
This example, added in Version 1.2, demonstrates the use of this module for 1-D data.
In order to visualize the clusters for the 1-D case, we show them through their
respective histograms. The datafile used in this example is C<mydatafile7.dat>. The
data consists of two overlapping Gaussians. The EM derived clustering for this data
is shown in the files C<save_example_6_cluster_plot.png> and
C<save_example_6_posterior_prob_plot.png>, the former displaying the hard clusters
obtained by using the naive Bayes' classifier and the latter showing the soft
clusters obtained through the posterior class probabilities at the data points.
=back
Going through the six examples listed above will make you familiar with how to make
the calls to the clustering and the visualization methods. The C<examples> directory
also includes several parameter files with names like
param1.txt
param2.txt
param3.txt
...
These were used to generate the synthetic data for which the results are shown in the
C<examples> directory. Just make a copy of one of these files and edit it if you
would like to generate your own multivariate data for clustering. Note that you can
generate data with any dimensionality through appropriate entries in the parameter
file.
=head1 CAVEATS
When you run the scripts in the C<examples> directory, your results will NOT always
look like what I have shown in the PNG image files in the directory. As mentioned
earlier in the DESCRIPTION section, the EM algorithm, starting from randomly chosen
initial guesses for the cluster means, can get stuck in a local maximum.
That raises an interesting question of how one judges the correctness of clustering
results when dealing with real experimental data. For real data, the best approach
is to try the EM algorithm multiple times with all of the seeding options included in
this module. It would be safe to say that, at least in low dimensional spaces and
with sufficient data, a majority of your runs should yield "correct" results.
Also bear in mind that a pure Perl implementation is not meant for the clustering of
very large data files. It is really designed more for researching issues related to
EM based approaches to clustering.
=head1 REQUIRED
This module requires the following three modules:
Math::Random
Graphics::GnuplotIF
Math::GSL::Matrix
the first for generating the multivariate random numbers, the second for the
visualization of the clusters, and the last for access to the Perl wrappers for the
GNU Scientific Library. The C<Matrix> module of this library is used for various
algebraic operations on the covariance matrices of the Gaussians.
=head1 EXPORT
None by design.
=head1 BUGS
Please notify the author if you encounter any bugs. When sending email, please place
the string 'Algorithm EM' in the subject line.
=head1 INSTALLATION
Download the archive from CPAN in any directory of your choice. Unpack the archive
with a command that on a Linux machine would look like:
tar zxvf Algorithm-ExpectationMaximization-1.22.tar.gz
This will create an installation directory for you whose name will be
C<Algorithm-ExpectationMaximization-1.22>. Enter this directory and execute the
following commands for a standard install of the module if you have root privileges:
perl Makefile.PL
make
make test
sudo make install
If you do not have root privileges, you can carry out a non-standard install of the
module in any directory of your choice by:
perl Makefile.PL prefix=/some/other/directory/
make
make test
make install
With a non-standard install, you may also have to set your PERL5LIB environment
variable so that this module can find the other modules it requires. How you do that
would depend on what platform you are working on. In order to install this module in
a Linux machine on which I use tcsh for the shell, I set the PERL5LIB environment
variable by
setenv PERL5LIB /some/other/directory/lib64/perl5/:/some/other/directory/share/perl5/
If I used bash, I'd need to declare:
export PERL5LIB=/some/other/directory/lib64/perl5/:/some/other/directory/share/perl5/
=head1 ACKNOWLEDGMENTS
Version 1.2 is a result of the feedback received from Paul
May of University of Birmingham. Thanks, Paul!
=head1 AUTHOR
Avinash Kak, kak@purdue.edu