Algorithm-LinearManifoldDataClusterer
view release on metacpan or search on metacpan
lib/Algorithm/LinearManifoldDataClusterer.pm view on Meta::CPAN
}
push @trailing_eigenvec_matrices_for_all_subspaces,$trailing_eigenvec_matrix;
push @reference_vecs_for_all_subspaces, $mean;
}
my %best_subspace_based_partition_of_data;
foreach my $i (0..$self->{_KM}-1) {
$best_subspace_based_partition_of_data{$i} = [];
}
foreach my $data_tag (@{$self->{_data_tags}}) {
my $data_vec = Math::GSL::Matrix->new($self->{_data_dimensions},1);
$data_vec->set_col(0, $self->{_data_hash}->{$data_tag});
my @errors = map {reconstruction_error($data_vec,
$trailing_eigenvec_matrices_for_all_subspaces[$_],
$reference_vecs_for_all_subspaces[$_])}
0 .. $self->{_KM}-1;
my ($minval, $index_for_closest_subspace) = minimum(\@errors);
$total_reconstruction_error_this_iteration += $minval;
push @{$best_subspace_based_partition_of_data{$index_for_closest_subspace}},
$data_tag;
}
print "empty-cluster jag: total reconstruction error in this iteration: \n" .
"$total_reconstruction_error_this_iteration\n"
if $self->{_debug};
foreach my $i (0..$self->{_KM}-1) {
$clusters[$i] = $best_subspace_based_partition_of_data{$i};
}
display_clusters(\@newclusters) if $self->{_terminal_output};
@clusters = grep {@$_ != 0} @newclusters;
die "linear manifold based algorithm does not appear to work in this case $!"
unless @clusters == $self->{_KM};
}# end of foreach my $cluster (@clusters) ... loop followed by if clause for empty clusters
if ($self->{_data_dimensions} == 3) {
$visualization_msg = "clustering_at_iteration_$iteration_index";
$self->visualize_clusters_on_sphere($visualization_msg, \@clusters)
if $self->{_visualize_each_iteration};
$self->visualize_clusters_on_sphere($visualization_msg, \@clusters, "png")
if $self->{_make_png_for_each_iteration};
}
my @cluster_unimodality_quotients = map {$self->cluster_unimodality_quotient($clusters[$_],
$reference_vecs_for_all_subspaces[$_])} 0..@clusters-1;
my $min_value_for_unimodality_quotient = List::Util::min @cluster_unimodality_quotients;
print "\nCluster unimodality quotients: @cluster_unimodality_quotients\n" if $self->{_terminal_output};
die "\n\nBailing out!\n" .
"It does not look like these iterations will lead to a good clustering result.\n" .
"Program terminating. Try running again.\n"
if defined($previous_min_value_for_unimodality_quotient)
&& ($min_value_for_unimodality_quotient < 0.4)
&& ($min_value_for_unimodality_quotient < (0.5 * $previous_min_value_for_unimodality_quotient));
if ( $min_value_for_unimodality_quotient < 0.5 ) {
$unimodal_correction_flag = 1;
print "\nApplying unimodality correction:\n\n" if $self->{_terminal_output};
my @sorted_cluster_indexes =
sort {$cluster_unimodality_quotients[$b] <=> $cluster_unimodality_quotients[$a]} 0..@clusters-1;
my @newclusters;
foreach my $cluster_index (0..@clusters - 1) {
push @newclusters, $clusters[$sorted_cluster_indexes[$cluster_index]];
}
@clusters = @newclusters;
my $worst_cluster = pop @clusters;
print "\nthe worst cluster: @$worst_cluster\n" if $self->{_terminal_output};
my $second_worst_cluster = pop @clusters;
print "\nthe second worst cluster: @$second_worst_cluster\n" if $self->{_terminal_output};
push @$worst_cluster, @$second_worst_cluster;
fisher_yates_shuffle($worst_cluster);
my @first_half = @$worst_cluster[0 .. int(scalar(@$worst_cluster)/2) - 1];
my @second_half = @$worst_cluster[int(scalar(@$worst_cluster)/2) .. @$worst_cluster - 1];
push @clusters, \@first_half;
push @clusters, \@second_half;
if ($self->{_terminal_output}) {
print "\n\nShowing the clusters obtained after applying the unimodality correction:\n";
display_clusters(\@clusters);
}
}
if (@{$self->{_reconstruction_error_as_a_function_of_iteration}} > 0) {
my $last_recon_error = pop @{$self->{_reconstruction_error_as_a_function_of_iteration}};
push @{$self->{_reconstruction_error_as_a_function_of_iteration}}, $last_recon_error;
if (($last_recon_error - $total_reconstruction_error_this_iteration)
< $self->{_delta_normalized_error}) {
push @{$self->{_reconstruction_error_as_a_function_of_iteration}},
$total_reconstruction_error_this_iteration;
last;
}
}
push @{$self->{_reconstruction_error_as_a_function_of_iteration}},
$total_reconstruction_error_this_iteration;
$iteration_index++;
$previous_min_value_for_unimodality_quotient = $min_value_for_unimodality_quotient;
} # end of while loop on iteration_index
$self->{_num_iterations_actually_used} =
scalar @{$self->{_reconstruction_error_as_a_function_of_iteration}};
if ($self->{_terminal_output}) {
print "\nIterations of the main loop terminated at iteration number $iteration_index.\n";
print "Will now invoke graph partitioning to discover dominant clusters and to\n" .
"merge small clusters.\n\n" if $self->{_cluster_search_multiplier} > 1;
print "Total reconstruction error as a function of iterations: " .
"@{$self->{_reconstruction_error_as_a_function_of_iteration}}";
}
# now merge sub-clusters if cluster_search_multiplier > 1
my @final_clusters;
if ($self->{_cluster_search_multiplier} > 1) {
print "\n\nInvoking recursive graph partitioning to merge small clusters\n\n";
my @array_of_partitioned_cluster_groups = (\@clusters);
my @partitioned_cluster_groups;
my $how_many_clusters_looking_for = $self->{_K};
while (scalar(@final_clusters) < $self->{_K}) {
@partitioned_cluster_groups =
$self->graph_partition(shift @array_of_partitioned_cluster_groups,
$how_many_clusters_looking_for );
if (@{$partitioned_cluster_groups[0]} == 1) {
my $singular_cluster = shift @{$partitioned_cluster_groups[0]};
push @final_clusters, $singular_cluster;
$how_many_clusters_looking_for--;
push @array_of_partitioned_cluster_groups, $partitioned_cluster_groups[1];
} elsif (@{$partitioned_cluster_groups[1]} == 1) {
my $singular_cluster = shift @{$partitioned_cluster_groups[1]};
push @final_clusters, $singular_cluster;
$how_many_clusters_looking_for--;
push @array_of_partitioned_cluster_groups, $partitioned_cluster_groups[0];
} else {
push @array_of_partitioned_cluster_groups, $partitioned_cluster_groups[0];
push @array_of_partitioned_cluster_groups, $partitioned_cluster_groups[1];
}
}
my @data_clustered;
foreach my $cluster (@final_clusters) {
push @data_clustered, @$cluster;
}
unless (scalar(@data_clustered) == scalar(@{$self->{_data_tags}})) {
lib/Algorithm/LinearManifoldDataClusterer.pm view on Meta::CPAN
} else {
push @negative_clusters, $clusters->[$i];
}
}
return (\@positive_clusters, \@negative_clusters);
}
}
## Returns a two-element similarity measure for a pair of clusters: (1) the
## total reconstruction error when the data elements of each cluster are
## projected into the OTHER cluster's subspace, and (2) the squared distance
## between the two cluster means.  The smaller these numbers, the more
## similar the two clusters.  Used by the Phase-2 graph-partitioning logic.
sub pairwise_cluster_similarity {
    my $self = shift;
    my $cluster1 = shift;
    my $trailing_eigenvec_matrix_cluster1 = shift;
    my $reference_vec_cluster1 = shift;
    my $cluster2 = shift;
    my $trailing_eigenvec_matrix_cluster2 = shift;
    my $reference_vec_cluster2 = shift;
    # Project each data element of cluster1 into cluster2's subspace:
    my @errors_for_1_on_2 = map {my $data_vec = Math::GSL::Matrix->new($self->{_data_dimensions},1);
                                 $data_vec->set_col(0, $self->{_data_hash}->{$_});
                                 $self->reconstruction_error($data_vec,
                                                             $trailing_eigenvec_matrix_cluster2,
                                                             $reference_vec_cluster2)}
                            @$cluster1;
    # ... and each data element of cluster2 into cluster1's subspace:
    my @errors_for_2_on_1 = map {my $data_vec = Math::GSL::Matrix->new($self->{_data_dimensions},1);
                                 $data_vec->set_col(0, $self->{_data_hash}->{$_});
                                 $self->reconstruction_error($data_vec,
                                                             $trailing_eigenvec_matrix_cluster1,
                                                             $reference_vec_cluster1)}
                            @$cluster2;
    # Sum of the absolute errors.  (The previous `reduce {abs($a)+abs($b)}'
    # form never applied abs() to a single-element list and returned undef
    # for an empty cluster; the explicit map-then-sum handles both.)
    my $type_1_error = List::Util::sum(0, map {abs} @errors_for_1_on_2);
    my $type_2_error = List::Util::sum(0, map {abs} @errors_for_2_on_1);
    my $total_reconstruction_error = $type_1_error + $type_2_error;
    # Squared Euclidean distance between the two cluster means:
    my $diff_between_the_means = $reference_vec_cluster1 - $reference_vec_cluster2;
    my $dist_squared = transpose($diff_between_the_means) * $diff_between_the_means;
    my @dist_squared_as_list = $dist_squared->as_list();
    my $dist_between_means_based_error = shift @dist_squared_as_list;
    return ($total_reconstruction_error, $dist_between_means_based_error);
}
# delta ball
## Returns the fraction of a cluster's data elements that fall inside a
## "delta ball" (really an axis-aligned box) centered at the cluster mean:
## along each dimension the box extends delta*range on either side of the
## mean, where range is the spread of the cluster's data in that dimension.
## A low quotient suggests the cluster is not unimodal.
sub cluster_unimodality_quotient {
    my $self = shift;
    my $cluster = shift;        # ref to array of data tags
    my $mean = shift;           # GSL matrix holding the cluster mean
    my $delta = 0.4 * $self->{_scale_factor}; # Radius of the delta ball along each dimension
    my @mean = $mean->as_list;
    my @data_tags_for_range_tests;
    foreach my $dimen (0..$self->{_data_dimensions}-1) {
        my @values = map {$_->[$dimen]} map {$self->{_data_hash}->{$_}} @$cluster;
        my ($min, $max) = (List::Util::min(@values), List::Util::max(@values));
        my $range = $max - $min;
        my $mean_along_this_dimen = $mean[$dimen];
        # Tags whose coordinate in this dimension lies inside the delta ball.
        # (The original map-to-undef followed by `grep {$_}' silently dropped
        # any tag that itself evaluates to false, such as the tag "0"; a
        # direct grep on the range condition has no such hole.)
        my @tags = grep {
                       ($self->{_data_hash}->{$_}->[$dimen] > $mean_along_this_dimen - $delta * $range)
                       &&
                       ($self->{_data_hash}->{$_}->[$dimen] < $mean_along_this_dimen + $delta * $range)
                   } @$cluster;
        push @data_tags_for_range_tests, \@tags;
    }
    # Now find the intersection of the tag sets for each of the dimensions
    my %intersection_hash;
    foreach my $dimen (0..$self->{_data_dimensions}-1) {
        my %tag_hash_for_this_dimen = map {$_ => 1} @{$data_tags_for_range_tests[$dimen]};
        if ($dimen == 0) {
            %intersection_hash = %tag_hash_for_this_dimen;
        } else {
            %intersection_hash = map {$_ => 1} grep {$tag_hash_for_this_dimen{$_}}
                                 keys %intersection_hash;
        }
    }
    my @intersection_set = keys %intersection_hash;
    my $cluster_unimodality_index = scalar(@intersection_set) / scalar(@$cluster);
    return $cluster_unimodality_index;
}
## Searches for the reference vector that minimizes the total reconstruction
## error over all the data.  The candidates are the cluster mean itself plus
## $max_iterations points drawn uniformly at random from the bounding box of
## the cluster's data.  Returns a ref to the winning coordinate array.
sub find_best_ref_vector {
    my $self = shift;
    my $cluster = shift;                  # ref to array of data tags
    my $trailing_eigenvec_matrix = shift;
    my $mean = shift;                     # a GSL matrix ref
    # Bounding box of the cluster's data, one (min,max) pair per dimension:
    my @min_bounds;
    my @max_bounds;
    my @ranges;
    foreach my $dimen (0..$self->{_data_dimensions}-1) {
        my @values = map {$_->[$dimen]} map {$self->{_data_hash}->{$_}} @$cluster;
        my ($min, $max) = (List::Util::min(@values), List::Util::max(@values));
        push @min_bounds, $min;
        push @max_bounds, $max;
        push @ranges, $max - $min;
    }
    if ($self->{_debug}) {
        print "min bounds are: @min_bounds\n";
        print "max bounds are: @max_bounds\n";
    }
    my $max_iterations = 100;
    my @random_points;
    my $iteration = 0;
    while ($iteration++ < $max_iterations) {
        my @coordinate_vec;
        foreach my $dimen (0..$self->{_data_dimensions}-1) {
            push @coordinate_vec, $min_bounds[$dimen] + rand($ranges[$dimen]);
        }
        push @random_points, \@coordinate_vec;
    }
    if ($self->{_debug}) {
        print "\nrandom points\n";
        print "@$_\n" for @random_points;
    }
    # Candidate 0 is the cluster mean itself, so the mean can win outright:
    my @mean = $mean->as_list;
    unshift @random_points, \@mean;
    # Total reconstruction error over ALL the data for each candidate:
    my @reconstruction_errors;
    foreach my $candidate_ref_vec (@random_points) {
        my $ref_vec = Math::GSL::Matrix->new($self->{_data_dimensions},1);
        $ref_vec->set_col(0, $candidate_ref_vec);
        my $reconstruction_error_for_a_ref_vec = 0;
        foreach my $data_tag (@{$self->{_data_tags}}) {
            my $data_vec = Math::GSL::Matrix->new($self->{_data_dimensions},1);
            $data_vec->set_col(0, $self->{_data_hash}->{$data_tag});
            $reconstruction_error_for_a_ref_vec +=
                $self->reconstruction_error($data_vec,$trailing_eigenvec_matrix,$ref_vec);
        }
        push @reconstruction_errors, $reconstruction_error_for_a_ref_vec;
    }
    # NOTE(review): the original used List::Util::first { $_ == $min }, which
    # returns the minimum VALUE, not its index, and then indexed
    # @random_points with it -- effectively returning an arbitrary candidate.
    # It also excluded the mean's own error from the comparison even though
    # it had been computed.  The explicit index scan below fixes both.
    my $minindex = 0;
    foreach my $i (1..$#reconstruction_errors) {
        $minindex = $i if $reconstruction_errors[$i] < $reconstruction_errors[$minindex];
    }
    return $random_points[$minindex];
}
## The reconstruction error relates to the size of the perpendicular from a data
## point X to the hyperplane that defines a given subspace on the manifold.
sub reconstruction_error {
lib/Algorithm/LinearManifoldDataClusterer.pm view on Meta::CPAN
######################### Generating Synthetic Data for Manifold Clustering ##########################
################################## Class DataGenerator ########################################
## The embedded class defined below is for generating synthetic data for
## experimenting with linear manifold clustering when the data resides on the
## surface of a sphere. See the script generate_data_on_a_sphere.pl in the
## `examples' directory for how to specify the number of clusters and the spread of
## each cluster in the data that is generated.
package DataGenerator;
use strict;
use Carp;
## Constructor for the synthetic-data generator.  Required keywords:
## output_file, total_number_of_samples_needed.  Optional:
## number_of_clusters_on_sphere (default 3), cluster_width (default 0.1),
## show_hidden_in_3D_plots (default 1), debug (default 0).
sub new {
    my ($class, %args) = @_;
    my @params = keys %args;
    croak "\nYou have used a wrong name for a keyword argument " .
          "--- perhaps a misspelling\n"
        if _check_for_illegal_params3(@params) == 0;
    bless {
        _output_file                    => $args{output_file}
                                           || croak("name for output_file required"),
        _total_number_of_samples_needed => $args{total_number_of_samples_needed}
                                           || croak("total_number_of_samples_needed required"),
        _number_of_clusters_on_sphere   => $args{number_of_clusters_on_sphere} || 3,
        _cluster_width                  => $args{cluster_width} || 0.1,
        # A defined-test rather than `||' here: the user may legitimately
        # pass show_hidden_in_3D_plots => 0 (the module's own synopsis does),
        # and `|| 1' would silently override that 0 with the default.
        _show_hidden_in_3D_plots        => defined $args{show_hidden_in_3D_plots}
                                               ? $args{show_hidden_in_3D_plots} : 1,
        _debug                          => $args{debug} || 0,
    }, $class;
}
## Validates the keyword arguments given to DataGenerator::new().  Returns 1
## when every supplied keyword is legal, 0 otherwise (and 1 for an empty
## list -- the original returned undef in that case, which triggered an
## uninitialized-value warning in the `== 0' test at the call site).
sub _check_for_illegal_params3 {
    my @params = @_;
    # `debug' is included because new() reads $args{debug}; the original
    # legal-params list omitted it, so any call that supplied debug => ...
    # was incorrectly rejected as a misspelled keyword.
    my %legal = map { $_ => 1 } qw/ output_file
                                    total_number_of_samples_needed
                                    number_of_clusters_on_sphere
                                    cluster_width
                                    show_hidden_in_3D_plots
                                    debug
                                  /;
    foreach my $param (@params) {
        return 0 unless $legal{$param};
    }
    return 1;
}
## We first generate a set of points randomly on the unit sphere --- the number of
## points being equal to the number of clusters desired. These points will serve as
## cluster means (or, as cluster centroids) subsequently when we ask
## Math::Random::random_multivariate_normal($N, @m, @covar) to return $N number of
## points on the sphere. The second argument is the cluster mean and the third
## argument the cluster covariance. For the synthetic data, we set the cluster
## covariance to a 2x2 diagonal matrix, with the (0,0) element corresponding to the
## variance along the azimuth direction and the (1,1) element corresponding to the
## variance along the elevation direction.
##
## When you generate the points in the 2D spherical coordinates of
## (azimuth,elevation), you also need `wrap-around' logic for those points yielded by
## the multivariate-normal function whose azimuth angle is outside the interval
## (0,360) and/or whose elevation angle is outside the interval (-90,90).
##
## Note that the first of the two dimensions for which the multivariate-normal
## function returns the points is for the azimuth angle and the second for the
## elevation angle.
##
## With regard to the relationship of the Cartesian coordinates to the spherical
## (azimuth, elevation) coordinates, we assume that (x,y) is the horizontal plane
## and z the vertical axis. The elevation angle theta is measured with respect to
## the XY-plane. The highest point on the sphere (the Zenith) corresponds to the
## elevation angle of +90 and the lowest points on the sphere (the Nadir)
## corresponds to the elevation angle of -90. The azimuth is measured with respect
## to the X-axis. The range of the azimuth is from 0 to 360 degrees. The elevation is
## measured from the XY plane and its range is (-90,90) degrees.
sub gen_data_and_write_to_csv {
my $self = shift;
my $K = $self->{_number_of_clusters_on_sphere};
# $N is the number of samples to be generated for each cluster:
my $N = int($self->{_total_number_of_samples_needed} / $K);
my $output_file = $self->{_output_file};
# Designate all of the data elements in a cluster by a letter that is followed by
# an integer that identifies a specific data element.
my @point_labels = ('a'..'z');
# Our first job is to define $K random points in the 2D space (azimuth,
# elevation) to serve as cluster centers on the surface of the sphere. This we
# do by calling a uniformly distributed 1-D random number generator, first for
# the azimuth and then for the elevation in the loop shown below:
my @cluster_centers;
my @covariances;
foreach my $i (0..$K-1) {
my $azimuth = rand(360);
my $elevation = rand(90) - 90;
my @mean = ($azimuth, $elevation);
push @cluster_centers, \@mean;
my $cluster_covariance;
# The j-th dimension is for azimuth and k-th for elevation for the directions
# to surface of the sphere:
foreach my $j (0..1) {
foreach my $k (0..1) {
$cluster_covariance->[$j]->[$k] = ($self->{_cluster_width} * 360.0) ** 2
if $j == 0 && $k == 0;
$cluster_covariance->[$j]->[$k] = ($self->{_cluster_width} * 180.0) ** 2
if $j == 1 && $k == 1;
$cluster_covariance->[$j]->[$k] = 0.0 if $j != $k;
}
}
push @covariances, $cluster_covariance;
}
if ($self->{_debug}) {
foreach my $i (0..$K-1) {
print "\n\nCluster center: @{$cluster_centers[$i]}\n";
print "\nCovariance:\n";
foreach my $j (0..1) {
foreach my $k (0..1) {
print "$covariances[$i]->[$j]->[$k] ";
}
print "\n";
}
}
}
my @data_dump;
foreach my $i (0..$K-1) {
my @m = @{shift @cluster_centers};
my @covar = @{shift @covariances};
lib/Algorithm/LinearManifoldDataClusterer.pm view on Meta::CPAN
my $mask = "N111";
# which says that the symbolic tag is in the first column and that the numerical
# data in the next three columns is to be used for clustering. If your data file
# had, say, five columns and you wanted only the last three columns to be
# clustered, the mask would become `N0111' assuming that the symbolic tag is
# still in the first column.
# Now you must construct an instance of the clusterer through a call such as:
my $clusterer = Algorithm::LinearManifoldDataClusterer->new(
datafile => $datafile,
mask => $mask,
K => 3,
P => 2,
max_iterations => 15,
cluster_search_multiplier => 2,
delta_reconstruction_error => 0.001,
terminal_output => 1,
visualize_each_iteration => 1,
show_hidden_in_3D_plots => 1,
make_png_for_each_iteration => 1,
);
# where the parameter K specifies the number of clusters you expect to find in
# your data and the parameter P is the dimensionality of the manifold on which the
# data resides. The parameter cluster_search_multiplier is for increasing the
# odds that the random seeds chosen initially for clustering will populate all the
# clusters. Set this parameter to a low number like 2 or 3. The parameter
# max_iterations places a hard limit on the number of iterations that the
# algorithm is allowed. The actual number of iterations is controlled by the
# parameter delta_reconstruction_error. The iterations stop when the change in
# the total "reconstruction error" from one iteration to the next is smaller than
# the value specified by delta_reconstruction_error.
# Next, you must get the module to read the data for clustering:
$clusterer->get_data_from_csv();
# Finally, you invoke linear manifold clustering by:
my $clusters = $clusterer->linear_manifold_clusterer();
# The value returned by this call is a reference to an array of anonymous arrays,
# with each anonymous array holding one cluster. If you wish, you can have the
# module write the clusters to individual files by the following call:
$clusterer->write_clusters_to_files($clusters);
# If you want to see how the reconstruction error changes with the iterations, you
# can make the call:
$clusterer->display_reconstruction_errors_as_a_function_of_iterations();
# When your data is 3-dimensional and when the clusters reside on a surface that
# is more or less spherical, you can visualize the clusters by calling
$clusterer->visualize_clusters_on_sphere("final clustering", $clusters);
# where the first argument is a label to be displayed in the 3D plot and the
# second argument the value returned by calling linear_manifold_clusterer().
# SYNTHETIC DATA GENERATION:
# The module includes an embedded class, DataGenerator, for generating synthetic
# three-dimensional data that can be used to experiment with the clustering code.
# The synthetic data, written out to a CSV file, consists of Gaussian clusters on
# the surface of a sphere. You can control the number of clusters, the width of
# each cluster, and the number of samples in the clusters by giving appropriate
# values to the constructor parameters as shown below:
use strict;
use Algorithm::LinearManifoldDataClusterer;
my $output_file = "4_clusters_on_a_sphere_1000_samples.csv";
my $training_data_gen = DataGenerator->new(
output_file => $output_file,
cluster_width => 0.015,
total_number_of_samples_needed => 1000,
number_of_clusters_on_sphere => 4,
show_hidden_in_3D_plots => 0,
);
$training_data_gen->gen_data_and_write_to_csv();
$training_data_gen->visualize_data_on_sphere($output_file);
=head1 CHANGES
Version 1.01: Typos and other errors removed in the documentation. Also included in
the documentation a link to a tutorial on data processing on manifolds.
=head1 DESCRIPTION
If you are new to machine learning and data clustering on linear and nonlinear
manifolds, your first question is likely to be: What is a manifold? A manifold is a
space that is locally Euclidean. And a space is locally Euclidean if it allows for
the points in a small neighborhood to be represented by, say, the Cartesian
coordinates and if the distances between the points in the neighborhood are given by
the Euclidean metric. For an example, the set of all points on the surface of a
sphere does NOT constitute a Euclidean space. Nonetheless, if you confined your
attention to a small enough neighborhood around a point, the space would seem to be
locally Euclidean. The surface of a sphere is a 2-dimensional manifold embedded in a
3-dimensional space. A plane in a 3-dimensional space is also a 2-dimensional
manifold. You would think of the surface of a sphere as a nonlinear manifold, whereas
a plane would be a linear manifold. However, note that any nonlinear manifold is
locally a linear manifold. That is, given a sufficiently small neighborhood on a
nonlinear manifold, you can always think of it as a locally flat surface.
As to why we need machine learning and data clustering on manifolds, there exist many
important applications in which the measured data resides on a nonlinear manifold.
For example, when you record images of a human face from different angles, all the
image pixels taken together fall on a low-dimensional surface in a high-dimensional
measurement space. The same is believed to be true for the satellite images of a land
mass that are recorded with the sun at different angles with respect to the direction
of the camera.
Reducing the dimensionality of the sort of data mentioned above is critical to the
proper functioning of downstream classification algorithms, and the most popular
traditional method for dimensionality reduction is the Principal Components Analysis
(PCA) algorithm. However, using PCA is tantamount to passing a linear least-squares
hyperplane through the surface on which the data actually resides. As to why that
might be a bad thing to do, just imagine the consequences of assuming that your data
falls on a straight line when, in reality, it falls on a strongly curving arc. This
is exactly what happens with PCA --- it gives you a linear manifold approximation to
your data that may actually reside on a curved surface.
That brings us to the purpose of this module, which is to cluster data that resides
on a nonlinear manifold. Since a nonlinear manifold is locally linear, we can think
of each data cluster on a nonlinear manifold as falling on a locally linear portion
of the manifold, meaning on a hyperplane. The logic of the module is based on
finding a set of hyperplanes that best describes the data, with each hyperplane
derived from a local data cluster. This is like constructing a piecewise linear
approximation to data that falls on a curve as opposed to constructing a single
straight line approximation to all of the data. So whereas the frequently used PCA
algorithm gives you a single hyperplane approximation to all your data, what this
module returns is a set of hyperplane approximations, with each hyperplane derived by
applying the PCA algorithm locally to a data cluster.
That brings us to the problem of how to actually discover the best set of hyperplane
approximations to the data. What is probably the most popular algorithm today for
that purpose is based on the following key idea: Given a set of subspaces to which a
data element can be assigned, you assign it to that subspace for which the
B<reconstruction error> is the least. But what do we mean by a B<subspace> and what
is B<reconstruction error>?
To understand the notions of B<subspace> and B<reconstruction-error>, let's revisit
the traditional approach of dimensionality reduction by the PCA algorithm. The PCA
algorithm consists of: (1) Subtracting from each data element the global mean of the
data; (2) Calculating the covariance matrix of the data; (3) Carrying out an
eigendecomposition of the covariance matrix and ordering the eigenvectors according
to decreasing values of the corresponding eigenvalues; (4) Forming a B<subspace> by
discarding the trailing eigenvectors whose corresponding eigenvalues are relatively
small; and, finally, (5) projecting all the data elements into the subspace so
formed. The error incurred in representing a data element by its projection into the
subspace is known as the B<reconstruction error>. This error is the projection of
the data element into the space spanned by the discarded trailing eigenvectors.
I<In linear-manifold based machine learning, instead of constructing a single
subspace in the manner described above, we construct a set of subspaces, one for each
data cluster on the nonlinear manifold. After the subspaces have been constructed, a
data element is assigned to that subspace for which the reconstruction error is the
least.> On the face of it, this sounds like a chicken-and-egg sort of a problem. You
need to have already clustered the data in order to construct the subspaces at
different places on the manifold so that you can figure out which cluster to place a
data element in.
Such problems, when they do possess a solution, are best tackled through iterative
algorithms in which you start with a guess for the final solution, you rearrange the
measured data on the basis of the guess, and you then use the new arrangement of the
data to refine the guess. Subsequently, you iterate through the second and the third
steps until you do not see any discernible changes in the new arrangements of the
data. This forms the basis of the clustering algorithm that is described under
B<Phase 1> in the section that follows. This algorithm was first proposed in the
article "Dimension Reduction by Local Principal Component Analysis" by Kambhatla and
Leen that appeared in the journal Neural Computation in 1997.
Unfortunately, experiments show that the algorithm as proposed by Kambhatla and Leen
is much too sensitive to how the clusters are seeded initially. To get around this
limitation of the basic clustering-by-minimization-of-reconstruction-error, this
module implements a two phased approach. In B<Phase 1>, we introduce a multiplier
effect in our search for clusters by looking for C<M*K> clusters instead of the main
C<K> clusters. In this manner, we increase the odds that each original cluster will
be visited by one or more of the C<M*K> randomly selected seeds at the beginning,
where C<M> is the integer value given to the constructor parameter
C<cluster_search_multiplier>. Subsequently, we merge the clusters that belong
together in order to form the final C<K> clusters. That work is done in B<Phase 2>
of the algorithm.
For the cluster merging operation in Phase 2, we model the C<M*K> clusters as the
nodes of an attributed graph in which the weight given to an edge connecting a pair
of nodes is a measure of the similarity between the two clusters corresponding to the
two nodes. Subsequently, we use spectral clustering to merge the most similar nodes
in our quest to partition the data into C<K> clusters. For that purpose, we use the
Shi-Malik normalized cuts algorithm. The pairwise node similarity required by this
algorithm is measured by the C<pairwise_cluster_similarity()> method of the
C<LinearManifoldDataClusterer> class. The smaller the overall reconstruction error
when all of the data elements in one cluster are projected into the other's subspace
and vice versa, the greater the similarity between two clusters. Additionally, the
smaller the distance between the mean vectors of the clusters, the greater the
similarity between two clusters. The overall similarity between a pair of clusters
is a combination of these two similarity measures.
For additional information regarding the theoretical underpinnings of the algorithm
implemented in this module, visit
L<https://engineering.purdue.edu/kak/Tutorials/ClusteringDataOnManifolds.pdf>
=head1 SUMMARY OF THE ALGORITHM
We now present a summary of the two phases of the algorithm implemented in this
module. Note particularly the important role played by the constructor parameter
C<cluster_search_multiplier>. It is only when the integer value given to this
parameter is greater than 1 that Phase 2 of the algorithm kicks in.
=over 4
=item B<Phase 1:>
Through iterative minimization of the total reconstruction error, this phase of the
algorithm returns C<M*K> clusters where C<K> is the actual number of clusters you
expect to find in your data and where C<M> is the integer value given to the
constructor parameter C<cluster_search_multiplier>. As previously mentioned, on
account of the sensitivity of the reconstruction-error based clustering to how the
clusters are initially seeded, our goal is to look for C<M*K> clusters with the idea
of increasing the odds that each of the C<K> clusters will see at least one seed at
the beginning of the algorithm.
=over 4
=item Step 1:
Randomly choose C<M*K> data elements to serve as the seeds for that many clusters.
=item Step 2:
Construct initial C<M*K> clusters by assigning each data element to that cluster
whose seed it is closest to.
=item Step 3:
Calculate the mean and the covariance matrix for each of the C<M*K> clusters and
carry out an eigendecomposition of the covariance matrix. Order the eigenvectors in
decreasing order of the corresponding eigenvalues. The first C<P> eigenvectors
define the subspace for that cluster. Use the space spanned by the remaining
eigenvectors --- we refer to them as the trailing eigenvectors --- for calculating
the reconstruction error.
=item Step 4:
Taking into account the mean associated with each cluster, re-cluster the entire data
set on the basis of the least reconstruction error. That is, assign each data
element to that subspace for which it has the smallest reconstruction error.
Calculate the total reconstruction error associated with all the data elements. (See
the definition of the reconstruction error in the C<Description> section.)
=item Step 5:
Stop iterating if the change in the total reconstruction error from the previous
iteration to the current iteration is less than the value specified by the constructor
parameter C<delta_reconstruction_error>. Otherwise, go back to Step 3.
=back
=item B<Phase 2:>
This phase of the algorithm uses graph partitioning to merge the C<M*K> clusters back
into the C<K> clusters you expect to see in your data. Since the algorithm whose
steps are presented below is invoked recursively, let's assume that we have C<N>
clusters that need to be merged by invoking the Shi-Malik spectral clustering
algorithm as described below:
=over 4
=item Step 1:
Form a graph whose C<N> nodes represent the C<N> clusters. (For the very first
invocation of this step, we have C<N = M*K>.)
=item Step 2:
Construct an C<NxN> similarity matrix for the nodes in the graph. The C<(i,j)>-th
element of this matrix is the pairwise similarity between the clusters indexed C<i>
and C<j>. Calculate this similarity on the basis of the following two criteria: (1)
The total reconstruction error when the data elements in the cluster indexed C<j> are
projected into the subspace for the cluster indexed C<i> and vice versa. And (2) The
distance between the mean vectors associated with the two clusters.
=item Step 3:
Calculate the symmetric normalized Laplacian of the similarity matrix. We use C<A>
to denote the symmetric normalized Laplacian.
=item Step 4:
Carry out an eigendecomposition of the C<A> matrix and choose the eigenvector
corresponding to the second smallest eigenvalue for bipartitioning the graph on the
basis of the sign of the values in the eigenvector.
=item Step 5:
If the bipartition of the previous step yields one-versus-the-rest kind of a
partition, add the `one' cluster to the output list of clusters and invoke graph
partitioning recursively on the `rest' by going back to Step 1. On the other hand,
if the cardinality of both the partitions returned by Step 4 exceeds 1, invoke graph
partitioning recursively on both partitions. Stop when the list of clusters in the
output list equals C<K>.
=item Step 6:
In general, the C<K> clusters obtained by recursive graph partitioning will not cover
all of the data. So, for the final step, assign each data element not covered by the
C<K> clusters to that cluster for which its reconstruction error is the least.
=back
=back
=head1 FAIL-FIRST BIAS OF THE MODULE
As you would expect for all such iterative algorithms, the module carries no
theoretical guarantee that it will give you correct results. But what does that mean?
Suppose you create synthetic data that consists of Gaussian looking disjoint clusters
on the surface of a sphere, would the module always succeed in separating out the
clusters? The module carries no guarantees to that effect --- especially considering
that Phase 1 of the algorithm is sensitive to how the clusters are seeded at the
beginning. Although this sensitivity is mitigated by the cluster merging step when
greater-than-1 value is given to the constructor option C<cluster_search_multiplier>,
a plain vanilla implementation of the steps in Phase 1 and Phase 2 would nonetheless
carry significant risk that you'll end up with incorrect clustering results.
To further reduce this risk, the module has been programmed so that it terminates
immediately if it suspects that the cluster solution being developed is unlikely to
be fruitful. The heuristics used for such terminations are conservative --- since
the cost of termination is only that the user will have to run the code again, which
at worst only carries an element of annoyance with it. The three "Fail First"
heuristics currently programmed into the module are based on simple "unimodality
testing", testing for "congruent clusters," and testing for dominant cluster support
in the final stage of the recursive invocation of the graph partitioning step. The
unimodality testing is as elementary as it can be --- it only checks for the number
of data samples within a certain radius of the mean in relation to the total number
of data samples in the cluster.
When the program terminates under such conditions, it prints out the following message
in your terminal window:
Bailing out!
Given the very simple nature of testing that is carried for the "Fail First" bias, do
not be surprised if the results you get for your data simply look wrong. If most
runs of the module produce wrong results for your application, that means that the
module logic needs to be strengthened further. The author of this module would love
to hear from you if that is the case.
=head1 METHODS
The module provides the following methods for linear-manifold based clustering, for
lib/Algorithm/LinearManifoldDataClusterer.pm view on Meta::CPAN
=item B<new():>
my $clusterer = Algorithm::LinearManifoldDataClusterer->new(
datafile => $datafile,
mask => $mask,
K => $K,
P => $P,
cluster_search_multiplier => $C,
max_iterations => $max_iter,
delta_reconstruction_error => 0.001,
terminal_output => 1,
write_clusters_to_files => 1,
visualize_each_iteration => 1,
show_hidden_in_3D_plots => 1,
make_png_for_each_iteration => 1,
);
A call to C<new()> constructs a new instance of the
C<Algorithm::LinearManifoldDataClusterer> class.
=back
=head2 Constructor Parameters
=over 8
=item C<datafile>:
This parameter names the data file that contains the multidimensional data records
you want the module to cluster. This file must be in CSV format and each record in
the file must include a symbolic tag for the record. Here are first few rows of such
a CSV file in the C<examples> directory:
d_161,0.0739248630173239,0.231119293395665,-0.970112873251437
a_59,0.459932215884786,0.0110216469739639,0.887885623314902
a_225,0.440503220903039,-0.00543366086464691,0.897734586447273
a_203,0.441656364946433,0.0437191337788422,0.896118459046532
...
...
What you see in the first column --- C<d_161>, C<a_59>, C<a_225>, C<a_203> --- are
the symbolic tags associated with four 3-dimensional data records.
=item C<mask>:
This parameter supplies the mask to be applied to the columns of your data file. For
the data file whose first few records are shown above, the mask should be C<N111>
since the symbolic tag is in the first column of the CSV file and since, presumably,
you want to cluster the data in the next three columns.
=item C<K>:
This parameter supplies the number of clusters you are looking for.
=item C<P>:
This parameter specifies the dimensionality of the manifold on which the data resides.
=item C<cluster_search_multiplier>:
As should be clear from the C<Summary of the Algorithm> section, this parameter plays
a very important role in the successful clustering of your data. As explained in
C<Description>, the basic algorithm used for clustering in Phase 1 --- clustering by
the minimization of the reconstruction error --- is sensitive to the choice of the
cluster seeds that are selected randomly at the beginning of the algorithm. Should
it happen that the seeds miss one or more of the clusters, the clustering produced is
likely to not be correct. By giving an integer value to C<cluster_search_multiplier>
that is greater than 1, you'll increase the odds that the randomly selected seeds
will see all clusters. When you set C<cluster_search_multiplier> to C<M>, you ask
Phase 1 of the algorithm to construct C<M*K> clusters as opposed to just C<K>
clusters. Subsequently, in Phase 2, the module uses inter-cluster similarity based
graph partitioning to merge the C<M*K> clusters into C<K> clusters.
=item C<max_iterations>:
This hard limits the number of iterations in Phase 1 of the algorithm. Ordinarily,
the iterations stop automatically when the change in the total reconstruction error
from one iteration to the next is less than the value specified by the parameter
C<delta_reconstruction_error>.
=item C<delta_reconstruction_error>:
It is this parameter that determines when the iterations will actually terminate in
Phase 1 of the algorithm. When the difference in the total reconstruction error from
one iteration to the next is less than the value given to this parameter, the
iterations come to an end. B<IMPORTANT: I have noticed that the larger the number of
data samples that need to be clustered, the larger must be the value given to this
parameter. That makes intuitive sense since the total reconstruction error is the
sum of all such errors for all of the data elements.> Unfortunately, the best value
for this parameter does NOT appear to depend linearly on the total number of data
records to be clustered.
=item C<terminal_output>:
When this parameter is set, you will see in your terminal window the different
clusters as lists of the symbolic tags associated with the data records. You will
also see in your terminal window the output produced by the different steps of the
graph partitioning algorithm as smaller clusters are merged to form the final C<K>
clusters --- assuming that you set the parameter C<cluster_search_multiplier> to an
integer value that is greater than 1.
=item C<visualize_each_iteration>:
As its name implies, when this option is set to 1, you'll see 3D plots of the
clustering results for each iteration --- but only if your data is 3-dimensional.
=item C<show_hidden_in_3D_plots>:
This parameter is important for controlling the visualization of the clusters on the
surface of a sphere. If the clusters are too spread out, seeing all of the clusters
all at once can be visually confusing. When you set this parameter, the clusters on
the back side of the sphere will not be visible. Note that no matter how you set
this parameter, you can interact with the 3D plot of the data and rotate it with your
mouse pointer to see all of the data that is output by the clustering code.
=item C<make_png_for_each_iteration>:
If you set this option to 1, the module will output a Gnuplot in the form of a PNG
image for each iteration in Phase 1 of the algorithm. In Phase 2, the module will
output the clustering result produced by the graph partitioning algorithm.
=back
=over
=item B<get_data_from_csv()>:
$clusterer->get_data_from_csv();
As you can guess from its name, the method extracts the data from the CSV file named
in the constructor.
=item B<linear_manifold_clusterer()>:
$clusterer->linear_manifold_clusterer();
or
my $clusters = $clusterer->linear_manifold_clusterer();
This is the main call to the linear-manifold based clusterer. The first call works
by side-effect, meaning that you will see the clusters in your terminal window and
they would be written out to disk files (depending on the constructor options you
have set). The second call also returns the clusters as a reference to an array of
anonymous arrays, each holding the symbolic tags for a cluster.
=item B<display_reconstruction_errors_as_a_function_of_iterations()>:
$clusterer->display_reconstruction_errors_as_a_function_of_iterations();
This method would normally be called after the clustering is completed to see how the
reconstruction errors decreased with the iterations in Phase 1 of the overall
algorithm.
=item B<write_clusters_to_files()>:
$clusterer->write_clusters_to_files($clusters);
As its name implies, when you call this method, the final clusters will be written
out to disk files. The files have names like:
cluster0.txt
cluster1.txt
cluster2.txt
...
...
Before the clusters are written to these files, the module destroys all files with
such names in the directory in which you call the module.
=item B<visualize_clusters_on_sphere()>:
$clusterer->visualize_clusters_on_sphere("final clustering", $clusters);
or
$clusterer->visualize_clusters_on_sphere("final_clustering", $clusters, "png");
If your data is 3-dimensional and it resides on the surface of a sphere (or in the
vicinity of such a surface), you may be able to use this method for the
visualization of the clusters produced by the algorithm. The first invocation
produces a Gnuplot in a terminal window that you can rotate with your mouse pointer.
The second invocation produces a `.png' image of the plot.
=item B<auto_retry_clusterer()>:
$clusterer->auto_retry_clusterer();
or
my $clusters = $clusterer->auto_retry_clusterer();
As mentioned earlier, the module is programmed in such a way that it is more likely
to fail than to give you a wrong answer. If manually trying the clusterer repeatedly
on a data file is frustrating, you can use C<auto_retry_clusterer()> to automatically
make repeated attempts for you. See the script C<example4.pl> for how you can use
C<auto_retry_clusterer()> in your own code.
=back
=head1 GENERATING SYNTHETIC DATA FOR EXPERIMENTING WITH THE CLUSTERER
The module file also contains a class named C<DataGenerator> for generating synthetic
data for experimenting with linear-manifold based clustering. At this time, only
3-dimensional data that resides in the form of Gaussian clusters on the surface of a
sphere is generated. The generated data is placed in a CSV file. You construct an
instance of the C<DataGenerator> class by a call like:
=over 4
=item B<new():>
my $training_data_gen = DataGenerator->new(
output_file => $output_file,
cluster_width => 0.0005,
total_number_of_samples_needed => 1000,
number_of_clusters_on_sphere => 4,
show_hidden_in_3D_plots => 0,
);
=back
=head2 Parameters for the DataGenerator constructor:
=over 8
=item C<output_file>:
The numeric values are generated using a bivariate Gaussian distribution whose two
independent variables are the azimuth and the elevation angles on the surface of a
unit sphere. The mean of each cluster is chosen randomly and its covariance set in
proportion to the value supplied for the C<cluster_width> parameter.
=item C<cluster_width>:
This parameter controls the spread of each cluster on the surface of the unit sphere.
=item C<total_number_of_samples_needed>:
As its name implies, this parameter specifies the total number of data samples that
will be written out to the output file --- provided this number is divisible by the
number of clusters you asked for. If the divisibility condition is not satisfied,
the number of data samples actually written out will be the closest it can be to the
( run in 0.904 second using v1.01-cache-2.11-cpan-39bf76dae61 )