AI-Calibrate
- Added ./t/AI-Calibrate-KL.t using Kun Liu's dataset.
- Added ./t/AI-Calibrate-pathologies.t to test for pathological cases.
1.3 Fri Nov 4
- Removed dependency on Test::Deep, added explicit declaration of
dependency on Test::More to Makefile.PL
1.2 Thu Nov 3
- Fixed test ./t/AI-Calibrate-NB.t so the test wouldn't fail. It used to
call is_deeply, which failed on slight differences between floating-point
numbers; it now compares with a small tolerance.
1.1 Thu Feb 28 19:00:06 2008
- Added new function print_mapping
- Added new test file AI-Calibrate-NB.t which, if AI::NaiveBayes1 is
present, trains a classifier and calibrates it.
1.0 Thu Feb 05 11:37:31 2008
- First public release to CPAN.
0.01 Thu Jan 24 11:37:31 2008
- original version; created by h2xs 1.23 with options
-XA -n AI::Calibrate
{
"abstract" : "Perl module for producing probabilities from classifier scores",
"author" : [
"Tom Fawcett <tfawcett@acm.org>"
],
"dynamic_config" : 1,
"generated_by" : "ExtUtils::MakeMaker version 6.62, CPAN::Meta::Converter version 2.112150",
"license" : [
"unknown"
],
"meta-spec" : {
"url" : "http://search.cpan.org/perldoc?CPAN::Meta::Spec",
},
"name" : "AI-Calibrate",
"no_index" : {
"directory" : [
"t",
"inc"
]
},
"prereqs" : {
"build" : {
"requires" : {
"ExtUtils::MakeMaker" : 0
}
},
"configure" : {
"requires" : {
"ExtUtils::MakeMaker" : 0
}
},
"runtime" : {
"requires" : {
"Test::More" : 0
}
}
},
"release_status" : "stable",
"version" : "1.5"
}
---
abstract: 'Perl module for producing probabilities from classifier scores'
author:
- 'Tom Fawcett <tfawcett@acm.org>'
build_requires:
ExtUtils::MakeMaker: 0
configure_requires:
ExtUtils::MakeMaker: 0
dynamic_config: 1
generated_by: 'ExtUtils::MakeMaker version 6.62, CPAN::Meta::Converter version 2.112150'
license: unknown
meta-spec:
url: http://module-build.sourceforge.net/META-spec-v1.4.html
version: 1.4
name: AI-Calibrate
no_index:
directory:
- t
- inc
requires:
Test::More: 0
version: 1.5
-*- Mode: Text -*-
AI-Calibrate version 1.0
=========================
AI::Calibrate - Perl module for producing probabilities from classifier scores
In AI, classifiers usually return some sort of instance score with their
classifications. These scores can be used as probabilities in various
calculations, but first they need to be calibrated. Naive Bayes, for example,
is a very useful classifier, but the scores it produces are usually "bunched"
around 0 and 1, making these scores poor probability estimates. Support
vector machines have a similar problem. Both classifier types should be
calibrated before their scores are used as probability estimates. This module
calibrates a classifier using the Pool Adjacent Violators algorithm.
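A typical session looks roughly like this (a minimal sketch; it assumes a
classifier that produces numeric scores and uses the ':all' export tag shown
in the module's SYNOPSIS):

    use AI::Calibrate ':all';

    # Each test instance is [score_assigned_by_classifier, true_class (1 or 0)]
    my $points = [ [0.9, 1], [0.8, 1], [0.7, 0], [0.6, 1] ];

    my $calibrated = calibrate($points);   # [ [score, prob], ... ]
    print_mapping($calibrated);            # prints the score-to-probability ranges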
INSTALLATION
To install this module type the following:
perl Makefile.PL
make
make test
make install
lib/AI/Calibrate.pm
our @EXPORT = qw( );
use constant DEBUG => 0;
# Structure slot names
use constant SCORE => 0;
use constant PROB => 1;
=head1 NAME
AI::Calibrate - Perl module for producing probabilities from classifier scores
=head1 SYNOPSIS
use AI::Calibrate ':all';
... train a classifier ...
... test classifier on $points ...
$calibrated = calibrate($points);
=head1 DESCRIPTION
Classifiers usually return some sort of instance score with their
classifications. These scores can be used as probabilities in various
calculations, but first they need to be I<calibrated>. Naive Bayes, for
example, is a very useful classifier, but the scores it produces are usually
"bunched" around 0 and 1, making these scores poor probability estimates.
Support vector machines have a similar problem. Both classifier types should
be calibrated before their scores are used as probability estimates.
This module calibrates classifier scores using a method called the Pool
Adjacent Violators (PAV) algorithm. After you train a classifier, you take a
(usually separate) set of test instances and run them through the classifier,
collecting the scores assigned to each. You then supply this set of instances
to the calibrate function defined here, and it will return a set of ranges
mapping from a score range to a probability estimate.
For example, assume you have the following set of instance results from your
classifier. Each result is of the form C<[ASSIGNED_SCORE, TRUE_CLASS]>:
my $points = [
[.9, 1],
[.8, 1],
[.7, 0],
[.6, 1],
[.55, 1],
[.5, 1],
[.45, 0],
[.4, 1],
lib/AI/Calibrate.pm
(positive class).
$sorted is a boolean (0 by default) indicating whether the data are already
sorted by score. Unless this is set to 1, calibrate() will sort the data
itself.
Calibrate returns a reference to an ordered list of references:
[ [score, prob], [score, prob], [score, prob] ... ]
Scores will be in descending numerical order. See the DESCRIPTION section for
how this structure is interpreted. You can pass this structure to the
B<score_prob> function, along with a new score, to get a probability.
=cut
sub calibrate {
my($data, $sorted) = @_;
if (DEBUG) {
print "Original data:\n";
lib/AI/Calibrate.pm
$new_data = [ sort { $b->[SCORE] <=> $a->[SCORE] } @$new_data ];
}
PAV($new_data);
if (DEBUG) {
print("After PAV, vector is:\n");
print_vector($new_data);
}
    my(@result);
    my( $last_prob, $last_score);
    # Append a sentinel so the final run of equal probabilities is closed out.
    push(@$new_data, [-1e10, 0]);
    for my $pair (@$new_data) {
        print "Seeing @$pair\n" if DEBUG;
        my($score, $prob) = @$pair;
        # Emit one [score, prob] pair per run of equal probability, keyed by
        # the run's lowest score (scores are in descending order).
        if (defined($last_prob) and $prob < $last_prob) {
            print("Pushing [$last_score, $last_prob]\n") if DEBUG;
            push(@result, [$last_score, $last_prob] );
        }
        $last_prob = $prob;
        $last_score = $score;
    }
return \@result;
}
sub PAV {
    my ( $result ) = @_;
    # Walk the list (already sorted by descending score) looking for adjacent
    # violators: a neighboring pair whose probabilities increase.
    for ( my $i = 0; $i < @$result - 1; $i++ ) {
        if ( $result->[$i][PROB] < $result->[ $i + 1 ][PROB] ) {
            # Pool the violating pair by averaging their probabilities.
            $result->[$i][PROB] =
                ( $result->[$i][PROB] + $result->[ $i + 1 ][PROB] ) / 2;
            $result->[ $i + 1 ][PROB] = $result->[$i][PROB];
            print "Averaging elements $i and ", $i + 1, "\n" if DEBUG;
            # The pooled value may now violate earlier groups, so sweep
            # backwards and flatten any run that is still out of order.
            for ( my $j = $i - 1; $j >= 0; $j-- ) {
                if ( $result->[$j][PROB] < $result->[ $i + 1 ][PROB] ) {
                    my $d = ( $i + 1 ) - $j + 1;
                    flatten( $result, $j, $d );
                }
                else {
                    last;
                }
            }
        }
    }
}
sub print_vector {
lib/AI/Calibrate.pm
print_mapping($calibrated);
Sample output:
1.00 > SCORE >= 1.00 prob = 1.000
1.00 > SCORE >= 0.71 prob = 0.667
0.71 > SCORE >= 0.39 prob = 0.000
0.39 > SCORE >= 0.00 prob = 0.000
These ranges are not necessarily compressed/optimized, as this sample output
shows.
=back
=cut
sub print_mapping {
my($calibrated) = @_;
my $last_bound = 1.0;
for my $tuple (@$calibrated) {
my($bound, $prob) = @$tuple;
lib/AI/Calibrate.pm
}
if ($last_bound != 0) {
printf("%0.3f > SCORE >= %0.3f prob = %0.3f\n",
$last_bound, 0, 0);
}
}
=head1 DETAILS
The PAV algorithm is conceptually straightforward. Given a set of training
cases ordered by the scores assigned by the classifier, it first assigns a
probability of one to each positive instance and a probability of zero to each
negative instance, and puts each instance in its own group. It then looks, at
each iteration, for adjacent violators: adjacent groups whose probabilities
locally increase rather than decrease. When it finds such groups, it pools
them and replaces their probability estimates with the average of the group's
values. It continues this process of averaging and replacement until the
entire sequence is monotonically decreasing. The result is a sequence of
instances, each of which has a score and an associated probability estimate,
which can then be used to map scores into probability estimates.
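As a small worked example (illustrative, following the procedure just
described):

    # Five instances in descending score order, with true classes 1, 1, 0, 1, 0.
    my $points = [ [.9, 1], [.8, 1], [.7, 0], [.6, 1], [.5, 0] ];
    # Initial probabilities:          1, 1, 0,   1,   0
    # The only adjacent violation is the (0, 1) pair at scores .7/.6;
    # pooling and averaging gives:    1, 1, 0.5, 0.5, 0   (now decreasing)
    my $mapping = calibrate($points);
    # Roughly: SCORE >= 0.8 => 1.0, SCORE >= 0.6 => 0.5, lower scores => 0.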
For further information on the PAV algorithm, you can read the section in my
paper referenced below.
=head1 EXPORT
This module exports three functions: calibrate, score_prob and print_mapping.
=head1 BUGS
lib/AI/Calibrate.pm
The AI::NaiveBayes1 perl module.
My paper "PAV and the ROC Convex Hull" has a good discussion of the PAV
algorithm, including examples:
L<http://home.comcast.net/~tom.fawcett/public_html/papers/PAV-ROCCH-dist.pdf>
If you want to read more about the general issue of classifier calibration,
here are some good papers, which are freely available on the web:
I<"Transforming classifier scores into accurate multiclass probability estimates">
by Bianca Zadrozny and Charles Elkan
I<"Predicting Good Probabilities With Supervised Learning">
by A. Niculescu-Mizil and R. Caruana
=head1 AUTHOR
Tom Fawcett, E<lt>tom.fawcett@gmail.comE<gt>
t/AI-Calibrate-1.t
my($array) = shift;
my($i);
for ($i = @$array ; --$i; ) {
my $j = int rand ($i+1);
next if $i == $j;
@$array[$i,$j] = @$array[$j,$i]
}
}
# These points are from the ROCCH-PAV paper, Table 1
# Format of each point is [Threshold, Class].
my $points = [
[.9, 1],
[.8, 1],
[.7, 0],
[.6, 1],
[.55, 1],
[.5, 1],
[.45, 0],
[.4, 1],
[.35, 1],
t/AI-Calibrate-1.t
# Shuffle the arrays a bit and try calibrating again
for (1 .. 10) {
shuffle_array($points);
my $calibrated_got = calibrate($points, 0);
ok(deeply_approx($calibrated_got, $calibrated_expected),
"unsorted cal $_");
}
# Tweak the thresholds
for (1 .. 10) {
my $delta = rand;
my @delta_points;
for my $point (@$points) {
my($thresh, $class) = @$point;
push(@delta_points, [ $thresh+$delta, $class]);
}
my @delta_expected;
for my $point (@$calibrated_expected) {
my($thresh, $class) = @$point;
push(@delta_expected, [ $thresh+$delta, $class]);
}
my $delta_got = calibrate(\@delta_points, 0);
ok(deeply_approx($delta_got, \@delta_expected), "unsorted cal $_");
}
my @test_estimates =
( [100, 1],
[.9, 1 ],
[.8, 1],
[.7, 3/4 ],
t/AI-Calibrate-KL.t
BEGIN { use_ok('AI::Calibrate', ':all') };
sub trim($) {
my $string = shift;
$string =~ s/^\s+//;
$string =~ s/\s+$//;
return $string;
}
# These points are from Kun Liu
# Format of each point is [Threshold, Class].
my $points = [
[0.999, 1],
[0.998, 1],
[0.742, 0],
[0.737, 1],
[0.685, 1],
[0.636, 1],
[0.613, 1],
[0.598, 1],
[0.559, 1],
t/AI-Calibrate-NB.t
# -*- Mode: CPerl -*-
use English;
use strict;
# Before `make install' is performed this script should be runnable with
# `make test'. After `make install' it should work as `perl AI-Calibrate.t'
use Test::More;
eval("use AI::NaiveBayes1");
if ($EVAL_ERROR) {
plan skip_all => 'AI::NaiveBayes1 does not seem to be present';
} else {
plan tests => 2;
}
use_ok('AI::Calibrate', ':all');
my @instances =
( [ { outlook=>'sunny',temperature=>85,humidity=>85,windy=>'FALSE'},
'no'],
[ {outlook=>'sunny',temperature=>80,humidity=>90,windy=>'TRUE'},
t/AI-Calibrate-NB.t
diag(sprintf( "Got: %f\n", $got->[$i]));
diag(sprintf( "Expected: %f\n", $expected->[$i]));
return 0;
}
}
}
return 1;
}
ok(lists_close_enough($calibrated, \@expected),
'Calibration of NB1 results');