Algorithm-LibLinear
lib/Algorithm/LibLinear/Model.pm
...
=head1 DESCRIPTION
This class represents a classifier or an estimated function returned by L<Algorithm::LibLinear>'s C<train> method.
If you have model files generated by LIBLINEAR's C<train> command or by this class's C<save> method, you can C<load> them.
=head1 METHODS
Note that the constructor C<new> is B<not> part of the public API. You can obtain an instance via C<< Algorithm::LibLinear->train >>; i.e., C<Algorithm::LibLinear> acts as a factory class.
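The following is a minimal sketch of that workflow. The solver name, cost, and training data are illustrative placeholders; it assumes a data set built via L<Algorithm::LibLinear::DataSet> from labeled feature hashes, and that C<save> takes a C<filename> argument symmetric to C<load>.

  use Algorithm::LibLinear;
  use Algorithm::LibLinear::DataSet;

  # Each training example is a sparse feature hash (index => value) plus a label.
  my $data_set = Algorithm::LibLinear::DataSet->new(data_set => [
      +{ feature => +{ 1 => 0.7, 2 => 1.0 }, label => 1 },
      +{ feature => +{ 1 => 0.1, 3 => 0.5 }, label => 2 },
  ]);

  # train() is the factory method: it returns an Algorithm::LibLinear::Model.
  my $learner = Algorithm::LibLinear->new(solver => 'L2R_LR', cost => 1);
  my $model   = $learner->train(data_set => $data_set);

  # Persist the model in LIBLINEAR's model file format.
  $model->save(filename => 'example.model');
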
=head2 load(filename => $path)
Class method. Loads a LIBLINEAR model file and returns an instance of this class.
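For example, to restore a model written earlier (the file name is a placeholder):

  use Algorithm::LibLinear::Model;

  # Accepts files produced by LIBLINEAR's `train` command as well as by save().
  my $model = Algorithm::LibLinear::Model->load(filename => 'example.model');
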
=head2 bias([$index])
Returns the value of the bias term corresponding to the C<$index>-th class. In the case of one-class SVM (i.e., when C<is_oneclass_model> is true), C<$index> is ignored.
Recall that a trained model can be represented as a function f(x) = W^T x + b, where W is an F x C matrix, b is a C-sized vector, and C and F are the numbers of classes and features, respectively. In this notation, this method returns b(C<$index>).
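A short sketch, assuming C<$model> was obtained via C<train> or C<load>; whether class indices start at zero is not spelled out here, so the index below is illustrative:

  # b($index) is the intercept of the $index-th class's decision function
  # in f(x) = W^T x + b.
  my $bias_of_first_class = $model->bias(0);

  if ($model->is_oneclass_model) {
      # For a one-class SVM model the index argument is ignored.
      my $bias = $model->bias;
  }
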
src/liblinear/linear.cpp
va_start(ap,fmt);
vsprintf(buf,fmt,ap);
va_end(ap);
(*liblinear_print_string)(buf);
}
#else
static void info(const char *fmt,...) {}
#endif
class sparse_operator
{
public:
// squared Euclidean norm ||x||^2 of a sparse vector (terminated by index == -1)
static double nrm2_sq(const feature_node *x)
{
double ret = 0;
while(x->index != -1)
{
ret += x->value*x->value;
x++;
}
return ret;
}
src/liblinear/linear.cpp
// sparse_operator::axpy: accumulate y += a * x over the sparse elements of x
{
y[x->index-1] += a*x->value;
x++;
}
}
};
// L2-regularized empirical risk minimization
// min_w w^Tw/2 + \sum C_i \xi(w^Tx_i), where \xi() is the loss
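// For reference, the concrete losses \xi() plugged into this objective by the
// subclasses declared below are (standard LIBLINEAR formulations):
//   l2r_lr_fun     (logistic regression): \xi = log(1 + exp(-y_i w^T x_i))
//   l2r_l2_svc_fun (L2-loss SVC)        : \xi = max(0, 1 - y_i w^T x_i)^2
//   l2r_l2_svr_fun (L2-loss SVR)        : \xi = max(0, |w^T x_i - y_i| - p)^2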
class l2r_erm_fun: public function
{
public:
l2r_erm_fun(const problem *prob, const parameter *param, double *C);
~l2r_erm_fun();
double fun(double *w);
double linesearch_and_update(double *w, double *d, double *f, double *g, double alpha);
int get_nr_variable(void);
protected:
virtual double C_times_loss(int i, double wx_i) = 0;
void Xv(double *v, double *Xv);
src/liblinear/linear.cpp
int l=prob->l;
int w_size=get_nr_variable();
feature_node **x=prob->x;
for(i=0;i<w_size;i++)
XTv[i]=0;
// accumulate XTv = X^T v = \sum_i v[i] * x_i via sparse axpy
for(i=0;i<l;i++)
sparse_operator::axpy(v[i], x[i], XTv);
}
class l2r_lr_fun: public l2r_erm_fun
{
public:
l2r_lr_fun(const problem *prob, const parameter *param, double *C);
~l2r_lr_fun();
void grad(double *w, double *g);
void Hv(double *s, double *Hs);
void get_diag_preconditioner(double *M);
private:
double *D;
src/liblinear/linear.cpp
// weight instance i's contribution by C[i]*D[i] before accumulating into Hs
xTs = C[i]*D[i]*xTs;
sparse_operator::axpy(xTs, xi, Hs);
}
// add s itself: the identity part of the Hessian contributed by the w^T w / 2 term
for(i=0;i<w_size;i++)
Hs[i] = s[i] + Hs[i];
if(regularize_bias == 0)
Hs[w_size-1] -= s[w_size-1];
}
class l2r_l2_svc_fun: public l2r_erm_fun
{
public:
l2r_l2_svc_fun(const problem *prob, const parameter *param, double *C);
~l2r_l2_svc_fun();
void grad(double *w, double *g);
void Hv(double *s, double *Hs);
void get_diag_preconditioner(double *M);
protected:
void subXTv(double *v, double *XTv);
src/liblinear/linear.cpp
int i;
int w_size=get_nr_variable();
feature_node **x=prob->x;
for(i=0;i<w_size;i++)
XTv[i]=0;
// accumulate v[i] * x_{I[i]} over the active index set I only
for(i=0;i<sizeI;i++)
sparse_operator::axpy(v[i], x[I[i]], XTv);
}
class l2r_l2_svr_fun: public l2r_l2_svc_fun
{
public:
l2r_l2_svr_fun(const problem *prob, const parameter *param, double *C);
void grad(double *w, double *g);
private:
double C_times_loss(int i, double wx_i);
double p;
};
l2r_l2_svr_fun::l2r_l2_svr_fun(const problem *prob, const parameter *param, double *C):
src/liblinear/linear.cpp
//
// solution will be put in w
//
// See Appendix of LIBLINEAR paper, Fan et al. (2008)
#define GETI(i) ((int) prob->y[i])
// To support weights for instances, use GETI(i) (i)
class Solver_MCSVM_CS
{
public:
Solver_MCSVM_CS(const problem *prob, int nr_class, double *C, double eps=0.1, int max_iter=100000);
~Solver_MCSVM_CS();
void Solve(double *w);
private:
void solve_sub_problem(double A_i, int yi, double C_yi, int active_i, double *alpha_new);
bool be_shrunk(int i, int m, int yi, double alpha_i, double minG);
double *B, *C, *G;
int w_size, l;
int nr_class;
int max_iter;
src/liblinear/newton.h
#ifndef _NEWTON_H
#define _NEWTON_H
class function
{
public:
virtual double fun(double *w) = 0 ;                    // objective value f(w)
virtual void grad(double *w, double *g) = 0 ;          // gradient of f at w
virtual void Hv(double *s, double *Hs) = 0 ;           // Hessian-vector product Hs = H(w) * s
virtual int get_nr_variable(void) = 0 ;                // number of variables (dimension of w)
virtual void get_diag_preconditioner(double *M) = 0 ;  // diagonal preconditioner for the CG solver
virtual ~function(void){}
// base implementation in newton.cpp
virtual double linesearch_and_update(double *w, double *s, double *f, double *g, double alpha);
};
// Newton-method solver; each iteration's linear system is solved approximately
// by preconditioned conjugate gradient (see pcg() below).
class NEWTON
{
public:
NEWTON(const function *fun_obj, double eps = 0.1, double eps_cg = 0.5, int max_iter = 1000);
~NEWTON();
void newton(double *w);
void set_print_string(void (*i_print) (const char *buf));
private:
int pcg(double *g, double *M, double *s, double *r);
double eps;