10
typedef signed char schar; // compact storage for class labels (assigned +1/-1 throughout the solvers)
12
// Return the smaller of two values (local helper; avoids pulling in <algorithm>).
template <class T> inline T min(T a, T b)
{
	if (a < b)
		return a;
	return b;
}
15
// Return the larger of two values (local helper; avoids pulling in <algorithm>).
template <class T> inline T max(T a, T b)
{
	if (a > b)
		return a;
	return b;
}
17
// Exchange the contents of two objects via a temporary copy.
template <class T> inline void swap(T& a, T& b)
{
	T tmp = a;
	a = b;
	b = tmp;
}
18
template <class S, class T> inline void clone(T*& dst, S* src, int n)
21
memcpy((void *)dst,(void *)src,sizeof(T)*n);
23
inline double powi(double base, int times)
25
double tmp = base, ret = 1.0;
27
for(int t=times; t>0; t/=2)
36
// Allocate an uninitialized array of n objects of `type` via malloc.
// NOTE(review): the result is not checked for NULL here; callers own the
// buffer and must release it with free().
#define Malloc(type,n) (type *)malloc((n)*sizeof(type))
38
void info(char *fmt,...)
50
// Silenced logging stub: accepts printf-style arguments and discards them.
// Presumably the alternate (verbose) build uses the printing definition of
// info() above — confirm against the surrounding #if/#else in the full file.
void info(char *fmt,...) {}
57
// l is the number of total data items
58
// size is the cache size limit in bytes
63
Cache(int l,int size);
66
// request data [0,len)
67
// return some position p where [p,len) need to be filled
68
// (p >= len if nothing needs to be filled)
69
int get_data(const int index, Qfloat **data, int len);
70
void swap_index(int i, int j); // future_option
76
head_t *prev, *next; // a cicular list
78
int len; // data[0,len) is cached in this entry
83
void lru_delete(head_t *h);
84
void lru_insert(head_t *h);
87
Cache::Cache(int l_,int size_):l(l_),size(size_)
89
head = (head_t *)calloc(l,sizeof(head_t)); // initialized to 0
90
size /= sizeof(Qfloat);
91
size -= l * sizeof(head_t) / sizeof(Qfloat);
92
size = max(size, 2*l); // cache must be large enough for two columns
93
lru_head.next = lru_head.prev = &lru_head;
98
for(head_t *h = lru_head.next; h != &lru_head; h=h->next)
103
void Cache::lru_delete(head_t *h)
105
// delete from current location
106
h->prev->next = h->next;
107
h->next->prev = h->prev;
110
void Cache::lru_insert(head_t *h)
112
// insert to last position
114
h->prev = lru_head.prev;
119
int Cache::get_data(const int index, Qfloat **data, int len)
121
head_t *h = &head[index];
122
if(h->len) lru_delete(h);
123
int more = len - h->len;
130
head_t *old = lru_head.next;
138
// allocate new space
139
h->data = (Qfloat *)realloc(h->data,sizeof(Qfloat)*len);
149
void Cache::swap_index(int i, int j)
153
if(head[i].len) lru_delete(&head[i]);
154
if(head[j].len) lru_delete(&head[j]);
155
swap(head[i].data,head[j].data);
156
swap(head[i].len,head[j].len);
157
if(head[i].len) lru_insert(&head[i]);
158
if(head[j].len) lru_insert(&head[j]);
161
for(head_t *h = lru_head.next; h!=&lru_head; h=h->next)
166
swap(h->data[i],h->data[j]);
183
// the static method k_function is for doing single kernel evaluation
184
// the constructor of Kernel prepares to calculate the l*l kernel matrix
185
// the member function get_Q is for getting one column from the Q Matrix
189
virtual Qfloat *get_Q(int column, int len) const = 0;
190
virtual Qfloat *get_QD() const = 0;
191
virtual void swap_index(int i, int j) const = 0;
192
virtual ~QMatrix() {}
195
class Kernel: public QMatrix {
197
Kernel(int l, svm_node * const * x, const svm_parameter& param);
200
static double k_function(const svm_node *x, const svm_node *y,
201
const svm_parameter& param);
202
virtual Qfloat *get_Q(int column, int len) const = 0;
203
virtual Qfloat *get_QD() const = 0;
204
virtual void swap_index(int i, int j) const // no so const...
207
if(x_square) swap(x_square[i],x_square[j]);
211
double (Kernel::*kernel_function)(int i, int j) const;
218
const int kernel_type;
223
static double dot(const svm_node *px, const svm_node *py);
224
double kernel_linear(int i, int j) const
226
return dot(x[i],x[j]);
228
double kernel_poly(int i, int j) const
230
return powi(gamma*dot(x[i],x[j])+coef0,degree);
232
double kernel_rbf(int i, int j) const
234
return exp(-gamma*(x_square[i]+x_square[j]-2*dot(x[i],x[j])));
236
double kernel_sigmoid(int i, int j) const
238
return tanh(gamma*dot(x[i],x[j])+coef0);
240
double kernel_precomputed(int i, int j) const
242
return x[i][(int)(x[j][0].value)].value;
246
Kernel::Kernel(int l, svm_node * const * x_, const svm_parameter& param)
247
:kernel_type(param.kernel_type), degree(param.degree),
248
gamma(param.gamma), coef0(param.coef0)
253
kernel_function = &Kernel::kernel_linear;
256
kernel_function = &Kernel::kernel_poly;
259
kernel_function = &Kernel::kernel_rbf;
262
kernel_function = &Kernel::kernel_sigmoid;
264
kernel_function = &Kernel::kernel_precomputed;
270
if(kernel_type == RBF)
272
x_square = new double[l];
274
x_square[i] = dot(x[i],x[i]);
286
double Kernel::dot(const svm_node *px, const svm_node *py)
289
while(px->index != -1 && py->index != -1)
291
if(px->index == py->index)
293
sum += px->value * py->value;
299
if(px->index > py->index)
308
double Kernel::k_function(const svm_node *x, const svm_node *y,
309
const svm_parameter& param)
311
switch(param.kernel_type)
316
return powi(param.gamma*dot(x,y)+param.coef0,param.degree);
320
while(x->index != -1 && y->index !=-1)
322
if(x->index == y->index)
324
double d = x->value - y->value;
331
if(x->index > y->index)
333
sum += y->value * y->value;
338
sum += x->value * x->value;
344
while(x->index != -1)
346
sum += x->value * x->value;
350
while(y->index != -1)
352
sum += y->value * y->value;
356
return exp(-param.gamma*sum);
359
return tanh(param.gamma*dot(x,y)+param.coef0);
360
case PRECOMPUTED: //x: test (validation), y: SV
361
return x[(int)(y->value)].value;
363
return 0; /* Unreachable */
367
// Generalized SMO+SVMlight algorithm
370
// min 0.5(\alpha^T Q \alpha) + b^T \alpha
372
// y^T \alpha = \delta
374
// 0 <= alpha_i <= Cp for y_i = 1
375
// 0 <= alpha_i <= Cn for y_i = -1
379
// Q, b, y, Cp, Cn, and an initial feasible point \alpha
380
// l is the size of vectors and matrices
381
// eps is the stopping criterion
383
// solution will be put in \alpha, objective value will be put in obj
388
virtual ~Solver() {};
390
struct SolutionInfo {
393
double upper_bound_p;
394
double upper_bound_n;
395
double r; // for Solver_NU
398
void Solve(int l, const QMatrix& Q, const double *b_, const schar *y_,
399
double *alpha_, double Cp, double Cn, double eps,
400
SolutionInfo* si, int shrinking);
404
double *G; // gradient of objective function
405
enum { LOWER_BOUND, UPPER_BOUND, FREE };
406
char *alpha_status; // LOWER_BOUND, UPPER_BOUND, FREE
414
double *G_bar; // gradient, if we treat free variables as 0
416
bool unshrinked; // XXX
420
return (y[i] > 0)? Cp : Cn;
422
void update_alpha_status(int i)
424
if(alpha[i] >= get_C(i))
425
alpha_status[i] = UPPER_BOUND;
426
else if(alpha[i] <= 0)
427
alpha_status[i] = LOWER_BOUND;
428
else alpha_status[i] = FREE;
430
bool is_upper_bound(int i) { return alpha_status[i] == UPPER_BOUND; }
431
bool is_lower_bound(int i) { return alpha_status[i] == LOWER_BOUND; }
432
bool is_free(int i) { return alpha_status[i] == FREE; }
433
void swap_index(int i, int j);
434
void reconstruct_gradient();
435
virtual int select_working_set(int &i, int &j);
436
virtual int max_violating_pair(int &i, int &j);
437
virtual double calculate_rho();
438
virtual void do_shrinking();
441
void Solver::swap_index(int i, int j)
446
swap(alpha_status[i],alpha_status[j]);
447
swap(alpha[i],alpha[j]);
449
swap(active_set[i],active_set[j]);
450
swap(G_bar[i],G_bar[j]);
453
void Solver::reconstruct_gradient()
455
// reconstruct inactive elements of G from G_bar and free variables
457
if(active_size == l) return;
460
for(i=active_size;i<l;i++)
461
G[i] = G_bar[i] + b[i];
463
for(i=0;i<active_size;i++)
466
const Qfloat *Q_i = Q->get_Q(i,l);
467
double alpha_i = alpha[i];
468
for(int j=active_size;j<l;j++)
469
G[j] += alpha_i * Q_i[j];
473
void Solver::Solve(int l, const QMatrix& Q, const double *b_, const schar *y_,
474
double *alpha_, double Cp, double Cn, double eps,
475
SolutionInfo* si, int shrinking)
482
clone(alpha,alpha_,l);
488
// initialize alpha_status
490
alpha_status = new char[l];
492
update_alpha_status(i);
495
// initialize active set (for shrinking)
497
active_set = new int[l];
503
// initialize gradient
506
G_bar = new double[l];
514
if(!is_lower_bound(i))
516
const Qfloat *Q_i = Q.get_Q(i,l);
517
double alpha_i = alpha[i];
520
G[j] += alpha_i*Q_i[j];
521
if(is_upper_bound(i))
523
G_bar[j] += get_C(i) * Q_i[j];
530
int counter = min(l,1000)+1;
534
// show progress and do shrinking
538
counter = min(l,1000);
539
if(shrinking) do_shrinking();
540
info("."); info_flush();
544
if(select_working_set(i,j)!=0)
546
// reconstruct the whole gradient
547
reconstruct_gradient();
548
// reset active set size and check
550
info("*"); info_flush();
551
if(select_working_set(i,j)!=0)
554
counter = 1; // do shrinking next iteration
559
// update alpha[i] and alpha[j], handle bounds carefully
561
const Qfloat *Q_i = Q.get_Q(i,active_size);
562
const Qfloat *Q_j = Q.get_Q(j,active_size);
564
double C_i = get_C(i);
565
double C_j = get_C(j);
567
double old_alpha_i = alpha[i];
568
double old_alpha_j = alpha[j];
572
double quad_coef = Q_i[i]+Q_j[j]+2*Q_i[j];
575
double delta = (-G[i]-G[j])/quad_coef;
576
double diff = alpha[i] - alpha[j];
601
alpha[j] = C_i - diff;
609
alpha[i] = C_j + diff;
615
double quad_coef = Q_i[i]+Q_j[j]-2*Q_i[j];
618
double delta = (G[i]-G[j])/quad_coef;
619
double sum = alpha[i] + alpha[j];
628
alpha[j] = sum - C_i;
644
alpha[i] = sum - C_j;
659
double delta_alpha_i = alpha[i] - old_alpha_i;
660
double delta_alpha_j = alpha[j] - old_alpha_j;
662
for(int k=0;k<active_size;k++)
664
G[k] += Q_i[k]*delta_alpha_i + Q_j[k]*delta_alpha_j;
667
// update alpha_status and G_bar
670
bool ui = is_upper_bound(i);
671
bool uj = is_upper_bound(j);
672
update_alpha_status(i);
673
update_alpha_status(j);
675
if(ui != is_upper_bound(i))
680
G_bar[k] -= C_i * Q_i[k];
683
G_bar[k] += C_i * Q_i[k];
686
if(uj != is_upper_bound(j))
691
G_bar[k] -= C_j * Q_j[k];
694
G_bar[k] += C_j * Q_j[k];
701
si->rho = calculate_rho();
703
// calculate objective value
708
v += alpha[i] * (G[i] + b[i]);
713
// put back the solution
716
alpha_[active_set[i]] = alpha[i];
719
// juggle everything back
722
while(active_set[i] != i)
723
swap_index(i,active_set[i]);
724
// or Q.swap_index(i,active_set[i]);
727
si->upper_bound_p = Cp;
728
si->upper_bound_n = Cn;
730
info("\noptimization finished, #iter = %d\n",iter);
735
delete[] alpha_status;
741
// return 1 if already optimal, return 0 otherwise
742
int Solver::select_working_set(int &out_i, int &out_j)
744
// return i,j such that
745
// i: maximizes -y_i * grad(f)_i, i in I_up(\alpha)
746
// j: minimizes the decrease of obj value
747
// (if quadratic coefficeint <= 0, replace it with tau)
748
// -y_j*grad(f)_j < -y_i*grad(f)_i, j in I_low(\alpha)
754
double obj_diff_min = INF;
756
for(int t=0;t<active_size;t++)
759
if(!is_upper_bound(t))
768
if(!is_lower_bound(t))
777
const Qfloat *Q_i = NULL;
778
if(i != -1) // NULL Q_i not accessed: Gmax=-INF if i=-1
779
Q_i = Q->get_Q(i,active_size);
781
for(int j=0;j<active_size;j++)
785
if (!is_lower_bound(j))
787
double grad_diff=Gmax+G[j];
793
double quad_coef=Q_i[i]+QD[j]-2*y[i]*Q_i[j];
795
obj_diff = -(grad_diff*grad_diff)/quad_coef;
797
obj_diff = -(grad_diff*grad_diff)/TAU;
799
if (obj_diff <= obj_diff_min)
802
obj_diff_min = obj_diff;
809
if (!is_upper_bound(j))
811
double grad_diff= Gmax-G[j];
817
double quad_coef=Q_i[i]+QD[j]+2*y[i]*Q_i[j];
819
obj_diff = -(grad_diff*grad_diff)/quad_coef;
821
obj_diff = -(grad_diff*grad_diff)/TAU;
823
if (obj_diff <= obj_diff_min)
826
obj_diff_min = obj_diff;
841
// return 1 if already optimal, return 0 otherwise
842
int Solver::max_violating_pair(int &out_i, int &out_j)
844
// return i,j: maximal violating pair
846
double Gmax1 = -INF; // max { -y_i * grad(f)_i | i in I_up(\alpha) }
849
double Gmax2 = -INF; // max { y_i * grad(f)_i | i in I_low(\alpha) }
852
for(int i=0;i<active_size;i++)
854
if(y[i]==+1) // y = +1
856
if(!is_upper_bound(i)) // d = +1
864
if(!is_lower_bound(i)) // d = -1
875
if(!is_upper_bound(i)) // d = +1
883
if(!is_lower_bound(i)) // d = -1
894
if(Gmax1+Gmax2 < eps)
902
void Solver::do_shrinking()
905
if(max_violating_pair(i,j)!=0) return;
906
double Gm1 = -y[j]*G[j];
907
double Gm2 = y[i]*G[i];
911
for(k=0;k<active_size;k++)
913
if(is_lower_bound(k))
917
if(-G[k] >= Gm1) continue;
919
else if(-G[k] >= Gm2) continue;
921
else if(is_upper_bound(k))
925
if(G[k] >= Gm2) continue;
927
else if(G[k] >= Gm1) continue;
932
swap_index(k,active_size);
933
--k; // look at the newcomer
936
// unshrink, check all variables again before final iterations
938
if(unshrinked || -(Gm1 + Gm2) > eps*10) return;
941
reconstruct_gradient();
943
for(k=l-1;k>=active_size;k--)
945
if(is_lower_bound(k))
949
if(-G[k] < Gm1) continue;
951
else if(-G[k] < Gm2) continue;
953
else if(is_upper_bound(k))
957
if(G[k] < Gm2) continue;
959
else if(G[k] < Gm1) continue;
963
swap_index(k,active_size);
965
++k; // look at the newcomer
969
double Solver::calculate_rho()
973
double ub = INF, lb = -INF, sum_free = 0;
974
for(int i=0;i<active_size;i++)
976
double yG = y[i]*G[i];
978
if(is_lower_bound(i))
985
else if(is_upper_bound(i))
1000
r = sum_free/nr_free;
1008
// Solver for nu-svm classification and regression
1010
// additional constraint: e^T \alpha = constant
1012
class Solver_NU : public Solver
1016
void Solve(int l, const QMatrix& Q, const double *b, const schar *y,
1017
double *alpha, double Cp, double Cn, double eps,
1018
SolutionInfo* si, int shrinking)
1021
Solver::Solve(l,Q,b,y,alpha,Cp,Cn,eps,si,shrinking);
1025
int select_working_set(int &i, int &j);
1026
double calculate_rho();
1027
void do_shrinking();
1030
// return 1 if already optimal, return 0 otherwise
1031
int Solver_NU::select_working_set(int &out_i, int &out_j)
1033
// return i,j such that y_i = y_j and
1034
// i: maximizes -y_i * grad(f)_i, i in I_up(\alpha)
1035
// j: minimizes the decrease of obj value
1036
// (if quadratic coefficeint <= 0, replace it with tau)
1037
// -y_j*grad(f)_j < -y_i*grad(f)_i, j in I_low(\alpha)
1039
double Gmaxp = -INF;
1040
double Gmaxp2 = -INF;
1043
double Gmaxn = -INF;
1044
double Gmaxn2 = -INF;
1048
double obj_diff_min = INF;
1050
for(int t=0;t<active_size;t++)
1053
if(!is_upper_bound(t))
1062
if(!is_lower_bound(t))
1072
const Qfloat *Q_ip = NULL;
1073
const Qfloat *Q_in = NULL;
1074
if(ip != -1) // NULL Q_ip not accessed: Gmaxp=-INF if ip=-1
1075
Q_ip = Q->get_Q(ip,active_size);
1077
Q_in = Q->get_Q(in,active_size);
1079
for(int j=0;j<active_size;j++)
1083
if (!is_lower_bound(j))
1085
double grad_diff=Gmaxp+G[j];
1091
double quad_coef = Q_ip[ip]+QD[j]-2*Q_ip[j];
1093
obj_diff = -(grad_diff*grad_diff)/quad_coef;
1095
obj_diff = -(grad_diff*grad_diff)/TAU;
1097
if (obj_diff <= obj_diff_min)
1100
obj_diff_min = obj_diff;
1107
if (!is_upper_bound(j))
1109
double grad_diff=Gmaxn-G[j];
1110
if (-G[j] >= Gmaxn2)
1115
double quad_coef = Q_in[in]+QD[j]-2*Q_in[j];
1117
obj_diff = -(grad_diff*grad_diff)/quad_coef;
1119
obj_diff = -(grad_diff*grad_diff)/TAU;
1121
if (obj_diff <= obj_diff_min)
1124
obj_diff_min = obj_diff;
1131
if(max(Gmaxp+Gmaxp2,Gmaxn+Gmaxn2) < eps)
1134
if (y[Gmin_idx] == +1)
1143
void Solver_NU::do_shrinking()
1145
double Gmax1 = -INF; // max { -y_i * grad(f)_i | y_i = +1, i in I_up(\alpha) }
1146
double Gmax2 = -INF; // max { y_i * grad(f)_i | y_i = +1, i in I_low(\alpha) }
1147
double Gmax3 = -INF; // max { -y_i * grad(f)_i | y_i = -1, i in I_up(\alpha) }
1148
double Gmax4 = -INF; // max { y_i * grad(f)_i | y_i = -1, i in I_low(\alpha) }
1150
// find maximal violating pair first
1152
for(k=0;k<active_size;k++)
1154
if(!is_upper_bound(k))
1158
if(-G[k] > Gmax1) Gmax1 = -G[k];
1160
else if(-G[k] > Gmax3) Gmax3 = -G[k];
1162
if(!is_lower_bound(k))
1166
if(G[k] > Gmax2) Gmax2 = G[k];
1168
else if(G[k] > Gmax4) Gmax4 = G[k];
1174
double Gm1 = -Gmax2;
1175
double Gm2 = -Gmax1;
1176
double Gm3 = -Gmax4;
1177
double Gm4 = -Gmax3;
1179
for(k=0;k<active_size;k++)
1181
if(is_lower_bound(k))
1185
if(-G[k] >= Gm1) continue;
1187
else if(-G[k] >= Gm3) continue;
1189
else if(is_upper_bound(k))
1193
if(G[k] >= Gm2) continue;
1195
else if(G[k] >= Gm4) continue;
1200
swap_index(k,active_size);
1201
--k; // look at the newcomer
1204
// unshrink, check all variables again before final iterations
1206
if(unshrinked || max(-(Gm1+Gm2),-(Gm3+Gm4)) > eps*10) return;
1209
reconstruct_gradient();
1211
for(k=l-1;k>=active_size;k--)
1213
if(is_lower_bound(k))
1217
if(-G[k] < Gm1) continue;
1219
else if(-G[k] < Gm3) continue;
1221
else if(is_upper_bound(k))
1225
if(G[k] < Gm2) continue;
1227
else if(G[k] < Gm4) continue;
1231
swap_index(k,active_size);
1233
++k; // look at the newcomer
1237
double Solver_NU::calculate_rho()
1239
int nr_free1 = 0,nr_free2 = 0;
1240
double ub1 = INF, ub2 = INF;
1241
double lb1 = -INF, lb2 = -INF;
1242
double sum_free1 = 0, sum_free2 = 0;
1244
for(int i=0;i<active_size;i++)
1248
if(is_lower_bound(i))
1249
ub1 = min(ub1,G[i]);
1250
else if(is_upper_bound(i))
1251
lb1 = max(lb1,G[i]);
1260
if(is_lower_bound(i))
1261
ub2 = min(ub2,G[i]);
1262
else if(is_upper_bound(i))
1263
lb2 = max(lb2,G[i]);
1274
r1 = sum_free1/nr_free1;
1279
r2 = sum_free2/nr_free2;
1288
// Q matrices for various formulations
1290
class SVC_Q: public Kernel
1293
SVC_Q(const svm_problem& prob, const svm_parameter& param, const schar *y_)
1294
:Kernel(prob.l, prob.x, param)
1297
cache = new Cache(prob.l,(int)(param.cache_size*(1<<20)));
1298
QD = new Qfloat[prob.l];
1299
for(int i=0;i<prob.l;i++)
1300
QD[i]= (Qfloat)(this->*kernel_function)(i,i);
1303
Qfloat *get_Q(int i, int len) const
1307
if((start = cache->get_data(i,&data,len)) < len)
1309
for(int j=start;j<len;j++)
1310
data[j] = (Qfloat)(y[i]*y[j]*(this->*kernel_function)(i,j));
1315
Qfloat *get_QD() const
1320
void swap_index(int i, int j) const
1322
cache->swap_index(i,j);
1323
Kernel::swap_index(i,j);
1340
class ONE_CLASS_Q: public Kernel
1343
ONE_CLASS_Q(const svm_problem& prob, const svm_parameter& param)
1344
:Kernel(prob.l, prob.x, param)
1346
cache = new Cache(prob.l,(int)(param.cache_size*(1<<20)));
1347
QD = new Qfloat[prob.l];
1348
for(int i=0;i<prob.l;i++)
1349
QD[i]= (Qfloat)(this->*kernel_function)(i,i);
1352
Qfloat *get_Q(int i, int len) const
1356
if((start = cache->get_data(i,&data,len)) < len)
1358
for(int j=start;j<len;j++)
1359
data[j] = (Qfloat)(this->*kernel_function)(i,j);
1364
Qfloat *get_QD() const
1369
void swap_index(int i, int j) const
1371
cache->swap_index(i,j);
1372
Kernel::swap_index(i,j);
1386
class SVR_Q: public Kernel
1389
SVR_Q(const svm_problem& prob, const svm_parameter& param)
1390
:Kernel(prob.l, prob.x, param)
1393
cache = new Cache(l,(int)(param.cache_size*(1<<20)));
1394
QD = new Qfloat[2*l];
1395
sign = new schar[2*l];
1396
index = new int[2*l];
1397
for(int k=0;k<l;k++)
1403
QD[k]= (Qfloat)(this->*kernel_function)(k,k);
1406
buffer[0] = new Qfloat[2*l];
1407
buffer[1] = new Qfloat[2*l];
1411
void swap_index(int i, int j) const
1413
swap(sign[i],sign[j]);
1414
swap(index[i],index[j]);
1418
Qfloat *get_Q(int i, int len) const
1421
int real_i = index[i];
1422
if(cache->get_data(real_i,&data,l) < l)
1424
for(int j=0;j<l;j++)
1425
data[j] = (Qfloat)(this->*kernel_function)(real_i,j);
1429
Qfloat *buf = buffer[next_buffer];
1430
next_buffer = 1 - next_buffer;
1432
for(int j=0;j<len;j++)
1433
buf[j] = si * sign[j] * data[index[j]];
1437
Qfloat *get_QD() const
1456
mutable int next_buffer;
1462
// construct and solve various formulations
1464
static void solve_c_svc(
1465
const svm_problem *prob, const svm_parameter* param,
1466
double *alpha, Solver::SolutionInfo* si, double Cp, double Cn)
1469
double *minus_ones = new double[l];
1470
schar *y = new schar[l];
1478
if(prob->y[i] > 0) y[i] = +1; else y[i]=-1;
1482
s.Solve(l, SVC_Q(*prob,*param,y), minus_ones, y,
1483
alpha, Cp, Cn, param->eps, si, param->shrinking);
1487
sum_alpha += alpha[i];
1490
info("nu = %f\n", sum_alpha/(Cp*prob->l));
1495
delete[] minus_ones;
1499
static void solve_nu_svc(
1500
const svm_problem *prob, const svm_parameter *param,
1501
double *alpha, Solver::SolutionInfo* si)
1505
double nu = param->nu;
1507
schar *y = new schar[l];
1515
double sum_pos = nu*l/2;
1516
double sum_neg = nu*l/2;
1521
alpha[i] = min(1.0,sum_pos);
1522
sum_pos -= alpha[i];
1526
alpha[i] = min(1.0,sum_neg);
1527
sum_neg -= alpha[i];
1530
double *zeros = new double[l];
1536
s.Solve(l, SVC_Q(*prob,*param,y), zeros, y,
1537
alpha, 1.0, 1.0, param->eps, si, param->shrinking);
1540
info("C = %f\n",1/r);
1547
si->upper_bound_p = 1/r;
1548
si->upper_bound_n = 1/r;
1554
static void solve_one_class(
1555
const svm_problem *prob, const svm_parameter *param,
1556
double *alpha, Solver::SolutionInfo* si)
1559
double *zeros = new double[l];
1560
schar *ones = new schar[l];
1563
int n = (int)(param->nu*prob->l); // # of alpha's at upper bound
1568
alpha[n] = param->nu * prob->l - n;
1579
s.Solve(l, ONE_CLASS_Q(*prob,*param), zeros, ones,
1580
alpha, 1.0, 1.0, param->eps, si, param->shrinking);
1586
static void solve_epsilon_svr(
1587
const svm_problem *prob, const svm_parameter *param,
1588
double *alpha, Solver::SolutionInfo* si)
1591
double *alpha2 = new double[2*l];
1592
double *linear_term = new double[2*l];
1593
schar *y = new schar[2*l];
1599
linear_term[i] = param->p - prob->y[i];
1603
linear_term[i+l] = param->p + prob->y[i];
1608
s.Solve(2*l, SVR_Q(*prob,*param), linear_term, y,
1609
alpha2, param->C, param->C, param->eps, si, param->shrinking);
1611
double sum_alpha = 0;
1614
alpha[i] = alpha2[i] - alpha2[i+l];
1615
sum_alpha += fabs(alpha[i]);
1617
info("nu = %f\n",sum_alpha/(param->C*l));
1620
delete[] linear_term;
1624
static void solve_nu_svr(
1625
const svm_problem *prob, const svm_parameter *param,
1626
double *alpha, Solver::SolutionInfo* si)
1629
double C = param->C;
1630
double *alpha2 = new double[2*l];
1631
double *linear_term = new double[2*l];
1632
schar *y = new schar[2*l];
1635
double sum = C * param->nu * l / 2;
1638
alpha2[i] = alpha2[i+l] = min(sum,C);
1641
linear_term[i] = - prob->y[i];
1644
linear_term[i+l] = prob->y[i];
1649
s.Solve(2*l, SVR_Q(*prob,*param), linear_term, y,
1650
alpha2, C, C, param->eps, si, param->shrinking);
1652
info("epsilon = %f\n",-si->r);
1655
alpha[i] = alpha2[i] - alpha2[i+l];
1658
delete[] linear_term;
1663
// decision_function
1665
struct decision_function
1671
decision_function svm_train_one(
1672
const svm_problem *prob, const svm_parameter *param,
1673
double Cp, double Cn)
1675
double *alpha = Malloc(double,prob->l);
1676
Solver::SolutionInfo si;
1677
switch(param->svm_type)
1680
solve_c_svc(prob,param,alpha,&si,Cp,Cn);
1683
solve_nu_svc(prob,param,alpha,&si);
1686
solve_one_class(prob,param,alpha,&si);
1689
solve_epsilon_svr(prob,param,alpha,&si);
1692
solve_nu_svr(prob,param,alpha,&si);
1696
info("obj = %f, rho = %f\n",si.obj,si.rho);
1702
for(int i=0;i<prob->l;i++)
1704
if(fabs(alpha[i]) > 0)
1709
if(fabs(alpha[i]) >= si.upper_bound_p)
1714
if(fabs(alpha[i]) >= si.upper_bound_n)
1720
info("nSV = %d, nBSV = %d\n",nSV,nBSV);
1722
decision_function f;
1733
svm_parameter param; // parameter
1734
int nr_class; // number of classes, = 2 in regression/one class svm
1736
svm_node **SV; // SVs (SV[l])
1737
double **sv_coef; // coefficients for SVs in decision functions (sv_coef[n-1][l])
1738
double *rho; // constants in decision functions (rho[n*(n-1)/2])
1739
double *probA; // pariwise probability information
1742
// for classification only
1744
int *label; // label of each class (label[n])
1745
int *nSV; // number of SVs for each class (nSV[n])
1746
// nSV[0] + nSV[1] + ... + nSV[n-1] = l
1748
int free_sv; // 1 if svm_model is created by svm_load_model
1749
// 0 if svm_model is created by svm_train
1752
// Platt's binary SVM Probablistic Output: an improvement from Lin et al.
1754
int l, const double *dec_values, const double *labels,
1755
double& A, double& B)
1757
double prior1=0, prior0 = 0;
1761
if (labels[i] > 0) prior1+=1;
1764
int max_iter=100; // Maximal number of iterations
1765
double min_step=1e-10; // Minimal step taken in line search
1766
double sigma=1e-3; // For numerically strict PD of Hessian
1768
double hiTarget=(prior1+1.0)/(prior1+2.0);
1769
double loTarget=1/(prior0+2.0);
1770
double *t=Malloc(double,l);
1771
double fApB,p,q,h11,h22,h21,g1,g2,det,dA,dB,gd,stepsize;
1772
double newA,newB,newf,d1,d2;
1775
// Initial Point and Initial Fun Value
1776
A=0.0; B=log((prior0+1.0)/(prior1+1.0));
1781
if (labels[i]>0) t[i]=hiTarget;
1783
fApB = dec_values[i]*A+B;
1785
fval += t[i]*fApB + log(1+exp(-fApB));
1787
fval += (t[i] - 1)*fApB +log(1+exp(fApB));
1789
for (iter=0;iter<max_iter;iter++)
1791
// Update Gradient and Hessian (use H' = H + sigma I)
1792
h11=sigma; // numerically ensures strict PD
1794
h21=0.0;g1=0.0;g2=0.0;
1797
fApB = dec_values[i]*A+B;
1800
p=exp(-fApB)/(1.0+exp(-fApB));
1801
q=1.0/(1.0+exp(-fApB));
1805
p=1.0/(1.0+exp(fApB));
1806
q=exp(fApB)/(1.0+exp(fApB));
1809
h11+=dec_values[i]*dec_values[i]*d2;
1811
h21+=dec_values[i]*d2;
1813
g1+=dec_values[i]*d1;
1817
// Stopping Criteria
1818
if (fabs(g1)<eps && fabs(g2)<eps)
1821
// Finding Newton direction: -inv(H') * g
1822
det=h11*h22-h21*h21;
1823
dA=-(h22*g1 - h21 * g2) / det;
1824
dB=-(-h21*g1+ h11 * g2) / det;
1828
stepsize = 1; // Line Search
1829
while (stepsize >= min_step)
1831
newA = A + stepsize * dA;
1832
newB = B + stepsize * dB;
1834
// New function value
1838
fApB = dec_values[i]*newA+newB;
1840
newf += t[i]*fApB + log(1+exp(-fApB));
1842
newf += (t[i] - 1)*fApB +log(1+exp(fApB));
1844
// Check sufficient decrease
1845
if (newf<fval+0.0001*stepsize*gd)
1847
A=newA;B=newB;fval=newf;
1851
stepsize = stepsize / 2.0;
1854
if (stepsize < min_step)
1856
info("Line search fails in two-class probability estimates\n");
1862
info("Reaching maximal iterations in two-class probability estimates\n");
1866
double sigmoid_predict(double decision_value, double A, double B)
1868
double fApB = decision_value*A+B;
1870
return exp(-fApB)/(1.0+exp(-fApB));
1872
return 1.0/(1+exp(fApB)) ;
1875
// Method 2 from the multiclass_prob paper by Wu, Lin, and Weng
1876
void multiclass_probability(int k, double **r, double *p)
1879
int iter = 0, max_iter=max(100,k);
1880
double **Q=Malloc(double *,k);
1881
double *Qp=Malloc(double,k);
1882
double pQp, eps=0.005/k;
1886
p[t]=1.0/k; // Valid if k = 1
1887
Q[t]=Malloc(double,k);
1891
Q[t][t]+=r[j][t]*r[j][t];
1896
Q[t][t]+=r[j][t]*r[j][t];
1897
Q[t][j]=-r[j][t]*r[t][j];
1900
for (iter=0;iter<max_iter;iter++)
1902
// stopping condition, recalculate QP,pQP for numerical accuracy
1908
Qp[t]+=Q[t][j]*p[j];
1914
double error=fabs(Qp[t]-pQp);
1915
if (error>max_error)
1918
if (max_error<eps) break;
1922
double diff=(-Qp[t]+pQp)/Q[t][t];
1924
pQp=(pQp+diff*(diff*Q[t][t]+2*Qp[t]))/(1+diff)/(1+diff);
1927
Qp[j]=(Qp[j]+diff*Q[t][j])/(1+diff);
1933
info("Exceeds max_iter in multiclass_prob\n");
1934
for(t=0;t<k;t++) free(Q[t]);
1939
// Cross-validation decision values for probability estimates
1940
void svm_binary_svc_probability(
1941
const svm_problem *prob, const svm_parameter *param,
1942
double Cp, double Cn, double& probA, double& probB)
1946
int *perm = Malloc(int,prob->l);
1947
double *dec_values = Malloc(double,prob->l);
1950
for(i=0;i<prob->l;i++) perm[i]=i;
1951
for(i=0;i<prob->l;i++)
1953
int j = i+rand()%(prob->l-i);
1954
swap(perm[i],perm[j]);
1956
for(i=0;i<nr_fold;i++)
1958
int begin = i*prob->l/nr_fold;
1959
int end = (i+1)*prob->l/nr_fold;
1961
struct svm_problem subprob;
1963
subprob.l = prob->l-(end-begin);
1964
subprob.x = Malloc(struct svm_node*,subprob.l);
1965
subprob.y = Malloc(double,subprob.l);
1968
for(j=0;j<begin;j++)
1970
subprob.x[k] = prob->x[perm[j]];
1971
subprob.y[k] = prob->y[perm[j]];
1974
for(j=end;j<prob->l;j++)
1976
subprob.x[k] = prob->x[perm[j]];
1977
subprob.y[k] = prob->y[perm[j]];
1980
int p_count=0,n_count=0;
1987
if(p_count==0 && n_count==0)
1988
for(j=begin;j<end;j++)
1989
dec_values[perm[j]] = 0;
1990
else if(p_count > 0 && n_count == 0)
1991
for(j=begin;j<end;j++)
1992
dec_values[perm[j]] = 1;
1993
else if(p_count == 0 && n_count > 0)
1994
for(j=begin;j<end;j++)
1995
dec_values[perm[j]] = -1;
1998
svm_parameter subparam = *param;
1999
subparam.probability=0;
2001
subparam.nr_weight=2;
2002
subparam.weight_label = Malloc(int,2);
2003
subparam.weight = Malloc(double,2);
2004
subparam.weight_label[0]=+1;
2005
subparam.weight_label[1]=-1;
2006
subparam.weight[0]=Cp;
2007
subparam.weight[1]=Cn;
2008
struct svm_model *submodel = svm_train(&subprob,&subparam);
2009
for(j=begin;j<end;j++)
2011
svm_predict_values(submodel,prob->x[perm[j]],&(dec_values[perm[j]]));
2012
// ensure +1 -1 order; reason not using CV subroutine
2013
dec_values[perm[j]] *= submodel->label[0];
2015
svm_destroy_model(submodel);
2016
svm_destroy_param(&subparam);
2021
sigmoid_train(prob->l,dec_values,prob->y,probA,probB);
2026
// Return parameter of a Laplace distribution
2027
double svm_svr_probability(
2028
const svm_problem *prob, const svm_parameter *param)
2032
double *ymv = Malloc(double,prob->l);
2035
svm_parameter newparam = *param;
2036
newparam.probability = 0;
2037
svm_cross_validation(prob,&newparam,nr_fold,ymv);
2038
for(i=0;i<prob->l;i++)
2040
ymv[i]=prob->y[i]-ymv[i];
2041
mae += fabs(ymv[i]);
2044
double std=sqrt(2*mae*mae);
2047
for(i=0;i<prob->l;i++)
2048
if (fabs(ymv[i]) > 5*std)
2052
mae /= (prob->l-count);
2053
info("Prob. model for test data: target value = predicted value + z,\nz: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma= %g\n",mae);
2059
// label: label name, start: begin of each class, count: #data of classes, perm: indices to the original data
2060
// perm, length l, must be allocated before calling this subroutine
2061
void svm_group_classes(const svm_problem *prob, int *nr_class_ret, int **label_ret, int **start_ret, int **count_ret, int *perm)
2064
int max_nr_class = 16;
2066
int *label = Malloc(int,max_nr_class);
2067
int *count = Malloc(int,max_nr_class);
2068
int *data_label = Malloc(int,l);
2073
int this_label = (int)prob->y[i];
2075
for(j=0;j<nr_class;j++)
2077
if(this_label == label[j])
2086
if(nr_class == max_nr_class)
2089
label = (int *)realloc(label,max_nr_class*sizeof(int));
2090
count = (int *)realloc(count,max_nr_class*sizeof(int));
2092
label[nr_class] = this_label;
2093
count[nr_class] = 1;
2098
int *start = Malloc(int,nr_class);
2100
for(i=1;i<nr_class;i++)
2101
start[i] = start[i-1]+count[i-1];
2104
perm[start[data_label[i]]] = i;
2105
++start[data_label[i]];
2108
for(i=1;i<nr_class;i++)
2109
start[i] = start[i-1]+count[i-1];
2111
*nr_class_ret = nr_class;
2119
// Interface functions
2121
//
// Train an SVM model on `prob` with parameters `param`.
// For ONE_CLASS / EPSILON_SVR / NU_SVR a single decision function is
// trained; for C_SVC / NU_SVC one binary classifier is trained for each
// of the k*(k-1)/2 class pairs (one-against-one).
// The returned model borrows prob->x (free_sv == 0), so the caller must
// keep the training data alive while the model is in use.
//
svm_model *svm_train(const svm_problem *prob, const svm_parameter *param)
{
	svm_model *model = Malloc(svm_model,1);
	model->param = *param;
	model->free_sv = 0;	// XXX: SVs point into prob->x, not owned by model

	if(param->svm_type == ONE_CLASS ||
	   param->svm_type == EPSILON_SVR ||
	   param->svm_type == NU_SVR)
	{
		// regression or one-class-svm
		model->nr_class = 2;
		model->label = NULL;
		model->nSV = NULL;
		model->probA = NULL; model->probB = NULL;
		model->sv_coef = Malloc(double *,1);

		if(param->probability &&
		   (param->svm_type == EPSILON_SVR ||
		    param->svm_type == NU_SVR))
		{
			// SVR probability model: single Laplace parameter in probA[0]
			model->probA = Malloc(double,1);
			model->probA[0] = svm_svr_probability(prob,param);
		}

		decision_function f = svm_train_one(prob,param,0,0);
		model->rho = Malloc(double,1);
		model->rho[0] = f.rho;

		// keep only the support vectors (alpha != 0)
		int nSV = 0;
		int i;
		for(i=0;i<prob->l;i++)
			if(fabs(f.alpha[i]) > 0) ++nSV;
		model->l = nSV;
		model->SV = Malloc(svm_node *,nSV);
		model->sv_coef[0] = Malloc(double,nSV);
		int j = 0;
		for(i=0;i<prob->l;i++)
			if(fabs(f.alpha[i]) > 0)
			{
				model->SV[j] = prob->x[i];
				model->sv_coef[0][j] = f.alpha[i];
				++j;
			}

		free(f.alpha);
	}
	else
	{
		// classification
		int l = prob->l;
		int nr_class;
		int *label = NULL;
		int *start = NULL;
		int *count = NULL;
		int *perm = Malloc(int,l);

		// group training data of the same class
		svm_group_classes(prob,&nr_class,&label,&start,&count,perm);
		svm_node **x = Malloc(svm_node *,l);
		int i;
		for(i=0;i<l;i++)
			x[i] = prob->x[perm[i]];

		// calculate weighted C

		double *weighted_C = Malloc(double, nr_class);
		for(i=0;i<nr_class;i++)
			weighted_C[i] = param->C;
		for(i=0;i<param->nr_weight;i++)
		{
			int j;
			for(j=0;j<nr_class;j++)
				if(param->weight_label[i] == label[j])
					break;
			if(j == nr_class)
				fprintf(stderr,"warning: class label %d specified in weight is not found\n", param->weight_label[i]);
			else
				weighted_C[j] *= param->weight[i];
		}

		// train k*(k-1)/2 models

		bool *nonzero = Malloc(bool,l);
		for(i=0;i<l;i++)
			nonzero[i] = false;
		decision_function *f = Malloc(decision_function,nr_class*(nr_class-1)/2);

		double *probA=NULL,*probB=NULL;
		if (param->probability)
		{
			probA=Malloc(double,nr_class*(nr_class-1)/2);
			probB=Malloc(double,nr_class*(nr_class-1)/2);
		}

		int p = 0;
		for(i=0;i<nr_class;i++)
			for(int j=i+1;j<nr_class;j++)
			{
				// build the two-class subproblem for classes i (+1) and j (-1)
				svm_problem sub_prob;
				int si = start[i], sj = start[j];
				int ci = count[i], cj = count[j];
				sub_prob.l = ci+cj;
				sub_prob.x = Malloc(svm_node *,sub_prob.l);
				sub_prob.y = Malloc(double,sub_prob.l);
				int k;
				for(k=0;k<ci;k++)
				{
					sub_prob.x[k] = x[si+k];
					sub_prob.y[k] = +1;
				}
				for(k=0;k<cj;k++)
				{
					sub_prob.x[ci+k] = x[sj+k];
					sub_prob.y[ci+k] = -1;
				}

				if(param->probability)
					svm_binary_svc_probability(&sub_prob,param,weighted_C[i],weighted_C[j],probA[p],probB[p]);

				f[p] = svm_train_one(&sub_prob,param,weighted_C[i],weighted_C[j]);
				// mark training points that are SVs in any pairwise classifier
				for(k=0;k<ci;k++)
					if(!nonzero[si+k] && fabs(f[p].alpha[k]) > 0)
						nonzero[si+k] = true;
				for(k=0;k<cj;k++)
					if(!nonzero[sj+k] && fabs(f[p].alpha[ci+k]) > 0)
						nonzero[sj+k] = true;
				free(sub_prob.x);
				free(sub_prob.y);
				++p;
			}

		// build output

		model->nr_class = nr_class;

		model->label = Malloc(int,nr_class);
		for(i=0;i<nr_class;i++)
			model->label[i] = label[i];

		model->rho = Malloc(double,nr_class*(nr_class-1)/2);
		for(i=0;i<nr_class*(nr_class-1)/2;i++)
			model->rho[i] = f[i].rho;

		if(param->probability)
		{
			model->probA = Malloc(double,nr_class*(nr_class-1)/2);
			model->probB = Malloc(double,nr_class*(nr_class-1)/2);
			for(i=0;i<nr_class*(nr_class-1)/2;i++)
			{
				model->probA[i] = probA[i];
				model->probB[i] = probB[i];
			}
		}
		else
		{
			model->probA=NULL;
			model->probB=NULL;
		}

		// count SVs per class and in total
		int total_sv = 0;
		int *nz_count = Malloc(int,nr_class);
		model->nSV = Malloc(int,nr_class);
		for(i=0;i<nr_class;i++)
		{
			int nSV = 0;
			for(int j=0;j<count[i];j++)
				if(nonzero[start[i]+j])
				{
					++nSV;
					++total_sv;
				}
			model->nSV[i] = nSV;
			nz_count[i] = nSV;
		}

		info("Total nSV = %d\n",total_sv);

		model->l = total_sv;
		model->SV = Malloc(svm_node *,total_sv);
		p = 0;
		for(i=0;i<l;i++)
			if(nonzero[i]) model->SV[p++] = x[i];

		// nz_start[i]: offset of class i's SVs within model->SV
		int *nz_start = Malloc(int,nr_class);
		nz_start[0] = 0;
		for(i=1;i<nr_class;i++)
			nz_start[i] = nz_start[i-1]+nz_count[i-1];

		model->sv_coef = Malloc(double *,nr_class-1);
		for(i=0;i<nr_class-1;i++)
			model->sv_coef[i] = Malloc(double,total_sv);

		p = 0;
		for(i=0;i<nr_class;i++)
			for(int j=i+1;j<nr_class;j++)
			{
				// classifier (i,j): coefficients with
				// i are in sv_coef[j-1][nz_start[i]...],
				// j are in sv_coef[i][nz_start[j]...]

				int si = start[i];
				int sj = start[j];
				int ci = count[i];
				int cj = count[j];

				int q = nz_start[i];
				int k;
				for(k=0;k<ci;k++)
					if(nonzero[si+k])
						model->sv_coef[j-1][q++] = f[p].alpha[k];
				q = nz_start[j];
				for(k=0;k<cj;k++)
					if(nonzero[sj+k])
						model->sv_coef[i][q++] = f[p].alpha[ci+k];
				++p;
			}

		free(label);
		free(probA);	// free(NULL) is a no-op when probability == 0
		free(probB);
		free(count);
		free(perm);
		free(start);
		free(x);
		free(weighted_C);
		free(nonzero);
		for(i=0;i<nr_class*(nr_class-1)/2;i++)
			free(f[i].alpha);
		free(f);
		free(nz_count);
		free(nz_start);
	}
	return model;
}
2357
// Stratified cross validation
2358
void svm_cross_validation(const svm_problem *prob, const svm_parameter *param, int nr_fold, double *target)
2361
int *fold_start = Malloc(int,nr_fold+1);
2363
int *perm = Malloc(int,l);
2366
// stratified cv may not give leave-one-out rate
2367
// Each class to l folds -> some folds may have zero elements
2368
if((param->svm_type == C_SVC ||
2369
param->svm_type == NU_SVC) && nr_fold < l)
2374
svm_group_classes(prob,&nr_class,&label,&start,&count,perm);
2376
// random shuffle and then data grouped by fold using the array perm
2377
int *fold_count = Malloc(int,nr_fold);
2379
int *index = Malloc(int,l);
2382
for (c=0; c<nr_class; c++)
2383
for(i=0;i<count[c];i++)
2385
int j = i+rand()%(count[c]-i);
2386
swap(index[start[c]+j],index[start[c]+i]);
2388
for(i=0;i<nr_fold;i++)
2391
for (c=0; c<nr_class;c++)
2392
fold_count[i]+=(i+1)*count[c]/nr_fold-i*count[c]/nr_fold;
2395
for (i=1;i<=nr_fold;i++)
2396
fold_start[i] = fold_start[i-1]+fold_count[i-1];
2397
for (c=0; c<nr_class;c++)
2398
for(i=0;i<nr_fold;i++)
2400
int begin = start[c]+i*count[c]/nr_fold;
2401
int end = start[c]+(i+1)*count[c]/nr_fold;
2402
for(int j=begin;j<end;j++)
2404
perm[fold_start[i]] = index[j];
2409
for (i=1;i<=nr_fold;i++)
2410
fold_start[i] = fold_start[i-1]+fold_count[i-1];
2419
for(i=0;i<l;i++) perm[i]=i;
2422
int j = i+rand()%(l-i);
2423
swap(perm[i],perm[j]);
2425
for(i=0;i<=nr_fold;i++)
2426
fold_start[i]=i*l/nr_fold;
2429
for(i=0;i<nr_fold;i++)
2431
int begin = fold_start[i];
2432
int end = fold_start[i+1];
2434
struct svm_problem subprob;
2436
subprob.l = l-(end-begin);
2437
subprob.x = Malloc(struct svm_node*,subprob.l);
2438
subprob.y = Malloc(double,subprob.l);
2441
for(j=0;j<begin;j++)
2443
subprob.x[k] = prob->x[perm[j]];
2444
subprob.y[k] = prob->y[perm[j]];
2449
subprob.x[k] = prob->x[perm[j]];
2450
subprob.y[k] = prob->y[perm[j]];
2453
struct svm_model *submodel = svm_train(&subprob,param);
2454
if(param->probability &&
2455
(param->svm_type == C_SVC || param->svm_type == NU_SVC))
2457
double *prob_estimates=Malloc(double,svm_get_nr_class(submodel));
2458
for(j=begin;j<end;j++)
2459
target[perm[j]] = svm_predict_probability(submodel,prob->x[perm[j]],prob_estimates);
2460
free(prob_estimates);
2463
for(j=begin;j<end;j++)
2464
target[perm[j]] = svm_predict(submodel,prob->x[perm[j]]);
2465
svm_destroy_model(submodel);
2474
int svm_get_svm_type(const svm_model *model)
2476
return model->param.svm_type;
2479
int svm_get_nr_class(const svm_model *model)
2481
return model->nr_class;
2484
void svm_get_labels(const svm_model *model, int* label)
2486
if (model->label != NULL)
2487
for(int i=0;i<model->nr_class;i++)
2488
label[i] = model->label[i];
2491
double svm_get_svr_probability(const svm_model *model)
2493
if ((model->param.svm_type == EPSILON_SVR || model->param.svm_type == NU_SVR) &&
2495
return model->probA[0];
2498
info("Model doesn't contain information for SVR probability inference\n");
2503
void svm_predict_values(const svm_model *model, const svm_node *x, double* dec_values)
2505
if(model->param.svm_type == ONE_CLASS ||
2506
model->param.svm_type == EPSILON_SVR ||
2507
model->param.svm_type == NU_SVR)
2509
double *sv_coef = model->sv_coef[0];
2511
for(int i=0;i<model->l;i++)
2512
sum += sv_coef[i] * Kernel::k_function(x,model->SV[i],model->param);
2513
sum -= model->rho[0];
2519
int nr_class = model->nr_class;
2522
double *kvalue = Malloc(double,l);
2524
kvalue[i] = Kernel::k_function(x,model->SV[i],model->param);
2526
int *start = Malloc(int,nr_class);
2528
for(i=1;i<nr_class;i++)
2529
start[i] = start[i-1]+model->nSV[i-1];
2533
for(i=0;i<nr_class;i++)
2534
for(int j=i+1;j<nr_class;j++)
2539
int ci = model->nSV[i];
2540
int cj = model->nSV[j];
2543
double *coef1 = model->sv_coef[j-1];
2544
double *coef2 = model->sv_coef[i];
2546
sum += coef1[si+k] * kvalue[si+k];
2548
sum += coef2[sj+k] * kvalue[sj+k];
2549
sum -= model->rho[p++];
2550
dec_values[pos++] = sum;
2558
double svm_predict(const svm_model *model, const svm_node *x)
2560
if(model->param.svm_type == ONE_CLASS ||
2561
model->param.svm_type == EPSILON_SVR ||
2562
model->param.svm_type == NU_SVR)
2565
svm_predict_values(model, x, &res);
2567
if(model->param.svm_type == ONE_CLASS)
2568
return (res>0)?1:-1;
2575
int nr_class = model->nr_class;
2576
double *dec_values = Malloc(double, nr_class*(nr_class-1)/2);
2577
svm_predict_values(model, x, dec_values);
2579
int *vote = Malloc(int,nr_class);
2580
for(i=0;i<nr_class;i++)
2583
for(i=0;i<nr_class;i++)
2584
for(int j=i+1;j<nr_class;j++)
2586
if(dec_values[pos++] > 0)
2592
int vote_max_idx = 0;
2593
for(i=1;i<nr_class;i++)
2594
if(vote[i] > vote[vote_max_idx])
2598
return model->label[vote_max_idx];
2602
double svm_predict_probability(
2603
const svm_model *model, const svm_node *x, double *prob_estimates)
2605
if ((model->param.svm_type == C_SVC || model->param.svm_type == NU_SVC) &&
2606
model->probA!=NULL && model->probB!=NULL)
2609
int nr_class = model->nr_class;
2610
double *dec_values = Malloc(double, nr_class*(nr_class-1)/2);
2611
svm_predict_values(model, x, dec_values);
2613
double min_prob=1e-7;
2614
double **pairwise_prob=Malloc(double *,nr_class);
2615
for(i=0;i<nr_class;i++)
2616
pairwise_prob[i]=Malloc(double,nr_class);
2618
for(i=0;i<nr_class;i++)
2619
for(int j=i+1;j<nr_class;j++)
2621
pairwise_prob[i][j]=min(max(sigmoid_predict(dec_values[k],model->probA[k],model->probB[k]),min_prob),1-min_prob);
2622
pairwise_prob[j][i]=1-pairwise_prob[i][j];
2625
multiclass_probability(nr_class,pairwise_prob,prob_estimates);
2627
int prob_max_idx = 0;
2628
for(i=1;i<nr_class;i++)
2629
if(prob_estimates[i] > prob_estimates[prob_max_idx])
2631
for(i=0;i<nr_class;i++)
2632
free(pairwise_prob[i]);
2634
free(pairwise_prob);
2635
return model->label[prob_max_idx];
2638
return svm_predict(model, x);
2641
// Textual names for svm_type / kernel_type enum values, indexed by the
// enum and NULL-terminated; used by the model file reader and writer.
const char *svm_type_table[] =
{
	"c_svc","nu_svc","one_class","epsilon_svr","nu_svr",NULL
};

const char *kernel_type_table[]=
{
	"linear","polynomial","rbf","sigmoid","precomputed",NULL
};
2651
int svm_save_model(const char *model_file_name, const svm_model *model)
2653
FILE *fp = fopen(model_file_name,"w");
2654
if(fp==NULL) return -1;
2656
const svm_parameter& param = model->param;
2658
fprintf(fp,"svm_type %s\n", svm_type_table[param.svm_type]);
2659
fprintf(fp,"kernel_type %s\n", kernel_type_table[param.kernel_type]);
2661
if(param.kernel_type == POLY)
2662
fprintf(fp,"degree %d\n", param.degree);
2664
if(param.kernel_type == POLY || param.kernel_type == RBF || param.kernel_type == SIGMOID)
2665
fprintf(fp,"gamma %g\n", param.gamma);
2667
if(param.kernel_type == POLY || param.kernel_type == SIGMOID)
2668
fprintf(fp,"coef0 %g\n", param.coef0);
2670
int nr_class = model->nr_class;
2672
fprintf(fp, "nr_class %d\n", nr_class);
2673
fprintf(fp, "total_sv %d\n",l);
2677
for(int i=0;i<nr_class*(nr_class-1)/2;i++)
2678
fprintf(fp," %g",model->rho[i]);
2684
fprintf(fp, "label");
2685
for(int i=0;i<nr_class;i++)
2686
fprintf(fp," %d",model->label[i]);
2690
if(model->probA) // regression has probA only
2692
fprintf(fp, "probA");
2693
for(int i=0;i<nr_class*(nr_class-1)/2;i++)
2694
fprintf(fp," %g",model->probA[i]);
2699
fprintf(fp, "probB");
2700
for(int i=0;i<nr_class*(nr_class-1)/2;i++)
2701
fprintf(fp," %g",model->probB[i]);
2707
fprintf(fp, "nr_sv");
2708
for(int i=0;i<nr_class;i++)
2709
fprintf(fp," %d",model->nSV[i]);
2713
fprintf(fp, "SV\n");
2714
const double * const *sv_coef = model->sv_coef;
2715
const svm_node * const *SV = model->SV;
2717
for(int i=0;i<l;i++)
2719
for(int j=0;j<nr_class-1;j++)
2720
fprintf(fp, "%.16g ",sv_coef[j][i]);
2722
const svm_node *p = SV[i];
2724
if(param.kernel_type == PRECOMPUTED)
2725
fprintf(fp,"0:%d ",(int)(p->value));
2727
while(p->index != -1)
2729
fprintf(fp,"%d:%.8g ",p->index,p->value);
2739
// Load a model previously written by svm_save_model.
// Returns a heap-allocated model (free_sv == 1: the model owns its SV
// storage; release with svm_destroy_model), or NULL on open/parse failure.
svm_model *svm_load_model(const char *model_file_name)
{
	FILE *fp = fopen(model_file_name,"rb");
	if(fp==NULL) return NULL;

	// read parameters

	svm_model *model = Malloc(svm_model,1);
	svm_parameter& param = model->param;
	model->rho = NULL;
	model->probA = NULL;
	model->probB = NULL;
	model->label = NULL;
	model->nSV = NULL;

	char cmd[81];
	while(1)
	{
		fscanf(fp,"%80s",cmd);

		if(strcmp(cmd,"svm_type")==0)
		{
			fscanf(fp,"%80s",cmd);
			int i;
			for(i=0;svm_type_table[i];i++)
			{
				if(strcmp(svm_type_table[i],cmd)==0)
				{
					param.svm_type=i;
					break;
				}
			}
			if(svm_type_table[i] == NULL)
			{
				fprintf(stderr,"unknown svm type.\n");
				fclose(fp);	// avoid leaking the handle on parse failure
				free(model->rho);
				free(model->label);
				free(model->nSV);
				free(model);
				return NULL;
			}
		}
		else if(strcmp(cmd,"kernel_type")==0)
		{
			fscanf(fp,"%80s",cmd);
			int i;
			for(i=0;kernel_type_table[i];i++)
			{
				if(strcmp(kernel_type_table[i],cmd)==0)
				{
					param.kernel_type=i;
					break;
				}
			}
			if(kernel_type_table[i] == NULL)
			{
				fprintf(stderr,"unknown kernel function.\n");
				fclose(fp);
				free(model->rho);
				free(model->label);
				free(model->nSV);
				free(model);
				return NULL;
			}
		}
		else if(strcmp(cmd,"degree")==0)
			fscanf(fp,"%d",&param.degree);
		else if(strcmp(cmd,"gamma")==0)
			fscanf(fp,"%lf",&param.gamma);
		else if(strcmp(cmd,"coef0")==0)
			fscanf(fp,"%lf",&param.coef0);
		else if(strcmp(cmd,"nr_class")==0)
			fscanf(fp,"%d",&model->nr_class);
		else if(strcmp(cmd,"total_sv")==0)
			fscanf(fp,"%d",&model->l);
		else if(strcmp(cmd,"rho")==0)
		{
			int n = model->nr_class * (model->nr_class-1)/2;
			model->rho = Malloc(double,n);
			for(int i=0;i<n;i++)
				fscanf(fp,"%lf",&model->rho[i]);
		}
		else if(strcmp(cmd,"label")==0)
		{
			int n = model->nr_class;
			model->label = Malloc(int,n);
			for(int i=0;i<n;i++)
				fscanf(fp,"%d",&model->label[i]);
		}
		else if(strcmp(cmd,"probA")==0)
		{
			int n = model->nr_class * (model->nr_class-1)/2;
			model->probA = Malloc(double,n);
			for(int i=0;i<n;i++)
				fscanf(fp,"%lf",&model->probA[i]);
		}
		else if(strcmp(cmd,"probB")==0)
		{
			int n = model->nr_class * (model->nr_class-1)/2;
			model->probB = Malloc(double,n);
			for(int i=0;i<n;i++)
				fscanf(fp,"%lf",&model->probB[i]);
		}
		else if(strcmp(cmd,"nr_sv")==0)
		{
			int n = model->nr_class;
			model->nSV = Malloc(int,n);
			for(int i=0;i<n;i++)
				fscanf(fp,"%d",&model->nSV[i]);
		}
		else if(strcmp(cmd,"SV")==0)
		{
			// skip the rest of the "SV" line; SVs start on the next line
			while(1)
			{
				int c = getc(fp);
				if(c==EOF || c=='\n') break;
			}
			break;
		}
		else
		{
			fprintf(stderr,"unknown text in model file\n");
			fclose(fp);
			free(model->rho);
			free(model->label);
			free(model->nSV);
			free(model);
			return NULL;
		}
	}

	// read sv_coef and SV

	int elements = 0;
	long pos = ftell(fp);

	// first pass: count svm_node slots needed (one per "index:value" pair
	// plus one terminator per line)
	while(1)
	{
		int c = fgetc(fp);
		switch(c)
		{
			case '\n':
				// count the '-1' element
			case ':':
				++elements;
				break;
			case EOF:
				goto out;
			default:
				;
		}
	}
out:
	fseek(fp,pos,SEEK_SET);	// rewind for the real parse

	int m = model->nr_class - 1;
	int l = model->l;
	model->sv_coef = Malloc(double *,m);
	int i;
	for(i=0;i<m;i++)
		model->sv_coef[i] = Malloc(double,l);
	model->SV = Malloc(svm_node*,l);
	svm_node *x_space=NULL;
	if(l>0) x_space = Malloc(svm_node,elements);

	int j=0;
	for(i=0;i<l;i++)
	{
		model->SV[i] = &x_space[j];
		for(int k=0;k<m;k++)
			fscanf(fp,"%lf",&model->sv_coef[k][i]);
		while(1)
		{
			int c;
			do {
				c = getc(fp);
				if(c=='\n') goto out2;
			} while(isspace(c));
			ungetc(c,fp);
			fscanf(fp,"%d:%lf",&(x_space[j].index),&(x_space[j].value));
			++j;
		}
out2:
		x_space[j++].index = -1;	// per-vector terminator
	}

	fclose(fp);

	model->free_sv = 1;	// XXX: model owns x_space and all arrays
	return model;
}
2929
// Release all memory owned by `model`. If free_sv is set (model was
// loaded from file) the SV node storage is one contiguous block whose
// base is model->SV[0], so only that pointer is freed.
void svm_destroy_model(svm_model* model)
{
	if(model->free_sv && model->l > 0)
		free((void *)(model->SV[0]));
	for(int i=0;i<model->nr_class-1;i++)
		free(model->sv_coef[i]);
	free(model->SV);
	free(model->sv_coef);
	free(model->rho);
	free(model->label);
	free(model->probA);
	free(model->probB);
	free(model->nSV);
	free(model);
}
2945
// Free the per-class weight arrays owned by `param` (safe when they are NULL).
void svm_destroy_param(svm_parameter* param)
{
	free(param->weight_label);
	free(param->weight);
}
2951
const char *svm_check_parameter(const svm_problem *prob, const svm_parameter *param)
2955
int svm_type = param->svm_type;
2956
if(svm_type != C_SVC &&
2957
svm_type != NU_SVC &&
2958
svm_type != ONE_CLASS &&
2959
svm_type != EPSILON_SVR &&
2961
return "unknown svm type";
2963
// kernel_type, degree
2965
int kernel_type = param->kernel_type;
2966
if(kernel_type != LINEAR &&
2967
kernel_type != POLY &&
2968
kernel_type != RBF &&
2969
kernel_type != SIGMOID &&
2970
kernel_type != PRECOMPUTED)
2971
return "unknown kernel type";
2973
if(param->degree < 0)
2974
return "degree of polynomial kernel < 0";
2976
// cache_size,eps,C,nu,p,shrinking
2978
if(param->cache_size <= 0)
2979
return "cache_size <= 0";
2984
if(svm_type == C_SVC ||
2985
svm_type == EPSILON_SVR ||
2990
if(svm_type == NU_SVC ||
2991
svm_type == ONE_CLASS ||
2993
if(param->nu <= 0 || param->nu > 1)
2994
return "nu <= 0 or nu > 1";
2996
if(svm_type == EPSILON_SVR)
3000
if(param->shrinking != 0 &&
3001
param->shrinking != 1)
3002
return "shrinking != 0 and shrinking != 1";
3004
if(param->probability != 0 &&
3005
param->probability != 1)
3006
return "probability != 0 and probability != 1";
3008
if(param->probability == 1 &&
3009
svm_type == ONE_CLASS)
3010
return "one-class SVM probability output not supported yet";
3013
// check whether nu-svc is feasible
3015
if(svm_type == NU_SVC)
3018
int max_nr_class = 16;
3020
int *label = Malloc(int,max_nr_class);
3021
int *count = Malloc(int,max_nr_class);
3026
int this_label = (int)prob->y[i];
3028
for(j=0;j<nr_class;j++)
3029
if(this_label == label[j])
3036
if(nr_class == max_nr_class)
3039
label = (int *)realloc(label,max_nr_class*sizeof(int));
3040
count = (int *)realloc(count,max_nr_class*sizeof(int));
3042
label[nr_class] = this_label;
3043
count[nr_class] = 1;
3048
for(i=0;i<nr_class;i++)
3051
for(int j=i+1;j<nr_class;j++)
3054
if(param->nu*(n1+n2)/2 > min(n1,n2))
3058
return "specified nu is infeasible";
3069
int svm_check_probability_model(const svm_model *model)
3071
return ((model->param.svm_type == C_SVC || model->param.svm_type == NU_SVC) &&
3072
model->probA!=NULL && model->probB!=NULL) ||
3073
((model->param.svm_type == EPSILON_SVR || model->param.svm_type == NU_SVR) &&
3074
model->probA!=NULL);