29 using namespace KKMLL;
35 #pragma warning(disable : 4996
) 45 const double* dec_values,
95 double tmp = base, ret = 1.0;
97 for (
kkint32 t = times; t > 0; t /= 2)
123 clone (y, _prob.y, l);
143 for (idx = 0; idx <
l; idx++)
174 return x.FileDesc ();
253 cerr << endl <<
"SVM289_BFS::svm_parameter::svm_parameter Not Doing anything with 'paramStr'" << endl << endl;
308 cmdStr <<
"-CalcProb " << ((
probability == 1) ?
"Yes" :
"No") <<
" " 311 <<
"-e " <<
eps <<
" " 312 <<
"-g " <<
gamma <<
" " 315 <<
"-n " <<
nu <<
" " 316 <<
"-p " <<
p <<
" ";
319 cmdStr <<
"-ProbParam " <<
probParam <<
" ";
321 cmdStr <<
"-r " <<
coef0 <<
" " 392 weight_label = (kkint32 *) realloc (weight_label,
sizeof (kkint32) * nr_weight);
393 weight = (
double *) realloc (weight,
sizeof (
double) * nr_weight);
394 weight_label[nr_weight - 1] = atoi (cmd.SubStrPart (2).Str ());
416 <<
"degree" <<
"\t" <<
degree <<
"\t" 417 <<
"gamma" <<
"\t" <<
gamma <<
"\t" 418 <<
"coef0" <<
"\t" <<
coef0 <<
"\t" 420 <<
"eps" <<
"\t" <<
eps <<
"\t" 421 <<
"C" <<
"\t" <<
C <<
"\t" 422 <<
"nr_weight" <<
"\t" <<
nr_weight <<
"\t";
428 if (x > 0) result <<
",";
435 if (x > 0) result <<
",";
441 result <<
"nu" <<
"\t" <<
nu <<
"\t" 442 <<
"p" <<
"\t" <<
p <<
"\t" 443 <<
"shrinking" <<
"\t" <<
shrinking <<
"\t" 447 result <<
"\t" <<
"ProbParam" <<
"\t" <<
probParam;
467 if (field
== "svm_type")
470 else if (field
== "kernel_type")
473 else if (field
== "degree")
476 else if (field
== "gamma")
479 else if (field
== "coef0")
481 else if (field
== "cache_size")
484 else if (field
== "eps")
487 else if (field
== "C")
490 else if (field
== "nr_weight")
514 else if (field
== "nu")
517 else if (field
== "p")
520 else if (field
== "shrinking")
523 else if (field
== "probability")
604 case LINEAR:
return "linear";
605 case POLY:
return "polynomial";
606 case RBF:
return "rbf";
607 case SIGMOID:
return "sigmoid";
626 static void info(
const char *fmt,...)
632 #if defined(USE_SECURE_FUNCS
) 633 vsprintf_s(buf, BUFSIZ, fmt, ap);
635 vsprintf(buf,fmt,ap);
639 (*SVM289_BFS::svm_print_string)(buf);
642 static void info(
const char *fmt,...) {}
692 void lru_delete (head_t * h);
693 void lru_insert (head_t * h);
704 head = (head_t *)calloc (l,
sizeof (head_t));
706 size -= l *
sizeof (head_t) /
sizeof (
Qfloat);
707 size = Max (size, 2 * (kkint32) l);
708 lru_head.next = lru_head.prev = &lru_head;
715 for (head_t* h = lru_head.next; h != &lru_head; h = h->next)
716 {
delete (h->data); h->data = NULL;}
717 delete head; head = NULL;
722 void Cache::lru_delete (head_t *h)
725 h->prev->next = h->next;
726 h->next->prev = h->prev;
731 void Cache::lru_insert (head_t *h)
735 h->prev = lru_head.prev;
747 head_t* h = &head[index];
758 head_t* old = lru_head.next;
760 delete old->data; old->data = NULL;
767 h->data = (Qfloat *)realloc(h->data,
sizeof (Qfloat) * len);
769 SVM289_BFS::swap (h->len, len);
785 lru_delete (&head[i]);
788 lru_delete(&head[j]);
791 SVM289_BFS::swap (head[i].data, head[j].data);
793 SVM289_BFS::swap (head[i].len, head[j].len);
796 lru_insert(&head[i]);
799 lru_insert(&head[j]);
802 SVM289_BFS::swap(i, j);
805 for (head_t* h = lru_head.next; h != &lru_head; h = h->next)
811 SVM289_BFS::swap (h->data[i], h->data[j]);
817 delete h->data; h->data = NULL;
881 x->SwapIndexes (i, j);
883 swap (x_square[i], x_square[j]);
893 FeatureVectorListPtr x;
911 return dot ((*x)[i], (*x)[j]);
917 return powi (gamma * dot((*x)[i], (*x)[j]) + coef0, degree);
923 return exp (-gamma * (x_square[i] + x_square[j] - 2 * dot ((*x)[i], (*x)[j])));
929 return tanh (gamma * dot ((*x)[i], (*x)[j]) + coef0);
936 return preComputed[i][j];
965 selFeatures =
new kkint32[numSelFeatures];
966 for (
kkint32 zed = 0; zed < numSelFeatures; zed++)
967 selFeatures[zed] = _selFeatures
[zed];
992 preComputed =
new float*[l];
993 for (z1 = 0; z1 < l; z1++)
995 preComputed[z1] =
new float[l];
996 for (z2 = 0; z2 < l; z2++)
997 preComputed[z1][z2] = 0.0f;
1003 if (kernel_type ==
RBF)
1005 x_square =
new double[l];
1006 for (kkint32 i = 0; i < l; i++)
1007 x_square[i] = dot ((*x)[i], (*x)[i]);
1021 delete[] selFeatures; selFeatures = NULL;
1022 delete[] x_square; x_square = NULL;
1028 for (z1 = 0; z1 < l; z1++)
1029 delete preComputed[z1];
1052 for (idx = 0; idx < numSelFeatures; idx++)
1054 fn = selFeatures[idx];
1055 sum += fvX[fn] * fvY[fn];
1077 for (idx = 0; idx < numFeatures; idx++)
1079 fn = selFeatures
[idx];
1080 sum += fvX[fn] * fvY[fn];
1115 for (idx = 0; idx < numSelFeatures; idx++)
1117 fn = selFeatures
[idx];
1118 double d = fvX[fn] - fvY[fn];
1122 return exp (-param
.gamma * sum);
1131 <<
"SVM289_BFS::Kernel::k_function ***ERROR*** does not support 'PRECOMPUTED'." << endl
1209 return (
y[i] > 0) ?
Cp :
Cn;
1218 else if(
alpha[i] <= 0)
1253 SVM289_BFS::swap (y[i], y[j]);
1254 SVM289_BFS::swap (G[i], G[j]);
1255 SVM289_BFS::swap (alpha_status[i], alpha_status[j]);
1256 SVM289_BFS::swap (alpha[i], alpha[j]);
1257 SVM289_BFS::swap (p[i], p[j]);
1258 SVM289_BFS::swap (active_set[i],active_set[j]);
1259 SVM289_BFS::swap (G_bar[i], G_bar[j]);
1284 info("\nWarning: using -h 0 may be faster\n");
1294 G[i] +=
alpha[j] * Q_i[j];
1305 double alpha_i =
alpha[i];
1307 G[j] += alpha_i * Q_i[j];
1334 clone(alpha,alpha_,l);
1343 for (
kkint32 i = 0; i < l; i++)
1350 for (
kkint32 i = 0; i < l; i++)
1358 G_bar =
new double[l];
1360 for (i = 0; i < l; i++)
1371 double alpha_i =
alpha[i];
1373 for (j = 0; j < l; j++)
1374 G[j] += alpha_i*Q_i[j];
1378 for (j = 0; j < l; j++)
1388 kkint32 counter = Min (l, 1000) + 1;
1396 counter = Min (l, 1000);
1426 double old_alpha_i =
alpha[i];
1427 double old_alpha_j =
alpha[j];
1431 double quad_coef = Q_i[i] + Q_j[j] +2 *Q_i[j];
1435 double delta = (-
G[i] -
G[j]) / quad_coef;
1456 if (diff > C_i - C_j)
1461 alpha[j] = C_i - diff;
1469 alpha[i] = C_j + diff;
1475 double quad_coef = Q_i[i] + Q_j[j] - 2 * Q_i[j];
1480 double delta = (
G[i] -
G[j]) / quad_coef;
1490 alpha[j] = sum - C_i;
1506 alpha[i] = sum - C_j;
1521 double delta_alpha_i =
alpha[i] - old_alpha_i;
1522 double delta_alpha_j =
alpha[j] - old_alpha_j;
1526 G[k] += Q_i[k] * delta_alpha_i + Q_j[k] * delta_alpha_j;
1542 for (k = 0; k < l; k++)
1543 G_bar[k] -= C_i * Q_i[k];
1547 for (k = 0; k < l; k++)
1548 G_bar[k] += C_i * Q_i[k];
1557 for (k = 0; k < l; k++)
1558 G_bar[k] -= C_j * Q_j[k];
1562 for (k = 0; k < l; k++)
1563 G_bar[k] += C_j * Q_j[k];
1576 for (i = 0; i < l; i++)
1577 v +=
alpha[i] * (
G[i] +
p[i]);
1584 for (
kkint32 i = 0; i < l; i++)
1627 double Gmax2 = -
INF;
1630 double obj_diff_min =
INF;
1660 const Qfloat *Q_i = NULL;
1670 double grad_diff=Gmax+
G[j];
1677 double quad_coef = Q_i[i] +
QD[j] - 2.0 *
y[i] * Q_i[j];
1680 obj_diff = -(grad_diff*grad_diff)/quad_coef;
1682 obj_diff = -(grad_diff*grad_diff)/
TAU;
1684 if (obj_diff <= obj_diff_min)
1687 obj_diff_min = obj_diff;
1696 double grad_diff= Gmax-
G[j];
1702 double quad_coef=Q_i[i]+
QD[j]+2.0*
y[i]*Q_i[j];
1704 obj_diff = -(grad_diff*grad_diff)/quad_coef;
1706 obj_diff = -(grad_diff*grad_diff)/
TAU;
1708 if (obj_diff <= obj_diff_min)
1711 obj_diff_min = obj_diff;
1718 if (Gmax + Gmax2 <
eps)
1735 return (-
G[i] > Gmax1);
1737 return (-
G[i] > Gmax2);
1743 return (
G[i] > Gmax2);
1745 return (
G[i] > Gmax1);
1760 double Gmax1 = -
INF;
1761 double Gmax2 = -
INF;
1796 if ((
unshrink ==
false) && ((Gmax1 + Gmax2) <= (
eps * 10)))
1806 if (be_shrunk(i, Gmax1, Gmax2))
1831 double sum_free = 0;
1835 double yG =
y[i] *
G[i];
1860 r = sum_free / nr_free;
1902 double calculate_rho ();
1911 void do_shrinking ();
1927 double Gmaxp = -
INF;
1928 double Gmaxp2 = -
INF;
1931 double Gmaxn = -
INF;
1932 double Gmaxn2 = -
INF;
1936 double obj_diff_min =
INF;
1967 const Qfloat *Q_ip = NULL;
1968 const Qfloat *Q_in = NULL;
1980 double grad_diff=Gmaxp+
G[j];
1986 double quad_coef = Q_ip[ip]+
QD[j]-2*Q_ip[j];
1988 obj_diff = -(grad_diff*grad_diff)/quad_coef;
1990 obj_diff = -(grad_diff*grad_diff)/
TAU;
1992 if (obj_diff <= obj_diff_min)
1995 obj_diff_min = obj_diff;
2004 double grad_diff=Gmaxn-
G[j];
2005 if (-
G[j] >= Gmaxn2)
2010 double quad_coef = Q_in[in]+
QD[j]-2*Q_in[j];
2012 obj_diff = -(grad_diff*grad_diff)/quad_coef;
2014 obj_diff = -(grad_diff*grad_diff)/
TAU;
2016 if (obj_diff <= obj_diff_min)
2019 obj_diff_min = obj_diff;
2026 if (Max (Gmaxp + Gmaxp2, Gmaxn + Gmaxn2) < eps)
2029 if (
y[Gmin_idx] == +1)
2051 return (-
G[i] > Gmax1);
2053 return (-
G[i] > Gmax4);
2059 return (
G[i] > Gmax2);
2061 return (
G[i] > Gmax3);
2074 double Gmax1 = -
INF;
2075 double Gmax2 = -
INF;
2076 double Gmax3 = -
INF;
2077 double Gmax4 = -
INF;
2090 else if(-
G[i] > Gmax4) Gmax4 = -
G[i];
2097 if (
G[i] > Gmax2) Gmax2 =
G[i];
2099 else if (
G[i] > Gmax3)
2106 if (unshrink ==
false && Max (Gmax1 + Gmax2, Gmax3 + Gmax4) <= eps * 10)
2109 reconstruct_gradient();
2115 if (be_shrunk (i, Gmax1, Gmax2, Gmax3, Gmax4))
2120 if (!be_shrunk (
active_size, Gmax1, Gmax2, Gmax3, Gmax4))
2141 double sum_free1 = 0;
2142 double sum_free2 = 0;
2149 lb1 = Max (lb1, G[i]);
2152 ub1 = Min (ub1, G[i]);
2163 lb2 = Max (lb2, G[i]);
2166 ub2 = Min (ub2, G[i]);
2178 r1 = sum_free1 / nr_free1;
2183 r2 = sum_free2 / nr_free2;
2187 si
->r = (r1+r2) / 2;
2188 return (r1 - r2) / 2;
2208 clone (y, y_, prob.l);
2223 for (j = start; j < len; j++)
2241 SVM289_BFS::swap (y[i],y[j]);
2242 SVM289_BFS::swap (QD[i],QD[j]);
2285 for (j=start; j < len; j++)
2302 SVM289_BFS::swap(QD[i],QD[j]);
2333 sign =
new schar [2 * l];
2336 for (
kkint32 k = 0; k < l; k++)
2347 buffer [0] =
new Qfloat [2 * l];
2348 buffer [1] =
new Qfloat [2 * l];
2355 SVM289_BFS::swap (sign [i], sign [j]);
2356 SVM289_BFS::swap (index [i], index [j]);
2357 SVM289_BFS::swap (QD [i], QD [j]);
2369 for (j = 0; j < l; j++)
2374 Qfloat *buf = buffer [next_buffer];
2375 next_buffer = 1 - next_buffer;
2377 for (j = 0; j < len; j++)
2378 buf[j] = (
Qfloat) si * (
Qfloat) sign[j] * data[index[j]];
2462 double* minus_ones =
new double[l];
2467 for (i = 0; i < l; i++)
2495 double sum_alpha =0;
2497 for (i = 0; i < l; i++)
2498 sum_alpha += alpha[i];
2503 for (i = 0; i < l; i++)
2506 delete[] minus_ones;
2522 double nu = param
->nu;
2526 for (i = 0; i < l; i++)
2535 double sum_pos = nu * l / 2;
2536 double sum_neg = nu * l / 2;
2538 for (i = 0; i < l; i++)
2542 alpha[i] = Min(1.0, sum_pos);
2543 sum_pos -= alpha[i];
2547 alpha[i] = Min(1.0,sum_neg);
2548 sum_neg -= alpha[i];
2552 double *zeros =
new double[l];
2554 for (i = 0; i < l; i++)
2580 for (i = 0; i < l; i++)
2581 alpha[i] *= y[i] / r;
2604 double* zeros =
new double[l];
2610 for (i = 0; i < n; i++)
2614 alpha[n] = param
->nu * prob
->l - n;
2616 for (i = n + 1; i < l; i++)
2619 for (i = 0; i < l; i++)
2658 double* alpha2 =
new double [2 * l];
2659 double* linear_term =
new double [2 * l];
2663 for (i = 0; i < l; i++)
2666 linear_term[i] = param
->p - prob
->y[i];
2670 linear_term [i + l] = param
->p + prob
->y[i];
2692 double sum_alpha = 0;
2693 for (i = 0; i < l; i++)
2695 alpha[i] = alpha2[i] - alpha2[i+l];
2696 sum_alpha += fabs (alpha[i]);
2699 info ("nu = %f\n", sum_alpha / (param
->C * l)
);
2702 delete[] linear_term;
2717 double C = param
->C;
2719 double* alpha2 =
new double [2 * l];
2720 double* linear_term =
new double [2 * l];
2724 double sum = C * param
->nu * l / 2;
2726 for (i = 0; i < l; i++)
2728 alpha2[i] = alpha2[i + l] = Min (sum, C);
2731 linear_term[i] = - prob
->y[i];
2734 linear_term[i + l] = prob
->y[i];
2757 for (i = 0; i < l; i++)
2758 alpha[i] = alpha2[i] - alpha2[i + l];
2761 delete[] linear_term;
2788 double* alpha =
new double [prob
.l];
2815 KKStr errMsg =
"SVM289_BFS::svm_train_one ***ERROR*** Invalid Solver Defined.";
2816 errMsg <<
" Solver[" << (
int)param
.svm_type <<
"]";
2817 _log.Level (-1) << endl << endl << errMsg << endl << endl;
2826 std::vector<kkint32> SVIndex;
2832 if (fabs (alpha[i]) > 0)
2835 SVIndex.push_back (i);
2838 if (fabs (alpha[i]) >= si.upper_bound_p)
2846 if (fabs (alpha[i]) >= si.upper_bound_n)
2861 std::vector<kkint32>::iterator it,it2;
2862 double kvalue = 0.0;
2864 for (it = SVIndex.begin(); it < SVIndex.end(); it++)
2866 for (it2 = SVIndex.begin(); it2 < SVIndex.end(); it2++)
2871 kvalue = Kernel::k_function (prob.x[k], prob.x[kk], param, prob.SelFeatures ());
2873 sum += prob
.y[k] * prob
.y[kk] * alpha[k] * alpha[kk] * kvalue;
2877 sum /= SVIndex.size();
2880 for (it = SVIndex.begin(); it < SVIndex.end(); it++)
2900 const double* dec_values,
2901 const double* labels,
2919 double min_step = 1e-10;
2920 double sigma = 1e-12;
2922 double hiTarget = (prior1 + 1.0) / (prior1 + 2.0);
2923 double loTarget = 1 / (prior0 + 2.0);
2924 double* t =
new double[l];
2925 double fApB, p, q, h11, h22, h21, g1, g2, det, dA, dB, gd, stepsize;
2926 double newA, newB, newf, d1, d2;
2931 B = log ((prior0 + 1.0) / (prior1 + 1.0));
2934 for (i = 0; i < l; i++)
2941 fApB = dec_values[i] * A + B;
2944 fval += t[i] * fApB + log (1 + exp (-fApB));
2946 fval += (t[i] - 1) * fApB + log (1 + exp (fApB));
2949 for (iter=0; iter < max_iter; iter++)
2957 for (i = 0; i < l; i++)
2959 fApB = dec_values[i] * A + B;
2962 p = exp (-fApB) / (1.0 + exp(-fApB));
2963 q = 1.0 / (1.0 + exp(-fApB));
2967 p = 1.0 / (1.0 + exp (fApB));
2968 q = exp (fApB) / (1.0 + exp (fApB));
2972 h11 += dec_values[i] * dec_values[i] * d2;
2974 h21 += dec_values[i] * d2;
2976 g1 += dec_values[i] * d1;
2981 if ((fabs (g1) < eps) && (fabs(g2) < eps))
2985 det = h11 * h22 - h21 * h21;
2986 dA = -(h22*g1 - h21 * g2) / det;
2987 dB = -(-h21 * g1 + h11 * g2) / det;
2988 gd = g1 * dA + g2 * dB;
2992 while (stepsize >= min_step)
2994 newA = A + stepsize * dA;
2995 newB = B + stepsize * dB;
2999 for (i = 0; i < l; i++)
3001 fApB = dec_values[i] * newA + newB;
3003 newf += t[i] * fApB + log (1 + exp (-fApB));
3005 newf += (t[i] - 1) * fApB + log (1 + exp (fApB));
3008 if (newf < fval + 0.0001 * stepsize * gd)
3016 stepsize = stepsize / 2.0;
3019 if (stepsize < min_step)
3021 info("Line search fails in two-class probability estimates\n");
3026 if (iter >= max_iter)
3027 info ("Reaching maximal iterations in two-class probability estimates\n");
3029 delete[] t; t = NULL;
3039 double fApB = decision_value * A + B;
3041 return exp (-fApB) / (1.0 + exp (-fApB));
3043 return 1.0 / (1 + exp (fApB));
3058 kkint32 max_iter = Max (100, k);
3060 double** Q =
new double*[k];
3061 double* Qp =
new double[k];
3063 double eps = 0.005 / k;
3065 for (t = 0; t < k; t++)
3068 Q[t] =
new double[k];
3070 for (j = 0; j < t; j++)
3072 Q[t][t] += r[j][t] * r[j][t];
3076 for (j = t + 1; j < k; j++)
3078 Q[t][t] += r[j][t] * r[j][t];
3079 Q[t][j] =- r[j][t] * r[t][j];
3083 for (iter = 0; iter <max_iter; iter++)
3087 for (t = 0; t < k; t++)
3090 for (j = 0; j < k; j++)
3091 Qp[t] += Q[t][j] * p[j];
3092 pQp += p[t] * Qp[t];
3094 double max_error = 0;
3095 for (t = 0; t < k; t++)
3097 double error = fabs (Qp[t] - pQp);
3098 if (error > max_error)
3102 if (max_error < eps)
3105 for (t = 0; t < k; t++)
3107 double diff = (-Qp[t] +pQp) / Q[t][t];
3109 pQp = (pQp + diff * (diff * Q[t][t] + 2 * Qp[t])) / (1 + diff) / (1 + diff);
3110 for (j = 0; j < k; j++)
3112 Qp[j] = (Qp[j] + diff * Q[t][j]) / (1 + diff);
3117 if (iter >= max_iter)
3118 info ("Exceeds max_iter in multiclass_prob\n");
3120 for (t = 0; t < k; t++)
3121 {
delete Q[t]; Q[t] = NULL;}
3124 delete Qp; Qp = NULL;
3144 FeatureVectorPtr* subX = NULL;
3147 double *dec_values =
new double[prob
->l];
3150 for (i = 0; i < prob
->l; i++)
3153 for (i = 0; i < prob
->l; i++)
3155 kkint32 j = i + rand() % (prob->l-i);
3156 SVM289_BFS::swap (perm[i], perm[j]);
3159 for (i = 0; i < nr_fold; i++)
3166 subX =
new FeatureVectorPtr[subL];
3167 for (j = 0; j < subL; j++)
3169 float* subY =
new float[subL];
3172 for (j = 0; j < begin; j++)
3174 subX[k] = prob->x.IdxToPtr (perm[j]);
3175 subY[k] = (
float)prob
->y[perm[j]];
3179 for (j = end; j < prob
->l; j++)
3181 subX[k] = prob->x.IdxToPtr (perm[j]);
3182 subY[k] = (
float)prob
->y[perm[j]];
3187 FeatureVectorListPtr subXX =
new FeatureVectorList (prob->x.FileDesc (),
false);
3188 for (j = 0; j < k; j++)
3194 kkint32 p_count=0, n_count = 0;
3196 for (j = 0; j < k; j++)
3205 if ((p_count == 0) && (n_count == 0))
3207 for (j = begin; j < end; j++)
3208 dec_values[perm[j]] = 0;
3211 else if ((p_count > 0) && (n_count == 0))
3213 for (j = begin; j < end; j++)
3214 dec_values[perm[j]] = 1;
3217 else if ((p_count == 0) && (n_count > 0))
3219 for (j = begin; j < end; j++)
3220 dec_values[perm[j]] = -1;
3230 subparam
.weight =
new double[2];
3237 for (j = begin; j < end; j++)
3239 svm_predict_values (submodel, prob->x[perm[j]], &(dec_values[perm[j]]));
3241 dec_values[perm[j]] *= submodel
->label[0];
3244 delete submodel; submodel = NULL;
3248 delete subProb; subProb = NULL;
3249 delete subX; subX = NULL;
3250 delete subY; subY = NULL;
3254 delete dec_values; dec_values = NULL;
3255 delete perm; perm = NULL;
3272 double *ymv =
new double[prob
.l];
3279 for (i = 0; i < prob
.l; i++)
3281 ymv[i] = prob
.y[i] - ymv[i];
3282 mae += fabs (ymv[i]);
3286 double std = sqrt (2 * mae * mae);
3289 for (i = 0; i < prob
.l; i++)
3291 if (fabs(ymv[i]) > (5 * std))
3294 mae += fabs (ymv[i]);
3297 mae /= (prob
.l - count);
3299 info("Prob. model for test data: target value = predicted value + z,\nz: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma= %g\n", mae
);
3300 delete ymv; ymv = NULL;
3327 for (i = 0; i < l; i++)
3331 for (j = 0; j < nr_class; j++)
3333 if (this_label == label[j])
3344 if (nr_class == max_nr_class)
3346 kkint32 newMaxNumClass = max_nr_class * 2;
3347 label = GrowAllocation (label, max_nr_class, newMaxNumClass);
3348 count = GrowAllocation (count, max_nr_class, newMaxNumClass);
3349 max_nr_class = newMaxNumClass;
3351 label[nr_class] = this_label;
3352 count[nr_class] = 1;
3359 for (i = 1; i < nr_class; i++)
3360 start[i] = start[i - 1] + count[i - 1];
3362 for (i = 0; i < l; i++)
3364 perm[start[data_label[i]]] = i;
3365 ++start[data_label[i]];
3369 for (i = 1; i < nr_class; i++)
3370 start[i] = start[i - 1] + count[i - 1];
3372 *nr_class_ret = nr_class;
3376 delete data_label; data_label = NULL;
3414 model
->rho =
new double[1];
3419 for (i = 0; i < prob
.l; i++)
3421 if (fabs(f.alpha[i]) > 0)
3428 model->SV.Owner (
true);
3431 for (i = 0; i < prob
.l; i++)
3433 if (fabs (f.alpha[i]) > 0)
3436 model->SV.PushOnBack (
new FeatureVector (prob.x[i]));
3437 model->sv_coef[0][j] = f.alpha[i];
3463 kkint32 numBinaryCombos = nr_class * (nr_class - 1) / 2;
3469 for (i = 0; i < l; i++)
3472 x.PushOnBack (prob.x.IdxToPtr (perm[i]));
3476 double* weighted_C =
new double[nr_class];
3477 for (i = 0; i < nr_class; i++)
3478 weighted_C[i] = param
.C;
3483 for (j = 0; j < nr_class; j++)
3490 fprintf(stderr,
"warning: class label %d specified in weight is not found\n", param.weight_label[i]);
3492 weighted_C[j] *= param
.weight[i];
3497 bool *nonzero =
new bool[l];
3499 for (i = 0; i < l; i++)
3504 double* probA = NULL;
3505 double* probB = NULL;
3509 probA =
new double[numBinaryCombos];
3510 probB =
new double[numBinaryCombos];
3514 for (i = 0; i < nr_class; i++)
3516 for (
kkint32 j = i + 1; j < nr_class; j++)
3519 kkint32 si = start[i], sj = start[j];
3520 kkint32 ci = count[i], cj = count[j];
3521 sub_prob
.l = ci + cj;
3523 sub_prob
.y =
new double[sub_prob
.l];
3525 for (k = 0; k < ci; k++)
3528 sub_prob.x.PushOnBack (x.IdxToPtr (si + k));
3531 for (k = 0; k < cj; k++)
3534 sub_prob.x.PushOnBack (x.IdxToPtr (sj + k));
3535 sub_prob
.y[ci + k] = -1;
3544 for (k = 0; k < ci; k++)
3546 if (!nonzero[si + k] && fabs(f[p].alpha[k]) > 0)
3547 nonzero[si + k] =
true;
3550 for (k = 0; k < cj; k++)
3552 if (!nonzero[sj + k] && fabs(f[p].alpha[ci+k]) > 0)
3553 nonzero[sj + k] =
true;
3571 for (i = 0; i < nr_class; i++)
3574 model
->rho =
new double[numBinaryCombos];
3575 for (i = 0; i < numBinaryCombos; i++)
3580 model
->probA =
new double[numBinaryCombos];
3581 model
->probB =
new double[numBinaryCombos];
3582 for (i = 0; i < numBinaryCombos; i++)
3598 for (i = 0; i < nr_class; i++)
3601 for (
kkint32 j = 0; j < count[i]; j++)
3603 if (nonzero[start[i] + j])
3609 model
->nSV[i] = nSV;
3613 info("Total nSV = %d\n",total_sv
);
3615 model
->l = total_sv;
3617 model->SV.DeleteContents ();
3618 model->SV.Owner (
false);
3622 for (i = 0; i < l; i++)
3627 model->SV.PushOnBack (x.IdxToPtr (i));
3634 for (i = 1; i < nr_class; i++)
3635 nz_start[i] = nz_start[i - 1] + nz_count[i - 1];
3637 model
->sv_coef =
new double*[nr_class - 1];
3638 for (i = 0; i < nr_class - 1; i++)
3639 model
->sv_coef[i] =
new double[total_sv];
3642 for (i = 0; i < nr_class; i++)
3644 for (
kkint32 j = i + 1; j < nr_class; j++)
3658 for (k = 0; k < ci; k++)
3660 if (nonzero[si + k])
3665 for (k = 0; k < cj; k++)
3667 if (nonzero[sj + k])
3674 delete[] label; label = NULL;
3675 delete[] probA; probA = NULL;
3676 delete[] probB; probB = NULL;
3677 delete[] count; count = NULL;
3678 delete[] perm; perm = NULL;
3679 delete[] start; start = NULL;
3681 delete[] weighted_C; weighted_C = NULL;
3682 delete[] nonzero; nonzero = NULL;
3683 for (i = 0; i < numBinaryCombos; i++)
3688 delete[] f; f = NULL;
3689 delete[] nz_count; nz_count = NULL;
3690 delete[] nz_start; nz_start = NULL;
3729 for (i = 0; i < l; i++)
3732 for (c = 0; c < nr_class; c++)
3734 for (i = 0; i < count[c]; i++)
3736 kkint32 j = i + rand() % (count[c]-i);
3737 SVM289_BFS::swap (index[start[c]+j], index[start[c]+i]);
3741 for (i = 0; i < nr_fold; i++)
3744 for (c = 0; c < nr_class; c++)
3745 fold_count[i] += (i + 1) * count[c] / nr_fold - i * count[c] / nr_fold;
3749 for (i = 1; i <= nr_fold; i++)
3750 fold_start[i] = fold_start[i-1] + fold_count[i-1];
3752 for (c=0; c<nr_class;c++)
3754 for(i=0;i<nr_fold;i++)
3756 kkint32 begin = start[c]+i*count[c]/nr_fold;
3757 kkint32 end = start[c]+(i+1)*count[c]/nr_fold;
3758 for(
kkint32 j=begin;j<end;j++)
3760 perm[fold_start[i]] = index[j];
3767 for (i=1;i<=nr_fold;i++)
3768 fold_start[i] = fold_start[i-1]+fold_count[i-1];
3770 delete[] start; start = NULL;
3771 delete[] label; label = NULL;
3772 delete[] count; count = NULL;
3773 delete[] index; index = NULL;
3774 delete[] fold_count; fold_count = NULL;
3778 for (i = 0; i < l; i++)
3781 for (i = 0; i < l; i++)
3783 kkint32 j = i + rand() % (l - i);
3784 SVM289_BFS::swap (perm[i], perm[j]);
3786 for (i = 0; i <= nr_fold; i++)
3787 fold_start[i] = i * l / nr_fold;
3790 for (i = 0; i < nr_fold; i++)
3792 kkint32 begin = fold_start[i];
3793 kkint32 end = fold_start[i+1];
3798 subprob
.l = l - (end - begin);
3801 subprob
.y =
new double[subprob
.l];
3804 for (j = 0; j < begin; j++)
3807 subprob.x.PushOnBack (prob.x.IdxToPtr (perm[j]));
3808 subprob
.y[k] = prob
.y[perm[j]];
3812 for (j = end; j < l; j++)
3815 subprob.x.PushOnBack (prob.x.IdxToPtr (perm[j]));
3816 subprob
.y[k] = prob
.y[perm[j]];
3826 for (j = begin; j < end; j++)
3827 target[perm[j]] = svm_predict_probability (submodel, prob.x[perm[j]], prob_estimates, votes);
3828 delete[] prob_estimates; prob_estimates = NULL;
3829 delete[] votes; votes = NULL;
3833 for (j = begin; j < end; j++)
3834 target[perm[j]] = svm_predict (submodel, prob.x[perm[j]]);
3842 delete subprob
.y; subprob
.y = NULL;
3845 delete[] fold_start; fold_start = NULL;
3846 delete[] perm; perm = NULL;
3887 fprintf(stderr,
"Model doesn't contain information for SVR probability inference\n");
3907 for (kkint32 i = 0; i < model->l; i++)
3908 sum += sv_coef[i] * Kernel::k_function (x,
3913 sum -= model
->rho[0];
3922 double *kvalue =
new double[l];
3923 for (i = 0; i < l; i++)
3924 kvalue[i] = Kernel::k_function (x, model->SV[i], model->param, model->selFeatures);
3928 for (i = 1; i < nr_class; i++)
3929 start[i] = start[i-1]+model
->nSV[i-1];
3932 for (i = 0; i < nr_class; i++)
3934 for (
kkint32 j = i + 1; j < nr_class; j++)
3945 for (k = 0; k < ci; k++)
3946 sum += coef1[si + k] * kvalue[si + k];
3949 for (k = 0; k < cj; k++)
3950 sum += coef2[sj + k] * kvalue[sj + k];
3952 sum -= model
->rho[p];
3953 dec_values[p] = sum;
3958 delete kvalue; kvalue = NULL;
3959 delete start; start = NULL;
3982 return (res > 0) ? 1:-1;
3990 double *dec_values =
new double[nr_class * (nr_class - 1) / 2];
3994 for (i = 0; i < nr_class; i++)
3998 for (i = 0; i < nr_class; i++)
4000 for (
kkint32 j = i + 1; j < nr_class; j++)
4002 if (dec_values[pos++] > 0)
4010 for(i = 1; i < nr_class; i++)
4012 if (vote[i] > vote[vote_max_idx])
4016 delete[] vote; vote = NULL;
4017 delete[] dec_values; dec_values = NULL;
4019 return model
->label[vote_max_idx];
4029 double* classProbabilities,
4036 ((model
->probA != NULL && model
->probB != NULL) || (probParam > 0.0))
4046 for (i = 0; i < nr_class; i++)
4051 double min_prob = 1e-7;
4054 for (i = 0; i < nr_class; i++)
4056 for (
kkint32 j = i + 1; j < nr_class; j++)
4058 if (probParam > 0.0)
4060 double probability = (
double)(1.0 / (1.0 + exp (-1.0 * probParam * dec_values[k])));
4061 pairwise_prob[i][j] = Min (Max (probability, min_prob), 1 - min_prob);
4062 pairwise_prob[j][i] = 1 - pairwise_prob[i][j];
4066 pairwise_prob[i][j] = Min (Max (sigmoid_predict (dec_values[k], model->probA[k], model->probB[k]), min_prob), 1 - min_prob);
4067 pairwise_prob[j][i] = 1 - pairwise_prob[i][j];
4070 if (pairwise_prob[i][j] > 0.5)
4087 for (i = 1; i < nr_class; i++)
4089 if (prob_estimates[i] > prob_estimates[prob_max_idx])
4093 for (i = 0; i < nr_class; i++)
4094 classProbabilities[model
->label[i]] = prob_estimates[i];
4096 return model
->label[prob_max_idx];
4131 FeatureVectorList::const_iterator idx;
4132 SV.Owner (weOwnSupportVectors);
4133 for (idx = _model.SV.begin (); idx != _model.SV.end (); idx++)
4135 FeatureVectorPtr fv = *idx;
4136 if (weOwnSupportVectors)
4137 SV.push_back (
new FeatureVector (*fv));
4146 for (
kkint32 j = 0; j < m; j++)
4157 rho =
new double[numBinaryCombos];
4158 for (
kkint32 i = 0; i < numBinaryCombos; i++)
4164 probA =
new double[numBinaryCombos];
4165 for (
kkint32 i = 0; i < numBinaryCombos; i++)
4171 probB =
new double[numBinaryCombos];
4172 for (
kkint32 i = 0; i < numBinaryCombos; i++)
4268 if (weOwnSupportVectors)
4277 for (i = 0; i < (
nr_class - 1); i++)
4291 for (i = 0; i < (
nr_class - 1); i++)
4337 o <<
"<Svm_Model>" << endl;
4338 o <<
"svm_type" <<
"\t" << SVM_Type_ToStr (param.svm_type) << endl;
4339 o <<
"kernel_type" <<
"\t" << Kernel_Type_ToStr (param.kernel_type) << endl;
4341 if (param.kernel_type == POLY)
4342 o <<
"degree" <<
"\t" << param.degree << endl;
4344 if (param.kernel_type == POLY || param.kernel_type == RBF || param.kernel_type == SIGMOID)
4345 o <<
"gamma" <<
"\t" << param.gamma << endl;
4347 if (param.kernel_type == POLY || param.kernel_type == SIGMOID)
4348 o <<
"coef0" <<
"\t" << param.coef0 << endl;
4350 o <<
"SelFeatures" <<
"\t" << selFeatures.ToCommaDelStr () << endl;
4352 o <<
"nr_class" <<
"\t" << nr_class << endl;
4355 o <<
"total_sv" <<
"\t" << l << endl;
4359 for (kkint32 i = 0; numBinaryCombos; i++)
4360 o <<
"\t" << rho[i];
4367 for (kkint32 i = 0; i < nr_class; i++)
4368 o <<
"\t" << label[i];
4375 for (kkint32 i = 0; i < numBinaryCombos; i++)
4376 o <<
"\t" << probA[i];
4383 for (kkint32 i = 0; i < numBinaryCombos; i++)
4384 o <<
"\t" << probB[i];
4391 for (kkint32 i = 0; i < nr_class; i++)
4392 o <<
"\t" << nSV[i];
4401 kkint32 origPrec = (kkint32)o.precision ();
4406 o <<
"\t" << sv_coef[j][i];
4415 o <<
"\t" << p.FeatureData (0);
4419 for (kkint32 zed = 0; zed < p.NumOfFeatures (); zed++)
4420 o <<
"\t" << zed <<
":" << p.FeatureData (zed);
4425 o <<
"</Svm_Model>" << endl;
4442 SV.DeleteContents ();
4445 char* buff =
new char[buffLen];
4454 in.getline (buff,
sizeof (buffLen));
4458 if (line.SubStrPart (0, 1) ==
"//")
4461 KKStr fieldName = line.ExtractToken2 (
"\t\n\r");
4462 if (fieldName.EqualIgnoreCase (
"</Svm_Model>"))
4465 if (fieldName.EqualIgnoreCase (
"svm_type"))
4467 param.svm_type = SVM_Type_FromStr (line);
4468 if (param.svm_type == SVM_Type::SVM_NULL)
4470 KKStr errorMsg =
"SVM289_BFS::svm_model::Read ***ERROR*** Invalid SVM_Type[" + line +
"].";
4471 log.Level (-1) << endl << errorMsg << endl << endl;
4478 else if (fieldName.EqualIgnoreCase (
"kernel_type") == 0)
4480 param.kernel_type = Kernel_Type_FromStr (line);
4481 if (param.kernel_type == Kernel_NULL)
4483 KKStr errorMsg =
"SVM289_BFS::svm_model::Read ***ERROR*** Invalid kernel_type[" + line +
"].";
4484 log.Level (-1) << endl << errorMsg << endl << endl;
4485 delete[] buff; buff = NULL;
4490 else if (fieldName.EqualIgnoreCase (
"degree"))
4491 param.degree = line.ExtractTokenInt (
"\t\n\r");
4493 else if (fieldName.EqualIgnoreCase (
"gamma"))
4494 param.gamma = line.ExtractTokenDouble (
"\t\n\r");
4496 else if (fieldName.EqualIgnoreCase (
"coef0"))
4497 param.coef0 = line.ExtractTokenDouble (
"\t\n\r");
4499 else if (fieldName.EqualIgnoreCase (
"nr_class"))
4501 nr_class = line.ExtractTokenInt (
"\t\n\r");
4502 numBinaryCombos = nr_class * (nr_class - 1) / 2;
4505 else if (fieldName.EqualIgnoreCase (
"total_sv"))
4506 l = line.ExtractTokenInt (
"\t\n\r");
4508 else if (fieldName.EqualIgnoreCase (
"rho"))
4510 rho =
new double[numBinaryCombos];
4511 for (kkint32 i = 0; i < numBinaryCombos; i++)
4512 rho[i] = line.ExtractTokenDouble (
"\t\n\r");
4515 else if (fieldName.EqualIgnoreCase (
"label"))
4517 label =
new kkint32[nr_class];
4518 for (kkint32 i=0; i < nr_class; i++)
4519 label[i] = line.ExtractTokenInt (
"\t\n\r");
4522 else if (fieldName.EqualIgnoreCase (
"probA"))
4524 probA =
new double[numBinaryCombos];
4525 for (kkint32 i = 0; i < numBinaryCombos; i++)
4526 probA[i] = line.ExtractTokenDouble (
"\t\n\r");
4529 else if (fieldName.EqualIgnoreCase (
"probB"))
4531 probB =
new double[numBinaryCombos];
4532 for (kkint32 i = 0; i < numBinaryCombos; i++)
4533 probB[i] = line.ExtractTokenDouble (
"\t\n\r");
4536 else if (fieldName.EqualIgnoreCase (
"nr_sv"))
4538 nSV =
new kkint32[nr_class];
4539 for (kkint32 i = 0; i < nr_class; i++)
4540 nSV[i] = line.ExtractTokenInt (
"\t\n\r");
4543 else if (fieldName.EqualIgnoreCase (
"SelFeatures"))
4546 selFeatures = FeatureNumList (line, valid);
4550 else if (fieldName.EqualIgnoreCase (
"SupportVector"))
4554 kkint32 m = nr_class - 1;
4559 sv_coef =
new double*[m];
4560 for (i = 0; i < m; i++)
4562 sv_coef[i] =
new double[l];
4563 for (j = 0; j < l; j++)
4564 sv_coef[i][j] = 0.0;
4568 if (SV.QueueSize () >= l)
4570 KKStr errorMsg =
"SVM289_BFS::svm_model::Read ***ERROR*** To many Support Vector's Defined.";
4571 log.Level (-1) << endl << errorMsg << endl << endl;
4580 KKStr imageFileName = line.ExtractToken2 (
"\t");
4582 FeatureVectorPtr fv =
new FeatureVector (fileDesc->NumOfFields ());
4583 fv->ExampleFileName (imageFileName);
4585 for (j = 0; (j < (nr_class - 1)) && (!eol); j++)
4586 sv_coef[j][i] = line.ExtractTokenDouble (
"\t");
4588 if (param.kernel_type == PRECOMPUTED)
4590 log.Level (-1) << endl << endl
4591 <<
"SVM289_BFS::svm_model::Read ***ERROR*** PRECOMPUTED Can not Handle." << endl
4596 for (kkuint32 zed = 0; (zed < fileDesc->NumOfFields ()) && (!eol); zed++)
4598 KKStr featureField = line.ExtractToken2 (
"\t");
4599 kkint32 featureNum = featureField.ExtractTokenInt (
":");
4600 float featureValue = (
float)featureField.ExtractTokenDouble (
"\t\n\r");
4601 fv->FeatureData (featureNum, featureValue);
4629 double totalProb = 0.0;
4653 if (model->weOwnSupportVectors)
4654 model->SV.Owner (
true);
4656 model->SV.Owner (
false);
4686 return "unknown svm type";
4692 if (kernel_type !=
LINEAR &&
4693 kernel_type !=
POLY &&
4694 kernel_type !=
RBF &&
4698 return "unknown kernel type";
4701 return "degree of polynomial kernel < 0";
4706 return "cache_size <= 0";
4708 if (param
->eps <= 0)
4722 if ((param
->nu <= 0) || (param
->nu > 1))
4723 return "nu <= 0 or nu > 1";
4732 return "shrinking != 0 and shrinking != 1";
4735 return "probability != 0 and probability != 1";
4738 return "one-class SVM probability output not supported yet";
4752 for (i = 0; i < l; i++)
4756 for (j = 0; j < nr_class; j++)
4758 if (this_label == label[j])
4767 if (nr_class == max_nr_class)
4769 kkint32 oldMaxNrClass = max_nr_class;
4771 label = GrowAllocation (label, oldMaxNrClass, max_nr_class);
4772 count = GrowAllocation (count, oldMaxNrClass, max_nr_class);
4774 label[nr_class] = this_label;
4775 count[nr_class] = 1;
4781 for (i = 0; i < nr_class; i++)
4784 for (
kkint32 j = i + 1; j < nr_class; j++)
4787 if ((param->nu * (n1 + n2) / 2) > Min (n1, n2))
4789 delete[] label; label = NULL;
4790 delete[] count; count = NULL;
4791 return "specified nu is infeasible";
4796 delete[] label; label = NULL;
4797 delete[] count; count = NULL;
KKStr(kkint32 size)
Creates a KKStr object that pre-allocates space for 'size' characters.
void PushOnBack(FeatureVectorPtr image)
Overloading the PushOnBack function in KKQueue so we can monitor the Version and Sort Order...
static void print_string_stdout(const char *s)
kkint32 svm_check_probability_model(const svm_model *model)
void svm_cross_validation(const svm_problem &prob, const svm_parameter &param, kkint32 nr_fold, double *target, RunLog &log)
svm_parameter & operator=(const svm_parameter &right)
Qfloat * get_Q(kkint32 i, kkint32 len) const
bool EqualIgnoreCase(const char *s2) const
KKStr ToCmdLineStr() const
svm_model(const svm_model &_model, FileDescPtr _fileDesc, RunLog &_log)
T * GrowAllocation(T *src, kkint32 origSize, kkint32 newSize)
FeatureVectorList(const FeatureVectorList &examples, bool _owner)
Create a duplicate list; depending on the '_owner' parameter, it may also duplicate the contents...
const float * FeatureData() const
Returns as a pointer to the feature data itself.
KKStr ExtractToken2(const char *delStr="\n\t\r ")
Extract first Token from the string.
Keeps track of selected features.
FeatureNumList(FileDescPtr _fileDesc)
virtual Qfloat * get_Q(kkint32 column, kkint32 len) const =0
Namespace used to wrap implementation of libSVM version 2.89 to be used as a pair-wise SVM...
KKStr SVM_Type_ToStr(SVM_Type svmType)
KKStr Kernel_Type_ToStr(Kernel_Type kernelType)
svm_model(const svm_parameter &_param, const FeatureNumList &_selFeatures, FileDescPtr _fileDesc, RunLog &_log)
void swap_index(kkint32 i, kkint32 j)
FeatureNumList selFeatures
void reconstruct_gradient()
virtual kkint32 select_working_set(kkint32 &i, kkint32 &j)
static void info(const char *fmt,...)
void svm_group_classes(const svm_problem *prob, kkint32 *nr_class_ret, kkint32 **label_ret, kkint32 **start_ret, kkint32 **count_ret, kkint32 *perm)
bool operator==(const char *rtStr) const
Kernel_Type Kernel_Type_FromStr(KKStr s)
const char * svm_check_parameter(const svm_problem *prob, const svm_parameter *param)
virtual Qfloat * get_QD() const =0
void Solve(kkint32 l, QMatrix &Q, const double *p, const schar *y, double *alpha, double Cp, double Cn, double eps, SolutionInfo *si, kkint32 shrinking)
SVM_Type svm_get_svm_type(const svm_model *model)
virtual double calculate_rho()
void swap_index(kkint32 i, kkint32 j)
void solve_epsilon_svr(const svm_problem *prob, const svm_parameter *param, double *alpha, Solver::SolutionInfo *si, RunLog &_log)
double sigmoid_predict(double decision_value, double A, double B)
void svm_predict_values(const svm_model *model, const FeatureVector &x, double *dec_values)
Kernel(const FeatureVectorList &_x, const FeatureNumList &_selFeatures, const svm_parameter &_param, RunLog &_log)
svm_problem(const svm_problem &_prob)
void ProcessSvmParameter(const KKStr &cmd, const KKStr &value, bool &parmUsed)
FeatureNumList(const FeatureNumList &featureNumList)
Copy constructor.
void swap_index(kkint32 i, kkint32 j)
FeatureVectorList(FileDescPtr _fileDesc, bool _owner)
Will create a new empty list of FeatureVector's.
double svm_predict_probability(svm_model *model, const FeatureVector &x, double *prob_estimates, kkint32 *votes)
void Read(istream &i, FileDescPtr fileDesc, RunLog &log)
kkint32 svm_get_nr_class(const svm_model *model)
void svm_get_labels(const svm_model *model, kkint32 *label)
Container class for FeatureVector derived objects.
Qfloat * get_Q(kkint32 i, kkint32 len) const
double(Kernel::* kernel_function)(kkint32 i, kkint32 j) const
svm_model(istream &_fileName, FileDescPtr _fileDesc, RunLog &_log)
void solve_one_class(const svm_problem *prob, const svm_parameter *param, double *alpha, Solver::SolutionInfo *si, RunLog &_log)
void swap_index(kkint32 i, kkint32 j)
void multiclass_probability(kkint32 k, double **r, double *p)
kkuint16 operator[](kkint32 idx) const
Returns back the selected feature.
svm_parameter(const svm_parameter &_param)
static double k_function(const FeatureVector &x, const FeatureVector &y, const svm_parameter &param, const FeatureNumList &selFeatures)
Kernel evaluation.
KKStr ToTabDelStr() const
Cache(kkint32 l, kkint32 size)
Kernel Cache.
double ExtractTokenDouble(const char *delStr)
void svm_destroy_param(svm_parameter *&param)
virtual void swap_index(kkint32 i, kkint32 j)
Qfloat * get_Q(kkint32 i, kkint32 len) const
static double DotStatic(const FeatureVector &px, const FeatureVector &py, const FeatureNumList &selFeatures)
FileDescPtr FileDesc() const
double powi(double base, kkint32 times)
void ParseTabDelStr(const KKStr &_str)
SVR_Q(const svm_problem &prob, const svm_parameter &param, RunLog &_log)
double svm_predict(const struct svm_model *model, const FeatureVector &x)
ONE_CLASS_Q(const svm_problem &prob, const svm_parameter &param, RunLog &_log)
double svm_svr_probability(const svm_problem &prob, const svm_parameter &param, RunLog &log)
void svm_binary_svc_probability(const svm_problem *prob, const svm_parameter *param, double Cp, double Cn, double &probA, double &probB, RunLog &log)
static KKStr Concat(const std::vector< std::string > &values)
Concatenates the list of 'std::string' strings.
void Upper()
Converts all characters in string to their Upper case equivalents via 'toupper'.
bool is_upper_bound(kkint32 i)
void NormalizeProbability()
Deriving multiclass probability as done in "Recognizing Plankton Images From the SIPPER".
svm_problem(const FeatureVectorList &_x, const float *_y, const FeatureNumList &_selFeatures)
virtual void swap_index(kkint32 i, kkint32 j)=0
void sigmoid_train(kkint32 l, const double *dec_values, const double *labels, double &A, double &B)
decision_function svm_train_one(const svm_problem &prob, const svm_parameter ¶m, double Cp, double Cn, RunLog &_log)
double svm_get_svr_probability(const svm_model *model)
svm_model * svm_train(const svm_problem &prob, const svm_parameter &param, RunLog &log)
std::ostream &__cdecl operator<<(std::ostream &os, const KKStr &str)
kkint32 ExtractTokenInt(const char *delStr)
svm_parameter(KKStr &paramStr)
void update_alpha_status(kkint32 i)
kkint32 NumSelFeatures() const
void svm_destroy_model(struct svm_model *&model)
SVM_Type SVM_Type_FromStr(KKStr s)
virtual void do_shrinking()
virtual Qfloat * get_QD() const =0
Used for logging messages.
void EncodeProblem(const struct svm_parameter &param, struct svm_problem &prob_in, struct svm_problem &prob_out)
svm_model(FileDescPtr _fileDesc, RunLog &_log)
void swap_index(kkint32 i, kkint32 j)
virtual Qfloat * get_Q(kkint32 column, kkint32 len) const =0
KKException(const KKStr &_exceptionStr)
bool ToBool() const
Returns the bool equivalent of the string, ex 'Yes' = true, 'No' = false, 'True' = true...
FeatureNumList selFeatures
Represents a Feature Vector of a single example, labeled or unlabeled.
static void solve_nu_svr(const svm_problem *prob, const svm_parameter *param, double *alpha, Solver::SolutionInfo *si, RunLog &_log)
kkint32 get_data(const kkint32 index, Qfloat **data, kkint32 len)
Request data [0,len)
bool is_lower_bound(kkint32 i)
void(* svm_print_string)(const char *)
SVC_Q(const svm_problem &prob, const svm_parameter &param, const schar *y_, RunLog &_log)
svm_problem(const FeatureNumList &_selFeatures, FileDescPtr _fileDesc, RunLog &_log)
void Solve(kkint32 l, QMatrix &Q, const double *p_, const schar *y_, double *alpha_, double Cp, double Cn, double eps, SolutionInfo *si, kkint32 shrinking)
void solve_nu_svc(const svm_problem *prob, const svm_parameter *param, double *alpha, Solver::SolutionInfo *si, RunLog &_log)
void solve_c_svc(const svm_problem *prob, const svm_parameter *param, double *alpha, Solver::SolutionInfo *si, double Cp, double Cn, RunLog &_log)
const FeatureNumList & SelFeatures() const
const KKStr & ExampleFileName() const
Name of file that this FeatureVector was computed from.