char * training_data
  PREINIT:
    I32* temp;
  PPCODE:
    temp = PL_markstack_ptr++;
    Inline_Stack_Vars;
    Inline_Stack_Reset;
    int i;
    ccv_enable_default_cache();
    ccv_dense_matrix_t* image = 0;
    /* TODO: Make the cascade accessible from the outside */
    ccv_bbf_classifier_cascade_t* cascade = ccv_bbf_read_classifier_cascade(training_data);
    ccv_read(filename, &image, CCV_IO_GRAY | CCV_IO_ANY_FILE);
    if (image != 0)
    {
        /* TODO: Make the BBF parameters accessible from the outside */
        ccv_bbf_param_t params = { .interval = 5, .min_neighbors = 2, .accurate = 1, .flags = 0, .size = ccv_size(24, 24) };
        ccv_array_t* seq = ccv_bbf_detect_objects(image, &cascade, 1, params);
        for (i = 0; i < seq->rnum; i++)
        {
            ccv_comp_t* comp = (ccv_comp_t*)ccv_array_get(seq, i);
            /* Create the new 5-item array: x, y, width, height, confidence */
            AV* res = newAV();
            av_push( res, newSVnv( comp->rect.x ));
            av_push( res, newSVnv( comp->rect.y ));
            av_push( res, newSVnv( comp->rect.width ));
            av_push( res, newSVnv( comp->rect.height ));
            av_push( res, newSVnv( comp->classification.confidence ));
            Inline_Stack_Push(sv_2mortal(newRV_noinc((SV*) res)));
        }
        ccv_array_free(seq);
        ccv_matrix_free(image);
    }
    ccv_bbf_classifier_cascade_free(cascade);
    ccv_disable_cache();
    Inline_Stack_Done;
    if (PL_markstack_ptr != temp) {
        /* truly void, because dXSARGS not invoked */
        PL_markstack_ptr = temp;
        XSRETURN_EMPTY; /* return empty stack */
    }
    /* must have used dXSARGS; list context implied */
    return; /* assume stack size is correct */
ccv-src/lib/nnc/cmd/relu/ccv_nnc_relu.c
ccv-src/lib/nnc/cmd/relu/ccv_nnc_relu_cpu_ref.c
ccv-src/lib/nnc/cmd/softmax/ccv_nnc_softmax.c
ccv-src/lib/nnc/cmd/softmax/ccv_nnc_softmax_cpu_ref.c
ccv-src/lib/nnc/cmd/util/ccv_nnc_util.c
ccv-src/lib/nnc/cmd/util/ccv_nnc_util_cpu_ref.c
ccv-src/lib/nnc/gpu/ccv_nnc_compat.h
ccv-src/README.md
ccv-src/samples/car.m
ccv-src/samples/cmyk-jpeg-format.jpg
ccv-src/samples/face/cascade.txt
ccv-src/samples/face/stage-0.txt
ccv-src/samples/face/stage-1.txt
ccv-src/samples/face/stage-10.txt
ccv-src/samples/face/stage-11.txt
ccv-src/samples/face/stage-12.txt
ccv-src/samples/face/stage-13.txt
ccv-src/samples/face/stage-14.txt
ccv-src/samples/face/stage-15.txt
ccv-src/samples/face/stage-2.txt
ccv-src/samples/face/stage-3.txt
ccv-src/samples/face/stage-4.txt
ccv-src/samples/face/stage-5.txt
ccv-src/samples/face/stage-6.txt
ccv-src/samples/face/stage-7.txt
ccv-src/samples/face/stage-8.txt
ccv-src/samples/face/stage-9.txt
ccv-src/samples/pedestrian.icf
ccv-src/samples/pedestrian.m
ccv-src/samples/pedestrian/cascade-1
ccv-src/samples/pedestrian/cascade-2
ccv-src/samples/pedestrian/cascade-3
ccv-src/samples/pedestrian/cascade-4
ccv-src/samples/pedestrian/cascade-5
ccv-src/samples/pedestrian/cascade-6
ccv-src/samples/pedestrian/multiscale
ccv-src/serve/.gitignore
ccv-src/serve/async.c
ccv-src/serve/async.h
ccv-src/serve/bbf.c
ccv-src/serve/convnet.c
ccv-src/serve/dpm.c
ccv-src/serve/ebb.c
ccv-src/serve/ebb.h
ccv-src/serve/ebb_request_parser.c
examples/facecrop.pl
examples/facetest.pl
examples/gen_examples_pod.pl
examples/README
examples/sift-video.pl
examples/sifttest.pl
inc/Devel/CheckLib.pm
INLINE_CCV.h
lib/Image/CCV.pm
lib/Image/CCV/Examples.pm
lib/Image/CCV/facedetect/cascade.txt
lib/Image/CCV/facedetect/stage-0.txt
lib/Image/CCV/facedetect/stage-1.txt
lib/Image/CCV/facedetect/stage-10.txt
lib/Image/CCV/facedetect/stage-11.txt
lib/Image/CCV/facedetect/stage-12.txt
lib/Image/CCV/facedetect/stage-13.txt
lib/Image/CCV/facedetect/stage-14.txt
lib/Image/CCV/facedetect/stage-15.txt
lib/Image/CCV/facedetect/stage-2.txt
lib/Image/CCV/facedetect/stage-3.txt
ccv-src/bin/bbfcreate.c
"\n \033[1mUSAGE\033[0m\n\n bbfcreate [OPTION...]\n\n"
" \033[1mREQUIRED OPTIONS\033[0m\n\n"
" --positive-list : text file contains a list of positive files (cropped and scaled to the same size)\n"
" --background-list : text file contains a list of image files that don't contain any target objects\n"
" --negative-count : the number of negative examples we should collect from background files to initialize SVM\n"
" --working-dir : the directory to save progress and produce result model\n"
" --width : the width of positive image\n"
" --height : the height of positive image\n\n"
" \033[1mOTHER OPTIONS\033[0m\n\n"
" --base-dir : change the base directory so that the program can read images from there\n"
" --layer : how many layers needed for cascade classifier [DEFAULT TO 24]\n"
" --positive-criteria : what's the percentage of positive examples need to pass for the next layer [DEFAULT TO 0.9975]\n"
" --negative-criteria : what's the percentage of negative examples need to reject for the next layer [DEFAULT TO 0.5]\n"
" --balance : the balance weight for positive examples v.s. negative examples [DEFAULT TO 1.0]\n"
" --feature-number : how big our feature pool should be [DEFAULT TO 100 (thus, 100 * 100 = 10000 features)]\n\n"
);
exit(-1);
}
int main(int argc, char** argv)
{
ccv-src/bin/bbfcreate.c
++size;
if (size >= capacity)
{
capacity *= 2;
bgfiles = (char**)ccrealloc(bgfiles, sizeof(char*) * capacity);
}
}
fclose(r1);
int bgnum = size;
free(file);
ccv_bbf_classifier_cascade_new(posimg, posnum, bgfiles, bgnum, negnum, ccv_size(width, height), working_dir, params);
for (i = 0; i < bgnum; i++)
free(bgfiles[i]);
for (i = 0; i < posnum; i++)
ccv_matrix_free(&posimg[i]);
free(posimg);
free(bgfiles);
ccv_disable_cache();
return 0;
}
ccv-src/bin/bbfdetect.c
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000 + tv.tv_usec / 1000;
}
int main(int argc, char** argv)
{
assert(argc >= 3);
int i;
ccv_enable_default_cache();
ccv_dense_matrix_t* image = 0;
ccv_bbf_classifier_cascade_t* cascade = ccv_bbf_read_classifier_cascade(argv[2]);
ccv_read(argv[1], &image, CCV_IO_GRAY | CCV_IO_ANY_FILE);
if (image != 0)
{
unsigned int elapsed_time = get_current_time();
ccv_array_t* seq = ccv_bbf_detect_objects(image, &cascade, 1, ccv_bbf_default_params);
elapsed_time = get_current_time() - elapsed_time;
for (i = 0; i < seq->rnum; i++)
{
ccv_comp_t* comp = (ccv_comp_t*)ccv_array_get(seq, i);
printf("%d %d %d %d %f\n", comp->rect.x, comp->rect.y, comp->rect.width, comp->rect.height, comp->classification.confidence);
}
printf("total : %d in time %dms\n", seq->rnum, elapsed_time);
ccv_array_free(seq);
ccv_matrix_free(image);
} else {
ccv-src/bin/bbfdetect.c
char* file = (char*)malloc(len);
ssize_t read;
while((read = getline(&file, &len, r)) != -1)
{
while(read > 1 && isspace(file[read - 1]))
read--;
file[read] = 0;
image = 0;
ccv_read(file, &image, CCV_IO_GRAY | CCV_IO_ANY_FILE);
assert(image != 0);
ccv_array_t* seq = ccv_bbf_detect_objects(image, &cascade, 1, ccv_bbf_default_params);
printf("%s %d\n", file, seq->rnum);
for (i = 0; i < seq->rnum; i++)
{
ccv_comp_t* comp = (ccv_comp_t*)ccv_array_get(seq, i);
printf("%d %d %d %d %f\n", comp->rect.x, comp->rect.y, comp->rect.width, comp->rect.height, comp->classification.confidence);
}
ccv_array_free(seq);
ccv_matrix_free(image);
}
free(file);
fclose(r);
}
}
ccv_bbf_classifier_cascade_free(cascade);
ccv_disable_cache();
return 0;
}
ccv-src/bin/bbffmt.c
#include "ccv.h"
#include <string.h>
void write_c(ccv_bbf_classifier_cascade_t* cascade)
{
printf("ccv_bbf_classifier_cascade_t* ccv_bbf_read_classifier_cascade()\n"
"{\n"
" ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)malloc(sizeof(ccv_bbf_classifier_cascade_t));\n"
" cascade->count = %d;\n"
" cascade->size = ccv_size(%d, %d);\n"
" cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)malloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t));\n",
cascade->count, cascade->size.width, cascade->size.height);
int i, j, k;
for (i = 0; i < cascade->count; i++)
{
printf(" {\n");
printf(" ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier + %d;\n", i);
printf(" classifier->count = %d;\n"
" classifier->threshold = %f;\n",
cascade->stage_classifier[i].count, cascade->stage_classifier[i].threshold);
printf(" classifier->feature = (ccv_bbf_feature_t*)malloc(classifier->count * sizeof(ccv_bbf_feature_t));\n"
" classifier->alpha = (float*)malloc(classifier->count * 2 * sizeof(float));\n");
for (j = 0; j < cascade->stage_classifier[i].count; j++)
{
printf(" classifier->feature[%d].size = %d;\n",
j, cascade->stage_classifier[i].feature[j].size);
for (k = 0; k < cascade->stage_classifier[i].feature[j].size; k++)
{
printf(" classifier->feature[%d].px[%d] = %d;\n"
" classifier->feature[%d].py[%d] = %d;\n"
" classifier->feature[%d].pz[%d] = %d;\n",
j, k, cascade->stage_classifier[i].feature[j].px[k],
j, k, cascade->stage_classifier[i].feature[j].py[k],
j, k, cascade->stage_classifier[i].feature[j].pz[k]);
printf(" classifier->feature[%d].nx[%d] = %d;\n"
" classifier->feature[%d].ny[%d] = %d;\n"
" classifier->feature[%d].nz[%d] = %d;\n",
j, k, cascade->stage_classifier[i].feature[j].nx[k],
j, k, cascade->stage_classifier[i].feature[j].ny[k],
j, k, cascade->stage_classifier[i].feature[j].nz[k]);
}
printf(" classifier->alpha[%d] = %f;\n"
" classifier->alpha[%d] = %f;\n",
j * 2, cascade->stage_classifier[i].alpha[j * 2], j * 2 + 1, cascade->stage_classifier[i].alpha[j * 2 + 1]);
}
printf(" }\n");
}
printf(" return cascade;\n}");
}
void write_json(ccv_bbf_classifier_cascade_t* cascade)
{
printf("{\"count\" : %d, \"width\" : %d, \"height\" : %d, \"stage_classifier\" : [",
cascade->count, cascade->size.width, cascade->size.height);
int i, j, k;
for (i = 0; i < cascade->count; i++)
{
printf("{\"count\":%d,\"threshold\":%le,\"feature\":[",
cascade->stage_classifier[i].count, cascade->stage_classifier[i].threshold);
for (j = 0; j < cascade->stage_classifier[i].count; j++)
{
printf("{\"size\":%d,\"px\":[%d", cascade->stage_classifier[i].feature[j].size, cascade->stage_classifier[i].feature[j].px[0]);
for (k = 1; k < cascade->stage_classifier[i].feature[j].size; k++)
printf(",%d", cascade->stage_classifier[i].feature[j].px[k]);
printf("],\"py\":[%d", cascade->stage_classifier[i].feature[j].py[0]);
for (k = 1; k < cascade->stage_classifier[i].feature[j].size; k++)
printf(",%d", cascade->stage_classifier[i].feature[j].py[k]);
printf("],\"pz\":[%d", cascade->stage_classifier[i].feature[j].pz[0]);
for (k = 1; k < cascade->stage_classifier[i].feature[j].size; k++)
printf(",%d", cascade->stage_classifier[i].feature[j].pz[k]);
printf("],\"nx\":[%d", cascade->stage_classifier[i].feature[j].nx[0]);
for (k = 1; k < cascade->stage_classifier[i].feature[j].size; k++)
printf(",%d", cascade->stage_classifier[i].feature[j].nx[k]);
printf("],\"ny\":[%d", cascade->stage_classifier[i].feature[j].ny[0]);
for (k = 1; k < cascade->stage_classifier[i].feature[j].size; k++)
printf(",%d", cascade->stage_classifier[i].feature[j].ny[k]);
printf("],\"nz\":[%d", cascade->stage_classifier[i].feature[j].nz[0]);
for (k = 1; k < cascade->stage_classifier[i].feature[j].size; k++)
printf(",%d", cascade->stage_classifier[i].feature[j].nz[k]);
if (j < cascade->stage_classifier[i].count - 1)
printf("]},");
else
printf("]}");
}
printf("],\"alpha\":[%le,%le", cascade->stage_classifier[i].alpha[0], cascade->stage_classifier[i].alpha[1]);
for (j = 1; j < cascade->stage_classifier[i].count; j++)
printf(",%le,%le", cascade->stage_classifier[i].alpha[j * 2], cascade->stage_classifier[i].alpha[j * 2 + 1]);
if (i < cascade->count - 1)
printf("]},");
else
printf("]}");
}
printf("]};\n");
}
int main(int argc, char** argv)
{
assert(argc >= 3);
ccv_bbf_classifier_cascade_t* cascade = ccv_bbf_read_classifier_cascade(argv[1]);
if (strcmp(argv[2], "bin") == 0)
{
assert(argc >= 4);
int len = ccv_bbf_classifier_cascade_write_binary(cascade, NULL, 0);
char* s = malloc(len);
ccv_bbf_classifier_cascade_write_binary(cascade, s, len);
FILE* w = fopen(argv[3], "w");
fwrite(s, 1, len, w);
fclose(w);
free(s);
} else if (strcmp(argv[2], "c") == 0) {
write_c(cascade);
} else if (strcmp(argv[2], "json") == 0) {
write_json(cascade);
} else if (strcmp(argv[2], "cbin") == 0) {
int len = ccv_bbf_classifier_cascade_write_binary(cascade, NULL, 0);
char* s = malloc(len);
ccv_bbf_classifier_cascade_write_binary(cascade, s, len);
int i;
for (i = 0; i < len; i++)
printf("\\x%x", (unsigned char)s[i]);
fflush(NULL);
free(s);
}
return 0;
}
ccv-src/bin/icfcreate.c
{
strncpy(file_info.filename, base_dir, 1024);
file_info.filename[dirlen - 1] = '/';
}
strncpy(file_info.filename + dirlen, file, 1024 - dirlen);
file_info.pose = pose;
ccv_array_push(validatefiles, &file_info);
}
fclose(r2);
free(file);
ccv_icf_classifier_cascade_t* classifier = ccv_icf_classifier_cascade_new(posfiles, positive_count, bgfiles, negative_count, validatefiles, working_dir, params);
char filename[1024];
snprintf(filename, 1024, "%s/final-cascade", working_dir);
ccv_icf_write_classifier_cascade(classifier, filename);
for (i = 0; i < posfiles->rnum; i++)
{
ccv_file_info_t* file_info = (ccv_file_info_t*)ccv_array_get(posfiles, i);
free(file_info->filename);
}
ccv_array_free(posfiles);
for (i = 0; i < bgfiles->rnum; i++)
{
ccv_file_info_t* file_info = (ccv_file_info_t*)ccv_array_get(bgfiles, i);
free(file_info->filename);
ccv-src/bin/icfdetect.c
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000 + tv.tv_usec / 1000;
}
int main(int argc, char** argv)
{
assert(argc >= 3);
int i;
ccv_enable_default_cache();
ccv_dense_matrix_t* image = 0;
ccv_icf_classifier_cascade_t* cascade = ccv_icf_read_classifier_cascade(argv[2]);
ccv_read(argv[1], &image, CCV_IO_ANY_FILE | CCV_IO_RGB_COLOR);
if (image != 0)
{
unsigned int elapsed_time = get_current_time();
ccv_array_t* seq = ccv_icf_detect_objects(image, &cascade, 1, ccv_icf_default_params);
elapsed_time = get_current_time() - elapsed_time;
for (i = 0; i < seq->rnum; i++)
{
ccv_comp_t* comp = (ccv_comp_t*)ccv_array_get(seq, i);
printf("%d %d %d %d %f\n", comp->rect.x, comp->rect.y, comp->rect.width, comp->rect.height, comp->classification.confidence);
}
printf("total : %d in time %dms\n", seq->rnum, elapsed_time);
ccv_array_free(seq);
ccv_matrix_free(image);
} else {
ccv-src/bin/icfdetect.c
char* file = (char*)malloc(len);
ssize_t read;
while((read = getline(&file, &len, r)) != -1)
{
while(read > 1 && isspace(file[read - 1]))
read--;
file[read] = 0;
image = 0;
ccv_read(file, &image, CCV_IO_ANY_FILE | CCV_IO_RGB_COLOR);
assert(image != 0);
ccv_array_t* seq = ccv_icf_detect_objects(image, &cascade, 1, ccv_icf_default_params);
for (i = 0; i < seq->rnum; i++)
{
ccv_comp_t* comp = (ccv_comp_t*)ccv_array_get(seq, i);
printf("%s %d %d %d %d %f\n", file, comp->rect.x, comp->rect.y, comp->rect.width, comp->rect.height, comp->classification.confidence);
}
ccv_array_free(seq);
ccv_matrix_free(image);
}
free(file);
fclose(r);
}
}
ccv_icf_classifier_cascade_free(cascade);
ccv_disable_cache();
return 0;
}
ccv-src/bin/icfoptimize.c
#include <getopt.h>
static void exit_with_help(void)
{
printf(
"\n \033[1mUSAGE\033[0m\n\n icfoptimize [OPTION...]\n\n"
" \033[1mREQUIRED OPTIONS\033[0m\n\n"
" --positive-list : text file contains a list of positive files in format:\n"
" <file name> center-x center-y horizontal-axis-length vertical-axis-length object-roll object-pitch object-yaw \\newline\n"
" --acceptance : what percentage of positive examples that we should accept for soft cascading\n"
" --classifier-cascade : the model file that we will compute soft cascading thresholds on\n\n"
" \033[1mOTHER OPTIONS\033[0m\n\n"
" --base-dir : change the base directory so that the program can read images from there\n\n"
);
exit(-1);
}
int main(int argc, char** argv)
{
static struct option icf_options[] = {
/* help */
{"help", 0, 0, 0},
/* required parameters */
{"positive-list", 1, 0, 0},
{"classifier-cascade", 1, 0, 0},
{"acceptance", 1, 0, 0},
/* optional parameters */
{"base-dir", 1, 0, 0},
{0, 0, 0, 0}
};
char* positive_list = 0;
char* classifier_cascade = 0;
char* base_dir = 0;
double acceptance = 0;
int i, k;
while (getopt_long_only(argc, argv, "", icf_options, &k) != -1)
{
switch (k)
{
case 0:
exit_with_help();
case 1:
positive_list = optarg;
break;
case 2:
classifier_cascade = optarg;
break;
case 3:
acceptance = atof(optarg);
break;
case 4:
base_dir = optarg;
break;
}
}
assert(positive_list != 0);
assert(classifier_cascade != 0);
ccv_enable_cache(512 * 1024 * 1024);
FILE* r0 = fopen(positive_list, "r");
assert(r0 && "positive-list doesn't exists");
char* file = (char*)malloc(1024);
ccv_decimal_pose_t pose;
ccv_array_t* posfiles = ccv_array_new(sizeof(ccv_file_info_t), 32, 0);
int dirlen = (base_dir != 0) ? strlen(base_dir) + 1 : 0;
// roll pitch yaw
while (fscanf(r0, "%s %f %f %f %f %f %f %f", file, &pose.x, &pose.y, &pose.a, &pose.b, &pose.roll, &pose.pitch, &pose.yaw) != EOF)
{
ccv-src/bin/icfoptimize.c
strncpy(file_info.filename, base_dir, 1024);
file_info.filename[dirlen - 1] = '/';
}
strncpy(file_info.filename + dirlen, file, 1024 - dirlen);
// blow up pose a little bit for INRIA data (16px on four strides)
file_info.pose = pose;
ccv_array_push(posfiles, &file_info);
}
fclose(r0);
free(file);
ccv_icf_classifier_cascade_t* cascade = ccv_icf_read_classifier_cascade(classifier_cascade);
assert(cascade && "classifier cascade doesn't exists");
ccv_icf_classifier_cascade_soft(cascade, posfiles, acceptance);
ccv_icf_write_classifier_cascade(cascade, classifier_cascade);
for (i = 0; i < posfiles->rnum; i++)
{
ccv_file_info_t* file_info = (ccv_file_info_t*)ccv_array_get(posfiles, i);
free(file_info->filename);
}
ccv_array_free(posfiles);
ccv_disable_cache();
return 0;
}
ccv-src/bin/scdcreate.c
.accu_false_positive_rate = 1e-7,
.auc_crit = 1e-5,
.maximum_feature = 2048,
.prune_stage = 3,
.prune_feature = 4,
},
.weight_trimming = 0.98,
.C = 0.0005,
.grayscale = 0,
};
ccv_scd_classifier_cascade_t* cascade = ccv_scd_classifier_cascade_new(posfiles, hard_mine, negative_count, working_dir, params);
ccv_scd_classifier_cascade_write(cascade, working_dir);
return 0;
}
ccv-src/bin/scddetect.c
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000 + tv.tv_usec / 1000;
}
int main(int argc, char** argv)
{
assert(argc >= 3);
int i;
ccv_enable_default_cache();
ccv_dense_matrix_t* image = 0;
ccv_scd_classifier_cascade_t* cascade = ccv_scd_classifier_cascade_read(argv[2]);
ccv_read(argv[1], &image, CCV_IO_RGB_COLOR | CCV_IO_ANY_FILE);
if (image != 0)
{
unsigned int elapsed_time = get_current_time();
ccv_array_t* seq = ccv_scd_detect_objects(image, &cascade, 1, ccv_scd_default_params);
elapsed_time = get_current_time() - elapsed_time;
for (i = 0; i < seq->rnum; i++)
{
ccv_comp_t* comp = (ccv_comp_t*)ccv_array_get(seq, i);
printf("%d %d %d %d %f\n", comp->rect.x, comp->rect.y, comp->rect.width, comp->rect.height, comp->classification.confidence);
}
printf("total : %d in time %dms\n", seq->rnum, elapsed_time);
ccv_array_free(seq);
ccv_matrix_free(image);
} else {
ccv-src/bin/scddetect.c
while((read = getline(&file, &len, r)) != -1)
{
while(read > 1 && isspace(file[read - 1]))
read--;
file[read] = 0;
image = 0;
ccv_read(file, &image, CCV_IO_RGB_COLOR | CCV_IO_ANY_FILE);
assert(image != 0);
ccv_scd_param_t params = ccv_scd_default_params;
params.size = ccv_size(24, 24);
ccv_array_t* seq = ccv_scd_detect_objects(image, &cascade, 1, params);
printf("%s %d\n", file, seq->rnum);
for (i = 0; i < seq->rnum; i++)
{
ccv_comp_t* comp = (ccv_comp_t*)ccv_array_get(seq, i);
printf("%d %d %d %d %f\n", comp->rect.x, comp->rect.y, comp->rect.width, comp->rect.height, comp->classification.confidence);
}
ccv_array_free(seq);
ccv_matrix_free(image);
}
free(file);
fclose(r);
}
}
ccv_scd_classifier_cascade_free(cascade);
ccv_disable_cache();
return 0;
}
ccv-src/doc/icf.md
You also want to have a collection of background (non-pedestrian) files; I combined data from
both INRIA and VOC2011 to generate that list:
find ../data/negs/*.jpg > no-pedestrian.txt
Once all of these are ready, and you have a PC with enough computational power:
./icfcreate --positive-list pedestrian.icf_samples --background-list no-pedestrian.txt --validate-list pedestrian.icf_test --negative-count 10000 --positive-count 10000 --feature-size 50000 --weak-classifier-count 2000 --size 30x90 --margin 10,10,10...
The classifier cascade will be bootstrapped 3 times, pooling from 50,000 features, and the
final boosted classifier will have 2,000 weak classifiers. On the PC that I am running (with
an SSD / hard-drive hybrid (through flashcache), 32GiB memory and a Core i7 3770K), it takes a day
to finish training one classifier. At minimum, you should have about 16GB of available memory
for the program to finish running.
The final-cascade file in your working directory is the classifier model file that you can
use. Using ./bin/icfoptimize, you should be able to set proper soft cascading thresholds for
the classifier to speed up detection:
./icfoptimize --positive-list pedestrian.icf_test --classifier-cascade icf-data/final-cascade --acceptance 0.7 --base-dir ../data/INRIAPerson/Test/pos/
ccv-src/js/ccv.js
node1 = node[node1].parent;
if(node[node1].rank >= 0)
node[node1].rank = ~class_idx++;
j = ~node[node1].rank;
}
idx[i] = j;
}
return {"index" : idx, "cat" : class_idx};
},
detect_objects : parallable("ccv.js", function (canvas, cascade, interval, min_neighbors) {
if (this.shared !== undefined) {
var params = get_named_arguments(arguments, ["canvas", "cascade", "interval", "min_neighbors"]);
this.shared.canvas = params.canvas;
this.shared.interval = params.interval;
this.shared.min_neighbors = params.min_neighbors;
this.shared.cascade = params.cascade;
this.shared.scale = Math.pow(2, 1 / (params.interval + 1));
this.shared.next = params.interval + 1;
this.shared.scale_upto = Math.floor(Math.log(Math.min(params.canvas.width / params.cascade.width, params.canvas.height / params.cascade.height)) / Math.log(this.shared.scale));
var i;
for (i = 0; i < this.shared.cascade.stage_classifier.length; i++)
this.shared.cascade.stage_classifier[i].orig_feature = this.shared.cascade.stage_classifier[i].feature;
}
function pre(worker_num) {
var canvas = this.shared.canvas;
var interval = this.shared.interval;
var scale = this.shared.scale;
var next = this.shared.next;
var scale_upto = this.shared.scale_upto;
var pyr = new Array((scale_upto + next * 2) * 4);
var ret = new Array((scale_upto + next * 2) * 4);
pyr[0] = canvas;
ccv-src/js/ccv.js
pyr[i * 4 + 3].height = Math.floor(pyr[i * 4 - next * 4].height / 2);
pyr[i * 4 + 3].getContext("2d").drawImage(pyr[i * 4 - next * 4], 1, 1, pyr[i * 4 - next * 4].width - 1, pyr[i * 4 - next * 4].height - 1, 0, 0, pyr[i * 4 + 3].width - 2, pyr[i * 4 + 3].height - 2);
ret[i * 4 + 3] = { "width" : pyr[i * 4 + 3].width,
"height" : pyr[i * 4 + 3].height,
"data" : pyr[i * 4 + 3].getContext("2d").getImageData(0, 0, pyr[i * 4 + 3].width, pyr[i * 4 + 3].height).data };
}
return [ret];
};
function core(pyr, id, worker_num) {
var cascade = this.shared.cascade;
var interval = this.shared.interval;
var scale = this.shared.scale;
var next = this.shared.next;
var scale_upto = this.shared.scale_upto;
var i, j, k, x, y, q;
var scale_x = 1, scale_y = 1;
var dx = [0, 1, 0, 1];
var dy = [0, 0, 1, 1];
var seq = [];
for (i = 0; i < scale_upto; i++) {
var qw = pyr[i * 4 + next * 8].width - Math.floor(cascade.width / 4);
var qh = pyr[i * 4 + next * 8].height - Math.floor(cascade.height / 4);
var step = [pyr[i * 4].width * 4, pyr[i * 4 + next * 4].width * 4, pyr[i * 4 + next * 8].width * 4];
var paddings = [pyr[i * 4].width * 16 - qw * 16,
pyr[i * 4 + next * 4].width * 8 - qw * 8,
pyr[i * 4 + next * 8].width * 4 - qw * 4];
for (j = 0; j < cascade.stage_classifier.length; j++) {
var orig_feature = cascade.stage_classifier[j].orig_feature;
var feature = cascade.stage_classifier[j].feature = new Array(cascade.stage_classifier[j].count);
for (k = 0; k < cascade.stage_classifier[j].count; k++) {
feature[k] = {"size" : orig_feature[k].size,
"px" : new Array(orig_feature[k].size),
"pz" : new Array(orig_feature[k].size),
"nx" : new Array(orig_feature[k].size),
"nz" : new Array(orig_feature[k].size)};
for (q = 0; q < orig_feature[k].size; q++) {
feature[k].px[q] = orig_feature[k].px[q] * 4 + orig_feature[k].py[q] * step[orig_feature[k].pz[q]];
feature[k].pz[q] = orig_feature[k].pz[q];
feature[k].nx[q] = orig_feature[k].nx[q] * 4 + orig_feature[k].ny[q] * step[orig_feature[k].nz[q]];
feature[k].nz[q] = orig_feature[k].nz[q];
}
}
}
for (q = 0; q < 4; q++) {
var u8 = [pyr[i * 4].data, pyr[i * 4 + next * 4].data, pyr[i * 4 + next * 8 + q].data];
var u8o = [dx[q] * 8 + dy[q] * pyr[i * 4].width * 8, dx[q] * 4 + dy[q] * pyr[i * 4 + next * 4].width * 4, 0];
for (y = 0; y < qh; y++) {
for (x = 0; x < qw; x++) {
var sum = 0;
var flag = true;
for (j = 0; j < cascade.stage_classifier.length; j++) {
sum = 0;
var alpha = cascade.stage_classifier[j].alpha;
var feature = cascade.stage_classifier[j].feature;
for (k = 0; k < cascade.stage_classifier[j].count; k++) {
var feature_k = feature[k];
var p, pmin = u8[feature_k.pz[0]][u8o[feature_k.pz[0]] + feature_k.px[0]];
var n, nmax = u8[feature_k.nz[0]][u8o[feature_k.nz[0]] + feature_k.nx[0]];
if (pmin <= nmax) {
sum += alpha[k * 2];
} else {
var f, shortcut = true;
for (f = 0; f < feature_k.size; f++) {
if (feature_k.pz[f] >= 0) {
p = u8[feature_k.pz[f]][u8o[feature_k.pz[f]] + feature_k.px[f]];
ccv-src/js/ccv.js
shortcut = false;
break;
}
nmax = n;
}
}
}
sum += (shortcut) ? alpha[k * 2 + 1] : alpha[k * 2];
}
}
if (sum < cascade.stage_classifier[j].threshold) {
flag = false;
break;
}
}
if (flag) {
seq.push({"x" : (x * 4 + dx[q] * 2) * scale_x,
"y" : (y * 4 + dy[q] * 2) * scale_y,
"width" : cascade.width * scale_x,
"height" : cascade.height * scale_y,
"neighbor" : 1,
"confidence" : sum});
}
u8o[0] += 16;
u8o[1] += 8;
u8o[2] += 4;
}
u8o[0] += paddings[0];
u8o[1] += paddings[1];
u8o[2] += paddings[2];
}
}
scale_x *= scale;
scale_y *= scale;
}
return seq;
};
function post(seq) {
var min_neighbors = this.shared.min_neighbors;
var cascade = this.shared.cascade;
var interval = this.shared.interval;
var scale = this.shared.scale;
var next = this.shared.next;
var scale_upto = this.shared.scale_upto;
var i, j;
for (i = 0; i < cascade.stage_classifier.length; i++)
cascade.stage_classifier[i].feature = cascade.stage_classifier[i].orig_feature;
seq = seq[0];
if (!(min_neighbors > 0))
return seq;
else {
var result = ccv.array_group(seq, function (r1, r2) {
var distance = Math.floor(r1.width * 0.25 + 0.5);
return r2.x <= r1.x + distance &&
r2.x >= r1.x - distance &&
r2.y <= r1.y + distance &&
ccv-src/js/face.js
var cascade = {"count" : 16, "width" : 24, "height" : 24, "stage_classifier" : [{"count":4,"threshold":-4.577530e+00,"feature":[{"size":4,"px":[3,5,8,11],"py":[2,2,6,3],"pz":[2,1,1,0],"nx":[8,4,0,0],"ny":[4,4,0,0],"nz":[1,1,-1,-1]},{"size":3,"px":[3,...
ccv-src/js/index.html
for (var i = 0; i < comp.length; i++) {
ctx.beginPath();
ctx.arc((comp[i].x + comp[i].width * 0.5) * scale, (comp[i].y + comp[i].height * 0.5) * scale,
(comp[i].width + comp[i].height) * 0.25 * scale * 1.2, 0, Math.PI * 2);
ctx.stroke();
}
}
/* call main detect_objects function */
if (async) {
ccv.detect_objects({ "canvas" : ccv.grayscale(ccv.pre(image)),
"cascade" : cascade,
"interval" : 5,
"min_neighbors" : 1,
"async" : true,
"worker" : 1 })(post);
} else {
var comp = ccv.detect_objects({ "canvas" : ccv.grayscale(ccv.pre(image)),
"cascade" : cascade,
"interval" : 5,
"min_neighbors" : 1 });
post(comp);
}
};
image.src = src;
}
function handleLocalFile(file) {
if (file.type.match(/image.*/)) {
ccv-src/lib/ccv.h
int count;
float threshold;
ccv_bbf_feature_t* feature;
float* alpha;
} ccv_bbf_stage_classifier_t;
typedef struct {
int count;
ccv_size_t size;
ccv_bbf_stage_classifier_t* stage_classifier;
} ccv_bbf_classifier_cascade_t;
enum {
CCV_BBF_GENETIC_OPT = 0x01,
CCV_BBF_FLOAT_OPT = 0x02,
};
typedef struct {
int interval; /**< Interval images between the full size image and the half size one. e.g. 2 will generate 2 images in between full size image and half size one: image with full size, image with 5/6 size, image with 2/3 size, image with 1/2 size. */
int min_neighbors; /**< 0: no grouping afterwards. 1: group objects that intersect each other. > 1: group objects that intersect each other, and only pass those that have at least **min_neighbors** intersecting objects. */
int flags; /**< CCV_BBF_NO_NESTED, if one class of object is inside another class of object, this flag will reject the first object. */
int accurate; /**< BBF generates 4 spatial scale variations for better accuracy. Setting this parameter to 0 reduces it to 1 scale variation, which is thus 3 times faster but lowers the general accuracy of the detector. */
ccv_size_t size; /**< The smallest object size that will be interesting to us. */
} ccv_bbf_param_t;
typedef struct {
double pos_crit; /**< Positive criterion, or the targeted recall ratio; the BBF classifier tries to adjust the constant to meet this criterion. */
double neg_crit; /**< Negative criterion, or the targeted reject ratio; the BBF classifier tries to include more weak features until it meets this criterion. */
double balance_k; /**< Weight positive examples differently from negative examples. */
int layer; /**< The maximum layer trained for the classifier cascade. */
int feature_number; /**< The maximum feature number for each classifier. */
int optimizer; /**< CCV_BBF_GENETIC_OPT, using genetic algorithm to search the best weak feature; CCV_BBF_FLOAT_OPT, using float search to improve the found best weak feature. */
ccv_bbf_param_t detector; /**< A **ccv_bbf_param_t** structure that will be used to search for negative examples in background images. */
} ccv_bbf_new_param_t;
enum {
CCV_BBF_NO_NESTED = 0x10000000,
};
extern const ccv_bbf_param_t ccv_bbf_default_params;
/**
* Create a new BBF classifier cascade from given positive examples and background images. This function has a hard dependency on [GSL](http://www.gnu.org/software/gsl/).
* @param posimg An array of positive examples.
* @param posnum Number of positive examples.
* @param bgfiles An array of background images.
* @param bgnum Number of background images.
* @param negnum Number of negative examples that is harvested from background images.
* @param size The image size of positive examples.
* @param dir The working directory to store/retrieve intermediate data.
* @param params A **ccv_bbf_new_param_t** structure that defines various aspects of the training function.
*/
void ccv_bbf_classifier_cascade_new(ccv_dense_matrix_t** posimg, int posnum, char** bgfiles, int bgnum, int negnum, ccv_size_t size, const char* dir, ccv_bbf_new_param_t params);
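/* A minimal, hypothetical training-call sketch (not part of ccv.h). It assumes the caller has
 * already loaded `posimg`, `bgfiles` and the counts, and mirrors the defaults listed in the
 * bbfcreate help text above; the exact mapping of those defaults to fields is an assumption. */
ccv_bbf_new_param_t train_params = {
    .pos_crit = 0.9975,
    .neg_crit = 0.5,
    .balance_k = 1.0,
    .layer = 24,
    .feature_number = 100,
    .optimizer = CCV_BBF_GENETIC_OPT,
    .detector = { .interval = 3, .min_neighbors = 0, .accurate = 1, .flags = 0, .size = ccv_size(24, 24) },
};
/* intermediate state (stage-*.txt, stat.txt, negs.txt) is written under "bbf-data" */
ccv_bbf_classifier_cascade_new(posimg, posnum, bgfiles, bgnum, negnum, ccv_size(24, 24), "bbf-data", train_params);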
/**
* Using a BBF classifier cascade to detect objects in a given image. If you have several classifier cascades, it is better to use them in one method call. In this way, ccv will try to optimize the overall performance.
* @param a The input image.
* @param cascade An array of classifier cascades.
* @param count How many classifier cascades you've passed in.
* @param params A **ccv_bbf_param_t** structure that defines various aspects of the detector.
* @return A **ccv_array_t** of **ccv_comp_t** for detection results.
*/
CCV_WARN_UNUSED(ccv_array_t*) ccv_bbf_detect_objects(ccv_dense_matrix_t* a, ccv_bbf_classifier_cascade_t** cascade, int count, ccv_bbf_param_t params);
/**
* Read BBF classifier cascade from working directory.
* @param directory The working directory that trains a BBF classifier cascade.
* @return A classifier cascade, 0 if no valid classifier cascade available.
*/
CCV_WARN_UNUSED(ccv_bbf_classifier_cascade_t*) ccv_bbf_read_classifier_cascade(const char* directory);
/**
* Free up the memory of BBF classifier cascade.
* @param cascade The BBF classifier cascade.
*/
void ccv_bbf_classifier_cascade_free(ccv_bbf_classifier_cascade_t* cascade);
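/* A minimal detection sketch (not part of ccv.h) tying the declarations above together;
 * "image.png" and "facedetect" are placeholder paths, and ccv.h plus stdio.h are assumed
 * to be included. */
ccv_enable_default_cache();
ccv_dense_matrix_t* image = 0;
ccv_read("image.png", &image, CCV_IO_GRAY | CCV_IO_ANY_FILE);
ccv_bbf_classifier_cascade_t* cascade = ccv_bbf_read_classifier_cascade("facedetect");
if (image != 0 && cascade != 0)
{
    ccv_array_t* seq = ccv_bbf_detect_objects(image, &cascade, 1, ccv_bbf_default_params);
    int i;
    for (i = 0; i < seq->rnum; i++)
    {
        ccv_comp_t* comp = (ccv_comp_t*)ccv_array_get(seq, i);
        printf("%d %d %d %d %f\n", comp->rect.x, comp->rect.y, comp->rect.width, comp->rect.height, comp->classification.confidence);
    }
    ccv_array_free(seq);
}
if (image != 0)
    ccv_matrix_free(image);
if (cascade != 0)
    ccv_bbf_classifier_cascade_free(cascade);
ccv_disable_cache();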
/**
* Load BBF classifier cascade from a memory region.
* @param s The memory region of binarized BBF classifier cascade.
* @return A classifier cascade, 0 if no valid classifier cascade available.
*/
CCV_WARN_UNUSED(ccv_bbf_classifier_cascade_t*) ccv_bbf_classifier_cascade_read_binary(char* s);
/**
* Write BBF classifier cascade to a memory region.
* @param cascade The BBF classifier cascade.
* @param s The designated memory region.
* @param slen The size of the designated memory region.
* @return The actual size of the binarized BBF classifier cascade, if this size is larger than **slen**, please reallocate the memory region and do it again.
*/
int ccv_bbf_classifier_cascade_write_binary(ccv_bbf_classifier_cascade_t* cascade, char* s, int slen);
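/* A minimal serialization sketch (not part of ccv.h): probe the required size with a NULL
 * buffer, allocate, write, then read the cascade back from memory, as bbffmt.c does above.
 * `cascade` is assumed to be a previously loaded ccv_bbf_classifier_cascade_t*. */
int len = ccv_bbf_classifier_cascade_write_binary(cascade, NULL, 0); /* first pass returns the size */
char* buf = (char*)malloc(len);
ccv_bbf_classifier_cascade_write_binary(cascade, buf, len);          /* second pass fills the buffer */
ccv_bbf_classifier_cascade_t* copy = ccv_bbf_classifier_cascade_read_binary(buf);
free(buf);
ccv_bbf_classifier_cascade_free(copy);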
/** @} */
/* Ferns classifier: this is a fern implementation that specifically used for TLD
* see: http://cvlab.epfl.ch/alumni/oezuysal/ferns.html for more about ferns */
typedef struct {
int structs;
int features;
int scales;
int posteriors;
ccv-src/lib/ccv.h
CCV_ICF_CLASSIFIER_TYPE_B = 0x2,
};
typedef struct {
int type;
int count;
int grayscale;
ccv_margin_t margin;
ccv_size_t size; // this is the size includes the margin
ccv_icf_decision_tree_t* weak_classifiers;
} ccv_icf_classifier_cascade_t; // Type A, scale image
typedef struct {
int type;
int count;
int octave;
int grayscale;
ccv_icf_classifier_cascade_t* cascade;
} ccv_icf_multiscale_classifier_cascade_t; // Type B, scale the classifier
typedef struct {
int min_neighbors; /**< 0: no grouping afterwards. 1: group objects that intersect each other. > 1: group objects that intersect each other, and only pass those that have at least **min_neighbors** intersecting objects. */
int flags;
int step_through; /**< The step size for detection. */
int interval; /**< Interval images between the full size image and the half size one. e.g. 2 will generate 2 images in between full size image and half size one: image with full size, image with 5/6 size, image with 2/3 size, image with 1/2 size. */
float threshold;
} ccv_icf_param_t;
extern const ccv_icf_param_t ccv_icf_default_params;
ccv-src/lib/ccv.h
float deform_angle; /**< The range of rotations to add distortion, in radius. */
float deform_scale; /**< The range of scale changes to add distortion. */
float deform_shift; /**< The range of translations to add distortion, in pixel. */
double acceptance; /**< The percentage of validation examples that will be accepted when soft cascading the classifiers that will be used for bootstrapping. */
} ccv_icf_new_param_t;
void ccv_icf(ccv_dense_matrix_t* a, ccv_dense_matrix_t** b, int type);
/* ICF for single scale */
/**
* Create a new ICF classifier cascade from given positive examples and background images. This function has a hard dependency on [GSL](http://www.gnu.org/software/gsl/) and better be used with [libdispatch](http://libdispatch.macosforge.org/) for ma...
* @param posfiles An array of **ccv_file_info_t** that gives the positive examples and their locations.
* @param posnum The number of positive examples that we want to use (with certain random distortions if so choose).
* @param bgfiles An array of **ccv_file_info_t** that gives the background images.
* @param negnum The number of negative examples will be collected during bootstrapping / initialization.
* @param testfiles An array of **ccv_file_info_t** that gives the validation examples and their locations.
* @param dir The directory that saves the progress.
* @param params A **ccv_icf_new_param_t** structure that defines various aspects of the training function.
* @return A trained classifier cascade.
*/
CCV_WARN_UNUSED(ccv_icf_classifier_cascade_t*) ccv_icf_classifier_cascade_new(ccv_array_t* posfiles, int posnum, ccv_array_t* bgfiles, int negnum, ccv_array_t* testfiles, const char* dir, ccv_icf_new_param_t params);
/**
* Compute soft cascade thresholds to speed up the classifier cascade performance.
* @param cascade The trained classifier that we want to optimize soft cascade thresholds on.
* @param posfiles An array of **ccv_array_t** that gives the positive examples and their locations.
* @param acceptance The percentage of positive examples will be accepted when optimizing the soft cascade thresholds.
*/
void ccv_icf_classifier_cascade_soft(ccv_icf_classifier_cascade_t* cascade, ccv_array_t* posfiles, double acceptance);
/**
* Read an ICF classifier from a file.
* @param filename The file path that contains the trained ICF classifier.
* @return The classifier cascade, 0 if no valid classifier cascade available.
*/
CCV_WARN_UNUSED(ccv_icf_classifier_cascade_t*) ccv_icf_read_classifier_cascade(const char* filename);
/**
* Write an ICF classifier to a file.
* @param classifier The classifier that we want to write to file.
* @param filename The file path that we want to persist the ICF classifier.
*/
void ccv_icf_write_classifier_cascade(ccv_icf_classifier_cascade_t* classifier, const char* filename);
/**
* Free up the memory of ICF classifier cascade.
* @param classifier The ICF classifier cascade.
*/
void ccv_icf_classifier_cascade_free(ccv_icf_classifier_cascade_t* classifier);
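/* A minimal soft-cascade tuning sketch (not part of ccv.h), following icfoptimize.c above;
 * `posfiles` is assumed to be an already-populated ccv_array_t of ccv_file_info_t, and
 * "icf-data/final-cascade" is a placeholder model path. */
ccv_icf_classifier_cascade_t* cascade = ccv_icf_read_classifier_cascade("icf-data/final-cascade");
ccv_icf_classifier_cascade_soft(cascade, posfiles, 0.7);             /* accept 70% of positives */
ccv_icf_write_classifier_cascade(cascade, "icf-data/final-cascade"); /* persist the new thresholds */
ccv_icf_classifier_cascade_free(cascade);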
/* ICF for multiple scale */
CCV_WARN_UNUSED(ccv_icf_multiscale_classifier_cascade_t*) ccv_icf_multiscale_classifier_cascade_new(ccv_icf_classifier_cascade_t* cascades, int octave, int interval);
CCV_WARN_UNUSED(ccv_icf_multiscale_classifier_cascade_t*) ccv_icf_read_multiscale_classifier_cascade(const char* directory);
void ccv_icf_write_multiscale_classifier_cascade(ccv_icf_multiscale_classifier_cascade_t* classifier, const char* directory);
void ccv_icf_multiscale_classifier_cascade_free(ccv_icf_multiscale_classifier_cascade_t* classifier);
/* polymorph function to run ICF based detector */
/**
* Using an ICF classifier cascade to detect objects in a given image. If you have several classifier cascades, it is better to use them in one method call. In this way, ccv will try to optimize the overall performance.
* @param a The input image.
* @param cascade An array of classifier cascades.
* @param count How many classifier cascades you've passed in.
* @param params A **ccv_icf_param_t** structure that defines various aspects of the detector.
* @return A **ccv_array_t** of **ccv_comp_t** with detection results.
*/
CCV_WARN_UNUSED(ccv_array_t*) ccv_icf_detect_objects(ccv_dense_matrix_t* a, void* cascade, int count, ccv_icf_param_t params);
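/* A minimal ICF detection sketch (not part of ccv.h); a single cascade is passed by address,
 * matching icfdetect.c above. "street.png" is a placeholder path and `cascade` is assumed to
 * be a previously loaded ccv_icf_classifier_cascade_t*. */
ccv_dense_matrix_t* image = 0;
ccv_read("street.png", &image, CCV_IO_ANY_FILE | CCV_IO_RGB_COLOR);
ccv_array_t* seq = ccv_icf_detect_objects(image, &cascade, 1, ccv_icf_default_params);
/* ... iterate seq as ccv_comp_t entries, then: */
ccv_array_free(seq);
ccv_matrix_free(image);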
/** @} */
/* SCD: SURF-Cascade Detector
* This is a variant of VJ framework for object detection
* Read: Learning SURF Cascade for Fast and Accurate Object Detection
*/
/**
* @defgroup ccv_scd surf-cascade detection
* @{
*/
typedef struct {
int sx[4];
int sy[4];
int dx[4];
int dy[4];
float bias;
float w[32];
ccv-src/lib/ccv.h
float beta[6];
float threshold;
} ccv_scd_decision_tree_t;
typedef struct {
int count;
ccv_margin_t margin;
ccv_size_t size;
ccv_scd_stump_classifier_t* classifiers;
// the last stage classifier is a hybrid of scd feature with icf-like feature
// this is trained as soft-cascade classifier, and select between a depth-2 decision tree
// or the scd feature.
struct {
int count;
ccv_scd_decision_tree_t* tree;
} decision;
} ccv_scd_classifier_cascade_t;
typedef struct {
int min_neighbors; /**< 0: no grouping afterwards. 1: group objects that intersect each other. > 1: group objects that intersect each other, and only pass those that have at least **min_neighbors** intersecting objects. */
int step_through; /**< The step size for detection. */
int interval; /**< Interval images between the full size image and the half size one. e.g. 2 will generate 2 images in between full size image and half size one: image with full size, image with 5/6 size, image with 2/3 size, image with 1/2 size. */
ccv_size_t size; /**< The smallest object size that will be interesting to us. */
} ccv_scd_param_t;
typedef struct {
int boosting; /**< How many stages of boosting should be performed. */
ccv_size_t size; /**< What's the window size of the final classifier. */
struct {
ccv_size_t base; /**< [feature.base] A **ccv_size_t** structure defines the minimal feature dimensions. */
int range_through; /**< [feature.range_through] The step size to increase feature dimensions. */
int step_through; /**< [feature.step_through] The step size to move to cover the whole window size. */
} feature;
struct {
float hit_rate; /**< [stop_criteria.hit_rate] The targeted hit rate for each stage of classifier. */
float false_positive_rate; /**< [stop_criteria.false_positive_rate] The targeted false positive rate for each stage of classifier. */
float accu_false_positive_rate; /**< [stop_criteria.accu_false_positive_rate] The targeted accumulative false positive rate for classifier cascade, the training will be terminated once the accumulative false positive rate target reached. */
float auc_crit; /**< [stop_criteria.auc_crit] The epsilon to decide if auc (area under curve) can no longer be improved. Once auc can no longer be improved and the targeted false positive rate reached, this stage of training will be terminated and ...
int maximum_feature; /**< [stop_criteria.maximum_feature] Maximum number of features one stage can have. */
int prune_stage; /**< [stop_criteria.prune_stage] How many stages will act as "prune" stage, which means will take minimal effort to prune as much negative areas as possible. */
int prune_feature; /**< [stop_criteria.prune_feature] How many features a prune stage should have, it should be a very small number to enable efficient pruning. */
} stop_criteria;
double weight_trimming; /**< Only consider examples with weights in this percentile for training; this avoids considering examples with tiny weights. */
double C; /**< The C parameter to train the weak linear SVM classifier. */
int grayscale; /**< To train the classifier with grayscale image. */
} ccv_scd_train_param_t;
extern const ccv_scd_param_t ccv_scd_default_params;
/**
* Create a new SCD classifier cascade from given positive examples and background images. This function has a hard dependency on [GSL](http://www.gnu.org/software/gsl/).
* @param posfiles An array of **ccv_file_info_t** that gives the positive examples.
* @param hard_mine An array of **ccv_file_info_t** that gives images that don't contain any positive examples (for example, to train a face detector, these are images that don't contain any faces).
* @param negative_count Number of negative examples that is harvested from background images.
* @param filename The file that saves both progress and final classifier, this will be in sqlite3 database format.
* @param params A **ccv_scd_train_param_t** that defines various aspects of the training function.
* @return The trained SCD classifier cascade.
*/
CCV_WARN_UNUSED(ccv_scd_classifier_cascade_t*) ccv_scd_classifier_cascade_new(ccv_array_t* posfiles, ccv_array_t* hard_mine, int negative_count, const char* filename, ccv_scd_train_param_t params);
/**
* Write SCD classifier cascade to a file.
* @param cascade The SCD classifier cascade.
* @param filename The file that will be written to, it is in sqlite3 database format.
*/
void ccv_scd_classifier_cascade_write(ccv_scd_classifier_cascade_t* cascade, const char* filename);
/**
* Read SCD classifier cascade from file.
* @param filename The file that contains a SCD classifier cascade, it is in sqlite3 database format.
* @return A classifier cascade, 0 returned if no valid classifier cascade available.
*/
CCV_WARN_UNUSED(ccv_scd_classifier_cascade_t*) ccv_scd_classifier_cascade_read(const char* filename);
/**
* Free up the memory of SCD classifier cascade.
* @param cascade The SCD classifier cascade.
*/
void ccv_scd_classifier_cascade_free(ccv_scd_classifier_cascade_t* cascade);
/**
* Generate an 8-channel output matrix which extracts SURF features (dx, dy, |dx|, |dy|, du, dv, |du|, |dv|) from the input. If the input is a multi-channel matrix (such as RGB), it will pick the strongest responses among those channels.
* @param a The input matrix.
* @param b The output matrix.
* @param type The type of output matrix, if 0, ccv will try to match the input matrix for appropriate type.
*/
void ccv_scd(ccv_dense_matrix_t* a, ccv_dense_matrix_t** b, int type);
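/* A minimal sketch (not part of ccv.h): extracting the 8-channel SURF response matrix for a
 * previously loaded image. */
ccv_dense_matrix_t* surf = 0;
ccv_scd(image, &surf, 0); /* type 0 lets ccv pick a suitable output type */
/* ... inspect surf ... */
ccv_matrix_free(surf);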
/**
* Using a SCD classifier cascade to detect objects in a given image. If you have several classifier cascades, it is better to use them in one method call. In this way, ccv will try to optimize the overall performance.
* @param a The input image.
* @param cascades An array of classifier cascades.
* @param count How many classifier cascades you've passed in.
* @param params A **ccv_scd_param_t** structure that defines various aspects of the detector.
* @return A **ccv_array_t** of **ccv_comp_t** with detection results.
*/
CCV_WARN_UNUSED(ccv_array_t*) ccv_scd_detect_objects(ccv_dense_matrix_t* a, ccv_scd_classifier_cascade_t** cascades, int count, ccv_scd_param_t params);
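/* A minimal SCD detection sketch (not part of ccv.h), following scddetect.c above;
 * "face.sqlite3" and "group.jpg" are placeholder paths. */
ccv_scd_classifier_cascade_t* cascade = ccv_scd_classifier_cascade_read("face.sqlite3");
ccv_dense_matrix_t* image = 0;
ccv_read("group.jpg", &image, CCV_IO_RGB_COLOR | CCV_IO_ANY_FILE);
ccv_scd_param_t params = ccv_scd_default_params;
params.size = ccv_size(24, 24); /* smallest object size of interest */
ccv_array_t* faces = ccv_scd_detect_objects(image, &cascade, 1, params);
/* ... iterate faces as ccv_comp_t entries, then: */
ccv_array_free(faces);
ccv_matrix_free(image);
ccv_scd_classifier_cascade_free(cascade);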
/** @} */
/* categorization types and methods for training */
enum {
CCV_CATEGORIZED_DENSE_MATRIX = 0x01,
CCV_CATEGORIZED_FILE = 0x02,
};
typedef struct {
ccv-src/lib/ccv_bbf.c
unsigned char* u8[] = { negdata[i], negdata[i] + isizs0, negdata[i] + isizs01 };
float sum = 0;
float* alpha = classifier->alpha;
ccv_bbf_feature_t* feature = classifier->feature;
for (j = 0; j < classifier->count; ++j, alpha += 2, ++feature)
sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)];
neval[i] = sum;
}
}
static int _ccv_prune_positive_data(ccv_bbf_classifier_cascade_t* cascade, unsigned char** posdata, int posnum, ccv_size_t size)
{
float* peval = (float*)ccmalloc(posnum * sizeof(float));
int i, j, k, rpos = posnum;
for (i = 0; i < cascade->count; i++)
{
_ccv_bbf_eval_data(cascade->stage_classifier + i, posdata, rpos, 0, 0, size, peval, 0);
k = 0;
for (j = 0; j < rpos; j++)
if (peval[j] >= cascade->stage_classifier[i].threshold)
{
posdata[k] = posdata[j];
++k;
} else {
ccfree(posdata[j]);
}
rpos = k;
}
ccfree(peval);
return rpos;
}
static int _ccv_prepare_background_data(ccv_bbf_classifier_cascade_t* cascade, char** bgfiles, int bgnum, unsigned char** negdata, int negnum)
{
int t, i, j, k, q;
int negperbg;
int negtotal = 0;
int steps[] = { _ccv_width_padding(cascade->size.width),
_ccv_width_padding(cascade->size.width >> 1),
_ccv_width_padding(cascade->size.width >> 2) };
int isizs0 = steps[0] * cascade->size.height;
int isizs1 = steps[1] * (cascade->size.height >> 1);
int isizs2 = steps[2] * (cascade->size.height >> 2);
int* idcheck = (int*)ccmalloc(negnum * sizeof(int));
gsl_rng_env_setup();
gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
gsl_rng_set(rng, (unsigned long int)idcheck);
ccv_size_t imgsz = cascade->size;
int rneg = negtotal;
for (t = 0; negtotal < negnum; t++)
{
PRINT(CCV_CLI_INFO, "preparing negative data ... 0%%");
for (i = 0; i < bgnum; i++)
{
negperbg = (t < 2) ? (negnum - negtotal) / (bgnum - i) + 1 : negnum - negtotal;
ccv_dense_matrix_t* image = 0;
ccv_read(bgfiles[i], &image, CCV_IO_GRAY | CCV_IO_ANY_FILE);
if (image == 0)
{
PRINT(CCV_CLI_ERROR, "\n%s file corrupted\n", bgfiles[i]);
continue;
}
/* only dereference image after the null check above */
assert((image->type & CCV_C1) && (image->type & CCV_8U));
if (t % 2 != 0)
ccv_flip(image, 0, 0, CCV_FLIP_X);
if (t % 4 >= 2)
ccv_flip(image, 0, 0, CCV_FLIP_Y);
ccv_bbf_param_t params = { .interval = 3, .min_neighbors = 0, .accurate = 1, .flags = 0, .size = cascade->size };
ccv_array_t* detected = ccv_bbf_detect_objects(image, &cascade, 1, params);
memset(idcheck, 0, ccv_min(detected->rnum, negperbg) * sizeof(int));
for (j = 0; j < ccv_min(detected->rnum, negperbg); j++)
{
int r = gsl_rng_uniform_int(rng, detected->rnum);
int flag = 1;
ccv_rect_t* rect = (ccv_rect_t*)ccv_array_get(detected, r);
while (flag) {
flag = 0;
for (k = 0; k < j; k++)
if (r == idcheck[k])
ccv-src/lib/ccv_bbf.c
unsigned char* u8s2 = negdata[negtotal] + isizs0 + isizs1;
unsigned char* u8[] = { u8s0, u8s1, u8s2 };
memcpy(u8s0, imgs0->data.u8, imgs0->rows * imgs0->step);
ccv_matrix_free(imgs0);
memcpy(u8s1, imgs1->data.u8, imgs1->rows * imgs1->step);
ccv_matrix_free(imgs1);
memcpy(u8s2, imgs2->data.u8, imgs2->rows * imgs2->step);
ccv_matrix_free(imgs2);
flag = 1;
ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier;
for (k = 0; k < cascade->count; ++k, ++classifier)
{
float sum = 0;
float* alpha = classifier->alpha;
ccv_bbf_feature_t* feature = classifier->feature;
for (q = 0; q < classifier->count; ++q, alpha += 2, ++feature)
sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)];
if (sum < classifier->threshold)
{
flag = 0;
break;
ccv-src/lib/ccv_bbf.c
int i;
int isizs012 = _ccv_width_padding(size.width) * size.height +
_ccv_width_padding(size.width >> 1) * (size.height >> 1) +
_ccv_width_padding(size.width >> 2) * (size.height >> 2);
for (i = 0; i < negnum; i++)
fwrite(negdata[i], 1, isizs012, w);
fclose(w);
return 0;
}
static int _ccv_resume_bbf_cascade_training_state(const char* file, int* i, int* k, int* bg, double* pw, double* nw, int posnum, int negnum)
{
int stat = 0;
FILE* r = fopen(file, "r");
if (r == 0) return -1;
stat |= fscanf(r, "%d %d %d", i, k, bg);
int j;
union { double db; int i[2]; } dbi;
for (j = 0; j < posnum; j++)
{
stat |= fscanf(r, "%d %d", &dbi.i[0], &dbi.i[1]);
ccv-src/lib/ccv_bbf.c
for (j = 0; j < negnum; ++j)
{
dbi.db = nw[j];
fprintf(w, "%d %d ", dbi.i[0], dbi.i[1]);
}
fprintf(w, "\n");
fclose(w);
return 0;
}
void ccv_bbf_classifier_cascade_new(ccv_dense_matrix_t** posimg, int posnum, char** bgfiles, int bgnum, int negnum, ccv_size_t size, const char* dir, ccv_bbf_new_param_t params)
{
int i, j, k;
/* allocate memory for usage */
ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t));
cascade->count = 0;
cascade->size = size;
cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(sizeof(ccv_bbf_stage_classifier_t));
unsigned char** posdata = (unsigned char**)ccmalloc(posnum * sizeof(unsigned char*));
unsigned char** negdata = (unsigned char**)ccmalloc(negnum * sizeof(unsigned char*));
double* pw = (double*)ccmalloc(posnum * sizeof(double));
double* nw = (double*)ccmalloc(negnum * sizeof(double));
float* peval = (float*)ccmalloc(posnum * sizeof(float));
float* neval = (float*)ccmalloc(negnum * sizeof(float));
double inv_balance_k = 1. / params.balance_k;
/* balance factor k, and weighted with 0.01 */
params.balance_k *= 0.01;
inv_balance_k *= 0.01;
int steps[] = { _ccv_width_padding(cascade->size.width),
_ccv_width_padding(cascade->size.width >> 1),
_ccv_width_padding(cascade->size.width >> 2) };
int isizs0 = steps[0] * cascade->size.height;
int isizs01 = isizs0 + steps[1] * (cascade->size.height >> 1);
i = 0;
k = 0;
int bg = 0;
int cacheK = 10;
/* state resume code */
char buf[1024];
sprintf(buf, "%s/stat.txt", dir);
_ccv_resume_bbf_cascade_training_state(buf, &i, &k, &bg, pw, nw, posnum, negnum);
if (i > 0)
{
cascade->count = i;
ccfree(cascade->stage_classifier);
cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(i * sizeof(ccv_bbf_stage_classifier_t));
for (j = 0; j < i; j++)
{
sprintf(buf, "%s/stage-%d.txt", dir, j);
_ccv_read_bbf_stage_classifier(buf, &cascade->stage_classifier[j]);
}
}
if (k > 0)
cacheK = k;
int rpos, rneg = 0;
if (bg)
{
sprintf(buf, "%s/negs.txt", dir);
_ccv_read_background_data(buf, negdata, &rneg, cascade->size);
}
for (; i < params.layer; i++)
{
if (!bg)
{
rneg = _ccv_prepare_background_data(cascade, bgfiles, bgnum, negdata, negnum);
/* save state of background data */
sprintf(buf, "%s/negs.txt", dir);
_ccv_write_background_data(buf, negdata, rneg, cascade->size);
bg = 1;
}
double totalw;
/* save state of cascade : level, weight etc. */
sprintf(buf, "%s/stat.txt", dir);
_ccv_save_bbf_cacade_training_state(buf, i, k, bg, pw, nw, posnum, negnum);
ccv_bbf_stage_classifier_t classifier;
if (k > 0)
{
/* resume state of classifier */
sprintf( buf, "%s/stage-%d.txt", dir, i );
_ccv_read_bbf_stage_classifier(buf, &classifier);
} else {
/* initialize classifier */
for (j = 0; j < posnum; j++)
pw[j] = params.balance_k;
for (j = 0; j < rneg; j++)
nw[j] = inv_balance_k;
classifier.count = k;
classifier.threshold = 0;
classifier.feature = (ccv_bbf_feature_t*)ccmalloc(cacheK * sizeof(ccv_bbf_feature_t));
classifier.alpha = (float*)ccmalloc(cacheK * 2 * sizeof(float));
}
_ccv_prepare_positive_data(posimg, posdata, cascade->size, posnum);
rpos = _ccv_prune_positive_data(cascade, posdata, posnum, cascade->size);
PRINT(CCV_CLI_INFO, "%d postivie data and %d negative data in training\n", rpos, rneg);
/* reweight to 1.00 */
totalw = 0;
for (j = 0; j < rpos; j++)
totalw += pw[j];
for (j = 0; j < rneg; j++)
totalw += nw[j];
for (j = 0; j < rpos; j++)
pw[j] = pw[j] / totalw;
for (j = 0; j < rneg; j++)
nw[j] = nw[j] / totalw;
for (; ; k++)
{
/* get overall true-positive, false-positive rate and threshold */
double tp = 0, fp = 0, etp = 0, efp = 0;
_ccv_bbf_eval_data(&classifier, posdata, rpos, negdata, rneg, cascade->size, peval, neval);
_ccv_sort_32f(peval, rpos, 0);
classifier.threshold = peval[(int)((1. - params.pos_crit) * rpos)] - 1e-6;
for (j = 0; j < rpos; j++)
{
if (peval[j] >= 0)
++tp;
if (peval[j] >= classifier.threshold)
++etp;
}
tp /= rpos; etp /= rpos;
ccv-src/lib/ccv_bbf.c
_ccv_save_bbf_cacade_training_state(buf, i, k, bg, pw, nw, posnum, negnum);
}
if (etp > params.pos_crit && efp < params.neg_crit)
break;
/* TODO: more post-process is needed in here */
/* select the best feature in current distribution through genetic algorithm optimization */
ccv_bbf_feature_t best;
if (params.optimizer == CCV_BBF_GENETIC_OPT)
{
best = _ccv_bbf_genetic_optimize(posdata, rpos, negdata, rneg, params.feature_number, cascade->size, pw, nw);
} else if (params.optimizer == CCV_BBF_FLOAT_OPT) {
best = _ccv_bbf_convex_optimize(posdata, rpos, negdata, rneg, 0, cascade->size, pw, nw);
} else {
best = _ccv_bbf_genetic_optimize(posdata, rpos, negdata, rneg, params.feature_number, cascade->size, pw, nw);
best = _ccv_bbf_convex_optimize(posdata, rpos, negdata, rneg, &best, cascade->size, pw, nw);
}
double err = _ccv_bbf_error_rate(&best, posdata, rpos, negdata, rneg, cascade->size, pw, nw);
double rw = (1 - err) / err;
totalw = 0;
/* reweight */
for (j = 0; j < rpos; j++)
{
unsigned char* u8[] = { posdata[j], posdata[j] + isizs0, posdata[j] + isizs01 };
if (!_ccv_run_bbf_feature(&best, steps, u8))
pw[j] *= rw;
pw[j] *= params.balance_k;
totalw += pw[j];
ccv-src/lib/ccv_bbf.c
ccfree(classifier.alpha);
classifier.feature = feature;
classifier.alpha = alpha;
cacheK *= 2;
}
/* setup new feature */
classifier.feature[k] = best;
classifier.alpha[k * 2] = -c;
classifier.alpha[k * 2 + 1] = c;
}
cascade->count = i + 1;
ccv_bbf_stage_classifier_t* stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t));
memcpy(stage_classifier, cascade->stage_classifier, i * sizeof(ccv_bbf_stage_classifier_t));
ccfree(cascade->stage_classifier);
stage_classifier[i] = classifier;
cascade->stage_classifier = stage_classifier;
k = 0;
bg = 0;
for (j = 0; j < rpos; j++)
ccfree(posdata[j]);
for (j = 0; j < rneg; j++)
ccfree(negdata[j]);
}
ccfree(neval);
ccfree(peval);
ccfree(nw);
ccfree(pw);
ccfree(negdata);
ccfree(posdata);
ccfree(cascade);
}
#else
void ccv_bbf_classifier_cascade_new(ccv_dense_matrix_t** posimg, int posnum, char** bgfiles, int bgnum, int negnum, ccv_size_t size, const char* dir, ccv_bbf_new_param_t params)
{
fprintf(stderr, " ccv_bbf_classifier_cascade_new requires libgsl support, please compile ccv with libgsl.\n");
}
#endif
static int _ccv_is_equal(const void* _r1, const void* _r2, void* data)
{
const ccv_comp_t* r1 = (const ccv_comp_t*)_r1;
const ccv_comp_t* r2 = (const ccv_comp_t*)_r2;
int distance = (int)(r1->rect.width * 0.25 + 0.5);
return r2->rect.x <= r1->rect.x + distance &&
ccv-src/lib/ccv_bbf.c
return r2->classification.id == r1->classification.id &&
r2->rect.x <= r1->rect.x + distance &&
r2->rect.x >= r1->rect.x - distance &&
r2->rect.y <= r1->rect.y + distance &&
r2->rect.y >= r1->rect.y - distance &&
r2->rect.width <= (int)(r1->rect.width * 1.5 + 0.5) &&
(int)(r2->rect.width * 1.5 + 0.5) >= r1->rect.width;
}
ccv_array_t* ccv_bbf_detect_objects(ccv_dense_matrix_t* a, ccv_bbf_classifier_cascade_t** _cascade, int count, ccv_bbf_param_t params)
{
int hr = a->rows / params.size.height;
int wr = a->cols / params.size.width;
double scale = pow(2., 1. / (params.interval + 1.));
int next = params.interval + 1;
int scale_upto = (int)(log((double)ccv_min(hr, wr)) / log(scale));
ccv_dense_matrix_t** pyr = (ccv_dense_matrix_t**)alloca((scale_upto + next * 2) * 4 * sizeof(ccv_dense_matrix_t*));
memset(pyr, 0, (scale_upto + next * 2) * 4 * sizeof(ccv_dense_matrix_t*));
if (params.size.height != _cascade[0]->size.height || params.size.width != _cascade[0]->size.width)
ccv_resample(a, &pyr[0], 0, a->rows * _cascade[0]->size.height / params.size.height, a->cols * _cascade[0]->size.width / params.size.width, CCV_INTER_AREA);
else
pyr[0] = a;
int i, j, k, t, x, y, q;
for (i = 1; i < ccv_min(params.interval + 1, scale_upto + next * 2); i++)
ccv_resample(pyr[0], &pyr[i * 4], 0, (int)(pyr[0]->rows / pow(scale, i)), (int)(pyr[0]->cols / pow(scale, i)), CCV_INTER_AREA);
for (i = next; i < scale_upto + next * 2; i++)
ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4], 0, 0, 0);
if (params.accurate)
for (i = next * 2; i < scale_upto + next * 2; i++)
{
ccv-src/lib/ccv_bbf.c
ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 2], 0, 0, 1);
ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 3], 0, 1, 1);
}
ccv_array_t* idx_seq;
ccv_array_t* seq = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
ccv_array_t* seq2 = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
ccv_array_t* result_seq = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
/* detect in multi scale */
for (t = 0; t < count; t++)
{
ccv_bbf_classifier_cascade_t* cascade = _cascade[t];
float scale_x = (float) params.size.width / (float) cascade->size.width;
float scale_y = (float) params.size.height / (float) cascade->size.height;
ccv_array_clear(seq);
for (i = 0; i < scale_upto; i++)
{
int dx[] = {0, 1, 0, 1};
int dy[] = {0, 0, 1, 1};
int i_rows = pyr[i * 4 + next * 8]->rows - (cascade->size.height >> 2);
int steps[] = { pyr[i * 4]->step, pyr[i * 4 + next * 4]->step, pyr[i * 4 + next * 8]->step };
int i_cols = pyr[i * 4 + next * 8]->cols - (cascade->size.width >> 2);
int paddings[] = { pyr[i * 4]->step * 4 - i_cols * 4,
pyr[i * 4 + next * 4]->step * 2 - i_cols * 2,
pyr[i * 4 + next * 8]->step - i_cols };
for (q = 0; q < (params.accurate ? 4 : 1); q++)
{
unsigned char* u8[] = { pyr[i * 4]->data.u8 + dx[q] * 2 + dy[q] * pyr[i * 4]->step * 2, pyr[i * 4 + next * 4]->data.u8 + dx[q] + dy[q] * pyr[i * 4 + next * 4]->step, pyr[i * 4 + next * 8 + q]->data.u8 };
for (y = 0; y < i_rows; y++)
{
for (x = 0; x < i_cols; x++)
{
float sum;
int flag = 1;
ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier;
for (j = 0; j < cascade->count; ++j, ++classifier)
{
sum = 0;
float* alpha = classifier->alpha;
ccv_bbf_feature_t* feature = classifier->feature;
for (k = 0; k < classifier->count; ++k, alpha += 2, ++feature)
sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)];
if (sum < classifier->threshold)
{
flag = 0;
break;
}
}
if (flag)
{
ccv_comp_t comp;
comp.rect = ccv_rect((int)((x * 4 + dx[q] * 2) * scale_x + 0.5), (int)((y * 4 + dy[q] * 2) * scale_y + 0.5), (int)(cascade->size.width * scale_x + 0.5), (int)(cascade->size.height * scale_y + 0.5));
comp.neighbors = 1;
comp.classification.id = t;
comp.classification.confidence = sum;
ccv_array_push(seq, &comp);
}
u8[0] += 4;
u8[1] += 2;
u8[2] += 1;
}
u8[0] += paddings[0];
ccv-src/lib/ccv_bbf.c
for (i = 1; i < scale_upto + next * 2; i++)
ccv_matrix_free(pyr[i * 4]);
if (params.accurate)
for (i = next * 2; i < scale_upto + next * 2; i++)
{
ccv_matrix_free(pyr[i * 4 + 1]);
ccv_matrix_free(pyr[i * 4 + 2]);
ccv_matrix_free(pyr[i * 4 + 3]);
}
if (params.size.height != _cascade[0]->size.height || params.size.width != _cascade[0]->size.width)
ccv_matrix_free(pyr[0]);
return result_seq2;
}
ccv_bbf_classifier_cascade_t* ccv_bbf_read_classifier_cascade(const char* directory)
{
char buf[1024];
sprintf(buf, "%s/cascade.txt", directory);
int s, i;
FILE* r = fopen(buf, "r");
if (r == 0)
return 0;
ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t));
s = fscanf(r, "%d %d %d", &cascade->count, &cascade->size.width, &cascade->size.height);
assert(s > 0);
cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t));
for (i = 0; i < cascade->count; i++)
{
sprintf(buf, "%s/stage-%d.txt", directory, i);
if (_ccv_read_bbf_stage_classifier(buf, &cascade->stage_classifier[i]) < 0)
{
cascade->count = i;
break;
}
}
fclose(r);
return cascade;
}
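As a quick aside, here is a minimal sketch of loading a cascade in the directory format parsed above (a directory holding cascade.txt plus per-stage stage-0.txt .. stage-(count-1).txt); it assumes ccv.h is on the include path and is illustrative only, not part of the library:
#include <ccv.h>
#include <stdio.h>

int main(int argc, char** argv)
{
	if (argc < 2)
		return 1;
	/* argv[1] is a directory holding cascade.txt and stage-0.txt .. stage-(count-1).txt */
	ccv_bbf_classifier_cascade_t* cascade = ccv_bbf_read_classifier_cascade(argv[1]);
	if (cascade == 0)
	{
		fprintf(stderr, "cannot read a BBF cascade from %s\n", argv[1]);
		return 1;
	}
	printf("loaded %d stage(s) at %dx%d\n", cascade->count, cascade->size.width, cascade->size.height);
	ccv_bbf_classifier_cascade_free(cascade);
	return 0;
}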
ccv_bbf_classifier_cascade_t* ccv_bbf_classifier_cascade_read_binary(char* s)
{
int i;
ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t));
memcpy(&cascade->count, s, sizeof(cascade->count)); s += sizeof(cascade->count);
memcpy(&cascade->size.width, s, sizeof(cascade->size.width)); s += sizeof(cascade->size.width);
memcpy(&cascade->size.height, s, sizeof(cascade->size.height)); s += sizeof(cascade->size.height);
ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t));
for (i = 0; i < cascade->count; i++, classifier++)
{
memcpy(&classifier->count, s, sizeof(classifier->count)); s += sizeof(classifier->count);
memcpy(&classifier->threshold, s, sizeof(classifier->threshold)); s += sizeof(classifier->threshold);
classifier->feature = (ccv_bbf_feature_t*)ccmalloc(classifier->count * sizeof(ccv_bbf_feature_t));
classifier->alpha = (float*)ccmalloc(classifier->count * 2 * sizeof(float));
memcpy(classifier->feature, s, classifier->count * sizeof(ccv_bbf_feature_t)); s += classifier->count * sizeof(ccv_bbf_feature_t);
memcpy(classifier->alpha, s, classifier->count * 2 * sizeof(float)); s += classifier->count * 2 * sizeof(float);
}
return cascade;
}
int ccv_bbf_classifier_cascade_write_binary(ccv_bbf_classifier_cascade_t* cascade, char* s, int slen)
{
int i;
int len = sizeof(cascade->count) + sizeof(cascade->size.width) + sizeof(cascade->size.height);
ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier;
for (i = 0; i < cascade->count; i++, classifier++)
len += sizeof(classifier->count) + sizeof(classifier->threshold) + classifier->count * sizeof(ccv_bbf_feature_t) + classifier->count * 2 * sizeof(float);
if (slen >= len)
{
memcpy(s, &cascade->count, sizeof(cascade->count)); s += sizeof(cascade->count);
memcpy(s, &cascade->size.width, sizeof(cascade->size.width)); s += sizeof(cascade->size.width);
memcpy(s, &cascade->size.height, sizeof(cascade->size.height)); s += sizeof(cascade->size.height);
classifier = cascade->stage_classifier;
for (i = 0; i < cascade->count; i++, classifier++)
{
memcpy(s, &classifier->count, sizeof(classifier->count)); s += sizeof(classifier->count);
memcpy(s, &classifier->threshold, sizeof(classifier->threshold)); s += sizeof(classifier->threshold);
memcpy(s, classifier->feature, classifier->count * sizeof(ccv_bbf_feature_t)); s += classifier->count * sizeof(ccv_bbf_feature_t);
memcpy(s, classifier->alpha, classifier->count * 2 * sizeof(float)); s += classifier->count * 2 * sizeof(float);
}
}
return len;
}
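Because ccv_bbf_classifier_cascade_write_binary always returns the number of bytes required and only copies when slen is large enough, a caller can probe the size first and then serialize. A minimal round-trip sketch, assuming ccv.h and a cascade loaded elsewhere:
#include <ccv.h>
#include <stdlib.h>

/* serialize a cascade into a freshly allocated buffer and parse it back (illustrative only) */
static ccv_bbf_classifier_cascade_t* cascade_roundtrip(ccv_bbf_classifier_cascade_t* cascade)
{
	int len = ccv_bbf_classifier_cascade_write_binary(cascade, 0, 0); /* probe call: nothing is written */
	char* buf = (char*)malloc(len);
	ccv_bbf_classifier_cascade_write_binary(cascade, buf, len); /* second call does the actual write */
	ccv_bbf_classifier_cascade_t* copy = ccv_bbf_classifier_cascade_read_binary(buf);
	free(buf);
	return copy; /* release with ccv_bbf_classifier_cascade_free when done */
}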
void ccv_bbf_classifier_cascade_free(ccv_bbf_classifier_cascade_t* cascade)
{
int i;
for (i = 0; i < cascade->count; ++i)
{
ccfree(cascade->stage_classifier[i].feature);
ccfree(cascade->stage_classifier[i].alpha);
}
ccfree(cascade->stage_classifier);
ccfree(cascade);
}
ccv-src/lib/ccv_dpm.c
_ccv_dpm_write_checkpoint(model, 1, checkpoint);
PRINT(CCV_CLI_INFO, "done\n");
remove(gradient_progress_checkpoint);
_ccv_dpm_mixture_model_cleanup(model);
ccfree(model);
gsl_rng_free(rng);
}
#else
void ccv_dpm_mixture_model_new(char** posfiles, ccv_rect_t* bboxes, int posnum, char** bgfiles, int bgnum, int negnum, const char* dir, ccv_dpm_new_param_t params)
{
fprintf(stderr, " ccv_dpm_classifier_cascade_new requires libgsl and liblinear support, please compile ccv with them.\n");
}
#endif
#else
void ccv_dpm_mixture_model_new(char** posfiles, ccv_rect_t* bboxes, int posnum, char** bgfiles, int bgnum, int negnum, const char* dir, ccv_dpm_new_param_t params)
{
fprintf(stderr, " ccv_dpm_classifier_cascade_new requires libgsl and liblinear support, please compile ccv with them.\n");
}
#endif
static int _ccv_is_equal(const void* _r1, const void* _r2, void* data)
{
const ccv_root_comp_t* r1 = (const ccv_root_comp_t*)_r1;
const ccv_root_comp_t* r2 = (const ccv_root_comp_t*)_r2;
int distance = (int)(ccv_min(r1->rect.width, r1->rect.height) * 0.25 + 0.5);
return r2->rect.x <= r1->rect.x + distance &&
ccv-src/lib/ccv_icf.c
float rate;
} ccv_icf_example_state_t;
typedef struct {
uint8_t classifier:1;
uint8_t positives:1;
uint8_t negatives:1;
uint8_t features:1;
uint8_t example_state:1;
uint8_t precomputed:1;
} ccv_icf_classifier_cascade_persistence_state_t;
typedef struct {
uint32_t index;
float value;
} ccv_icf_value_index_t;
typedef struct {
ccv_function_state_reserve_field;
int i;
int bootstrap;
ccv_icf_new_param_t params;
ccv_icf_classifier_cascade_t* classifier;
ccv_array_t* positives;
ccv_array_t* negatives;
ccv_icf_feature_t* features;
ccv_size_t size;
ccv_margin_t margin;
ccv_icf_example_state_t* example_state;
uint8_t* precomputed;
ccv_icf_classifier_cascade_persistence_state_t x;
} ccv_icf_classifier_cascade_state_t;
static void _ccv_icf_write_classifier_cascade_state(ccv_icf_classifier_cascade_state_t* state, const char* directory)
{
char filename[1024];
snprintf(filename, 1024, "%s/state", directory);
FILE* w = fopen(filename, "w+");
fprintf(w, "%d %d %d\n", state->line_no, state->i, state->bootstrap);
fprintf(w, "%d %d %d\n", state->params.feature_size, state->size.width, state->size.height);
fprintf(w, "%d %d %d %d\n", state->margin.left, state->margin.top, state->margin.right, state->margin.bottom);
fclose(w);
int i, q;
if (!state->x.positives)
ccv-src/lib/ccv_icf.c
{
size_t step = (3 * (state->positives->rnum + state->negatives->rnum) + 3) & -4;
snprintf(filename, 1024, "%s/precomputed", directory);
w = fopen(filename, "wb+");
fwrite(state->precomputed, 1, step * state->params.feature_size, w);
fclose(w);
state->x.precomputed = 1;
}
if (!state->x.classifier)
{
snprintf(filename, 1024, "%s/cascade", directory);
ccv_icf_write_classifier_cascade(state->classifier, filename);
state->x.classifier = 1;
}
}
static void _ccv_icf_read_classifier_cascade_state(const char* directory, ccv_icf_classifier_cascade_state_t* state)
{
char filename[1024];
state->line_no = state->i = 0;
state->bootstrap = 0;
snprintf(filename, 1024, "%s/state", directory);
FILE* r = fopen(filename, "r");
if (r)
{
int feature_size;
fscanf(r, "%d %d %d", &state->line_no, &state->i, &state->bootstrap);
ccv-src/lib/ccv_icf.c
snprintf(filename, 1024, "%s/precomputed", directory);
r = fopen(filename, "rb");
if (r)
{
size_t step = (3 * (state->positives->rnum + state->negatives->rnum) + 3) & -4;
state->precomputed = (uint8_t*)ccmalloc(sizeof(uint8_t) * state->params.feature_size * step);
fread(state->precomputed, 1, step * state->params.feature_size, r);
fclose(r);
} else
state->precomputed = 0;
snprintf(filename, 1024, "%s/cascade", directory);
state->classifier = ccv_icf_read_classifier_cascade(filename);
if (!state->classifier)
{
state->classifier = (ccv_icf_classifier_cascade_t*)ccmalloc(sizeof(ccv_icf_classifier_cascade_t));
state->classifier->count = 0;
state->classifier->grayscale = state->params.grayscale;
state->classifier->weak_classifiers = (ccv_icf_decision_tree_t*)ccmalloc(sizeof(ccv_icf_decision_tree_t) * state->params.weak_classifier);
} else {
if (state->classifier->count < state->params.weak_classifier)
state->classifier->weak_classifiers = (ccv_icf_decision_tree_t*)ccrealloc(state->classifier->weak_classifiers, sizeof(ccv_icf_decision_tree_t) * state->params.weak_classifier);
}
}
#define less_than(s1, s2, aux) ((s1).value < (s2).value)
ccv-src/lib/ccv_icf.c
if (i >= posnum)
break;
}
ccv_matrix_free(image);
}
}
PRINT(CCV_CLI_INFO, "\n");
return positives;
}
static uint64_t* _ccv_icf_precompute_classifier_cascade(ccv_icf_classifier_cascade_t* cascade, ccv_array_t* positives)
{
int step = ((cascade->count - 1) >> 6) + 1;
uint64_t* precomputed = (uint64_t*)ccmalloc(sizeof(uint64_t) * positives->rnum * step);
uint64_t* result = precomputed;
int i, j;
for (i = 0; i < positives->rnum; i++)
{
ccv_dense_matrix_t* a = (ccv_dense_matrix_t*)(ccv_array_get(positives, i));
a->data.u8 = (uint8_t*)(a + 1);
ccv_dense_matrix_t* icf = 0;
ccv_icf(a, &icf, 0);
ccv_dense_matrix_t* sat = 0;
ccv_sat(icf, &sat, 0, CCV_PADDING_ZERO);
ccv_matrix_free(icf);
float* ptr = sat->data.f32;
int ch = CCV_GET_CHANNEL(sat->type);
for (j = 0; j < cascade->count; j++)
if (_ccv_icf_run_weak_classifier(cascade->weak_classifiers + j, ptr, sat->cols, ch, 1, 1))
precomputed[j >> 6] |= (1UL << (j & 63));
else
precomputed[j >> 6] &= ~(1UL << (j & 63));
ccv_matrix_free(sat);
precomputed += step;
}
return result;
}
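To make the bit packing above concrete with illustrative numbers: for a cascade of 100 weak classifiers, step = ((100 - 1) >> 6) + 1 = 2, so each positive example occupies two uint64_t words (16 bytes); the verdict of weak classifier j on an example is the single bit precomputed[j >> 6] & (1UL << (j & 63)), and 10,000 positives need only 10,000 * 16 = 160,000 bytes for the whole table.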
#define less_than(s1, s2, aux) ((s1) > (s2))
static CCV_IMPLEMENT_QSORT(_ccv_icf_threshold_rating, float, less_than)
#undef less_than
static void _ccv_icf_classifier_cascade_soft_with_validates(ccv_array_t* validates, ccv_icf_classifier_cascade_t* cascade, double min_accept)
{
int i, j;
int step = ((cascade->count - 1) >> 6) + 1;
uint64_t* precomputed = _ccv_icf_precompute_classifier_cascade(cascade, validates);
float* positive_rate = (float*)ccmalloc(sizeof(float) * validates->rnum);
uint64_t* computed = precomputed;
for (i = 0; i < validates->rnum; i++)
{
positive_rate[i] = 0;
for (j = 0; j < cascade->count; j++)
{
uint64_t accept = computed[j >> 6] & (1UL << (j & 63));
positive_rate[i] += cascade->weak_classifiers[j].weigh[!!accept];
}
computed += step;
}
_ccv_icf_threshold_rating(positive_rate, validates->rnum, 0);
float threshold = positive_rate[ccv_min((int)(min_accept * (validates->rnum + 0.5) - 0.5), validates->rnum - 1)];
ccfree(positive_rate);
computed = precomputed;
// compute the final acceptance of each validate with the final threshold
uint64_t* acceptance = (uint64_t*)cccalloc(((validates->rnum - 1) >> 6) + 1, sizeof(uint64_t));
int true_positives = 0;
for (i = 0; i < validates->rnum; i++)
{
float rate = 0;
for (j = 0; j < cascade->count; j++)
{
uint64_t accept = computed[j >> 6] & (1UL << (j & 63));
rate += cascade->weak_classifiers[j].weigh[!!accept];
}
if (rate >= threshold)
{
acceptance[i >> 6] |= (1UL << (i & 63));
++true_positives;
} else
acceptance[i >> 6] &= ~(1UL << (i & 63));
computed += step;
}
PRINT(CCV_CLI_INFO, " - at threshold %f, true positive rate: %f%%\n", threshold, (float)true_positives * 100 / validates->rnum);
float* rate = (float*)cccalloc(validates->rnum, sizeof(float));
for (j = 0; j < cascade->count; j++)
{
computed = precomputed;
for (i = 0; i < validates->rnum; i++)
{
uint64_t correct = computed[j >> 6] & (1UL << (j & 63));
rate[i] += cascade->weak_classifiers[j].weigh[!!correct];
computed += step;
}
float threshold = FLT_MAX;
// find a threshold that keeps all accepted validates still acceptable
for (i = 0; i < validates->rnum; i++)
{
uint64_t correct = acceptance[i >> 6] & (1UL << (i & 63));
if (correct && rate[i] < threshold)
threshold = rate[i];
}
cascade->weak_classifiers[j].threshold = threshold - 1e-10;
}
ccfree(rate);
ccfree(acceptance);
ccfree(precomputed);
}
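A worked example of the threshold selection above, with made-up numbers: for 1,000 validates and min_accept = 0.995, the ratings are sorted in descending order and the chosen index is ccv_min((int)(0.995 * 1000.5 - 0.5), 999) = 994, so the global threshold is the 995th-highest rating and about 99.5% of the validates score at or above it. Each per-weak-classifier threshold is then set a hair (1e-10) below the smallest running score among those still-accepted validates, so the soft cascade never rejects one of them early.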
typedef struct {
ccv_point_t point;
float sum;
} ccv_point_with_sum_t;
static void _ccv_icf_bootstrap_negatives(ccv_icf_classifier_cascade_t* cascade, ccv_array_t* negatives, gsl_rng* rng, ccv_array_t* bgfiles, int negnum, int grayscale, int spread, ccv_icf_param_t params)
{
#ifdef USE_DISPATCH
__block int i;
#else
int i;
#endif
#ifdef USE_DISPATCH
__block int fppi = 0, is = 0;
#else
int fppi = 0, is = 0;
ccv-src/lib/ccv_icf.c
double ratio = (double)(negnum - i) / bgfiles->rnum;
#ifdef USE_DISPATCH
dispatch_semaphore_t sem = dispatch_semaphore_create(1);
dispatch_apply(bgfiles->rnum, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^(size_t j) {
#else
size_t j;
for (j = 0; j < bgfiles->rnum; j++)
{
#endif
int k, x, y, q, p;
ccv_dense_matrix_t* a = (ccv_dense_matrix_t*)ccmalloc(ccv_compute_dense_matrix_size(cascade->size.height + 2, cascade->size.width + 2, (grayscale ? CCV_C1 : CCV_C3) | CCV_8U));
#ifdef USE_DISPATCH
dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER);
#endif
if (i >= negnum || (spread && ratio < 1 && gsl_rng_uniform(rng) > ratio))
{
ccfree(a);
#ifdef USE_DISPATCH
dispatch_semaphore_signal(sem);
return;
#else
ccv-src/lib/ccv_icf.c
PRINT(CCV_CLI_ERROR, "\n - %s: cannot be open, possibly corrupted\n", file_info->filename);
ccfree(a);
#ifdef USE_DISPATCH
gsl_rng_free(crng);
return;
#else
continue;
#endif
}
if (ccv_max(image->rows, image->cols) < 800 ||
image->rows <= (cascade->size.height - cascade->margin.top - cascade->margin.bottom) ||
image->cols <= (cascade->size.width - cascade->margin.left - cascade->margin.right)) // background is too small, blow it up to next scale
{
ccv_dense_matrix_t* blowup = 0;
ccv_sample_up(image, &blowup, 0, 0, 0);
ccv_matrix_free(image);
image = blowup;
}
if (image->rows <= (cascade->size.height - cascade->margin.top - cascade->margin.bottom) ||
image->cols <= (cascade->size.width - cascade->margin.left - cascade->margin.right)) // background is still too small, abort
{
ccv_matrix_free(image);
ccfree(a);
#ifdef USE_DISPATCH
gsl_rng_free(crng);
return;
#else
continue;
#endif
}
double scale = pow(2., 1. / (params.interval + 1.));
int next = params.interval + 1;
int scale_upto = (int)(log(ccv_min((double)image->rows / (cascade->size.height - cascade->margin.top - cascade->margin.bottom), (double)image->cols / (cascade->size.width - cascade->margin.left - cascade->margin.right))) / log(scale) - DBL_MIN) + ...
ccv_dense_matrix_t** pyr = (ccv_dense_matrix_t**)ccmalloc(scale_upto * sizeof(ccv_dense_matrix_t*));
memset(pyr, 0, scale_upto * sizeof(ccv_dense_matrix_t*));
#ifdef USE_DISPATCH
dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER);
#endif
++is; // how many images are scanned
#ifdef USE_DISPATCH
dispatch_semaphore_signal(sem);
#endif
if (t % 2 != 0)
ccv-src/lib/ccv_icf.c
#ifdef USE_DISPATCH
dispatch_semaphore_signal(sem);
#endif
ccv_matrix_free(pyr[q]);
continue;
}
#ifdef USE_DISPATCH
dispatch_semaphore_signal(sem);
#endif
ccv_dense_matrix_t* bordered = 0;
ccv_border(pyr[q], (ccv_matrix_t**)&bordered, 0, cascade->margin);
ccv_matrix_free(pyr[q]);
ccv_dense_matrix_t* icf = 0;
ccv_icf(bordered, &icf, 0);
ccv_dense_matrix_t* sat = 0;
ccv_sat(icf, &sat, 0, CCV_PADDING_ZERO);
ccv_matrix_free(icf);
assert(sat->rows == bordered->rows + 1 && sat->cols == bordered->cols + 1);
int ch = CCV_GET_CHANNEL(sat->type);
float* ptr = sat->data.f32 + sat->cols * ch;
ccv_array_t* seq = ccv_array_new(sizeof(ccv_point_with_sum_t), 64, 0);
for (y = 1; y < sat->rows - cascade->size.height - 2; y += params.step_through)
{
for (x = 1; x < sat->cols - cascade->size.width - 2; x += params.step_through)
{
int pass = 1;
float sum = 0;
for (p = 0; p < cascade->count; p++)
{
ccv_icf_decision_tree_t* weak_classifier = cascade->weak_classifiers + p;
int c = _ccv_icf_run_weak_classifier(weak_classifier, ptr, sat->cols, ch, x, 0);
sum += weak_classifier->weigh[c];
if (sum < weak_classifier->threshold)
{
pass = 0;
break;
}
}
if (pass)
{
ccv-src/lib/ccv_icf.c
fppi += seq->rnum; // how many detections we have in total
#ifdef USE_DISPATCH
dispatch_semaphore_signal(sem);
#endif
if (seq->rnum > 0)
{
gsl_ran_shuffle(crng, ccv_array_get(seq, 0), seq->rnum, seq->rsize);
/* so that we at least collect 10 from each scale */
for (p = 0; p < (spread ? ccv_min(10, seq->rnum) : seq->rnum); p++) // collect enough negatives from this scale
{
a = ccv_dense_matrix_new(cascade->size.height + 2, cascade->size.width + 2, (grayscale ? CCV_C1 : CCV_C3) | CCV_8U, a, 0);
ccv_point_with_sum_t* point = (ccv_point_with_sum_t*)ccv_array_get(seq, p);
ccv_slice(bordered, (ccv_matrix_t**)&a, 0, point->point.y, point->point.x, a->rows, a->cols);
assert(bordered->rows >= point->point.y + a->rows && bordered->cols >= point->point.x + a->cols);
a->sig = 0;
// verify that the data we sliced is a worthy negative
ccv_dense_matrix_t* icf = 0;
ccv_icf(a, &icf, 0);
ccv_dense_matrix_t* sat = 0;
ccv_sat(icf, &sat, 0, CCV_PADDING_ZERO);
ccv_matrix_free(icf);
float* ptr = sat->data.f32;
int ch = CCV_GET_CHANNEL(sat->type);
int pass = 1;
float sum = 0;
for (k = 0; k < cascade->count; k++)
{
ccv_icf_decision_tree_t* weak_classifier = cascade->weak_classifiers + k;
int c = _ccv_icf_run_weak_classifier(weak_classifier, ptr, sat->cols, ch, 1, 1);
sum += weak_classifier->weigh[c];
if (sum < weak_classifier->threshold)
{
pass = 0;
break;
}
}
ccv_matrix_free(sat);
if (pass)
ccv-src/lib/ccv_icf.c
assert(!example_state[i].correct);
}
}
ccv_matrix_free(sat);
}
return rate;
}
#endif
#endif
ccv_icf_classifier_cascade_t* ccv_icf_classifier_cascade_new(ccv_array_t* posfiles, int posnum, ccv_array_t* bgfiles, int negnum, ccv_array_t* validatefiles, const char* dir, ccv_icf_new_param_t params)
{
#ifdef HAVE_GSL
_ccv_icf_check_params(params);
assert(posfiles->rnum > 0);
assert(bgfiles->rnum > 0);
assert(posnum > 0 && negnum > 0);
PRINT(CCV_CLI_INFO, "with %d positive examples and %d negative examples\n"
"positive examples are going to be collected from %d positive images\n"
"negative examples are are going to be collected from %d background images\n",
posnum, negnum, posfiles->rnum, bgfiles->rnum);
PRINT(CCV_CLI_INFO, "use color? %s\n", params.grayscale ? "no" : "yes");
PRINT(CCV_CLI_INFO, "feature pool size : %d\n"
"weak classifier count : %d\n"
"soft cascade acceptance : %lf\n"
"minimum dimension of ICF feature : %d\n"
"number of bootstrap : %d\n"
"distortion on translation : %f\n"
"distortion on rotation : %f\n"
"distortion on scale : %f\n"
"learn ICF classifier cascade at size %dx%d with margin (%d,%d,%d,%d)\n"
"------------------------\n",
params.feature_size, params.weak_classifier, params.acceptance, params.min_dimension, params.bootstrap, params.deform_shift, params.deform_angle, params.deform_scale, params.size.width, params.size.height, params.margin.left, params.margin.top,...
gsl_rng_env_setup();
gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
// we will keep all state inside this structure for easier save / resume across processes
// this should work better than the ad-hoc one we used in the DPM / BBF implementations
ccv_icf_classifier_cascade_state_t z;
z.params = params;
ccv_function_state_begin(_ccv_icf_read_classifier_cascade_state, z, dir);
z.classifier->grayscale = params.grayscale;
z.size = params.size;
z.margin = params.margin;
z.classifier->size = ccv_size(z.size.width + z.margin.left + z.margin.right, z.size.height + z.margin.top + z.margin.bottom);
z.features = (ccv_icf_feature_t*)ccmalloc(sizeof(ccv_icf_feature_t) * params.feature_size);
// generate random features
for (z.i = 0; z.i < params.feature_size; z.i++)
_ccv_icf_randomize_feature(rng, z.classifier->size, params.min_dimension, z.features + z.i, params.grayscale);
z.x.features = 0;
ccv_function_state_resume(_ccv_icf_write_classifier_cascade_state, z, dir);
z.positives = _ccv_icf_collect_positives(rng, z.size, z.margin, posfiles, posnum, params.deform_angle, params.deform_scale, params.deform_shift, params.grayscale);
z.x.positives = 0;
ccv_function_state_resume(_ccv_icf_write_classifier_cascade_state, z, dir);
z.negatives = _ccv_icf_collect_negatives(rng, z.size, z.margin, bgfiles, negnum, params.deform_angle, params.deform_scale, params.deform_shift, params.grayscale);
z.x.negatives = 0;
ccv_function_state_resume(_ccv_icf_write_classifier_cascade_state, z, dir);
for (z.bootstrap = 0; z.bootstrap <= params.bootstrap; z.bootstrap++)
{
z.example_state = (ccv_icf_example_state_t*)ccmalloc(sizeof(ccv_icf_example_state_t) * (z.negatives->rnum + z.positives->rnum));
memset(z.example_state, 0, sizeof(ccv_icf_example_state_t) * (z.negatives->rnum + z.positives->rnum));
for (z.i = 0; z.i < z.positives->rnum + z.negatives->rnum; z.i++)
z.example_state[z.i].weight = (z.i < z.positives->rnum) ? 0.5 / z.positives->rnum : 0.5 / z.negatives->rnum;
z.x.example_state = 0;
ccv_function_state_resume(_ccv_icf_write_classifier_cascade_state, z, dir);
z.precomputed = _ccv_icf_precompute_features(z.features, params.feature_size, z.positives, z.negatives);
z.x.precomputed = 0;
ccv_function_state_resume(_ccv_icf_write_classifier_cascade_state, z, dir);
for (z.i = 0; z.i < params.weak_classifier; z.i++)
{
z.classifier->count = z.i + 1;
PRINT(CCV_CLI_INFO, " - boost weak classifier %d of %d\n", z.i + 1, params.weak_classifier);
int j;
ccv_icf_decision_tree_t weak_classifier;
double rate = _ccv_icf_find_best_weak_classifier(z.features, params.feature_size, z.positives, z.negatives, z.precomputed, z.example_state, &weak_classifier);
assert(rate > 0.5); // it has to be better than random chance
#ifdef USE_SANITY_ASSERTION
double confirm_rate = _ccv_icf_rate_weak_classifier(&weak_classifier, z.positives, z.negatives, z.example_state);
ccv-src/lib/ccv_icf.c
for (j = 0; j < weak_classifier.features[2].count; j++)
PRINT(CCV_CLI_INFO, " - | - %d - (%d, %d) - (%d, %d)\n", weak_classifier.features[2].channel[j], weak_classifier.features[2].sat[j * 2].x, weak_classifier.features[2].sat[j * 2].y, weak_classifier.features[2].sat[j * 2 + 1].x, weak_classifier.fe...
}
z.classifier->count = z.i + 1; // update count
z.classifier->size = ccv_size(z.size.width + z.margin.left + z.margin.right, z.size.height + z.margin.top + z.margin.bottom);
z.classifier->margin = z.margin;
if (z.i + 1 < params.weak_classifier)
{
z.x.example_state = 0;
z.x.classifier = 0;
ccv_function_state_resume(_ccv_icf_write_classifier_cascade_state, z, dir);
}
}
if (z.bootstrap < params.bootstrap) // collecting negatives, again
{
// free expensive memory
ccfree(z.example_state);
z.example_state = 0;
ccfree(z.precomputed);
z.precomputed = 0;
_ccv_icf_classifier_cascade_soft_with_validates(z.positives, z.classifier, 1); // assuming a perfect score, compute what the soft cascading thresholds would be
int exists = z.negatives->rnum;
int spread_policy = z.bootstrap < 2; // we don't spread bootstrapping anymore after the first two bootstrappings
// try to bootstrap half of the negatives from the perfect-scoring soft cascade
_ccv_icf_bootstrap_negatives(z.classifier, z.negatives, rng, bgfiles, (negnum + 1) / 2, params.grayscale, spread_policy, params.detector);
int leftover = negnum - (z.negatives->rnum - exists);
if (leftover > 0)
{
// if we cannot get enough negative examples, use the validates data set to extract more
ccv_array_t* validates = _ccv_icf_collect_validates(rng, z.size, z.margin, validatefiles, params.grayscale);
_ccv_icf_classifier_cascade_soft_with_validates(validates, z.classifier, params.acceptance);
ccv_array_free(validates);
_ccv_icf_bootstrap_negatives(z.classifier, z.negatives, rng, bgfiles, leftover, params.grayscale, spread_policy, params.detector);
}
PRINT(CCV_CLI_INFO, " - after %d bootstrapping, learn with %d positives and %d negatives\n", z.bootstrap + 1, z.positives->rnum, z.negatives->rnum);
z.classifier->count = 0; // reset everything
z.x.negatives = 0;
} else {
z.x.example_state = 0;
z.x.classifier = 0;
ccv_function_state_resume(_ccv_icf_write_classifier_cascade_state, z, dir);
}
}
if (z.precomputed)
ccfree(z.precomputed);
if (z.example_state)
ccfree(z.example_state);
ccfree(z.features);
ccv_array_free(z.positives);
ccv_array_free(z.negatives);
gsl_rng_free(rng);
ccv_function_state_finish();
return z.classifier;
#else
assert(0 && "ccv_icf_classifier_cascade_new requires GSL library support");
return 0;
#endif
}
void ccv_icf_classifier_cascade_soft(ccv_icf_classifier_cascade_t* cascade, ccv_array_t* posfiles, double acceptance)
{
#ifdef HAVE_GSL
PRINT(CCV_CLI_INFO, "with %d positive examples\n"
"going to accept %.2lf%% positive examples\n",
posfiles->rnum, acceptance * 100);
ccv_size_t size = ccv_size(cascade->size.width - cascade->margin.left - cascade->margin.right, cascade->size.height - cascade->margin.top - cascade->margin.bottom);
PRINT(CCV_CLI_INFO, "use color? %s\n", cascade->grayscale ? "no" : "yes");
PRINT(CCV_CLI_INFO, "compute soft cascading thresholds for ICF classifier cascade at size %dx%d with margin (%d,%d,%d,%d)\n"
"------------------------\n",
size.width, size.height, cascade->margin.left, cascade->margin.top, cascade->margin.right, cascade->margin.bottom);
gsl_rng_env_setup();
gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
/* collect positives */
double weigh[2] = {
0, 0
};
int i;
for (i = 0; i < cascade->count; i++)
{
ccv_icf_decision_tree_t* weak_classifier = cascade->weak_classifiers + i;
weigh[0] += weak_classifier->weigh[0];
weigh[1] += weak_classifier->weigh[1];
}
weigh[0] = 1 / fabs(weigh[0]), weigh[1] = 1 / fabs(weigh[1]);
for (i = 0; i < cascade->count; i++)
{
ccv_icf_decision_tree_t* weak_classifier = cascade->weak_classifiers + i;
weak_classifier->weigh[0] = weak_classifier->weigh[0] * weigh[0];
weak_classifier->weigh[1] = weak_classifier->weigh[1] * weigh[1];
}
ccv_array_t* validates = _ccv_icf_collect_validates(rng, size, cascade->margin, posfiles, cascade->grayscale);
/* compute soft cascading thresholds */
_ccv_icf_classifier_cascade_soft_with_validates(validates, cascade, acceptance);
ccv_array_free(validates);
gsl_rng_free(rng);
#else
assert(0 && "ccv_icf_classifier_cascade_soft requires GSL library support");
#endif
}
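A minimal sketch of recalibrating an existing single-scale cascade with the function above; it assumes ccv.h, a cascade file written by ccv_icf_write_classifier_cascade, and that posfiles has already been populated with positive image entries (ccv_file_info_t, as in ccv's training tools, is an assumption here since building that array is not shown in this file):
#include <ccv.h>

/* re-derive soft-cascade thresholds so that roughly 99% of the positives still pass (illustrative) */
static void icf_recalibrate(const char* cascade_file, ccv_array_t* posfiles)
{
	ccv_icf_classifier_cascade_t* cascade = ccv_icf_read_classifier_cascade(cascade_file);
	if (!cascade)
		return;
	ccv_icf_classifier_cascade_soft(cascade, posfiles, 0.99);
	ccv_icf_write_classifier_cascade(cascade, cascade_file);
	ccv_icf_classifier_cascade_free(cascade);
}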
static void _ccv_icf_read_classifier_cascade_with_fd(FILE* r, ccv_icf_classifier_cascade_t* cascade)
{
cascade->type = CCV_ICF_CLASSIFIER_TYPE_A;
fscanf(r, "%d %d %d %d", &cascade->count, &cascade->size.width, &cascade->size.height, &cascade->grayscale);
fscanf(r, "%d %d %d %d", &cascade->margin.left, &cascade->margin.top, &cascade->margin.right, &cascade->margin.bottom);
cascade->weak_classifiers = (ccv_icf_decision_tree_t*)ccmalloc(sizeof(ccv_icf_decision_tree_t) * cascade->count);
int i, q;
for (i = 0; i < cascade->count; i++)
{
ccv_icf_decision_tree_t* weak_classifier = cascade->weak_classifiers + i;
fscanf(r, "%u %a %a %a", &weak_classifier->pass, &weak_classifier->weigh[0], &weak_classifier->weigh[1], &weak_classifier->threshold);
fscanf(r, "%d %a", &weak_classifier->features[0].count, &weak_classifier->features[0].beta);
for (q = 0; q < weak_classifier->features[0].count; q++)
fscanf(r, "%d %a %d %d %d %d", &weak_classifier->features[0].channel[q], &weak_classifier->features[0].alpha[q], &weak_classifier->features[0].sat[q * 2].x, &weak_classifier->features[0].sat[q * 2].y, &weak_classifier->features[0].sat[q * 2 + 1].x...
if (weak_classifier->pass & 0x2)
{
fscanf(r, "%d %a", &weak_classifier->features[1].count, &weak_classifier->features[1].beta);
for (q = 0; q < weak_classifier->features[1].count; q++)
fscanf(r, "%d %a %d %d %d %d", &weak_classifier->features[1].channel[q], &weak_classifier->features[1].alpha[q], &weak_classifier->features[1].sat[q * 2].x, &weak_classifier->features[1].sat[q * 2].y, &weak_classifier->features[1].sat[q * 2 + 1]....
}
if (weak_classifier->pass & 0x1)
{
fscanf(r, "%d %a", &weak_classifier->features[2].count, &weak_classifier->features[2].beta);
for (q = 0; q < weak_classifier->features[2].count; q++)
fscanf(r, "%d %a %d %d %d %d", &weak_classifier->features[2].channel[q], &weak_classifier->features[2].alpha[q], &weak_classifier->features[2].sat[q * 2].x, &weak_classifier->features[2].sat[q * 2].y, &weak_classifier->features[2].sat[q * 2 + 1]....
}
}
}
static void _ccv_icf_write_classifier_cascade_with_fd(ccv_icf_classifier_cascade_t* cascade, FILE* w)
{
int i, q;
fprintf(w, "%d %d %d %d\n", cascade->count, cascade->size.width, cascade->size.height, cascade->grayscale);
fprintf(w, "%d %d %d %d\n", cascade->margin.left, cascade->margin.top, cascade->margin.right, cascade->margin.bottom);
for (i = 0; i < cascade->count; i++)
{
ccv_icf_decision_tree_t* weak_classifier = cascade->weak_classifiers + i;
fprintf(w, "%u %a %a %a\n", weak_classifier->pass, weak_classifier->weigh[0], weak_classifier->weigh[1], weak_classifier->threshold);
fprintf(w, "%d %a\n", weak_classifier->features[0].count, weak_classifier->features[0].beta);
for (q = 0; q < weak_classifier->features[0].count; q++)
fprintf(w, "%d %a\n%d %d %d %d\n", weak_classifier->features[0].channel[q], weak_classifier->features[0].alpha[q], weak_classifier->features[0].sat[q * 2].x, weak_classifier->features[0].sat[q * 2].y, weak_classifier->features[0].sat[q * 2 + 1].x,...
if (weak_classifier->pass & 0x2)
{
fprintf(w, "%d %a\n", weak_classifier->features[1].count, weak_classifier->features[1].beta);
for (q = 0; q < weak_classifier->features[1].count; q++)
fprintf(w, "%d %a\n%d %d %d %d\n", weak_classifier->features[1].channel[q], weak_classifier->features[1].alpha[q], weak_classifier->features[1].sat[q * 2].x, weak_classifier->features[1].sat[q * 2].y, weak_classifier->features[1].sat[q * 2 + 1].x...
}
if (weak_classifier->pass & 0x1)
{
fprintf(w, "%d %a\n", weak_classifier->features[2].count, weak_classifier->features[2].beta);
for (q = 0; q < weak_classifier->features[2].count; q++)
fprintf(w, "%d %a\n%d %d %d %d\n", weak_classifier->features[2].channel[q], weak_classifier->features[2].alpha[q], weak_classifier->features[2].sat[q * 2].x, weak_classifier->features[2].sat[q * 2].y, weak_classifier->features[2].sat[q * 2 + 1].x...
}
}
}
ccv_icf_classifier_cascade_t* ccv_icf_read_classifier_cascade(const char* filename)
{
FILE* r = fopen(filename, "r");
ccv_icf_classifier_cascade_t* cascade = 0;
if (r)
{
cascade = (ccv_icf_classifier_cascade_t*)ccmalloc(sizeof(ccv_icf_classifier_cascade_t));
_ccv_icf_read_classifier_cascade_with_fd(r, cascade);
fclose(r);
}
return cascade;
}
void ccv_icf_write_classifier_cascade(ccv_icf_classifier_cascade_t* cascade, const char* filename)
{
FILE* w = fopen(filename, "w+");
if (w)
{
_ccv_icf_write_classifier_cascade_with_fd(cascade, w);
fclose(w);
}
}
void ccv_icf_classifier_cascade_free(ccv_icf_classifier_cascade_t* classifier)
{
ccfree(classifier->weak_classifiers);
ccfree(classifier);
}
ccv_icf_multiscale_classifier_cascade_t* ccv_icf_read_multiscale_classifier_cascade(const char* directory)
{
char filename[1024];
snprintf(filename, 1024, "%s/multiscale", directory);
FILE* r = fopen(filename, "r");
if (r)
{
int octave = 0, count = 0, grayscale = 0;
fscanf(r, "%d %d %d", &octave, &count, &grayscale);
fclose(r);
ccv_icf_multiscale_classifier_cascade_t* classifier = (ccv_icf_multiscale_classifier_cascade_t*)ccmalloc(sizeof(ccv_icf_multiscale_classifier_cascade_t) + sizeof(ccv_icf_classifier_cascade_t) * count);
classifier->type = CCV_ICF_CLASSIFIER_TYPE_B;
classifier->octave = octave;
classifier->count = count;
classifier->grayscale = grayscale;
classifier->cascade = (ccv_icf_classifier_cascade_t*)(classifier + 1);
int i;
for (i = 0; i < count; i++)
{
snprintf(filename, 1024, "%s/cascade-%d", directory, i + 1);
r = fopen(filename, "r");
if (r)
{
ccv_icf_classifier_cascade_t* cascade = classifier->cascade + i;
_ccv_icf_read_classifier_cascade_with_fd(r, cascade);
fclose(r);
}
}
return classifier;
}
return 0;
}
void ccv_icf_write_multiscale_classifier_cascade(ccv_icf_multiscale_classifier_cascade_t* classifier, const char* directory)
{
char filename[1024];
snprintf(filename, 1024, "%s/multiscale", directory);
FILE* w = fopen(filename, "w+");
fprintf(w, "%d %d %d\n", classifier->octave, classifier->count, classifier->grayscale);
fclose(w);
int i;
for (i = 0; i < classifier->count; i++)
{
snprintf(filename, 1024, "%s/cascade-%d", directory, i + 1);
w = fopen(filename, "w+");
_ccv_icf_write_classifier_cascade_with_fd(classifier->cascade + i, w);
fclose(w);
}
}
void ccv_icf_multiscale_classifier_cascade_free(ccv_icf_multiscale_classifier_cascade_t* classifier)
{
int i;
for (i = 0; i < classifier->count; i++)
ccfree(classifier->cascade[i].weak_classifiers);
ccfree(classifier);
}
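Tying the multiscale reader to the type-dispatched detector defined a little further below, here is a sketch of detection with a type-B cascade; it assumes ccv.h, a directory written by ccv_icf_write_multiscale_classifier_cascade, and that ccv_icf_default_params is available from ccv.h (an assumption; otherwise fill a ccv_icf_param_t by hand):
#include <ccv.h>
#include <stdio.h>

int main(int argc, char** argv)
{
	if (argc < 3)
		return 1;
	/* argv[1]: image, argv[2]: directory holding "multiscale" plus "cascade-1" .. "cascade-N" */
	ccv_dense_matrix_t* image = 0;
	ccv_read(argv[1], &image, CCV_IO_ANY_FILE); /* keep the image's own channels; match the cascade's grayscale flag as needed */
	ccv_icf_multiscale_classifier_cascade_t* cascade = ccv_icf_read_multiscale_classifier_cascade(argv[2]);
	if (!image || !cascade)
		return 1;
	ccv_array_t* objects = ccv_icf_detect_objects(image, &cascade, 1, ccv_icf_default_params);
	int i;
	for (i = 0; i < objects->rnum; i++)
	{
		ccv_comp_t* comp = (ccv_comp_t*)ccv_array_get(objects, i);
		printf("%d %d %d %d %f\n", comp->rect.x, comp->rect.y, comp->rect.width, comp->rect.height, comp->classification.confidence);
	}
	ccv_array_free(objects);
	ccv_icf_multiscale_classifier_cascade_free(cascade);
	ccv_matrix_free(image);
	return 0;
}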
static int _ccv_is_equal_same_class(const void* _r1, const void* _r2, void* data)
{
const ccv_comp_t* r1 = (const ccv_comp_t*)_r1;
const ccv_comp_t* r2 = (const ccv_comp_t*)_r2;
int distance = (int)(ccv_min(r1->rect.width, r1->rect.height) * 0.25 + 0.5);
return r2->classification.id == r1->classification.id &&
r2->rect.x <= r1->rect.x + distance &&
r2->rect.x >= r1->rect.x - distance &&
r2->rect.y <= r1->rect.y + distance &&
r2->rect.y >= r1->rect.y - distance &&
r2->rect.width <= (int)(r1->rect.width * 1.5 + 0.5) &&
(int)(r2->rect.width * 1.5 + 0.5) >= r1->rect.width &&
r2->rect.height <= (int)(r1->rect.height * 1.5 + 0.5) &&
(int)(r2->rect.height * 1.5 + 0.5) >= r1->rect.height;
}
static void _ccv_icf_detect_objects_with_classifier_cascade(ccv_dense_matrix_t* a, ccv_icf_classifier_cascade_t** cascades, int count, ccv_icf_param_t params, ccv_array_t* seq[])
{
int i, j, k, q, x, y;
int scale_upto = 1;
for (i = 0; i < count; i++)
scale_upto = ccv_max(scale_upto, (int)(log(ccv_min((double)a->rows / (cascades[i]->size.height - cascades[i]->margin.top - cascades[i]->margin.bottom), (double)a->cols / (cascades[i]->size.width - cascades[i]->margin.left - cascades[i]->margin.righ...
ccv_dense_matrix_t** pyr = (ccv_dense_matrix_t**)alloca(sizeof(ccv_dense_matrix_t*) * scale_upto);
pyr[0] = a;
for (i = 1; i < scale_upto; i++)
{
pyr[i] = 0;
ccv_sample_down(pyr[i - 1], &pyr[i], 0, 0, 0);
}
for (i = 0; i < scale_upto; i++)
{
// run it
for (j = 0; j < count; j++)
{
double scale_ratio = pow(2., 1. / (params.interval + 1));
double scale = 1;
ccv_icf_classifier_cascade_t* cascade = cascades[j];
for (k = 0; k <= params.interval; k++)
{
int rows = (int)(pyr[i]->rows / scale + 0.5);
int cols = (int)(pyr[i]->cols / scale + 0.5);
if (rows < cascade->size.height || cols < cascade->size.width)
break;
ccv_dense_matrix_t* image = k == 0 ? pyr[i] : 0;
if (k > 0)
ccv_resample(pyr[i], &image, 0, rows, cols, CCV_INTER_AREA);
ccv_dense_matrix_t* bordered = 0;
ccv_border(image, (ccv_matrix_t**)&bordered, 0, cascade->margin);
if (k > 0)
ccv_matrix_free(image);
rows = bordered->rows;
cols = bordered->cols;
ccv_dense_matrix_t* icf = 0;
ccv_icf(bordered, &icf, 0);
ccv_matrix_free(bordered);
ccv_dense_matrix_t* sat = 0;
ccv_sat(icf, &sat, 0, CCV_PADDING_ZERO);
ccv_matrix_free(icf);
int ch = CCV_GET_CHANNEL(sat->type);
float* ptr = sat->data.f32;
for (y = 0; y < rows; y += params.step_through)
{
if (y >= sat->rows - cascade->size.height - 1)
break;
for (x = 0; x < cols; x += params.step_through)
{
if (x >= sat->cols - cascade->size.width - 1)
break;
int pass = 1;
float sum = 0;
for (q = 0; q < cascade->count; q++)
{
ccv_icf_decision_tree_t* weak_classifier = cascade->weak_classifiers + q;
int c = _ccv_icf_run_weak_classifier(weak_classifier, ptr, sat->cols, ch, x, 0);
sum += weak_classifier->weigh[c];
if (sum < weak_classifier->threshold)
{
pass = 0;
break;
}
}
if (pass)
{
ccv_comp_t comp;
comp.rect = ccv_rect((int)((x + 0.5) * scale * (1 << i) - 0.5), (int)((y + 0.5) * scale * (1 << i) - 0.5), (cascade->size.width - cascade->margin.left - cascade->margin.right) * scale * (1 << i), (cascade->size.height - cascade->margin.top - c...
comp.neighbors = 1;
comp.classification.id = j + 1;
comp.classification.confidence = sum;
ccv_array_push(seq[j], &comp);
}
}
ptr += sat->cols * ch * params.step_through;
}
ccv_matrix_free(sat);
scale *= scale_ratio;
}
}
}
for (i = 1; i < scale_upto; i++)
ccv_matrix_free(pyr[i]);
}
static void _ccv_icf_detect_objects_with_multiscale_classifier_cascade(ccv_dense_matrix_t* a, ccv_icf_multiscale_classifier_cascade_t** multiscale_cascade, int count, ccv_icf_param_t params, ccv_array_t* seq[])
{
int i, j, k, q, x, y, ix, iy, py;
assert(multiscale_cascade[0]->count % multiscale_cascade[0]->octave == 0);
ccv_margin_t margin = multiscale_cascade[0]->cascade[multiscale_cascade[0]->count - 1].margin;
for (i = 1; i < count; i++)
{
assert(multiscale_cascade[i]->count % multiscale_cascade[i]->octave == 0);
assert(multiscale_cascade[i - 1]->grayscale == multiscale_cascade[i]->grayscale);
assert(multiscale_cascade[i - 1]->count == multiscale_cascade[i]->count);
assert(multiscale_cascade[i - 1]->octave == multiscale_cascade[i]->octave);
ccv_icf_classifier_cascade_t* cascade = multiscale_cascade[i]->cascade + multiscale_cascade[i]->count - 1;
margin.top = ccv_max(margin.top, cascade->margin.top);
margin.right = ccv_max(margin.right, cascade->margin.right);
margin.bottom = ccv_max(margin.bottom, cascade->margin.bottom);
margin.left = ccv_max(margin.left, cascade->margin.left);
}
int scale_upto = 1;
for (i = 0; i < count; i++)
scale_upto = ccv_max(scale_upto, (int)(log(ccv_min((double)a->rows / (multiscale_cascade[i]->cascade[0].size.height - multiscale_cascade[i]->cascade[0].margin.top - multiscale_cascade[i]->cascade[0].margin.bottom), (double)a->cols / (multiscale_cas...
ccv_dense_matrix_t** pyr = (ccv_dense_matrix_t**)alloca(sizeof(ccv_dense_matrix_t*) * scale_upto);
pyr[0] = a;
for (i = 1; i < scale_upto; i++)
{
pyr[i] = 0;
ccv_sample_down(pyr[i - 1], &pyr[i], 0, 0, 0);
}
for (i = 0; i < scale_upto; i++)
{
ccv_dense_matrix_t* bordered = 0;
ccv-src/lib/ccv_icf.c
ccv_icf(bordered, &icf, 0);
ccv_matrix_free(bordered);
ccv_dense_matrix_t* sat = 0;
ccv_sat(icf, &sat, 0, CCV_PADDING_ZERO);
ccv_matrix_free(icf);
int ch = CCV_GET_CHANNEL(sat->type);
assert(CCV_GET_DATA_TYPE(sat->type) == CCV_32F);
// run it
for (j = 0; j < count; j++)
{
double scale_ratio = pow(2., (double)multiscale_cascade[j]->octave / multiscale_cascade[j]->count);
int starter = i > 0 ? multiscale_cascade[j]->count - (multiscale_cascade[j]->count / multiscale_cascade[j]->octave) : 0;
double scale = pow(scale_ratio, starter);
for (k = starter; k < multiscale_cascade[j]->count; k++)
{
ccv_icf_classifier_cascade_t* cascade = multiscale_cascade[j]->cascade + k;
int rows = (int)(pyr[i]->rows / scale + cascade->margin.top + 0.5);
int cols = (int)(pyr[i]->cols / scale + cascade->margin.left + 0.5);
int top = margin.top - cascade->margin.top;
int right = margin.right - cascade->margin.right;
int bottom = margin.bottom - cascade->margin.bottom;
int left = margin.left - cascade->margin.left;
if (sat->rows - top - bottom <= cascade->size.height || sat->cols - left - right <= cascade->size.width)
break;
float* ptr = sat->data.f32 + top * sat->cols * ch;
for (y = 0, iy = py = top; y < rows; y += params.step_through)
{
iy = (int)((y + 0.5) * scale + top);
if (iy >= sat->rows - cascade->size.height - 1)
break;
if (iy > py)
{
ptr += sat->cols * ch * (iy - py);
py = iy;
}
for (x = 0; x < cols; x += params.step_through)
{
ix = (int)((x + 0.5) * scale + left);
if (ix >= sat->cols - cascade->size.width - 1)
break;
int pass = 1;
float sum = 0;
for (q = 0; q < cascade->count; q++)
{
ccv_icf_decision_tree_t* weak_classifier = cascade->weak_classifiers + q;
int c = _ccv_icf_run_weak_classifier(weak_classifier, ptr, sat->cols, ch, ix, 0);
sum += weak_classifier->weigh[c];
if (sum < weak_classifier->threshold)
{
pass = 0;
break;
}
}
if (pass)
{
ccv_comp_t comp;
comp.rect = ccv_rect((int)((x + 0.5) * scale * (1 << i)), (int)((y + 0.5) * scale * (1 << i)), (cascade->size.width - cascade->margin.left - cascade->margin.right) << i, (cascade->size.height - cascade->margin.top - cascade->margin.bottom) << ...
comp.neighbors = 1;
comp.classification.id = j + 1;
comp.classification.confidence = sum;
ccv_array_push(seq[j], &comp);
}
}
}
scale *= scale_ratio;
}
}
ccv_matrix_free(sat);
}
for (i = 1; i < scale_upto; i++)
ccv_matrix_free(pyr[i]);
}
ccv_array_t* ccv_icf_detect_objects(ccv_dense_matrix_t* a, void* cascade, int count, ccv_icf_param_t params)
{
assert(count > 0);
int i, j, k;
int type = *(((int**)cascade)[0]);
for (i = 1; i < count; i++)
{
// check all types to be the same
assert(*(((int**)cascade)[i]) == type);
}
ccv_array_t** seq = (ccv_array_t**)alloca(sizeof(ccv_array_t*) * count);
for (i = 0; i < count; i++)
seq[i] = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
switch (type)
{
case CCV_ICF_CLASSIFIER_TYPE_A:
_ccv_icf_detect_objects_with_classifier_cascade(a, (ccv_icf_classifier_cascade_t**)cascade, count, params, seq);
break;
case CCV_ICF_CLASSIFIER_TYPE_B:
_ccv_icf_detect_objects_with_multiscale_classifier_cascade(a, (ccv_icf_multiscale_classifier_cascade_t**)cascade, count, params, seq);
break;
}
ccv_array_t* result_seq = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
ccv_array_t* seq2 = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
for (k = 0; k < count; k++)
{
/* the following code is from OpenCV's haar feature implementation */
if(params.min_neighbors == 0)
{
for (i = 0; i < seq[k]->rnum; i++)
ccv-src/lib/ccv_scd.c
for (i = 0; i < negnum; i++)
if (s[i + posnum] > threshold)
++fp;
if (tp_out)
*tp_out = (float)tp / posnum;
if (fp_out)
*fp_out = (float)fp / negnum;
return threshold;
}
static int _ccv_scd_classifier_cascade_pass(ccv_scd_classifier_cascade_t* cascade, ccv_dense_matrix_t* a)
{
#if defined(HAVE_SSE2)
__m128 surf[8];
#else
float surf[32];
#endif
ccv_dense_matrix_t* b = 0;
ccv_scd(a, &b, 0);
ccv_dense_matrix_t* sat = 0;
ccv_sat(b, &sat, 0, CCV_PADDING_ZERO);
ccv_matrix_free(b);
int pass = 1;
int i, j;
for (i = 0; i < cascade->count; i++)
{
ccv_scd_stump_classifier_t* classifier = cascade->classifiers + i;
float v = 0;
for (j = 0; j < classifier->count; j++)
{
ccv_scd_stump_feature_t* feature = classifier->features + j;
#if defined(HAVE_SSE2)
_ccv_scd_run_feature_at_sse2(sat->data.f32, sat->cols, feature, surf);
__m128 u0 = _mm_add_ps(_mm_mul_ps(surf[0], _mm_loadu_ps(feature->w)), _mm_mul_ps(surf[1], _mm_loadu_ps(feature->w + 4)));
__m128 u1 = _mm_add_ps(_mm_mul_ps(surf[2], _mm_loadu_ps(feature->w + 8)), _mm_mul_ps(surf[3], _mm_loadu_ps(feature->w + 12)));
__m128 u2 = _mm_add_ps(_mm_mul_ps(surf[4], _mm_loadu_ps(feature->w + 16)), _mm_mul_ps(surf[5], _mm_loadu_ps(feature->w + 20)));
__m128 u3 = _mm_add_ps(_mm_mul_ps(surf[6], _mm_loadu_ps(feature->w + 24)), _mm_mul_ps(surf[7], _mm_loadu_ps(feature->w + 28)));
ccv-src/lib/ccv_scd.c
if (v <= classifier->threshold)
{
pass = 0;
break;
}
}
ccv_matrix_free(sat);
return pass;
}
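In short, each SCD stump feature projects the 32-dimensional SURF response of its region onto its learned weights, v_feature = bias + w[0]*surf[0] + ... + w[31]*surf[31] (the SSE2 path above computes the same dot product four lanes at a time; the scalar training code further below spells it out). Those per-feature scores are accumulated into the stage score v in the elided span above, and the window is rejected as soon as a stage's v is at or below that stage's threshold, so pass remains 1 only for windows that clear every stage.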
static ccv_array_t* _ccv_scd_hard_mining(gsl_rng* rng, ccv_scd_classifier_cascade_t* cascade, ccv_array_t* hard_mine, ccv_array_t* negatives, int negative_count, int grayscale, int even_dist)
{
ccv_array_t* hard_negatives = ccv_array_new(ccv_compute_dense_matrix_size(cascade->size.height, cascade->size.width, CCV_8U | (grayscale ? CCV_C1 : CCV_C3)), negative_count, 0);
int i, j, t;
for (i = 0; i < negatives->rnum; i++)
{
ccv_dense_matrix_t* a = (ccv_dense_matrix_t*)ccv_array_get(negatives, i);
a->data.u8 = (unsigned char*)(a + 1);
if (_ccv_scd_classifier_cascade_pass(cascade, a))
ccv_array_push(hard_negatives, a);
}
int n_per_mine = ccv_max((negative_count - hard_negatives->rnum) / hard_mine->rnum, 10);
// the hard mining comes in the following fashion:
// 1). original, with n_per_mine set;
// 2). horizontal flip, with n_per_mine set;
// 3). vertical flip, with n_per_mine set;
// 4). 180 rotation, with n_per_mine set;
// 5~8). repeat the above, but with no n_per_mine set;
// after all of the above, if we still cannot collect enough, so be it.
ccv-src/lib/ccv_scd.c
if (t % 2 != 0)
ccv_flip(image, 0, 0, CCV_FLIP_X);
if (t % 4 >= 2)
ccv_flip(image, 0, 0, CCV_FLIP_Y);
if (t >= 4)
n_per_mine = negative_count; // no hard limit on n_per_mine anymore for the last pass
ccv_scd_param_t params = {
.interval = 3,
.min_neighbors = 0,
.step_through = 4,
.size = cascade->size,
};
ccv_array_t* objects = ccv_scd_detect_objects(image, &cascade, 1, params);
if (objects->rnum > 0)
{
gsl_ran_shuffle(rng, objects->data, objects->rnum, objects->rsize);
for (j = 0; j < ccv_min(objects->rnum, n_per_mine); j++)
{
ccv_rect_t* rect = (ccv_rect_t*)ccv_array_get(objects, j);
if (rect->x < 0 || rect->y < 0 || rect->x + rect->width > image->cols || rect->y + rect->height > image->rows)
continue;
ccv_dense_matrix_t* sliced = 0;
ccv_slice(image, (ccv_matrix_t**)&sliced, 0, rect->y, rect->x, rect->height, rect->width);
ccv_dense_matrix_t* resized = 0;
assert(sliced->rows >= cascade->size.height && sliced->cols >= cascade->size.width);
if (sliced->rows > cascade->size.height || sliced->cols > cascade->size.width)
{
ccv_resample(sliced, &resized, 0, cascade->size.height, cascade->size.width, CCV_INTER_CUBIC);
ccv_matrix_free(sliced);
} else {
resized = sliced;
}
if (_ccv_scd_classifier_cascade_pass(cascade, resized))
ccv_array_push(hard_negatives, resized);
ccv_matrix_free(resized);
if (hard_negatives->rnum >= negative_count)
break;
}
}
ccv_matrix_free(image);
if (hard_negatives->rnum >= negative_count)
break;
}
ccv-src/lib/ccv_scd.c
ccv_array_t* features;
ccv_array_t* positives;
ccv_array_t* negatives;
double* s;
double* pw;
double* nw;
float* fv; // feature vector for examples * feature
double auc_prev;
double accu_true_positive_rate;
double accu_false_positive_rate;
ccv_scd_classifier_cascade_t* cascade;
ccv_scd_train_param_t params;
} ccv_scd_classifier_cascade_new_function_state_t;
static void _ccv_scd_classifier_cascade_new_function_state_read(const char* filename, ccv_scd_classifier_cascade_new_function_state_t* z)
{
ccv_scd_classifier_cascade_t* cascade = ccv_scd_classifier_cascade_read(filename);
if (!cascade)
return;
if (z->cascade)
ccv_scd_classifier_cascade_free(z->cascade);
z->cascade = cascade;
assert(z->cascade->size.width == z->params.size.width);
assert(z->cascade->size.height == z->params.size.height);
sqlite3* db = 0;
if (SQLITE_OK == sqlite3_open(filename, &db))
{
const char negative_data_qs[] =
"SELECT data, rnum, rsize FROM negative_data WHERE id=0;";
sqlite3_stmt* negative_data_stmt = 0;
if (SQLITE_OK == sqlite3_prepare_v2(db, negative_data_qs, sizeof(negative_data_qs), &negative_data_stmt, 0))
{
if (sqlite3_step(negative_data_stmt) == SQLITE_ROW)
{
int rsize = ccv_compute_dense_matrix_size(z->cascade->size.height, z->cascade->size.width, CCV_8U | (z->params.grayscale ? CCV_C1 : CCV_C3));
int rnum = sqlite3_column_int(negative_data_stmt, 1);
assert(sqlite3_column_int(negative_data_stmt, 2) == rsize);
size_t size = sqlite3_column_bytes(negative_data_stmt, 0);
assert(size == (size_t)rsize * rnum);
if (z->negatives)
ccv_array_clear(z->negatives);
else
z->negatives = ccv_array_new(rsize, rnum, 0);
int i;
const uint8_t* data = (const uint8_t*)sqlite3_column_blob(negative_data_stmt, 0);
ccv-src/lib/ccv_scd.c
const void* nw = sqlite3_column_blob(function_state_stmt, 9);
memcpy(z->nw, nw, size);
}
sqlite3_finalize(function_state_stmt);
}
_ccv_scd_precompute_feature_vectors(z->features, z->positives, z->negatives, z->fv);
sqlite3_close(db);
}
}
static void _ccv_scd_classifier_cascade_new_function_state_write(ccv_scd_classifier_cascade_new_function_state_t* z, const char* filename)
{
ccv_scd_classifier_cascade_write(z->cascade, filename);
sqlite3* db = 0;
if (SQLITE_OK == sqlite3_open(filename, &db))
{
const char function_state_create_table_qs[] =
"CREATE TABLE IF NOT EXISTS function_state "
"(fsid INTEGER PRIMARY KEY ASC, t INTEGER, k INTEGER, positive_count INTEGER, auc_prev DOUBLE, accu_true_positive_rate DOUBLE, accu_false_positive_rate DOUBLE, line_no INTEGER, s BLOB, pw BLOB, nw BLOB);"
"CREATE TABLE IF NOT EXISTS negative_data "
"(id INTEGER PRIMARY KEY ASC, data BLOB, rnum INTEGER, rsize INTEGER);";
assert(SQLITE_OK == sqlite3_exec(db, function_state_create_table_qs, 0, 0, 0));
const char function_state_insert_qs[] =
ccv-src/lib/ccv_scd.c
sqlite3_bind_int(negative_data_insert_stmt, 3, z->negatives->rsize);
assert(SQLITE_DONE == sqlite3_step(negative_data_insert_stmt));
sqlite3_finalize(negative_data_insert_stmt);
z->array_signature = z->negatives->sig;
}
sqlite3_close(db);
}
}
#endif
ccv_scd_classifier_cascade_t* ccv_scd_classifier_cascade_new(ccv_array_t* posfiles, ccv_array_t* hard_mine, int negative_count, const char* filename, ccv_scd_train_param_t params)
{
#ifdef HAVE_GSL
assert(posfiles->rnum > 0);
assert(hard_mine->rnum > 0);
gsl_rng_env_setup();
gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
ccv_scd_classifier_cascade_new_function_state_t z = {0};
z.features = _ccv_scd_stump_features(params.feature.base, params.feature.range_through, params.feature.step_through, params.size);
PRINT(CCV_CLI_INFO, " - using %d features\n", z.features->rnum);
int i, j, p, q;
z.positives = _ccv_scd_collect_positives(params.size, posfiles, params.grayscale);
double* h = (double*)ccmalloc(sizeof(double) * (z.positives->rnum + negative_count));
z.s = (double*)ccmalloc(sizeof(double) * (z.positives->rnum + negative_count));
assert(z.s);
z.pw = (double*)ccmalloc(sizeof(double) * z.positives->rnum);
assert(z.pw);
z.nw = (double*)ccmalloc(sizeof(double) * negative_count);
assert(z.nw);
ccmemalign((void**)&z.fv, 16, sizeof(float) * (z.positives->rnum + negative_count) * z.features->rnum * 32);
assert(z.fv);
z.params = params;
ccv_function_state_begin(_ccv_scd_classifier_cascade_new_function_state_read, z, filename);
z.negatives = _ccv_scd_collect_negatives(rng, params.size, hard_mine, negative_count, params.grayscale);
_ccv_scd_precompute_feature_vectors(z.features, z.positives, z.negatives, z.fv);
z.cascade = (ccv_scd_classifier_cascade_t*)ccmalloc(sizeof(ccv_scd_classifier_cascade_t));
z.cascade->margin = ccv_margin(0, 0, 0, 0);
z.cascade->size = params.size;
z.cascade->count = 0;
z.cascade->classifiers = 0;
z.accu_true_positive_rate = 1;
z.accu_false_positive_rate = 1;
ccv_function_state_resume(_ccv_scd_classifier_cascade_new_function_state_write, z, filename);
for (z.t = 0; z.t < params.boosting; z.t++)
{
for (i = 0; i < z.positives->rnum; i++)
z.pw[i] = 0.5 / z.positives->rnum;
for (i = 0; i < z.negatives->rnum; i++)
z.nw[i] = 0.5 / z.negatives->rnum;
memset(z.s, 0, sizeof(double) * (z.positives->rnum + z.negatives->rnum));
z.cascade->classifiers = (ccv_scd_stump_classifier_t*)ccrealloc(z.cascade->classifiers, sizeof(ccv_scd_stump_classifier_t) * (z.t + 1));
z.cascade->count = z.t + 1;
z.cascade->classifiers[z.t].threshold = 0;
z.cascade->classifiers[z.t].features = 0;
z.cascade->classifiers[z.t].count = 0;
z.auc_prev = 0;
assert(z.positives->rnum > 0 && z.negatives->rnum > 0);
// for the first prune stages, we use a more restrictive number of features (faster)
for (z.k = 0; z.k < (z.t < params.stop_criteria.prune_stage ? params.stop_criteria.prune_feature : params.stop_criteria.maximum_feature); z.k++)
{
ccv_scd_stump_classifier_t* classifier = z.cascade->classifiers + z.t;
classifier->features = (ccv_scd_stump_feature_t*)ccrealloc(classifier->features, sizeof(ccv_scd_stump_feature_t) * (z.k + 1));
_ccv_scd_stump_feature_supervised_train(rng, z.features, z.positives->rnum, z.negatives->rnum, z.pw, z.nw, z.fv, params.C, params.weight_trimming);
int best_feature_no = _ccv_scd_best_feature_gentle_adaboost(z.s, z.features, z.pw, z.nw, z.positives->rnum, z.negatives->rnum, z.fv);
ccv_scd_stump_feature_t best_feature = *(ccv_scd_stump_feature_t*)ccv_array_get(z.features, best_feature_no);
for (i = 0; i < z.positives->rnum + z.negatives->rnum; i++)
{
float* surf = _ccv_scd_get_surf_at(z.fv, best_feature_no, i, z.positives->rnum, z.negatives->rnum);
float v = best_feature.bias;
for (j = 0; j < 32; j++)
v += best_feature.w[j] * surf[j];
ccv-src/lib/ccv_scd.c
w += z.pw[i];
w = 0.5 / w;
for (i = 0; i < z.positives->rnum; i++)
z.pw[i] *= w;
w = 0;
for (i = 0; i < z.negatives->rnum; i++)
w += z.nw[i];
w = 0.5 / w;
for (i = 0; i < z.negatives->rnum; i++)
z.nw[i] *= w;
ccv_function_state_resume(_ccv_scd_classifier_cascade_new_function_state_write, z, filename);
}
// backtrack removal
while (z.cascade->classifiers[z.t].count > 1)
{
double max_auc = 0;
p = -1;
for (i = 0; i < z.cascade->classifiers[z.t].count; i++)
{
ccv_scd_stump_feature_t* feature = z.cascade->classifiers[z.t].features + i;
int k = _ccv_scd_find_match_feature(feature, z.features);
assert(k >= 0);
for (j = 0; j < z.positives->rnum + z.negatives->rnum; j++)
{
float* surf = _ccv_scd_get_surf_at(z.fv, k, j, z.positives->rnum, z.negatives->rnum);
float v = feature->bias;
for (q = 0; q < 32; q++)
v += feature->w[q]* surf[q];
v = expf(v);
h[j] = z.s[j] - (v - 1) / (v + 1);
}
double auc = _ccv_scd_auc(h, z.positives->rnum, z.negatives->rnum);
FLUSH(CCV_CLI_INFO, " - attempting without %d-th feature, auc: %lf", i + 1, auc);
if (auc >= max_auc)
max_auc = auc, p = i;
}
if (max_auc >= z.auc_prev)
{
FLUSH(CCV_CLI_INFO, " - remove %d-th feature with new auc %lf\n", p + 1, max_auc);
ccv_scd_stump_feature_t* feature = z.cascade->classifiers[z.t].features + p;
int k = _ccv_scd_find_match_feature(feature, z.features);
assert(k >= 0);
for (j = 0; j < z.positives->rnum + z.negatives->rnum; j++)
{
float* surf = _ccv_scd_get_surf_at(z.fv, k, j, z.positives->rnum, z.negatives->rnum);
float v = feature->bias;
for (q = 0; q < 32; q++)
v += feature->w[q] * surf[q];
v = expf(v);
z.s[j] -= (v - 1) / (v + 1);
}
z.auc_prev = _ccv_scd_auc(z.s, z.positives->rnum, z.negatives->rnum);
--z.cascade->classifiers[z.t].count;
if (p < z.cascade->classifiers[z.t].count)
memmove(z.cascade->classifiers[z.t].features + p, z.cascade->classifiers[z.t].features + p + 1, sizeof(ccv_scd_stump_feature_t) * (z.cascade->classifiers[z.t].count - p)); /* shift the remaining features down over the removed one */
} else
break;
}
float true_positive_rate = 0;
float false_positive_rate = 0;
z.cascade->classifiers[z.t].threshold = _ccv_scd_threshold_at_hit_rate(z.s, z.positives->rnum, z.negatives->rnum, params.stop_criteria.hit_rate, &true_positive_rate, &false_positive_rate);
z.accu_true_positive_rate *= true_positive_rate;
z.accu_false_positive_rate *= false_positive_rate;
FLUSH(CCV_CLI_INFO, " - %d-th stage classifier TP rate : %f, FP rate : %f, ATP rate : %lf, AFP rate : %lg, at threshold : %f\n", z.t + 1, true_positive_rate, false_positive_rate, z.accu_true_positive_rate, z.accu_false_positive_rate, z.cascade->cla...
if (z.accu_false_positive_rate < params.stop_criteria.accu_false_positive_rate)
break;
ccv_function_state_resume(_ccv_scd_classifier_cascade_new_function_state_write, z, filename);
if (z.t < params.boosting - 1)
{
int pass = 0;
for (i = 0; i < z.positives->rnum; i++)
{
ccv_dense_matrix_t* a = (ccv_dense_matrix_t*)ccv_array_get(z.positives, i);
a->data.u8 = (unsigned char*)(a + 1); // pixel data is stored inline right after the matrix header inside the positives array
if (_ccv_scd_classifier_cascade_pass(z.cascade, a))
++pass;
}
PRINT(CCV_CLI_INFO, " - %d-th stage classifier TP rate (with pass) : %f\n", z.t + 1, (float)pass / z.positives->rnum);
ccv_array_t* hard_negatives = _ccv_scd_hard_mining(rng, z.cascade, hard_mine, z.negatives, negative_count, params.grayscale, z.t < params.stop_criteria.prune_stage /* try to balance even distribution among negatives when we are in prune stage */);
ccv_array_free(z.negatives);
z.negatives = hard_negatives;
_ccv_scd_precompute_feature_vectors(z.features, z.positives, z.negatives, z.fv);
}
ccv_function_state_resume(_ccv_scd_classifier_cascade_new_function_state_write, z, filename);
}
ccv_array_free(z.negatives);
ccv_function_state_finish();
ccv_array_free(z.features);
ccv_array_free(z.positives);
ccfree(h);
ccfree(z.s);
ccfree(z.pw);
ccfree(z.nw);
ccfree(z.fv);
gsl_rng_free(rng);
return z.cascade;
#else
assert(0 && "ccv_scd_classifier_cascade_new requires GSL library and support");
return 0;
#endif
}
void ccv_scd_classifier_cascade_write(ccv_scd_classifier_cascade_t* cascade, const char* filename)
{
sqlite3* db = 0;
if (SQLITE_OK == sqlite3_open(filename, &db))
{
const char create_table_qs[] =
"CREATE TABLE IF NOT EXISTS cascade_params "
"(id INTEGER PRIMARY KEY ASC, count INTEGER, "
"margin_left INTEGER, margin_top INTEGER, margin_right INTEGER, margin_bottom INTEGER, "
"size_width INTEGER, size_height INTEGER);"
"CREATE TABLE IF NOT EXISTS classifier_params "
"(classifier INTEGER PRIMARY KEY ASC, count INTEGER, threshold DOUBLE);"
"CREATE TABLE IF NOT EXISTS feature_params "
"(classifier INTEGER, id INTEGER, "
"sx_0 INTEGER, sy_0 INTEGER, dx_0 INTEGER, dy_0 INTEGER, "
"sx_1 INTEGER, sy_1 INTEGER, dx_1 INTEGER, dy_1 INTEGER, "
"sx_2 INTEGER, sy_2 INTEGER, dx_2 INTEGER, dy_2 INTEGER, "
"sx_3 INTEGER, sy_3 INTEGER, dx_3 INTEGER, dy_3 INTEGER, "
"bias DOUBLE, w BLOB, UNIQUE (classifier, id));";
assert(SQLITE_OK == sqlite3_exec(db, create_table_qs, 0, 0, 0));
const char cascade_params_insert_qs[] =
"REPLACE INTO cascade_params "
"(id, count, "
"margin_left, margin_top, margin_right, margin_bottom, "
"size_width, size_height) VALUES "
"(0, $count, " // 0
"$margin_left, $margin_top, $margin_bottom, $margin_right, " // 4
"$size_width, $size_height);"; // 6
sqlite3_stmt* cascade_params_insert_stmt = 0;
assert(SQLITE_OK == sqlite3_prepare_v2(db, cascade_params_insert_qs, sizeof(cascade_params_insert_qs), &cascade_params_insert_stmt, 0));
sqlite3_bind_int(cascade_params_insert_stmt, 1, cascade->count);
sqlite3_bind_int(cascade_params_insert_stmt, 2, cascade->margin.left);
sqlite3_bind_int(cascade_params_insert_stmt, 3, cascade->margin.top);
sqlite3_bind_int(cascade_params_insert_stmt, 4, cascade->margin.right);
sqlite3_bind_int(cascade_params_insert_stmt, 5, cascade->margin.bottom);
sqlite3_bind_int(cascade_params_insert_stmt, 6, cascade->size.width);
sqlite3_bind_int(cascade_params_insert_stmt, 7, cascade->size.height);
assert(SQLITE_DONE == sqlite3_step(cascade_params_insert_stmt));
sqlite3_finalize(cascade_params_insert_stmt);
const char classifier_params_insert_qs[] =
"REPLACE INTO classifier_params "
"(classifier, count, threshold) VALUES "
"($classifier, $count, $threshold);";
sqlite3_stmt* classifier_params_insert_stmt = 0;
assert(SQLITE_OK == sqlite3_prepare_v2(db, classifier_params_insert_qs, sizeof(classifier_params_insert_qs), &classifier_params_insert_stmt, 0));
const char feature_params_insert_qs[] =
"REPLACE INTO feature_params "
"(classifier, id, "
"sx_0, sy_0, dx_0, dy_0, "
ccv-src/lib/ccv_scd.c
"bias, w) VALUES "
"($classifier, $id, " // 1
"$sx_0, $sy_0, $dx_0, $dy_0, " // 5
"$sx_1, $sy_1, $dx_1, $dy_1, " // 9
"$sx_2, $sy_2, $dx_2, $dy_2, " // 13
"$sx_3, $sy_3, $dx_3, $dy_3, " // 17
"$bias, $w);"; // 19
sqlite3_stmt* feature_params_insert_stmt = 0;
assert(SQLITE_OK == sqlite3_prepare_v2(db, feature_params_insert_qs, sizeof(feature_params_insert_qs), &feature_params_insert_stmt, 0));
int i, j, k;
for (i = 0; i < cascade->count; i++)
{
ccv_scd_stump_classifier_t* classifier = cascade->classifiers + i;
sqlite3_bind_int(classifier_params_insert_stmt, 1, i);
sqlite3_bind_int(classifier_params_insert_stmt, 2, classifier->count);
sqlite3_bind_double(classifier_params_insert_stmt, 3, classifier->threshold);
assert(SQLITE_DONE == sqlite3_step(classifier_params_insert_stmt));
sqlite3_reset(classifier_params_insert_stmt);
sqlite3_clear_bindings(classifier_params_insert_stmt);
for (j = 0; j < classifier->count; j++)
{
ccv_scd_stump_feature_t* feature = classifier->features + j;
sqlite3_bind_int(feature_params_insert_stmt, 1, i);
ccv-src/lib/ccv_scd.c
sqlite3_reset(feature_params_insert_stmt);
sqlite3_clear_bindings(feature_params_insert_stmt);
}
}
sqlite3_finalize(classifier_params_insert_stmt);
sqlite3_finalize(feature_params_insert_stmt);
sqlite3_close(db);
}
}
ccv_scd_classifier_cascade_t* ccv_scd_classifier_cascade_read(const char* filename)
{
int i;
sqlite3* db = 0;
ccv_scd_classifier_cascade_t* cascade = 0;
if (SQLITE_OK == sqlite3_open(filename, &db))
{
const char cascade_params_qs[] =
"SELECT count, " // 1
"margin_left, margin_top, margin_right, margin_bottom, " // 5
"size_width, size_height FROM cascade_params WHERE id = 0;"; // 7
sqlite3_stmt* cascade_params_stmt = 0;
if (SQLITE_OK == sqlite3_prepare_v2(db, cascade_params_qs, sizeof(cascade_params_qs), &cascade_params_stmt, 0))
{
if (sqlite3_step(cascade_params_stmt) == SQLITE_ROW)
{
cascade = (ccv_scd_classifier_cascade_t*)ccmalloc(sizeof(ccv_scd_classifier_cascade_t));
cascade->count = sqlite3_column_int(cascade_params_stmt, 0);
cascade->classifiers = (ccv_scd_stump_classifier_t*)cccalloc(cascade->count, sizeof(ccv_scd_stump_classifier_t));
cascade->margin = ccv_margin(sqlite3_column_int(cascade_params_stmt, 1), sqlite3_column_int(cascade_params_stmt, 2), sqlite3_column_int(cascade_params_stmt, 3), sqlite3_column_int(cascade_params_stmt, 4));
cascade->size = ccv_size(sqlite3_column_int(cascade_params_stmt, 5), sqlite3_column_int(cascade_params_stmt, 6));
}
sqlite3_finalize(cascade_params_stmt);
}
if (cascade)
{
const char classifier_params_qs[] =
"SELECT classifier, count, threshold FROM classifier_params ORDER BY classifier ASC;";
sqlite3_stmt* classifier_params_stmt = 0;
if (SQLITE_OK == sqlite3_prepare_v2(db, classifier_params_qs, sizeof(classifier_params_qs), &classifier_params_stmt, 0))
{
while (sqlite3_step(classifier_params_stmt) == SQLITE_ROW)
if (sqlite3_column_int(classifier_params_stmt, 0) < cascade->count)
{
ccv_scd_stump_classifier_t* classifier = cascade->classifiers + sqlite3_column_int(classifier_params_stmt, 0);
classifier->count = sqlite3_column_int(classifier_params_stmt, 1);
classifier->features = (ccv_scd_stump_feature_t*)ccmalloc(sizeof(ccv_scd_stump_feature_t) * classifier->count);
classifier->threshold = (float)sqlite3_column_double(classifier_params_stmt, 2);
}
sqlite3_finalize(classifier_params_stmt);
}
const char feature_params_qs[] =
"SELECT classifier, id, "
"sx_0, sy_0, dx_0, dy_0, "
"sx_1, sy_1, dx_1, dy_1, "
"sx_2, sy_2, dx_2, dy_2, "
"sx_3, sy_3, dx_3, dy_3, "
"bias, w FROM feature_params ORDER BY classifier, id ASC;";
sqlite3_stmt* feature_params_stmt = 0;
if (SQLITE_OK == sqlite3_prepare_v2(db, feature_params_qs, sizeof(feature_params_qs), &feature_params_stmt, 0))
{
while (sqlite3_step(feature_params_stmt) == SQLITE_ROW)
if (sqlite3_column_int(feature_params_stmt, 0) < cascade->count)
{
ccv_scd_stump_classifier_t* classifier = cascade->classifiers + sqlite3_column_int(feature_params_stmt, 0);
if (sqlite3_column_int(feature_params_stmt, 1) < classifier->count)
{
ccv_scd_stump_feature_t* feature = classifier->features + sqlite3_column_int(feature_params_stmt, 1);
for (i = 0; i < 4; i++)
{
feature->sx[i] = sqlite3_column_int(feature_params_stmt, 2 + i * 4);
feature->sy[i] = sqlite3_column_int(feature_params_stmt, 3 + i * 4);
feature->dx[i] = sqlite3_column_int(feature_params_stmt, 4 + i * 4);
feature->dy[i] = sqlite3_column_int(feature_params_stmt, 5 + i * 4);
}
ccv-src/lib/ccv_scd.c
assert(wnum == 32 * sizeof(float));
const void* w = sqlite3_column_blob(feature_params_stmt, 19);
memcpy(feature->w, w, sizeof(float) * 32);
}
}
sqlite3_finalize(feature_params_stmt);
}
}
sqlite3_close(db);
}
return cascade;
}
void ccv_scd_classifier_cascade_free(ccv_scd_classifier_cascade_t* cascade)
{
int i;
for (i = 0; i < cascade->count; i++)
{
ccv_scd_stump_classifier_t* classifier = cascade->classifiers + i;
ccfree(classifier->features);
}
ccfree(cascade->classifiers);
ccfree(cascade);
}
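/* Not part of ccv_scd.c: a minimal usage sketch showing how a cascade saved with
 * ccv_scd_classifier_cascade_write() can be read back and run on a grayscale image.
 * It assumes the usual #include <ccv.h> (and <stdio.h>); the function name and the
 * file names ("face.sqlite3", "photo.png") are placeholders. */
static void scd_detect_example(void)
{
	ccv_enable_default_cache();
	ccv_scd_classifier_cascade_t* cascade = ccv_scd_classifier_cascade_read("face.sqlite3");
	ccv_dense_matrix_t* image = 0;
	ccv_read("photo.png", &image, CCV_IO_GRAY | CCV_IO_ANY_FILE);
	if (cascade && image)
	{
		ccv_array_t* seq = ccv_scd_detect_objects(image, &cascade, 1, ccv_scd_default_params);
		int i;
		for (i = 0; i < seq->rnum; i++)
		{
			ccv_comp_t* comp = (ccv_comp_t*)ccv_array_get(seq, i);
			printf("%d %d %d %d %f\n", comp->rect.x, comp->rect.y, comp->rect.width, comp->rect.height, comp->classification.confidence);
		}
		ccv_array_free(seq);
	}
	if (image)
		ccv_matrix_free(image);
	if (cascade)
		ccv_scd_classifier_cascade_free(cascade);
	ccv_disable_cache();
}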
static int _ccv_is_equal_same_class(const void* _r1, const void* _r2, void* data)
{
const ccv_comp_t* r1 = (const ccv_comp_t*)_r1;
const ccv_comp_t* r2 = (const ccv_comp_t*)_r2;
if (r2->classification.id != r1->classification.id)
return 0;
int i = ccv_max(ccv_min(r2->rect.x + r2->rect.width, r1->rect.x + r1->rect.width) - ccv_max(r2->rect.x, r1->rect.x), 0) * ccv_max(ccv_min(r2->rect.y + r2->rect.height, r1->rect.y + r1->rect.height) - ccv_max(r2->rect.y, r1->rect.y), 0);
int m = ccv_min(r2->rect.width * r2->rect.height, r1->rect.width * r1->rect.height);
return i >= 0.3 * m; // intersection over the smaller box (IoM) >= 0.3, as in HeadHunter
}
ccv_array_t* ccv_scd_detect_objects(ccv_dense_matrix_t* a, ccv_scd_classifier_cascade_t** cascades, int count, ccv_scd_param_t params)
{
int i, j, k, x, y, p, q;
int scale_upto = 1;
float up_ratio = 1.0;
for (i = 0; i < count; i++)
up_ratio = ccv_max(up_ratio, ccv_max((float)cascades[i]->size.width / params.size.width, (float)cascades[i]->size.height / params.size.height));
if (up_ratio - 1.0 > 1e-4)
{
ccv_dense_matrix_t* resized = 0;
ccv_resample(a, &resized, 0, (int)(a->rows * up_ratio + 0.5), (int)(a->cols * up_ratio + 0.5), CCV_INTER_CUBIC);
a = resized;
}
for (i = 0; i < count; i++)
scale_upto = ccv_max(scale_upto, (int)(log(ccv_min((double)a->rows / (cascades[i]->size.height - cascades[i]->margin.top - cascades[i]->margin.bottom), (double)a->cols / (cascades[i]->size.width - cascades[i]->margin.left - cascades[i]->margin.righ...
ccv_dense_matrix_t** pyr = (ccv_dense_matrix_t**)alloca(sizeof(ccv_dense_matrix_t*) * scale_upto);
pyr[0] = a;
for (i = 1; i < scale_upto; i++)
{
pyr[i] = 0;
ccv_sample_down(pyr[i - 1], &pyr[i], 0, 0, 0);
}
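/* pyr[0] is the (possibly up-scaled) input; every further level is a half-resolution octave produced by ccv_sample_down */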
#if defined(HAVE_SSE2)
__m128 surf[8];
#else
ccv-src/lib/ccv_scd.c
ccv_array_t** seq = (ccv_array_t**)alloca(sizeof(ccv_array_t*) * count);
for (i = 0; i < count; i++)
seq[i] = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
for (i = 0; i < scale_upto; i++)
{
// run every cascade over this pyramid level
for (j = 0; j < count; j++)
{
double scale_ratio = pow(2., 1. / (params.interval + 1));
double scale = 1;
ccv_scd_classifier_cascade_t* cascade = cascades[j];
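/* within each octave, scan params.interval + 1 intermediate scales spaced by a factor of 2^(1 / (interval + 1)) */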
for (k = 0; k <= params.interval; k++)
{
int rows = (int)(pyr[i]->rows / scale + 0.5);
int cols = (int)(pyr[i]->cols / scale + 0.5);
if (rows < cascade->size.height || cols < cascade->size.width)
break;
ccv_dense_matrix_t* image = k == 0 ? pyr[i] : 0;
if (k > 0)
ccv_resample(pyr[i], &image, 0, rows, cols, CCV_INTER_AREA);
ccv_dense_matrix_t* scd = 0;
if (cascade->margin.left == 0 && cascade->margin.top == 0 && cascade->margin.right == 0 && cascade->margin.bottom == 0)
{
ccv_scd(image, &scd, 0);
if (k > 0)
ccv_matrix_free(image);
} else {
ccv_dense_matrix_t* bordered = 0;
ccv_border(image, (ccv_matrix_t**)&bordered, 0, cascade->margin);
if (k > 0)
ccv_matrix_free(image);
ccv_scd(bordered, &scd, 0);
ccv_matrix_free(bordered);
}
ccv_dense_matrix_t* sat = 0;
ccv_sat(scd, &sat, 0, CCV_PADDING_ZERO);
assert(CCV_GET_CHANNEL(sat->type) == CCV_SCD_CHANNEL);
ccv_matrix_free(scd);
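/* sat is the summed-area table (integral image) of the SCD feature channels, so each feature can read box sums in constant time */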
float* ptr = sat->data.f32;
for (y = 0; y < rows; y += params.step_through)
{
if (y >= sat->rows - cascade->size.height - 1)
break;
for (x = 0; x < cols; x += params.step_through)
{
if (x >= sat->cols - cascade->size.width - 1)
break;
int pass = 1;
float sum = 0;
for (p = 0; p < cascade->count; p++)
{
ccv_scd_stump_classifier_t* classifier = cascade->classifiers + p;
float v = 0;
for (q = 0; q < classifier->count; q++)
{
ccv_scd_stump_feature_t* feature = classifier->features + q;
#if defined(HAVE_SSE2)
_ccv_scd_run_feature_at_sse2(ptr + x * CCV_SCD_CHANNEL, sat->cols, feature, surf);
__m128 u0 = _mm_add_ps(_mm_mul_ps(surf[0], _mm_loadu_ps(feature->w)), _mm_mul_ps(surf[1], _mm_loadu_ps(feature->w + 4)));
__m128 u1 = _mm_add_ps(_mm_mul_ps(surf[2], _mm_loadu_ps(feature->w + 8)), _mm_mul_ps(surf[3], _mm_loadu_ps(feature->w + 12)));
__m128 u2 = _mm_add_ps(_mm_mul_ps(surf[4], _mm_loadu_ps(feature->w + 16)), _mm_mul_ps(surf[5], _mm_loadu_ps(feature->w + 20)));
__m128 u3 = _mm_add_ps(_mm_mul_ps(surf[6], _mm_loadu_ps(feature->w + 24)), _mm_mul_ps(surf[7], _mm_loadu_ps(feature->w + 28)));
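/* u0..u3 accumulate the dot product of the 32-dimensional SURF response with feature->w, four lanes at a time */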
ccv-src/lib/ccv_scd.c
pass = 0;
break;
}
sum = v / classifier->count;
}
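/* map the accepted window back to original-image coordinates: undo the per-interval scale, the initial up_ratio up-scaling and the pyramid octave (1 << i) */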
if (pass)
{
ccv_comp_t comp;
comp.rect = ccv_rect((int)((x + 0.5) * (scale / up_ratio) * (1 << i) - 0.5),
(int)((y + 0.5) * (scale / up_ratio) * (1 << i) - 0.5),
(cascade->size.width - cascade->margin.left - cascade->margin.right) * (scale / up_ratio) * (1 << i),
(cascade->size.height - cascade->margin.top - cascade->margin.bottom) * (scale / up_ratio) * (1 << i));
comp.neighbors = 1;
comp.classification.id = j + 1;
comp.classification.confidence = sum + (cascade->count - 1);
ccv_array_push(seq[j], &comp);
}
}
ptr += sat->cols * CCV_SCD_CHANNEL * params.step_through;
}
ccv_matrix_free(sat);
scale *= scale_ratio;
}
}
}
ccv-src/serve/bbf.c
{
.property = "source",
.type = PARAM_TYPE_BODY,
.on_blob = uri_bbf_on_source_blob,
.offset = 0,
},
};
typedef struct {
ebb_buf desc;
ccv_bbf_classifier_cascade_t* face;
} bbf_context_t;
typedef struct {
param_parser_t param_parser;
bbf_context_t* context;
ccv_bbf_uri_param_t params;
ccv_bbf_classifier_cascade_t* cascade;
ebb_buf source;
} bbf_param_parser_t;
static void uri_bbf_param_parser_init(bbf_param_parser_t* parser)
{
param_parser_init(&parser->param_parser, param_map, sizeof(param_map) / sizeof(param_dispatch_t), &parser->params, parser);
parser->params.params = ccv_bbf_default_params;
parser->params.max_dimension = 0;
parser->cascade = 0;
parser->source.data = 0;
}
static void uri_bbf_on_model_string(void* context, char* string)
{
bbf_param_parser_t* parser = (bbf_param_parser_t*)context;
if (strcmp(string, "face") == 0)
parser->cascade = parser->context->face;
}
static void uri_bbf_on_source_blob(void* context, ebb_buf data)
{
bbf_param_parser_t* parser = (bbf_param_parser_t*)context;
parser->source = data;
}
void* uri_bbf_detect_objects_parse(const void* context, void* parsed, int resource_id, const char* buf, size_t len, uri_parse_state_t state, int header_index)
{
ccv-src/serve/bbf.c
case URI_MULTIPART_DATA:
param_parser_execute(&parser->param_parser, resource_id, buf, len, state, header_index);
break;
}
return parser;
}
void* uri_bbf_detect_objects_init(void)
{
bbf_context_t* context = (bbf_context_t*)malloc(sizeof(bbf_context_t));
context->face = ccv_bbf_read_classifier_cascade("../samples/face");
assert(context->face);
assert(param_parser_map_alphabet(param_map, sizeof(param_map) / sizeof(param_dispatch_t)) == 0);
context->desc = param_parser_map_http_body(param_map, sizeof(param_map) / sizeof(param_dispatch_t),
"[{"
"\"x\":\"number\","
"\"y\":\"number\","
"\"width\":\"number\","
"\"height\":\"number\","
"\"confidence\":\"number\""
"}]");
return context;
}
void uri_bbf_detect_objects_destroy(void* context)
{
bbf_context_t* bbf_context = (bbf_context_t*)context;
ccv_bbf_classifier_cascade_free(bbf_context->face);
free(bbf_context->desc.data);
free(bbf_context);
}
int uri_bbf_detect_objects_intro(const void* context, const void* parsed, ebb_buf* buf)
{
bbf_context_t* bbf_context = (bbf_context_t*)context;
buf->data = bbf_context->desc.data;
buf->len = bbf_context->desc.len;
return 0;
ccv-src/serve/bbf.c
{
if (!parsed)
return -1;
bbf_param_parser_t* parser = (bbf_param_parser_t*)parsed;
param_parser_terminate(&parser->param_parser);
if (parser->source.data == 0)
{
free(parser);
return -1;
}
if (parser->cascade == 0)
{
free(parser->source.data);
free(parser);
return -1;
}
ccv_dense_matrix_t* image = 0;
ccv_read(parser->source.data, &image, CCV_IO_ANY_STREAM | CCV_IO_GRAY, parser->source.written);
free(parser->source.data);
if (image == 0)
{
free(parser);
return -1;
}
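/* if max_dimension is set, downscale so the longer side becomes max_dimension (aspect ratio preserved) before detection */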
ccv_dense_matrix_t* resize = 0;
if (parser->params.max_dimension > 0 && (image->rows > parser->params.max_dimension || image->cols > parser->params.max_dimension))
{
ccv_resample(image, &resize, 0, ccv_min(parser->params.max_dimension, (int)(image->rows * (float)parser->params.max_dimension / image->cols + 0.5)), ccv_min(parser->params.max_dimension, (int)(image->cols * (float)parser->params.max_dimension / ima...
ccv_matrix_free(image);
} else
resize = image;
ccv_array_t* seq = ccv_bbf_detect_objects(resize, &parser->cascade, 1, parser->params.params);
float width = resize->cols, height = resize->rows;
ccv_matrix_free(resize);
if (seq == 0)
{
free(parser);
return -1;
}
if (seq->rnum > 0)
{
int i;
ccv-src/serve/icf.c
},
{
.property = "step_through",
.type = PARAM_TYPE_INT,
.offset = offsetof(ccv_icf_uri_param_t, params) + offsetof(ccv_icf_param_t, step_through),
},
};
typedef struct {
ebb_buf desc;
ccv_icf_classifier_cascade_t* pedestrian;
} icf_context_t;
typedef struct {
param_parser_t param_parser;
icf_context_t* context;
ccv_icf_uri_param_t params;
ccv_icf_classifier_cascade_t* cascade;
ebb_buf source;
} icf_param_parser_t;
static void uri_icf_param_parser_init(icf_param_parser_t* parser)
{
param_parser_init(&parser->param_parser, param_map, sizeof(param_map) / sizeof(param_dispatch_t), &parser->params, parser);
parser->params.params = ccv_icf_default_params;
parser->params.max_dimension = 0;
parser->cascade = 0;
parser->source.data = 0;
}
static void uri_icf_on_model_string(void* context, char* string)
{
icf_param_parser_t* parser = (icf_param_parser_t*)context;
if (strcmp(string, "pedestrian") == 0)
parser->cascade = parser->context->pedestrian;
}
static void uri_icf_on_source_blob(void* context, ebb_buf data)
{
icf_param_parser_t* parser = (icf_param_parser_t*)context;
parser->source = data;
}
void* uri_icf_detect_objects_parse(const void* context, void* parsed, int resource_id, const char* buf, size_t len, uri_parse_state_t state, int header_index)
{
ccv-src/serve/icf.c
case URI_MULTIPART_DATA:
param_parser_execute(&parser->param_parser, resource_id, buf, len, state, header_index);
break;
}
return parser;
}
void* uri_icf_detect_objects_init(void)
{
icf_context_t* context = (icf_context_t*)malloc(sizeof(icf_context_t));
context->pedestrian = ccv_icf_read_classifier_cascade("../samples/pedestrian.icf");
assert(context->pedestrian);
assert(param_parser_map_alphabet(param_map, sizeof(param_map) / sizeof(param_dispatch_t)) == 0);
context->desc = param_parser_map_http_body(param_map, sizeof(param_map) / sizeof(param_dispatch_t),
"[{"
"\"x\":\"number\","
"\"y\":\"number\","
"\"width\":\"number\","
"\"height\":\"number\","
"\"confidence\":\"number\""
"}]");
return context;
}
void uri_icf_detect_objects_destroy(void* context)
{
icf_context_t* icf_context = (icf_context_t*)context;
ccv_icf_classifier_cascade_free(icf_context->pedestrian);
free(icf_context->desc.data);
free(icf_context);
}
int uri_icf_detect_objects_intro(const void* context, const void* parsed, ebb_buf* buf)
{
icf_context_t* icf_context = (icf_context_t*)context;
buf->data = icf_context->desc.data;
buf->len = icf_context->desc.len;
return 0;
ccv-src/serve/icf.c
{
if (!parsed)
return -1;
icf_param_parser_t* parser = (icf_param_parser_t*)parsed;
param_parser_terminate(&parser->param_parser);
if (parser->source.data == 0)
{
free(parser);
return -1;
}
if (parser->cascade == 0)
{
free(parser->source.data);
free(parser);
return -1;
}
ccv_dense_matrix_t* image = 0;
ccv_read(parser->source.data, &image, CCV_IO_ANY_STREAM | CCV_IO_RGB_COLOR, parser->source.written);
free(parser->source.data);
if (image == 0)
{
free(parser);
return -1;
}
ccv_dense_matrix_t* resize = 0;
if (parser->params.max_dimension > 0 && (image->rows > parser->params.max_dimension || image->cols > parser->params.max_dimension))
{
ccv_resample(image, &resize, 0, ccv_min(parser->params.max_dimension, (int)(image->rows * (float)parser->params.max_dimension / image->cols + 0.5)), ccv_min(parser->params.max_dimension, (int)(image->cols * (float)parser->params.max_dimension / ima...
ccv_matrix_free(image);
} else
resize = image;
ccv_array_t* seq = ccv_icf_detect_objects(resize, &parser->cascade, 1, parser->params.params);
float width = resize->cols, height = resize->rows;
ccv_matrix_free(resize);
if (seq == 0)
{
free(parser);
return -1;
}
if (seq->rnum > 0)
{
int i;
ccv-src/serve/scd.c
{
.property = "source",
.type = PARAM_TYPE_BODY,
.on_blob = uri_scd_on_source_blob,
.offset = 0,
},
};
typedef struct {
ebb_buf desc;
ccv_scd_classifier_cascade_t* face;
} scd_context_t;
typedef struct {
param_parser_t param_parser;
scd_context_t* context;
ccv_scd_uri_param_t params;
ccv_scd_classifier_cascade_t* cascade;
ebb_buf source;
} scd_param_parser_t;
static void uri_scd_param_parser_init(scd_param_parser_t* parser)
{
param_parser_init(&parser->param_parser, param_map, sizeof(param_map) / sizeof(param_dispatch_t), &parser->params, parser);
parser->params.params = ccv_scd_default_params;
parser->params.max_dimension = 0;
parser->cascade = 0;
parser->source.data = 0;
}
static void uri_scd_on_model_string(void* context, char* string)
{
scd_param_parser_t* parser = (scd_param_parser_t*)context;
if (strcmp(string, "face") == 0)
parser->cascade = parser->context->face;
}
static void uri_scd_on_source_blob(void* context, ebb_buf data)
{
scd_param_parser_t* parser = (scd_param_parser_t*)context;
parser->source = data;
}
void* uri_scd_detect_objects_parse(const void* context, void* parsed, int resource_id, const char* buf, size_t len, uri_parse_state_t state, int header_index)
{
ccv-src/serve/scd.c
case URI_MULTIPART_DATA:
param_parser_execute(&parser->param_parser, resource_id, buf, len, state, header_index);
break;
}
return parser;
}
void* uri_scd_detect_objects_init(void)
{
scd_context_t* context = (scd_context_t*)malloc(sizeof(scd_context_t));
context->face = ccv_scd_classifier_cascade_read("../samples/face.sqlite3");
assert(context->face);
assert(param_parser_map_alphabet(param_map, sizeof(param_map) / sizeof(param_dispatch_t)) == 0);
context->desc = param_parser_map_http_body(param_map, sizeof(param_map) / sizeof(param_dispatch_t),
"[{"
"\"x\":\"number\","
"\"y\":\"number\","
"\"width\":\"number\","
"\"height\":\"number\","
"\"confidence\":\"number\""
"}]");
return context;
}
void uri_scd_detect_objects_destroy(void* context)
{
scd_context_t* scd_context = (scd_context_t*)context;
ccv_scd_classifier_cascade_free(scd_context->face);
free(scd_context->desc.data);
free(scd_context);
}
int uri_scd_detect_objects_intro(const void* context, const void* parsed, ebb_buf* buf)
{
scd_context_t* scd_context = (scd_context_t*)context;
buf->data = scd_context->desc.data;
buf->len = scd_context->desc.len;
return 0;
ccv-src/serve/scd.c
{
if (!parsed)
return -1;
scd_param_parser_t* parser = (scd_param_parser_t*)parsed;
param_parser_terminate(&parser->param_parser);
if (parser->source.data == 0)
{
free(parser);
return -1;
}
if (parser->cascade == 0)
{
free(parser->source.data);
free(parser);
return -1;
}
ccv_dense_matrix_t* image = 0;
ccv_read(parser->source.data, &image, CCV_IO_ANY_STREAM | CCV_IO_GRAY, parser->source.written);
free(parser->source.data);
if (image == 0)
{
free(parser);
return -1;
}
ccv_dense_matrix_t* resize = 0;
if (parser->params.max_dimension > 0 && (image->rows > parser->params.max_dimension || image->cols > parser->params.max_dimension))
{
ccv_resample(image, &resize, 0, ccv_min(parser->params.max_dimension, (int)(image->rows * (float)parser->params.max_dimension / image->cols + 0.5)), ccv_min(parser->params.max_dimension, (int)(image->cols * (float)parser->params.max_dimension / ima...
ccv_matrix_free(image);
} else
resize = image;
ccv_array_t* seq = ccv_scd_detect_objects(resize, &parser->cascade, 1, parser->params.params);
float width = resize->cols, height = resize->rows;
ccv_matrix_free(resize);
if (seq == 0)
{
free(parser);
return -1;
}
if (seq->rnum > 0)
{
int i;
TYPEMAP
ccv_sift_param_t* T_PTROBJ
ccv_array_t* T_PTROBJ
ccv_bbf_classifier_cascade_t* T_PTROBJ