Reposted from http://www.cnblogs.com/denny402/p/5032839.html

The ml classes in OpenCV3 changed considerably from OpenCV2. Below is a worked example of the OpenCV3 machine learning class methods:

Its purpose is to classify the OCR sample data that ships with OpenCV (the letter-recognition dataset). Among the methods, the neural network and AdaBoost are very slow to train; kNN still gives the best results.
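Before the full listing, here is a minimal, self-contained sketch of the OpenCV3 cv::ml pattern (my own toy example, assuming OpenCV 3.x): models are created through factory functions, configured with setters, and trained through the common StatModel interface, whereas OpenCV2 used concrete classes such as CvSVM configured through a CvSVMParams struct.

#include <opencv2/opencv.hpp>
using namespace cv;
using namespace cv::ml;

int main()
{
    // Toy data: four 2-feature samples with integer class labels.
    float samples[] = { 0,0,  0,1,  1,0,  1,1 };
    int labels[] = { 0, 1, 1, 0 };
    Mat train_data(4, 2, CV_32F, samples);
    Mat responses(4, 1, CV_32S, labels);

    // OpenCV3: factory + setters + StatModel::train.
    // (OpenCV2 equivalent: CvSVM svm; CvSVMParams params; svm.train(...);)
    Ptr<SVM> svm = SVM::create();
    svm->setType(SVM::C_SVC);
    svm->setKernel(SVM::RBF);
    svm->train(TrainData::create(train_data, ROW_SAMPLE, responses));

    Mat query = (Mat_<float>(1, 2) << 0.f, 1.f);
    return (int)svm->predict(query);   // predict through the shared StatModel interface
}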

#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
using namespace cv::ml;

// Read the comma-separated sample file: the first field of each line is the
// letter label, followed by var_count numeric features.
bool read_num_class_data(const string& filename, int var_count, Mat* _data, Mat* _responses)
{
    const int M = 1024;
    char buf[M + 2];
    Mat el_ptr(1, var_count, CV_32F);
    int i;
    vector<int> responses;

    _data->release();
    _responses->release();

    FILE *f;
    fopen_s(&f, filename.c_str(), "rt");   // MSVC-specific; see the note after the listing
    if (!f)
    {
        cout << "Could not read the database " << filename << endl;
        return false;
    }
    for (;;)
    {
        char* ptr;
        if (!fgets(buf, M, f) || !strchr(buf, ','))
            break;
        responses.push_back((int)buf[0]);  // the label is the first character
        ptr = buf + 2;                     // skip the label and the comma
        for (i = 0; i < var_count; i++)
        {
            int n = 0;
            sscanf_s(ptr, "%f%n", &el_ptr.at<float>(i), &n);
            ptr += n + 1;
        }
        if (i < var_count)
            break;
        _data->push_back(el_ptr);
    }
    fclose(f);
    Mat(responses).copyTo(*_responses);
    return true;
}

// Prepare the training data
Ptr<TrainData> prepare_train_data(const Mat& data, const Mat& responses, int ntrain_samples)
{
    // Mark the first ntrain_samples rows as the training subset.
    Mat sample_idx = Mat::zeros(1, data.rows, CV_8U);
    Mat train_samples = sample_idx.colRange(0, ntrain_samples);
    train_samples.setTo(Scalar::all(1));

    // All variables are ordered; the response is categorical.
    int nvars = data.cols;
    Mat var_type(nvars + 1, 1, CV_8U);
    var_type.setTo(Scalar::all(VAR_ORDERED));
    var_type.at<uchar>(nvars) = VAR_CATEGORICAL;

    return TrainData::create(data, ROW_SAMPLE, responses,
        noArray(), sample_idx, noArray(), var_type);
}

// Set the iteration termination criteria
inline TermCriteria TC(int iters, double eps)
{
    return TermCriteria(TermCriteria::MAX_ITER + (eps > 0 ? TermCriteria::EPS : 0), iters, eps);
}

// Predict and report the classification accuracy
void test_and_save_classifier(const Ptr<StatModel>& model, const Mat& data, const Mat& responses,
    int ntrain_samples, int rdelta)
{
    int i, nsamples_all = data.rows;
    double train_hr = 0, test_hr = 0;

    // Compute the prediction error on the train and test data.
    for (i = 0; i < nsamples_all; i++)
    {
        Mat sample = data.row(i);
        float r = model->predict(sample);
        r = std::abs(r + rdelta - responses.at<int>(i)) <= FLT_EPSILON ? 1.f : 0.f;
        if (i < ntrain_samples)
            train_hr += r;
        else
            test_hr += r;
    }
    test_hr /= nsamples_all - ntrain_samples;
    train_hr = ntrain_samples > 0 ? train_hr / ntrain_samples : 1.;
    printf("Recognition rate: train = %.1f%%, test = %.1f%%\n",
        train_hr*100., test_hr*100.);
}

// Random trees (random forest) classification
bool build_rtrees_classifier(const string& data_filename)
{
    Mat data;
    Mat responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);

    Ptr<RTrees> model;
    Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
    model = RTrees::create();
    model->setMaxDepth(10);
    model->setMinSampleCount(10);
    model->setRegressionAccuracy(0);
    model->setUseSurrogates(false);
    model->setMaxCategories(15);
    model->setPriors(Mat());
    model->setCalculateVarImportance(true);
    model->setActiveVarCount(4);
    model->setTermCriteria(TC(100, 0.01f));
    model->train(tdata);
    test_and_save_classifier(model, data, responses, ntrain_samples, 0);
    cout << "Number of trees: " << model->getRoots().size() << endl;

    // Print the variable importance.
    Mat var_importance = model->getVarImportance();
    if (!var_importance.empty())
    {
        double rt_imp_sum = sum(var_importance)[0];
        printf("var#\timportance (in %%):\n");
        int i, n = (int)var_importance.total();
        for (i = 0; i < n; i++)
            printf("%-2d\t%-4.1f\n", i, 100.f*var_importance.at<float>(i) / rt_imp_sum);
    }
    return true;
}

// AdaBoost classification
bool build_boost_classifier(const string& data_filename)
{
    const int class_count = 26;
    Mat data;
    Mat responses;
    Mat weak_responses;

    read_num_class_data(data_filename, 16, &data, &responses);
    int i, j, k;
    Ptr<Boost> model;

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.5);
    int var_count = data.cols;

    // Boost in OpenCV is a two-class classifier, so "unroll" the samples:
    // replicate each sample class_count times with the candidate class index
    // appended as an extra feature; the response is 1 only for the true class.
    Mat new_data(ntrain_samples*class_count, var_count + 1, CV_32F);
    Mat new_responses(ntrain_samples*class_count, 1, CV_32S);
    for (i = 0; i < ntrain_samples; i++)
    {
        const float* data_row = data.ptr<float>(i);
        for (j = 0; j < class_count; j++)
        {
            float* new_data_row = (float*)new_data.ptr<float>(i*class_count + j);
            memcpy(new_data_row, data_row, var_count * sizeof(data_row[0]));
            new_data_row[var_count] = (float)j;
            new_responses.at<int>(i*class_count + j) = responses.at<int>(i) == j + 'A';
        }
    }

    Mat var_type(1, var_count + 2, CV_8U);
    var_type.setTo(Scalar::all(VAR_ORDERED));
    var_type.at<uchar>(var_count) = var_type.at<uchar>(var_count + 1) = VAR_CATEGORICAL;

    Ptr<TrainData> tdata = TrainData::create(new_data, ROW_SAMPLE, new_responses,
        noArray(), noArray(), noArray(), var_type);
    vector<double> priors(2);
    priors[0] = 1;
    priors[1] = 26;

    model = Boost::create();
    model->setBoostType(Boost::GENTLE);
    model->setWeakCount(100);
    model->setWeightTrimRate(0.95);
    model->setMaxDepth(5);
    model->setUseSurrogates(false);
    model->setPriors(Mat(priors));
    model->train(tdata);

    Mat temp_sample(1, var_count + 1, CV_32F);
    float* tptr = temp_sample.ptr<float>();

    // Compute the prediction error on the train and test data: try every
    // candidate class and keep the one with the highest raw boost sum.
    double train_hr = 0, test_hr = 0;
    for (i = 0; i < nsamples_all; i++)
    {
        int best_class = 0;
        double max_sum = -DBL_MAX;
        const float* ptr = data.ptr<float>(i);
        for (k = 0; k < var_count; k++)
            tptr[k] = ptr[k];

        for (j = 0; j < class_count; j++)
        {
            tptr[var_count] = (float)j;
            float s = model->predict(temp_sample, noArray(), StatModel::RAW_OUTPUT);
            if (max_sum < s)
            {
                max_sum = s;
                best_class = j + 'A';
            }
        }
        double r = std::abs(best_class - responses.at<int>(i)) < FLT_EPSILON ? 1 : 0;
        if (i < ntrain_samples)
            train_hr += r;
        else
            test_hr += r;
    }
    test_hr /= nsamples_all - ntrain_samples;
    train_hr = ntrain_samples > 0 ? train_hr / ntrain_samples : 1.;
    printf("Recognition rate: train = %.1f%%, test = %.1f%%\n",
        train_hr*100., test_hr*100.);
    cout << "Number of trees: " << model->getRoots().size() << endl;
    return true;
}

// Multi-layer perceptron (ANN) classification
bool build_mlp_classifier(const string& data_filename)
{
    const int class_count = 26;
    Mat data;
    Mat responses;

    read_num_class_data(data_filename, 16, &data, &responses);
    Ptr<ANN_MLP> model;

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);
    Mat train_data = data.rowRange(0, ntrain_samples);
    Mat train_responses = Mat::zeros(ntrain_samples, class_count, CV_32F);

    // 1. Unroll the responses: one-hot encode the class labels.
    cout << "Unrolling the responses...\n";
    for (int i = 0; i < ntrain_samples; i++)
    {
        int cls_label = responses.at<int>(i) - 'A';
        train_responses.at<float>(i, cls_label) = 1.f;
    }

    // 2. Train the classifier.
    int layer_sz[] = { data.cols, 100, 100, class_count };
    int nlayers = (int)(sizeof(layer_sz) / sizeof(layer_sz[0]));
    Mat layer_sizes(1, nlayers, CV_32S, layer_sz);

#if 1
    int method = ANN_MLP::BACKPROP;
    double method_param = 0.001;
    int max_iter = 300;
#else
    int method = ANN_MLP::RPROP;
    double method_param = 0.1;
    int max_iter = 1000;
#endif

    Ptr<TrainData> tdata = TrainData::create(train_data, ROW_SAMPLE, train_responses);
    model = ANN_MLP::create();
    model->setLayerSizes(layer_sizes);
    model->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0);
    model->setTermCriteria(TC(max_iter, 0));
    model->setTrainMethod(method, method_param);
    model->train(tdata);
    return true;
}

// K-nearest-neighbor classification
bool build_knearest_classifier(const string& data_filename, int K)
{
    Mat data;
    Mat responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);

    Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
    Ptr<KNearest> model = KNearest::create();
    model->setDefaultK(K);
    model->setIsClassifier(true);
    model->train(tdata);

    test_and_save_classifier(model, data, responses, ntrain_samples, 0);
    return true;
}

// Normal Bayes classification
bool build_nbayes_classifier(const string& data_filename)
{
    Mat data;
    Mat responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);

    Ptr<NormalBayesClassifier> model;
    Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
    model = NormalBayesClassifier::create();
    model->train(tdata);

    test_and_save_classifier(model, data, responses, ntrain_samples, 0);
    return true;
}

// SVM classification
bool build_svm_classifier(const string& data_filename)
{
    Mat data;
    Mat responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);

    Ptr<SVM> model;
    Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
    model = SVM::create();
    model->setType(SVM::C_SVC);
    model->setKernel(SVM::LINEAR);
    model->setC(1);
    model->train(tdata);

    test_and_save_classifier(model, data, responses, ntrain_samples, 0);
    return true;
}

int main()
{
    string data_filename = "D:\\Program Files\\opencv\\sources\\samples\\data\\letter-recognition.data"; // letter dataset

    cout << "SVM classification:" << endl;
    build_svm_classifier(data_filename);

    cout << "Normal Bayes classification:" << endl;
    build_nbayes_classifier(data_filename);

    cout << "K-nearest-neighbor classification:" << endl;
    build_knearest_classifier(data_filename, 10);

    cout << "Random trees classification:" << endl;
    build_rtrees_classifier(data_filename);

    cout << "AdaBoost classification:" << endl;
    build_boost_classifier(data_filename);

    cout << "ANN (multi-layer perceptron) classification:" << endl;
    build_mlp_classifier(data_filename);

    system("pause");
    return 0;
}
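The listing uses the MSVC-secure CRT functions fopen_s and sscanf_s (and the Windows-only system("pause")), so it compiles as-is only under Visual Studio. A minimal portability sketch, assuming GCC or Clang, that maps those calls back to the standard CRT if pasted above the listing:

#ifndef _MSC_VER
#include <cstdio>
// fopen_s returns 0 on success and non-zero on failure, like the MSVC version.
inline int fopen_s(FILE** f, const char* name, const char* mode)
{
    *f = std::fopen(name, mode);
    return *f ? 0 : 1;
}
// For the "%f%n" format used above, sscanf_s takes the same arguments as sscanf.
#define sscanf_s sscanf
#endif

letter-recognition.data ships with the OpenCV sources under samples/data; each of its 20,000 lines holds a letter label followed by 16 comma-separated integer features, which is why every build_* function reads 16 variables.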
