OCR Classification with OpenCV3 Machine Learning Methods (SVM, ANN, AdaBoost, KNN, Random Forests)
Reposted from http://www.cnblogs.com/denny402/p/5032839.html
The ml classes changed between OpenCV2 and OpenCV3. The listing below demonstrates OpenCV3's machine learning classes by classifying OpenCV's bundled OCR sample data (the letter-recognition dataset). The neural network and AdaBoost train very slowly; of the methods tried, KNN gives the best results.
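Each record in letter-recognition.data is one capital letter (the class label) followed by 16 comma-separated integer features; that is the format read_num_class_data below parses. A representative record (values shown for illustration only):

T,2,8,3,5,1,8,13,0,6,6,10,8,0,8,0,8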
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace std;
using namespace cv;
using namespace cv::ml;

// Read the data file: each line is "<label>,<f1>,...,<f16>"
bool read_num_class_data(const string& filename, int var_count, Mat* _data, Mat* _responses)
{
    const int M = 1024;                   // maximum line length
    char buf[M + 2];
    Mat el_ptr(1, var_count, CV_32F);     // one row of features
    int i;
    vector<int> responses;

    _data->release();
    _responses->release();

    FILE *f;
    fopen_s(&f, filename.c_str(), "rt");  // MSVC-specific; use fopen elsewhere
    if (!f)
    {
        cout << "Could not read the database " << filename << endl;
        return false;
    }

    for (;;)
    {
        char* ptr;
        if (!fgets(buf, M, f) || !strchr(buf, ','))
            break;
        responses.push_back((int)buf[0]); // first character is the class label
        ptr = buf + 2;                    // skip the label and its comma
        for (i = 0; i < var_count; i++)
        {
            int n = 0;
            sscanf_s(ptr, "%f%n", &el_ptr.at<float>(i), &n);
            ptr += n + 1;
        }
        if (i < var_count)
            break;
        _data->push_back(el_ptr);
    }
    fclose(f);
    Mat(responses).copyTo(*_responses);
    return true;
}
// Prepare the training data: mark the first ntrain_samples rows as the training set
Ptr<TrainData> prepare_train_data(const Mat& data, const Mat& responses, int ntrain_samples)
{
    Mat sample_idx = Mat::zeros(1, data.rows, CV_8U);
    Mat train_samples = sample_idx.colRange(0, ntrain_samples);
    train_samples.setTo(Scalar::all(1));

    int nvars = data.cols;
    Mat var_type(nvars + 1, 1, CV_8U);
    var_type.setTo(Scalar::all(VAR_ORDERED));
    var_type.at<uchar>(nvars) = VAR_CATEGORICAL;  // the response is categorical

    return TrainData::create(data, ROW_SAMPLE, responses,
        noArray(), sample_idx, noArray(), var_type);
}
// Set the termination criteria: stop after `iters` iterations, or earlier
// once the change drops below `eps` (if eps > 0)
inline TermCriteria TC(int iters, double eps)
{
    return TermCriteria(TermCriteria::MAX_ITER + (eps > 0 ? TermCriteria::EPS : 0), iters, eps);
}
// Test the classifier: report recognition rates on the train and test portions
void test_and_save_classifier(const Ptr<StatModel>& model, const Mat& data, const Mat& responses,
    int ntrain_samples, int rdelta)
{
    int i, nsamples_all = data.rows;
    double train_hr = 0, test_hr = 0;

    // compute prediction error on train and test data
    for (i = 0; i < nsamples_all; i++)
    {
        Mat sample = data.row(i);
        float r = model->predict(sample);
        r = std::abs(r + rdelta - responses.at<int>(i)) <= FLT_EPSILON ? 1.f : 0.f;
        if (i < ntrain_samples)
            train_hr += r;
        else
            test_hr += r;
    }

    test_hr /= nsamples_all - ntrain_samples;
    train_hr = ntrain_samples > 0 ? train_hr / ntrain_samples : 1.;
    printf("Recognition rate: train = %.1f%%, test = %.1f%%\n",
        train_hr*100., test_hr*100.);
}
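// rdelta reconciles a model's numeric predictions with the labels stored in
// `responses`. The models below are trained directly on the ASCII codes
// ('A'..'Z'), so every call in this listing passes rdelta = 0.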
// Random trees (random forest) classification
bool build_rtrees_classifier(const string& data_filename)
{
    Mat data;
    Mat responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);

    Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
    Ptr<RTrees> model = RTrees::create();
    // parameter values follow OpenCV's letter_recog.cpp sample
    model->setMaxDepth(10);
    model->setMinSampleCount(10);
    model->setRegressionAccuracy(0);
    model->setUseSurrogates(false);
    model->setMaxCategories(15);
    model->setPriors(Mat());
    model->setCalculateVarImportance(true);
    model->setActiveVarCount(4);
    model->setTermCriteria(TC(100, 0.01f));
    model->train(tdata);
    test_and_save_classifier(model, data, responses, ntrain_samples, 0);
    cout << "Number of trees: " << model->getRoots().size() << endl;

    // Print variable importance
    Mat var_importance = model->getVarImportance();
    if (!var_importance.empty())
    {
        double rt_imp_sum = sum(var_importance)[0];
        printf("var#\timportance (in %%):\n");
        int i, n = (int)var_importance.total();
        for (i = 0; i < n; i++)
            printf("%-2d\t%-4.1f\n", i, 100.f*var_importance.at<float>(i) / rt_imp_sum);
    }
    return true;
}
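// Note on the AdaBoost section below: Boost in OpenCV is a two-class
// classifier, so the 26-class letter problem is "unrolled": every training
// sample is replicated once per candidate class, with the class index
// appended as an extra feature, and the response becomes a binary
// "is this the correct class" flag. Prediction then evaluates all 26
// candidate labels and keeps the one with the largest raw boosting sum.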
// AdaBoost classification
bool build_boost_classifier(const string& data_filename)
{
    const int class_count = 26;
    Mat data;
    Mat responses;
    Mat weak_responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    int i, j, k;
    Ptr<Boost> model;
    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.5);
    int var_count = data.cols;

    // unroll: one (sample, candidate class) pair per row
    Mat new_data(ntrain_samples*class_count, var_count + 1, CV_32F);
    Mat new_responses(ntrain_samples*class_count, 1, CV_32S);
    for (i = 0; i < ntrain_samples; i++)
    {
        const float* data_row = data.ptr<float>(i);
        for (j = 0; j < class_count; j++)
        {
            float* new_data_row = (float*)new_data.ptr<float>(i*class_count + j);
            memcpy(new_data_row, data_row, var_count * sizeof(data_row[0]));
            new_data_row[var_count] = (float)j;
            new_responses.at<int>(i*class_count + j) = responses.at<int>(i) == j + 'A';
        }
    }

    Mat var_type(1, var_count + 2, CV_8U);
    var_type.setTo(Scalar::all(VAR_ORDERED));
    var_type.at<uchar>(var_count) = var_type.at<uchar>(var_count + 1) = VAR_CATEGORICAL;

    Ptr<TrainData> tdata = TrainData::create(new_data, ROW_SAMPLE, new_responses,
        noArray(), noArray(), noArray(), var_type);

    // the "wrong class" rows outnumber the "right class" rows 25:1,
    // so weight the positive class up
    vector<double> priors(2);
    priors[0] = 1;
    priors[1] = 26;

    model = Boost::create();
    model->setBoostType(Boost::GENTLE);
    model->setWeakCount(100);
    model->setWeightTrimRate(0.95);
    model->setMaxDepth(5);
    model->setUseSurrogates(false);
    model->setPriors(Mat(priors));
    model->train(tdata);

    Mat temp_sample(1, var_count + 1, CV_32F);
    float* tptr = temp_sample.ptr<float>();

    // compute prediction error on train and test data
    double train_hr = 0, test_hr = 0;
    for (i = 0; i < nsamples_all; i++)
    {
        int best_class = 0;
        double max_sum = -DBL_MAX;
        const float* ptr = data.ptr<float>(i);
        for (k = 0; k < var_count; k++)
            tptr[k] = ptr[k];
        for (j = 0; j < class_count; j++)
        {
            tptr[var_count] = (float)j;
            float s = model->predict(temp_sample, noArray(), StatModel::RAW_OUTPUT);
            if (max_sum < s)
            {
                max_sum = s;
                best_class = j + 'A';
            }
        }
        double r = std::abs(best_class - responses.at<int>(i)) < FLT_EPSILON ? 1 : 0;
        if (i < ntrain_samples)
            train_hr += r;
        else
            test_hr += r;
    }

    test_hr /= nsamples_all - ntrain_samples;
    train_hr = ntrain_samples > 0 ? train_hr / ntrain_samples : 1.;
    printf("Recognition rate: train = %.1f%%, test = %.1f%%\n",
        train_hr*100., test_hr*100.);
    cout << "Number of trees: " << model->getRoots().size() << endl;
    return true;
}
// Multilayer perceptron (ANN) classification
bool build_mlp_classifier(const string& data_filename)
{
    const int class_count = 26;
    Mat data;
    Mat responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    Ptr<ANN_MLP> model;
    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);

    Mat train_data = data.rowRange(0, ntrain_samples);
    Mat train_responses = Mat::zeros(ntrain_samples, class_count, CV_32F);

    // 1. unroll the responses into one-hot rows
    cout << "Unrolling the responses...\n";
    for (int i = 0; i < ntrain_samples; i++)
    {
        int cls_label = responses.at<int>(i) - 'A';
        train_responses.at<float>(i, cls_label) = 1.f;
    }

    // 2. train classifier
    int layer_sz[] = { data.cols, 100, 100, class_count };
    int nlayers = (int)(sizeof(layer_sz) / sizeof(layer_sz[0]));
    Mat layer_sizes(1, nlayers, CV_32S, layer_sz);

#if 1
    int method = ANN_MLP::BACKPROP;
    double method_param = 0.001;
    int max_iter = 300;
#else
    int method = ANN_MLP::RPROP;
    double method_param = 0.1;
    int max_iter = 1000;
#endif

    Ptr<TrainData> tdata = TrainData::create(train_data, ROW_SAMPLE, train_responses);
    model = ANN_MLP::create();
    model->setLayerSizes(layer_sizes);
    model->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0);
    model->setTermCriteria(TC(max_iter, 0));
    model->setTrainMethod(method, method_param);
    model->train(tdata);
    return true;
}
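// Note: unlike the other builders, build_mlp_classifier above only trains.
// ANN_MLP::predict returns the index of the strongest output neuron, so to
// reuse test_and_save_classifier one would pass rdelta = 'A' (this is what
// OpenCV's letter_recog.cpp sample does).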
// K-nearest-neighbor classification
bool build_knearest_classifier(const string& data_filename, int K)
{
    Mat data;
    Mat responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);

    Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
    Ptr<KNearest> model = KNearest::create();
    model->setDefaultK(K);
    model->setIsClassifier(true);
    model->train(tdata);
    test_and_save_classifier(model, data, responses, ntrain_samples, 0);
    return true;
}
// Normal Bayes classification
bool build_nbayes_classifier(const string& data_filename)
{
    Mat data;
    Mat responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);

    Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
    Ptr<NormalBayesClassifier> model = NormalBayesClassifier::create();
    model->train(tdata);
    test_and_save_classifier(model, data, responses, ntrain_samples, 0);
    return true;
}
// SVM classification
bool build_svm_classifier(const string& data_filename)
{
    Mat data;
    Mat responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);

    Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
    Ptr<SVM> model = SVM::create();
    model->setType(SVM::C_SVC);
    model->setKernel(SVM::LINEAR);
    model->setC(1);
    model->train(tdata);
    test_and_save_classifier(model, data, responses, ntrain_samples, 0);
    return true;
}
int main()
{
    // letter dataset shipped with OpenCV
    string data_filename = "D:\\Program Files\\opencv\\sources\\samples\\data\\letter-recognition.data";
    cout << "SVM classification:" << endl;
    build_svm_classifier(data_filename);
    cout << "Normal Bayes classification:" << endl;
    build_nbayes_classifier(data_filename);
    cout << "K-nearest-neighbor classification:" << endl;
    build_knearest_classifier(data_filename, 10);  // K = 10, as in OpenCV's sample
    cout << "Random trees classification:" << endl;
    build_rtrees_classifier(data_filename);
    cout << "AdaBoost classification:" << endl;
    build_boost_classifier(data_filename);
    cout << "ANN (multilayer perceptron) classification:" << endl;
    build_mlp_classifier(data_filename);
    system("pause");
    return 0;
}
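The listing uses the MSVC-only fopen_s/sscanf_s, so it builds as-is under Visual Studio with OpenCV3 configured. A minimal build sketch for GCC/Clang, assuming pkg-config can locate your OpenCV3 install and the _s calls are swapped for plain fopen/sscanf:

g++ -std=c++11 letter_recog.cpp -o letter_recog $(pkg-config --cflags --libs opencv)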