OpenCV3 SVM, ANN, AdaBoost, KNN, random forests and other machine-learning methods for OCR classification

Adapted from http://www.cnblogs.com/denny402/p/5032839.html

The ml classes changed between OpenCV 2 and OpenCV 3. The program below exercises the OpenCV 3 machine-learning classes on the OCR (letter recognition) sample data that ships with OpenCV. In my runs the neural network and AdaBoost train very slowly, and KNN still gives the best results.
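For orientation, each record in letter-recognition.data (the UCI letter-recognition set, 20000 samples) is one line: a capital-letter label, then 16 integer features describing the glyph, comma-separated. A typical line looks like the following (quoted from the dataset; check your copy if in doubt):

T,2,8,3,5,1,8,13,0,6,6,10,8,0,8,0,8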
#include <opencv2/opencv.hpp>
#include <iostream>
#include <cstdio>    // printf, FILE, fgets
#include <cstring>   // strchr, memcpy
#include <cfloat>    // FLT_EPSILON, DBL_MAX

using namespace std;
using namespace cv;
using namespace cv::ml;
// Read the data file: each line is a letter label followed by var_count comma-separated features
bool read_num_class_data(const string& filename, int var_count, Mat* _data, Mat* _responses)
{
    const int M = 1024;               // line buffer size
    char buf[M + 2];
    Mat el_ptr(1, var_count, CV_32F); // one row of features
    int i;
    vector<int> responses;

    _data->release();
    _responses->release();

    FILE *f;
    fopen_s(&f, filename.c_str(), "rt"); // MSVC-specific; use fopen() on other compilers
    if (!f)
    {
        cout << "Could not read the database " << filename << endl;
        return false;
    }

    for (;;)
    {
        char* ptr;
        if (!fgets(buf, M, f) || !strchr(buf, ','))
            break;
        responses.push_back((int)buf[0]); // first character is the class label
        ptr = buf + 2;                    // skip the label and the comma
        for (i = 0; i < var_count; i++)
        {
            int n = 0;
            sscanf_s(ptr, "%f%n", &el_ptr.at<float>(i), &n);
            ptr += n + 1;
        }
        if (i < var_count)
            break;
        _data->push_back(el_ptr);
    }
    fclose(f);
    Mat(responses).copyTo(*_responses);
    return true;
}
// Prepare the training data: the first ntrain_samples rows form the training subset
Ptr<TrainData> prepare_train_data(const Mat& data, const Mat& responses, int ntrain_samples)
{
    Mat sample_idx = Mat::zeros(1, data.rows, CV_8U);
    Mat train_samples = sample_idx.colRange(0, ntrain_samples);
    train_samples.setTo(Scalar::all(1)); // mark training rows

    int nvars = data.cols;
    Mat var_type(nvars + 1, 1, CV_8U);
    var_type.setTo(Scalar::all(VAR_ORDERED));
    var_type.at<uchar>(nvars) = VAR_CATEGORICAL; // the response is categorical

    return TrainData::create(data, ROW_SAMPLE, responses,
        noArray(), sample_idx, noArray(), var_type);
}
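A note on prepare_train_data: sample_idx is a 1 x nsamples 0/1 mask, and TrainData::create only trains on the rows marked 1, so the split is simply "first 80% train, rest test". var_type declares the 16 features as ordered (numeric) and the response as categorical, which is what makes the tree-based models below treat the task as classification rather than regression.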
// Build termination criteria for iterative training
inline TermCriteria TC(int iters, double eps)
{
    return TermCriteria(TermCriteria::MAX_ITER + (eps > 0 ? TermCriteria::EPS : 0), iters, eps);
}
// Run the classifier on every sample and report train/test recognition rates
void test_and_save_classifier(const Ptr<StatModel>& model, const Mat& data, const Mat& responses,
    int ntrain_samples, int rdelta)
{
    int i, nsamples_all = data.rows;
    double train_hr = 0, test_hr = 0;

    // compute prediction error on train and test data
    for (i = 0; i < nsamples_all; i++)
    {
        Mat sample = data.row(i);
        float r = model->predict(sample);
        r = std::abs(r + rdelta - responses.at<int>(i)) <= FLT_EPSILON ? 1.f : 0.f;
        if (i < ntrain_samples)
            train_hr += r;
        else
            test_hr += r;
    }
    test_hr /= nsamples_all - ntrain_samples;
    train_hr = ntrain_samples > 0 ? train_hr / ntrain_samples : 1.;
    printf("Recognition rate: train = %.1f%%, test = %.1f%%\n",
        train_hr*100., test_hr*100.);
}
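The rdelta argument compensates for models whose predict() output is not the ASCII letter code itself: classifiers trained directly on the letter codes pass 0, while the MLP below outputs a class index in 0..25 and needs rdelta = 'A' to shift the index back to a letter. Despite its name, this trimmed version of the function only tests; the full OpenCV letter_recog sample also writes the model to disk.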
// Random trees (random forest) classifier
bool build_rtrees_classifier(const string& data_filename)
{
    Mat data;
    Mat responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);

    Ptr<RTrees> model;
    Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
    model = RTrees::create();
    model->setMaxDepth(10);
    model->setMinSampleCount(10);
    model->setRegressionAccuracy(0);
    model->setUseSurrogates(false);
    model->setMaxCategories(15);
    model->setPriors(Mat());
    model->setCalculateVarImportance(true);
    model->setActiveVarCount(4);
    model->setTermCriteria(TC(100, 0.01f));
    model->train(tdata);
    test_and_save_classifier(model, data, responses, ntrain_samples, 0);
    cout << "Number of trees: " << model->getRoots().size() << endl;

    // Print variable importance
    Mat var_importance = model->getVarImportance();
    if (!var_importance.empty())
    {
        double rt_imp_sum = sum(var_importance)[0];
        printf("var#\timportance (in %%):\n");
        int i, n = (int)var_importance.total();
        for (i = 0; i < n; i++)
            printf("%-2d\t%-4.1f\n", i, 100.f*var_importance.at<float>(i) / rt_imp_sum);
    }
    return true;
}
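Boost in OpenCV's ml module is a two-class classifier, so the next function borrows the "unrolling" trick from the letter_recog sample: each training sample is replicated 26 times with a candidate class index appended as an extra categorical feature, and the binary response records whether that candidate is the true class. At prediction time, the candidate with the largest raw boost sum wins. The unrolled training set is 26 times larger, which is the main reason AdaBoost is so slow here.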
// AdaBoost classifier
bool build_boost_classifier(const string& data_filename)
{
    const int class_count = 26;
    Mat data;
    Mat responses;
    Mat weak_responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    int i, j, k;
    Ptr<Boost> model;
    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.5);
    int var_count = data.cols;

    // Unroll the 26-class problem into a single 2-class problem
    Mat new_data(ntrain_samples*class_count, var_count + 1, CV_32F);
    Mat new_responses(ntrain_samples*class_count, 1, CV_32S);
    for (i = 0; i < ntrain_samples; i++)
    {
        const float* data_row = data.ptr<float>(i);
        for (j = 0; j < class_count; j++)
        {
            float* new_data_row = (float*)new_data.ptr<float>(i*class_count + j);
            memcpy(new_data_row, data_row, var_count * sizeof(data_row[0]));
            new_data_row[var_count] = (float)j; // candidate class as an extra feature
            new_responses.at<int>(i*class_count + j) = responses.at<int>(i) == j + 'A'; // 1 if true class
        }
    }

    Mat var_type(1, var_count + 2, CV_8U);
    var_type.setTo(Scalar::all(VAR_ORDERED));
    // both the appended class feature and the response are categorical
    var_type.at<uchar>(var_count) = var_type.at<uchar>(var_count + 1) = VAR_CATEGORICAL;

    Ptr<TrainData> tdata = TrainData::create(new_data, ROW_SAMPLE, new_responses,
        noArray(), noArray(), noArray(), var_type);

    // each unrolled sample yields 1 positive and 25 negatives, so weight the positive class up
    vector<double> priors(2);
    priors[0] = 1;
    priors[1] = 26;

    model = Boost::create();
    model->setBoostType(Boost::GENTLE);
    model->setWeakCount(100);
    model->setWeightTrimRate(0.95);
    model->setMaxDepth(5);
    model->setUseSurrogates(false);
    model->setPriors(Mat(priors));
    model->train(tdata);

    Mat temp_sample(1, var_count + 1, CV_32F);
    float* tptr = temp_sample.ptr<float>();

    // compute prediction error on train and test data
    double train_hr = 0, test_hr = 0;
    for (i = 0; i < nsamples_all; i++)
    {
        int best_class = 0;
        double max_sum = -DBL_MAX;
        const float* ptr = data.ptr<float>(i);
        for (k = 0; k < var_count; k++)
            tptr[k] = ptr[k];

        // pick the candidate class with the largest raw boost response
        for (j = 0; j < class_count; j++)
        {
            tptr[var_count] = (float)j;
            float s = model->predict(temp_sample, noArray(), StatModel::RAW_OUTPUT);
            if (max_sum < s)
            {
                max_sum = s;
                best_class = j + 'A';
            }
        }

        double r = std::abs(best_class - responses.at<int>(i)) < FLT_EPSILON ? 1 : 0;
        if (i < ntrain_samples)
            train_hr += r;
        else
            test_hr += r;
    }
    test_hr /= nsamples_all - ntrain_samples;
    train_hr = ntrain_samples > 0 ? train_hr / ntrain_samples : 1.;
    printf("Recognition rate: train = %.1f%%, test = %.1f%%\n",
        train_hr*100., test_hr*100.);
    cout << "Number of trees: " << model->getRoots().size() << endl;
    return true;
}
// Multi-layer perceptron (ANN) classifier
bool build_mlp_classifier(const string& data_filename)
{
    const int class_count = 26;
    Mat data;
    Mat responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    Ptr<ANN_MLP> model;
    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);
    Mat train_data = data.rowRange(0, ntrain_samples);
    Mat train_responses = Mat::zeros(ntrain_samples, class_count, CV_32F);

    // 1. unroll the responses into one-hot vectors
    cout << "Unrolling the responses...\n";
    for (int i = 0; i < ntrain_samples; i++)
    {
        int cls_label = responses.at<int>(i) - 'A';
        train_responses.at<float>(i, cls_label) = 1.f;
    }

    // 2. train classifier
    int layer_sz[] = { data.cols, 100, 100, class_count }; // two hidden layers of 100 units
    int nlayers = (int)(sizeof(layer_sz) / sizeof(layer_sz[0]));
    Mat layer_sizes(1, nlayers, CV_32S, layer_sz);

#if 1
    int method = ANN_MLP::BACKPROP;
    double method_param = 0.001; // learning rate
    int max_iter = 300;
#else
    int method = ANN_MLP::RPROP;
    double method_param = 0.1;   // initial update value
    int max_iter = 1000;
#endif

    Ptr<TrainData> tdata = TrainData::create(train_data, ROW_SAMPLE, train_responses);
    model = ANN_MLP::create();
    model->setLayerSizes(layer_sizes);
    model->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0);
    model->setTermCriteria(TC(max_iter, 0));
    model->setTrainMethod(method, method_param);
    model->train(tdata);

    // Evaluate like the other models; rdelta = 'A' converts the predicted
    // class index (0..25) back to an ASCII letter code, as in the original
    // OpenCV letter_recog sample.
    test_and_save_classifier(model, data, responses, ntrain_samples, 'A');
    return true;
}
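Note that method_param means different things for the two training methods: for BACKPROP it is the weight-gradient scale (the learning rate, forwarded to setBackpropWeightScale), for RPROP the initial update value (forwarded to setRpropDW0). Flip the #if 1 to try RPROP with more iterations.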
// K-nearest-neighbors classifier
bool build_knearest_classifier(const string& data_filename, int K)
{
    Mat data;
    Mat responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);

    Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
    Ptr<KNearest> model = KNearest::create();
    model->setDefaultK(K);
    model->setIsClassifier(true);
    model->train(tdata);
    test_and_save_classifier(model, data, responses, ntrain_samples, 0);
    return true;
}
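Besides predict(), KNearest exposes findNearest(), which also returns the neighbor labels and distances, handy for inspecting borderline letters. A minimal sketch of what could be added inside build_knearest_classifier before the return (the chosen row is just for illustration):

    // Classify the first sample and make the K neighbor labels/distances available
    Mat results, neighborResponses, dists;
    float pred = model->findNearest(data.row(0), K, results, neighborResponses, dists);
    cout << "predicted letter: " << (char)pred << endl;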
// Normal Bayes classifier
bool build_nbayes_classifier(const string& data_filename)
{
    Mat data;
    Mat responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);

    Ptr<NormalBayesClassifier> model;
    Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
    model = NormalBayesClassifier::create();
    model->train(tdata);
    test_and_save_classifier(model, data, responses, ntrain_samples, 0);
    return true;
}
// SVM classifier
bool build_svm_classifier(const string& data_filename)
{
    Mat data;
    Mat responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);

    Ptr<SVM> model;
    Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
    model = SVM::create();
    model->setType(SVM::C_SVC);
    model->setKernel(SVM::LINEAR);
    model->setC(1);
    model->train(tdata);
    test_and_save_classifier(model, data, responses, ntrain_samples, 0);
    return true;
}

int main()
{
    // letter data shipped with the OpenCV samples
    string data_filename = "D:\\Program Files\\opencv\\sources\\samples\\data\\letter-recognition.data";

    cout << "SVM classification:" << endl;
    build_svm_classifier(data_filename);
    cout << "Bayes classification:" << endl;
    build_nbayes_classifier(data_filename);
    cout << "K-nearest-neighbors classification:" << endl;
    build_knearest_classifier(data_filename, 10); // K = 10, as in the OpenCV sample
    cout << "Random trees classification:" << endl;
    build_rtrees_classifier(data_filename);
    cout << "AdaBoost classification:" << endl;
    build_boost_classifier(data_filename);
    cout << "ANN (multi-layer perceptron) classification:" << endl;
    build_mlp_classifier(data_filename);
    system("pause");
    return 0;
}
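Since the trimmed test_and_save_classifier no longer saves anything, persisting a trained model has to be done separately. The OpenCV 3 StatModel API supports this; a minimal sketch that could go after model->train(tdata) in build_svm_classifier (the file name is illustrative):

    // Save the trained model as XML/YAML and reload it later
    model->save("letter_svm.xml");
    Ptr<SVM> loaded = StatModel::load<SVM>("letter_svm.xml");
    // loaded->predict(...) now behaves like the original model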