Reposted from http://www.cnblogs.com/denny402/p/5032839.html

The ml module classes in OpenCV 3 changed from those in OpenCV 2. Below is a worked example of the OpenCV 3 machine learning class methods:
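In OpenCV 2 each model was a standalone class (CvSVM, CvKNearest, CvRTrees, ...) configured through a params struct; in OpenCV 3 every model derives from cv::ml::StatModel, is created with a static create() factory, and is configured through setter methods. A minimal sketch of the same SVM setup in both styles (the training data and parameter values are illustrative):

// OpenCV 2 style: concrete class + params struct
CvSVMParams params;
params.svm_type = CvSVM::C_SVC;
params.kernel_type = CvSVM::LINEAR;
CvSVM svm;
svm.train(train_data, train_labels, Mat(), Mat(), params);

// OpenCV 3 style: factory + setters, shared StatModel interface
Ptr<cv::ml::SVM> svm3 = cv::ml::SVM::create();
svm3->setType(cv::ml::SVM::C_SVC);
svm3->setKernel(cv::ml::SVM::LINEAR);
svm3->train(train_data, cv::ml::ROW_SAMPLE, train_labels);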

The program classifies the OCR sample data that ships with OpenCV. The neural network and AdaBoost are very slow to train; KNN still gives the best result.
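letter-recognition.data is the UCI letter-recognition dataset bundled with the OpenCV samples: 20000 lines, each a capital-letter label followed by 16 integer features, for example:

T,2,8,3,5,1,8,13,0,6,6,10,8,0,8,0,8

read_num_class_data() below parses exactly this layout: the first character becomes the integer response label, and the remaining 16 comma-separated values become one CV_32F feature row.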

#include <opencv2/opencv.hpp>
#include <cstdio>   // FILE, fgets, printf
#include <cstring>  // strchr, memcpy
#include <cfloat>   // FLT_EPSILON, DBL_MAX
#include <iostream>
using namespace std;
using namespace cv;
using namespace cv::ml;

// read the sample data from file: each line is "<label>,<f1>,<f2>,...,<f16>"
bool read_num_class_data(const string& filename, int var_count, Mat* _data, Mat* _responses)
{
    const int M = 1024;
    char buf[M + 2];
    Mat el_ptr(1, var_count, CV_32F);
    int i;
    vector<int> responses;

    _data->release();
    _responses->release();

    FILE* f;
    fopen_s(&f, filename.c_str(), "rt");
    if (!f)
    {
        cout << "Could not read the database " << filename << endl;
        return false;
    }
    for (;;)
    {
        char* ptr;
        if (!fgets(buf, M, f) || !strchr(buf, ','))
            break;
        responses.push_back((int)buf[0]);  // first character is the class label
        ptr = buf + 2;                     // skip the label and the comma
        for (i = 0; i < var_count; i++)
        {
            int n = 0;
            sscanf_s(ptr, "%f%n", &el_ptr.at<float>(i), &n);
            ptr += n + 1;
        }
        if (i < var_count)
            break;
        _data->push_back(el_ptr);
    }
    fclose(f);
    Mat(responses).copyTo(*_responses);
    return true;
}

// prepare the training data
Ptr<TrainData> prepare_train_data(const Mat& data, const Mat& responses, int ntrain_samples)
{
    // mark the first ntrain_samples rows as the training subset
    Mat sample_idx = Mat::zeros(1, data.rows, CV_8U);
    Mat train_samples = sample_idx.colRange(0, ntrain_samples);
    train_samples.setTo(Scalar::all(1));

    // all input variables are ordered; the response is categorical
    int nvars = data.cols;
    Mat var_type(nvars + 1, 1, CV_8U);
    var_type.setTo(Scalar::all(VAR_ORDERED));
    var_type.at<uchar>(nvars) = VAR_CATEGORICAL;

    return TrainData::create(data, ROW_SAMPLE, responses,
        noArray(), sample_idx, noArray(), var_type);
}

// build the termination criteria for iterative training
inline TermCriteria TC(int iters, double eps)
{
    return TermCriteria(TermCriteria::MAX_ITER + (eps > 0 ? TermCriteria::EPS : 0), iters, eps);
}

// predict on the train and test subsets and print the recognition rates
void test_and_save_classifier(const Ptr<StatModel>& model, const Mat& data, const Mat& responses,
    int ntrain_samples, int rdelta)
{
    int i, nsamples_all = data.rows;
    double train_hr = 0, test_hr = 0;

    // compute prediction error on train and test data
    for (i = 0; i < nsamples_all; i++)
    {
        Mat sample = data.row(i);
        float r = model->predict(sample);
        r = std::abs(r + rdelta - responses.at<int>(i)) <= FLT_EPSILON ? 1.f : 0.f;
        if (i < ntrain_samples)
            train_hr += r;
        else
            test_hr += r;
    }
    test_hr /= nsamples_all - ntrain_samples;
    train_hr = ntrain_samples > 0 ? train_hr / ntrain_samples : 1.;
    printf("Recognition rate: train = %.1f%%, test = %.1f%%\n",
        train_hr*100., test_hr*100.);
}

// random trees (random forest) classifier
bool build_rtrees_classifier(const string& data_filename)
{
    Mat data;
    Mat responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);

    Ptr<RTrees> model;
    Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
    model = RTrees::create();
    model->setMaxDepth(10);
    model->setMinSampleCount(10);
    model->setRegressionAccuracy(0);
    model->setUseSurrogates(false);
    model->setMaxCategories(15);
    model->setPriors(Mat());
    model->setCalculateVarImportance(true);
    model->setActiveVarCount(4);
    model->setTermCriteria(TC(100, 0.01f));
    model->train(tdata);
    test_and_save_classifier(model, data, responses, ntrain_samples, 0);
    cout << "Number of trees: " << model->getRoots().size() << endl;

    // print variable importance
    Mat var_importance = model->getVarImportance();
    if (!var_importance.empty())
    {
        double rt_imp_sum = sum(var_importance)[0];
        printf("var#\timportance (in %%):\n");
        int i, n = (int)var_importance.total();
        for (i = 0; i < n; i++)
            printf("%-2d\t%-4.1f\n", i, 100.f*var_importance.at<float>(i) / rt_imp_sum);
    }
    return true;
}

// AdaBoost classifier
bool build_boost_classifier(const string& data_filename)
{
    const int class_count = 26;
    Mat data;
    Mat responses;
    Mat weak_responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    int i, j, k;
    Ptr<Boost> model;
    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.5);
    int var_count = data.cols;

    // Boost is a two-class classifier, so unroll each sample into class_count
    // samples of the form (features, class index) -> is/is not that class
    Mat new_data(ntrain_samples*class_count, var_count + 1, CV_32F);
    Mat new_responses(ntrain_samples*class_count, 1, CV_32S);
    for (i = 0; i < ntrain_samples; i++)
    {
        const float* data_row = data.ptr<float>(i);
        for (j = 0; j < class_count; j++)
        {
            float* new_data_row = (float*)new_data.ptr<float>(i*class_count + j);
            memcpy(new_data_row, data_row, var_count * sizeof(data_row[0]));
            new_data_row[var_count] = (float)j;
            new_responses.at<int>(i*class_count + j) = responses.at<int>(i) == j + 'A';
        }
    }

    Mat var_type(1, var_count + 2, CV_8U);
    var_type.setTo(Scalar::all(VAR_ORDERED));
    var_type.at<uchar>(var_count) = var_type.at<uchar>(var_count + 1) = VAR_CATEGORICAL;

    Ptr<TrainData> tdata = TrainData::create(new_data, ROW_SAMPLE, new_responses,
        noArray(), noArray(), noArray(), var_type);
    vector<double> priors(2);
    priors[0] = 1;
    priors[1] = 26;

    model = Boost::create();
    model->setBoostType(Boost::GENTLE);
    model->setWeakCount(100);
    model->setWeightTrimRate(0.95);
    model->setMaxDepth(5);
    model->setUseSurrogates(false);
    model->setPriors(Mat(priors));
    model->train(tdata);

    Mat temp_sample(1, var_count + 1, CV_32F);
    float* tptr = temp_sample.ptr<float>();

    // compute prediction error on train and test data
    double train_hr = 0, test_hr = 0;
    for (i = 0; i < nsamples_all; i++)
    {
        int best_class = 0;
        double max_sum = -DBL_MAX;
        const float* ptr = data.ptr<float>(i);
        for (k = 0; k < var_count; k++)
            tptr[k] = ptr[k];
        // pick the class with the largest raw boosting sum
        for (j = 0; j < class_count; j++)
        {
            tptr[var_count] = (float)j;
            float s = model->predict(temp_sample, noArray(), StatModel::RAW_OUTPUT);
            if (max_sum < s)
            {
                max_sum = s;
                best_class = j + 'A';
            }
        }
        double r = std::abs(best_class - responses.at<int>(i)) < FLT_EPSILON ? 1 : 0;
        if (i < ntrain_samples)
            train_hr += r;
        else
            test_hr += r;
    }
    test_hr /= nsamples_all - ntrain_samples;
    train_hr = ntrain_samples > 0 ? train_hr / ntrain_samples : 1.;
    printf("Recognition rate: train = %.1f%%, test = %.1f%%\n",
        train_hr*100., test_hr*100.);
    cout << "Number of trees: " << model->getRoots().size() << endl;
    return true;
}

// multi-layer perceptron (ANN) classifier
bool build_mlp_classifier(const string& data_filename)
{
    const int class_count = 26;
    Mat data;
    Mat responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    Ptr<ANN_MLP> model;
    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);
    Mat train_data = data.rowRange(0, ntrain_samples);
    Mat train_responses = Mat::zeros(ntrain_samples, class_count, CV_32F);

    // 1. unroll the responses into one-hot vectors
    cout << "Unrolling the responses...\n";
    for (int i = 0; i < ntrain_samples; i++)
    {
        int cls_label = responses.at<int>(i) - 'A';
        train_responses.at<float>(i, cls_label) = 1.f;
    }

    // 2. train the classifier
    int layer_sz[] = { data.cols, 100, 100, class_count };
    int nlayers = (int)(sizeof(layer_sz) / sizeof(layer_sz[0]));
    Mat layer_sizes(1, nlayers, CV_32S, layer_sz);
#if 1
    int method = ANN_MLP::BACKPROP;
    double method_param = 0.001;
    int max_iter = 300;
#else
    int method = ANN_MLP::RPROP;
    double method_param = 0.1;
    int max_iter = 1000;
#endif
    Ptr<TrainData> tdata = TrainData::create(train_data, ROW_SAMPLE, train_responses);
    model = ANN_MLP::create();
    model->setLayerSizes(layer_sizes);
    model->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0);
    model->setTermCriteria(TC(max_iter, 0));
    model->setTrainMethod(method, method_param);
    model->train(tdata);
    return true;
}

// K-nearest-neighbor classifier
bool build_knearest_classifier(const string& data_filename, int K)
{
    Mat data;
    Mat responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);

    Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
    Ptr<KNearest> model = KNearest::create();
    model->setDefaultK(K);
    model->setIsClassifier(true);
    model->train(tdata);
    test_and_save_classifier(model, data, responses, ntrain_samples, 0);
    return true;
}

// normal Bayes classifier
bool build_nbayes_classifier(const string& data_filename)
{
    Mat data;
    Mat responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);

    Ptr<NormalBayesClassifier> model;
    Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
    model = NormalBayesClassifier::create();
    model->train(tdata);
    test_and_save_classifier(model, data, responses, ntrain_samples, 0);
    return true;
}

// SVM classifier
bool build_svm_classifier(const string& data_filename)
{
    Mat data;
    Mat responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);

    Ptr<SVM> model;
    Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
    model = SVM::create();
    model->setType(SVM::C_SVC);
    model->setKernel(SVM::LINEAR);
    model->setC(1);
    model->train(tdata);
    test_and_save_classifier(model, data, responses, ntrain_samples, 0);
    return true;
}

int main()
{
    string data_filename = "D:\\Program Files\\opencv\\sources\\samples\\data\\letter-recognition.data"; // letter-recognition dataset

    cout << "SVM:" << endl;
    build_svm_classifier(data_filename);
    cout << "Normal Bayes:" << endl;
    build_nbayes_classifier(data_filename);
    cout << "K-nearest neighbors:" << endl;
    build_knearest_classifier(data_filename, 10);  // K = 10, as in the original OpenCV sample
    cout << "Random trees:" << endl;
    build_rtrees_classifier(data_filename);
    cout << "AdaBoost:" << endl;
    build_boost_classifier(data_filename);
    cout << "ANN (multi-layer perceptron):" << endl;
    build_mlp_classifier(data_filename);
    system("pause");
    return 0;
}
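The OpenCV sample this code follows (letter_recog.cpp) also writes the trained model to disk, a step this version drops even though test_and_save_classifier keeps the name. A minimal sketch of persisting and reloading a model with the OpenCV 3 API, assuming a trained SVM and an illustrative filename:

// serialize the trained model to XML/YAML
model->save("letter_svm.xml");
// reload it later and classify one 1x16 CV_32F feature row
Ptr<SVM> loaded = StatModel::load<SVM>("letter_svm.xml");
float prediction = loaded->predict(sample);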
