OpenCV face recognition C++ code

/*
* Copyright (c) 2011,2012. Philipp Wagner <bytefish[at]gmx[dot]de>.
* Released to public domain under terms of the BSD Simplified license.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the organization nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* See <http://www.opensource.org/licenses/bsd-license>
*/
#include "precomp.hpp"
#include <set> namespace cv
{ using std::set; // Reads a sequence from a FileNode::SEQ with type _Tp into a result vector.
template<typename _Tp>
inline void readFileNodeList(const FileNode& fn, vector<_Tp>& result) {
if (fn.type() == FileNode::SEQ) {
for (FileNodeIterator it = fn.begin(); it != fn.end();) {
_Tp item;
it >> item;
result.push_back(item);
}
}
}

// Writes a list of given items to a cv::FileStorage.
template<typename _Tp>
inline void writeFileNodeList(FileStorage& fs, const string& name,
const vector<_Tp>& items) {
// typedefs
typedef typename vector<_Tp>::const_iterator constVecIterator;
// write the elements in item to fs
fs << name << "[";
for (constVecIterator it = items.begin(); it != items.end(); ++it) {
fs << *it;
}
fs << "]";
}

static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double beta=0) {
// make sure the input data is a vector of matrices or vector of vector
if(src.kind() != _InputArray::STD_VECTOR_MAT && src.kind() != _InputArray::STD_VECTOR_VECTOR) {
string error_message = "The data is expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< vector<...> >).";
CV_Error(CV_StsBadArg, error_message);
}
// number of samples
size_t n = src.total();
// return empty matrix if no matrices given
if(n == 0)
return Mat();
// dimensionality of (reshaped) samples
size_t d = src.getMat(0).total();
// create data matrix
Mat data((int)n, (int)d, rtype);
// now copy data
for(unsigned int i = 0; i < n; i++) {
// make sure data can be reshaped, throw exception if not!
if(src.getMat(i).total() != d) {
string error_message = format("Wrong number of elements in matrix #%d! Expected %d was %d.", i, d, src.getMat(i).total());
CV_Error(CV_StsBadArg, error_message);
}
// get a hold of the current row
Mat xi = data.row(i);
// make reshape happy by cloning for non-continuous matrices
if(src.getMat(i).isContinuous()) {
src.getMat(i).reshape(1, 1).convertTo(xi, rtype, alpha, beta);
} else {
src.getMat(i).clone().reshape(1, 1).convertTo(xi, rtype, alpha, beta);
}
}
return data;
}

// Removes duplicate elements in a given vector.
template<typename _Tp>
inline vector<_Tp> remove_dups(const vector<_Tp>& src) {
typedef typename set<_Tp>::const_iterator constSetIterator;
typedef typename vector<_Tp>::const_iterator constVecIterator;
set<_Tp> set_elems;
for (constVecIterator it = src.begin(); it != src.end(); ++it)
set_elems.insert(*it);
vector<_Tp> elems;
for (constSetIterator it = set_elems.begin(); it != set_elems.end(); ++it)
elems.push_back(*it);
return elems;
}

// Turk, M., and Pentland, A. "Eigenfaces for recognition." Journal of
// Cognitive Neuroscience 3 (1991), 71–86.
// Eigenfaces class
class Eigenfaces : public FaceRecognizer
{
private:
int _num_components; // the q principal components kept by the PCA
double _threshold;
vector<Mat> _projections; // projections of the training samples into the PCA subspace
Mat _labels; // label of each training image, used for classification
Mat _eigenvectors; // eigenvectors
Mat _eigenvalues; // eigenvalues
Mat _mean; // mean vector

public:
using FaceRecognizer::save;
using FaceRecognizer::load;

// Initializes an empty Eigenfaces model.
Eigenfaces(int num_components = 0, double threshold = DBL_MAX) :
_num_components(num_components),
_threshold(threshold) {}

// Initializes and computes an Eigenfaces model with images in src and
// corresponding labels in labels. num_components will be kept for
// classification.
Eigenfaces(InputArrayOfArrays src, InputArray labels,
int num_components = 0, double threshold = DBL_MAX) :
_num_components(num_components),
_threshold(threshold) {
train(src, labels);
}

// Computes an Eigenfaces model with images in src and corresponding labels
// in labels.
void train(InputArrayOfArrays src, InputArray labels);
// Predicts the label of a query image in src.
int predict(InputArray src) const;
// Predicts the label and confidence for a given sample.
void predict(InputArray _src, int &label, double &dist) const;
// See FaceRecognizer::load.
void load(const FileStorage& fs);
// See FaceRecognizer::save.
void save(FileStorage& fs) const;
AlgorithmInfo* info() const;
};

// Belhumeur, P. N., Hespanha, J., and Kriegman, D. "Eigenfaces vs. Fisher-
// faces: Recognition using class specific linear projection.". IEEE
// Transactions on Pattern Analysis and Machine Intelligence 19, 7 (1997),
// 711–720.
class Fisherfaces: public FaceRecognizer
{
private:
int _num_components;
double _threshold;
Mat _eigenvectors;
Mat _eigenvalues;
Mat _mean;
vector<Mat> _projections;
Mat _labels;

public:
using FaceRecognizer::save;
using FaceRecognizer::load;

// Initializes an empty Fisherfaces model.
Fisherfaces(int num_components = 0, double threshold = DBL_MAX) :
_num_components(num_components),
_threshold(threshold) {}

// Initializes and computes a Fisherfaces model with images in src and
// corresponding labels in labels. num_components will be kept for
// classification.
Fisherfaces(InputArrayOfArrays src, InputArray labels,
int num_components = 0, double threshold = DBL_MAX) :
_num_components(num_components),
_threshold(threshold) {
train(src, labels);
}

~Fisherfaces() {}

// Computes a Fisherfaces model with images in src and corresponding labels
// in labels.
void train(InputArrayOfArrays src, InputArray labels);
// Predicts the label of a query image in src.
int predict(InputArray src) const;
// Predicts the label and confidence for a given sample.
void predict(InputArray _src, int &label, double &dist) const;
// See FaceRecognizer::load.
void load(const FileStorage& fs);
// See FaceRecognizer::save.
void save(FileStorage& fs) const;
AlgorithmInfo* info() const;
};

// Face Recognition based on Local Binary Patterns.
//
// Ahonen T, Hadid A. and Pietikäinen M. "Face description with local binary
// patterns: Application to face recognition." IEEE Transactions on Pattern
// Analysis and Machine Intelligence, 28(12):2037-2041.
//
class LBPH : public FaceRecognizer
{
private:
int _grid_x;
int _grid_y;
int _radius;
int _neighbors;
double _threshold;
vector<Mat> _histograms;
Mat _labels;

// Computes a LBPH model with images in src and
// corresponding labels in labels, possibly preserving
// old model data.
void train(InputArrayOfArrays src, InputArray labels, bool preserveData);

public:
using FaceRecognizer::save;
using FaceRecognizer::load;

// Initializes this LBPH Model. The current implementation is rather fixed
// as it uses the Extended Local Binary Patterns per default.
//
// radius, neighbors are used in the local binary patterns creation.
// grid_x, grid_y control the grid size of the spatial histograms.
LBPH(int radius_=1, int neighbors_=8,
int gridx=8, int gridy=8,
double threshold = DBL_MAX) :
_grid_x(gridx),
_grid_y(gridy),
_radius(radius_),
_neighbors(neighbors_),
_threshold(threshold) {}

// Initializes and computes this LBPH Model. The current implementation is
// rather fixed as it uses the Extended Local Binary Patterns per default.
//
// (radius=1), (neighbors=8) are used in the local binary patterns creation.
// (grid_x=8), (grid_y=8) controls the grid size of the spatial histograms.
LBPH(InputArrayOfArrays src,
InputArray labels,
int radius_=1, int neighbors_=8,
int gridx=8, int gridy=8,
double threshold = DBL_MAX) :
_grid_x(gridx),
_grid_y(gridy),
_radius(radius_),
_neighbors(neighbors_),
_threshold(threshold) {
train(src, labels);
}

~LBPH() { }

// Computes a LBPH model with images in src and
// corresponding labels in labels.
void train(InputArrayOfArrays src, InputArray labels);
// Updates this LBPH model with images in src and
// corresponding labels in labels.
void update(InputArrayOfArrays src, InputArray labels);
// Predicts the label of a query image in src.
int predict(InputArray src) const;
// Predicts the label and confidence for a given sample.
void predict(InputArray _src, int &label, double &dist) const;
// See FaceRecognizer::load.
void load(const FileStorage& fs);
// See FaceRecognizer::save.
void save(FileStorage& fs) const;
// Getter functions.
int neighbors() const { return _neighbors; }
int radius() const { return _radius; }
int grid_x() const { return _grid_x; }
int grid_y() const { return _grid_y; }
AlgorithmInfo* info() const;
};

//------------------------------------------------------------------------------
// FaceRecognizer
//------------------------------------------------------------------------------
void FaceRecognizer::update(InputArrayOfArrays src, InputArray labels ) {
if( dynamic_cast<LBPH*>(this) != 0 )
{
dynamic_cast<LBPH*>(this)->update( src, labels );
return;
}

string error_msg = format("This FaceRecognizer (%s) does not support updating, you have to use FaceRecognizer::train to update it.", this->name().c_str());
CV_Error(CV_StsNotImplemented, error_msg);
}

void FaceRecognizer::save(const string& filename) const {
FileStorage fs(filename, FileStorage::WRITE);
if (!fs.isOpened())
CV_Error(CV_StsError, "File can't be opened for writing!");
this->save(fs);
fs.release();
}

void FaceRecognizer::load(const string& filename) {
FileStorage fs(filename, FileStorage::READ);
if (!fs.isOpened())
CV_Error(CV_StsError, "File can't be opened for writing!");
this->load(fs);
fs.release();
}

//------------------------------------------------------------------------------
// Eigenfaces (eigenface training functions)
//------------------------------------------------------------------------------
void Eigenfaces::train(InputArrayOfArrays _src, InputArray _local_labels) {
if(_src.total() == 0) {
string error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
CV_Error(CV_StsBadArg, error_message);
} else if(_local_labels.getMat().type() != CV_32SC1) {
string error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _local_labels.type());
CV_Error(CV_StsBadArg, error_message);
}
// make sure data has correct size (all input images must have the same dimensions)
if(_src.total() > 1) {
for(int i = 1; i < static_cast<int>(_src.total()); i++) {
if(_src.getMat(i-1).total() != _src.getMat(i).total()) {
string error_message = format("In the Eigenfaces method all input samples (training images) must be of equal size! Expected %d pixels, but was %d pixels.", _src.getMat(i-1).total(), _src.getMat(i).total());
CV_Error(CV_StsUnsupportedFormat, error_message);
}
}
}
// get labels
Mat labels = _local_labels.getMat();
// observations in row
Mat data = asRowMatrix(_src, CV_64FC1); // each image in _src is reshaped into one row of data
// number of samples
int n = data.rows;
// assert there are as much samples as labels
if(static_cast<int>(labels.total()) != n) {
string error_message = format("The number of samples (src) must equal the number of labels (labels)! len(src)=%d, len(labels)=%d.", n, labels.total());
CV_Error(CV_StsBadArg, error_message);
}
// clear existing model data
_labels.release();
_projections.clear();
// clip number of components to be valid
if((_num_components <= 0) || (_num_components > n))
_num_components = n;

// perform the PCA
PCA pca(data, Mat(), CV_PCA_DATA_AS_ROW, _num_components);
// copy the PCA results
_mean = pca.mean.reshape(1,1); // store the mean vector
_eigenvalues = pca.eigenvalues.clone(); // eigenvalues by row
transpose(pca.eigenvectors, _eigenvectors); // eigenvectors by column
// store labels for prediction
_labels = labels.clone();
// save projections
for(int sampleIdx = 0; sampleIdx < data.rows; sampleIdx++) {
Mat p = subspaceProject(_eigenvectors, _mean, data.row(sampleIdx));
_projections.push_back(p);
}
}

void Eigenfaces::predict(InputArray _src, int &minClass, double &minDist) const {
// get data
Mat src = _src.getMat();
// make sure the user is passing correct data
if(_projections.empty()) {
// throw error if no data (or simply return -1?)
string error_message = "This Eigenfaces model is not computed yet. Did you call Eigenfaces::train?";
CV_Error(CV_StsError, error_message);
} else if(_eigenvectors.rows != static_cast<int>(src.total())) {
// check data alignment just for clearer exception messages
string error_message = format("Wrong input image size. Reason: Training and Test images must be of equal size! Expected an image with %d elements, but got %d.", _eigenvectors.rows, src.total());
CV_Error(CV_StsBadArg, error_message);
}
// project into PCA subspace
Mat q = subspaceProject(_eigenvectors, _mean, src.reshape(1,1));
minDist = DBL_MAX;
minClass = -1;
// compute the L2 norm (Euclidean distance) to every stored projection
for(size_t sampleIdx = 0; sampleIdx < _projections.size(); sampleIdx++) {
double dist = norm(_projections[sampleIdx], q, NORM_L2);
if((dist < minDist) && (dist < _threshold)) {
minDist = dist;
minClass = _labels.at<int>((int)sampleIdx);
}
}
}

int Eigenfaces::predict(InputArray _src) const {
int label;
double dummy;
predict(_src, label, dummy);
return label;
}

void Eigenfaces::load(const FileStorage& fs) {
//read matrices
fs["num_components"] >> _num_components;
fs["mean"] >> _mean;
fs["eigenvalues"] >> _eigenvalues;
fs["eigenvectors"] >> _eigenvectors;
// read sequences
readFileNodeList(fs["projections"], _projections);
fs["labels"] >> _labels;
}

void Eigenfaces::save(FileStorage& fs) const {
// write matrices
fs << "num_components" << _num_components;
fs << "mean" << _mean;
fs << "eigenvalues" << _eigenvalues;
fs << "eigenvectors" << _eigenvectors;
// write sequences
writeFileNodeList(fs, "projections", _projections);
fs << "labels" << _labels;
}

//------------------------------------------------------------------------------
// Fisherfaces
//------------------------------------------------------------------------------
void Fisherfaces::train(InputArrayOfArrays src, InputArray _lbls) {
if(src.total() == 0) {
string error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
CV_Error(CV_StsBadArg, error_message);
} else if(_lbls.getMat().type() != CV_32SC1) {
string error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _lbls.type());
CV_Error(CV_StsBadArg, error_message);
}
// make sure data has correct size
if(src.total() > 1) {
for(int i = 1; i < static_cast<int>(src.total()); i++) {
if(src.getMat(i-1).total() != src.getMat(i).total()) {
string error_message = format("In the Fisherfaces method all input samples (training images) must be of equal size! Expected %d pixels, but was %d pixels.", src.getMat(i-1).total(), src.getMat(i).total());
CV_Error(CV_StsUnsupportedFormat, error_message);
}
}
}
// get data
Mat labels = _lbls.getMat();
Mat data = asRowMatrix(src, CV_64FC1);
// number of samples
int N = data.rows;
// make sure labels are passed in correct shape
if(labels.total() != (size_t) N) {
string error_message = format("The number of samples (src) must equal the number of labels (labels)! len(src)=%d, len(labels)=%d.", N, labels.total());
CV_Error(CV_StsBadArg, error_message);
} else if(labels.rows != 1 && labels.cols != 1) {
string error_message = format("Expected the labels in a matrix with one row or column! Given dimensions are rows=%d, cols=%d.", labels.rows, labels.cols);
CV_Error(CV_StsBadArg, error_message);
}
// clear existing model data
_labels.release();
_projections.clear();
// safely copy from cv::Mat to std::vector
vector<int> ll;
for(unsigned int i = 0; i < labels.total(); i++) {
ll.push_back(labels.at<int>(i));
}
// get the number of unique classes
int C = (int) remove_dups(ll).size();
// clip number of components to be a valid number
if((_num_components <= 0) || (_num_components > (C-1)))
_num_components = (C-1);
// perform a PCA and keep (N-C) components
PCA pca(data, Mat(), CV_PCA_DATA_AS_ROW, (N-C));
// project the data and perform a LDA on it
LDA lda(pca.project(data),labels, _num_components);
// store the total mean vector
_mean = pca.mean.reshape(1,1);
// store labels
_labels = labels.clone();
// store the eigenvalues of the discriminants
lda.eigenvalues().convertTo(_eigenvalues, CV_64FC1);
// Now calculate the projection matrix as pca.eigenvectors * lda.eigenvectors.
// Note: OpenCV stores the eigenvectors by row, so we need to transpose it!
gemm(pca.eigenvectors, lda.eigenvectors(), 1.0, Mat(), 0.0, _eigenvectors, GEMM_1_T);
// store the projections of the original data
for(int sampleIdx = 0; sampleIdx < data.rows; sampleIdx++) {
Mat p = subspaceProject(_eigenvectors, _mean, data.row(sampleIdx));
_projections.push_back(p);
}
}

void Fisherfaces::predict(InputArray _src, int &minClass, double &minDist) const {
Mat src = _src.getMat();
// check data alignment just for clearer exception messages
if(_projections.empty()) {
// throw error if no data (or simply return -1?)
string error_message = "This Fisherfaces model is not computed yet. Did you call Fisherfaces::train?";
CV_Error(CV_StsBadArg, error_message);
} else if(src.total() != (size_t) _eigenvectors.rows) {
string error_message = format("Wrong input image size. Reason: Training and Test images must be of equal size! Expected an image with %d elements, but got %d.", _eigenvectors.rows, src.total());
CV_Error(CV_StsBadArg, error_message);
}
// project into LDA subspace
Mat q = subspaceProject(_eigenvectors, _mean, src.reshape(1,1));
// find 1-nearest neighbor
minDist = DBL_MAX;
minClass = -1;
for(size_t sampleIdx = 0; sampleIdx < _projections.size(); sampleIdx++) {
double dist = norm(_projections[sampleIdx], q, NORM_L2);
if((dist < minDist) && (dist < _threshold)) {
minDist = dist;
minClass = _labels.at<int>((int)sampleIdx);
}
}
}

int Fisherfaces::predict(InputArray _src) const {
int label;
double dummy;
predict(_src, label, dummy);
return label;
}

// See FaceRecognizer::load.
void Fisherfaces::load(const FileStorage& fs) {
//read matrices
fs["num_components"] >> _num_components;
fs["mean"] >> _mean;
fs["eigenvalues"] >> _eigenvalues;
fs["eigenvectors"] >> _eigenvectors;
// read sequences
readFileNodeList(fs["projections"], _projections);
fs["labels"] >> _labels;
}

// See FaceRecognizer::save.
void Fisherfaces::save(FileStorage& fs) const {
// write matrices
fs << "num_components" << _num_components;
fs << "mean" << _mean;
fs << "eigenvalues" << _eigenvalues;
fs << "eigenvectors" << _eigenvectors;
// write sequences
writeFileNodeList(fs, "projections", _projections);
fs << "labels" << _labels;
}

//------------------------------------------------------------------------------
// LBPH
//------------------------------------------------------------------------------
template <typename _Tp> static
void olbp_(InputArray _src, OutputArray _dst) {
// get matrices
Mat src = _src.getMat();
// allocate memory for result
_dst.create(src.rows-2, src.cols-2, CV_8UC1);
Mat dst = _dst.getMat();
// zero the result matrix
dst.setTo(0);
// calculate patterns
for(int i=1;i<src.rows-1;i++) {
for(int j=1;j<src.cols-1;j++) {
_Tp center = src.at<_Tp>(i,j);
unsigned char code = 0;
code |= (src.at<_Tp>(i-1,j-1) >= center) << 7;
code |= (src.at<_Tp>(i-1,j) >= center) << 6;
code |= (src.at<_Tp>(i-1,j+1) >= center) << 5;
code |= (src.at<_Tp>(i,j+1) >= center) << 4;
code |= (src.at<_Tp>(i+1,j+1) >= center) << 3;
code |= (src.at<_Tp>(i+1,j) >= center) << 2;
code |= (src.at<_Tp>(i+1,j-1) >= center) << 1;
code |= (src.at<_Tp>(i,j-1) >= center) << 0;
dst.at<unsigned char>(i-1,j-1) = code;
}
}
}

//------------------------------------------------------------------------------
// cv::elbp
//------------------------------------------------------------------------------
template <typename _Tp> static
inline void elbp_(InputArray _src, OutputArray _dst, int radius, int neighbors) {
//get matrices
Mat src = _src.getMat();
// allocate memory for result
_dst.create(src.rows-2*radius, src.cols-2*radius, CV_32SC1);
Mat dst = _dst.getMat();
// zero
dst.setTo(0);
for(int n=0; n<neighbors; n++) {
// sample points
float x = static_cast<float>(radius * cos(2.0*CV_PI*n/static_cast<float>(neighbors)));
float y = static_cast<float>(-radius * sin(2.0*CV_PI*n/static_cast<float>(neighbors)));
// relative indices
int fx = static_cast<int>(floor(x));
int fy = static_cast<int>(floor(y));
int cx = static_cast<int>(ceil(x));
int cy = static_cast<int>(ceil(y));
// fractional part
float ty = y - fy;
float tx = x - fx;
// set interpolation weights
float w1 = (1 - tx) * (1 - ty);
float w2 = tx * (1 - ty);
float w3 = (1 - tx) * ty;
float w4 = tx * ty;
// iterate through your data
for(int i=radius; i < src.rows-radius;i++) {
for(int j=radius;j < src.cols-radius;j++) {
// calculate interpolated value
float t = static_cast<float>(w1*src.at<_Tp>(i+fy,j+fx) + w2*src.at<_Tp>(i+fy,j+cx) + w3*src.at<_Tp>(i+cy,j+fx) + w4*src.at<_Tp>(i+cy,j+cx));
// floating point precision, so check some machine-dependent epsilon
dst.at<int>(i-radius,j-radius) += ((t > src.at<_Tp>(i,j)) || (std::abs(t-src.at<_Tp>(i,j)) < std::numeric_limits<float>::epsilon())) << n;
}
}
}
}

static void elbp(InputArray src, OutputArray dst, int radius, int neighbors)
{
int type = src.type();
switch (type) {
case CV_8SC1: elbp_<char>(src,dst, radius, neighbors); break;
case CV_8UC1: elbp_<unsigned char>(src, dst, radius, neighbors); break;
case CV_16SC1: elbp_<short>(src,dst, radius, neighbors); break;
case CV_16UC1: elbp_<unsigned short>(src,dst, radius, neighbors); break;
case CV_32SC1: elbp_<int>(src,dst, radius, neighbors); break;
case CV_32FC1: elbp_<float>(src,dst, radius, neighbors); break;
case CV_64FC1: elbp_<double>(src,dst, radius, neighbors); break;
default:
string error_msg = format("Using Original Local Binary Patterns for feature extraction only works on single-channel images (given %d). Please pass the image data as a grayscale image!", type);
CV_Error(CV_StsNotImplemented, error_msg);
break;
}
}

static Mat histc_(const Mat& src, int minVal=0, int maxVal=255, bool normed=false)
{
Mat result;
// Establish the number of bins.
int histSize = maxVal-minVal+1;
// Set the ranges.
float range[] = { static_cast<float>(minVal), static_cast<float>(maxVal+1) };
const float* histRange = { range };
// calc histogram
calcHist(&src, 1, 0, Mat(), result, 1, &histSize, &histRange, true, false);
// normalize
if(normed) {
result /= (int)src.total();
}
return result.reshape(1,1);
}

static Mat histc(InputArray _src, int minVal, int maxVal, bool normed)
{
Mat src = _src.getMat();
switch (src.type()) {
case CV_8SC1:
return histc_(Mat_<float>(src), minVal, maxVal, normed);
break;
case CV_8UC1:
return histc_(src, minVal, maxVal, normed);
break;
case CV_16SC1:
return histc_(Mat_<float>(src), minVal, maxVal, normed);
break;
case CV_16UC1:
return histc_(src, minVal, maxVal, normed);
break;
case CV_32SC1:
return histc_(Mat_<float>(src), minVal, maxVal, normed);
break;
case CV_32FC1:
return histc_(src, minVal, maxVal, normed);
break;
default:
CV_Error(CV_StsUnmatchedFormats, "This type is not implemented yet."); break;
}
return Mat();
}

static Mat spatial_histogram(InputArray _src, int numPatterns,
int grid_x, int grid_y, bool /*normed*/)
{
Mat src = _src.getMat();
// calculate LBP patch size
int width = src.cols/grid_x;
int height = src.rows/grid_y;
// allocate memory for the spatial histogram
Mat result = Mat::zeros(grid_x * grid_y, numPatterns, CV_32FC1);
// return matrix with zeros if no data was given
if(src.empty())
return result.reshape(1,1);
// initial result_row
int resultRowIdx = 0;
// iterate through grid
for(int i = 0; i < grid_y; i++) {
for(int j = 0; j < grid_x; j++) {
Mat src_cell = Mat(src, Range(i*height,(i+1)*height), Range(j*width,(j+1)*width));
Mat cell_hist = histc(src_cell, 0, (numPatterns-1), true);
// copy to the result matrix
Mat result_row = result.row(resultRowIdx);
cell_hist.reshape(1,1).convertTo(result_row, CV_32FC1);
// increase row count in result matrix
resultRowIdx++;
}
}
// return result as reshaped feature vector
return result.reshape(1,1);
}

//------------------------------------------------------------------------------
// wrapper to cv::elbp (extended local binary patterns)
//------------------------------------------------------------------------------
static Mat elbp(InputArray src, int radius, int neighbors) {
Mat dst;
elbp(src, dst, radius, neighbors);
return dst;
}

void LBPH::load(const FileStorage& fs) {
fs["radius"] >> _radius;
fs["neighbors"] >> _neighbors;
fs["grid_x"] >> _grid_x;
fs["grid_y"] >> _grid_y;
//read matrices
readFileNodeList(fs["histograms"], _histograms);
fs["labels"] >> _labels;
}

// See FaceRecognizer::save.
void LBPH::save(FileStorage& fs) const {
fs << "radius" << _radius;
fs << "neighbors" << _neighbors;
fs << "grid_x" << _grid_x;
fs << "grid_y" << _grid_y;
// write matrices
writeFileNodeList(fs, "histograms", _histograms);
fs << "labels" << _labels;
}

void LBPH::train(InputArrayOfArrays _in_src, InputArray _in_labels) {
this->train(_in_src, _in_labels, false);
}

void LBPH::update(InputArrayOfArrays _in_src, InputArray _in_labels) {
// got no data, just return
if(_in_src.total() == 0)
return;
this->train(_in_src, _in_labels, true);
}

void LBPH::train(InputArrayOfArrays _in_src, InputArray _in_labels, bool preserveData) {
if(_in_src.kind() != _InputArray::STD_VECTOR_MAT && _in_src.kind() != _InputArray::STD_VECTOR_VECTOR) {
string error_message = "The images are expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< vector<...> >).";
CV_Error(CV_StsBadArg, error_message);
}
if(_in_src.total() == 0) {
string error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
CV_Error(CV_StsUnsupportedFormat, error_message);
} else if(_in_labels.getMat().type() != CV_32SC1) {
string error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _in_labels.type());
CV_Error(CV_StsUnsupportedFormat, error_message);
}
// get the vector of matrices
vector<Mat> src;
_in_src.getMatVector(src);
// get the label matrix
Mat labels = _in_labels.getMat();
// check if data is well-aligned
if(labels.total() != src.size()) {
string error_message = format("The number of samples (src) must equal the number of labels (labels). Was len(samples)=%d, len(labels)=%d.", src.size(), labels.total());
CV_Error(CV_StsBadArg, error_message);
}
// if this model should be trained without preserving old data, delete old model data
if(!preserveData) {
_labels.release();
_histograms.clear();
}
// append labels to _labels matrix
for(size_t labelIdx = 0; labelIdx < labels.total(); labelIdx++) {
_labels.push_back(labels.at<int>((int)labelIdx));
}
// store the spatial histograms of the original data
for(size_t sampleIdx = 0; sampleIdx < src.size(); sampleIdx++) {
// calculate lbp image
Mat lbp_image = elbp(src[sampleIdx], _radius, _neighbors);
// get spatial histogram from this lbp image
Mat p = spatial_histogram(
lbp_image, /* lbp_image */
static_cast<int>(std::pow(2.0, static_cast<double>(_neighbors))), /* number of possible patterns */
_grid_x, /* grid size x */
_grid_y, /* grid size y */
true);
// add to templates
_histograms.push_back(p);
}
}

void LBPH::predict(InputArray _src, int &minClass, double &minDist) const {
if(_histograms.empty()) {
// throw error if no data (or simply return -1?)
string error_message = "This LBPH model is not computed yet. Did you call the train method?";
CV_Error(CV_StsBadArg, error_message);
}
Mat src = _src.getMat();
// get the spatial histogram from input image
Mat lbp_image = elbp(src, _radius, _neighbors);
Mat query = spatial_histogram(
lbp_image, /* lbp_image */
static_cast<int>(std::pow(2.0, static_cast<double>(_neighbors))), /* number of possible patterns */
_grid_x, /* grid size x */
_grid_y, /* grid size y */
true /* normed histograms */);
// find 1-nearest neighbor
minDist = DBL_MAX;
minClass = -1;
for(size_t sampleIdx = 0; sampleIdx < _histograms.size(); sampleIdx++) {
double dist = compareHist(_histograms[sampleIdx], query, CV_COMP_CHISQR);
if((dist < minDist) && (dist < _threshold)) {
minDist = dist;
minClass = _labels.at<int>((int) sampleIdx);
}
}
}

int LBPH::predict(InputArray _src) const {
int label;
double dummy;
predict(_src, label, dummy);
return label;
}

Ptr<FaceRecognizer> createEigenFaceRecognizer(int num_components, double threshold)
{
return new Eigenfaces(num_components, threshold);
}

Ptr<FaceRecognizer> createFisherFaceRecognizer(int num_components, double threshold)
{
return new Fisherfaces(num_components, threshold);
}

Ptr<FaceRecognizer> createLBPHFaceRecognizer(int radius, int neighbors,
int grid_x, int grid_y, double threshold)
{
return new LBPH(radius, neighbors, grid_x, grid_y, threshold);
}

CV_INIT_ALGORITHM(Eigenfaces, "FaceRecognizer.Eigenfaces",
obj.info()->addParam(obj, "ncomponents", obj._num_components);
obj.info()->addParam(obj, "threshold", obj._threshold);
obj.info()->addParam(obj, "projections", obj._projections, true);
obj.info()->addParam(obj, "labels", obj._labels, true);
obj.info()->addParam(obj, "eigenvectors", obj._eigenvectors, true);
obj.info()->addParam(obj, "eigenvalues", obj._eigenvalues, true);
obj.info()->addParam(obj, "mean", obj._mean, true)); CV_INIT_ALGORITHM(Fisherfaces, "FaceRecognizer.Fisherfaces",
obj.info()->addParam(obj, "ncomponents", obj._num_components);
obj.info()->addParam(obj, "threshold", obj._threshold);
obj.info()->addParam(obj, "projections", obj._projections, true);
obj.info()->addParam(obj, "labels", obj._labels, true);
obj.info()->addParam(obj, "eigenvectors", obj._eigenvectors, true);
obj.info()->addParam(obj, "eigenvalues", obj._eigenvalues, true);
obj.info()->addParam(obj, "mean", obj._mean, true)); CV_INIT_ALGORITHM(LBPH, "FaceRecognizer.LBPH",
obj.info()->addParam(obj, "radius", obj._radius);
obj.info()->addParam(obj, "neighbors", obj._neighbors);
obj.info()->addParam(obj, "grid_x", obj._grid_x);
obj.info()->addParam(obj, "grid_y", obj._grid_y);
obj.info()->addParam(obj, "threshold", obj._threshold);
obj.info()->addParam(obj, "histograms", obj._histograms, true);
obj.info()->addParam(obj, "labels", obj._labels, true)); bool initModule_contrib()
{
Ptr<Algorithm> efaces = createEigenfaces(), ffaces = createFisherfaces(), lbph = createLBPH();
return efaces->info() != 0 && ffaces->info() != 0 && lbph->info() != 0;
}

}
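
The listing above is the facerec.cpp implementation shipped with the OpenCV 2.4 contrib module. Below is a minimal usage sketch for the three recognizers it defines, assuming OpenCV 2.4.x built with the contrib module; the image file names are hypothetical placeholders, and all face images must be equally sized grayscale crops.

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/contrib/contrib.hpp>
#include <vector>
#include <iostream>

int main() {
    using namespace cv;
    // Training data: equally sized grayscale face images plus one integer label per image.
    // The file names below are placeholders for your own dataset.
    std::vector<Mat> images;
    std::vector<int> labels;
    images.push_back(imread("s1_1.pgm", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(1);
    images.push_back(imread("s1_2.pgm", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(1);
    images.push_back(imread("s2_1.pgm", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(2);
    images.push_back(imread("s2_2.pgm", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(2);

    // LBPH recognizer with the defaults shown above (radius=1, neighbors=8, 8x8 grid);
    // createEigenFaceRecognizer() or createFisherFaceRecognizer() are used the same way.
    Ptr<FaceRecognizer> model = createLBPHFaceRecognizer();
    model->train(images, labels);

    // Predict the label and distance for a query face of the same size as the training images.
    Mat query = imread("query.pgm", CV_LOAD_IMAGE_GRAYSCALE);
    int predictedLabel = -1;
    double distance = 0.0;
    model->predict(query, predictedLabel, distance);
    std::cout << "Predicted label: " << predictedLabel
              << " (distance: " << distance << ")" << std::endl;

    // The model can be serialized and reloaded via the save/load overloads defined above.
    model->save("lbph_model.yml");
    return 0;
}

For Eigenfaces and Fisherfaces the reported distance is the Euclidean distance in the projected subspace, while LBPH uses the chi-square distance between spatial LBP histograms; smaller values mean a closer match, and the optional threshold rejects queries whose best distance exceeds it.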

http://read.pudn.com/downloads674/sourcecode/graph/opencv/2728222/facerec.cpp__.htm
