人脸检测的API例子
package cliu.TutorialOnFaceDetect;
/*
* MyImageView.java
* Download by http://www.codefans.net
* [AUTHOR]: Chunyen Liu
* [SDK ]: Android SDK 2.1 and up
* [NOTE ]: developer.com tutorial, "Face Detection with Android APIs"
*/
import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.Canvas;
import android.graphics.Paint;
import android.util.AttributeSet;
import android.widget.ImageView;
/**
 * ImageView subclass that keeps its own backing {@link Bitmap} copy of the
 * displayed image and overlays detected face feature points on top of it
 * in {@link #onDraw(Canvas)}.
 *
 * <p>Not thread-safe; all methods are expected to be called on the UI thread.
 */
class MyImageView extends ImageView {
    /** Private copy of the image being displayed; feature points are drawn over it. */
    private Bitmap mBitmap;
    /** Canvas bound to {@link #mBitmap} so the source image can be blitted into it. */
    private Canvas mCanvas;
    private int mBitmapWidth = 200;
    private int mBitmapHeight = 200;
    private Paint mPaint = new Paint(Paint.ANTI_ALIAS_FLAG);
    /** Marker style: 0 draws rectangles, 1 draws circles (see onDraw). */
    private int mDisplayStyle = 0;
    /** X coordinates of the feature points to overlay, or null when none are set. */
    private int[] mPX = null;
    /** Y coordinates of the feature points to overlay, or null when none are set. */
    private int[] mPY = null;

    public MyImageView(Context c) {
        super(c);
        init();
    }

    public MyImageView(Context c, AttributeSet attrs) {
        super(c, attrs);
        init();
    }

    /** Shared constructor work: allocate a placeholder bitmap and configure the marker paint. */
    private void init() {
        mBitmap = Bitmap.createBitmap(mBitmapWidth, mBitmapHeight, Bitmap.Config.RGB_565);
        mCanvas = new Canvas(mBitmap);
        mPaint.setStyle(Paint.Style.STROKE);
        mPaint.setStrokeCap(Paint.Cap.ROUND);
        // Semi-transparent red so the underlying image remains visible beneath markers.
        mPaint.setColor(0x80ff0000);
        mPaint.setStrokeWidth(3);
    }

    /** @return the view's private backing bitmap (the copy drawn in onDraw). */
    public Bitmap getBitmap() {
        return mBitmap;
    }

    /**
     * Sets the displayed image and refreshes the private backing copy so that
     * markers can later be composited over it.
     *
     * @param bm the new image; a null value is passed through to the superclass unchanged
     */
    @Override
    public void setImageBitmap(Bitmap bm) {
        if (bm != null) {
            mBitmapWidth = bm.getWidth();
            mBitmapHeight = bm.getHeight();
            mBitmap = Bitmap.createBitmap(mBitmapWidth, mBitmapHeight, Bitmap.Config.RGB_565);
            // Bind the canvas directly to the new bitmap and copy the source image in.
            mCanvas = new Canvas(mBitmap);
            mCanvas.drawBitmap(bm, 0, 0, null);
        }
        super.setImageBitmap(bm);
    }

    @Override
    protected void onSizeChanged(int w, int h, int oldw, int oldh) {
        super.onSizeChanged(w, h, oldw, oldh);
        // Track the larger of the bitmap size and the view size.
        mBitmapWidth = (mBitmap != null) ? mBitmap.getWidth() : 0;
        mBitmapHeight = (mBitmap != null) ? mBitmap.getHeight() : 0;
        if (mBitmapWidth == w && mBitmapHeight == h) {
            return;
        }
        if (mBitmapWidth < w) mBitmapWidth = w;
        if (mBitmapHeight < h) mBitmapHeight = h;
    }

    /**
     * Sets up detected face features for display on the next draw pass.
     *
     * @param xx    x coordinates of the feature points (may be null to clear)
     * @param yy    y coordinates of the feature points (may be null to clear)
     * @param total number of valid entries in {@code xx}/{@code yy}
     * @param style 0 for rectangle markers, 1 for circle markers
     */
    public void setDisplayPoints(int[] xx, int[] yy, int total, int style) {
        mDisplayStyle = style;
        mPX = null;
        mPY = null;
        if (xx != null && yy != null && total > 0) {
            // Defensive copy so later mutation of the caller's arrays has no effect.
            mPX = new int[total];
            mPY = new int[total];
            System.arraycopy(xx, 0, mPX, 0, total);
            System.arraycopy(yy, 0, mPY, 0, total);
        }
    }

    @Override
    protected void onDraw(Canvas canvas) {
        super.onDraw(canvas);
        if (mBitmap != null) {
            // Draw the private image copy, then overlay one marker per feature point.
            canvas.drawBitmap(mBitmap, 0, 0, null);
            if (mPX != null && mPY != null) {
                for (int i = 0; i < mPX.length; i++) {
                    if (mDisplayStyle == 1) {
                        canvas.drawCircle(mPX[i], mPY[i], 10.0f, mPaint);
                    } else {
                        canvas.drawRect(mPX[i] - 20, mPY[i] - 20, mPX[i] + 20, mPY[i] + 20, mPaint);
                    }
                }
            }
        }
    }
}
----------------------------------------------
package cliu.TutorialOnFaceDetect;
/*
* TutorialOnFaceDetect
* Download by http://www.codefans.net
* [AUTHOR]: Chunyen Liu
* [SDK ]: Android SDK 2.1 and up
* [NOTE ]: developer.com tutorial, "Face Detection with Android APIs"
*/
import android.app.Activity;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.PointF;
import android.media.FaceDetector;
import android.os.Bundle;
import android.os.Handler;
import android.os.Message;
import android.util.Log;
import android.widget.LinearLayout.LayoutParams;
/**
 * Demo activity that loads a photo resource, runs {@link FaceDetector} on a
 * background thread, and displays both eyes of every detected face as circle
 * markers via {@link MyImageView}.
 */
public class TutorialOnFaceDetect extends Activity {
    private MyImageView mIV;
    /** Mutable RGB_565 copy of the photo; FaceDetector requires RGB_565 input. */
    private Bitmap mFaceBitmap;
    private int mFaceWidth = 200;
    private int mFaceHeight = 200;
    private static final int MAX_FACES = 10;
    private static final String TAG = "TutorialOnFaceDetect";
    private static final boolean DEBUG = false;
    /** Message code sent from the worker thread when detection has finished. */
    protected static final int GUIUPDATE_SETFACE = 999;

    /** UI-thread handler: redraws the image view once face detection completes. */
    protected Handler mHandler = new Handler() {
        @Override
        public void handleMessage(Message msg) {
            mIV.invalidate();
            super.handleMessage(msg);
        }
    };

    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        mIV = new MyImageView(this);
        setContentView(mIV, new LayoutParams(LayoutParams.WRAP_CONTENT, LayoutParams.WRAP_CONTENT));
        // Load the photo and convert it to a mutable RGB_565 bitmap for FaceDetector.
        Bitmap b = BitmapFactory.decodeResource(getResources(), R.drawable.face3);
        if (b == null) {
            // Guard against a missing/corrupt resource; b.copy below would otherwise NPE.
            Log.e(TAG, "onCreate(): failed to decode R.drawable.face3");
            return;
        }
        mFaceBitmap = b.copy(Bitmap.Config.RGB_565, true);
        b.recycle();
        mFaceWidth = mFaceBitmap.getWidth();
        mFaceHeight = mFaceBitmap.getHeight();
        mIV.setImageBitmap(mFaceBitmap);
        mIV.invalidate();
        // Perform face detection in setFace() in a background thread.
        doLengthyCalc();
    }

    /**
     * Runs face detection on {@link #mFaceBitmap} and hands the eye locations
     * (two points per face, derived from the eye midpoint and eye distance)
     * to the image view for display as circles.
     */
    public void setFace() {
        FaceDetector fd;
        FaceDetector.Face[] faces = new FaceDetector.Face[MAX_FACES];
        PointF eyescenter = new PointF();
        float eyesdist = 0.0f;
        int[] fpx = null;
        int[] fpy = null;
        int count = 0;
        try {
            fd = new FaceDetector(mFaceWidth, mFaceHeight, MAX_FACES);
            count = fd.findFaces(mFaceBitmap, faces);
        } catch (Exception e) {
            Log.e(TAG, "setFace(): " + e.toString());
            return;
        }
        // Check if we detected any faces; two display points (both eyes) per face.
        if (count > 0) {
            fpx = new int[count * 2];
            fpy = new int[count * 2];
            for (int i = 0; i < count; i++) {
                try {
                    faces[i].getMidPoint(eyescenter);
                    eyesdist = faces[i].eyesDistance();
                    // Left eye: half the eye distance to the left of the midpoint.
                    fpx[2 * i] = (int) (eyescenter.x - eyesdist / 2);
                    fpy[2 * i] = (int) eyescenter.y;
                    // Right eye: half the eye distance to the right of the midpoint.
                    fpx[2 * i + 1] = (int) (eyescenter.x + eyesdist / 2);
                    fpy[2 * i + 1] = (int) eyescenter.y;
                    if (DEBUG)
                        Log.e(TAG, "setFace(): face " + i + ": confidence = " + faces[i].confidence()
                                + ", eyes distance = " + faces[i].eyesDistance()
                                + ", pose = (" + faces[i].pose(FaceDetector.Face.EULER_X) + ","
                                + faces[i].pose(FaceDetector.Face.EULER_Y) + ","
                                + faces[i].pose(FaceDetector.Face.EULER_Z) + ")"
                                + ", eyes midpoint = (" + eyescenter.x + "," + eyescenter.y + ")");
                } catch (Exception e) {
                    Log.e(TAG, "setFace(): face " + i + ": " + e.toString());
                }
            }
        }
        // Style 1 = circle markers, one per eye.
        mIV.setDisplayPoints(fpx, fpy, count * 2, 1);
    }

    /** Spawns a worker thread that runs detection, then notifies the UI handler. */
    private void doLengthyCalc() {
        Thread t = new Thread() {
            public void run() {
                try {
                    setFace();
                    // Message.obtain() reuses pooled instances instead of allocating.
                    Message m = Message.obtain();
                    m.what = TutorialOnFaceDetect.GUIUPDATE_SETFACE;
                    TutorialOnFaceDetect.this.mHandler.sendMessage(m);
                } catch (Exception e) {
                    Log.e(TAG, "doLengthyCalc(): " + e.toString());
                }
            }
        };
        t.start();
    }
}
-------------------------------------------------------
package cliu.TutorialOnFaceDetect;
/*
* TutorialOnFaceDetect1
* Download by http://www.codefans.net
* [AUTHOR]: Chunyen Liu
* [SDK ]: Android SDK 2.1 and up
* [NOTE ]: developer.com tutorial, "Face Detection with Android APIs"
*/
import android.app.Activity;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.PointF;
import android.media.FaceDetector;
import android.os.Bundle;
import android.util.Log;
import android.widget.LinearLayout.LayoutParams;
/**
 * Simpler variant of the face-detection demo: runs {@link FaceDetector}
 * synchronously in {@code onCreate} and marks each detected face with a
 * single rectangle at the eye midpoint via {@link MyImageView}.
 */
public class TutorialOnFaceDetect1 extends Activity {
    private MyImageView mIV;
    /** Mutable RGB_565 copy of the photo; FaceDetector requires RGB_565 input. */
    private Bitmap mFaceBitmap;
    private int mFaceWidth = 200;
    private int mFaceHeight = 200;
    private static final int MAX_FACES = 10;
    private static final String TAG = "TutorialOnFaceDetect";

    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        mIV = new MyImageView(this);
        setContentView(mIV, new LayoutParams(LayoutParams.WRAP_CONTENT, LayoutParams.WRAP_CONTENT));
        // Load the photo and convert it to a mutable RGB_565 bitmap for FaceDetector.
        Bitmap b = BitmapFactory.decodeResource(getResources(), R.drawable.face3);
        if (b == null) {
            // Guard against a missing/corrupt resource; b.copy below would otherwise NPE.
            Log.e(TAG, "onCreate(): failed to decode R.drawable.face3");
            return;
        }
        mFaceBitmap = b.copy(Bitmap.Config.RGB_565, true);
        b.recycle();
        mFaceWidth = mFaceBitmap.getWidth();
        mFaceHeight = mFaceBitmap.getHeight();
        mIV.setImageBitmap(mFaceBitmap);
        // Perform face detection and set the feature points (on the UI thread:
        // acceptable for this minimal tutorial, but blocks until detection finishes).
        setFace();
        mIV.invalidate();
    }

    /**
     * Runs face detection on {@link #mFaceBitmap} and hands one point per
     * detected face (the midpoint between the eyes) to the image view for
     * display as rectangles.
     */
    public void setFace() {
        FaceDetector fd;
        FaceDetector.Face[] faces = new FaceDetector.Face[MAX_FACES];
        PointF midpoint = new PointF();
        int[] fpx = null;
        int[] fpy = null;
        int count = 0;
        try {
            fd = new FaceDetector(mFaceWidth, mFaceHeight, MAX_FACES);
            count = fd.findFaces(mFaceBitmap, faces);
        } catch (Exception e) {
            Log.e(TAG, "setFace(): " + e.toString());
            return;
        }
        // Check if we detected any faces; one display point per face.
        if (count > 0) {
            fpx = new int[count];
            fpy = new int[count];
            for (int i = 0; i < count; i++) {
                try {
                    faces[i].getMidPoint(midpoint);
                    fpx[i] = (int) midpoint.x;
                    fpy[i] = (int) midpoint.y;
                } catch (Exception e) {
                    Log.e(TAG, "setFace(): face " + i + ": " + e.toString());
                }
            }
        }
        // Style 0 = rectangle markers.
        mIV.setDisplayPoints(fpx, fpy, count, 0);
    }
}
人脸检测的API例子的更多相关文章
- caffe_实战之两个简单的例子(物体分类和人脸检测)
一.物体分类: 这里使用的是caffe官网中自带的例子,我这里主要是对代码的解释~ 首先导入一些必要的库: import caffe import numpy as np import matplot ...
- paper 88:人脸检测和识别的Web服务API
本文汇总了全球范围内提供基于Web服务的人脸检测和识别的API,便于网络中快速部署和人脸相关的一些应用. 1:从How-old的火爆说起 最开始,网站的开发者只是给一个几百人的群发送email,请他们 ...
- [转]40多个关于人脸检测/识别的API、库和软件
[转]40多个关于人脸检测/识别的API.库和软件 http://news.cnblogs.com/n/185616/ 英文原文:List of 40+ Face Detection / Recogn ...
- 40多个关于人脸检测/识别的API、库和软件
英文原文:List of 40+ Face Detection / Recognition APIs, libraries, and software 译者:@吕抒真 译文:链接 自从谷歌眼镜被推出以 ...
- 转:40多个关于人脸检测/识别的API、库和软件
文章来自于:http://blog.jobbole.com/45936/ 自从谷歌眼镜被推出以来,围绕人脸识别,出现了很多争议.我们相信,不管是不是通过智能眼镜,人脸识别将在人与人交往甚至人与物交互中 ...
- javacv 340使用 人脸检测例子【转载】
Java下使用opencv进行人脸检测 工作需要,研究下人脸识别,发现opencv比较常用,尽管能检测人脸,但识别率不高,多数是用来获取摄像头的视频流的,提取里面的视频帧,实现人脸识别时通常会和其他框 ...
- 基于TensorFlow Object Detection API进行迁移学习训练自己的人脸检测模型(二)
前言 已完成数据预处理工作,具体参照: 基于TensorFlow Object Detection API进行迁移学习训练自己的人脸检测模型(一) 设置配置文件 新建目录face_faster_rcn ...
- 虹软人脸检测和识别C# - API
using System; using System.Collections.Generic; using System.Drawing; using System.Drawing.Drawing2D ...
- OpenCV + Python 人脸检测
必备知识 Haar-like opencv api 读取图片 灰度转换 画图 显示图像 获取人脸识别训练数据 探测人脸 处理人脸探测的结果 实例 图片素材 人脸检测代码 人脸检测结果 总结 下午的时候 ...
随机推荐
- makefile 学习(一)
一.Makefile的基本规则 GNU make 规则: target ... : prerequisites ... command .... .... target - 目 ...
- memcached学习(4). memcached的分布式算法
memcached的分布式 正如第1次中介绍的那样, memcached虽然称为"分布式"缓存服务器,但服务器端并没有"分布式"功能. 服务器端仅包括 第2次. ...
- windows对象分类
用户对象 GDI对象 内核对象 如何判断一个对象是不是内核对象 看创建函数,几乎所有的内核对象创建函数都会有PSECURITY_ATTRIBUTES参数
- procfs
https://www.kernel.org/doc/Documentation/filesystems/proc.txt /proc/stat cpu 493610 1050 955506 6140 ...
- .Net性能优化时应该关注的数据
解决性能问题的时候,我往往会让客户添加下面一些计数器进行性能收集. Process object下的所有计数器: Processor object下的所有计数器: System object下的所有计 ...
- meta 标签属性(网站兼容与优化需要)
概要 标签提供关于HTML文档的元数据.元数据不会显示在页面上,但是对于机器是可读的.它可用于浏览器(如何显示内容或重新加载页面),搜索引擎(关键词),或其他 web 服务. —— W3School ...
- Tomcat 7优化前及优化后的性能对比
Tomcat 7在我们日常开发.测试.生产环境都会使用到,但对于大部分开发人员来说,对其性能还是没有多大了解.本文就对它做一次性能测试,对比优化前后的性能区别. 一.运行环境 CPU: Intel(R ...
- my vimrc
runtime! debian.vim "设置编码 ,ucs-bom,shift-jis,gb18030,gbk,gb2312,cp936 ,ucs-bom,chinese "语言 ...
- Linux下运行top命令显示的PR\NI\RES\SHR\S\%MEM TIME+都代表什么
PID 进程号 USER 用户名 PR 优先级 NI nice值.负值表示高优先级,正值表示低优先级 RES 进程使用的.未被换出的物理内存大小,单位Kb S 进程状态: D 不可中断的睡眠状态 R ...
- Centos6.5(final)安装gcc和g++,python以及导致问题的解决方法
安装gcc:yum install gcc 安装g++:yum install gcc-c++ 安装python: centos默认是2.6的版本, 下载python ,我下载的是2.7.10. 1 ...