Python face recognition

"""Performs face alignment and calculates L2 distance between the embeddings of images."""

# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from scipy import misc
import tensorflow as tf
import numpy as np
import sys
import os
import argparse
import facenet
import align.detect_face


def main():
    model = "../models/20170216-091149"
    image_files = ['compare_images/index10.png', 'compare_images/index73.png']
    image_size = 160
    margin = 44
    gpu_memory_fraction = 0.5

    images = load_and_align_data(image_files, image_size, margin, gpu_memory_fraction)
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model
            facenet.load_model(model)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            # Run forward pass to calculate embeddings
            feed_dict = {images_placeholder: images, phase_train_placeholder: False}
            emb = sess.run(embeddings, feed_dict=feed_dict)

            nrof_images = len(image_files)

            print('Images:')
            for i in range(nrof_images):
                print('%1d: %s' % (i, image_files[i]))
            print('')

            # Print distance matrix
            print('Distance matrix')
            print('    ', end='')
            for i in range(nrof_images):
                print('    %1d     ' % i, end='')
            print('')
            for i in range(nrof_images):
                print('%1d  ' % i, end='')
                for j in range(nrof_images):
                    dist = np.sqrt(np.sum(np.square(np.subtract(emb[i, :], emb[j, :]))))
                    print('  %1.4f  ' % dist, end='')
                print('')


def load_and_align_data(image_paths, image_size, margin, gpu_memory_fraction):
    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # three steps' threshold
    factor = 0.709  # scale factor

    print('Creating networks and loading parameters')
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)

    nrof_samples = len(image_paths)
    img_list = [None] * nrof_samples
    for i in range(nrof_samples):
        img = misc.imread(os.path.expanduser(image_paths[i]))
        img_size = np.asarray(img.shape)[0:2]
        bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
        det = np.squeeze(bounding_boxes[0, 0:4])
        bb = np.zeros(4, dtype=np.int32)
        bb[0] = np.maximum(det[0] - margin / 2, 0)
        bb[1] = np.maximum(det[1] - margin / 2, 0)
        bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
        bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
        cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
        aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
        prewhitened = facenet.prewhiten(aligned)
        img_list[i] = prewhitened
    images = np.stack(img_list)
    return images


# def parse_arguments(argv):
# parser = argparse.ArgumentParser()
#
# parser.add_argument('model', type=str, default="./models/20170216-091149",
# help='Could be either a directory containing the meta_file and ckpt_file or a model protobuf (.pb) file')
# parser.add_argument('image_files', type=str, default="src/compare_images/index10.png src/compare_images/index73.png "
# , nargs='+', help='Images to compare')
# parser.add_argument('--image_size', type=int,
# help='Image size (height, width) in pixels.', default=160)
# parser.add_argument('--margin', type=int,
# help='Margin for the crop around the bounding box (height, width) in pixels.', default=44)
# parser.add_argument('--gpu_memory_fraction', type=float,
# help='Upper bound on the amount of GPU memory that will be used by the process.', default=0.5)
# return parser.parse_args(argv)


if __name__ == '__main__':
    main()
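The distance matrix printed by main() is just the pairwise L2 distance between FaceNet embeddings. Below is a minimal sketch of how that number could be turned into a same-person decision. The helper name is made up for illustration, and the 1.1 threshold is an assumption (a suitable value depends on the model and should be tuned on a validation set), not something stated in the original post.

import numpy as np


def is_same_person(emb1, emb2, threshold=1.1):
    """Return True if two embeddings are closer than the (assumed) threshold."""
    dist = np.sqrt(np.sum(np.square(np.subtract(emb1, emb2))))
    return dist < threshold

# Example with the two embeddings computed in main():
# same = is_same_person(emb[0, :], emb[1, :])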
"""Validate a face recognizer on the "Labeled Faces in the Wild" dataset (http://vis-www.cs.umass.edu/lfw/).
Embeddings are calculated using the pairs from http://vis-www.cs.umass.edu/lfw/pairs.txt and the ROC curve
is calculated and plotted. Both the model metagraph and the model parameters need to exist
in the same directory, and the metagraph should have the extension '.meta'.
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import numpy as np
import argparse
import facenet
import lfw
import os
import sys
import math
from sklearn import metrics
from scipy.optimize import brentq
from scipy import interpolate
from PIL import Image, ImageDraw
import cv2
from scipy import misc
import align.detect_face


def detect_face_init():
    cap = cv2.VideoCapture(0)
    print(cap.isOpened())
    classifier = cv2.CascadeClassifier("./xml/haarcascade_frontalface_alt.xml")
    count = 0
    return cap, classifier, count


def detect_face_clear(cap):
    cap.release()
    cv2.destroyAllWindows()


def load_and_align_data(image_paths, image_size, margin, gpu_memory_fraction):
    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # three steps' threshold
    factor = 0.709  # scale factor

    print('Creating networks and loading parameters')
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)

    nrof_samples = len(image_paths)
    img_list = [None] * nrof_samples
    for i in range(nrof_samples):
        img = misc.imread(os.path.expanduser(image_paths[i]))
        img_size = np.asarray(img.shape)[0:2]
        bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
        det = np.squeeze(bounding_boxes[0, 0:4])
        bb = np.zeros(4, dtype=np.int32)
        bb[0] = np.maximum(det[0] - margin / 2, 0)
        bb[1] = np.maximum(det[1] - margin / 2, 0)
        bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
        bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
        cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
        aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
        prewhitened = facenet.prewhiten(aligned)
        img_list[i] = prewhitened
    images = np.stack(img_list)
    return images


def compare_facevec(facevec1, facevec2):
    dist = np.sqrt(np.sum(np.square(np.subtract(facevec1, facevec2))))
    # print(' %1.4f ' % dist, end='')
    return dist


def face_recognition_using_facenet():
    cap, classifier, count = detect_face_init()
    model = "../models/20170216-091149"
    image_files = ['compare_images/index10.png', 'compare_images/index73.png']
    image_size = 160
    margin = 44
    gpu_memory_fraction = 0.5
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model
            facenet.load_model(model)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            # image_size = images_placeholder.get_shape()[1]  # For some reason this doesn't work for frozen graphs
            image_size = 160
            embedding_size = embeddings.get_shape()[1]

            index = 0
            th = 0.7
            face_recognition_tag = True
            color = (0, 255, 0)
            exist_face_vec = []
            face_detect_vec = []
            while count > -1:
                ret, img = cap.read()
                faceRects = classifier.detectMultiScale(img, 1.2, 2, cv2.CASCADE_SCALE_IMAGE, (20, 20))
                if len(faceRects) > 0:
                    for faceRect in faceRects:
                        x, y, w, h = faceRect
                        # cv2.rectangle(img, (int(x), int(y)), (int(x)+int(w), int(y)+int(h)), (0,255,0), 2, 0)
                        # print "save faceimg"
                        face_win = img[int(y):int(y) + int(h), int(x):int(x) + int(w)]
                        face_detect = cv2.resize(face_win, (image_size, image_size), interpolation=cv2.INTER_CUBIC)
                        # cv2.imwrite('faceimg/index' + str(index) + '.bmp', face_win)

                        # Run forward pass to calculate embeddings
                        # print('Running forward pass on face detect')
                        nrof_samples = 1
                        img_list = [None] * nrof_samples
                        prewhitened = facenet.prewhiten(face_detect)
                        img_list[0] = prewhitened
                        images = np.stack(img_list)
                        if index == 10:
                            feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                            exist_face_vec = sess.run(embeddings, feed_dict=feed_dict)
                        elif index > 10 and index % 10 == 0:
                            feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                            face_detect_vec = sess.run(embeddings, feed_dict=feed_dict)
                            cp = compare_facevec(face_detect_vec, exist_face_vec)
                            print("index ", index, " dist ", cp)
                            if cp < th:
                                print(True)
                                face_recognition_tag = True
                            else:
                                print(False)
                                face_recognition_tag = False
                        index += 1
                        # if face_recognition_tag == True:
                        #     cv2.rectangle(img, (int(x), int(y)), (int(x)+int(w), int(y)+int(h)), (255,0,0), 2, 0)
                        # else:
                        #     cv2.rectangle(img, (int(x), int(y)), (int(x)+int(w), int(y)+int(h)), (0,255,0), 2, 0)
                        cv2.rectangle(img, (int(x), int(y)), (int(x) + int(w), int(y) + int(h)), (0, 255, 0), 2, 0)
                cv2.imshow('video', img)
                key = cv2.waitKey(1)
                if key == ord('q'):
                    break
            detect_face_clear(cap)


if __name__ == '__main__':
    face_recognition_using_facenet()
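The docstring at the top of this second script talks about computing a ROC curve on the LFW pairs, and the imports of sklearn.metrics, brentq and interpolate point the same way, but the code above never actually uses them. Below is a minimal sketch of what that evaluation could look like; the inputs (distances, actual_issame) are hypothetical placeholders, and the snippet is only an illustration of the idea, not code from the original project.

import numpy as np
from sklearn import metrics
from scipy.optimize import brentq
from scipy import interpolate


def roc_and_eer(distances, actual_issame):
    """distances: L2 distance per pair; actual_issame: True if the pair shows the same person."""
    # Smaller distance means "more likely the same person", so negate it to use it as a score.
    fpr, tpr, _ = metrics.roc_curve(actual_issame, -np.asarray(distances))
    auc = metrics.auc(fpr, tpr)
    # Equal error rate: the operating point where the false positive rate equals 1 - true positive rate.
    eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
    return fpr, tpr, auc, eer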