OpenPose PyTorch code analysis
github: https://github.com/tensorboy/pytorch_Realtime_Multi-Person_Pose_Estimation
# -*- coding: utf-8 -*-
import os
import re
import sys
import cv2
import math
import time
import scipy
import argparse
import matplotlib
import numpy as np
import pylab as plt
from joblib import Parallel, delayed
import util
import torch
import torch as T
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
from config_reader import config_reader
from scipy.ndimage.filters import gaussian_filter
#parser = argparse.ArgumentParser()
#parser.add_argument('--t7_file', required=True)
#parser.add_argument('--pth_file', required=True)
#args = parser.parse_args()

torch.set_num_threads(torch.get_num_threads())
weight_name = './model/pose_model.pth'

blocks = {}
# Limbs are numbered from 1, matching the "Pose Output Format" figure.
# find connection in the specified sequence, center 29 is in the position 15
limbSeq = [[2,3], [2,6], [3,4], [4,5], [6,7], [7,8], [2,9], [9,10], \
           [10,11], [2,12], [12,13], [13,14], [2,1], [1,15], [15,17], \
           [1,16], [16,18], [3,17], [6,18]]

# the middle joints heatmap correspondence
mapIdx = [[31,32], [39,40], [33,34], [35,36], [41,42], [43,44], [19,20], [21,22], \
          [23,24], [25,26], [27,28], [29,30], [47,48], [49,50], [53,54], [51,52], \
          [55,56], [37,38], [45,46]]

# visualization colors, one per keypoint
colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
          [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
          [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]

# The heatmap branch outputs 19 channels: one confidence map per joint (the 19th is background).
# The PAF branch outputs 38 channels: the x/y components of each limb's unit-vector field.
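# Note added for this walkthrough (not in the original repo): mapIdx values are offsets into a
# combined 57-channel layout in which the 19 heatmap channels come first, which is why the
# post-processing below uses [x - 19 for x in mapIdx[k]] to turn each pair into the (x-channel,
# y-channel) indices of the 38-channel PAF tensor. A minimal sanity check under that assumption:
for _pair in mapIdx:
    _xy = [_c - 19 for _c in _pair]          # e.g. [31, 32] -> [12, 13]
    assert 0 <= _xy[0] < 38 and 0 <= _xy[1] < 38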
block0 = [{'conv1_1':[3,64,3,1,1]}, {'conv1_2':[64,64,3,1,1]}, {'pool1_stage1':[2,2,0]},
          {'conv2_1':[64,128,3,1,1]}, {'conv2_2':[128,128,3,1,1]}, {'pool2_stage1':[2,2,0]},
          {'conv3_1':[128,256,3,1,1]}, {'conv3_2':[256,256,3,1,1]}, {'conv3_3':[256,256,3,1,1]},
          {'conv3_4':[256,256,3,1,1]}, {'pool3_stage1':[2,2,0]}, {'conv4_1':[256,512,3,1,1]},
          {'conv4_2':[512,512,3,1,1]}, {'conv4_3_CPM':[512,256,3,1,1]}, {'conv4_4_CPM':[256,128,3,1,1]}]

# stage 1: branch L1 predicts the 38 PAF channels, branch L2 predicts the 19 heatmap channels
blocks['block1_1'] = [{'conv5_1_CPM_L1':[128,128,3,1,1]}, {'conv5_2_CPM_L1':[128,128,3,1,1]},
                      {'conv5_3_CPM_L1':[128,128,3,1,1]}, {'conv5_4_CPM_L1':[128,512,1,1,0]},
                      {'conv5_5_CPM_L1':[512,38,1,1,0]}]

blocks['block1_2'] = [{'conv5_1_CPM_L2':[128,128,3,1,1]}, {'conv5_2_CPM_L2':[128,128,3,1,1]},
                      {'conv5_3_CPM_L2':[128,128,3,1,1]}, {'conv5_4_CPM_L2':[128,512,1,1,0]},
                      {'conv5_5_CPM_L2':[512,19,1,1,0]}]

# refinement stages 2-6: each takes the 185-channel concatenation (38 + 19 + 128) as input
for i in range(2,7):
    blocks['block%d_1'%i] = [{'Mconv1_stage%d_L1'%i:[185,128,7,1,3]}, {'Mconv2_stage%d_L1'%i:[128,128,7,1,3]},
                             {'Mconv3_stage%d_L1'%i:[128,128,7,1,3]}, {'Mconv4_stage%d_L1'%i:[128,128,7,1,3]},
                             {'Mconv5_stage%d_L1'%i:[128,128,7,1,3]}, {'Mconv6_stage%d_L1'%i:[128,128,1,1,0]},
                             {'Mconv7_stage%d_L1'%i:[128,38,1,1,0]}]
    blocks['block%d_2'%i] = [{'Mconv1_stage%d_L2'%i:[185,128,7,1,3]}, {'Mconv2_stage%d_L2'%i:[128,128,7,1,3]},
                             {'Mconv3_stage%d_L2'%i:[128,128,7,1,3]}, {'Mconv4_stage%d_L2'%i:[128,128,7,1,3]},
                             {'Mconv5_stage%d_L2'%i:[128,128,7,1,3]}, {'Mconv6_stage%d_L2'%i:[128,128,1,1,0]},
                             {'Mconv7_stage%d_L2'%i:[128,19,1,1,0]}]

def make_layers(cfg_dict):
    layers = []
    for i in range(len(cfg_dict)-1):
        one_ = cfg_dict[i]
        for k,v in one_.iteritems():
            if 'pool' in k:
                layers += [nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2])]
            else:
                conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride=v[3], padding=v[4])
                layers += [conv2d, nn.ReLU(inplace=True)]
    # the last layer of each block is a plain convolution (no ReLU): it produces the PAF / heatmap scores
    one_ = cfg_dict[-1].keys()
    k = one_[0]
    v = cfg_dict[-1][k]
    conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride=v[3], padding=v[4])
    layers += [conv2d]
    return nn.Sequential(*layers)

layers = []
for i in range(len(block0)):
    one_ = block0[i]
    for k,v in one_.iteritems():
        if 'pool' in k:
            layers += [nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2])]
        else:
            conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride=v[3], padding=v[4])
            layers += [conv2d, nn.ReLU(inplace=True)]

models = {}
models['block0'] = nn.Sequential(*layers)
for k,v in blocks.iteritems():
    models[k] = make_layers(v)

class pose_model(nn.Module):
    def __init__(self, model_dict, transform_input=False):
        super(pose_model, self).__init__()
        self.model0   = model_dict['block0']
        self.model1_1 = model_dict['block1_1']
        self.model2_1 = model_dict['block2_1']
        self.model3_1 = model_dict['block3_1']
        self.model4_1 = model_dict['block4_1']
        self.model5_1 = model_dict['block5_1']
        self.model6_1 = model_dict['block6_1']
        self.model1_2 = model_dict['block1_2']
        self.model2_2 = model_dict['block2_2']
        self.model3_2 = model_dict['block3_2']
        self.model4_2 = model_dict['block4_2']
        self.model5_2 = model_dict['block5_2']
        self.model6_2 = model_dict['block6_2']

    def forward(self, x):
        out1 = self.model0(x)

        out1_1 = self.model1_1(out1)
        out1_2 = self.model1_2(out1)
        out2 = torch.cat([out1_1, out1_2, out1], 1)

        out2_1 = self.model2_1(out2)
        out2_2 = self.model2_2(out2)
        out3 = torch.cat([out2_1, out2_2, out1], 1)

        out3_1 = self.model3_1(out3)
        out3_2 = self.model3_2(out3)
        out4 = torch.cat([out3_1, out3_2, out1], 1)

        out4_1 = self.model4_1(out4)
        out4_2 = self.model4_2(out4)
        out5 = torch.cat([out4_1, out4_2, out1], 1)

        out5_1 = self.model5_1(out5)
        out5_2 = self.model5_2(out5)
        out6 = torch.cat([out5_1, out5_2, out1], 1)

        out6_1 = self.model6_1(out6)
        out6_2 = self.model6_2(out6)
        return out6_1, out6_2


model = pose_model(models)
model.load_state_dict(torch.load(weight_name))
model.cuda()
model.float()
model.eval()

param_, model_ = config_reader()
#torch.nn.functional.pad(img pad, mode='constant', value=model_['padValue'])
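# Added sanity check for this walkthrough (not in the original repo), assuming a 368x368 input
# (model_['boxsize'] in the stock config): the network downsamples by 8 (three stride-2 poolings),
# branch L1 returns the 38-channel PAFs and branch L2 the 19-channel heatmaps.
_dummy = Variable(torch.randn(1, 3, 368, 368), volatile=True).cuda()
_paf_out, _heat_out = model(_dummy)
print _paf_out.size()    # expected (1, 38, 46, 46)
print _heat_out.size()   # expected (1, 19, 46, 46)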
tic = time.time()
test_image = './sample_image/ski.jpg'
#test_image = 'a.jpg'
oriImg = cv2.imread(test_image) # B,G,R order
imageToTest = Variable(T.transpose(T.transpose(T.unsqueeze(torch.from_numpy(oriImg).float(),0),2,3),1,2),volatile=True).cuda()

# multi-scale inference: one forward pass per scale in param_['scale_search']
multiplier = [x * model_['boxsize'] / oriImg.shape[0] for x in param_['scale_search']]

heatmap_avg = torch.zeros((len(multiplier),19,oriImg.shape[0], oriImg.shape[1])).cuda()
paf_avg = torch.zeros((len(multiplier),38,oriImg.shape[0], oriImg.shape[1])).cuda()
#print heatmap_avg.size()
toc = time.time()
print 'time is %.5f'%(toc-tic)
tic = time.time()
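# Note added for this walkthrough (not in the original repo): inside the loop each rescaled image is
# padded on the right/bottom so both sides become multiples of model_['stride'] (8 for this model),
# since the network downsamples by 8. Worked example, assuming stride = 8 and a scaled height of 291:
#   pad_h = 8 - (291 % 8) = 5  ->  new_h = 296  ->  296 / 8 = 37 feature-map rows.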
for m in range(len(multiplier)):
    scale = multiplier[m]
    h = int(oriImg.shape[0]*scale)
    w = int(oriImg.shape[1]*scale)
    pad_h = 0 if (h%model_['stride']==0) else model_['stride'] - (h % model_['stride'])
    pad_w = 0 if (w%model_['stride']==0) else model_['stride'] - (w % model_['stride'])
    new_h = h+pad_h
    new_w = w+pad_w

    imageToTest = cv2.resize(oriImg, (0,0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
    imageToTest_padded, pad = util.padRightDownCorner(imageToTest, model_['stride'], model_['padValue'])
    imageToTest_padded = np.transpose(np.float32(imageToTest_padded[:,:,:,np.newaxis]), (3,2,0,1))/256 - 0.5
    # normalized to roughly (-0.5, 0.5)

    feed = Variable(T.from_numpy(imageToTest_padded)).cuda()
    output1, output2 = model(feed)
    print output1.size()
    print output2.size()

    # upsample both outputs back to the original image size
    heatmap = nn.UpsamplingBilinear2d((oriImg.shape[0], oriImg.shape[1])).cuda()(output2)
    paf = nn.UpsamplingBilinear2d((oriImg.shape[0], oriImg.shape[1])).cuda()(output1)

    heatmap_avg[m] = heatmap[0].data
    paf_avg[m] = paf[0].data

toc = time.time()
print 'time is %.5f'%(toc-tic)
tic = time.time()
# average the heatmaps and PAFs over the different scales
heatmap_avg = T.transpose(T.transpose(T.squeeze(T.mean(heatmap_avg, 0)),0,1),1,2).cuda()
paf_avg = T.transpose(T.transpose(T.squeeze(T.mean(paf_avg, 0)),0,1),1,2).cuda()
heatmap_avg=heatmap_avg.cpu().numpy()
paf_avg = paf_avg.cpu().numpy()
toc =time.time()
print 'time is %.5f'%(toc-tic)
tic = time.time()

all_peaks = []
peak_counter = 0
# picture array is reversed
for part in range(18):    # one confidence map per keypoint
    map_ori = heatmap_avg[:,:,part]
    map = gaussian_filter(map_ori, sigma=3)

    map_left = np.zeros(map.shape)
    map_left[1:,:] = map[:-1,:]
    map_right = np.zeros(map.shape)
    map_right[:-1,:] = map[1:,:]
    map_up = np.zeros(map.shape)
    map_up[:,1:] = map[:,:-1]
    map_down = np.zeros(map.shape)
    map_down[:,:-1] = map[:,1:]

    # a pixel is a peak if it is a local maximum of its 4-neighbourhood and above thre1
    peaks_binary = np.logical_and.reduce((map>=map_left, map>=map_right, map>=map_up, map>=map_down, map > param_['thre1']))
    # peaks_binary = T.eq(
    # peaks = zip(T.nonzero(peaks_binary)[0],T.nonzero(peaks_binary)[0])

    peaks = zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])  # (x, y) -- note the reversed order
    peaks_with_score = [x + (map_ori[x[1],x[0]],) for x in peaks]
    id = range(peak_counter, peak_counter + len(peaks))
    peaks_with_score_and_id = [peaks_with_score[i] + (id[i],) for i in range(len(id))]

    # peaks of different people for this keypoint: [(x, y, peak_score, id), ...]
    all_peaks.append(peaks_with_score_and_id)
    peak_counter += len(peaks)
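# Toy illustration added for this walkthrough (not in the original repo): the four shifted copies above
# implement a 4-neighbourhood non-maximum suppression. On a small array with a single bump, only the
# bump survives both the local-maximum test and the threshold:
_toy = np.zeros((5, 5)); _toy[2, 2] = 1.0
_toy_left = np.zeros(_toy.shape);  _toy_left[1:, :]   = _toy[:-1, :]
_toy_right = np.zeros(_toy.shape); _toy_right[:-1, :] = _toy[1:, :]
_toy_up = np.zeros(_toy.shape);    _toy_up[:, 1:]     = _toy[:, :-1]
_toy_down = np.zeros(_toy.shape);  _toy_down[:, :-1]  = _toy[:, 1:]
_toy_peaks = np.logical_and.reduce((_toy >= _toy_left, _toy >= _toy_right,
                                    _toy >= _toy_up, _toy >= _toy_down, _toy > 0.1))
print np.nonzero(_toy_peaks)   # -> (array([2]), array([2]))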
# line-integral scoring of candidate limbs, sampled at mid_num points along each segment
connection_all = []
special_k = []
mid_num = 10

for k in range(len(mapIdx)):
    score_mid = paf_avg[:,:,[x-19 for x in mapIdx[k]]]  # the 2-channel PAF slice (x/y components) for this limb
    candA = all_peaks[limbSeq[k][0]-1]  # candidate set A for the first joint of limb k (one entry per person)
    candB = all_peaks[limbSeq[k][1]-1]  # candidate set B for the second joint of limb k (one entry per person)
    nA = len(candA)
    nB = len(candB)
    # indexA, indexB = limbSeq[k]
    if(nA != 0 and nB != 0):  # only try to connect when both joints have candidates
        connection_candidate = []
        for i in range(nA):
            for j in range(nB):
                vec = np.subtract(candB[j][:2], candA[i][:2])
                norm = math.sqrt(vec[0]*vec[0] + vec[1]*vec[1])
                vec = np.divide(vec, norm)  # unit vector from A[i] to B[j]

                # sample mid_num points on the segment A[i] -> B[j]
                startend = zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \
                               np.linspace(candA[i][1], candB[j][1], num=mid_num))

                # PAF vectors at the sampled points
                vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \
                                  for I in range(len(startend))])
                vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \
                                  for I in range(len(startend))])

                # dot product of the sampled PAF vectors with the limb's unit vector
                score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
                score_with_dist_prior = sum(score_midpts)/len(score_midpts) + min(0.5*oriImg.shape[0]/norm-1, 0)
                criterion1 = len(np.nonzero(score_midpts > param_['thre2'])[0]) > 0.8 * len(score_midpts)
                criterion2 = score_with_dist_prior > 0
                if criterion1 and criterion2:
                    # (i, j, score, score + both endpoint scores)
                    connection_candidate.append([i, j, score_with_dist_prior, score_with_dist_prior+candA[i][2]+candB[j][2]])
        # greedily keep the highest-scoring connections, each candidate joint used at most once
        connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
        connection = np.zeros((0,5))
        for c in range(len(connection_candidate)):
            i,j,s = connection_candidate[c][0:3]
            if(i not in connection[:,3] and j not in connection[:,4]):
                connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])  # A_id, B_id, score, i, j
                if(len(connection) >= min(nA, nB)):
                    break

        # accepted connections for limb k: [[A_id, B_id, score, i, j], ...]
        connection_all.append(connection)
    else:
        special_k.append(k)
        connection_all.append([])
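# Note added for this walkthrough (not in the original repo): score_with_dist_prior above is a
# discretized version of the PAF line integral from the paper, i.e. roughly
#     E = (1/T) * sum_t  L(p_t) . (dB - dA) / ||dB - dA||,   with p_t sampled on the segment dA -> dB,
# plus a penalty term min(0.5*H/||dB - dA|| - 1, 0) that only kicks in for limbs longer than half
# the image height H, discounting implausibly long connections.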
'''
function: assemble the detected joints into limbs, one subset row per person
subset: the last number in each row is the total number of parts found for that person
subset: the second-to-last number in each row is the overall score of that person's configuration
candidate: all candidate joints, one (x, y, score, id) row per peak
connection_all: the candidate limbs accepted in the previous step
'''
subset = -1 * np.ones((0, 20))
candidate = np.array([item for sublist in all_peaks for item in sublist])  # one (x, y, score, id) row per joint candidate

for k in range(len(mapIdx)):
    if k not in special_k:
        partAs = connection_all[k][:,0]  # candidate ids for the first joint of limb k
        partBs = connection_all[k][:,1]  # candidate ids for the second joint of limb k
        indexA, indexB = np.array(limbSeq[k]) - 1  # joint indices (0-based)

        for i in range(len(connection_all[k])):  #= 1:size(temp,1)
            found = 0
            subset_idx = [-1, -1]
            # check how many existing people already contain one of these two joints
            for j in range(len(subset)):  #1:size(subset,1)
                if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
                    subset_idx[found] = j
                    found += 1

            if found == 1:
                # attach this limb to that person
                j = subset_idx[0]
                if(subset[j][indexB] != partBs[i]):
                    subset[j][indexB] = partBs[i]
                    subset[j][-1] += 1
                    subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                elif(subset[j][indexA] != partAs[i]):
                    subset[j][indexA] = partAs[i]
                    subset[j][-1] += 1
                    subset[j][-2] += candidate[partAs[i].astype(int), 2] + connection_all[k][i][2]

            elif found == 2:  # if found 2 and disjoint, merge them
                j1, j2 = subset_idx
                print "found = 2"
                membership = ((subset[j1]>=0).astype(int) + (subset[j2]>=0).astype(int))[:-2]
                if len(np.nonzero(membership == 2)[0]) == 0:
                    # the two rows never claim the same joint, so they are two halves of one person: merge them
                    subset[j1][:-2] += (subset[j2][:-2] + 1)
                    subset[j1][-2:] += subset[j2][-2:]
                    subset[j1][-2] += connection_all[k][i][2]
                    subset = np.delete(subset, j2, 0)
                # To-Do: the non-disjoint case is not handled here -- how should it be merged?
                # else: # as like found == 1
                #     subset[j1][indexB] = partBs[i]
                #     subset[j1][-1] += 1
                #     subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]

            # if neither joint belongs to an existing person, start a new person
            elif not found and k < 17:
                row = -1 * np.ones(20)
                row[indexA] = partAs[i]
                row[indexB] = partBs[i]
                row[-1] = 2
                row[-2] = sum(candidate[connection_all[k][i,:2].astype(int), 2]) + connection_all[k][i][2]
                subset = np.vstack([subset, row])
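# Note added for this walkthrough (my reading of the code, not from the original repo): in the
# found == 2 branch, two subset rows are merged only when they never claim the same joint slot.
# For example, row j1 = [5, -1, 7, -1, ...] and row j2 = [-1, 9, -1, 12, ...] have no slot where both
# entries are non-negative, so they describe disjoint halves of one person. The expression
# subset[j1][:-2] += (subset[j2][:-2] + 1) then works because -1 + (id + 1) = id fills the empty slots
# of j1 with j2's ids, while id + (-1 + 1) = id leaves j1's own slots untouched.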
# drop people with too few detected parts or a low average score
deleteIdx = []
for i in range(len(subset)):
    if subset[i][-1] < 4 or subset[i][-2]/subset[i][-1] < 0.4:
        deleteIdx.append(i)
subset = np.delete(subset, deleteIdx, axis=0)

# visualization
canvas = cv2.imread(test_image)  # B,G,R order
for i in range(18):
    for j in range(len(all_peaks[i])):
        cv2.circle(canvas, all_peaks[i][j][0:2], 4, colors[i], thickness=-1)

stickwidth = 4

for i in range(17):
    for n in range(len(subset)):
        index = subset[n][np.array(limbSeq[i])-1]  # candidate ids of the limb's two joints
        if -1 in index:
            continue
        cur_canvas = canvas.copy()
        Y = candidate[index.astype(int), 0]  # x (horizontal) pixel coordinates of the two joints
        X = candidate[index.astype(int), 1]  # y (vertical) pixel coordinates -- note the swapped names
        mX = np.mean(X)
        mY = np.mean(Y)
        length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
        angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
        polygon = cv2.ellipse2Poly((int(mY),int(mX)), (int(length/2), stickwidth), int(angle), 0, 360, 1)
        cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
        canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)

#Parallel(n_jobs=1)(delayed(handle_one)(i) for i in range(18))
toc = time.time()
print 'time is %.5f'%(toc-tic)
cv2.imwrite('result.png',canvas)