VGGNet学习——实践
0 - DataSet
http://www.csc.kth.se/~att/Site/Animals.html
1 - Code
1.1 - Import Packages
import tensorflow as tf
import os, glob
import numpy as np
from skimage import io, transform
1.2 - Initialize Parameters
# Dataset root: one subdirectory per animal class, images under <class>/original/.
DATA_PATH = "animal_database/"
# VGG-16 canonical input size: 224 x 224 x 3 (RGB).
INPUT_W = 224
INPUT_H = 224
INPUT_C = 3
# Number of output classes (19 animal categories in the KTH-ANIMALS set).
OUTPUT_C = 19
TRAINING_STEPS = 50
MODEL_SAVE_PATH = "model"
MODEL_NAME = "model.ckpt"
BATCH_SIZE = 64
# Base rate for exponential learning-rate decay (decayed once per epoch).
LEARNING_RATE_BASE = 1e-6
LEARNING_RATE_DECAY = 0.99
# Momentum coefficient for tf.train.MomentumOptimizer.
MOMENTUM = 0.9
# Dropout keep-probabilities: dropout only during training.
TRAIN_KEEP_PROB = 0.6
VAL_KEEP_PROB = 1.0
TEST_KEEP_PROB = 1.0
1.3 - Build Data Reader
class DCdataset(object):
    """Image dataset loader.

    Reads every ``<path>/<class>/original/*.jpg``, resizes each image to
    ``(w, h, c)``, one-hot encodes the class index, shuffles, and splits
    into train/validation sets by ``ratio``.

    Attributes set by __init__:
        x_train, y_train: training images/labels (float32 / one-hot float).
        x_val, y_val:     validation images/labels.
        train_size, val_size: number of samples in each split.
    """

    def __init__(self, path, w, h, c, ratio=0.8):
        def onehot(n):
            # One-hot vector of length OUTPUT_C (module-level constant).
            l = np.zeros([OUTPUT_C])
            l[n] = 1
            return l

        print("Process images start")
        # Each subdirectory of `path` is one class; its index is the label.
        cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
        x = []
        y = []
        for (i, folder) in enumerate(cate):
            for img_path in glob.glob(folder + "/original/*.jpg"):
                img = io.imread(img_path)
                img = transform.resize(img, (w, h, c))
                x.append(img)
                y.append(i)
        x = np.asarray(x, np.float32)
        y = np.asarray(y, np.int32)
        num_example = x.shape[0]
        # Shuffle images and labels with the same permutation.
        arr = np.arange(num_example)
        np.random.shuffle(arr)
        x = x[arr]
        y = y[arr]
        x = np.asarray([np.reshape(x_, (w, h, c)) for x_ in x])
        y = np.asarray([onehot(y_) for y_ in y])
        # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement.
        s = int(num_example * ratio)
        self.x_train, self.x_val = x[:s], x[s:]
        self.y_train, self.y_val = y[:s], y[s:]
        self.train_size = s
        self.val_size = num_example - s
        print("Process images end")

    def next_batch(self, batch_size):
        """Return a random training batch of up to `batch_size` samples."""
        arr = np.arange(self.train_size)
        np.random.shuffle(arr)
        arr = arr[:batch_size]
        return self.x_train[arr], self.y_train[arr]

    def next_val_batch(self, batch_size):
        """Return a random validation batch of up to `batch_size` samples."""
        arr = np.arange(self.val_size)
        np.random.shuffle(arr)
        arr = arr[:batch_size]
        return self.x_val[arr], self.y_val[arr]
1.4 - Build Network
def conv_op(input_op, name, kh, kw, n_out, dh, dw, p):
    """Create a conv2d + bias + ReLU layer and record its parameters.

    Args:
        input_op: input tensor in NHWC layout.
        name: name scope for the layer.
        kh, kw: kernel height and width.
        n_out: number of output channels.
        dh, dw: vertical and horizontal strides.
        p: list that collects the layer's trainable variables.

    Returns:
        The ReLU activation tensor.
    """
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(
            scope + "w",
            shape=[kh, kw, n_in, n_out],
            dtype=tf.float32,
            initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d(input_op, kernel, (1, dh, dw, 1), padding="SAME")
        biases = tf.Variable(
            tf.constant(0.0, shape=[n_out], dtype=tf.float32),
            trainable=True, name="b")
        activation = tf.nn.relu(tf.nn.bias_add(conv, biases), name=scope)
        p += [kernel, biases]
        return activation
def fc_op(input_op, name, n_out, p):
    """Create a fully-connected + ReLU layer and record its parameters.

    Args:
        input_op: 2-D input tensor (batch, features).
        name: name scope for the layer.
        n_out: number of output units.
        p: list that collects the layer's trainable variables.

    Returns:
        The ReLU activation tensor (relu(x @ w + b)).
    """
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(
            scope + "w",
            shape=[n_in, n_out],
            dtype=tf.float32,
            initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.Variable(
            tf.constant(0.1, shape=[n_out], dtype=tf.float32), name="b")
        # relu_layer fuses matmul + bias_add + relu in one op.
        activation = tf.nn.relu_layer(input_op, kernel, biases, name=scope)
        p += [kernel, biases]
        return activation
def mpool_op(input_op, name, kh, kw, dh, dw):
    """2-D max pooling with SAME padding.

    kh/kw give the pooling-window height/width, dh/dw the strides.
    """
    window = [1, kh, kw, 1]
    stride = [1, dh, dw, 1]
    return tf.nn.max_pool(input_op, ksize=window, strides=stride,
                          padding="SAME", name=name)
def inference_op(input_op, keep_prob):
    """Build the VGG-16 graph: five conv stages followed by three FC layers.

    Args:
        input_op: image batch tensor (NHWC).
        keep_prob: dropout keep-probability placeholder for the FC layers.

    Returns:
        (fc8, p): the raw logits tensor (OUTPUT_C classes) and the list of
        all trainable parameters collected by conv_op/fc_op.
    """
    p = []

    # Stage 1: 2 x conv3-64, then 2x2/2 max-pool.
    conv1_1 = conv_op(input_op, name="conv1_1", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
    conv1_2 = conv_op(conv1_1, name="conv1_2", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
    pool1 = mpool_op(conv1_2, name="pool1", kh=2, kw=2, dh=2, dw=2)

    # Stage 2: 2 x conv3-128.
    conv2_1 = conv_op(pool1, name="conv2_1", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
    conv2_2 = conv_op(conv2_1, name="conv2_2", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
    pool2 = mpool_op(conv2_2, name="pool2", kh=2, kw=2, dh=2, dw=2)

    # Stage 3: 3 x conv3-256.
    conv3_1 = conv_op(pool2, name="conv3_1", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    conv3_2 = conv_op(conv3_1, name="conv3_2", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    conv3_3 = conv_op(conv3_2, name="conv3_3", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    pool3 = mpool_op(conv3_3, name="pool3", kh=2, kw=2, dh=2, dw=2)

    # Stage 4: 3 x conv3-512.
    conv4_1 = conv_op(pool3, name="conv4_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv4_2 = conv_op(conv4_1, name="conv4_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv4_3 = conv_op(conv4_2, name="conv4_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    pool4 = mpool_op(conv4_3, name="pool4", kh=2, kw=2, dh=2, dw=2)

    # Stage 5: 3 x conv3-512.
    conv5_1 = conv_op(pool4, name="conv5_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv5_2 = conv_op(conv5_1, name="conv5_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv5_3 = conv_op(conv5_2, name="conv5_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    pool5 = mpool_op(conv5_3, name="pool5", kh=2, kw=2, dh=2, dw=2)

    # Flatten the final feature map for the fully-connected head.
    shp = pool5.get_shape()
    flattened_shape = shp[1].value * shp[2].value * shp[3].value
    resh1 = tf.reshape(pool5, [-1, flattened_shape], name="resh1")

    # Classifier head: fc-4096, fc-4096, fc-OUTPUT_C, dropout between FCs.
    fc6 = fc_op(resh1, name="fc6", n_out=4096, p=p)
    fc6_drop = tf.nn.dropout(fc6, keep_prob, name="fc6_drop")
    fc7 = fc_op(fc6_drop, name="fc7", n_out=4096, p=p)
    fc7_drop = tf.nn.dropout(fc7, keep_prob, name="fc7_drop")
    fc8 = fc_op(fc7_drop, name="fc8", n_out=OUTPUT_C, p=p)

    # Softmax/argmax are applied by the caller; return raw logits.
    return fc8, p
1.5 - Train
def train():
    """Train VGG-16 on the animal dataset and checkpoint the model.

    Builds placeholders, the inference graph, loss/accuracy ops and a
    Momentum optimizer with per-epoch exponential LR decay, then runs
    TRAINING_STEPS batches, evaluating a validation batch every 2 steps.
    """
    x = tf.placeholder(tf.float32, [None, INPUT_W, INPUT_H, INPUT_C], name="x-input")
    y_ = tf.placeholder(tf.float32, [None, OUTPUT_C], name="y-input")
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")

    dataset = DCdataset(DATA_PATH, INPUT_W, INPUT_H, INPUT_C)
    global_step = tf.Variable(0, trainable=False)
    y, p = inference_op(x, keep_prob)

    # sparse_* expects integer class ids, so convert one-hot labels back.
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1)))
    accuracy = tf.reduce_mean(tf.cast(
        tf.equal(tf.argmax(tf.nn.softmax(y), 1), tf.argmax(y_, 1)), tf.float32))

    # Decay the learning rate once per epoch (train_size / BATCH_SIZE steps).
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        dataset.train_size / BATCH_SIZE,
        LEARNING_RATE_DECAY
    )
    optimizer = tf.train.MomentumOptimizer(learning_rate, MOMENTUM).minimize(
        loss, global_step=global_step)

    with tf.Session() as sess:
        # BUG FIX: tf.initialize_all_variables() was deprecated and removed
        # in TF 1.x; tf.global_variables_initializer() is the replacement.
        tf.global_variables_initializer().run()
        saver = tf.train.Saver()
        for i in range(TRAINING_STEPS):
            xs, ys = dataset.next_batch(BATCH_SIZE)
            _, loss_value, accuracy_value, step = sess.run(
                [optimizer, loss, accuracy, global_step],
                feed_dict={x: xs, y_: ys, keep_prob: TRAIN_KEEP_PROB})
            print("After %d training step(s), loss on training batch is %g, accuracy on training batch is %g%%." % (step, loss_value, accuracy_value*100))
            if i % 2 == 0:
                xs, ys = dataset.next_val_batch(BATCH_SIZE)
                # BUG FIX: the original also ran `optimizer` here, i.e. it
                # trained on validation batches. Evaluate metrics only.
                loss_value, accuracy_value = sess.run(
                    [loss, accuracy],
                    feed_dict={x: xs, y_: ys, keep_prob: VAL_KEEP_PROB})
                print("[Validation] Step %d: Validation loss is %g and Validation accuracy is %g%%." % (step, loss_value, accuracy_value*100))
        # Save the final checkpoint, tagged with the global step.
        saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                   global_step=global_step)

train()
1.6 - Test
def test(img_path, model_path):
    """Restore a saved checkpoint and classify one image.

    Args:
        img_path: path to the jpg image to classify.
        model_path: checkpoint prefix (the matching .meta file must exist).
    """
    with tf.Session() as sess:
        # Rebuild the graph from the .meta file, then restore the weights.
        saver = tf.train.import_meta_graph(model_path + ".meta")
        saver.restore(sess, model_path)
        graph = tf.get_default_graph()
        x = graph.get_tensor_by_name("x-input:0")
        keep_prob = graph.get_tensor_by_name("keep_prob:0")
        # NOTE(review): fc_op creates its output inside a name scope, so the
        # logits tensor may actually be named "fc8/...:0" — verify against
        # the restored graph before relying on this lookup.
        fc8 = graph.get_tensor_by_name("fc8:0")
        img = io.imread(img_path)
        img = transform.resize(img, (INPUT_W, INPUT_H, INPUT_C))
        # BUG FIX: "TEST_KEEP_PEOB" was a typo that raised NameError at
        # runtime; the constant is TEST_KEEP_PROB.
        y = sess.run(fc8, feed_dict={
            x: np.reshape(img, [-1, INPUT_W, INPUT_H, INPUT_C]),
            keep_prob: TEST_KEEP_PROB
        })
        # (Removed an unused `prediction_labels = tf.argmax(...)` local; the
        # original only ever printed the softmax probabilities.)
        softmax = tf.nn.softmax(y)
        print("label: ", sess.run(softmax))

img_path = os.path.join(DATA_PATH, "cougar", "original", "4400.jpg")
model_path = os.path.join(MODEL_SAVE_PATH, MODEL_NAME+"-2")
test(img_path, model_path)
VGGNet学习——实践的更多相关文章
- 使用sklearn进行集成学习——实践
系列 <使用sklearn进行集成学习——理论> <使用sklearn进行集成学习——实践> 目录 1 Random Forest和Gradient Tree Boosting ...
- Nagios学习实践系列——基本安装篇
开篇介绍 最近由于工作需要,学习研究了一下Nagios的安装.配置.使用,关于Nagios的介绍,可以参考我上篇随笔Nagios学习实践系列——产品介绍篇 实验环境 操作系统:Red Hat Ente ...
- Nagios学习实践系列——配置研究[监控当前服务器]
其实上篇Nagios学习实践系列——基本安装篇只是安装了Nagios基本组件,虽然能够打开主页,但是如果不配置相关配置文件文件,那么左边菜单很多页面都打不开,相当于只是一个空壳子.接下来,我们来学习研 ...
- 前端学习实践笔记--JavaScript深入【1】
这一年中零零散散看过几本javascript的书,回过头看之前写过的javascript学习笔记,未免有点汗颜,突出“肤浅”二字,然越深入越觉得javascript的博大精深,有种只缘身在此山中的感觉 ...
- Appium学习实践(四)结构优化
随着我们测试脚本中的用例越来越多,我们不可能将所有的用例都放在同一个脚本中,所以我们需要优化我们的结构.将脚本放在一个文件夹中,再通过别的脚本来执行脚本.这样,我们也可以有选择性的执行我们的脚本 先来 ...
- Appium学习实践(三)测试用例脚本以及测试报告输出
之前Appium学习实践(二)Python简单脚本以及元素的属性设置中的脚本,会有一个问题,就是在每个测试用例完成之后都会执行tearDown,然后重新setUp,这样导致脚本的执行效率偏低,而且会有 ...
- Appium学习实践(二)Python简单脚本以及元素的属性设置
1.简单的Python脚本 Appium中的设置与Appium学习实践(一)简易运行Appium中的一致 Launch后,执行脚本 #coding:utf-8 import unittest impo ...
- ReactNative学习实践--Navigator实践
离上次写RN笔记有一段时间了,期间参与了一个新项目,只在最近的空余时间继续学习实践,因此进度比较缓慢,不过这并不代表没有新进展,其实这个小东西离上次发文时已经有了相当大的变化了,其中影响最大的变化就是 ...
- ReactNative学习实践--动画初探之加载动画
学习和实践react已经有一段时间了,在经历了从最初的彷徨到解决痛点时的兴奋,再到不断实践后遭遇问题时的苦闷,确实被这一种新的思维方式和开发模式所折服,react不是万能的,在很多场景下滥用反而会适得 ...
随机推荐
- pytest 8 参数化parametrize
pytest.mark.parametrize装饰器可以实现用例参数化 1.以下是一个实现检查一定的输入和期望输出测试功能的典型例子 import pytest @pytest.mark.parame ...
- 实现迁徙学习-《Tensorflow 实战Google深度学习框架》代码详解
为了实现迁徙学习,首先是数据集的下载 #利用curl下载数据集 curl -o flower_photos.tgz http://download.tensorflow.org/example_ima ...
- linux umask使用方法
A 什么是umask? 当我们登录系统之后创建一个文件总是有一个默认权限的,那么这个权限是怎么来的呢?这就是umask干的事情.umask设置了用户创建文件的默认 权限,它与chmod的效果刚好相 ...
- smarty缓存
huancun.php代码 <?php$p =1;if( !empty($_GET["page"])){ $p =$_GET["page"];}$file ...
- jquery 实现按回车键登录功能的写法
<script> //登录的逻辑函数 自己写 function submitFuc(){ var loginName= $("#loginName").val(); v ...
- springcloud学习笔记
一.什么是springcloud,有什么作用 Spring Cloud是一系列框架的有序集合.它利用Spring Boot的开发便利性巧妙地简化了分布式系统基础设施的开发,如服务发现注册.配置中心.消 ...
- WebAPI性能优化之压缩解压
有时候为了提升WebAPI的性能,减少响应时间,我们会使用压缩和解压,而现在大多数客户端浏览器都提供了内置的解压支持.在WebAPI请求的资源越大时,使用压缩对性能提升的效果越明显,而当请求的资源很小 ...
- bzoj千题计划317:bzoj4650: [Noi2016]优秀的拆分(后缀数组+差分)
https://www.lydsy.com/JudgeOnline/problem.php?id=4650 如果能够预处理出 suf[i] 以i结尾的形式为AA的子串个数 pre[i] 以i开头的形式 ...
- Nginx 学习笔记(十)介绍HTTP / 2服务器推送(译)
原文地址:https://www.nginx.com/blog/nginx-1-13-9-http2-server-push/ 我们很高兴地宣布,2018年2月20日发布的NGINX 1.13.9支持 ...
- PHP7 学习笔记(十一)使用phpstudy快速配置一个虚拟主机
说明:为了windows本地开发php方便,这里推荐使用PHP集成环境phpstudy. 目的:使用域名访问项目(tinywan.test) 1.官网:http://www.phpstudy.net ...