0 - DataSet

The animal image dataset used here (19 classes, matching OUTPUT_C in section 1.2) is available at:

http://www.csc.kth.se/~att/Site/Animals.html

1 - Code

1.1 - Import Packages

import tensorflow as tf
import os, glob
import numpy as np
from skimage import io, transform

1.2 - Initialize Parameters

DATA_PATH = "animal_database/"
INPUT_W = 224
INPUT_H = 224
INPUT_C = 3
OUTPUT_C = 19
TRAINING_STEPS = 50
MODEL_SAVE_PATH = "model"
MODEL_NAME = "model.ckpt"
BATCH_SIZE = 64
LEARNING_RATE_BASE = 1e-6
LEARNING_RATE_DECAY = 0.99
MOMENTUM = 0.9
TRAIN_KEEP_PROB = 0.6
VAL_KEEP_PROB = 1.0
TEST_KEEP_PROB = 1.0
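
The learning rate in section 1.5 follows tf.train.exponential_decay, i.e. lr(step) = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step / decay_steps). A quick sketch of the schedule with the values above; in the real script decay_steps is derived from the dataset size (dataset.train_size / BATCH_SIZE), so the 20 used here is only illustrative:

decay_steps = 20  # illustrative; the training code computes train_size / BATCH_SIZE
for step in (0, 20, 40):
    lr = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (step / decay_steps)
    print(step, lr)   # 1e-06, 9.9e-07, 9.801e-07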

1.3 - Build Data Reader

class DCdataset(object):

    def __init__(self, path, w, h, c, ratio=0.8):

        def onehot(n):
            l = np.zeros([OUTPUT_C])
            l[n] = 1
            return l

        print("Process images start")
        cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
        x = []
        y = []
        for (i, folder) in enumerate(cate):
            for img_path in glob.glob(folder + "/original/*.jpg"):
                # print("reading the image: %s" % img_path)
                img = io.imread(img_path)
                img = transform.resize(img, (w, h, c))
                x.append(img)
                y.append(i)
        x = np.asarray(x, np.float32)
        y = np.asarray(y, np.int32)

        num_example = x.shape[0]
        arr = np.arange(num_example)
        np.random.shuffle(arr)
        x = x[arr]
        y = y[arr]

        x = np.asarray([np.reshape(x_, (w, h, c)) for x_ in x])
        y = np.asarray([onehot(y_) for y_ in y])

        s = int(num_example * ratio)
        self.x_train, self.x_val = x[:s], x[s:]
        self.y_train, self.y_val = y[:s], y[s:]
        self.train_size = s
        self.val_size = num_example - s
        print("Process images end")

    def next_batch(self, batch_size):
        arr = np.arange(self.train_size)
        np.random.shuffle(arr)
        arr = arr[:batch_size]
        batch_x = self.x_train[arr]
        batch_y = self.y_train[arr]
        return batch_x, batch_y

    def next_val_batch(self, batch_size):
        arr = np.arange(self.val_size)
        np.random.shuffle(arr)
        arr = arr[:batch_size]
        batch_x = self.x_val[arr]
        batch_y = self.y_val[arr]
        return batch_x, batch_y
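
A minimal usage sketch for the reader class, assuming DATA_PATH contains one sub-directory per class with an original/ folder of .jpg images (the layout the glob pattern above expects):

dataset = DCdataset(DATA_PATH, INPUT_W, INPUT_H, INPUT_C)
xs, ys = dataset.next_batch(BATCH_SIZE)
print(xs.shape)   # (64, 224, 224, 3) float32 images, given at least 64 training samples
print(ys.shape)   # (64, 19) one-hot labels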

1.4 - Build Network

def conv_op(input_op, name, kh, kw, n_out, dh, dw, p):
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope + "w",
                                 shape=[kh, kw, n_in, n_out], dtype=tf.float32,
                                 # initializer=tf.truncated_normal_initializer(mean=0, stddev=10e-2))
                                 initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d(input_op, kernel, (1, dh, dw, 1), padding="SAME")
        bias_init_val = tf.constant(0.0, shape=[n_out], dtype=tf.float32)
        biases = tf.Variable(bias_init_val, trainable=True, name="b")
        z = tf.nn.bias_add(conv, biases)
        activation = tf.nn.relu(z, name=scope)
        p += [kernel, biases]
        return activation

def fc_op(input_op, name, n_out, p):
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope + "w",
                                 shape=[n_in, n_out], dtype=tf.float32,
                                 initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.Variable(tf.constant(0.1, shape=[n_out],
                                         dtype=tf.float32), name="b")
        activation = tf.nn.relu_layer(input_op, kernel, biases, name=scope)
        p += [kernel, biases]
        return activation

def mpool_op(input_op, name, kh, kw, dh, dw):
    return tf.nn.max_pool(input_op,
                          ksize=[1, kh, kw, 1],
                          strides=[1, dh, dw, 1],
                          padding="SAME",
                          name=name)
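
Both ops use padding="SAME", so the output spatial size is ceil(input_size / stride) regardless of kernel size: the stride-1 convolutions preserve the spatial dimensions and each 2x2 stride-2 pool halves them. A quick check of that rule:

import math

def same_out(size, stride):
    # SAME padding: output spatial size depends only on the stride
    return math.ceil(size / stride)

print(same_out(224, 1))   # 224 -- conv layers keep the size
print(same_out(224, 2))   # 112 -- each pool halves it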
def inference_op(input_op, keep_prob):
    p = []

    conv1_1 = conv_op(input_op, name="conv1_1", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
    conv1_2 = conv_op(conv1_1, name="conv1_2", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
    pool1 = mpool_op(conv1_2, name="pool1", kh=2, kw=2, dh=2, dw=2)

    conv2_1 = conv_op(pool1, name="conv2_1", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
    conv2_2 = conv_op(conv2_1, name="conv2_2", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
    pool2 = mpool_op(conv2_2, name="pool2", kh=2, kw=2, dh=2, dw=2)

    conv3_1 = conv_op(pool2, name="conv3_1", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    conv3_2 = conv_op(conv3_1, name="conv3_2", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    conv3_3 = conv_op(conv3_2, name="conv3_3", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    pool3 = mpool_op(conv3_3, name="pool3", kh=2, kw=2, dh=2, dw=2)

    conv4_1 = conv_op(pool3, name="conv4_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv4_2 = conv_op(conv4_1, name="conv4_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv4_3 = conv_op(conv4_2, name="conv4_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    pool4 = mpool_op(conv4_3, name="pool4", kh=2, kw=2, dh=2, dw=2)

    conv5_1 = conv_op(pool4, name="conv5_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv5_2 = conv_op(conv5_1, name="conv5_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv5_3 = conv_op(conv5_2, name="conv5_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    pool5 = mpool_op(conv5_3, name="pool5", kh=2, kw=2, dh=2, dw=2)

    shp = pool5.get_shape()
    flattened_shape = shp[1].value * shp[2].value * shp[3].value
    resh1 = tf.reshape(pool5, [-1, flattened_shape], name="resh1")

    fc6 = fc_op(resh1, name="fc6", n_out=4096, p=p)
    fc6_drop = tf.nn.dropout(fc6, keep_prob, name="fc6_drop")
    fc7 = fc_op(fc6_drop, name="fc7", n_out=4096, p=p)
    fc7_drop = tf.nn.dropout(fc7, keep_prob, name="fc7_drop")
    fc8 = fc_op(fc7_drop, name="fc8", n_out=OUTPUT_C, p=p)
    # softmax = tf.nn.softmax(fc8)
    # predictions = tf.argmax(softmax, 1)
    return fc8, p
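
Five stride-2 pools shrink the 224x224 input to 7x7 before the reshape, so flattened_shape works out to 7 * 7 * 512 = 25088 inputs to fc6. A quick check:

size = INPUT_W            # 224
for _ in range(5):        # pool1 .. pool5 each halve the spatial size
    size //= 2            # 224 -> 112 -> 56 -> 28 -> 14 -> 7
print(size * size * 512)  # 25088, the width of the fc6 input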

1.5 - Train

def train():
    x = tf.placeholder(tf.float32, [None, INPUT_W, INPUT_H, INPUT_C], name="x-input")
    y_ = tf.placeholder(tf.float32, [None, OUTPUT_C], name="y-input")
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")

    dataset = DCdataset(DATA_PATH, INPUT_W, INPUT_H, INPUT_C)
    global_step = tf.Variable(0, trainable=False)

    y, p = inference_op(x, keep_prob)
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1)))
    accuracy = tf.reduce_mean(tf.cast(
        tf.equal(tf.argmax(tf.nn.softmax(y), 1), tf.argmax(y_, 1)), tf.float32))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        dataset.train_size / BATCH_SIZE,
        LEARNING_RATE_DECAY
    )
    optimizer = tf.train.MomentumOptimizer(learning_rate, MOMENTUM).minimize(
        loss, global_step=global_step)
    # tf.reset_default_graph()

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        saver = tf.train.Saver()
        for i in range(TRAINING_STEPS):
            xs, ys = dataset.next_batch(BATCH_SIZE)
            _, loss_value, accuracy_value, step = sess.run(
                [optimizer, loss, accuracy, global_step],
                feed_dict={x: xs, y_: ys, keep_prob: TRAIN_KEEP_PROB})
            print("After %d training step(s), loss on training batch is %g, accuracy on training batch is %g%%." % (step, loss_value, accuracy_value * 100))
            if i % 2 == 0:
                xs, ys = dataset.next_val_batch(BATCH_SIZE)
                # evaluation only: the optimizer must not run on validation data
                loss_value, accuracy_value = sess.run(
                    [loss, accuracy],
                    feed_dict={x: xs, y_: ys, keep_prob: VAL_KEEP_PROB})
                print("[Validation] Step %d: Validation loss is %g and Validation accuracy is %g%%." % (step, loss_value, accuracy_value * 100))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

train()
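
Because saver.save is called with global_step, each checkpoint file name gets the current step appended (e.g. model/model.ckpt-3.meta), which is what the test section below restores. Rather than hard-coding a step suffix, the newest checkpoint can be looked up; a small sketch:

ckpt = tf.train.latest_checkpoint(MODEL_SAVE_PATH)   # e.g. "model/model.ckpt-49"
print(ckpt)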

1.6 - Test

def test(img_path, model_path):
    with tf.Session() as sess:
        saver = tf.train.import_meta_graph(model_path + ".meta")
        saver.restore(sess, model_path)
        graph = tf.get_default_graph()

        x = graph.get_tensor_by_name("x-input:0")
        keep_prob = graph.get_tensor_by_name("keep_prob:0")
        fc8 = graph.get_tensor_by_name("fc8:0")

        img = io.imread(img_path)
        img = transform.resize(img, (INPUT_W, INPUT_H, INPUT_C))
        y = sess.run(fc8, feed_dict={
            x: np.reshape(img, [-1, INPUT_W, INPUT_H, INPUT_C]),
            keep_prob: TEST_KEEP_PROB
        })
        softmax = tf.nn.softmax(y)
        prediction_labels = tf.argmax(softmax, 1)
        print("label: ", sess.run(prediction_labels))
        print("probabilities: ", sess.run(softmax))

img_path = os.path.join(DATA_PATH, "cougar", "original", "4400.jpg")
model_path = os.path.join(MODEL_SAVE_PATH, MODEL_NAME + "-2")  # the suffix must match the global_step of a saved checkpoint
test(img_path, model_path)
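
The index printed above is just the position of the class folder in the os.listdir enumeration that DCdataset used. A sketch for mapping it back to a class name, assuming the same directory listing order as at training time (os.listdir order is filesystem-dependent, so this only holds on the same machine and layout):

classes = [d for d in os.listdir(DATA_PATH) if os.path.isdir(DATA_PATH + d)]
pred = 3   # hypothetical index taken from prediction_labels above
print("predicted class:", classes[pred])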
