0 - Dataset

http://www.csc.kth.se/~att/Site/Animals.html
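
The data reader in section 1.3 expects each class in its own sub-directory of DATA_PATH, with the JPEGs inside an original/ folder, and the network assumes 19 classes (OUTPUT_C). A sketch of the expected layout (folder and file names are illustrative, taken from the code below):

animal_database/
    cougar/
        original/
            4400.jpg
            ...
    <other class folders>/
        original/
            *.jpg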

1 - Code

1.1 - Import Packages

import tensorflow as tf
import os, glob
import numpy as np
from skimage import io, transform

1.2 - Initialize Parameters

DATA_PATH = "animal_database/"   # root folder of the unpacked dataset
INPUT_W = 224                    # input width expected by VGG-16
INPUT_H = 224                    # input height
INPUT_C = 3                      # RGB channels
OUTPUT_C = 19                    # number of animal classes
TRAINING_STEPS = 50              # number of training iterations
MODEL_SAVE_PATH = "model"        # directory for checkpoints
MODEL_NAME = "model.ckpt"        # checkpoint file prefix
BATCH_SIZE = 64
LEARNING_RATE_BASE = 1e-6        # initial learning rate
LEARNING_RATE_DECAY = 0.99       # decay factor applied roughly once per epoch
MOMENTUM = 0.9
TRAIN_KEEP_PROB = 0.6            # dropout keep probability during training
VAL_KEEP_PROB = 1.0              # no dropout during validation
TEST_KEEP_PROB = 1.0             # no dropout at test time

1.3 - Build Data Reader

class DCdataset(object):
    def __init__(self, path, w, h, c, ratio=0.8):
        def onehot(n):
            l = np.zeros([OUTPUT_C])
            l[n] = 1
            return l

        print("Process images start")
        # Each sub-directory of `path` is one class; its images live in an original/ folder.
        cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
        x = []
        y = []
        for (i, folder) in enumerate(cate):
            for img_path in glob.glob(folder + "/original/*.jpg"):
                # print("reading the image: %s" % img_path)
                img = io.imread(img_path)
                img = transform.resize(img, (w, h, c))
                x.append(img)
                y.append(i)
        x = np.asarray(x, np.float32)
        y = np.asarray(y, np.int32)

        # Shuffle once, convert labels to one-hot, then split into training and validation sets.
        num_example = x.shape[0]
        arr = np.arange(num_example)
        np.random.shuffle(arr)
        x = x[arr]
        y = y[arr]
        x = np.asarray([np.reshape(x_, (w, h, c)) for x_ in x])
        y = np.asarray([onehot(y_) for y_ in y])
        s = int(num_example * ratio)
        self.x_train, self.x_val = x[:s], x[s:]
        self.y_train, self.y_val = y[:s], y[s:]
        self.train_size = s
        self.val_size = num_example - s
        print("Process images end")

    def next_batch(self, batch_size):
        # Sample a random training batch without replacement.
        arr = np.arange(self.train_size)
        np.random.shuffle(arr)
        arr = arr[:batch_size]
        batch_x = self.x_train[arr]
        batch_y = self.y_train[arr]
        return batch_x, batch_y

    def next_val_batch(self, batch_size):
        # Sample a random validation batch without replacement.
        arr = np.arange(self.val_size)
        np.random.shuffle(arr)
        arr = arr[:batch_size]
        batch_x = self.x_val[arr]
        batch_y = self.y_val[arr]
        return batch_x, batch_y
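
A quick usage sketch (assuming the dataset has been unpacked into DATA_PATH with the layout shown in section 0):

# Sketch: build the reader and inspect one training batch.
dataset = DCdataset(DATA_PATH, INPUT_W, INPUT_H, INPUT_C)
print(dataset.train_size, dataset.val_size)  # 80/20 split by default (ratio=0.8)
xs, ys = dataset.next_batch(BATCH_SIZE)
print(xs.shape)  # (64, 224, 224, 3) float32 images scaled to [0, 1] by transform.resize
print(ys.shape)  # (64, 19) one-hot labels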

1.4 - Build Network

def conv_op(input_op, name, kh, kw, n_out, dh, dw, p):
    # Convolution + bias + ReLU; appends its parameters to the list p.
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope + "w",
                                 shape=[kh, kw, n_in, n_out], dtype=tf.float32,
                                 # initializer=tf.truncated_normal_initializer(mean=0, stddev=10e-2))
                                 initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d(input_op, kernel, (1, dh, dw, 1), padding="SAME")
        bias_init_val = tf.constant(0.0, shape=[n_out], dtype=tf.float32)
        biases = tf.Variable(bias_init_val, trainable=True, name="b")
        z = tf.nn.bias_add(conv, biases)
        activation = tf.nn.relu(z, name=scope)
        p += [kernel, biases]
        return activation
def fc_op(input_op, name, n_out, p):
    # Fully connected layer with ReLU activation; appends its parameters to the list p.
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope + "w",
                                 shape=[n_in, n_out], dtype=tf.float32,
                                 initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.Variable(tf.constant(0.1, shape=[n_out],
                                         dtype=tf.float32), name="b")
        activation = tf.nn.relu_layer(input_op, kernel, biases, name=scope)
        p += [kernel, biases]
        return activation
def mpool_op(input_op, name, kh, kw, dh, dw):
    return tf.nn.max_pool(input_op,
                          ksize=[1, kh, kw, 1],
                          strides=[1, dh, dw, 1],
                          padding="SAME",
                          name=name)
def inference_op(input_op, keep_prob):
    p = []

    # Five VGG-16 conv blocks (64-64, 128-128, 256x3, 512x3, 512x3), each followed by 2x2 max pooling.
    conv1_1 = conv_op(input_op, name="conv1_1", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
    conv1_2 = conv_op(conv1_1, name="conv1_2", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
    pool1 = mpool_op(conv1_2, name="pool1", kh=2, kw=2, dh=2, dw=2)

    conv2_1 = conv_op(pool1, name="conv2_1", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
    conv2_2 = conv_op(conv2_1, name="conv2_2", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
    pool2 = mpool_op(conv2_2, name="pool2", kh=2, kw=2, dh=2, dw=2)

    conv3_1 = conv_op(pool2, name="conv3_1", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    conv3_2 = conv_op(conv3_1, name="conv3_2", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    conv3_3 = conv_op(conv3_2, name="conv3_3", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    pool3 = mpool_op(conv3_3, name="pool3", kh=2, kw=2, dh=2, dw=2)

    conv4_1 = conv_op(pool3, name="conv4_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv4_2 = conv_op(conv4_1, name="conv4_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv4_3 = conv_op(conv4_2, name="conv4_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    pool4 = mpool_op(conv4_3, name="pool4", kh=2, kw=2, dh=2, dw=2)

    conv5_1 = conv_op(pool4, name="conv5_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv5_2 = conv_op(conv5_1, name="conv5_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv5_3 = conv_op(conv5_2, name="conv5_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    pool5 = mpool_op(conv5_3, name="pool5", kh=2, kw=2, dh=2, dw=2)

    # Flatten pool5 and run the three fully connected layers; fc8 is the class-score layer.
    shp = pool5.get_shape()
    flattened_shape = shp[1].value * shp[2].value * shp[3].value
    resh1 = tf.reshape(pool5, [-1, flattened_shape], name="resh1")

    fc6 = fc_op(resh1, name="fc6", n_out=4096, p=p)
    fc6_drop = tf.nn.dropout(fc6, keep_prob, name="fc6_drop")
    fc7 = fc_op(fc6_drop, name="fc7", n_out=4096, p=p)
    fc7_drop = tf.nn.dropout(fc7, keep_prob, name="fc7_drop")
    fc8 = fc_op(fc7_drop, name="fc8", n_out=OUTPUT_C, p=p)
    # softmax = tf.nn.softmax(fc8)
    # predictions = tf.argmax(softmax, 1)
    return fc8, p
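
A minimal shape sanity check for the graph above (a sketch only; it builds a throwaway graph, so run it in a fresh Python session rather than right before train(), otherwise tf.get_variable will complain about already-existing variables):

# Sketch: check the output shape of the VGG-16 graph defined above.
# For a 224x224 input, pool5 is 7x7x512, so resh1 is a 25088-dim vector
# and fc8 produces one score per class.
images = tf.placeholder(tf.float32, [None, INPUT_W, INPUT_H, INPUT_C])
kp = tf.placeholder(tf.float32)
scores, params = inference_op(images, kp)
print(scores.get_shape())  # expected: (?, 19)
print(len(params))         # 13 conv + 3 fc layers, each appending [kernel, biases] -> 32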

1.5 - Train

def train():
    x = tf.placeholder(tf.float32, [None, INPUT_W, INPUT_H, INPUT_C], name="x-input")
    y_ = tf.placeholder(tf.float32, [None, OUTPUT_C], name="y-input")
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")

    dataset = DCdataset(DATA_PATH, INPUT_W, INPUT_H, INPUT_C)
    global_step = tf.Variable(0, trainable=False)
    y, p = inference_op(x, keep_prob)

    # Labels are one-hot, so argmax recovers the class index expected by the sparse loss.
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1)))
    accuracy = tf.reduce_mean(tf.cast(
        tf.equal(tf.argmax(tf.nn.softmax(y), 1), tf.argmax(y_, 1)), tf.float32))

    # Decay the learning rate roughly once per epoch (train_size / BATCH_SIZE steps).
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        dataset.train_size / BATCH_SIZE,
        LEARNING_RATE_DECAY
    )
    optimizer = tf.train.MomentumOptimizer(learning_rate, MOMENTUM).minimize(loss, global_step=global_step)
    # tf.reset_default_graph()

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        saver = tf.train.Saver()
        for i in range(TRAINING_STEPS):
            xs, ys = dataset.next_batch(BATCH_SIZE)
            _, loss_value, accuracy_value, step = sess.run(
                [optimizer, loss, accuracy, global_step],
                feed_dict={x: xs, y_: ys, keep_prob: TRAIN_KEEP_PROB})
            print("After %d training step(s), loss on training batch is %g, accuracy on training batch is %g%%." % (step, loss_value, accuracy_value * 100))

            if i % 2 == 0:
                # Evaluate on a validation batch without running the optimizer,
                # so the weights are not updated on validation data.
                xs, ys = dataset.next_val_batch(BATCH_SIZE)
                loss_value, accuracy_value, step = sess.run(
                    [loss, accuracy, global_step],
                    feed_dict={x: xs, y_: ys, keep_prob: VAL_KEEP_PROB})
                print("[Validation] Step %d: Validation loss is %g and Validation accuracy is %g%%." % (step, loss_value, accuracy_value * 100))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

train()
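
For reference, with staircase left at its default (False), tf.train.exponential_decay applies a smooth per-step decay. A small sketch of the schedule configured above (plain Python, no TensorFlow needed):

# Sketch: the decayed learning rate after `step` optimizer updates.
# decay_steps = train_size / BATCH_SIZE, i.e. roughly one epoch.
def decayed_lr(step, decay_steps):
    return LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (step / decay_steps)

# e.g. after one full epoch the rate has dropped from 1e-6 to 0.99e-6.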

1.6 - Test

def test(img_path, model_path):
    with tf.Session() as sess:
        # Restore the graph structure and the trained weights from the checkpoint.
        saver = tf.train.import_meta_graph(model_path + ".meta")
        saver.restore(sess, model_path)
        graph = tf.get_default_graph()
        x = graph.get_tensor_by_name("x-input:0")
        keep_prob = graph.get_tensor_by_name("keep_prob:0")
        fc8 = graph.get_tensor_by_name("fc8:0")

        # Load and resize a single test image, then run it through the network.
        img = io.imread(img_path)
        img = transform.resize(img, (INPUT_W, INPUT_H, INPUT_C))
        y = sess.run(fc8, feed_dict={
            x: np.reshape(img, [-1, INPUT_W, INPUT_H, INPUT_C]),
            keep_prob: TEST_KEEP_PROB
        })
        softmax = tf.nn.softmax(y)
        prediction_labels = tf.argmax(softmax, 1)
        print("label: ", sess.run(prediction_labels))
        # print("probabilities: ", sess.run(softmax))

img_path = os.path.join(DATA_PATH, "cougar", "original", "4400.jpg")
model_path = os.path.join(MODEL_SAVE_PATH, MODEL_NAME + "-2")
test(img_path, model_path)
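
The printed label is just a class index. To map it back to a folder name, one can rebuild the same (unsorted) os.listdir ordering that DCdataset used when it assigned labels; a sketch, where pred stands in for the printed index:

# Sketch: recover the class-folder name for a predicted index.
# Note: os.listdir order is not guaranteed to be identical across platforms,
# so in practice the class ordering should be saved alongside the checkpoint.
classes = [d for d in os.listdir(DATA_PATH) if os.path.isdir(DATA_PATH + d)]
pred = 0  # hypothetical index printed by test()
print(classes[pred])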
