import os
import numpy as np
from skimage import transform, io

# List the per-class sub-folders under the dataset root
fileList = os.listdir("F:\\data\\flowers")
trainDataList = []
trainLabel = []
testDataList = []
testLabel = []
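
For orientation, the loader assumes one sub-folder per flower class under the dataset root; a sketch of the expected layout (the five folder names are an assumption based on the common five-class flowers dataset, matching NUM_LABELS = 5 later in this post):

# Assumed on-disk layout (folder names are illustrative, not from the original post):
# F:\data\flowers\
#     daisy\       *.jpg
#     dandelion\   *.jpg
#     rose\        *.jpg
#     sunflower\   *.jpg
#     tulip\       *.jpg
# The index j of each sub-folder becomes that class's integer label.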

for j in range(len(fileList)):
    data = os.listdir("F:\\data\\flowers\\" + fileList[j])
    # Shuffle once, then hold out 25% of each class as the test split
    np.random.shuffle(data)
    testNum = int(len(data) * 0.25)
    split = len(data) - testNum
    trainData = np.array(data[:split])
    testData = np.array(data[split:])
    for i in range(len(trainData)):
        if trainData[i][-3:] == "jpg":
            image = io.imread("F:\\data\\flowers\\" + fileList[j] + "\\" + trainData[i])
            image = transform.resize(image, (64, 64))
            trainDataList.append(image)
            trainLabel.append(int(j))
    for i in range(len(testData)):
        if testData[i][-3:] == "jpg":
            image = io.imread("F:\\data\\flowers\\" + fileList[j] + "\\" + testData[i])
            image = transform.resize(image, (64, 64))
            testDataList.append(image)
            testLabel.append(int(j))
print("Finished reading the image data...")

print(np.shape(trainDataList))
print(np.shape(trainLabel))
print(np.shape(testDataList))
print(np.shape(testLabel))

print("正在写磁盘...")
np.save("G:\\trainDataList",trainDataList)
np.save("G:\\trianLabel",trianLabel)
np.save("G:\\testDataList",testDataList)
np.save("G:\\testLabel",testLabel)
print("数据处理完了...")

import numpy as np
from keras.utils import to_categorical

trainLabel = np.load("G:\\trainLabel.npy")
testLabel = np.load("G:\\testLabel.npy")
trainLabel_encoded = to_categorical(trainLabel)
testLabel_encoded = to_categorical(testLabel)
np.save("G:\\trainLabel", trainLabel_encoded)
np.save("G:\\testLabel", testLabel_encoded)
print("One-hot encoded labels written back to disk...")

import random
import numpy as np

trainDataList = np.load("G:\\trainDataList.npy")
trainLabel = np.load("G:\\trainLabel.npy")
print("Data loaded...")
trainIndex = [i for i in range(len(trainLabel))]
random.shuffle(trainIndex)
trainData = []
trainClass = []
for i in range(len(trainIndex)):
    trainData.append(trainDataList[trainIndex[i]])
    trainClass.append(trainLabel[trainIndex[i]])
print("Training data shuffled...")
np.save("G:\\trainDataList", trainData)
np.save("G:\\trainLabel", trainClass)
print("Training data written to disk...")

testDataList = np.load("G:\\testDataList.npy")
testLabel = np.load("G:\\testLabel.npy")
testIndex = [i for i in range(len(testLabel))]
random.shuffle(testIndex)
testData = []
testClass = []
for i in range(len(testIndex)):
    testData.append(testDataList[testIndex[i]])
    testClass.append(testLabel[testIndex[i]])
print("Test data shuffled...")
np.save("G:\\testDataList", testData)
np.save("G:\\testLabel", testClass)
print("Test data written to disk...")

# coding: utf-8

import tensorflow as tf
from random import shuffle

INPUT_NODE = 64 * 64
OUT_NODE = 5
IMAGE_SIZE = 64
NUM_CHANNELS = 3
NUM_LABELS = 5

# Size and depth of the first convolutional layer
CONV1_DEEP = 16
CONV1_SIZE = 5
# Size and depth of the second convolutional layer
CONV2_DEEP = 32
CONV2_SIZE = 5
# Number of nodes in the first fully connected layer
FC_SIZE = 512

def inference(input_tensor, train, regularizer):
    # Convolution 1
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.Variable(tf.random_normal([CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP], stddev=0.1), name='weight')
        tf.summary.histogram('convLayer1/weights1', conv1_weights)
        conv1_biases = tf.Variable(tf.random_normal([CONV1_DEEP]), name="bias")
        tf.summary.histogram('convLayer1/bias1', conv1_biases)
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        tf.summary.histogram('convLayer1/conv1', conv1)
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
        tf.summary.histogram('convLayer1/relu1', relu1)
    # Pooling 1
    with tf.variable_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        tf.summary.histogram('convLayer1/pool1', pool1)
    # Convolution 2
    with tf.variable_scope('layer3-conv2'):
        conv2_weights = tf.Variable(tf.random_normal([CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP], stddev=0.1), name='weight')
        tf.summary.histogram('convLayer2/weights2', conv2_weights)
        conv2_biases = tf.Variable(tf.random_normal([CONV2_DEEP]), name="bias")
        tf.summary.histogram('convLayer2/bias2', conv2_biases)
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        tf.summary.histogram('convLayer2/conv2', conv2)
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
        tf.summary.histogram('convLayer2/relu2', relu2)
    # Pooling 2
    with tf.variable_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        tf.summary.histogram('convLayer2/pool2', pool2)
    # Flatten: volume of the last pooled feature map (height * width * channels)
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    # Reshape pool2 into a batch of row vectors with `nodes` columns
    reshaped = tf.reshape(pool2, [-1, nodes])

    # Fully connected layer 1
    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.Variable(tf.random_normal([nodes, FC_SIZE], stddev=0.1), name='weight')
        if regularizer is not None:
            tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(0.03)(fc1_weights))
        fc1_biases = tf.Variable(tf.random_normal([FC_SIZE]), name="bias")
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)
    # Fully connected layer 2
    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.Variable(tf.random_normal([FC_SIZE, 64], stddev=0.1), name="weight")
        if regularizer is not None:
            tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(0.03)(fc2_weights))
        fc2_biases = tf.Variable(tf.random_normal([64]), name="bias")
        fc2 = tf.nn.relu(tf.matmul(fc1, fc2_weights) + fc2_biases)
        if train:
            fc2 = tf.nn.dropout(fc2, 0.5)
    # Output layer (raw logits, no softmax)
    with tf.variable_scope('layer7-fc3'):
        fc3_weights = tf.Variable(tf.random_normal([64, NUM_LABELS], stddev=0.1), name="weight")
        if regularizer is not None:
            tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(0.03)(fc3_weights))
        fc3_biases = tf.Variable(tf.random_normal([NUM_LABELS]), name="bias")
        logit = tf.matmul(fc2, fc3_weights) + fc3_biases
    return logit
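
A quick shape check (a minimal sketch under TF1 graph semantics, not part of the original post): after two SAME-padded 2x2 max-pools the 64x64 input shrinks to 16x16, so `nodes` evaluates to 16 * 16 * CONV2_DEEP = 8192, and the logits come out as one row of NUM_LABELS scores per image:

import tensorflow as tf

# Build the probe in its own graph so it does not pollute the training graph below
g = tf.Graph()
with g.as_default():
    probe = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS])
    logits = inference(probe, train=False, regularizer=None)
    print(logits.get_shape().as_list())  # expected: [None, 5]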

import time
import numpy as np
import tensorflow as tf

X = np.load("G:\\trainDataList.npy")
Y = np.load("G:\\trainLabel.npy")
print(np.shape(X))
print(np.shape(Y))

batch_size = 10
n_classes = 5
epochs = 16              # number of passes over the training set
learning_rate = 1e-4
batch_num = int(np.shape(X)[0] / batch_size)
dropout = 0.75           # defined but unused: keep_prob was never wired into the graph

x = tf.placeholder(tf.float32, [None, 64, 64, 3])
y = tf.placeholder(tf.float32, [None, n_classes])
# keep_prob = tf.placeholder(tf.float32)

# Load the test set
test_X = np.load("G:\\testDataList.npy")
test_Y = np.load("G:\\testLabel.npy")
print(np.shape(test_X))
print(np.shape(test_Y))
back = 64                       # test batch size
ro = int(len(test_X) / back)    # number of full test batches

# Build the network. Any non-None regularizer argument enables the L2 terms inside
# inference(); train=True keeps dropout active, including during the test
# evaluation below, which this script does not switch off.
pred = inference(x, True, True)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
# Add the L2 regularization terms collected in inference() to the loss
cost += tf.add_n(tf.get_collection('losses'))

# Any one of these three optimizers will do
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# optimizer = tf.train.GradientDescentOptimizer(0.001).minimize(cost)
# optimizer = tf.train.MomentumOptimizer(0.001, 0.9).minimize(cost)

# Compare predicted labels with the ground truth
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Fraction of correct predictions
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
merged = tf.summary.merge_all()
# Initializer for all TensorFlow variables
init = tf.global_variables_initializer()
start_time = time.time()

with tf.Session() as sess:
    sess.run(init)
    # Write the graph and summaries for TensorBoard
    writer = tf.summary.FileWriter('F:/Flower_graph', sess.graph)
    for i in range(epochs):
        for j in range(batch_num):
            offset = (j * batch_size) % (Y.shape[0] - batch_size)
            # Slice out the next mini-batch
            batch_data = X[offset:(offset + batch_size), :]
            batch_labels = Y[offset:(offset + batch_size), :]
            sess.run(optimizer, feed_dict={x: batch_data, y: batch_labels})
            result = sess.run(merged, feed_dict={x: batch_data, y: batch_labels})
            writer.add_summary(result, i)
        # Report loss and accuracy on the last mini-batch of the epoch
        loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_data, y: batch_labels})
        print("Epoch:", '%04d' % (i + 1), "cost=", "{:.9f}".format(loss), "Training accuracy", "{:.5f}".format(acc * 100))
    writer.close()
    print("######################## Training finished, test evaluation follows ###################")
    # Evaluate in batches of `back` images and average over the batches
    total_acc = 0.0
    for i in range(ro):
        s = i * back
        e = s + back
        test_accuracy = sess.run(accuracy, feed_dict={x: test_X[s:e], y: test_Y[s:e]})
        total_acc += test_accuracy
        print("step:%d test accuracy = %.4f%%" % (i, test_accuracy * 100))
    print("Final test accuracy = %.4f%%" % (total_acc / ro * 100))

end_time = time.time()
print('Times:',(end_time-start_time))
print('Optimization Completed')
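
One gap worth noting: the script never persists the trained weights, so they are lost when the session closes. A minimal sketch with tf.train.Saver (the checkpoint path is an assumption) would go inside the `with tf.Session() as sess:` block above, after the training loop:

# Hypothetical addition, placed before the session closes:
saver = tf.train.Saver()                                      # captures all variables in the graph
save_path = saver.save(sess, "G:\\flower_model\\model.ckpt")  # assumed output path
print("Model checkpoint written to", save_path)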
