Wrapping TensorFlow Neural Networks
To take part in this year's Software Cup design competition, I have picked up a lot of new material over the past few months. Now that the second-round optimization of our entry has been submitted, I am starting to summarize and record what I learned over these four months.
This post is about building neural networks with TensorFlow. TensorFlow already wraps the low-level mechanics of neural networks, making deep learning simple and approachable: even someone who does not understand the underlying algorithms can assemble all kinds of network models with little effort. To make building networks even more convenient, I wrote my own wrapper on top of TensorFlow.
The wrapper code for the fully connected network:
import tensorflow as tf
# Timer is a project-local helper used for the training progress display;
# a minimal sketch of it follows the class below.

class NN:
    '''
    shape: dimensions of the training data; shape[0] is the input feature
    dimension, shape[1] is the label dimension
    '''
    def __init__(self, shape):
        with tf.name_scope("inputs"):
            self._xs = tf.placeholder(tf.float32, [None, shape[0]], name="features")
            self._ys = tf.placeholder(tf.float32, [None, shape[1]], name="labels")
            self._keep_prob = tf.placeholder(tf.float32, name="keep_prob")
        # output of every layer, starting with the input placeholder
        self._layers_out_lst = [self._xs]
        # loss value
        self._loss = None
        # prediction op
        self._prediction = None
        # running session
        self._sess = None
        # training step
        self._train_step = None
        self._saver = None
        # progress timer
        self._timer = None

    '''
    Add a fully connected layer.
    '''
    def add_layer(self, in_size, out_size, activation_function=None, name=None):
        with tf.name_scope(name):
            with tf.name_scope("{}_weight".format(name)):
                # weight matrix
                weights = tf.Variable(tf.random_normal([in_size, out_size], dtype=tf.float32), name="weight")
            with tf.name_scope("{}_biases".format(name)):
                # bias vector
                biases = tf.Variable(tf.zeros([1, out_size], dtype=tf.float32) + 0.1)
            with tf.name_scope("{}_w_plus_b".format(name)):
                # linear combination of the previous layer's output
                wx_plus_b = tf.matmul(self._layers_out_lst[-1], weights) + biases
            with tf.name_scope("{}_output".format(name)):
                if activation_function is None:
                    outputs = wx_plus_b
                else:
                    outputs = activation_function(wx_plus_b)
            tf.summary.histogram("{}_output".format(name), outputs)
            self._prediction = outputs
            self._layers_out_lst.append(outputs)

    def add_loss_layer(self, cost_function, name=None):
        with tf.name_scope("loss"):
            self._loss = cost_function(
                onehot_labels=self._ys, logits=self._layers_out_lst[-1])
            tf.summary.scalar("loss", self._loss)

    '''
    Add an optimizer; optimizer is one of TensorFlow's optimizer classes,
    lr is the learning rate.
    '''
    def add_optimizer(self, optimizer, lr=0.01):
        with tf.name_scope("train_step"):
            self._train_step = optimizer(lr).minimize(self._loss)

    def add_dropout(self, name=None):
        with tf.name_scope(name):
            x = self._layers_out_lst[-1]
            x = tf.nn.dropout(x, keep_prob=self._keep_prob, name=name)
            self._layers_out_lst.append(x)

    '''
    inputs: training features
    labels: training labels
    echop: number of training iterations
    '''
    def fit(self, inputs, labels, echop=100, keep_prob=0.5, savepath=None, logdir='logs', step=10):
        # if savepath is not None, the trained model will be saved
        if savepath is not None:
            self._saver = tf.train.Saver()
        # variable initializer
        init = tf.global_variables_initializer()
        # if the session is None, no model has been loaded yet, so open one
        if self._sess is None:
            self._sess = tf.Session()
        # record the training process
        merge = tf.summary.merge_all()
        # TensorBoard writer
        writer = tf.summary.FileWriter(logdir, self._sess.graph)
        # run the initializer
        self._sess.run(init)
        self.initProgressbar()
        for i in range(1, echop + 1):
            feed_dict = {self._xs: inputs, self._ys: labels, self._keep_prob: keep_prob}
            self._sess.run(self._train_step, feed_dict=feed_dict)
            if i % step == 0:
                g = self._sess.run(merge, feed_dict=feed_dict)
                writer.add_summary(g, global_step=i)
                cost = self._sess.run(self._loss, feed_dict=feed_dict)
                print("current loss: %s" % str(cost), end='')
                self.showProgressBar(i / echop)
                if savepath:
                    self._saver.save(self._sess, savepath, global_step=i)

    def predict(self, inputs):
        prediction = tf.argmax(self._prediction, 1)
        res = self._sess.run(prediction, feed_dict={self._xs: inputs, self._keep_prob: 1.0})
        return res

    def evaluate(self, inputs, labels):
        correct_prediction = tf.equal(tf.argmax(self._prediction, 1), tf.argmax(labels, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        result = self._sess.run(accuracy, feed_dict={self._xs: inputs, self._ys: labels, self._keep_prob: 1.0})
        return result

    def restore(self, savepath):
        self._saver = tf.train.Saver()
        self._sess = tf.Session()
        self._saver.restore(self._sess, savepath)

    def initProgressbar(self):
        self._timer = Timer()

    def showProgressBar(self, rate):
        s = self._timer.format("%dh %dm %ds %dms")
        print(s, "%4.2f%%" % (rate * 100))

    def __del__(self):
        if self._sess is not None:
            self._sess.close()
With the wrapper in place, here is the code to build a neural network:
def getNetWork(initshape=(205, 2), path=None):
    # create a neural network object
    nn = NN(initshape)
    # build the network structure
    nn.add_layer(initshape[0], 128, tf.nn.relu, name="input_layer")
    nn.add_layer(128, 64, tf.nn.sigmoid, name="hidden_layer_1")
    nn.add_dropout(name="dropout_1")
    nn.add_layer(64, 32, tf.nn.tanh, name="hidden_layer_2")
    nn.add_dropout(name="dropout_2")
    nn.add_layer(32, 16, tf.nn.softplus, name="hidden_layer_3")
    nn.add_layer(16, 2, tf.nn.elu, name="hidden_layer_4")
    nn.add_dropout(name="dropout_3")
    nn.add_loss_layer(tf.losses.softmax_cross_entropy, name="cost_layer")
    nn.add_optimizer(tf.train.AdamOptimizer, lr=0.001)
    return nn
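For completeness, training and evaluating a wrapped network might look like the sketch below. The random data is purely illustrative; the original post trains on competition data that is not shown here:

import numpy as np

nn = getNetWork()
# 500 fake samples with 205 features and one-hot labels over 2 classes
X = np.random.rand(500, 205).astype(np.float32)
y = np.eye(2)[np.random.randint(0, 2, size=500)].astype(np.float32)

nn.fit(X, y, echop=200, keep_prob=0.5, logdir='logs', step=20)
print("accuracy:", nn.evaluate(X, y))
print("predicted classes:", nn.predict(X[:10]))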
Wrapping a convolutional neural network:
import tensorflow as tf
import network  # the module containing the NN wrapper and its Timer helper

class CNN:
    '''
    x_shape: shape of the input features
    y_shape: shape of the labels
    savepath: where to save the model checkpoint
    logdir: directory for TensorBoard logs
    '''
    def __init__(self, **args):
        x_shape = args.get("x_shape")
        y_shape = args.get("y_shape")
        self._savepath = args.get("savepath")
        self._logdir = args.get("logdir", "log")
        with tf.name_scope("inputs_placeholders"):
            self._xs = tf.placeholder(tf.float32, x_shape)
            self._ys = tf.placeholder(tf.float32, y_shape)
            self._keep_prob = tf.placeholder(tf.float32)
        self._layer_outs = [self._xs]
        self._loss = None
        self._train_step = None
        self._sess = None
        self._saver = None

    def __del__(self):
        if self._sess is not None:
            if self._saver is not None:
                self._saver.save(self._sess, self._savepath)
            self._sess.close()

    '''
    features: training features
    labels: training labels
    keep_prob: dropout keep probability
    train_rate: fraction of the data set used for training
    echop: number of training iterations
    step: number of iterations between evaluations
    '''
    def fit(self, features, labels, keep_prob=0.8, train_rate=1, echop=100, step=10):
        # number of training samples
        length = int(train_rate * len(features))
        def b(i):
            # ignore the batch index: always feed the same training slice
            return features[:length], labels[:length]
        self.fitbatch(b, keep_prob, echop=echop, step=step)

    def fitbatch(self, get_batch=None, keep_prob=0.8, echop=100, step=10):
        if self._savepath:
            self._saver = tf.train.Saver()
        # variable initializer
        init = tf.global_variables_initializer()
        # open a session
        self._sess = tf.Session()
        # record the training process
        merge = tf.summary.merge_all()
        # TensorBoard writer
        writer = tf.summary.FileWriter(self._logdir, self._sess.graph)
        # initialize the parameters
        self._sess.run(init)
        timer = network.Timer()
        # start training
        for counter in range(1, echop + 1):
            # cycle through the six training batches
            features, labels = get_batch(counter % 6)
            # build the feed dict
            feed_dict = {self._xs: features, self._ys: labels, self._keep_prob: keep_prob}
            if counter == 1:
                # print every layer's output shape once, as a sanity check
                for out in self._layer_outs:
                    tmp = self._sess.run(out, feed_dict=feed_dict)
                    print(tmp.shape)
            # one training step
            self._sess.run(self._train_step, feed_dict=feed_dict)
            # report progress periodically
            if counter % step == 0:
                cost = self._sess.run(self._loss, feed_dict=feed_dict)
                print(counter, '\tcost', cost, end='\t')
                timer.log()
                # record the summaries for TensorBoard
                g = self._sess.run(merge, feed_dict=feed_dict)
                writer.add_summary(g, global_step=counter)

    def restore(self):
        self._saver = tf.train.Saver()
        self._sess = tf.Session()
        self._saver.restore(self._sess, self._savepath)

    def predict(self, features):
        prediction = tf.argmax(self._layer_outs[-1], 1)
        return self._sess.run(prediction, feed_dict={self._xs: features, self._keep_prob: 1.0})

    def evaluate(self, features, labels):
        correct_prediction = tf.equal(tf.argmax(self._layer_outs[-1], 1), tf.argmax(self._ys, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        feed_dict = {self._xs: features, self._ys: labels, self._keep_prob: 1.0}
        return self._sess.run(accuracy, feed_dict=feed_dict)

    def _get_w_b(self, shape, stddev=0.01, name=None):
        with tf.name_scope("{}_weights".format(name)):
            initial = tf.truncated_normal(shape=shape, stddev=stddev)
            w = tf.Variable(initial, name="{}_weights".format(name))
        with tf.name_scope("{}_bias".format(name)):
            b = tf.Variable(tf.constant(0.1, shape=[shape[-1]]), name="{}_bias".format(name))
        return w, b

    '''
    shape: the filter shape -- height, width, input channels, output channels
    '''
    def add_layer(self, shape, stddev=0.01, strides=[1,10,10,1], padding='SAME', activate_function=tf.nn.relu, name=None):
        if name is None:
            name = "hidden_layer_%d" % len(self._layer_outs)
        with tf.name_scope(name):
            w, b = self._get_w_b(shape, stddev=stddev, name=name)
            outputs = tf.nn.conv2d(self._layer_outs[-1], w, strides=strides, padding=padding, name=name) + b
            if activate_function:
                outputs = activate_function(outputs)
            self._layer_outs.append(outputs)
            tf.summary.histogram("{}_output".format(name), outputs)

    '''
    ksize: pooling window size
    strides: strides
    padding: padding scheme
    '''
    def add_max_pool(self, ksize=[1,2,2,1], strides=[1,2,2,1], padding="SAME", name=None):
        if name is None:
            name = "max_pool_layer_%d" % len(self._layer_outs)
        with tf.name_scope(name):
            outputs = tf.nn.max_pool(self._layer_outs[-1], ksize=ksize, strides=strides, padding=padding, name=name)
            self._layer_outs.append(outputs)
            tf.summary.histogram("{}_output".format(name), outputs)

    def add_dropout(self, name=None):
        if name is None:
            name = "dropout_%d" % len(self._layer_outs)
        with tf.name_scope(name):
            outputs = tf.nn.dropout(self._layer_outs[-1], self._keep_prob, name=name)
            self._layer_outs.append(outputs)

    def add_cost_layer(self, cost_function, name=None):
        if name is None:
            name = "cost_layer"
        with tf.name_scope(name):
            self._loss = cost_function(self._ys, self._layer_outs[-1])
            tf.summary.scalar("loss", self._loss)

    def addOptimizer(self, optimizer, lr=0.01):
        self._train_step = optimizer(lr).minimize(self._loss)

    '''
    shape: shape[0] is the number of inputs from the previous layer,
    shape[1] is the number of neurons (outputs) in this layer
    '''
    def add_full_layer(self, shape, activate_function=tf.nn.relu, first=False, name=None):
        if name is None:
            name = "full_layer_%d" % len(self._layer_outs)
        with tf.name_scope(name):
            w, b = self._get_w_b(shape, name=name)
            if first:
                # flatten the conv output before the first fully connected layer
                x = tf.reshape(self._layer_outs[-1], [-1, shape[0]])
            else:
                x = self._layer_outs[-1]
            outputs = activate_function(tf.matmul(x, w) + b)
            self._layer_outs.append(outputs)
            tf.summary.histogram("{}_output".format(name), outputs)
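One detail worth calling out: the input size of the first fully connected layer must equal the flattened size of the last conv/pool output. With SAME padding, TensorFlow's output spatial size is ceil(input / stride). A small helper (not part of the original wrapper) makes the arithmetic behind the 12*12*16 figure used below explicit:

import math

def same_padding_out(size, stride):
    # output spatial size of a SAME-padded conv/pool layer
    return math.ceil(size / stride)

# tracing the network built in main() below: 102x102 input,
# stride-1 convs keep the size, two 3x3/stride-3 max pools shrink it
size = 102
size = same_padding_out(size, 3)   # first max pool  -> 34
size = same_padding_out(size, 3)   # second max pool -> 12
print(size * size * 16)            # 12*12*16 = 2304, the full layer's input size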
Code to build the convolutional network:
def main(isTrain=True):
    cnn = CNN(x_shape=[None,102,102,2],
              y_shape=[None,2],
              logdir=r"E:\iqaa\logs",
              savepath=r"E:\iqaa\model\model.ckpt")
    # three conv layers (5x5 filters, stride 1) interleaved with pooling and dropout
    cnn.add_layer([5,5,2,4], strides=[1,1,1,1])
    cnn.add_max_pool(ksize=[1,3,3,1], strides=[1,3,3,1])
    cnn.add_layer([5,5,4,8], strides=[1,1,1,1])
    cnn.add_dropout()
    cnn.add_layer([5,5,8,16], strides=[1,1,1,1])
    cnn.add_max_pool([1,3,3,1], [1,3,3,1])
    # flatten to 12*12*16 and finish with two fully connected layers
    cnn.add_full_layer([12*12*16,64], first=True)
    cnn.add_dropout()
    cnn.add_full_layer([64,2], first=False)
    cnn.add_cost_layer(lambda ys, y: tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=ys, logits=y)))
    cnn.addOptimizer(tf.train.AdamOptimizer, 0.001)
    if isTrain:
        # batches 0-5 are used for training, batch 6 for evaluation
        cnn.fitbatch(lambda i: DataUtil.getbatch(i), keep_prob=0.8, echop=1000, step=20)
        features, labels = DataUtil.getbatch(6)
        print(cnn.evaluate(features, labels))
    else:
        cnn.restore()
    return cnn
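The DataUtil.getbatch helper is not shown in the original post. From the calls above, it takes a batch index (0-5 cycled during training, 6 held out for evaluation) and returns a (features, labels) pair matching the placeholder shapes. A hypothetical stand-in for trying out the wrapper; the batch size of 32 and the random data are purely illustrative:

import numpy as np

class DataUtil:
    '''Hypothetical stand-in for the post's data loader: batch index i
    yields (features, labels) shaped to match the CNN placeholders.'''
    @staticmethod
    def getbatch(i):
        rng = np.random.RandomState(i)
        features = rng.rand(32, 102, 102, 2).astype(np.float32)
        labels = np.eye(2)[rng.randint(0, 2, size=32)].astype(np.float32)
        return features, labels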