BP code
# Neural network model for battery aging rate (SOH) estimation
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

path = 'SOH_Data.xlsx'
# Read and normalize the training set
xTrainData = pd.read_excel(path, sheet_name=0)
yTrainData = pd.read_excel(path, sheet_name=1)
n1 = np.shape(xTrainData)[1]
x_data = np.array(xTrainData).astype('float32')
for i in range(n1):
    # Min-max scale each feature column to [0, 1]
    x_data[:, i] = (x_data[:, i] - np.amin(x_data[:, i])) / (np.amax(x_data[:, i]) - np.amin(x_data[:, i]))
y_data = np.array(yTrainData).astype('float32')
y_data[:] = (y_data[:] - np.amin(y_data[:])) / (np.amax(y_data[:]) - np.amin(y_data[:]))
# Read and normalize the test set
xTestData = pd.read_excel(path, sheet_name=2)
yTestData = pd.read_excel(path, sheet_name=3)
xTest = np.array(xTestData).astype('float32')
n2 = np.shape(xTestData)[1]
for i in range(n2):
    xTest[:, i] = (xTest[:, i] - np.amin(xTest[:, i])) / (np.amax(xTest[:, i]) - np.amin(xTest[:, i]))
yTest = np.array(yTestData).astype('float32')
yTest[:] = (yTest[:] - np.amin(yTest[:])) / (np.amax(yTest[:]) - np.amin(yTest[:]))
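Note that the test set above is scaled with its own min/max; a common alternative is to reuse the training set's statistics so both sets share one scale. A minimal sketch of that alternative (the name xTest_alt is hypothetical, not the author's method):

# Alternative: scale the test features with the training set's min/max
# (assumes both sheets have the same feature columns in the same order)
train_min = np.amin(np.array(xTrainData).astype('float32'), axis=0)
train_max = np.amax(np.array(xTrainData).astype('float32'), axis=0)
xTest_alt = (np.array(xTestData).astype('float32') - train_min) / (train_max - train_min)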
# Parameter summaries for TensorBoard
def variable_summaries(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)  # mean
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)  # standard deviation
        tf.summary.scalar('max', tf.reduce_max(var))  # maximum
        tf.summary.scalar('min', tf.reduce_min(var))  # minimum
        tf.summary.histogram('histogram', var)  # histogram
# Five-layer network: number of neurons per layer
IHO = [12, 8, 5, 4, 1]
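IHO is declared but the layers below hard-code each size; a sketch that derives the variable shapes from IHO instead (a hypothetical refactor of the same architecture; the names weights and biases are illustrative):

# Hypothetical refactor: build each layer's parameters from IHO,
# where layer k connects IHO[k] units to IHO[k + 1] units.
weights = [tf.Variable(tf.random_normal([IHO[k], IHO[k + 1]]), name='W%d' % (k + 1))
           for k in range(len(IHO) - 1)]
biases = [tf.Variable(tf.zeros([IHO[k + 1]]), name='b%d' % (k + 1))
          for k in range(len(IHO) - 1)]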
# Name scopes
with tf.name_scope('input'):
    # Two placeholders: features and target (TensorFlow 1.x API)
    x = tf.placeholder(tf.float32, [None, 12], name='xInput')
    y = tf.placeholder(tf.float32, [None, 1], name='y')
# Hidden layers
with tf.name_scope('layer'):
    with tf.name_scope('weights_L1'):
        Weight_L1 = tf.Variable(tf.random_normal([12, 8]), name='W1')
        variable_summaries(Weight_L1)
    with tf.name_scope('bias_L1'):
        biases_L1 = tf.Variable(tf.zeros([8]), name='b1')
        variable_summaries(biases_L1)
    with tf.name_scope('L_1'):
        Wx_plus_b_L1 = tf.matmul(x, Weight_L1) + biases_L1
        L1 = tf.nn.tanh(Wx_plus_b_L1)
    with tf.name_scope('weights_L2'):
        Weight_L2 = tf.Variable(tf.random_normal([8, 5]), name='W2')
        variable_summaries(Weight_L2)
    with tf.name_scope('bias_L2'):
        biases_L2 = tf.Variable(tf.zeros([5]), name='b2')
        variable_summaries(biases_L2)
    with tf.name_scope('L_2'):
        Wx_plus_b_L2 = tf.matmul(L1, Weight_L2) + biases_L2
        L2 = tf.nn.tanh(Wx_plus_b_L2)
    with tf.name_scope('weights_L3'):
        Weight_L3 = tf.Variable(tf.random_normal([5, 4]), name='W3')
        variable_summaries(Weight_L3)
    with tf.name_scope('bias_L3'):
        biases_L3 = tf.Variable(tf.zeros([4]), name='b3')
        variable_summaries(biases_L3)
    with tf.name_scope('L_3'):
        Wx_plus_b_L3 = tf.matmul(L2, Weight_L3) + biases_L3
        L3 = tf.nn.tanh(Wx_plus_b_L3)
# Output layer
with tf.name_scope('weights_L4'):
    Weight_L4 = tf.Variable(tf.random_normal([4, 1]), name='W4')
    variable_summaries(Weight_L4)
with tf.name_scope('bias_L4'):
    biases_L4 = tf.Variable(tf.zeros([1]), name='b4')
    variable_summaries(biases_L4)
with tf.name_scope('prediction'):
    Wx_plus_b_L4 = tf.matmul(L3, Weight_L4) + biases_L4
    # Note: tanh outputs in (-1, 1) while the targets were scaled to [0, 1]
    prediction = tf.nn.tanh(Wx_plus_b_L4)
# Quadratic (mean squared error) cost
with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.square(y - prediction), name='loss')
    tf.summary.scalar('loss', loss)
# Train with gradient descent
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
# Merge all summaries
merged = tf.summary.merge_all()
with tf.Session() as sess:
    # Initialize variables
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('logs/', sess.graph)
    for i in range(10000):
        summary, _ = sess.run([merged, train_step], feed_dict={x: x_data, y: y_data})
        writer.add_summary(summary, i)
        curr_loss = sess.run(loss, feed_dict={x: x_data, y: y_data})
        if (i + 1) % 100 == 0:
            print('Loss at iteration %d:' % (i + 1), curr_loss)
    # Predictions on the training set
    prediction_value = sess.run(prediction, feed_dict={x: x_data})
    # Predictions on the test set
    prediction_value_test = sess.run(prediction, feed_dict={x: xTest})
    test_loss = sess.run(loss, feed_dict={x: xTest, y: yTest})
    print('Test loss:', test_loss)
    print(prediction_value_test)
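The summaries written to logs/ can then be inspected with TensorBoard, run from the script's directory:

tensorboard --logdir logs/

A second variant of the same network follows; it reads 16 features per sample from whitespace-separated text files instead of Excel sheets.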
# Neural network model for battery aging rate estimation
# (variant: 16 features per sample read from text files)
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os

np.set_printoptions(suppress=True)
np.set_printoptions(threshold=np.inf)
BATCH_SIZE = 256  # defined here but unused below; the loop trains on the full batch
with open("D:\\bs\\finall_data\\marry2.11\\train\\nc_all.txt", "rb") as fa, \
     open("D:\\bs\\finall_data\\marry2.11\\train\\calipso_all.txt", "rb") as fb, \
     open("D:\\bs\\finall_data\\marry2.11\\test\\nc_text.txt", "rb") as fc, \
     open("D:\\bs\\finall_data\\marry2.11\\test\\calipso_text.txt", "rb") as fd:
    # Read and normalize the training set
    list_x = []
    for i in fa.readlines():
        # Each line holds whitespace-separated fields; fields 2..17 are the 16 features
        x_data_1 = i.decode().split(" ")[2:18]
        for x_data_12 in x_data_1:
            list_x.append(float(x_data_12))
    x_data = np.array(list_x, dtype='float32').reshape(-1, 16)
    for i in range(16):
        # Min-max scale each feature column to [0, 1]
        x_data[:, i] = (x_data[:, i] - np.amin(x_data[:, i])) / (np.amax(x_data[:, i]) - np.amin(x_data[:, i]))
    list_y = []
    for v in fb.readlines():
        y_data_1 = v.decode().split(" ")[2]
        # Map the raw label into (0, 1] via y' = 1 / (1 + y)
        y_data_1 = 1 / (1 + float(y_data_1))
        list_y.append(y_data_1)
    y_data = np.array(list_y, dtype='float32').reshape(-1, 1)
    # Read and normalize the test set
    list_t_x = []
    for m in fc.readlines():
        x_data_t_1 = m.decode().split(" ")[2:18]
        for x_data_t_12 in x_data_t_1:
            list_t_x.append(float(x_data_t_12))
    xTest = np.array(list_t_x, dtype='float32').reshape(-1, 16)
    # xTest.shape is (1598, 16)
    for i in range(16):
        xTest[:, i] = (xTest[:, i] - np.amin(xTest[:, i])) / (np.amax(xTest[:, i]) - np.amin(xTest[:, i]))
    list_t_y = []
    for n in fd.readlines():
        y_data_t_1 = n.decode().split(" ")[2]
        y_data_t_1 = 1 / (1 + float(y_data_t_1))
        list_t_y.append(y_data_t_1)
    yTest = np.array(list_t_y, dtype='float32').reshape(-1, 1)
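Since the labels were transformed with y' = 1/(1 + y), predictions can be mapped back to the original label scale. A minimal sketch (invert_label_transform is a hypothetical helper, assuming predictions stay in (0, 1]):

def invert_label_transform(y_transformed):
    # Inverse of y' = 1 / (1 + y), i.e. y = 1 / y' - 1
    return 1.0 / y_transformed - 1.0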
# Parameter summaries for TensorBoard
def variable_summaries(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)  # mean
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)  # standard deviation
        tf.summary.scalar('max', tf.reduce_max(var))  # maximum
        tf.summary.scalar('min', tf.reduce_min(var))  # minimum
        tf.summary.histogram('histogram', var)  # histogram
# Five-layer network: number of neurons per layer
IHO = [16, 8, 5, 4, 1]
# Name scopes
with tf.name_scope('input'):
    # Two placeholders: features and target
    x = tf.placeholder(tf.float32, [None, 16], name='xInput')
    y = tf.placeholder(tf.float32, [None, 1], name='y')
# Hidden layers
with tf.name_scope('layer'):
    with tf.name_scope('weights_L1'):
        Weight_L1 = tf.Variable(tf.random_normal([16, 8]), name='W1')
        variable_summaries(Weight_L1)
    with tf.name_scope('bias_L1'):
        biases_L1 = tf.Variable(tf.zeros([8]), name='b1')
        variable_summaries(biases_L1)
    with tf.name_scope('L_1'):
        Wx_plus_b_L1 = tf.matmul(x, Weight_L1) + biases_L1
        L1 = tf.nn.sigmoid(Wx_plus_b_L1)
    with tf.name_scope('weights_L2'):
        Weight_L2 = tf.Variable(tf.random_normal([8, 5]), name='W2')
        variable_summaries(Weight_L2)
    with tf.name_scope('bias_L2'):
        biases_L2 = tf.Variable(tf.zeros([5]), name='b2')
        variable_summaries(biases_L2)
    with tf.name_scope('L_2'):
        Wx_plus_b_L2 = tf.matmul(L1, Weight_L2) + biases_L2
        L2 = tf.nn.sigmoid(Wx_plus_b_L2)
    with tf.name_scope('weights_L3'):
        Weight_L3 = tf.Variable(tf.random_normal([5, 4]), name='W3')
        variable_summaries(Weight_L3)
    with tf.name_scope('bias_L3'):
        biases_L3 = tf.Variable(tf.zeros([4]), name='b3')
        variable_summaries(biases_L3)
    with tf.name_scope('L_3'):
        Wx_plus_b_L3 = tf.matmul(L2, Weight_L3) + biases_L3
        L3 = tf.nn.sigmoid(Wx_plus_b_L3)
# Output layer
with tf.name_scope('weights_L4'):
    Weight_L4 = tf.Variable(tf.random_normal([4, 1]), name='W4')
    variable_summaries(Weight_L4)
with tf.name_scope('bias_L4'):
    biases_L4 = tf.Variable(tf.zeros([1]), name='b4')
    variable_summaries(biases_L4)
with tf.name_scope('prediction'):
    Wx_plus_b_L4 = tf.matmul(L3, Weight_L4) + biases_L4
    # sigmoid keeps predictions in (0, 1), matching the transformed labels
    prediction = tf.nn.sigmoid(Wx_plus_b_L4)
# Quadratic (mean squared error) cost
with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.square(y - prediction), name='loss')
    tf.summary.scalar('loss', loss)
# Train with gradient descent
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
# Merge all summaries
merged = tf.summary.merge_all()
with tf.Session() as sess:
    # Initialize variables
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('logs/', sess.graph)
    for i in range(1000):
        summary, _ = sess.run([merged, train_step], feed_dict={x: x_data, y: y_data})
        writer.add_summary(summary, i)
        curr_loss = sess.run(loss, feed_dict={x: x_data, y: y_data})
        if (i + 1) % 100 == 0:
            print('Loss at iteration %d:' % (i + 1), curr_loss)
    # Predictions on the training set
    prediction_value = sess.run(prediction, feed_dict={x: x_data})
    # Predictions on the test set
    prediction_value_test = sess.run(prediction, feed_dict={x: xTest})
    test_loss = sess.run(loss, feed_dict={x: xTest, y: yTest})
    print('Test loss:', test_loss)
    # len(prediction_value_test) gives the number of test samples
    print(prediction_value_test)
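BATCH_SIZE is defined above but never used; a mini-batch version of the training loop could look like this inside the same tf.Session block (a sketch, not the original training scheme; the epoch count of 100 is an arbitrary assumption):

    num_samples = x_data.shape[0]
    for epoch in range(100):
        # Shuffle sample indices, then step through them BATCH_SIZE at a time
        perm = np.random.permutation(num_samples)
        for start in range(0, num_samples, BATCH_SIZE):
            batch = perm[start:start + BATCH_SIZE]
            sess.run(train_step, feed_dict={x: x_data[batch], y: y_data[batch]})

The last script below is unrelated to the battery model: a Keras comparison of three MNIST classifiers with and without dropout.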
from __future__ import print_function
import numpy as np
np.random.seed(1337)
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
from keras.utils import np_utils
from keras.optimizers import SGD

batch_size = 128
nb_classes = 10
nb_epoch = 20

# Load MNIST, flatten the 28x28 images, and scale pixels to [0, 1]
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# One-hot encode the labels
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)

# Model 1: two hidden layers with dropout
model1 = Sequential()
model1.add(Dense(256, activation='relu', input_dim=784))
model1.add(Dropout(0.2))
model1.add(Dense(256, activation='relu'))
model1.add(Dropout(0.2))
model1.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model1.compile(loss='categorical_crossentropy',
               optimizer=sgd,
               metrics=['accuracy'])
history1 = model1.fit(X_train, Y_train,
                      batch_size=batch_size,
                      epochs=nb_epoch,
                      verbose=2,
                      validation_data=(X_test, Y_test))

# Model 2: same architecture without dropout
model2 = Sequential()
model2.add(Dense(256, activation='relu', input_dim=784))
model2.add(Dense(256, activation='relu'))
model2.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model2.compile(loss='categorical_crossentropy',
               optimizer=sgd,
               metrics=['accuracy'])
history2 = model2.fit(X_train, Y_train,
                      batch_size=batch_size,
                      epochs=nb_epoch,
                      verbose=2,
                      validation_data=(X_test, Y_test))
# Model 3: a single hidden layer
model3 = Sequential()
model3.add(Dense(256, activation='relu', input_dim=784))
model3.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model3.compile(loss='categorical_crossentropy',
               optimizer=sgd,
               metrics=['accuracy'])
history3 = model3.fit(X_train, Y_train,
                      batch_size=batch_size,
                      epochs=nb_epoch,
                      verbose=2,
                      validation_data=(X_test, Y_test))

import matplotlib.pyplot as plt
# List all metrics recorded in the history
print(history1.history.keys())
# summarize history for accuracy
plt.plot(history1.history['acc'])
plt.plot(history1.history['val_acc'])
plt.title('model1 accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(history2.history['acc'])
plt.plot(history2.history['val_acc'])
plt.title('model2 accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(history3.history['acc'])
plt.plot(history3.history['val_acc'])
plt.title('model3 accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(history1.history['val_acc'])
plt.plot(history2.history['val_acc'])
plt.plot(history3.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['model1', 'model2', 'model3'], loc='upper left')
plt.show()

# Summarize history for loss
plt.plot(history1.history['loss'])
plt.plot(history1.history['val_loss'])
plt.title('model1 loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(history2.history['loss'])
plt.plot(history2.history['val_loss'])
plt.title('model2 loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(history3.history['loss'])
plt.plot(history3.history['val_loss'])
plt.title('model3 loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
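The six plotting blocks above repeat the same pattern; a compact helper producing the same per-model figures might look like this (plot_history is a hypothetical helper, assuming the same 'acc'/'loss' history keys as above):

def plot_history(history, title):
    # Plot train/test curves for accuracy and loss from one Keras History object
    for key, val_key, label in [('acc', 'val_acc', 'accuracy'), ('loss', 'val_loss', 'loss')]:
        plt.plot(history.history[key])
        plt.plot(history.history[val_key])
        plt.title('%s %s' % (title, label))
        plt.ylabel(label)
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.show()

for name, h in [('model1', history1), ('model2', history2), ('model3', history3)]:
    plot_history(h, name)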