Run the code:

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

BATCH_START = 0
TIME_STEPS = 20
BATCH_SIZE = 50
INPUT_SIZE = 1
OUTPUT_SIZE = 1
CELL_SIZE = 10
LR = 0.006


def get_batch():
    global BATCH_START, TIME_STEPS
    # xs shape (50batch, 20steps)
    xs = np.arange(BATCH_START, BATCH_START+TIME_STEPS*BATCH_SIZE).reshape((BATCH_SIZE, TIME_STEPS)) / (10*np.pi)
    seq = np.sin(xs)
    res = np.cos(xs)
    BATCH_START += TIME_STEPS
    # plt.plot(xs[0, :], res[0, :], 'r', xs[0, :], seq[0, :], 'b--')
    # plt.show()
    # returned seq, res and xs: shape (batch, step, input)
    return [seq[:, :, np.newaxis], res[:, :, np.newaxis], xs]
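
# Shape convention used throughout: data tensors are (batch, step, input).
# With the defaults above, seq and res are (50, 20, 1) float arrays, while
# xs stays (50, 20) and is only used for plotting.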


class LSTMRNN(object):
    def __init__(self, n_steps, input_size, output_size, cell_size, batch_size):
        self.n_steps = n_steps
        self.input_size = input_size
        self.output_size = output_size
        self.cell_size = cell_size
        self.batch_size = batch_size
        with tf.name_scope('inputs'):
            self.xs = tf.placeholder(tf.float32, [None, n_steps, input_size], name='xs')
            self.ys = tf.placeholder(tf.float32, [None, n_steps, output_size], name='ys')
        with tf.variable_scope('in_hidden'):
            self.add_input_layer()
        with tf.variable_scope('LSTM_cell'):
            self.add_cell()
        with tf.variable_scope('out_hidden'):
            self.add_output_layer()
        with tf.name_scope('cost'):
            self.compute_cost()
        with tf.name_scope('train'):
            self.train_op = tf.train.AdamOptimizer(LR).minimize(self.cost)

    def add_input_layer(self):
        l_in_x = tf.reshape(self.xs, [-1, self.input_size], name='2_2D')  # (batch*n_step, in_size)
        # Ws (in_size, cell_size)
        Ws_in = self._weight_variable([self.input_size, self.cell_size])
        # bs (cell_size, )
        bs_in = self._bias_variable([self.cell_size, ])
        # l_in_y = (batch * n_steps, cell_size)
        with tf.name_scope('Wx_plus_b'):
            l_in_y = tf.matmul(l_in_x, Ws_in) + bs_in
        # reshape l_in_y ==> (batch, n_steps, cell_size)
        self.l_in_y = tf.reshape(l_in_y, [-1, self.n_steps, self.cell_size], name='2_3D')

    def add_cell(self):
        lstm_cell = tf.contrib.rnn.BasicLSTMCell(self.cell_size, forget_bias=1.0, state_is_tuple=True)
        with tf.name_scope('initial_state'):
            self.cell_init_state = lstm_cell.zero_state(self.batch_size, dtype=tf.float32)
        self.cell_outputs, self.cell_final_state = tf.nn.dynamic_rnn(
            lstm_cell, self.l_in_y, initial_state=self.cell_init_state, time_major=False)
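
    # The initial state is a zeroed LSTMStateTuple with a fixed batch size,
    # so every feed must contain exactly BATCH_SIZE sequences. The training
    # loop below feeds cell_final_state back into cell_init_state, keeping
    # the LSTM state continuous across batches of one long sin/cos curve.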

    def add_output_layer(self):
        # shape = (batch * steps, cell_size)
        l_out_x = tf.reshape(self.cell_outputs, [-1, self.cell_size], name='2_2D')
        Ws_out = self._weight_variable([self.cell_size, self.output_size])
        bs_out = self._bias_variable([self.output_size, ])
        # shape = (batch * steps, output_size)
        with tf.name_scope('Wx_plus_b'):
            self.pred = tf.matmul(l_out_x, Ws_out) + bs_out

    def compute_cost(self):
        losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
            [tf.reshape(self.pred, [-1], name='reshape_pred')],
            [tf.reshape(self.ys, [-1], name='reshape_target')],
            [tf.ones([self.batch_size * self.n_steps], dtype=tf.float32)],
            average_across_timesteps=True,
            softmax_loss_function=self.ms_error,
            name='losses'
        )
        with tf.name_scope('average_cost'):
            self.cost = tf.div(
                tf.reduce_sum(losses, name='losses_sum'),
                self.batch_size,
                name='average_cost')
            tf.summary.scalar('cost', self.cost)

    @staticmethod
    def ms_error(labels, logits):
        return tf.square(tf.subtract(labels, logits))
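
    # sequence_loss_by_example normally expects a cross-entropy loss, but any
    # callable with the (labels, logits) signature works: ms_error plugs in a
    # squared error, so `losses` holds one squared residual per predicted
    # point and `cost` is the summed squared error divided by the batch size.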

    def _weight_variable(self, shape, name='weights'):
        initializer = tf.random_normal_initializer(mean=0., stddev=1.)
        return tf.get_variable(shape=shape, initializer=initializer, name=name)

    def _bias_variable(self, shape, name='biases'):
        initializer = tf.constant_initializer(0.1)
        return tf.get_variable(name=name, shape=shape, initializer=initializer)
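
    # Both helpers use tf.get_variable, so they rely on the distinct variable
    # scopes ('in_hidden', 'out_hidden') set up in __init__; creating the same
    # variable twice in one scope would raise a reuse error.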


if __name__ == '__main__':
    model = LSTMRNN(TIME_STEPS, INPUT_SIZE, OUTPUT_SIZE, CELL_SIZE, BATCH_SIZE)
    sess = tf.Session()
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter("logs", sess.graph)
    init = tf.global_variables_initializer()
    sess.run(init)
    # cd into this script's directory and run the line below, then open
    # http://0.0.0.0:6006/ in a browser to inspect the graph:
    # $ tensorboard --logdir='logs'

    plt.ion()
    plt.show()
    for i in range(200):
        seq, res, xs = get_batch()
        if i == 0:
            feed_dict = {
                model.xs: seq,
                model.ys: res,
                # no cell_init_state fed here, so the zero state is used
            }
        else:
            feed_dict = {
                model.xs: seq,
                model.ys: res,
                model.cell_init_state: state  # use last state as the initial state for this run
            }

        _, cost, state, pred = sess.run(
            [model.train_op, model.cost, model.cell_final_state, model.pred],
            feed_dict=feed_dict)

        # plotting
        plt.plot(xs[0, :], res[0].flatten(), 'r', xs[0, :], pred.flatten()[:TIME_STEPS], 'b--')
        plt.ylim((-1.2, 1.2))
        plt.draw()
        plt.pause(0.3)

        if i % 20 == 0:
            print('cost: ', round(cost, 4))
            result = sess.run(merged, feed_dict)
            writer.add_summary(result, i)
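
A note on versions and a quick sanity check. The listing targets the TensorFlow 1.x API (tf.placeholder, tf.contrib, tf.Session); tf.contrib was removed in TensorFlow 2.0, so the script needs a 1.x install (e.g. tensorflow==1.15). The sketch below is a minimal NumPy reproduction of what compute_cost computes, using random stand-in arrays; the names targets and outputs are illustrative, not from the original code:

import numpy as np

BATCH_SIZE, TIME_STEPS = 50, 20
# hypothetical stand-ins for the flattened ys and pred tensors
targets = np.random.randn(BATCH_SIZE * TIME_STEPS).astype(np.float32)
outputs = np.random.randn(BATCH_SIZE * TIME_STEPS).astype(np.float32)

# sequence_loss_by_example with ms_error yields one squared residual per
# point; summing and dividing by the batch size matches `cost` above
# (i.e. TIME_STEPS times the ordinary mean squared error)
cost = np.square(targets - outputs).sum() / BATCH_SIZE
print(round(float(cost), 4))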

Result:

(The original post shows the live Matplotlib animation here: the red curve is the cos target for the first sequence of each batch, the blue dashed curve is the network's prediction, and the prediction should track the target more closely as the cost printed every 20 steps falls.)
