Time series forecasting has long been an important research problem. Statistics offers a wide range of models for it, but deep learning, which has become popular in recent years, also provides methods for time series prediction; moreover, the same sequence-modeling techniques in deep learning are applied to natural language tasks and other sequential problems.

I found TensorFlow code online that uses RNN and LSTM networks to fit and predict a sin curve, and it works quite well.

LSTM:

#encoding:UTF-8

import random
import numpy as np
import tensorflow as tf
from tensorflow.contrib.rnn.python.ops import core_rnn
from tensorflow.contrib.rnn.python.ops import core_rnn_cell


def build_data(n):
    xs = []
    ys = []
    for i in range(0, 2000):
        k = random.uniform(1, 50)
        # x[i] = sin(k + i) (i = 0, 1, ..., n-1)
        # y    = sin(k + n)
        x = [[np.sin(k + j)] for j in range(0, n)]
        y = [np.sin(k + n)]
        xs.append(x)
        ys.append(y)
    train_x = np.array(xs[0: 1500])
    train_y = np.array(ys[0: 1500])
    test_x = np.array(xs[1500:])
    test_y = np.array(ys[1500:])
    return (train_x, train_y, test_x, test_y)


length = 10
time_step_size = length
vector_size = 1
batch_size = 10
test_size = 10

# build data
(train_x, train_y, test_x, test_y) = build_data(length)
print(train_x.shape, train_y.shape, test_x.shape, test_y.shape)

X = tf.placeholder("float", [None, length, vector_size])
Y = tf.placeholder("float", [None, 1])

# map the last hidden state (lstm_size = 10) to the predicted value
W = tf.Variable(tf.random_normal([10, 1], stddev=0.01))
B = tf.Variable(tf.random_normal([1], stddev=0.01))


def seq_predict_model(X, w, b, time_step_size, vector_size):
    # input X shape: [batch_size, time_step_size, vector_size]
    # transpose X to [time_step_size, batch_size, vector_size]
    X = tf.transpose(X, [1, 0, 2])
    # reshape X to [time_step_size * batch_size, vector_size]
    X = tf.reshape(X, [-1, vector_size])
    # split X into a list of time_step_size tensors, each of shape [batch_size, vector_size]
    X = tf.split(X, time_step_size, 0)

    # LSTM model with state_size = 10
    cell = core_rnn_cell.BasicLSTMCell(num_units=10,
                                       forget_bias=1.0,
                                       state_is_tuple=True)
    outputs, _states = core_rnn.static_rnn(cell, X, dtype=tf.float32)

    # Linear activation
    return tf.matmul(outputs[-1], w) + b, cell.state_size


pred_y, _ = seq_predict_model(X, W, B, time_step_size, vector_size)
loss = tf.square(tf.subtract(Y, pred_y))
train_op = tf.train.GradientDescentOptimizer(0.001).minimize(loss)

with tf.Session() as sess:
    tf.global_variables_initializer().run()

    for i in range(50):
        # train
        for end in range(batch_size, len(train_x), batch_size):
            begin = end - batch_size
            x_value = train_x[begin: end]
            y_value = train_y[begin: end]
            sess.run(train_op, feed_dict={X: x_value, Y: y_value})

        # randomly select validation set from test set
        test_indices = np.arange(len(test_x))
        np.random.shuffle(test_indices)
        test_indices = test_indices[0: test_size]
        x_value = test_x[test_indices]
        y_value = test_y[test_indices]

        # eval on validation set
        val_loss = np.mean(sess.run(loss,
                                    feed_dict={X: x_value, Y: y_value}))
        print('Run %s' % i, val_loss)

    # predict on the full test set
    for b in range(0, len(test_x), test_size):
        x_value = test_x[b: b + test_size]
        y_value = test_y[b: b + test_size]
        pred = sess.run(pred_y, feed_dict={X: x_value})
        for i in range(len(pred)):
            print(pred[i], y_value[i], pred[i] - y_value[i])
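
As a side note, the code above relies on the TF 1.x contrib API (tensorflow.contrib.rnn.python.ops), which was removed in TensorFlow 2.x. Below is a minimal tf.keras sketch of my own that reproduces roughly the same LSTM regressor under those assumptions; the hyperparameters mirror the post (state size 10, SGD with learning rate 0.001, 50 epochs) but are illustrative rather than an exact equivalent.

# Hedged tf.keras sketch of the same sin-prediction task (not the original author's code).
import numpy as np
import tensorflow as tf

def build_data(n, samples=2000, split=1500):
    # same idea as build_data above: x = sin(k), ..., sin(k+n-1); target y = sin(k+n)
    xs, ys = [], []
    for _ in range(samples):
        k = np.random.uniform(1, 50)
        xs.append([[np.sin(k + j)] for j in range(n)])
        ys.append([np.sin(k + n)])
    xs = np.array(xs, dtype=np.float32)
    ys = np.array(ys, dtype=np.float32)
    return xs[:split], ys[:split], xs[split:], ys[split:]

length = 10
train_x, train_y, test_x, test_y = build_data(length)

model = tf.keras.Sequential([
    tf.keras.layers.LSTM(10, input_shape=(length, 1)),  # state_size = 10, as in the post
    tf.keras.layers.Dense(1),                           # linear output, like matmul(outputs[-1], W) + B
])
model.compile(optimizer=tf.keras.optimizers.SGD(0.001), loss="mse")
model.fit(train_x, train_y, batch_size=10, epochs=50, verbose=0)
print("test MSE:", model.evaluate(test_x, test_y, verbose=0))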

RNN:

import random

import numpy as np
import tensorflow as tf
from tensorflow.contrib.rnn.python.ops import core_rnn
from tensorflow.contrib.rnn.python.ops import core_rnn_cell


def build_data(n):
    xs = []
    ys = []
    for i in range(0, 2000):
        k = random.uniform(1, 50)
        # x[i] = sin(k + i) (i = 0, 1, ..., n-1)
        # y    = sin(k + n)
        x = [[np.sin(k + j)] for j in range(0, n)]
        y = [np.sin(k + n)]
        xs.append(x)
        ys.append(y)
    train_x = np.array(xs[0: 1500])
    train_y = np.array(ys[0: 1500])
    test_x = np.array(xs[1500:])
    test_y = np.array(ys[1500:])
    return (train_x, train_y, test_x, test_y)


length = 10
time_step_size = length
vector_size = 1
batch_size = 10
test_size = 10

# build data
(train_x, train_y, test_x, test_y) = build_data(length)
print(train_x.shape, train_y.shape, test_x.shape, test_y.shape)

X = tf.placeholder("float", [None, length, vector_size])
Y = tf.placeholder("float", [None, 1])

# map the last hidden state (size 10) to the predicted value
W = tf.Variable(tf.random_normal([10, 1], stddev=0.01))
B = tf.Variable(tf.random_normal([1], stddev=0.01))


def seq_predict_model(X, w, b, time_step_size, vector_size):
    # input X shape: [batch_size, time_step_size, vector_size]
    # transpose X to [time_step_size, batch_size, vector_size]
    X = tf.transpose(X, [1, 0, 2])
    # reshape X to [time_step_size * batch_size, vector_size]
    X = tf.reshape(X, [-1, vector_size])
    # split X into a list of time_step_size tensors, each of shape [batch_size, vector_size]
    X = tf.split(X, time_step_size, 0)

    # basic RNN model with state_size = 10
    cell = core_rnn_cell.BasicRNNCell(num_units=10)
    initial_state = tf.zeros([batch_size, cell.state_size])
    outputs, _states = core_rnn.static_rnn(cell, X, initial_state=initial_state)

    # Linear activation
    return tf.matmul(outputs[-1], w) + b, cell.state_size


pred_y, _ = seq_predict_model(X, W, B, time_step_size, vector_size)
loss = tf.square(tf.subtract(Y, pred_y))
train_op = tf.train.GradientDescentOptimizer(0.001).minimize(loss)

with tf.Session() as sess:
    tf.global_variables_initializer().run()

    for i in range(50):
        # train
        for end in range(batch_size, len(train_x), batch_size):
            begin = end - batch_size
            x_value = train_x[begin: end]
            y_value = train_y[begin: end]
            sess.run(train_op, feed_dict={X: x_value, Y: y_value})

        # randomly select validation set from test set
        test_indices = np.arange(len(test_x))
        np.random.shuffle(test_indices)
        test_indices = test_indices[0: test_size]
        x_value = test_x[test_indices]
        y_value = test_y[test_indices]

        # eval on validation set
        val_loss = np.mean(sess.run(loss,
                                    feed_dict={X: x_value, Y: y_value}))
        print('Run %s' % i, val_loss)

    # predict on the full test set
    for b in range(0, len(test_x), test_size):
        x_value = test_x[b: b + test_size]
        y_value = test_y[b: b + test_size]
        pred = sess.run(pred_y, feed_dict={X: x_value})
        for i in range(len(pred)):
            print(pred[i], y_value[i], pred[i] - y_value[i])
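
The only substantive difference from the LSTM script is the cell: BasicRNNCell with an explicit zero initial state instead of BasicLSTMCell. If you use the tf.keras sketch given after the LSTM listing (my own hedged equivalent, not the original code), the same change is a one-line swap of the recurrent layer:

# Hedged tf.keras variant of the plain-RNN model; assumes the setup from the earlier sketch.
model = tf.keras.Sequential([
    tf.keras.layers.SimpleRNN(10, input_shape=(length, 1)),  # plain RNN cell, state size 10
    tf.keras.layers.Dense(1),
])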
