'''
Created on 2017-05-21
@author: weizhen
'''
# The following program predicts a discretized sin function
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
# Load matplotlib, which is used to plot the predicted sin curve
import matplotlib as mpl
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
mpl.use('Agg')
from matplotlib import pyplot as plt

learn = tf.contrib.learn

HIDDEN_SIZE = 30             # number of hidden units in each LSTM cell
NUM_LAYERS = 2               # number of LSTM layers
TIMESTEPS = 10               # truncation length of the recurrent network
TRAINING_STEPS = 10000       # number of training steps
BATCH_SIZE = 32              # batch size
TRAINING_EXAMPLES = 10000    # number of training examples
TESTING_EXAMPLES = 1000      # number of testing examples
SAMPLE_GAP = 0.01            # sampling interval
# Generate the sine-wave training/testing data
def generate_data(seq):
    X = []
    Y = []
    # Item i of the sequence together with the following TIMESTEPS-1 items forms
    # one input sample, and item i+TIMESTEPS is the label. In other words, the
    # previous TIMESTEPS points of the sin curve are used to predict the value
    # at point i+TIMESTEPS.
    for i in range(len(seq) - TIMESTEPS - 1):
        X.append([seq[i:i + TIMESTEPS]])
        Y.append([seq[i + TIMESTEPS]])
    return np.array(X, dtype=np.float32), np.array(Y, dtype=np.float32)

# Create a single LSTM cell
def LstmCell():
    lstm_cell = rnn.BasicLSTMCell(HIDDEN_SIZE, state_is_tuple=True)
    return lstm_cell
# Define the LSTM model
def lstm_model(X, y):
    cell = rnn.MultiRNNCell([LstmCell() for _ in range(NUM_LAYERS)])
    output, _ = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
    output = tf.reshape(output, [-1, HIDDEN_SIZE])
    # A fully connected layer with no activation computes the linear regression,
    # and its result is flattened into a one-dimensional array
    predictions = tf.contrib.layers.fully_connected(output, 1, None)
    # Reshape predictions and labels to the same shape
    labels = tf.reshape(y, [-1])
    predictions = tf.reshape(predictions, [-1])
    loss = tf.losses.mean_squared_error(predictions, labels)
    train_op = tf.contrib.layers.optimize_loss(loss, tf.contrib.framework.get_global_step(),
                                               optimizer="Adagrad",
                                               learning_rate=0.1)
    return predictions, loss, train_op

# Wrap the LSTM model defined above into an estimator for training
regressor = SKCompat(learn.Estimator(model_fn=lstm_model, model_dir="Models/model_2"))
# Generate data
test_start = TRAINING_EXAMPLES * SAMPLE_GAP
test_end = (TRAINING_EXAMPLES + TESTING_EXAMPLES) * SAMPLE_GAP
train_X, train_y = generate_data(np.sin(np.linspace(0, test_start, TRAINING_EXAMPLES, dtype=np.float32)))
test_X, test_y = generate_data(np.sin(np.linspace(test_start, test_end, TESTING_EXAMPLES, dtype=np.float32)))
# Fit the training data
regressor.fit(train_X, train_y, batch_size=BATCH_SIZE, steps=TRAINING_STEPS)
# Compute predictions on the test set
predicted = [[pred] for pred in regressor.predict(test_X)]
# Compute the root mean squared error (the printed label keeps the original wording)
rmse = np.sqrt(((predicted - test_y) ** 2).mean(axis=0))
print("Mean Square Error is:%f" % rmse[0])
# Plot the predicted curve against the real sin curve
plot_predicted, = plt.plot(predicted, label='predicted')
plot_test, = plt.plot(test_y, label='real_sin')
plt.legend([plot_predicted, plot_test], ['predicted', 'real_sin'])
plt.show()
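
To make the sliding window in generate_data concrete, here is a small standalone illustration (not part of the original program; TIMESTEPS is shortened to 3 and a toy integer sequence is used purely so the output is readable):

import numpy as np

TIMESTEPS = 3  # shortened from 10 only for this illustration

def generate_data(seq):
    # Each sample is the previous TIMESTEPS points; the label is the next point
    X, Y = [], []
    for i in range(len(seq) - TIMESTEPS - 1):
        X.append([seq[i:i + TIMESTEPS]])
        Y.append([seq[i + TIMESTEPS]])
    return np.array(X, dtype=np.float32), np.array(Y, dtype=np.float32)

seq = np.arange(8, dtype=np.float32)   # [0. 1. 2. 3. 4. 5. 6. 7.]
X, Y = generate_data(seq)
print(X.shape, Y.shape)                # (4, 1, 3) (4, 1)
print(X[0], Y[0])                      # [[0. 1. 2.]] [3.]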

The prediction results are as follows:

Mean Square Error is:0.001686
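
The code above depends on tf.contrib and tf.contrib.learn, which exist only in TensorFlow 1.x. For readers on TensorFlow 2.x, the following is a rough sketch of the same two-layer LSTM regressor written with tf.keras; this is my own assumption of an equivalent, not part of the original post. The layer sizes, window length, and Adagrad learning rate mirror the constants above, the epoch count is arbitrary, and the exact error you obtain will differ.

import numpy as np
import tensorflow as tf

HIDDEN_SIZE = 30
TIMESTEPS = 10
TRAINING_EXAMPLES = 10000
TESTING_EXAMPLES = 1000
SAMPLE_GAP = 0.01

def make_windows(seq):
    # Same sliding window as generate_data above, reshaped to [samples, TIMESTEPS, 1] for Keras
    X, Y = [], []
    for i in range(len(seq) - TIMESTEPS - 1):
        X.append(seq[i:i + TIMESTEPS])
        Y.append(seq[i + TIMESTEPS])
    return (np.array(X, dtype=np.float32).reshape(-1, TIMESTEPS, 1),
            np.array(Y, dtype=np.float32))

test_start = TRAINING_EXAMPLES * SAMPLE_GAP
test_end = (TRAINING_EXAMPLES + TESTING_EXAMPLES) * SAMPLE_GAP
train_X, train_y = make_windows(np.sin(np.linspace(0, test_start, TRAINING_EXAMPLES, dtype=np.float32)))
test_X, test_y = make_windows(np.sin(np.linspace(test_start, test_end, TESTING_EXAMPLES, dtype=np.float32)))

# Two stacked LSTM layers followed by a linear output unit, trained with MSE
model = tf.keras.Sequential([
    tf.keras.layers.LSTM(HIDDEN_SIZE, return_sequences=True, input_shape=(TIMESTEPS, 1)),
    tf.keras.layers.LSTM(HIDDEN_SIZE),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.1), loss="mse")
model.fit(train_X, train_y, batch_size=32, epochs=5)

predicted = model.predict(test_X).flatten()
rmse = np.sqrt(np.mean((predicted - test_y) ** 2))
print("Root Mean Square Error is: %f" % rmse)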
