import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

BATCH_START = 0
TIME_STEPS = 20
BATCH_SIZE = 20
INPUT_SIZE = 1
OUTPUT_SIZE = 1
CELL_SIZE = 10
LR = 0.0025


def get_batch():
    global BATCH_START, TIME_STEPS
    # xs shape (20batch, 20steps)
    xs = np.arange(BATCH_START, BATCH_START+TIME_STEPS*BATCH_SIZE).reshape((BATCH_SIZE, TIME_STEPS)) / (10*np.pi)
    seq = np.sin(xs)
    res = np.cos(xs)
    BATCH_START += TIME_STEPS
    # plt.plot(xs[0, :], res[0, :], 'r', xs[0, :], seq[0, :], 'b--')
    # plt.show()
    # returned seq, res and xs: shape (batch, step, input)
    return [seq[:, :, np.newaxis], res[:, :, np.newaxis], xs]
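
# A quick sanity check on the batch shapes (optional; note that calling
# get_batch() advances BATCH_START, so only run this when experimenting):
# seq, res, xs = get_batch()
# print(seq.shape, res.shape, xs.shape)  # (20, 20, 1) (20, 20, 1) (20, 20)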

class LSTMRNN(object):
    def __init__(self, n_steps, input_size, output_size, cell_size, batch_size):
        self.n_steps = n_steps
        self.input_size = input_size
        self.output_size = output_size
        self.cell_size = cell_size
        self.batch_size = batch_size
        with tf.name_scope('inputs'):
            self.xs = tf.placeholder(tf.float32, [None, n_steps, input_size], name='xs')
            self.ys = tf.placeholder(tf.float32, [None, n_steps, output_size], name='ys')
        with tf.variable_scope('in_hidden'):
            self.add_input_layer()
        with tf.variable_scope('LSTM_cell'):
            self.add_cell()
        with tf.variable_scope('out_hidden'):
            self.add_output_layer()
        with tf.name_scope('cost'):
            self.compute_loss()
        with tf.name_scope('train'):
            self.train_op = tf.train.AdamOptimizer(LR).minimize(self.loss)
    def add_input_layer(self):
        l_in_x = tf.reshape(self.xs, [-1, self.input_size], name='2_2D')  # (batch*n_step, in_size)
        # Ws (in_size, cell_size)
        Ws_in = self._weight_variable([self.input_size, self.cell_size])
        # bs (cell_size, )
        bs_in = self._bias_variable([self.cell_size, ])
        # l_in_y = (batch * n_steps, cell_size)
        with tf.name_scope('Wx_plus_b'):
            l_in_y = tf.matmul(l_in_x, Ws_in) + bs_in
        # reshape l_in_y ==> (batch, n_steps, cell_size)
        self.l_in_y = tf.reshape(l_in_y, [-1, self.n_steps, self.cell_size], name='2_3D')
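
    # Why the reshapes: flattening to (batch*n_steps, in_size) lets a single shared
    # weight matrix act on every time step at once; reshaping back to
    # (batch, n_steps, cell_size) restores the sequence layout that dynamic_rnn
    # expects. add_output_layer() below applies the same trick in reverse.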
    def add_cell(self):
        lstm_cell = tf.contrib.rnn.BasicLSTMCell(self.cell_size, forget_bias=1.0, state_is_tuple=True)
        with tf.name_scope('initial_state'):
            self.cell_init_state = lstm_cell.zero_state(self.batch_size, dtype=tf.float32)
        self.cell_outputs, self.cell_final_state = tf.nn.dynamic_rnn(
            lstm_cell, self.l_in_y, initial_state=self.cell_init_state, time_major=False)
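
    # dynamic_rnn returns cell_outputs shaped (batch, n_steps, cell_size) and, since
    # state_is_tuple=True, cell_final_state as an LSTMStateTuple (c, h); the training
    # loop at the bottom feeds this tuple back in as the next batch's initial state.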
    def add_output_layer(self):
        # shape = (batch * steps, cell_size)
        l_out_x = tf.reshape(self.cell_outputs, [-1, self.cell_size], name='2_2D')
        Ws_out = self._weight_variable([self.cell_size, self.output_size])
        bs_out = self._bias_variable([self.output_size, ])
        # shape = (batch * steps, output_size)
        with tf.name_scope('Wx_plus_b'):
            self.pred = tf.matmul(l_out_x, Ws_out) + bs_out
    # An earlier seq2seq-based cost, kept commented out for reference:
    # def compute_cost(self):
    #     losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
    #         [tf.reshape(self.pred, [-1], name='reshape_pred')],
    #         [tf.reshape(self.ys, [-1], name='reshape_target')],
    #         [tf.ones([self.batch_size * self.n_steps], dtype=tf.float32)],
    #         average_across_timesteps=True,
    #         softmax_loss_function=self.ms_error,
    #         name='losses'
    #     )
    #     with tf.name_scope('average_cost'):
    #         self.cost = tf.div(
    #             tf.reduce_sum(losses, name='losses_sum'),
    #             self.batch_size,
    #             name='average_cost')
    def compute_loss(self):
        prediction = tf.reshape(self.pred, [-1], name='reshape_pred')
        ys = tf.reshape(self.ys, [-1], name='reshape_target')
        self.loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                                 reduction_indices=[0]))
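
    # Note: the inner reduce_sum collapses the flattened (batch*steps,) vector to a
    # scalar, so the outer reduce_mean changes nothing -- self.loss is the summed
    # squared error over the whole batch. A per-element mean squared error would be
    # tf.reduce_mean(tf.square(ys - prediction)).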
    @staticmethod
    def ms_error(labels, logits):
        # squared-error helper, used only by the commented-out compute_cost above
        return tf.square(tf.subtract(labels, logits))
    def _weight_variable(self, shape, name='weights'):
        initializer = tf.random_normal_initializer(mean=0., stddev=1.)
        return tf.get_variable(shape=shape, initializer=initializer, name=name)
    def _bias_variable(self, shape, name='biases'):
        initializer = tf.constant_initializer(0.1)
        return tf.get_variable(name=name, shape=shape, initializer=initializer)

if __name__ == '__main__':
    model = LSTMRNN(TIME_STEPS, INPUT_SIZE, OUTPUT_SIZE, CELL_SIZE, BATCH_SIZE)
    sess = tf.Session()
    # tf.initialize_all_variables() is deprecated as of 2017-03-02
    # for tensorflow >= 0.12
    if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
        init = tf.initialize_all_variables()
    else:
        init = tf.global_variables_initializer()
    sess.run(init)
    # cd into this script's directory, run the line below, then view the graph
    # in the browser (http://0.0.0.0:6006/):
    # $ tensorboard --logdir='logs'
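    # The comment above assumes the graph has been written to 'logs'; a minimal
    # way to do that (uncomment to enable the TensorBoard view) would be:
    # writer = tf.summary.FileWriter("logs", sess.graph)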
    plt.figure(figsize=(12, 4))
    plt.ion()
    plt.show()
    for i in range(300):
        seq, res, xs = get_batch()
        if i == 0:
            feed_dict = {
                model.xs: seq,
                model.ys: res,
                # no initial state given: the cell's zero state is used
            }
        else:
            feed_dict = {
                model.xs: seq,
                model.ys: res,
                model.cell_init_state: state  # use last state as the initial state for this run
            }

        _, cost, state, pred = sess.run(
            [model.train_op, model.loss, model.cell_final_state, model.pred],
            feed_dict=feed_dict)

        # plotting: red = target (cos), blue dashed = prediction
        plt.plot(xs[0, :], res[0].flatten(), 'r', xs[0, :], pred.flatten()[:TIME_STEPS], 'b--')
        plt.ylim((-1.5, 1.5))
        plt.draw()
        plt.pause(0.1)

        if i % 20 == 0:
            print('loss: ', round(cost, 4))
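
A small optional addition at the end of the main block, assuming you want the final fit to stay on screen after the 300 iterations finish (with plt.ion() alone, the window closes as soon as the script exits):

    plt.ioff()  # leave interactive mode
    plt.show()  # block so the final figure stays open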