2.4 scope (name_scope/variable_scope)

In TensorFlow's graph mode, tf.name_scope only prefixes the names of ops and of variables created with tf.Variable; it is ignored by tf.get_variable. tf.variable_scope prefixes both kinds of variables and, together with reuse, is the mechanism for sharing variables. The two listings below demonstrate this.
from __future__ import print_function
import tensorflow as tf

with tf.name_scope("a_name_scope"):
    initializer = tf.constant_initializer(value=1)
    var1 = tf.get_variable(name='var1', shape=[1], dtype=tf.float32, initializer=initializer)
    var2 = tf.Variable(name='var2', initial_value=[2], dtype=tf.float32)
    var21 = tf.Variable(name='var2', initial_value=[2.1], dtype=tf.float32)
    var22 = tf.Variable(name='var2', initial_value=[2.2], dtype=tf.float32)

with tf.Session() as sess:
    # deprecated since TF 0.12; there, use tf.global_variables_initializer()
    sess.run(tf.initialize_all_variables())
    print(var1.name)        # var1:0 -- tf.get_variable ignores name_scope
    print(sess.run(var1))   # [ 1.]
    print(var2.name)        # a_name_scope/var2:0
    print(sess.run(var2))   # [ 2.]
    print(var21.name)       # a_name_scope/var2_1:0 -- tf.Variable auto-renames on name collision
    print(sess.run(var21))  # [ 2.0999999]
    print(var22.name)       # a_name_scope/var2_2:0
    print(sess.run(var22))  # [ 2.20000005]

with tf.variable_scope("a_variable_scope") as scope:
    initializer = tf.constant_initializer(value=3)
    var3 = tf.get_variable(name='var3', shape=[1], dtype=tf.float32, initializer=initializer)
    var4 = tf.Variable(name='var4', initial_value=[4], dtype=tf.float32)
    var4_reuse = tf.Variable(name='var4', initial_value=[4], dtype=tf.float32)
    scope.reuse_variables()  # mark variables in this scope as reusable
    var3_reuse = tf.get_variable(name='var3')  # returns the existing var3

with tf.Session() as sess:
    # tf.initialize_all_variables() is no longer valid from
    # 2017-03-02 if using tensorflow >= 0.12
    if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
        init = tf.initialize_all_variables()
    else:
        init = tf.global_variables_initializer()
    sess.run(init)
    print(var3.name)             # a_variable_scope/var3:0
    print(sess.run(var3))        # [ 3.]
    print(var4.name)             # a_variable_scope/var4:0
    print(sess.run(var4))        # [ 4.]
    print(var4_reuse.name)       # a_variable_scope/var4_1:0 -- tf.Variable never reuses
    print(sess.run(var4_reuse))  # [ 4.]
    print(var3_reuse.name)       # a_variable_scope/var3:0 -- same variable as var3
    print(sess.run(var3_reuse))  # [ 3.]

# visit https://morvanzhou.github.io/tutorials/ for more!
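One detail the listing above relies on: without reuse, calling tf.get_variable twice with the same name in the same variable_scope raises a ValueError instead of silently creating a second variable. A minimal sketch (assuming TF 1.x graph mode; the scope name 'demo' is arbitrary):

with tf.variable_scope('demo'):
    v = tf.get_variable('v', shape=[1])
    try:
        tf.get_variable('v', shape=[1])  # reuse not enabled yet
    except ValueError as e:
        print(e)                         # Variable demo/v already exists ...

with tf.variable_scope('demo', reuse=True):
    v_shared = tf.get_variable('v')      # OK: returns the existing demo/v
print(v is v_shared)                     # True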
RNNs commonly involve this kind of repetition: the graph built for training and the graph built for testing differ in structure (e.g. different time_steps), yet the two must use exactly the same parameters. This is where scope.reuse_variables() comes in, as the full example below shows.
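Distilled to its essentials, the pattern looks like this. A minimal sketch only: make_net, train_x and test_x are hypothetical stand-ins for the RNN example that follows.

import tensorflow as tf

def make_net(x):
    # under reuse, tf.get_variable returns the existing 'net/w' instead of creating a new one
    w = tf.get_variable('w', shape=[10, 2], initializer=tf.constant_initializer(0.1))
    return tf.matmul(x, w)

train_x = tf.placeholder(tf.float32, [None, 10])
test_x = tf.placeholder(tf.float32, [None, 10])

with tf.variable_scope('net') as scope:
    train_out = make_net(train_x)   # creates net/w
    scope.reuse_variables()
    test_out = make_net(test_x)     # reuses net/w: same parameters, different graph path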
22 scope (name_scope/variable_scope)
from __future__ import print_function
import tensorflow as tf


class TrainConfig:
    batch_size = 20
    time_steps = 20
    input_size = 10
    output_size = 2
    cell_size = 11
    learning_rate = 0.01


class TestConfig(TrainConfig):
    time_steps = 1


class RNN(object):

    def __init__(self, config):
        self._batch_size = config.batch_size
        self._time_steps = config.time_steps
        self._input_size = config.input_size
        self._output_size = config.output_size
        self._cell_size = config.cell_size
        self._lr = config.learning_rate
        self._built_RNN()

    def _built_RNN(self):
        with tf.variable_scope('inputs'):
            self._xs = tf.placeholder(tf.float32, [self._batch_size, self._time_steps, self._input_size], name='xs')
            self._ys = tf.placeholder(tf.float32, [self._batch_size, self._time_steps, self._output_size], name='ys')
        with tf.name_scope('RNN'):
            with tf.variable_scope('input_layer'):
                l_in_x = tf.reshape(self._xs, [-1, self._input_size], name='2_2D')  # (batch*n_step, in_size)
                # Ws (in_size, cell_size)
                Wi = self._weight_variable([self._input_size, self._cell_size])
                print(Wi.name)
                # bs (cell_size, )
                bi = self._bias_variable([self._cell_size, ])
                # l_in_y = (batch * n_steps, cell_size)
                with tf.name_scope('Wx_plus_b'):
                    l_in_y = tf.matmul(l_in_x, Wi) + bi
                l_in_y = tf.reshape(l_in_y, [-1, self._time_steps, self._cell_size], name='2_3D')

            with tf.variable_scope('cell'):
                cell = tf.contrib.rnn.BasicLSTMCell(self._cell_size)
                with tf.name_scope('initial_state'):
                    self._cell_initial_state = cell.zero_state(self._batch_size, dtype=tf.float32)

                self.cell_outputs = []
                cell_state = self._cell_initial_state
                for t in range(self._time_steps):
                    # the LSTM reuses its own weights across time steps
                    if t > 0: tf.get_variable_scope().reuse_variables()
                    cell_output, cell_state = cell(l_in_y[:, t, :], cell_state)
                    self.cell_outputs.append(cell_output)
                self._cell_final_state = cell_state

            with tf.variable_scope('output_layer'):
                # cell_outputs_reshaped (BATCH*TIME_STEP, CELL_SIZE)
                # note: tf.concat takes (values, axis) since TF 1.0
                cell_outputs_reshaped = tf.reshape(tf.concat(self.cell_outputs, 1), [-1, self._cell_size])
                Wo = self._weight_variable((self._cell_size, self._output_size))
                bo = self._bias_variable((self._output_size,))
                product = tf.matmul(cell_outputs_reshaped, Wo) + bo
                # _pred shape (batch*time_step, output_size)
                self._pred = tf.nn.relu(product)  # for displacement

        with tf.name_scope('cost'):
            _pred = tf.reshape(self._pred, [self._batch_size, self._time_steps, self._output_size])
            mse = self.ms_error(_pred, self._ys)
            mse_ave_across_batch = tf.reduce_mean(mse, 0)
            mse_sum_across_time = tf.reduce_sum(mse_ave_across_batch, 0)
            self._cost = mse_sum_across_time
            self._cost_ave_time = self._cost / self._time_steps

        with tf.name_scope('train'):
            self._lr = tf.convert_to_tensor(self._lr)
            self.train_op = tf.train.AdamOptimizer(self._lr).minimize(self._cost)

    @staticmethod
    def ms_error(y_pre, y_target):
        # tf.sub was renamed tf.subtract in TF 1.0
        return tf.square(tf.subtract(y_pre, y_target))

    @staticmethod
    def _weight_variable(shape, name='weights'):
        initializer = tf.random_normal_initializer(mean=0., stddev=0.5)
        return tf.get_variable(shape=shape, initializer=initializer, name=name)

    @staticmethod
    def _bias_variable(shape, name='biases'):
        initializer = tf.constant_initializer(0.1)
        return tf.get_variable(name=name, shape=shape, initializer=initializer)


if __name__ == '__main__':
    train_config = TrainConfig()
    test_config = TestConfig()

    # the wrong method to reuse parameters in train rnn:
    # two different variable scopes create two separate parameter sets,
    # even though train and test should share the same parameters
    with tf.variable_scope('train_rnn'):
        train_rnn1 = RNN(train_config)
    with tf.variable_scope('test_rnn'):
        test_rnn1 = RNN(test_config)

    # the right method to reuse parameters in train rnn
    with tf.variable_scope('rnn') as scope:
        sess = tf.Session()
        train_rnn2 = RNN(train_config)
        scope.reuse_variables()
        test_rnn2 = RNN(test_config)
        # tf.initialize_all_variables() is no longer valid from
        # 2017-03-02 if using tensorflow >= 0.12
        if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
            init = tf.initialize_all_variables()
        else:
            init = tf.global_variables_initializer()
        sess.run(init)
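A quick way to confirm the difference, assuming the snippet is appended at the end of the __main__ block above after the graph has been built: list the trainable variables. The wrong method creates two full parameter sets, while the right one creates a single shared set (note that the 'RNN' name_scope does not appear in the variable names, since they were created with tf.get_variable):

    # sketch: inspect which parameter sets actually exist in the graph
    for v in tf.trainable_variables():
        print(v.name)
    # train_rnn/input_layer/weights:0 and test_rnn/input_layer/weights:0 both exist,
    # but only a single rnn/input_layer/weights:0 is shared by train_rnn2 and test_rnn2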