tensorflow note
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# @date:
# @name: first_tf_1223
# @author: vickey-wu
from __future__ import print_function

import os
# silence TensorFlow's C++ startup logging ('2' hides INFO and WARNING messages)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf
# constant
node1 = tf.constant(3.0, dtype=tf.float32)
node2 = tf.constant(4.0)  # node2 is implicitly tf.float32 as well
print(node1, node2)       # prints the tensor objects, not their values

# Session
sess = tf.Session()
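# to see the actual values, the constants have to be evaluated inside the session:
print(sess.run([node1, node2]))  # [3.0, 4.0]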
# placeholder
a = tf.placeholder(tf.float32)  # a placeholder is a promise to provide a value later
b = tf.placeholder(tf.float32)
adder_node = a + b
print(sess.run(adder_node, {a: 3, b: 4.5}))          # run(fetches, feed_dict) -> 7.5
print(sess.run(adder_node, {a: [1, 3], b: [2, 4]}))  # feed_dict values may also be lists -> [3. 7.]
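# placeholder graphs compose like any other op; a quick extra example (the values are my own):
add_and_triple = adder_node * 3.
print(sess.run(add_and_triple, {a: 3, b: 4.5}))  # (3 + 4.5) * 3 = 22.5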
# Variable
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
x = tf.placeholder(tf.float32)
linear_model = W * x + b

# tf.Variable must be explicitly initialized; tf.constant is initialized on creation
init = tf.global_variables_initializer()
sess.run(init)
# for each fed x, linear_model computes W * x + b
print(sess.run(linear_model, {x: [1, 2, 3, 4]}))  # [0.  0.3  0.6  0.9]
# loss function: evaluate whether the model we built is good or not
y = tf.placeholder(tf.float32)                # desired values
squared_deltas = tf.square(linear_model - y)  # a vector of squared error deltas
loss = tf.reduce_sum(squared_deltas)          # a single scalar summarizing the error over all examples
print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))  # 23.66
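# sanity check of that loss value in plain NumPy (same data, initial W = 0.3, b = -0.3):
import numpy as np
pred = 0.3 * np.array([1, 2, 3, 4]) - 0.3                # [0.  0.3  0.6  0.9]
print(np.sum((pred - np.array([0, -1, -2, -3])) ** 2))   # ~23.66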
# manually reassign W and b to the optimal values for this data
fixW = tf.assign(W, [-1.])  # tf.assign changes the value of an already-initialized Variable
fixb = tf.assign(b, [1.])
sess.run([fixW, fixb])
print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))  # 0.0, a perfect fit
# tf.train API
# machine learning is about finding the correct model parameters automatically
# TensorFlow provides optimizers that slowly change each variable in order to minimize the loss function
# the simplest optimizer is gradient descent
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
sess.run(init)  # reset W and b to their (incorrect) initial values
for i in range(1000):
    sess.run(train, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})
print(sess.run([W, b]))  # close to the optimum: W ≈ -1, b ≈ 1
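# for intuition, one gradient-descent step written out by hand with tf.gradients
# (a sketch only; grad_W, grad_b, lr and manual_step are my own names):
grad_W, grad_b = tf.gradients(loss, [W, b])  # d(loss)/dW and d(loss)/db
lr = 0.01
manual_step = tf.group(
    tf.assign(W, W - lr * grad_W),
    tf.assign(b, b - lr * grad_b),
)
# sess.run(manual_step, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}) performs one update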
###########################
# complete trainable linear regression model
# model parameters
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
# model input and output
x = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)
linear_model = W * x + b
# loss
loss = tf.reduce_sum(tf.square(linear_model - y))
# optimizer
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
# training data
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]
# training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in range(1000):
    sess.run(train, {x: x_train, y: y_train})
# evaluate training accuracy
curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x: x_train, y: y_train})
print("W: %s b: %s loss: %s" % (curr_W, curr_b, curr_loss))
##########################
# tf.estimator (high-level API)
import numpy as np
# import tensorflow as tf  # already imported above

# declare the list of features; there is only one numeric feature "x"
feature_columns = [tf.feature_column.numeric_column("x", shape=[1])]
# an estimator is the front end to invoke training and evaluation
estimator = tf.estimator.LinearRegressor(feature_columns=feature_columns)
# TensorFlow provides many helper methods to read and set up data sets
x_train = np.array([1., 2., 3., 4.])
y_train = np.array([0., -1., -2., -3.])
x_eval = np.array([2., 5., 8., 1.])
y_eval = np.array([-1.01, -4.1, -7., 0.])
input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": x_train}, y_train, batch_size=4, num_epochs=None, shuffle=True
)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": x_train}, y_train, batch_size=4, num_epochs=1000, shuffle=False
)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": x_eval}, y_eval, batch_size=4, num_epochs=1000, shuffle=False
)
# invoke training by calling the train method and passing the training data set
estimator.train(input_fn=input_fn, steps=1000)
# evaluate how well the model did
train_metrics = estimator.evaluate(input_fn=train_input_fn)
eval_metrics = estimator.evaluate(input_fn=eval_input_fn)
print("train metrics: %r" % train_metrics)
print("eval metrics: %r" % eval_metrics)
#######################