#!/usr/bin/python
# -*- coding: UTF-8 -*-
# @date: // :
# @name: first_tf_1223
# @author: vickey-wu

from __future__ import print_function

import tensorflow as tf
import os

# silence TensorFlow's C++ log output; '3' filters messages up to ERROR (value assumed)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

# constant
node1 = tf.constant(3.0, dtype=tf.float32)
node2 = tf.constant(4.0)  # node2 is also implicitly of dtype tf.float32
print(node1, node2)  # prints the Tensor objects themselves, not their values

# Session
sess = tf.Session()
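# A quick sketch (not in the original snippet): running the nodes in the session
# is what actually produces their values.
print(sess.run([node1, node2]))  # expected output: [3.0, 4.0]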
# placeholder
a = tf.placeholder(tf.float32)  # a placeholder is a promise to provide a value later
b = tf.placeholder(tf.float32)
adder_node = a + b
print(sess.run(adder_node, {a: 3, b: 4.5}))          # fetches=adder_node, feed_dict is a dict of placeholder values
print(sess.run(adder_node, {a: [1, 3], b: [2, 4]}))  # the fed values can also be lists
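# Graphs compose: a small sketch (not in the original snippet) building on adder_node.
add_and_triple = adder_node * 3.0
print(sess.run(add_and_triple, {a: 3, b: 4.5}))  # (3 + 4.5) * 3 = 22.5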
# Variable
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
x = tf.placeholder(tf.float32)
linear_model = W * x + b
init = tf.global_variables_initializer()  # unlike tf.constant, a tf.Variable must be explicitly initialized
sess.run(init)
print(sess.run(linear_model, {x: [1, 2, 3, 4]}))  # evaluates W * x + b for x=1, 2, 3, 4

# a loss function measures how far the current model is from the desired output
y = tf.placeholder(tf.float32) # desired values
squared_deltas = tf.square(linear_model - y)  # creates a vector of squared error deltas
loss = tf.reduce_sum(squared_deltas)  # a single scalar aggregating the error over all examples
print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))
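# Worked check (plain-Python sketch, not in the original) of what the loss above computes:
# with the initial W=0.3, b=-0.3 the predictions are [0.0, 0.3, 0.6, 0.9], so the squared
# deltas against y=[0, -1, -2, -3] are [0.0, 1.69, 6.76, 15.21], summing to about 23.66.
pred = [0.3 * xi - 0.3 for xi in [1, 2, 3, 4]]
print(sum((p - yi) ** 2 for p, yi in zip(pred, [0, -1, -2, -3])))  # ~23.66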
# manually reassign W and b to the optimal values for this data
fixW = tf.assign(W, [-1.])  # tf.assign changes the value of an already-initialized Variable
fixb = tf.assign(b, [1.])
sess.run([fixW, fixb])
print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))  # with W=-1, b=1 the model fits exactly, so the loss is 0

# tf.train API
# the point of machine learning is to find the correct model parameters automatically
# TensorFlow provides optimizers that slowly change each variable in order to minimize the loss function
# The simplest optimizer is gradient descent
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
sess.run(init)  # reset W and b to their (incorrect) initial values before training
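# Sketch (not in the original): the optimizer's updates are driven by the gradients of the
# loss with respect to each variable; tf.gradients exposes them for inspection.
grad_W, grad_b = tf.gradients(loss, [W, b])
print(sess.run([grad_W, grad_b], {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))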
for i in range(1000):
    sess.run(train, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})
print(sess.run([W, b]))  # after training, W is close to -1 and b is close to 1

###########################
# complete trainable linear regression model
# model parameters
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
# model input and output
x = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)
linear_model = W * x + b

# loss
loss = tf.reduce_sum(tf.square(linear_model - y))
# optimizer
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)

# training data
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]
# training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in range(1000):
    sess.run(train, {x: x_train, y: y_train})

# evaluate training accuracy
curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x: x_train, y: y_train})
print("W: %s b: %s loss: %s" % (curr_W, curr_b, curr_loss))
####################################################
import numpy as np
# import tensorflow as tf  # already imported above

# declare the list of features; here there is a single numeric feature, "x"
feature_columns = [tf.feature_column.numeric_column("x", shape=[1])]
# an estimator is the front end to invoke training and evaluation.
estimator = tf.estimator.LinearRegressor(feature_columns=feature_columns)
# TensorFlow provides many helper methods to read and set up data sets
x_train = np.array([1., 2., 3., 4.])
y_train = np.array([0., -1., -2., -3.])
x_eval = np.array([2., 5., 8., 1.])
y_eval = np.array([-1.01, -4.1, -7., 0.])
input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": x_train}, y_train, batch_size=4, num_epochs=None, shuffle=True
)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": x_train}, y_train, batch_size=4, num_epochs=1000, shuffle=False
)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": x_eval}, y_eval, batch_size=4, num_epochs=1000, shuffle=False
)

# we can invoke 1000 training steps by calling the train method and passing the training data set
estimator.train(input_fn=input_fn, steps=1000)

# here we evaluate how well our model did
train_metrics = estimator.evaluate(input_fn=train_input_fn)
eval_metrics = estimator.evaluate(input_fn=eval_input_fn)
print("train metrics: %r" % train_metrics)
print("eval metrics: %r" % eval_metrics) #######################
