1. Construct a tensor directly from data

x = torch.tensor([5.5,3])

2. Construct a tensor from an existing tensor. These methods reuse properties of the original tensor (such as dtype and device) unless they are overridden.

x = x.new_ones(5,3,dtype=torch.double)

torch.randn_like(x,dtype=torch.float)
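Putting those lines together into a minimal runnable sketch (the variable names and the printed dtypes in the comments are just for illustration):

import torch

x = torch.tensor([5.5, 3])                   # float32 by default
x = x.new_ones(5, 3, dtype=torch.double)     # reuses x's device, overrides the dtype
y = torch.randn_like(x, dtype=torch.float)   # reuses x's shape, overrides the dtype
print(x.dtype, y.dtype, y.shape)             # torch.float64 torch.float32 torch.Size([5, 3])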

3. Get the shape of a tensor

x.shape  # an attribute, not a method

x.size()
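Both forms return a torch.Size; a quick sketch to compare them:

import torch

x = torch.rand(5, 3)
print(x.shape)   # torch.Size([5, 3]) -- attribute
print(x.size())  # torch.Size([5, 3]) -- method; both return a torch.Size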

4. Tensor operations

x = torch.rand(5,3)     y = torch.rand(5,3)

x+y    torch.add(x,y)

result = torch.empty(5,3)     torch.add(x, y, out=result)  # or simply: result = x + y

y.add_(x)  # in-place: the result is stored in y
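A minimal runnable sketch tying the three forms together (the result/z names are just for illustration):

import torch

x = torch.rand(5, 3)
y = torch.rand(5, 3)

result = torch.empty(5, 3)
torch.add(x, y, out=result)  # write the sum into a pre-allocated tensor
z = x + y                    # same values
y.add_(x)                    # in-place: y now holds the old y plus x

print(torch.allclose(result, z), torch.allclose(y, z))  # True True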

5. NumPy-style indexing works on tensors

x[:,1:]
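A few more NumPy-style patterns that work the same way; a small sketch (the boolean-mask line is an extra illustration, not from the original note):

import torch

x = torch.rand(5, 3)
print(x[:, 1:])    # all rows, columns 1 onward -> shape (5, 2)
print(x[0])        # first row
print(x[x > 0.5])  # boolean-mask indexing also works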

6. Resizing (NumPy uses reshape; in PyTorch use view)

x = torch.randn(4,4)   y = x.view(16)  z = x.view(-1,8)
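A quick check of the resulting shapes (expected output shown in the comment):

import torch

x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8)  # -1 tells view to infer this dimension from the others
print(x.size(), y.size(), z.size())  # torch.Size([4, 4]) torch.Size([16]) torch.Size([2, 8])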

7. For a tensor with a single element, the .item() method returns its value as a Python number

x = torch.randn(1)     x.data   x.grad    x.item()    z.transpose(1,0)
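A tiny sketch of the one-element case (the printed value will vary because the tensor is random):

import torch

x = torch.randn(1)
print(x)         # a one-element tensor, e.g. tensor([0.4321])
print(x.item())  # a plain Python float
print(x.data)    # the underlying data as a tensor, without autograd history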

8. Converting between NumPy arrays and tensors

a = torch.ones(5)    b = a.numpy()  # a and b share the same memory

a = np.ones(5)       b = torch.from_numpy(a)  # a and b share the same memory
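A small sketch that makes the shared memory visible: an in-place update on either side shows up on the other (this holds for CPU tensors):

import numpy as np
import torch

a = torch.ones(5)
b = a.numpy()
a.add_(1)            # in-place change on the tensor side...
print(b)             # ...shows up in the array: [2. 2. 2. 2. 2.]

c = np.ones(5)
d = torch.from_numpy(c)
np.add(c, 1, out=c)  # in-place change on the array side...
print(d)             # ...shows up in the tensor: tensor([2., 2., 2., 2., 2.], dtype=torch.float64)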

9. CUDA tensors

if torch.cuda.is_available():
    device = torch.device("cuda")
    y = torch.ones_like(x, device=device)  # create the tensor directly on the GPU
    x = x.to(device)                        # or move an existing tensor to the GPU

y.cpu().data.numpy()    y.to("cpu").data.numpy()  # move back to the CPU before converting to NumPy
model = model.cuda()                              # move a whole model onto the GPU
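Putting the pieces above into one self-contained sketch (the 4x4 shape is just for illustration; the GPU branch only runs when CUDA is available):

import torch

x = torch.randn(4, 4)

if torch.cuda.is_available():
    device = torch.device("cuda")
    y = torch.ones_like(x, device=device)  # created directly on the GPU
    x = x.to(device)                        # moved to the GPU
    z = x + y                               # computed on the GPU
    print(z.to("cpu", torch.double))        # .to can move back and change dtype at once
    print(z.cpu().numpy())                  # must be on the CPU before calling .numpy()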

10. A two-layer neural network implemented with NumPy

import numpy as np

# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10

x = np.random.randn(N, D_in)
y = np.random.randn(N, D_out)
w1 = np.random.randn(D_in, H)
w2 = np.random.randn(H, D_out)

learning_rate = 1e-6
for t in range(500):
    # Forward pass
    h = x.dot(w1)            # (N, H)
    h_relu = np.maximum(h, 0)
    y_pred = h_relu.dot(w2)  # (N, D_out)

    # Compute loss
    loss = np.square(y_pred - y).sum()
    print(t, loss)

    # Backward pass: compute gradients of w1 and w2 with respect to loss
    grad_y_pred = 2.0 * (y_pred - y)
    grad_w2 = h_relu.T.dot(grad_y_pred)
    grad_h_relu = grad_y_pred.dot(w2.T)
    grad_h = grad_h_relu.copy()
    grad_h[h < 0] = 0
    grad_w1 = x.T.dot(grad_h)

    # Update weights
    w1 -= learning_rate * grad_w1
    w2 -= learning_rate * grad_w2

11. A two-layer neural network implemented with PyTorch tensors

import torch

dtype = torch.float
device = torch.device("cpu")
# device = torch.device("cuda:0")  # Uncomment this to run on GPU

# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10

# Create random input and output data
x = torch.randn(N, D_in, device=device, dtype=dtype)
y = torch.randn(N, D_out, device=device, dtype=dtype)

# Randomly initialize weights
w1 = torch.randn(D_in, H, device=device, dtype=dtype)
w2 = torch.randn(H, D_out, device=device, dtype=dtype)

learning_rate = 1e-6
for t in range(500):
    # Forward pass: compute predicted y
    h = x.mm(w1)
    h_relu = h.clamp(min=0)
    y_pred = h_relu.mm(w2)

    # Compute and print loss
    loss = (y_pred - y).pow(2).sum().item()
    print(t, loss)

    # Backprop to compute gradients of w1 and w2 with respect to loss
    grad_y_pred = 2.0 * (y_pred - y)
    grad_w2 = h_relu.t().mm(grad_y_pred)
    grad_h_relu = grad_y_pred.mm(w2.t())
    grad_h = grad_h_relu.clone()
    grad_h[h < 0] = 0
    grad_w1 = x.t().mm(grad_h)

    # Update weights using gradient descent
    w1 -= learning_rate * grad_w1
    w2 -= learning_rate * grad_w2

autograd

import torch

dtype = torch.float
device = torch.device("cpu")
# device = torch.device("cuda:0")  # Uncomment this to run on GPU

# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10

# Create random Tensors to hold the input and output.
# The default requires_grad=False means we do not need gradients for these
# Tensors during the backward pass.
x = torch.randn(N, D_in, device=device, dtype=dtype)
y = torch.randn(N, D_out, device=device, dtype=dtype)

# Create random Tensors for the weights.
# requires_grad=True means we want gradients computed for these Tensors
# during the backward pass.
w1 = torch.randn(D_in, H, device=device, dtype=dtype, requires_grad=True)
w2 = torch.randn(H, D_out, device=device, dtype=dtype, requires_grad=True)

learning_rate = 1e-6
for t in range(500):
    # Forward pass: predict y using Tensor operations. This is the same forward
    # pass as before, but we do not need to keep the intermediate values, because
    # we are not computing the backward pass by hand.
    y_pred = x.mm(w1).clamp(min=0).mm(w2)

    # Compute loss. loss is a Tensor of shape (1,); loss.item() returns it as a
    # Python scalar.
    loss = (y_pred - y).pow(2).sum()
    print(t, loss.item())

    # Use autograd for the backward pass. Because w1 and w2 have
    # requires_grad=True, backward() computes the gradient of loss with respect
    # to them; afterwards w1.grad and w2.grad hold those gradients.
    loss.backward()

    # Manually update the weights with gradient descent (later we will use an
    # optimizer for this). Wrap the updates in torch.no_grad(): w1 and w2
    # require grad, but we do not want autograd to track the update itself.
    # An alternative is to operate on weight.data and weight.grad.data:
    # tensor.data gives a tensor that shares storage with the original tensor
    # but does not record history in the computation graph.
    with torch.no_grad():
        w1 -= learning_rate * w1.grad
        w2 -= learning_rate * w2.grad

        # Manually zero the gradients after updating weights
        w1.grad.zero_()
        w2.grad.zero_()

optim

import torch

# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10

# Create random Tensors to hold inputs and outputs
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)

# Use the nn package to define our model and loss function.
model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out),
)
loss_fn = torch.nn.MSELoss(reduction='sum')

# Use the optim package to define an Optimizer that will update the weights of
# the model for us. Here we will use Adam; the optim package contains many other
# optimization algorithms. The first argument to the Adam constructor tells the
# optimizer which Tensors it should update.
learning_rate = 1e-4
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

for t in range(500):
    # Forward pass: compute predicted y by passing x to the model.
    y_pred = model(x)

    # Compute and print loss.
    loss = loss_fn(y_pred, y)
    print(t, loss.item())

    # Before the backward pass, use the optimizer object to zero all of the
    # gradients for the variables it will update (the learnable weights of the
    # model). This is because by default, gradients are accumulated in buffers
    # (i.e. not overwritten) whenever .backward() is called. Check out the docs
    # of torch.autograd.backward for more details.
    optimizer.zero_grad()

    # Backward pass: compute gradient of the loss with respect to model parameters
    loss.backward()

    # Calling the step function on an Optimizer makes an update to its parameters
    optimizer.step()

Custom nn Modules




import torch

class TwoLayerNet(torch.nn.Module):
    def __init__(self, D_in, H, D_out):
        """
        In the constructor we instantiate two nn.Linear modules and assign them as
        member variables.
        """
        super(TwoLayerNet, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H)
        self.linear2 = torch.nn.Linear(H, D_out)

    def forward(self, x):
        """
        In the forward function we accept a Tensor of input data and we must return
        a Tensor of output data. We can use Modules defined in the constructor as
        well as arbitrary operators on Tensors.
        """
        h_relu = self.linear1(x).clamp(min=0)
        y_pred = self.linear2(h_relu)
        return y_pred

# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10

# Create random Tensors to hold inputs and outputs
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)

# Construct our model by instantiating the class defined above
model = TwoLayerNet(D_in, H, D_out)

# Construct our loss function and an Optimizer. The call to model.parameters()
# in the SGD constructor will contain the learnable parameters of the two
# nn.Linear modules which are members of the model.
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
for t in range(500):
    # Forward pass: Compute predicted y by passing x to the model
    y_pred = model(x)

    # Compute and print loss
    loss = criterion(y_pred, y)
    print(t, loss.item())

    # Zero gradients, perform a backward pass, and update the weights.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

