PyTorch (1): Basics
import torch
import torchvision
import torch.nn as nn
import numpy as np
import torchvision.transforms as transforms

# ================================================================== #
#                         Table of Contents                          #
# ================================================================== #

# 1. Basic autograd example 1
# 2. Basic autograd example 2
# 3. Loading data from numpy
# 4. Input pipeline
# 5. Input pipeline for custom dataset
# 6. Pretrained model
# 7. Save and load model
# ================================================================== #
#                     1. Basic autograd example 1                    #
# ================================================================== #

# Create tensors.
x = torch.tensor(1., requires_grad=True)
w = torch.tensor(2., requires_grad=True)
b = torch.tensor(3., requires_grad=True)

# Build a computational graph.
y = w * x + b    # y = 2 * x + 3

# Compute gradients.
y.backward()

# Print out the gradients.
print(x.grad)    # x.grad = 2 (dy/dx = w)
print(w.grad)    # w.grad = 1 (dy/dw = x)
print(b.grad)    # b.grad = 1 (dy/db = 1)
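# A minimal sketch (not in the original tutorial): the same gradients can be
# computed functionally with torch.autograd.grad, which returns them directly
# instead of accumulating into the .grad attributes.
x2 = torch.tensor(1., requires_grad=True)
w2 = torch.tensor(2., requires_grad=True)
b2 = torch.tensor(3., requires_grad=True)
y2 = w2 * x2 + b2
dx, dw, db = torch.autograd.grad(y2, [x2, w2, b2])
print(dx, dw, db)    # tensor(2.), tensor(1.), tensor(1.)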
# ================================================================== #
#                     2. Basic autograd example 2                    #
# ================================================================== #

# Create tensors of shape (10, 3) and (10, 2).
x = torch.randn(10, 3)
y = torch.randn(10, 2)

# Build a fully connected layer.
linear = nn.Linear(3, 2)       # computes x @ weight^T + bias
print('w: ', linear.weight)    # shape (out_features, in_features)
print('b: ', linear.bias)      # shape (out_features,)

# Build loss function and optimizer.
criterion = nn.MSELoss()    # mean squared error; the default reduction='mean'
                            # replaces the deprecated 'elementwise_mean'
optimizer = torch.optim.SGD(linear.parameters(), lr=0.01)

# Forward pass.
pred = linear(x)

# Compute loss.
loss = criterion(pred, y)
print('loss: ', loss.item())

# Backward pass.
loss.backward()

# Print out the gradients.
print('dL/dw: ', linear.weight.grad)
print('dL/db: ', linear.bias.grad)

# 1-step gradient descent (one forward and backward pass).
optimizer.step()

# You can also perform gradient descent at the low level:
# linear.weight.data.sub_(0.01 * linear.weight.grad.data)
# linear.bias.data.sub_(0.01 * linear.bias.grad.data)

# Print out the loss after 1-step gradient descent.
pred = linear(x)
loss = criterion(pred, y)
print('loss after 1 step optimization: ', loss.item())
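# A minimal sketch (not in the original tutorial): the one-step update above
# generalizes to a training loop; gradients must be zeroed each iteration,
# otherwise backward() accumulates them across steps.
for step in range(10):
    pred = linear(x)
    loss = criterion(pred, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
print('loss after 10 more steps: ', loss.item())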
# ================================================================== #
#                     3. Loading data from numpy                     #
# ================================================================== #

# Create a numpy array.
x = np.array([[1, 2], [3, 4]])

# Convert the numpy array to a torch tensor.
y = torch.from_numpy(x)

# Convert the torch tensor to a numpy array.
z = y.numpy()
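# Note (not in the original tutorial): torch.from_numpy() and Tensor.numpy()
# share the underlying memory, so modifying one in place is visible in the other.
x[0, 0] = 100
print(y[0, 0])    # tensor(100) -- the change to the numpy array shows up in y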
# ================================================================== #
#                         4. Input pipeline                          #
# ================================================================== #

# Download and construct the CIFAR-10 dataset.
train_dataset = torchvision.datasets.CIFAR10(root='../../data/',
                                             train=True,
                                             transform=transforms.ToTensor(),
                                             download=True)

# Fetch one data pair (reads the data from disk).
image, label = train_dataset[0]
print(image.size())
print(label)

# Data loader (this provides queues and threads in a very simple way).
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=64,
                                           shuffle=True)

# When iteration starts, the queue and threads start loading data from files.
data_iter = iter(train_loader)

# Mini-batch images and labels.
images, labels = next(data_iter)

# Actual usage of the data loader is as below.
for batch_idx, (images, labels) in enumerate(train_loader):
    # Training code should be written here.
    pass
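# A sketch (not in the original tutorial): transforms.ToTensor() above can be
# replaced with a composed preprocessing pipeline, e.g. with data augmentation.
transform = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
])
augmented_dataset = torchvision.datasets.CIFAR10(root='../../data/',
                                                 train=True,
                                                 transform=transform,
                                                 download=True)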
# ================================================================== #
#                5. Input pipeline for custom dataset                #
# ================================================================== #

# You should build your custom dataset as below.
class CustomDataset(torch.utils.data.Dataset):
    def __init__(self):
        # TODO
        # 1. Initialize file paths or a list of file names, e.g.:
        # xy = np.loadtxt('../../data/diabets.csv.gz')
        # self.len = xy.shape[0]
        # self.x_data = torch.from_numpy(xy[:, 0:-1])
        # self.y_data = torch.from_numpy(xy[:, [-1]])
        pass

    def __getitem__(self, index):
        # TODO
        # 1. Read one data item from file (e.g. using numpy.fromfile, PIL.Image.open).
        # 2. Preprocess the data (e.g. using torchvision.transforms).
        # 3. Return a data pair (e.g. image and label).
        # return self.x_data[index], self.y_data[index]
        pass

    def __len__(self):
        # You should change 0 to the total size of your dataset.
        # return self.len
        return 0

# You can then use the prebuilt data loader.
custom_dataset = CustomDataset()
train_loader = torch.utils.data.DataLoader(dataset=custom_dataset,
                                           batch_size=32,
                                           shuffle=True)
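# A sketch (not in the original tutorial): when the data already fits in memory
# as tensors, torch.utils.data.TensorDataset avoids writing a custom class.
x_mem = torch.randn(100, 3)
y_mem = torch.randn(100, 1)
memory_dataset = torch.utils.data.TensorDataset(x_mem, y_mem)
memory_loader = torch.utils.data.DataLoader(dataset=memory_dataset,
                                            batch_size=32,
                                            shuffle=True)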
# ================================================================== #
#                        6. Pretrained model                         #
# ================================================================== #

# Download and load the pretrained ResNet-18.
# (Newer torchvision versions replace pretrained=True with the weights= argument.)
resnet = torchvision.models.resnet18(pretrained=True)

# If you want to finetune only the top layer of the model, set as below.
for param in resnet.parameters():
    param.requires_grad = False

# Replace the top layer for finetuning.
resnet.fc = nn.Linear(resnet.fc.in_features, 100)    # 100 is an example.

# Forward pass.
images = torch.randn(64, 3, 224, 224)
outputs = resnet(images)
print(outputs.size())    # (64, 100)
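# A sketch (not in the original tutorial): the newly created fc layer has
# requires_grad=True by default, so finetuning only the top layer amounts to
# optimizing just its parameters.
optimizer = torch.optim.SGD(resnet.fc.parameters(), lr=0.001, momentum=0.9)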
# ================================================================== #
#                     7. Save and load the model                     #
# ================================================================== #

# Save and load the entire model.
torch.save(resnet, 'model.ckpt')
model = torch.load('model.ckpt')

# Save and load only the model parameters (recommended).
torch.save(resnet.state_dict(), 'params.ckpt')
resnet.load_state_dict(torch.load('params.ckpt'))
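# A sketch (not in the original tutorial): a common pattern is a checkpoint
# dict that also stores the optimizer state so training can resume later;
# the key names here are arbitrary.
checkpoint = {'epoch': 1,
              'model_state': resnet.state_dict(),
              'optimizer_state': optimizer.state_dict()}
torch.save(checkpoint, 'checkpoint.ckpt')
ckpt = torch.load('checkpoint.ckpt')
resnet.load_state_dict(ckpt['model_state'])
optimizer.load_state_dict(ckpt['optimizer_state'])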
# ================================================================== #
#                         Linear regression                          #
# ================================================================== #

import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt

# Hyper-parameters
input_size = 1
output_size = 1
num_epochs = 60
learning_rate = 0.001

# Toy dataset
x_train = np.array([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168],
                    [9.779], [6.182], [7.59], [2.167], [7.042],
                    [10.791], [5.313], [7.997], [3.1]], dtype=np.float32)

y_train = np.array([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573],
                    [3.366], [2.596], [2.53], [1.221], [2.827],
                    [3.465], [1.65], [2.904], [1.3]], dtype=np.float32)
# Linear regression model
model = nn.Linear(input_size, output_size)

# Loss and optimizer
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# Train the model
for epoch in range(num_epochs):
    # Convert numpy arrays to torch tensors
    inputs = torch.from_numpy(x_train)
    targets = torch.from_numpy(y_train)

    # Forward pass
    outputs = model(inputs)
    loss = criterion(outputs, targets)

    # Backward and optimize
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if (epoch+1) % 5 == 0:
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, loss.item()))
# Plot the graph
# detach() cuts the prediction tensor from the autograd graph before it is
# converted to numpy.
predicted = model(torch.from_numpy(x_train)).detach().numpy()
plt.plot(x_train, y_train, 'ro', label='Original data')
plt.plot(x_train, predicted, label='Fitted line')
plt.legend()
plt.show()

# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
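# A sketch (not in the original tutorial): the trained model can now predict
# y for an unseen x (the input 8.0 is an arbitrary example value).
with torch.no_grad():
    x_new = torch.tensor([[8.0]])
    print('prediction for x=8.0: {:.4f}'.format(model(x_new).item()))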
# ================================================================== #
#                        Logistic regression                         #
# ================================================================== #

import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms

# Hyper-parameters
input_size = 784
num_classes = 10
num_epochs = 5
batch_size = 100
learning_rate = 0.001
# MNIST dataset (images and labels)
train_dataset = torchvision.datasets.MNIST(root='../../data',
                                           train=True,
                                           transform=transforms.ToTensor(),
                                           download=True)

test_dataset = torchvision.datasets.MNIST(root='../../data',
                                          train=False,
                                          transform=transforms.ToTensor())

# Data loader (input pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
# Logistic regression model
model = nn.Linear(input_size, num_classes)

# Loss and optimizer
# nn.CrossEntropyLoss() computes softmax internally
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
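# A sketch (not in the original tutorial): "computes softmax internally" means
# CrossEntropyLoss takes raw logits and is equivalent to LogSoftmax + NLLLoss.
logits = torch.randn(4, num_classes)
targets = torch.tensor([0, 3, 1, 9])
a = nn.CrossEntropyLoss()(logits, targets)
b = nn.NLLLoss()(nn.LogSoftmax(dim=1)(logits), targets)
print(torch.allclose(a, b))    # True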
# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Reshape images to (batch_size, input_size)
        images = images.reshape(-1, 28*28)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i+1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
# Test the model
# In the test phase, we don't need to compute gradients (for memory efficiency)
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.reshape(-1, 28*28)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    print('Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))

# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
# ================================================================== #
#           Feedforward neural network (one hidden layer)            #
# ================================================================== #

import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms

# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Hyper-parameters
input_size = 784
hidden_size = 500
num_classes = 10
num_epochs = 5
batch_size = 100
learning_rate = 0.001
# MNIST dataset
train_dataset = torchvision.datasets.MNIST(root='../../data',
                                           train=True,
                                           transform=transforms.ToTensor(),
                                           download=True)

test_dataset = torchvision.datasets.MNIST(root='../../data',
                                          train=False,
                                          transform=transforms.ToTensor())

# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
# Fully connected neural network with one hidden layer
class NeuralNet(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNet, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out

model = NeuralNet(input_size, hidden_size, num_classes).to(device)
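# A sketch (not in the original tutorial): the same architecture can be written
# with nn.Sequential when no custom forward logic is needed.
model_seq = nn.Sequential(nn.Linear(input_size, hidden_size),
                          nn.ReLU(),
                          nn.Linear(hidden_size, num_classes)).to(device)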
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Move tensors to the configured device
        images = images.reshape(-1, 28*28).to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i+1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
# Test the model
# In the test phase, we don't need to compute gradients (for memory efficiency)
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.reshape(-1, 28*28).to(device)
        labels = labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    print('Accuracy of the network on the 10000 test images: {} %'.format(100 * correct / total))

# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')