[Colab PyTorch] Visualizing training with tensorboardcolab

This post trains a small CNN on MNIST in Google Colab and streams the training loss to TensorBoard through the tensorboardcolab package, which tunnels TensorBoard out of the Colab VM via ngrok.
!pip install tensorboardcolab

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils, datasets
from tensorboardcolab import TensorBoardColab

# Standard MNIST normalization statistics (mean and std of the training set),
# referenced by the transforms below.
MNIST_MEAN = 0.1307
MNIST_STD = 0.3081
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4*4*50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2, 2)
        x = x.view(-1, 4*4*50)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
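As a quick sanity check (not in the original post), the 4*4*50 flatten size follows from the shapes: a 28x28 input becomes 24x24 after conv1 (5x5, stride 1), 12x12 after pooling, 8x8 after conv2, and 4x4 after the second pooling, with 50 channels:

out = Network()(torch.randn(1, 1, 28, 28))
print(out.shape)  # torch.Size([1, 10])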
class Config:
    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)

model_config = Config(
    cuda=torch.cuda.is_available(),
    device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
    seed=2,
    lr=0.01,
    epochs=4,
    save_model=False,
    batch_size=32,
    log_interval=100
)

class Trainer:
    def __init__(self, config):
        self.cuda = config.cuda
        self.device = config.device
        self.seed = config.seed
        self.lr = config.lr
        self.epochs = config.epochs
        self.save_model = config.save_model
        self.batch_size = config.batch_size
        self.log_interval = config.log_interval
        self.globaliter = 0
        # TensorBoardColab launches TensorBoard plus an ngrok tunnel and prints the public link.
        self.tb = TensorBoardColab()

        torch.manual_seed(self.seed)
        kwargs = {'num_workers': 1, 'pin_memory': True} if self.cuda else {}

        self.train_loader = DataLoader(
            datasets.MNIST('../data', train=True, download=True,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize((MNIST_MEAN,), (MNIST_STD,))
                           ])),
            batch_size=self.batch_size, shuffle=True, **kwargs)

        self.test_loader = DataLoader(
            datasets.MNIST('../data', train=False,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize((MNIST_MEAN,), (MNIST_STD,))
                           ])),
            batch_size=self.batch_size, shuffle=True, **kwargs)

        self.model = Network().to(self.device)
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)

    def train(self, epoch):
        self.model.train()
        for batch_idx, (data, target) in enumerate(self.train_loader):
            self.globaliter += 1
            data, target = data.to(self.device), target.to(self.device)

            self.optimizer.zero_grad()
            predictions = self.model(data)
            loss = F.nll_loss(predictions, target)
            loss.backward()
            self.optimizer.step()

            if batch_idx % self.log_interval == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(self.train_loader.dataset),
                    100. * batch_idx / len(self.train_loader), loss.item()))
                # Record the current loss as a point on the 'train_loss' line.
                self.tb.save_value('Train Loss', 'train_loss', self.globaliter, loss.item())

    def test(self, epoch):
        self.model.eval()
        test_loss = 0
        correct = 0
        with torch.no_grad():
            for data, target in self.test_loader:
                data, target = data.to(self.device), target.to(self.device)
                predictions = self.model(data)
                test_loss += F.nll_loss(predictions, target, reduction='sum').item()
                prediction = predictions.argmax(dim=1, keepdim=True)
                correct += prediction.eq(target.view_as(prediction)).sum().item()

        test_loss /= len(self.test_loader.dataset)
        accuracy = 100. * correct / len(self.test_loader.dataset)
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, correct, len(self.test_loader.dataset), accuracy))

def main():
    trainer = Trainer(model_config)
    for epoch in range(1, trainer.epochs + 1):
        trainer.train(epoch)
        trainer.test(epoch)
        # Push the recorded 'train_loss' points to TensorBoard after each epoch.
        trainer.tb.flush_line('train_loss')

    if trainer.save_model:
        torch.save(trainer.model.state_dict(), "mnist_cnn.pt")

main()
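Running the cell starts TensorBoard inside the Colab VM, opens an ngrok tunnel to it, and prints the public link (the subdomain below is session-specific); the training log follows: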
Wait for 8 seconds...
TensorBoard link:
http://db797eee.ngrok.io
Train Epoch: 1 [0/60000 (0%)] Loss: 2.320306
Train Epoch: 1 [3200/60000 (5%)] Loss: 0.881239
Train Epoch: 1 [6400/60000 (11%)] Loss: 0.013655
Train Epoch: 1 [9600/60000 (16%)] Loss: 0.013620
Train Epoch: 1 [12800/60000 (21%)] Loss: 0.225101
Train Epoch: 1 [16000/60000 (27%)] Loss: 0.248218
Train Epoch: 1 [19200/60000 (32%)] Loss: 0.207354
Train Epoch: 1 [22400/60000 (37%)] Loss: 0.139395
Train Epoch: 1 [25600/60000 (43%)] Loss: 0.206405
Train Epoch: 1 [28800/60000 (48%)] Loss: 0.090241
Train Epoch: 1 [32000/60000 (53%)] Loss: 0.216764
Train Epoch: 1 [35200/60000 (59%)] Loss: 0.295801
Train Epoch: 1 [38400/60000 (64%)] Loss: 0.021000
Train Epoch: 1 [41600/60000 (69%)] Loss: 0.050552
Train Epoch: 1 [44800/60000 (75%)] Loss: 0.238085
Train Epoch: 1 [48000/60000 (80%)] Loss: 0.298676
Train Epoch: 1 [51200/60000 (85%)] Loss: 0.301436
Train Epoch: 1 [54400/60000 (91%)] Loss: 0.271787
Train Epoch: 1 [57600/60000 (96%)] Loss: 0.019811

Test set: Average loss: 0.1088, Accuracy: 9677/10000 (97%)

Train Epoch: 2 [0/60000 (0%)] Loss: 0.036418
Train Epoch: 2 [3200/60000 (5%)] Loss: 0.024196
Train Epoch: 2 [6400/60000 (11%)] Loss: 0.029856
Train Epoch: 2 [9600/60000 (16%)] Loss: 0.084013
Train Epoch: 2 [12800/60000 (21%)] Loss: 0.345446
Train Epoch: 2 [16000/60000 (27%)] Loss: 0.453756
Train Epoch: 2 [19200/60000 (32%)] Loss: 0.409682
Train Epoch: 2 [22400/60000 (37%)] Loss: 0.159656
Train Epoch: 2 [25600/60000 (43%)] Loss: 0.009557
Train Epoch: 2 [28800/60000 (48%)] Loss: 0.282826
Train Epoch: 2 [32000/60000 (53%)] Loss: 0.047159
Train Epoch: 2 [35200/60000 (59%)] Loss: 0.379264
Train Epoch: 2 [38400/60000 (64%)] Loss: 0.043181
Train Epoch: 2 [41600/60000 (69%)] Loss: 0.486660
Train Epoch: 2 [44800/60000 (75%)] Loss: 0.108486
Train Epoch: 2 [48000/60000 (80%)] Loss: 0.242821
Train Epoch: 2 [51200/60000 (85%)] Loss: 0.218120
Train Epoch: 2 [54400/60000 (91%)] Loss: 0.381496
Train Epoch: 2 [57600/60000 (96%)] Loss: 0.134828

Test set: Average loss: 0.1861, Accuracy: 9496/10000 (95%)

Train Epoch: 3 [0/60000 (0%)] Loss: 0.081437
Train Epoch: 3 [3200/60000 (5%)] Loss: 0.121195
Train Epoch: 3 [6400/60000 (11%)] Loss: 0.054902
Train Epoch: 3 [9600/60000 (16%)] Loss: 0.031254
Train Epoch: 3 [12800/60000 (21%)] Loss: 0.036273
Train Epoch: 3 [16000/60000 (27%)] Loss: 0.162744
Train Epoch: 3 [19200/60000 (32%)] Loss: 0.028073
Train Epoch: 3 [22400/60000 (37%)] Loss: 0.114689
Train Epoch: 3 [25600/60000 (43%)] Loss: 0.139724
Train Epoch: 3 [28800/60000 (48%)] Loss: 0.353534
Train Epoch: 3 [32000/60000 (53%)] Loss: 0.001959
Train Epoch: 3 [35200/60000 (59%)] Loss: 0.117742
Train Epoch: 3 [38400/60000 (64%)] Loss: 0.024078
Train Epoch: 3 [41600/60000 (69%)] Loss: 0.063214
Train Epoch: 3 [44800/60000 (75%)] Loss: 0.068128
Train Epoch: 3 [48000/60000 (80%)] Loss: 0.055476
Train Epoch: 3 [51200/60000 (85%)] Loss: 0.025761
Train Epoch: 3 [54400/60000 (91%)] Loss: 0.490388
Train Epoch: 3 [57600/60000 (96%)] Loss: 0.275244

Test set: Average loss: 0.1570, Accuracy: 9594/10000 (96%)

Train Epoch: 4 [0/60000 (0%)] Loss: 0.150237
Train Epoch: 4 [3200/60000 (5%)] Loss: 0.049188
Train Epoch: 4 [6400/60000 (11%)] Loss: 0.008692
Train Epoch: 4 [9600/60000 (16%)] Loss: 0.061360
Train Epoch: 4 [12800/60000 (21%)] Loss: 0.004389
Train Epoch: 4 [16000/60000 (27%)] Loss: 0.027968
Train Epoch: 4 [19200/60000 (32%)] Loss: 0.075881
Train Epoch: 4 [22400/60000 (37%)] Loss: 0.074000
Train Epoch: 4 [25600/60000 (43%)] Loss: 0.069731
Train Epoch: 4 [28800/60000 (48%)] Loss: 0.330368
Train Epoch: 4 [32000/60000 (53%)] Loss: 0.393174
Train Epoch: 4 [35200/60000 (59%)] Loss: 0.318519
Train Epoch: 4 [38400/60000 (64%)] Loss: 0.164669
Train Epoch: 4 [41600/60000 (69%)] Loss: 0.161486
Train Epoch: 4 [44800/60000 (75%)] Loss: 0.017525
Train Epoch: 4 [48000/60000 (80%)] Loss: 0.104918
Train Epoch: 4 [51200/60000 (85%)] Loss: 0.000450
Train Epoch: 4 [54400/60000 (91%)] Loss: 0.128227
Train Epoch: 4 [57600/60000 (96%)] Loss: 0.005374

Test set: Average loss: 0.1227, Accuracy: 9717/10000 (97%)
The core of the change is the tensorboardcolab code highlighted above: creating the TensorBoardColab instance, calling save_value in the training loop, and calling flush_line after each epoch.
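Stripped of the training code, the logging pattern reduces to the following minimal sketch, using only the calls demonstrated above (as used in the post, save_value(graph_name, line_name, step, value) records one scalar point and flush_line makes the accumulated line visible in TensorBoard):

from tensorboardcolab import TensorBoardColab

tb = TensorBoardColab()      # starts TensorBoard + ngrok and prints the link
for step in range(1, 101):
    value = 1.0 / step       # stand-in for a real training loss
    tb.save_value('Train Loss', 'train_loss', step, value)
tb.flush_line('train_loss')  # push the recorded points to TensorBoard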
Pay正式启用 支付宝们还好吗" title="Android Pay正式启用 支付宝们还好吗"> 苹果发布会上能够真正让人眼前一亮的产品并不多,但对于" ...