import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils, datasets

!pip install tensorboardcolab
from tensorboardcolab import TensorBoardColab
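tensorboardcolab starts a TensorBoard server inside the Colab runtime and exposes it through a public ngrok tunnel, so nothing needs to run locally. As a rough sketch of the API used below (exact behavior depends on the library version you install):

    tb = TensorBoardColab()                 # prints a public ngrok URL for the dashboard
    for step in range(10):
        # save_value(graph_name, line_name, x, y) buffers one scalar point
        tb.save_value('Demo', 'demo_line', step, step * 0.5)
    tb.flush_line('demo_line')              # write the buffered points out
    tb.close()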
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4*4*50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2, 2)
        x = x.view(-1, 4*4*50)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
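The 4*4*50 flatten size follows from the 28x28 MNIST input: 24x24 after the first 5x5 convolution, 12x12 after pooling, 8x8 after the second convolution, 4x4 after the second pooling, with 50 channels. A quick sanity check (an illustrative snippet, not part of the original post):

    x = torch.zeros(1, 1, 28, 28)   # one fake MNIST image
    out = Network()(x)
    print(out.shape)                # torch.Size([1, 10]): one log-probability per digit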
class Config:
    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)

model_config = Config(
    cuda = True if torch.cuda.is_available() else False,
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu"),
    seed = 2,
    lr = 0.01,
    epochs = 4,
    save_model = False,
    batch_size = 32,
    log_interval = 100
)

# Standard MNIST normalization statistics; the original snippet used these
# names without defining them.
MNIST_MEAN = 0.1307
MNIST_STD = 0.3081

class Trainer:
    def __init__(self, config):
        self.cuda = config.cuda
        self.device = config.device
        self.seed = config.seed
        self.lr = config.lr
        self.epochs = config.epochs
        self.save_model = config.save_model
        self.batch_size = config.batch_size
        self.log_interval = config.log_interval
        self.globaliter = 0
        # Starting TensorBoardColab here opens the ngrok tunnel and prints its URL.
        self.tb = TensorBoardColab()

        torch.manual_seed(self.seed)
        kwargs = {'num_workers': 1, 'pin_memory': True} if self.cuda else {}

        self.train_loader = torch.utils.data.DataLoader(
            datasets.MNIST('../data', train=True, download=True,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize((MNIST_MEAN,), (MNIST_STD,))
                           ])),
            batch_size=self.batch_size, shuffle=True, **kwargs)

        self.test_loader = torch.utils.data.DataLoader(
            datasets.MNIST('../data', train=False,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize((MNIST_MEAN,), (MNIST_STD,))
                           ])),
            batch_size=self.batch_size, shuffle=True, **kwargs)

        self.model = Network().to(self.device)
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)

    def train(self, epoch):
        self.model.train()
        for batch_idx, (data, target) in enumerate(self.train_loader):
            self.globaliter += 1
            data, target = data.to(self.device), target.to(self.device)

            self.optimizer.zero_grad()
            predictions = self.model(data)
            loss = F.nll_loss(predictions, target)
            loss.backward()
            self.optimizer.step()

            if batch_idx % self.log_interval == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(self.train_loader.dataset),
                    100. * batch_idx / len(self.train_loader), loss.item()))
                # Log one scalar point per reporting step; 'Train Loss' is the
                # graph name and 'train_loss' the line within that graph.
                self.tb.save_value('Train Loss', 'train_loss', self.globaliter, loss.item())

    def test(self, epoch):
        self.model.eval()
        test_loss = 0
        correct = 0

        with torch.no_grad():
            for data, target in self.test_loader:
                data, target = data.to(self.device), target.to(self.device)
                predictions = self.model(data)
                # Sum per-sample losses so we can average over the whole dataset.
                test_loss += F.nll_loss(predictions, target, reduction='sum').item()
                prediction = predictions.argmax(dim=1, keepdim=True)
                correct += prediction.eq(target.view_as(prediction)).sum().item()

        test_loss /= len(self.test_loader.dataset)
        accuracy = 100. * correct / len(self.test_loader.dataset)
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, correct, len(self.test_loader.dataset), accuracy))

def main():
    trainer = Trainer(model_config)
    for epoch in range(1, trainer.epochs + 1):
        trainer.train(epoch)
        trainer.test(epoch)
        # Flush the buffered points so the curve updates after every epoch.
        trainer.tb.flush_line('train_loss')

    if trainer.save_model:
        torch.save(trainer.model.state_dict(), "mnist_cnn.pt")

main()
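Running the cell starts the tunnel, prints the dashboard link, and then streams the training log: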
Wait for 8 seconds...
TensorBoard link:
http://db797eee.ngrok.io
Train Epoch: 1 [0/60000 (0%)] Loss: 2.320306
Train Epoch: 1 [3200/60000 (5%)] Loss: 0.881239
Train Epoch: 1 [6400/60000 (11%)] Loss: 0.013655
Train Epoch: 1 [9600/60000 (16%)] Loss: 0.013620
Train Epoch: 1 [12800/60000 (21%)] Loss: 0.225101
Train Epoch: 1 [16000/60000 (27%)] Loss: 0.248218
Train Epoch: 1 [19200/60000 (32%)] Loss: 0.207354
Train Epoch: 1 [22400/60000 (37%)] Loss: 0.139395
Train Epoch: 1 [25600/60000 (43%)] Loss: 0.206405
Train Epoch: 1 [28800/60000 (48%)] Loss: 0.090241
Train Epoch: 1 [32000/60000 (53%)] Loss: 0.216764
Train Epoch: 1 [35200/60000 (59%)] Loss: 0.295801
Train Epoch: 1 [38400/60000 (64%)] Loss: 0.021000
Train Epoch: 1 [41600/60000 (69%)] Loss: 0.050552
Train Epoch: 1 [44800/60000 (75%)] Loss: 0.238085
Train Epoch: 1 [48000/60000 (80%)] Loss: 0.298676
Train Epoch: 1 [51200/60000 (85%)] Loss: 0.301436
Train Epoch: 1 [54400/60000 (91%)] Loss: 0.271787
Train Epoch: 1 [57600/60000 (96%)] Loss: 0.019811

Test set: Average loss: 0.1088, Accuracy: 9677/10000 (97%)

Train Epoch: 2 [0/60000 (0%)] Loss: 0.036418
Train Epoch: 2 [3200/60000 (5%)] Loss: 0.024196
Train Epoch: 2 [6400/60000 (11%)] Loss: 0.029856
Train Epoch: 2 [9600/60000 (16%)] Loss: 0.084013
Train Epoch: 2 [12800/60000 (21%)] Loss: 0.345446
Train Epoch: 2 [16000/60000 (27%)] Loss: 0.453756
Train Epoch: 2 [19200/60000 (32%)] Loss: 0.409682
Train Epoch: 2 [22400/60000 (37%)] Loss: 0.159656
Train Epoch: 2 [25600/60000 (43%)] Loss: 0.009557
Train Epoch: 2 [28800/60000 (48%)] Loss: 0.282826
Train Epoch: 2 [32000/60000 (53%)] Loss: 0.047159
Train Epoch: 2 [35200/60000 (59%)] Loss: 0.379264
Train Epoch: 2 [38400/60000 (64%)] Loss: 0.043181
Train Epoch: 2 [41600/60000 (69%)] Loss: 0.486660
Train Epoch: 2 [44800/60000 (75%)] Loss: 0.108486
Train Epoch: 2 [48000/60000 (80%)] Loss: 0.242821
Train Epoch: 2 [51200/60000 (85%)] Loss: 0.218120
Train Epoch: 2 [54400/60000 (91%)] Loss: 0.381496
Train Epoch: 2 [57600/60000 (96%)] Loss: 0.134828

Test set: Average loss: 0.1861, Accuracy: 9496/10000 (95%)

Train Epoch: 3 [0/60000 (0%)] Loss: 0.081437
Train Epoch: 3 [3200/60000 (5%)] Loss: 0.121195
Train Epoch: 3 [6400/60000 (11%)] Loss: 0.054902
Train Epoch: 3 [9600/60000 (16%)] Loss: 0.031254
Train Epoch: 3 [12800/60000 (21%)] Loss: 0.036273
Train Epoch: 3 [16000/60000 (27%)] Loss: 0.162744
Train Epoch: 3 [19200/60000 (32%)] Loss: 0.028073
Train Epoch: 3 [22400/60000 (37%)] Loss: 0.114689
Train Epoch: 3 [25600/60000 (43%)] Loss: 0.139724
Train Epoch: 3 [28800/60000 (48%)] Loss: 0.353534
Train Epoch: 3 [32000/60000 (53%)] Loss: 0.001959
Train Epoch: 3 [35200/60000 (59%)] Loss: 0.117742
Train Epoch: 3 [38400/60000 (64%)] Loss: 0.024078
Train Epoch: 3 [41600/60000 (69%)] Loss: 0.063214
Train Epoch: 3 [44800/60000 (75%)] Loss: 0.068128
Train Epoch: 3 [48000/60000 (80%)] Loss: 0.055476
Train Epoch: 3 [51200/60000 (85%)] Loss: 0.025761
Train Epoch: 3 [54400/60000 (91%)] Loss: 0.490388
Train Epoch: 3 [57600/60000 (96%)] Loss: 0.275244

Test set: Average loss: 0.1570, Accuracy: 9594/10000 (96%)

Train Epoch: 4 [0/60000 (0%)] Loss: 0.150237
Train Epoch: 4 [3200/60000 (5%)] Loss: 0.049188
Train Epoch: 4 [6400/60000 (11%)] Loss: 0.008692
Train Epoch: 4 [9600/60000 (16%)] Loss: 0.061360
Train Epoch: 4 [12800/60000 (21%)] Loss: 0.004389
Train Epoch: 4 [16000/60000 (27%)] Loss: 0.027968
Train Epoch: 4 [19200/60000 (32%)] Loss: 0.075881
Train Epoch: 4 [22400/60000 (37%)] Loss: 0.074000
Train Epoch: 4 [25600/60000 (43%)] Loss: 0.069731
Train Epoch: 4 [28800/60000 (48%)] Loss: 0.330368
Train Epoch: 4 [32000/60000 (53%)] Loss: 0.393174
Train Epoch: 4 [35200/60000 (59%)] Loss: 0.318519
Train Epoch: 4 [38400/60000 (64%)] Loss: 0.164669
Train Epoch: 4 [41600/60000 (69%)] Loss: 0.161486
Train Epoch: 4 [44800/60000 (75%)] Loss: 0.017525
Train Epoch: 4 [48000/60000 (80%)] Loss: 0.104918
Train Epoch: 4 [51200/60000 (85%)] Loss: 0.000450
Train Epoch: 4 [54400/60000 (91%)] Loss: 0.128227
Train Epoch: 4 [57600/60000 (96%)] Loss: 0.005374

Test set: Average loss: 0.1227, Accuracy: 9717/10000 (97%)

The core of the change is the parts highlighted in red in the original post.
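The highlighting does not survive in plain text, but the tensorboardcolab-specific lines (presumably the ones marked red) are:

    self.tb = TensorBoardColab()          # Trainer.__init__: start TensorBoard behind an ngrok tunnel
    self.tb.save_value('Train Loss', 'train_loss', self.globaliter, loss.item())  # train(): log one point
    trainer.tb.flush_line('train_loss')   # main(): flush the line after every epoch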
