Implement GAN from scratch
GANs from Scratch 1: A deep introduction. With code in PyTorch and TensorFlow
Below is the article's code with its errors fixed:
import torch
from torch import nn, optim
from torchvision import transforms, datasets
import matplotlib.pyplot as plt

DATA_FOLDER = 'D:/WorkSpace/Data/torchvision_data'

def mnist_data():
    compose = transforms.Compose([
        transforms.ToTensor(),
        # The original article used Normalize((.5, .5, .5), (.5, .5, .5)),
        # but MNIST has a single channel, so one mean/std pair is correct:
        transforms.Normalize([0.5], [0.5])
    ])
    # download=True fetches MNIST on first run if it is not already in DATA_FOLDER
    return datasets.MNIST(root=DATA_FOLDER, train=True, transform=compose, download=True)

# Load data
data = mnist_data()
# Create loader with data, so that we can iterate over it
data_loader = torch.utils.data.DataLoader(data, batch_size=64, shuffle=True)
# Num batches
num_batches = len(data_loader)
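As a quick sanity check (an illustrative snippet of mine, not part of the fixed article code), you can pull one batch from the loader and confirm the shapes and value range the networks below expect:

# Illustrative: inspect one batch from the loader.
images, labels = next(iter(data_loader))
print(images.shape)                 # torch.Size([64, 1, 28, 28])
print(images.min(), images.max())   # close to -1 and 1 after Normalize([0.5], [0.5])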
class DiscriminatorNet(torch.nn.Module):
    """
    A three hidden-layer discriminative neural network
    """
    def __init__(self):
        super(DiscriminatorNet, self).__init__()
        n_features = 784
        n_out = 1

        self.hidden0 = nn.Sequential(
            nn.Linear(n_features, 1024),
            nn.LeakyReLU(0.2),
            nn.Dropout(0.3)
        )
        self.hidden1 = nn.Sequential(
            nn.Linear(1024, 512),
            nn.LeakyReLU(0.2),
            nn.Dropout(0.3)
        )
        self.hidden2 = nn.Sequential(
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2),
            nn.Dropout(0.3)
        )
        self.out = nn.Sequential(
            nn.Linear(256, n_out),
            nn.Sigmoid()
        )

    def forward(self, x):
        x = self.hidden0(x)
        x = self.hidden1(x)
        x = self.hidden2(x)
        x = self.out(x)
        return x
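The discriminator takes a flattened 784-pixel image and squeezes it down to a single probability of the image being real. A minimal shape check (again my own illustrative snippet):

# Illustrative: one probability per input image, in (0, 1) thanks to the Sigmoid.
d_test = DiscriminatorNet()
print(d_test(torch.randn(8, 784)).shape)   # torch.Size([8, 1])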
def images_to_vectors(images):
    return images.view(images.size(0), 784)

def vectors_to_images(vectors):
    return vectors.view(vectors.size(0), 1, 28, 28)
class GeneratorNet(torch.nn.Module):
    """
    A three hidden-layer generative neural network
    """
    def __init__(self):
        super(GeneratorNet, self).__init__()
        n_features = 100
        n_out = 784

        self.hidden0 = nn.Sequential(
            nn.Linear(n_features, 256),
            nn.LeakyReLU(0.2)
        )
        self.hidden1 = nn.Sequential(
            nn.Linear(256, 512),
            nn.LeakyReLU(0.2)
        )
        self.hidden2 = nn.Sequential(
            nn.Linear(512, 1024),
            nn.LeakyReLU(0.2)
        )
        self.out = nn.Sequential(
            nn.Linear(1024, n_out),
            nn.Tanh()
        )

    def forward(self, x):
        x = self.hidden0(x)
        x = self.hidden1(x)
        x = self.hidden2(x)
        x = self.out(x)
        return x
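The generator mirrors this structure in reverse: a 100-dimensional noise vector is expanded to a 784-dimensional image vector, and the final Tanh keeps outputs in (-1, 1), matching the range of the normalized MNIST images. An illustrative check of mine:

# Illustrative: 100-d noise in, 784-d image vector out, values in (-1, 1).
g_test = GeneratorNet()
print(g_test(torch.randn(8, 100)).shape)   # torch.Size([8, 784])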
# Noise (plain tensors replace the deprecated torch.autograd Variable wrapper)
def noise(size):
    """Return `size` latent vectors of dimension 100, sampled from N(0, 1)."""
    n = torch.randn(size, 100)
    if torch.cuda.is_available(): return n.cuda()
    return n
discriminator = DiscriminatorNet()
generator = GeneratorNet()
if torch.cuda.is_available():
    discriminator.cuda()
    generator.cuda()

# Optimizers
d_optimizer = optim.Adam(discriminator.parameters(), lr=0.0002)
g_optimizer = optim.Adam(generator.parameters(), lr=0.0002)

# Loss function
loss = nn.BCELoss()

# Number of discriminator steps per generator step
d_steps = 1  # In Goodfellow et al. (2014) this is set to 1
# Number of epochs
num_epochs = 200
def real_data_target(size):
    '''
    Tensor of ones with shape (size, 1): the "real" label for BCELoss
    '''
    data = torch.ones(size, 1)
    if torch.cuda.is_available(): return data.cuda()
    return data

def fake_data_target(size):
    '''
    Tensor of zeros with shape (size, 1): the "fake" label for BCELoss
    '''
    data = torch.zeros(size, 1)
    if torch.cuda.is_available(): return data.cuda()
    return data
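With all-ones targets for real data and all-zeros targets for fake data, nn.BCELoss reproduces the two terms of the GAN objective: -log D(x) on real samples and -log(1 - D(G(z))) on fake ones. A tiny numerical check of mine:

# Illustrative: BCE with target 1 is -log(p); with target 0 it is -log(1 - p).
import math
p = torch.tensor([[0.9]])
print(loss(p, torch.ones(1, 1)).item())   # ~0.1054
print(-math.log(0.9))                     # same value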
def train_discriminator(optimizer, real_data, fake_data):
    # Reset gradients
    optimizer.zero_grad()

    # 1.1 Train on real data
    prediction_real = discriminator(real_data)
    # Calculate error and backpropagate
    error_real = loss(prediction_real, real_data_target(real_data.size(0)))
    error_real.backward()

    # 1.2 Train on fake data
    prediction_fake = discriminator(fake_data)
    # Calculate error and backpropagate (gradients accumulate with the real-data pass)
    error_fake = loss(prediction_fake, fake_data_target(fake_data.size(0)))
    error_fake.backward()

    # 1.3 Update weights with the accumulated gradients
    optimizer.step()

    # Return error
    return error_real + error_fake, prediction_real, prediction_fake
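Because the two backward() calls accumulate gradients before the single optimizer.step(), this update is equivalent to backpropagating the summed loss once. A compact rewrite of mine, shown only to make that equivalence explicit:

# Equivalent single-backward formulation of the discriminator update.
def train_discriminator_summed(optimizer, real_data, fake_data):
    optimizer.zero_grad()
    error = (loss(discriminator(real_data), real_data_target(real_data.size(0))) +
             loss(discriminator(fake_data), fake_data_target(fake_data.size(0))))
    error.backward()
    optimizer.step()
    return error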
def train_generator(optimizer, fake_data):
    # 2. Train Generator
    # Reset gradients
    optimizer.zero_grad()
    # Run the (already generated) fake data through the discriminator
    prediction = discriminator(fake_data)
    # The generator wants the discriminator to say "real", hence real_data_target
    error = loss(prediction, real_data_target(prediction.size(0)))
    error.backward()
    # Update weights with gradients
    optimizer.step()
    # Return error
    return error
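Note that the generator is scored against real_data_target, i.e. it maximizes log D(G(z)) instead of minimizing log(1 - D(G(z))). This is the non-saturating heuristic from Goodfellow et al. (2014): it yields much stronger gradients early in training, when the discriminator rejects the generator's samples with ease.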
num_test_samples = 16
test_noise = noise(num_test_samples)

for epoch in range(num_epochs):
    for n_batch, (real_batch, _) in enumerate(data_loader):
        # 1. Train Discriminator
        real_data = images_to_vectors(real_batch)
        if torch.cuda.is_available(): real_data = real_data.cuda()
        # Generate fake data; detach so no generator gradients are computed here
        fake_data = generator(noise(real_data.size(0))).detach()
        # Train D
        d_error, d_pred_real, d_pred_fake = train_discriminator(d_optimizer,
                                                                real_data, fake_data)

        # 2. Train Generator
        # Generate fresh fake data (no detach: gradients must flow back to G)
        fake_data = generator(noise(real_batch.size(0)))
        # Train G
        g_error = train_generator(g_optimizer, fake_data)

    # Display progress once per epoch (.item() prints plain numbers, not tensors)
    print('epoch', epoch, ': d_error is', d_error.item(), ', g_error is', g_error.item())
    if epoch % 20 == 0:
        test_images = vectors_to_images(generator(test_noise)).detach().cpu()
        fig = plt.figure()
        for i in range(num_test_samples):
            ax = fig.add_subplot(4, 4, i + 1)
            ax.imshow(test_images[i][0], cmap=plt.cm.gray)
        plt.show()
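After training you will probably want to keep the generator around; a minimal sketch (the file name is my own placeholder):

# Hypothetical follow-up: persist the generator and sample from it later.
torch.save(generator.state_dict(), 'generator_mnist.pth')

g = GeneratorNet()
g.load_state_dict(torch.load('generator_mnist.pth', map_location='cpu'))
g.eval()
with torch.no_grad():
    samples = vectors_to_images(g(torch.randn(16, 100)))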
1.缓存一般是这样的:先查缓存,查不到就查DB,如果DB查不到就结束,DB查到了就写入缓存. 如果用户一直在大量地查询不存在的数据,则所有的请求都会落到DB,而且没有数据写入缓存. 解决方法:把查不到 ...