The dataset contains 10,000 samples: the first 8,000 are used for training and the rest for testing. Each sample is a 1×144 vector, reshaped into a 12×12 matrix. The original BiGAN consists of an encoder, a discriminator, and a generator; here all of their fully connected layers are replaced with convolutional layers.
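As a minimal sketch of the data layout described above (assuming GANData.csv stores one 144-value sample per row, which is also what the training code below expects):

# Sketch only: split the 10,000 samples and reshape each 1x144 row into a 12x12x1 image.
from pandas import read_csv

values = read_csv('GANData.csv').values            # expected shape: (10000, 144)
X_train = values[:8000].reshape(-1, 12, 12, 1)     # first 8,000 samples for training
X_test  = values[8000:].reshape(-1, 12, 12, 1)     # remaining 2,000 samples for testing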

from __future__ import print_function, division

from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply, GaussianNoise
from keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
from keras.layers import MaxPooling2D, concatenate
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras import losses
from keras.utils import to_categorical
from pandas import read_csv
import keras.backend as K
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np


class BIGAN():
    def __init__(self):
        self.img_rows = 12
        self.img_cols = 12
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 100

        optimizer = Adam(0.0002, 0.5)

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss=['binary_crossentropy'],
            optimizer=optimizer,
            metrics=['accuracy'])

        # Build the generator
        self.generator = self.build_generator()

        # Build the encoder
        self.encoder = self.build_encoder()

        # The part of the bigan that trains the discriminator and encoder
        self.discriminator.trainable = False

        # Generate image from sampled noise
        z = Input(shape=(self.latent_dim, ))
        img_ = self.generator(z)

        # Encode image
        img = Input(shape=self.img_shape)
        z_ = self.encoder(img)

        # Latent -> img is fake, and img -> latent is valid
        fake = self.discriminator([z, img_])
        valid = self.discriminator([z_, img])

        # Set up and compile the combined model
        # Trains generator to fool the discriminator
        self.bigan_generator = Model([z, img], [fake, valid])
        self.bigan_generator.compile(loss=['binary_crossentropy', 'binary_crossentropy'],
            optimizer=optimizer)

    def build_encoder(self):
        # Encoder E: maps a 12x12x1 image to a latent_dim-dimensional code
        # (the fully connected layers of the original BiGAN encoder are replaced with convolutions)
        model = Sequential()

        model.add(Conv2D(16, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0,1),(0,1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(self.latent_dim))

        model.summary()

        img = Input(shape=self.img_shape)
        z = model(img)

        return Model(img, z)

    def build_generator(self):
        # Generator G: maps a latent_dim-dimensional noise vector to a 12x12x1 image
        model = Sequential()

        model.add(Dense(64 * 3 * 3, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((3, 3, 64)))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(UpSampling2D())
        model.add(Conv2D(32, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
        model.add(Activation("tanh"))

        model.summary()

        z = Input(shape=(self.latent_dim,))
        gen_img = model(z)

        return Model(z, gen_img)

    def build_discriminator(self):
        # Discriminator D: scores a (latent vector, image) pair as real or fake
        z = Input(shape=(self.latent_dim, ))
        img = Input(shape=self.img_shape)
        d_in = concatenate([z, Flatten()(img)])

        model = Dense(14*14, activation="relu")(d_in)
        model = Reshape((14, 14, 1))(model)
        model = Conv2D(16, kernel_size=3, strides=2, padding="same")(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.25)(model)
        model = Conv2D(32, kernel_size=3, strides=2, padding="same")(model)
        model = ZeroPadding2D(padding=((0,1),(0,1)))(model)
        model = BatchNormalization(momentum=0.8)(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.25)(model)
        model = Conv2D(64, kernel_size=3, strides=2, padding="same")(model)
        model = BatchNormalization(momentum=0.8)(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.25)(model)
        model = Conv2D(128, kernel_size=3, strides=1, padding="same")(model)
        model = BatchNormalization(momentum=0.8)(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.25)(model)
        model = Flatten()(model)
        validity = Dense(1, activation="sigmoid")(model)

        return Model([z, img], validity)

    def train(self, epochs, batch_size=128, sample_interval=50):

        # Load the dataset
        dataset = read_csv('GANData.csv')
        values = dataset.values
        XY = values
        n_train_hours1 = 8000
        x_train = XY[:n_train_hours1, :]      # first 8,000 samples for training
        x_test = XY[n_train_hours1:, :]       # remaining samples for testing
        X_train = x_train.reshape(-1, 12, 12, 1)
        X_test = x_test.reshape(-1, 12, 12, 1)

        # Adversarial ground truths
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        # Per-epoch history: [D loss, D accuracy, G loss]
        FHZ = np.zeros((epochs, 3))

        for epoch in range(epochs):

            # ---------------------
            #  Train Discriminator
            # ---------------------

            # Sample noise and generate img
            z = np.random.normal(size=(batch_size, self.latent_dim))
            imgs_ = self.generator.predict(z)

            # Select a random batch of images and encode
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs = X_train[idx]
            z_ = self.encoder.predict(imgs)

            # Train the discriminator (img -> z is valid, z -> img is fake)
            d_loss_real = self.discriminator.train_on_batch([z_, imgs], valid)
            d_loss_fake = self.discriminator.train_on_batch([z, imgs_], fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ---------------------
            #  Train Generator
            # ---------------------

            # Train the generator (z -> img is valid and img -> z is invalid)
            g_loss = self.bigan_generator.train_on_batch([z, imgs], [valid, fake])

            # Plot the progress
            print("%d [D loss: %f, acc: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss[0]))
            FHZ[epoch, 0] = d_loss[0]
            FHZ[epoch, 1] = d_loss[1]
            FHZ[epoch, 2] = g_loss[0]

            # If at save interval => save generated image samples
            if epoch % sample_interval == 0:
                self.sample_interval(epoch)

        return FHZ
    def sample_interval(self, epoch):
        r, c = 5, 5
        z = np.random.normal(size=(25, self.latent_dim))
        gen_imgs = self.generator.predict(z)

        # Rescale images 0 - 1
        gen_imgs = 0.5 * gen_imgs + 0.5

        decoded_imgs = gen_imgs.reshape((gen_imgs.shape[0], -1))
        print('decoded_imgs.shape:', decoded_imgs.shape)
        data = decoded_imgs

        # Create a pd.DataFrame and write it to Excel
        data_df = pd.DataFrame(data)
        writer = pd.ExcelWriter('Result.xlsx')
        data_df.to_excel(writer, 'page_1', float_format='%.5f')  # float_format controls the precision
        writer.save()
        #fig.savefig("images/mnist_%d.png" % epoch)


if __name__ == '__main__':
    bigan = BIGAN()
    d_loss = bigan.train(epochs=10, batch_size=32, sample_interval=9)
    np.savetxt("d_lossnum.csv", d_loss, delimiter=',')
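matplotlib.pyplot is imported above but never used. A minimal sketch (not part of the original script) for plotting the training history written to d_lossnum.csv, assuming the column order D loss, D accuracy, G loss produced by train():

import numpy as np
import matplotlib.pyplot as plt

hist = np.loadtxt("d_lossnum.csv", delimiter=',')  # columns: D loss, D accuracy, G loss
plt.plot(hist[:, 0], label="D loss")
plt.plot(hist[:, 2], label="G loss")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.show()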
