from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Conv1D, MaxPooling1D
import scipy.io as sio
import matplotlib.pyplot as plt
from keras.utils import np_utils
import keras
import numpy as np
from keras import regularizers
from keras.callbacks import TensorBoard
from keras.utils import plot_model
from keras import backend as K
from os.path import exists, join
from os import makedirs

batch_sizes = 256
nb_class = 10
nb_epochs = 2
log_dir = './bgbv2_log_dir'
if not exists(log_dir):
    makedirs(log_dir)

# input image dimensions
img_rows, img_cols = 1, 2048
'''
Step 1: prepare the data
'''
# MATLAB file name; load the dataset
file_name = u'G:/GANCode/CSWU/12k drive end vps/trainset/D/D_dataset.mat'
original_data = sio.loadmat(file_name)
X_train = original_data['x_train']
Y_train = original_data['y_train']
X_test = original_data['x_test']
Y_test = original_data['y_test']
channel = 1
X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], channel))
X_test = X_test.reshape((X_test.shape[0], X_test.shape[1], channel))
input_shape = (X_train.shape[1], channel)

# shuffle samples and labels with the same permutation
permutation = np.random.permutation(Y_train.shape[0])
X_train = X_train[permutation, :, :]
Y_train = Y_train[permutation]
permutation = np.random.permutation(Y_test.shape[0])
X_test = X_test[permutation, :, :]
Y_test = Y_test[permutation]

X_train = X_train.astype('float32')  # cast to float32
X_test = X_test.astype('float32')
#X_train = (X_train+1)/2
#X_test = (X_test+1)/2
print('x_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

X_meta = X_test.reshape((X_test.shape[0], X_test.shape[1]))

# save class labels to disk to color data points in TensorBoard accordingly
with open(join(log_dir, 'metadata.tsv'), 'w') as f:
    np.savetxt(f, Y_test[:200])

'''
Step 3: one-hot encode the labels
'''
Y_test = np_utils.to_categorical(Y_test, nb_class)   # one-hot labels
Y_train = np_utils.to_categorical(Y_train, nb_class)

'''
Step 4: build the network model
'''
model = Sequential()
model.add(Conv1D(64, 11, activation='relu', input_shape=(2048, 1)))
model.add(Conv1D(64, 11, activation='relu'))
model.add(MaxPooling1D(3))
model.add(Conv1D(128, 11, activation='relu'))
model.add(Conv1D(128, 11, activation='relu'))
'''
model.add(GlobalAveragePooling1D())
model.add(Dropout(0.5))
'''
model.add(MaxPooling1D(3))
model.add(Dropout(0.25))
model.add(keras.layers.Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))

# collect the names of all Dense layers, in case several of them should be monitored
# https://stackoverflow.com/questions/45265436/keras-save-image-embedding-of-the-mnist-data-set
embedding_layer_names = set(layer.name
                            for layer in model.layers
                            if layer.name.startswith('dense_'))

model.compile(loss='categorical_crossentropy',  # 10-class softmax output, so categorical (not binary) crossentropy
              optimizer='rmsprop',
              metrics=['accuracy'])

callbacks = [keras.callbacks.TensorBoard(
    log_dir='bgbv2_log_dir',
    embeddings_layer_names=['dense_2'],  # to monitor a layer, list its name here; several layers can be monitored at once, e.g. the set collected above
    #embeddings_metadata='metadata.tsv',
    embeddings_freq=1,
    #histogram_freq=1,
    embeddings_data=X_test  # must match the layout of X_train; this is 1-D data: (60000, 2048, 1) means 60000 samples of length 2048 with 1 channel (one sensor); multiple sensors would mean multiple channels
)]

model.fit(X_train, Y_train,
          batch_size=batch_sizes,
          callbacks=callbacks,
          epochs=nb_epochs,
          verbose=1,
          validation_data=(X_test, Y_test))
# You can now launch tensorboard with `tensorboard --logdir=./bgbv2_log_dir` on your
# command line and then go to http://localhost:6006/#projector to view the
# embeddings
# keras.callbacks.TensorBoard(
# log_dir='./logs',
# histogram_freq=0,
# batch_size=32,
# write_graph=True,
# write_grads=False,
# write_images=False,
# embeddings_freq=0,
# embeddings_layer_names=None,
# embeddings_metadata=None,
# embeddings_data=None,
# update_freq='epoch')
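
One detail that is easy to get wrong: the projector can only color the points if metadata.tsv and embeddings_data describe exactly the same samples, and in the script above the metadata holds 200 labels while embeddings_data is the full X_test. A minimal sketch of one way to make them consistent (my own adjustment, not the code I actually ran; it reuses X_test, Y_test, log_dir, join and np from the script, and the 200-sample cut-off is just an assumption):

n_embed = 200                                            # hypothetical cut-off
labels_embed = np.argmax(Y_test[:n_embed], axis=1)       # recover integer labels from the one-hot matrix
with open(join(log_dir, 'metadata.tsv'), 'w') as f:
    np.savetxt(f, labels_embed, fmt='%d')

tb_embed = keras.callbacks.TensorBoard(
    log_dir=log_dir,
    embeddings_freq=1,
    embeddings_layer_names=['dense_2'],
    embeddings_metadata='metadata.tsv',                  # the file written into log_dir above
    embeddings_data=X_test[:n_embed])                    # the same 200 samples the labels describe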

This one really tripped me up.

Nobody taught me this; I spent a whole day figuring it out on my own.

Now the 3-D embedding plot shows up clearly, which makes excellent material for papers and slides.

PS: how to get the output of any layer:

https://stackoverflow.com/questions/41711190/keras-how-to-get-the-output-of-each-layer
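
Following that answer, here is a minimal sketch of pulling one layer's activations out of the model trained above (my own illustration: it assumes the variables model and X_test from the script, and that 'dense_2' is the auto-generated name of the 100-unit Dense layer, which model.summary() will confirm):

from keras.models import Model

# a second model that shares the trained weights and stops at the chosen layer
feature_extractor = Model(inputs=model.input,
                          outputs=model.get_layer('dense_2').output)
dense2_output = feature_extractor.predict(X_test[:10])   # shape (10, 100): one 100-d vector per sample
print(dense2_output.shape)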

Reference 1: the official Keras TensorBoard callback documentation

https://keras.io/callbacks/#tensorboard

from __future__ import print_function

from os import makedirs
from os.path import exists, join

import keras
from keras.callbacks import TensorBoard
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K

import numpy as np

batch_size = 128
num_classes = 10
epochs = 12
log_dir = './logs'
if not exists(log_dir):
    makedirs(log_dir)

# input image dimensions
img_rows, img_cols = 28, 28

# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# save class labels to disk to color data points in TensorBoard accordingly
with open(join(log_dir, 'metadata.tsv'), 'w') as f:
    np.savetxt(f, y_test)

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

tensorboard = TensorBoard(batch_size=batch_size,
                          embeddings_freq=1,
                          embeddings_layer_names=['features'],
                          embeddings_metadata='metadata.tsv',
                          embeddings_data=x_test)

model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu', name='features'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=batch_size,
          callbacks=[tensorboard],
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

# You can now launch tensorboard with `tensorboard --logdir=./logs` on your
# command line and then go to http://localhost:6006/#projector to view the
# embeddings

Reference 2: visualizing the last layer of the Keras MNIST example.

https://keras.io/examples/tensorboard_embeddings_mnist/
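
One thing worth copying from that example into the 1-D CNN script above: the layer to be projected is created with an explicit name (name='features'), so embeddings_layer_names never has to guess an auto-generated name like 'dense_2'. A tiny sketch of the same pattern (my own illustration, with a toy model standing in for the real network):

import keras
from keras.models import Sequential
from keras.layers import Dense

# toy stand-in: name the layer you want to project when you add it ...
toy = Sequential()
toy.add(Dense(100, activation='relu', input_shape=(2048,), name='features'))
toy.add(Dense(10, activation='softmax'))

# ... and refer to that fixed name in the callback
tb = keras.callbacks.TensorBoard(log_dir='./bgbv2_log_dir',
                                 embeddings_freq=1,
                                 embeddings_layer_names=['features'])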

Reference 3: visualizing the last layer of the IMDB movie-review example

import keras
from keras import layers
from keras.datasets import imdb
from keras.preprocessing import sequence
max_features = 500  # the original example uses 2000
max_len = 500
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train = sequence.pad_sequences(x_train, maxlen=max_len)
x_test = sequence.pad_sequences(x_test, maxlen=max_len)

model = keras.models.Sequential()
model.add(layers.Embedding(max_features, 128, input_length=max_len, name='embed'))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.MaxPooling1D(5))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.GlobalMaxPooling1D())
model.add(layers.Dense(1))
model.summary()
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
callbacks = [keras.callbacks.TensorBoard(
log_dir='my_log_dir',
histogram_freq=1,
embeddings_freq=1,
embeddings_data=x_train[:100].astype("float32")
)]
history = model.fit(x_train, y_train, epochs=20, batch_size=128, validation_split=0.2, callbacks=callbacks)

# Additional reference: https://codeday.me/bug/20180924/267508.html
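
The book example above writes no metadata, so the 100 projected reviews show up uncolored. A minimal sketch of adding it (my own addition, reusing x_train, y_train and my_log_dir from the snippet above; the file name metadata.tsv and the makedirs call are assumptions, following the same pattern as the MNIST example):

import os
import numpy as np

os.makedirs('my_log_dir', exist_ok=True)
# one sentiment label (0/1) per row, for exactly the 100 reviews passed as embeddings_data
np.savetxt(os.path.join('my_log_dir', 'metadata.tsv'), y_train[:100], fmt='%d')

callbacks = [keras.callbacks.TensorBoard(
    log_dir='my_log_dir',
    histogram_freq=1,
    embeddings_freq=1,
    embeddings_metadata='metadata.tsv',
    embeddings_data=x_train[:100].astype('float32')
)]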
