from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Conv1D, MaxPooling1D
import scipy.io as sio
import matplotlib.pyplot as plt
from keras.utils import np_utils
import keras
import numpy as np
from keras import regularizers
from keras.callbacks import TensorBoard
from keras.utils import plot_model
from keras import backend as K
from os.path import exists, join
from os import makedirs

batch_sizes = 256
nb_class = 10
nb_epochs = 2
log_dir = './bgbv2_log_dir'
if not exists(log_dir):
    makedirs(log_dir)

# input image dimensions
img_rows, img_cols = 1, 2048
'''
Step 1: prepare the data
'''
# MATLAB .mat file containing the dataset
file_name = u'G:/GANCode/CSWU/12k drive end vps/trainset/D/D_dataset.mat'
original_data = sio.loadmat(file_name)
X_train = original_data['x_train']
Y_train = original_data['y_train']
X_test = original_data['x_test']
Y_test = original_data['y_test']
channel = 1
X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], channel))
X_test = X_test.reshape((X_test.shape[0], X_test.shape[1], channel))
input_shape = (X_train.shape[1], channel)

# shuffle samples and labels together
permutation = np.random.permutation(Y_train.shape[0])
X_train = X_train[permutation, :, :]
Y_train = Y_train[permutation]
permutation = np.random.permutation(Y_test.shape[0])
X_test = X_test[permutation, :, :]
Y_test = Y_test[permutation]

X_train = X_train.astype('float32')  # cast to float32
X_test = X_test.astype('float32')
#X_train = (X_train+1)/2
#X_test = (X_test+1)/2
print('x_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

X_meta = X_test.reshape((X_test.shape[0], X_test.shape[1]))

# save class labels to disk to color data points in TensorBoard accordingly
with open(join(log_dir, 'metadata.tsv'), 'w') as f:
    np.savetxt(f, Y_test[:200])

'''
Step 3: one-hot encode the labels
'''
Y_test = np_utils.to_categorical(Y_test, nb_class)
Y_train = np_utils.to_categorical(Y_train, nb_class)

'''
Step 4: build the network model
'''
model = Sequential()
model.add(Conv1D(64, 11, activation='relu', input_shape=(2048, 1)))
model.add(Conv1D(64, 11, activation='relu'))
model.add(MaxPooling1D(3))
model.add(Conv1D(128, 11, activation='relu'))
model.add(Conv1D(128, 11, activation='relu'))
'''
model.add(GlobalAveragePooling1D())
model.add(Dropout(0.5))
'''
model.add(MaxPooling1D(3))
model.add(Dropout(0.25))
model.add(keras.layers.Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))

embedding_layer_names = set(layer.name
                            for layer in model.layers
                            if layer.name.startswith('dense_'))
# https://stackoverflow.com/questions/45265436/keras-save-image-embedding-of-the-mnist-data-set

model.compile(loss='categorical_crossentropy',  # categorical loss to match the 10-class softmax output
              optimizer='rmsprop',
              metrics=['accuracy'])

callbacks = [keras.callbacks.TensorBoard(
    log_dir=log_dir,
    # to monitor a specific layer, pass that layer's name here; several layers can be
    # watched at once (e.g. the embedding_layer_names set built above)
    embeddings_layer_names=['dense_2'],
    # embeddings_metadata='metadata.tsv',
    embeddings_freq=1,
    # histogram_freq=1,
    # embeddings_data must have the same shape as the training input. Here I use 1-D data:
    # (60000, 2048, 1) means 60000 samples, each 2048 points long, with 1 channel (1 sensor);
    # for multi-channel data you would feed signals from several sensors.
    embeddings_data=X_test
)]

model.fit(X_train, Y_train,
          batch_size=batch_sizes,
          callbacks=callbacks,
          epochs=nb_epochs,
          verbose=1,
          validation_data=(X_test, Y_test))
# You can now launch tensorboard with `tensorboard --logdir=./logs` on your
# command line and then go to http://localhost:6006/#projector to view the
# embeddings
# keras.callbacks.TensorBoard(
# log_dir='./logs',
# histogram_freq=0,
# batch_size=32,
# write_graph=True,
# write_grads=False,
# write_images=False,
# embeddings_freq=0,
# embeddings_layer_names=None,
# embeddings_metadata=None,
# embeddings_data=None,
# update_freq='epoch')
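
Note that the embeddings_layer_names entry in the callback has to match Keras' auto-generated layer name ('dense_2' above), which changes if you add or remove Dense layers or rebuild the model in the same session. A minimal sketch of how to check the names before wiring up the callback, using only the model defined above:

# print every layer's auto-generated name so the right 'dense_*' entry
# can be passed to embeddings_layer_names
for layer in model.layers:
    print(layer.name, layer.output_shape)

# Alternatively, name the layer explicitly when adding it, e.g.
#     model.add(Dense(100, activation='relu', name='features'))
# and then use embeddings_layer_names=['features'] (the MNIST example below does this).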

That took forever to figure out.

No one showed me how; I spent a whole day working it out on my own.

With this in place you can clearly see the 3-D embedding plot, which makes excellent material for papers and slides.

PS: to get the output of any individual layer, see:

https://stackoverflow.com/questions/41711190/keras-how-to-get-the-output-of-each-layer
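
The gist of that answer: wrap the path from the model input to the layer you care about in a backend function and call it on a batch. A minimal sketch, assuming the model and X_test defined above; the layer index 3 is arbitrary and only for illustration:

from keras import backend as K

# backend function from the model input to one intermediate layer's output
# (the learning-phase flag keeps Dropout layers in test mode)
get_layer_output = K.function([model.input, K.learning_phase()],
                              [model.layers[3].output])

layer_out = get_layer_output([X_test[:32], 0])[0]  # 0 = test phase
print(layer_out.shape)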

Reference 1: the official Keras documentation for the TensorBoard callback

https://keras.io/callbacks/#tensorboard

from __future__ import print_function

from os import makedirs
from os.path import exists, join

import keras
from keras.callbacks import TensorBoard
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K

import numpy as np

batch_size = 128
num_classes = 10
epochs = 12
log_dir = './logs'

if not exists(log_dir):
    makedirs(log_dir)

# input image dimensions
img_rows, img_cols = 28, 28

# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# save class labels to disk to color data points in TensorBoard accordingly
with open(join(log_dir, 'metadata.tsv'), 'w') as f:
    np.savetxt(f, y_test)

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

tensorboard = TensorBoard(batch_size=batch_size,
                          embeddings_freq=1,
                          embeddings_layer_names=['features'],
                          embeddings_metadata='metadata.tsv',
                          embeddings_data=x_test)

model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu', name='features'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=batch_size,
          callbacks=[tensorboard],
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))

score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

# You can now launch tensorboard with `tensorboard --logdir=./logs` on your
# command line and then go to http://localhost:6006/#projector to view the
# embeddings

Reference 2: Keras MNIST last-layer visualization.

https://keras.io/examples/tensorboard_embeddings_mnist/

Reference 3: last-layer visualization for IMDB movie reviews

import keras
from keras import layers
from keras.datasets import imdb
from keras.preprocessing import sequence
max_features = 500  # the original example uses 2000
max_len = 500
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train = sequence.pad_sequences(x_train, maxlen=max_len)
x_test = sequence.pad_sequences(x_test, maxlen=max_len)

model = keras.models.Sequential()
model.add(layers.Embedding(max_features, 128, input_length=max_len, name='embed'))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.MaxPooling1D(5))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.GlobalMaxPooling1D())
model.add(layers.Dense(1))
model.summary()
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
callbacks = [keras.callbacks.TensorBoard(
    log_dir='my_log_dir',
    histogram_freq=1,
    embeddings_freq=1,
    embeddings_data=x_train[:100].astype("float32")
)]
history = model.fit(x_train, y_train, epochs=20, batch_size=128, validation_split=0.2, callbacks=callbacks)

# Supplementary reference: https://codeday.me/bug/20180924/267508.html
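
The IMDB snippet passes no embeddings_metadata, so the points in the projector are not colored by label. Following the same metadata.tsv pattern as the first example, a minimal sketch (assuming the callback's my_log_dir directory, and that the metadata path is resolved relative to that directory as in the MNIST example):

from os import makedirs
from os.path import exists, join
import numpy as np

if not exists('my_log_dir'):
    makedirs('my_log_dir')

# one integer label per line, in the same order as embeddings_data (x_train[:100])
with open(join('my_log_dir', 'metadata.tsv'), 'w') as f:
    np.savetxt(f, y_train[:100], fmt='%d')

# then pass embeddings_metadata='metadata.tsv' to the TensorBoard callback above
# so the projector can color each point by its review label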

