[keras] Monitoring the output of every CNN layer with TensorBoard



from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Conv1D, MaxPooling1D
import scipy.io as sio
import matplotlib.pyplot as plt
from keras.utils import np_utils
import keras
import numpy as np
from keras import regularizers
from keras.callbacks import TensorBoard
from keras.utils import plot_model
from keras import backend as K
from os.path import exists, join
from os import makedirs

batch_sizes = 256
nb_class = 10
nb_epochs = 2
log_dir = './bgbv2_log_dir'
if not exists(log_dir):
    makedirs(log_dir)

# input image dimensions
img_rows, img_cols = 1, 2048

''' Step 1: prepare the data '''
# MATLAB file holding the prepared dataset
file_name = u'G:/GANCode/CSWU/12k drive end vps/trainset/D/D_dataset.mat'
original_data = sio.loadmat(file_name)
X_train = original_data['x_train']
Y_train = original_data['y_train']
X_test = original_data['x_test']
Y_test = original_data['y_test']

channel = 1
X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], channel))
X_test = X_test.reshape((X_test.shape[0], X_test.shape[1], channel))
input_shape = (X_train.shape[1], channel)

# shuffle samples and labels with the same permutation
permutation = np.random.permutation(Y_train.shape[0])
X_train = X_train[permutation, :, :]
Y_train = Y_train[permutation]
permutation = np.random.permutation(Y_test.shape[0])
X_test = X_test[permutation, :, :]
Y_test = Y_test[permutation]

X_train = X_train.astype('float32')  # cast to float32
X_test = X_test.astype('float32')
#X_train = (X_train+1)/2
#X_test = (X_test+1)/2
print('x_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

X_meta = X_test.reshape((X_test.shape[0], X_test.shape[1]))  # flattened copy of the test set (not used below)

# save class labels to disk to color data points in TensorBoard accordingly
with open(join(log_dir, 'metadata.tsv'), 'w') as f:
    np.savetxt(f, Y_test[:200])

''' Step 3: one-hot encode the labels '''
Y_test = np_utils.to_categorical(Y_test, nb_class)
Y_train = np_utils.to_categorical(Y_train, nb_class)

''' Step 4: build the network model '''
model = Sequential()
model.add(Conv1D(64, 11, activation='relu', input_shape=(2048, 1)))
model.add(Conv1D(64, 11, activation='relu'))
model.add(MaxPooling1D(3))
model.add(Conv1D(128, 11, activation='relu'))
model.add(Conv1D(128, 11, activation='relu'))
'''
model.add(GlobalAveragePooling1D())
model.add(Dropout(0.5))
'''
model.add(MaxPooling1D(3))
model.add(Dropout(0.25))
model.add(keras.layers.Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))

# names of all Dense layers, usable as embeddings_layer_names
# https://stackoverflow.com/questions/45265436/keras-save-image-embedding-of-the-mnist-data-set
embedding_layer_names = set(layer.name
                            for layer in model.layers
                            if layer.name.startswith('dense_'))

model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

callbacks = [keras.callbacks.TensorBoard(
    log_dir=log_dir,
    # To monitor a particular layer, give its name here; several layers can be
    # monitored at once, e.g. by passing the set collected above.
    embeddings_layer_names=['dense_2'],
    #embeddings_metadata='metadata.tsv',
    embeddings_freq=1,
    #histogram_freq=1,
    # embeddings_data must have the same shape as X_train. Here it is 1-D data:
    # (60000, 2048, 1) would mean 60,000 samples, each 2048 points long with one
    # channel (one sensor); with several channels you would feed data from
    # several sensors.
    embeddings_data=X_test
)]

model.fit(X_train, Y_train,
          batch_size=batch_sizes,
          callbacks=callbacks,
          epochs=nb_epochs,
          verbose=1,
          validation_data=(X_test, Y_test))

# You can now launch tensorboard with `tensorboard --logdir=./bgbv2_log_dir` on
# your command line and then go to http://localhost:6006/#projector to view the
# embeddings

# Full signature of the callback, for reference:
# keras.callbacks.TensorBoard(
#     log_dir='./logs',
#     histogram_freq=0,
#     batch_size=32,
#     write_graph=True,
#     write_grads=False,
#     write_images=False,
#     embeddings_freq=0,
#     embeddings_layer_names=None,
#     embeddings_metadata=None,
#     embeddings_data=None,
#     update_freq='epoch')
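One detail worth pointing out: the script writes metadata.tsv into log_dir but leaves embeddings_metadata commented out, so the points in the projector are not coloured by class. Below is a minimal sketch (my addition, not part of the original script) of wiring it up with the Keras 2.x TensorBoard callback; the row counts have to agree, and since metadata.tsv holds Y_test[:200], only those 200 samples are embedded here.

callbacks = [keras.callbacks.TensorBoard(
    log_dir=log_dir,
    embeddings_freq=1,
    embeddings_layer_names=['dense_2'],
    # the official MNIST example stores metadata.tsv inside log_dir and passes
    # just the file name, as done here
    embeddings_metadata='metadata.tsv',
    # must have as many rows as metadata.tsv (200 labels were written above)
    embeddings_data=X_test[:200]
)]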

This was a real pain.

Nobody taught me; I spent a whole day figuring it out on my own.

You can then clearly see the 3-D projection in TensorBoard, which makes excellent material for papers and PPT slides.

PS: to get the output of any individual layer, see:

https://stackoverflow.com/questions/41711190/keras-how-to-get-the-output-of-each-layer
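A minimal sketch of that trick, assuming the trained model and X_test from the script above; it uses keras.backend.function, as in the linked answer, to pull the activations of every layer for a small batch.

from keras import backend as K

# one output tensor per layer of the trained Sequential model
layer_outputs = [layer.output for layer in model.layers]

# learning_phase is fed as 0 so Dropout behaves as it does at test time
get_all_outputs = K.function([model.input, K.learning_phase()], layer_outputs)

# activations[i] holds the output of model.layers[i] for the first 32 test samples
activations = get_all_outputs([X_test[:32], 0])
for layer, act in zip(model.layers, activations):
    print(layer.name, act.shape)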

Reference 1: the official Keras documentation for the TensorBoard callback

https://keras.io/callbacks/#tensorboard

from __future__ import print_function

from os import makedirs
from os.path import exists, join

import keras
from keras.callbacks import TensorBoard
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K

import numpy as np

batch_size = 128
num_classes = 10
epochs = 12
log_dir = './logs'

if not exists(log_dir):
    makedirs(log_dir)

# input image dimensions
img_rows, img_cols = 28, 28

# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# save class labels to disk to color data points in TensorBoard accordingly
with open(join(log_dir, 'metadata.tsv'), 'w') as f:
    np.savetxt(f, y_test)

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

tensorboard = TensorBoard(batch_size=batch_size,
                          embeddings_freq=1,
                          embeddings_layer_names=['features'],
                          embeddings_metadata='metadata.tsv',
                          embeddings_data=x_test)

model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu', name='features'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=batch_size,
          callbacks=[tensorboard],
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

# You can now launch tensorboard with `tensorboard --logdir=./logs` on your
# command line and then go to http://localhost:6006/#projector to view the
# embeddings

 

Reference 2: visualizing the last layer of the Keras MNIST example.

https://keras.io/examples/tensorboard_embeddings_mnist/

Reference 3: visualizing the last layer on the IMDB movie-review dataset

import keras
from keras import layers
from keras.datasets import imdb
from keras.preprocessing import sequence
max_features = 500  # the original example uses 2000
max_len = 500
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train = sequence.pad_sequences(x_train, maxlen=max_len)
x_test = sequence.pad_sequences(x_test, maxlen=max_len)


model = keras.models.Sequential()
model.add(layers.Embedding(max_features, 128, input_length=max_len, name='embed'))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.MaxPooling1D(5))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.GlobalMaxPooling1D())
model.add(layers.Dense(1))
model.summary()
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
callbacks = [keras.callbacks.TensorBoard(
                      log_dir='my_log_dir',
                      histogram_freq=1,
                      embeddings_freq=1,
                      embeddings_data=x_train[:100].astype("float32")
)]
history = model.fit(x_train, y_train,  epochs=20, batch_size=128, validation_split=0.2, callbacks=callbacks)
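Because no embeddings_layer_names are given, the callback watches the Embedding layer, so the projector shows one point per review in x_train[:100], but without labels. A short sketch (my addition, reusing the metadata.tsv idea from the first script) that colours those points by sentiment; it assumes this callback replaces the one above before calling fit.

import os
import numpy as np

os.makedirs('my_log_dir', exist_ok=True)
# one label per embedded sample; the row count must match embeddings_data (100 rows)
with open(os.path.join('my_log_dir', 'metadata.tsv'), 'w') as f:
    np.savetxt(f, y_train[:100])

callbacks = [keras.callbacks.TensorBoard(
    log_dir='my_log_dir',
    histogram_freq=1,
    embeddings_freq=1,
    embeddings_metadata='metadata.tsv',
    embeddings_data=x_train[:100].astype("float32")
)]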


# Supplementary reference: https://codeday.me/bug/20180924/267508.html

