from __future__ import print_function
import numpy as np
np.random.seed(1337)  # fix the random seed for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
batch_size = 128   # number of samples per gradient update
nb_classes = 10    # ten digit classes, 0-9
nb_epoch = 12      # number of passes over the training set
# Input image dimensions; MNIST images are 28x28
img_rows, img_cols = 28, 28
# Number of convolutional filters to use
nb_filters = 32
# Size of the pooling area for max pooling
pool_size = (2,2)
# Convolution kernel size
kernel_size = (3,3)
# The MNIST dataset that ships with Keras is already split into 60,000 training
# and 10,000 test samples; load it as follows
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# With the TensorFlow backend (tf mode), a batch of 100 RGB
# three-channel 16x32 color images is represented as (100, 16, 32, 3):
# the first dimension is the sample axis (number of samples),
# the second and third dimensions are height and width,
# and the last is the channel axis (number of color channels)
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
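# The reshape above assumes the TensorFlow dim ordering (tf mode).
# With the Theano backend (th mode) the channel axis comes first instead;
# a minimal sketch of the portable backend check, using the Keras 1.x
# K.image_dim_ordering() query:
if K.image_dim_ordering() == 'th':
    X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
    X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)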
# Convert X_train and X_test to float32
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
# Normalize pixel values to [0, 1]
X_train /= 255
X_test /= 255
# Print dataset information
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
X_train shape: (60000, 28, 28, 1)
60000 train samples
10000 test samples
# Map the class vectors (integers from 0 to nb_classes) to binary class matrices,
# i.e. one-hot encode the labels
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
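# As a quick sanity check of the one-hot encoding (a minimal illustrative
# sketch; the slice of three samples is arbitrary):
print(y_train[:3])   # first three integer labels, e.g. [5 0 4]
print(Y_train[:3])   # each row is all zeros except a single 1 at the label's index
# e.g. label 5 maps to [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]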
# Build a Sequential model
model = Sequential()
# Convolutional layer: slides a window over the 2D input.
# When used as the first layer, it needs the input_shape argument;
# in tf mode the channel axis is the third entry of input_shape.
# border_mode: 'valid' (no padding, so the output shrinks), 'same' (zero-pad
# the borders so the output keeps the input size), or 'full' (zero-pad even
# further so every overlapping kernel position is included)
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                        border_mode='valid',
                        input_shape=input_shape))
model.add(Activation('relu'))
# Second convolutional layer, again followed by a ReLU activation
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
model.add(Activation('relu'))
# Max-pooling layer with the given pool_size, followed by dropout at a rate of 0.25
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
# Flatten layer: collapses the multi-dimensional input to 1D; commonly used
# as the transition from convolutional to fully connected layers
model.add(Flatten())
# Fully connected layer with 128 neurons, ReLU activation, and a dropout rate of 0.5
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
# Output layer with 10 neurons and softmax activation
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
# Print the model's layer and parameter summary
model.summary()
# Configure the model's learning process
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
____________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
====================================================================================================
convolution2d_3 (Convolution2D) (None, 26, 26, 32) 320 convolution2d_input_2[0][0]
____________________________________________________________________________________________________
activation_5 (Activation) (None, 26, 26, 32) 0 convolution2d_3[0][0]
____________________________________________________________________________________________________
convolution2d_4 (Convolution2D) (None, 24, 24, 32) 9248 activation_5[0][0]
____________________________________________________________________________________________________
activation_6 (Activation) (None, 24, 24, 32) 0 convolution2d_4[0][0]
____________________________________________________________________________________________________
maxpooling2d_2 (MaxPooling2D) (None, 12, 12, 32) 0 activation_6[0][0]
____________________________________________________________________________________________________
dropout_3 (Dropout) (None, 12, 12, 32) 0 maxpooling2d_2[0][0]
____________________________________________________________________________________________________
flatten_2 (Flatten) (None, 4608) 0 dropout_3[0][0]
____________________________________________________________________________________________________
dense_3 (Dense) (None, 128) 589952 flatten_2[0][0]
____________________________________________________________________________________________________
activation_7 (Activation) (None, 128) 0 dense_3[0][0]
____________________________________________________________________________________________________
dropout_4 (Dropout) (None, 128) 0 activation_7[0][0]
____________________________________________________________________________________________________
dense_4 (Dense) (None, 10) 1290 dropout_4[0][0]
____________________________________________________________________________________________________
activation_8 (Activation) (None, 10) 0 dense_4[0][0]
====================================================================================================
Total params: 600,810
Trainable params: 600,810
Non-trainable params: 0
____________________________________________________________________________________________________
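# The parameter counts above can be verified by hand: a layer with weights has
# (inputs per unit + 1 bias) * units parameters. A quick check in plain Python
# (the shapes are read off the summary, not computed by Keras here):
print(32 * (3 * 3 * 1 + 1))        # conv1: 320
print(32 * (3 * 3 * 32 + 1))       # conv2: 9248
print(128 * (12 * 12 * 32 + 1))    # dense1: 589952 (12*12*32 = 4608 flattened inputs)
print(10 * (128 + 1))              # dense2: 1290
print(320 + 9248 + 589952 + 1290)  # total: 600810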
# Train the model
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(X_test, Y_test))
# Compute the model's loss on the given input data, batch by batch
score = model.evaluate(X_test, Y_test, verbose=0)
Train on 60000 samples, validate on 10000 samples
Epoch 1/12
60000/60000 [==============================] - 18s - loss: 0.3675 - acc: 0.8886 - val_loss: 0.0877 - val_acc: 0.9722
Epoch 2/12
60000/60000 [==============================] - 13s - loss: 0.1346 - acc: 0.9598 - val_loss: 0.0623 - val_acc: 0.9802
Epoch 3/12
60000/60000 [==============================] - 13s - loss: 0.1039 - acc: 0.9691 - val_loss: 0.0527 - val_acc: 0.9837
Epoch 4/12
60000/60000 [==============================] - 13s - loss: 0.0887 - acc: 0.9736 - val_loss: 0.0462 - val_acc: 0.9849
Epoch 5/12
60000/60000 [==============================] - 13s - loss: 0.0778 - acc: 0.9763 - val_loss: 0.0420 - val_acc: 0.9860
Epoch 6/12
60000/60000 [==============================] - 13s - loss: 0.0698 - acc: 0.9794 - val_loss: 0.0383 - val_acc: 0.9871
Epoch 7/12
60000/60000 [==============================] - 14s - loss: 0.0659 - acc: 0.9802 - val_loss: 0.0374 - val_acc: 0.9868
Epoch 8/12
60000/60000 [==============================] - 14s - loss: 0.0616 - acc: 0.9818 - val_loss: 0.0385 - val_acc: 0.9877
Epoch 9/12
60000/60000 [==============================] - 14s - loss: 0.0563 - acc: 0.9829 - val_loss: 0.0338 - val_acc: 0.9881
Epoch 10/12
60000/60000 [==============================] - 14s - loss: 0.0531 - acc: 0.9845 - val_loss: 0.0320 - val_acc: 0.9889
Epoch 11/12
60000/60000 [==============================] - 13s - loss: 0.0498 - acc: 0.9855 - val_loss: 0.0323 - val_acc: 0.9890
Epoch 12/12
60000/60000 [==============================] - 14s - loss: 0.0479 - acc: 0.9852 - val_loss: 0.0329 - val_acc: 0.9892
# Print the trained model's performance on the test set
print('Test score:', score[0])
print('Test accuracy:', score[1])
Test score: 0.032927570413
Test accuracy: 0.9892
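# The trained model can also be used for inference; a minimal sketch
# (model.predict returns an (n, 10) array of class probabilities,
# and np.argmax recovers the predicted digit):
probs = model.predict(X_test[:5])
print(probs.shape)                    # (5, 10)
print(np.argmax(probs, axis=1))       # predicted digits
print(np.argmax(Y_test[:5], axis=1))  # true digits, for comparison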