1. Handwritten Digits Dataset
- from sklearn.datasets import load_digits
- digits = load_digits()
from sklearn.datasets import load_digits
digits = load_digits()
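For a quick sanity check before preprocessing, the loaded arrays can be inspected. This is a minimal sketch using only attributes that load_digits actually provides (data, images, target); it is not part of the original code:
from sklearn.datasets import load_digits

digits = load_digits()
# 1797 samples: `data` holds the flattened 64-dimensional vectors, `images` the 8x8 grayscale grids
print(digits.data.shape)    # (1797, 64)
print(digits.images.shape)  # (1797, 8, 8)
print(digits.target.shape)  # (1797,), integer labels 0-9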
2. Image Data Preprocessing
- x: normalization with MinMaxScaler()
- y: one-hot encoding with OneHotEncoder() or to_categorical (see the sketch at the end of this section)
- Train/test split
- Tensor shape for the CNN input
import numpy as np
from sklearn.datasets import load_digits
from sklearn.preprocessing import MinMaxScaler,OneHotEncoder
digits = load_digits()
# Data preprocessing
X_data = digits.data.astype(np.float32)
Y_data = digits.target.astype(np.float32).reshape(-1,1)  # reshape Y_data into a single column
scale = MinMaxScaler()  # normalization
X_data = scale.fit_transform(X_data)
print('MinMaxScaler_trans_X_data:')
print(X_data)
Y = OneHotEncoder().fit_transform(Y_data).todense()  # one-hot encoding
print('one-hot_Y:')
print(Y)
# Reshape into image format (samples, height, width, channels)
X = X_data.reshape(-1,8,8,1)
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,Y,test_size=0.2,random_state=0,stratify=Y)
print(X_train.shape,X_test.shape,y_train.shape,y_test.shape)
Normalization output:

One-hot encoding output:

Image-format tensor:

Train/test split shapes:
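As noted in the list above, to_categorical is an alternative to OneHotEncoder for encoding the labels. A minimal sketch, assuming TensorFlow/Keras is installed (Y_alt is a hypothetical variable name, not used elsewhere in this post):
from tensorflow.keras.utils import to_categorical

# Equivalent one-hot encoding of the integer labels using the Keras utility
Y_alt = to_categorical(digits.target, num_classes=10)
print(Y_alt.shape)  # (1797, 10)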
3. Designing the Convolutional Neural Network Architecture

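The architecture diagram itself is not reproduced here. As a rough sketch derived from the code in the next section (padding='same', 2x2 pooling, Dropout layers omitted because they do not change shapes), the tensor shapes flow as follows:
# Input image           : 8 x 8 x 1
# Conv2D(16, 3x3, same)  -> 8 x 8 x 16
# MaxPool2D(2x2)         -> 4 x 4 x 16
# Conv2D(32, 3x3, same)  -> 4 x 4 x 32
# MaxPool2D(2x2)         -> 2 x 2 x 32
# Conv2D(64, 3x3, same)  -> 2 x 2 x 64
# Conv2D(128, 3x3, same) -> 2 x 2 x 128
# MaxPool2D(2x2)         -> 1 x 1 x 128
# Flatten                -> 128
# Dense(128, relu)       -> 128
# Dense(10, softmax)     -> 10 class probabilities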
4. Model Training
# Import the required packages
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,Dropout,Flatten,Conv2D,MaxPool2D
# Build the model
model = Sequential()
ks = (3, 3)
# First convolutional layer
# Only the first layer needs its input shape specified; subsequent layers infer their shapes automatically
model.add(Conv2D(filters=16, kernel_size=ks, padding='same', input_shape=X_train.shape[1:],
                 activation='relu'))
# Pooling layer
model.add(MaxPool2D(pool_size=(2, 2)))
# Dropout to reduce overfitting
model.add(Dropout(0.25))
# Second convolutional layer
model.add(Conv2D(filters=32, kernel_size=ks, padding='same', activation='relu'))
# Pooling layer
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Third convolutional layer
model.add(Conv2D(filters=64, kernel_size=ks, padding='same', activation='relu'))
# Fourth convolutional layer
model.add(Conv2D(filters=128, kernel_size=ks, padding='same', activation='relu'))
# Pooling layer
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())  # flatten layer
model.add(Dense(128, activation='relu'))  # fully connected layer
model.add(Dropout(0.25))
model.add(Dense(10, activation='softmax'))  # softmax output layer
model.summary()
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
train_history = model.fit(x=X_train,y=y_train,validation_split=0.2,batch_size=300,epochs=10,verbose=2)
score = model.evaluate(X_test, y_test)
print(score)
The training output is as follows:

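As a cross-check against the model.summary() output above, the trainable parameter counts can be derived by hand (Conv2D: kernel_h * kernel_w * in_channels * filters + filters; Dense: inputs * units + units). A small sketch of the arithmetic:
# Hand-computed trainable parameter counts for the model above
conv1 = 3 * 3 * 1 * 16 + 16       # 160
conv2 = 3 * 3 * 16 * 32 + 32      # 4,640
conv3 = 3 * 3 * 32 * 64 + 64      # 18,496
conv4 = 3 * 3 * 64 * 128 + 128    # 73,856
dense1 = 128 * 128 + 128          # 16,512 (the Flatten output is 1 * 1 * 128 = 128)
dense2 = 128 * 10 + 10            # 1,290
print(conv1 + conv2 + conv3 + conv4 + dense1 + dense2)  # 114954 trainable parameters in total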
5. Model Evaluation
- model.evaluate()
- Cross-tabulation and confusion matrix
- pandas.crosstab
- seaborn.heatmap
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
results = model.evaluate(X_test, y_test)
print('Evaluation results:', results)
# Predicted classes (predict_classes is no longer available in recent Keras; take the argmax of predict instead)
y_pred = np.argmax(model.predict(X_test), axis=1)
print('Predictions:', y_pred[:10])
# Cross-tabulation and confusion matrix
# y_test is one-hot encoded, so take the argmax to recover the integer labels
y_true = np.asarray(y_test).argmax(axis=1)
# Cross-tabulation comparing the predictions with the true labels
c = pd.crosstab(y_true, y_pred, rownames=['True'], colnames=['Predict'])
print(c)
# Confusion matrix as a heatmap
df = pd.DataFrame(c)  # pd.crosstab already returns a DataFrame
sns.heatmap(df, annot=True, cmap="RdGy", linewidths=0.2, linecolor='g')
plt.show()
Crosstab of predictions vs. true labels:

The heatmap of predictions vs. true labels is shown below:

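pd.crosstab plus a heatmap is one way to build the confusion matrix; sklearn.metrics can produce the same information directly. A minimal sketch, reusing y_true and y_pred from the evaluation code above:
from sklearn.metrics import confusion_matrix, classification_report

# Same counts as the crosstab above, plus per-class precision/recall/F1
print(confusion_matrix(y_true, y_pred))
print(classification_report(y_true, y_pred))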
The complete code is as follows:
# -*- coding:utf-8 -*-
# Class: Software Engineering Class 1, 2017
# Developer: 愛飛的大白鯊
# Date: 2020/6/8 10:20
# File: 手寫數據集及預處理.py
import numpy as np
from sklearn.datasets import load_digits
from sklearn.preprocessing import MinMaxScaler,OneHotEncoder
digits = load_digits()
# Data preprocessing
X_data = digits.data.astype(np.float32)
Y_data = digits.target.astype(np.float32).reshape(-1,1)  # reshape Y_data into a single column
scale = MinMaxScaler()  # normalization
X_data = scale.fit_transform(X_data)
print('MinMaxScaler_trans_X_data:')
print(X_data)
Y = OneHotEncoder().fit_transform(Y_data).todense()  # one-hot encoding
print('one-hot_Y:')
print(Y)
# Reshape into image format (samples, height, width, channels)
X = X_data.reshape(-1,8,8,1)
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,Y,test_size=0.2,random_state=0,stratify=Y)
print(X_train.shape,X_test.shape,y_train.shape,y_test.shape)
# Import the required packages
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,Dropout,Flatten,Conv2D,MaxPool2D
# Build the model
model = Sequential()
ks = (3, 3)
# First convolutional layer
# Only the first layer needs its input shape specified; subsequent layers infer their shapes automatically
model.add(Conv2D(filters=16, kernel_size=ks, padding='same', input_shape=X_train.shape[1:],
                 activation='relu'))
# Pooling layer
model.add(MaxPool2D(pool_size=(2, 2)))
# Dropout to reduce overfitting
model.add(Dropout(0.25))
# Second convolutional layer
model.add(Conv2D(filters=32, kernel_size=ks, padding='same', activation='relu'))
# Pooling layer
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Third convolutional layer
model.add(Conv2D(filters=64, kernel_size=ks, padding='same', activation='relu'))
# Fourth convolutional layer
model.add(Conv2D(filters=128, kernel_size=ks, padding='same', activation='relu'))
# Pooling layer
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())  # flatten layer
model.add(Dense(128, activation='relu'))  # fully connected layer
model.add(Dropout(0.25))
model.add(Dense(10, activation='softmax'))  # softmax output layer
model.summary()
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
train_history = model.fit(x=X_train,y=y_train,validation_split=0.2,batch_size=300,epochs=10,verbose=2)
score = model.evaluate(X_test, y_test)
print(score)
train_history.history  # dictionary of per-epoch loss and accuracy values recorded during fit
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['FangSong']  # specify the plot font

def show_train_history(train_history, train, validation):
    plt.plot(train_history.history[train])
    plt.plot(train_history.history[validation])
    plt.ylabel(train)
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')

p = plt.figure(figsize=(15, 15))
a1 = p.add_subplot(2, 1, 1)
show_train_history(train_history, 'accuracy', 'val_accuracy')
plt.title("Accuracy")
a2 = p.add_subplot(2, 1, 2)
show_train_history(train_history, 'loss', 'val_loss')
plt.title("Loss")
plt.show()
import seaborn as sns
import pandas as pd
results = model.evaluate(X_test, y_test)
print('Evaluation results:', results)
# Predicted classes (predict_classes is no longer available in recent Keras; take the argmax of predict instead)
y_pred = np.argmax(model.predict(X_test), axis=1)
print('Predictions:', y_pred[:10])
# Cross-tabulation and confusion matrix
# y_test is one-hot encoded, so take the argmax to recover the integer labels
y_true = np.asarray(y_test).argmax(axis=1)
# Cross-tabulation comparing the predictions with the true labels
c = pd.crosstab(y_true, y_pred, rownames=['True'], colnames=['Predict'])
print(c)
# Confusion matrix as a heatmap
df = pd.DataFrame(c)  # pd.crosstab already returns a DataFrame
sns.heatmap(df, annot=True, cmap="RdGy", linewidths=0.2, linecolor='g')
plt.show()
