# Load the handwritten digits dataset
from sklearn.datasets import load_digits
data = load_digits()
print(data)
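For orientation: load_digits returns 1797 samples, each an 8x8 grayscale image stored both as a flattened 64-feature vector and as an 8x8 array, which is why the preprocessing below reshapes the feature matrix to (-1, 8, 8, 1). A quick check of the shapes (a minimal sketch, not part of the original script):

# Inspect the dataset layout returned by load_digits
from sklearn.datasets import load_digits

data = load_digits()
print(data['data'].shape)    # (1797, 64)  -- flattened 8x8 images
print(data['images'].shape)  # (1797, 8, 8)
print(data['target'].shape)  # (1797,)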

Image data preprocessing:
- x: normalize with MinMaxScaler()
- y: one-hot encode with OneHotEncoder() or to_categorical (a sketch of the to_categorical variant follows the script below)
- split into training and test sets
- reshape into the tensor layout expected by the network
"""
@author Rakers
"""
import numpy as np
# Load the handwritten digits dataset
from sklearn.datasets import load_digits
# Image data preprocessing -- normalization
from sklearn.preprocessing import MinMaxScaler
# OneHotEncoder for one-hot encoding
from sklearn.preprocessing import OneHotEncoder
# Split the dataset
from sklearn.model_selection import train_test_split
data = load_digits()
# x: normalize with MinMaxScaler()
X_data = data['data'].astype(np.float32)
scaler = MinMaxScaler()
X_data = scaler.fit_transform(X_data)
print("Normalized data:\n", X_data)
# Reshape into image format
X = X_data.reshape(-1, 8, 8, 1)
print("Shape after reshaping to images:", X.shape)
# y: one-hot encode with OneHotEncoder()
y = data['target'].astype(np.float32).reshape(-1, 1)  # reshape the targets into a single column
Y = OneHotEncoder().fit_transform(y).todense()  # densify the sparse matrix with todense()
print("One-hot encoded Y:\n", Y)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0, stratify=Y)
print(X_train, X_test, y_train, y_test)
print("X_data.shape:", X_data.shape)
print("X.shape:", X.shape)


Design the convolutional neural network structure
Draw a diagram of the model structure and give the design rationale (a plot_model sketch follows the script below).

"""
@author Rakers
"""
import numpy as np
# Load the handwritten digits dataset
from sklearn.datasets import load_digits
# Image data preprocessing -- normalization
from sklearn.preprocessing import MinMaxScaler
# OneHotEncoder for one-hot encoding
from sklearn.preprocessing import OneHotEncoder
# Split the dataset
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Conv2D, MaxPool2D, Flatten
def buildModel(isPrintSummary=True, X_train=None):
    """
    Build the CNN model.
    :param isPrintSummary: whether to print the model summary
    :param X_train: training data, used only to infer the input shape
    :return: the constructed model
    """
    model = Sequential()
    ks = (3, 3)  # convolution kernel size
    input_shape = X_train.shape[1:]
    # Convolution layer 1; with padding='same', TensorFlow zero-pads the input automatically
    model.add(Conv2D(filters=16, kernel_size=ks, padding='same', input_shape=input_shape, activation='relu'))
    # Pooling layer 1
    model.add(MaxPool2D(pool_size=(2, 2)))
    # Dropout randomly drops connections to reduce overfitting
    model.add(Dropout(0.25))
    # Convolution layer 2
    model.add(Conv2D(filters=32, kernel_size=ks, padding='same', activation='relu'))
    # Pooling layer 2
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    # Convolution layer 3
    model.add(Conv2D(filters=64, kernel_size=ks, padding='same', activation='relu'))
    # Convolution layer 4
    model.add(Conv2D(filters=128, kernel_size=ks, padding='same', activation='relu'))
    # Pooling layer 3
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    # Flatten layer
    model.add(Flatten())
    # Fully connected layer
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.25))
    # Output layer with softmax activation
    model.add(Dense(10, activation='softmax'))
    if isPrintSummary:
        model.summary()
    return model
if __name__ == "__main__":
    data = load_digits()
    # x: normalize with MinMaxScaler()
    X_data = data['data'].astype(np.float32)
    scaler = MinMaxScaler()
    X_data = scaler.fit_transform(X_data)
    # print("Normalized data:\n", X_data)
    # Reshape into image format
    X = X_data.reshape(-1, 8, 8, 1)
    # print("Shape after reshaping to images:", X.shape)
    # y: one-hot encode with OneHotEncoder()
    y = data['target'].astype(np.float32).reshape(-1, 1)  # reshape the targets into a single column
    Y = OneHotEncoder().fit_transform(y).todense()  # densify the sparse matrix with todense()
    # print("One-hot encoded Y:\n", Y)
    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0, stratify=Y)
    print(X_train, X_test, y_train, y_test)
    # print("X_data.shape:", X_data.shape)
    # print("X.shape:", X.shape)
    model = buildModel(X_train=X_train)
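The intro to this step also asks for a diagram of the model structure. A minimal sketch, to be appended inside the __main__ block above (it assumes the pydot and graphviz packages are installed; the output file name cnn_digits.png is arbitrary):

    # Render the model architecture to an image file; requires pydot and graphviz
    from tensorflow.keras.utils import plot_model
    plot_model(model, to_file='cnn_digits.png', show_shapes=True, show_layer_names=True)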

Model training
"""
@author Rakers
"""
import numpy as np
import matplotlib.pyplot as plt
# Load the handwritten digits dataset
from sklearn.datasets import load_digits
# Image data preprocessing -- normalization
from sklearn.preprocessing import MinMaxScaler
# OneHotEncoder for one-hot encoding
from sklearn.preprocessing import OneHotEncoder
# Split the dataset
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Conv2D, MaxPool2D, Flatten
def buildModel(isPrintSummary=True, X_train=None):
    """
    Build the CNN model.
    :param isPrintSummary: whether to print the model summary
    :param X_train: training data, used only to infer the input shape
    :return: the constructed model
    """
    model = Sequential()
    ks = (3, 3)  # convolution kernel size
    input_shape = X_train.shape[1:]
    # Convolution layer 1; with padding='same', TensorFlow zero-pads the input automatically
    model.add(Conv2D(filters=16, kernel_size=ks, padding='same', input_shape=input_shape, activation='relu'))
    # Pooling layer 1
    model.add(MaxPool2D(pool_size=(2, 2)))
    # Dropout randomly drops connections to reduce overfitting
    model.add(Dropout(0.25))
    # Convolution layer 2
    model.add(Conv2D(filters=32, kernel_size=ks, padding='same', activation='relu'))
    # Pooling layer 2
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    # Convolution layer 3
    model.add(Conv2D(filters=64, kernel_size=ks, padding='same', activation='relu'))
    # Convolution layer 4
    model.add(Conv2D(filters=128, kernel_size=ks, padding='same', activation='relu'))
    # Pooling layer 3
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    # Flatten layer
    model.add(Flatten())
    # Fully connected layer
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.25))
    # Output layer with softmax activation
    model.add(Dense(10, activation='softmax'))
    if isPrintSummary:
        model.summary()
    return model
# Plot the training history
def show_train_history(train_history, train, validation):
    """
    @author Rakers
    :param train_history: History object returned by model.fit
    :param train: key of the training metric, e.g. 'acc' or 'loss'
    :param validation: key of the validation metric, e.g. 'val_acc' or 'val_loss'
    :return: None
    """
    if train in train_history.history:
        plt.plot(train_history.history[train])
    if validation in train_history.history:
        plt.plot(train_history.history[validation])
    plt.title('Train History')
    plt.ylabel(train)
    plt.xlabel('epoch')
    plt.legend([train, validation], loc='upper left')
    plt.show()
if __name__ == "__main__":
    data = load_digits()
    # x: normalize with MinMaxScaler()
    X_data = data['data'].astype(np.float32)
    scaler = MinMaxScaler()
    X_data = scaler.fit_transform(X_data)
    # print("Normalized data:\n", X_data)
    # Reshape into image format
    X = X_data.reshape(-1, 8, 8, 1)
    # print("Shape after reshaping to images:", X.shape)
    # y: one-hot encode with OneHotEncoder()
    y = data['target'].astype(np.float32).reshape(-1, 1)  # reshape the targets into a single column
    Y = OneHotEncoder().fit_transform(y).todense()  # densify the sparse matrix with todense()
    # print("One-hot encoded Y:\n", Y)
    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0, stratify=Y)
    print(X_train, X_test, y_train, y_test)
    # print("X_data.shape:", X_data.shape)
    # print("X.shape:", X.shape)
    model = buildModel(X_train=X_train)
    # Model training
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
    train_history = model.fit(x=X_train, y=y_train, validation_split=0.2, batch_size=300, epochs=10, verbose=2)
    # Accuracy curves
    show_train_history(train_history, 'acc', 'val_acc')
    # Loss curves
    show_train_history(train_history, 'loss', 'val_loss')
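Because the model is compiled with metrics=['acc'], the History object should record the keys 'loss', 'acc', 'val_loss', and 'val_acc', which are exactly the names show_train_history looks up. A quick check that can be appended after model.fit in the script above (a sketch, not part of the original):

    # List the recorded metrics and report the final epoch's accuracy
    print(train_history.history.keys())
    print('final training acc:', train_history.history['acc'][-1])
    print('final validation acc:', train_history.history['val_acc'][-1])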



Model evaluation
- model.evaluate()
- crosstab and confusion matrix (an alternative using sklearn.metrics is sketched after the script below)
- pandas.crosstab
- seaborn.heatmap
"""
@author Rakers
"""
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Load the handwritten digits dataset
from sklearn.datasets import load_digits
# Image data preprocessing -- normalization
from sklearn.preprocessing import MinMaxScaler
# OneHotEncoder for one-hot encoding
from sklearn.preprocessing import OneHotEncoder
# Split the dataset
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Conv2D, MaxPool2D, Flatten
def buildModel(isPrintSummary=True, X_train=None):
    """
    Build the CNN model.
    :param isPrintSummary: whether to print the model summary
    :param X_train: training data, used only to infer the input shape
    :return: the constructed model
    """
    model = Sequential()
    ks = (3, 3)  # convolution kernel size
    input_shape = X_train.shape[1:]
    # Convolution layer 1; with padding='same', TensorFlow zero-pads the input automatically
    model.add(Conv2D(filters=16, kernel_size=ks, padding='same', input_shape=input_shape, activation='relu'))
    # Pooling layer 1
    model.add(MaxPool2D(pool_size=(2, 2)))
    # Dropout randomly drops connections to reduce overfitting
    model.add(Dropout(0.25))
    # Convolution layer 2
    model.add(Conv2D(filters=32, kernel_size=ks, padding='same', activation='relu'))
    # Pooling layer 2
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    # Convolution layer 3
    model.add(Conv2D(filters=64, kernel_size=ks, padding='same', activation='relu'))
    # Convolution layer 4
    model.add(Conv2D(filters=128, kernel_size=ks, padding='same', activation='relu'))
    # Pooling layer 3
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    # Flatten layer
    model.add(Flatten())
    # Fully connected layer
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.25))
    # Output layer with softmax activation
    model.add(Dense(10, activation='softmax'))
    if isPrintSummary:
        model.summary()
    return model
# Plot the training history
def show_train_history(train_history, train, validation):
    """
    @author Rakers
    :param train_history: History object returned by model.fit
    :param train: key of the training metric, e.g. 'acc' or 'loss'
    :param validation: key of the validation metric, e.g. 'val_acc' or 'val_loss'
    :return: None
    """
    if train in train_history.history:
        plt.plot(train_history.history[train])
    if validation in train_history.history:
        plt.plot(train_history.history[validation])
    plt.title('Train History')
    plt.ylabel(train)
    plt.xlabel('epoch')
    plt.legend([train, validation], loc='upper left')
    plt.show()
if __name__ == "__main__":
    data = load_digits()
    # x: normalize with MinMaxScaler()
    X_data = data['data'].astype(np.float32)
    scaler = MinMaxScaler()
    X_data = scaler.fit_transform(X_data)
    # print("Normalized data:\n", X_data)
    # Reshape into image format
    X = X_data.reshape(-1, 8, 8, 1)
    # print("Shape after reshaping to images:", X.shape)
    # y: one-hot encode with OneHotEncoder()
    y = data['target'].astype(np.float32).reshape(-1, 1)  # reshape the targets into a single column
    Y = OneHotEncoder().fit_transform(y).todense()  # densify the sparse matrix with todense()
    # print("One-hot encoded Y:\n", Y)
    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0, stratify=Y)
    print(X_train, X_test, y_train, y_test)
    # print("X_data.shape:", X_data.shape)
    # print("X.shape:", X.shape)
    model = buildModel(X_train=X_train)
    # Model training
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
    train_history = model.fit(x=X_train, y=y_train, validation_split=0.2, batch_size=300, epochs=10, verbose=2)
    # Accuracy curves
    show_train_history(train_history, 'acc', 'val_acc')
    # Loss curves
    show_train_history(train_history, 'loss', 'val_loss')
    # Model evaluation
    score = model.evaluate(X_test, y_test)
    print('score:', score)
    # Predicted classes (predict_classes was removed in recent TensorFlow releases, so take the argmax of predict)
    y_pred = np.argmax(model.predict(X_test), axis=1)
    print('y_pred:', y_pred[:10])
    # Crosstab and confusion matrix
    y_test1 = np.argmax(y_test, axis=1).reshape(-1)
    y_true = np.array(y_test1)[0]
    # Crosstab comparing predictions with the true labels
    # pandas.crosstab
    print(pd.crosstab(y_true, y_pred, rownames=['true'], colnames=['predict']))
    # Confusion-matrix heatmap
    # seaborn.heatmap
    y_test1 = y_test1.tolist()[0]
    a = pd.crosstab(np.array(y_test1), y_pred, rownames=['Labels'], colnames=['Predict'])
    # Convert to a DataFrame
    df = pd.DataFrame(a)
    sns.heatmap(df, annot=True, cmap="Reds", linewidths=0.2, linecolor='g')
    plt.show()
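The crosstab and heatmap above amount to a confusion matrix. As an alternative, scikit-learn's metrics module can produce the same comparison plus per-class precision and recall; a sketch that could be appended to the __main__ block above, reusing y_true and y_pred (not part of the original script):

    # Equivalent evaluation with scikit-learn's built-in helpers
    from sklearn.metrics import confusion_matrix, classification_report
    print(confusion_matrix(y_true, y_pred))
    print(classification_report(y_true, y_pred))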


