Used a neural network model to predict red wine quality, then tried KNN, logistic regression, and SVM models for comparison; the accuracies were all close, with the neural network slightly higher. The raw data format is as follows (wine_quality.csv: one row per wine, several physicochemical feature columns, with the quality score in the last column):

The final output is as follows:
Neural network model prediction accuracy: 0.755
KNN model prediction accuracy: 0.7275
LogicRe model prediction accuracy: 0.7325
SVM model prediction accuracy: 0.7425
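These four numbers come from a single 75/25 split, so the small gaps between models may just be split noise. A minimal sketch of a fairer comparison using 5-fold cross-validation with sklearn's cross_val_score (assuming the same wine_quality.csv path and layout as the script below; the scaler is re-fit inside each fold via make_pipeline, so no fold sees the others' min/max):

import pandas as pd
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC

data_df = pd.read_csv('./data/wine_quality.csv')
y = (data_df['quality'] >= 6).astype(int)  # same binarization as the script below
X = data_df.drop(columns='quality')

models = {
    'MLP': MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=1000, random_state=17),
    'KNN': KNeighborsClassifier(n_neighbors=3),
    'LogicRe': LogisticRegression(C=1e3, solver='liblinear'),
    'SVM': SVC(C=1e3, gamma='auto'),
}
for name, model in models.items():
    # pipeline: scale, then fit the model; scoring defaults to accuracy
    scores = cross_val_score(make_pipeline(MinMaxScaler(), model), X, y, cv=5)
    print('{}: {:.4f} +/- {:.4f}'.format(name, scores.mean(), scores.std()))

A mean plus/minus one standard deviation over the folds gives a better sense of whether the neural network's edge is real. The full script: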
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
import seaborn as sns
import matplotlib.pyplot as plt
pd.set_option('expand_frame_repr', False)  # do not wrap lines when there are many columns
pd.set_option('display.max_rows', 500)  # maximum number of rows to display
file_path = './data/wine_quality.csv'
if __name__ == '__main__':
    data_df = pd.read_csv(file_path)
    all_cols = data_df.columns.tolist()  # a handy way to grab all column names as a list
    feat_cols = all_cols[:-1]
    # Check how many samples each quality value has
    # sns.countplot(data_df['quality'])  # equivalent to the next line
    # sns.countplot(data=data_df, x='quality')
    # plt.show()
    # Recode the quality column: collapse the original multi-class labels into two classes, 0 and 1
    data_df.loc[data_df['quality'] <= 5, 'quality'] = 0
    data_df.loc[data_df['quality'] >= 6, 'quality'] = 1
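    # The two .loc assignments above touch disjoint ranges (<= 5 and >= 6),
    # so their order does not matter here. An equivalent one-line alternative:
    # data_df['quality'] = (data_df['quality'] >= 6).astype(int)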
    # sns.countplot(data=data_df, x='quality')
    # plt.show()
    X = data_df[feat_cols]
    y = data_df['quality']
    # Normalize the feature values to [0, 1]: x' = (x - min) / (max - min)
    scaler = MinMaxScaler()
    X_process = scaler.fit_transform(X)
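    # Note: fit_transform on the full X lets the test split's min/max leak into
    # the scaler. A stricter alternative (same sklearn API) splits first, then
    # fits the scaler on the training portion only:
    # X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.25, random_state=10)
    # X_tr = scaler.fit_transform(X_tr)
    # X_te = scaler.transform(X_te)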
    X_train, X_test, y_train, y_test = train_test_split(X_process, y, test_size=0.25, random_state=10)
    # Neural network model. More hidden layers is not necessarily better; if max_iter is set
    # too small, sklearn warns that the optimizer did not converge (max_iter defaults to 200);
    # fixing random_state makes every run give the same result.
    mlp_model = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=1000, activation='relu', random_state=17)
    mlp_model.fit(X_train, y_train)
    accuracy = mlp_model.score(X_test, y_test)
    print('Neural network model prediction accuracy:', accuracy)
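    # After fitting, MLPClassifier (with its default 'adam' solver) exposes
    # loss_curve_; plotting it is a quick convergence check and shows whether
    # max_iter=1000 was actually needed:
    # plt.plot(mlp_model.loss_curve_)
    # plt.xlabel('iteration')
    # plt.ylabel('training loss')
    # plt.show()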
    # Also try KNN, logistic regression, and SVM
    model_dict = {
        'KNN': KNeighborsClassifier(n_neighbors=3),
        'LogicRe': LogisticRegression(C=1e3, solver='liblinear', multi_class='auto'),
        'SVM': SVC(C=1e3, gamma='auto')  # smaller C means stronger regularization, i.e. lower model complexity; C defaults to 1.0
    }
    for model_name, model in model_dict.items():
        model.fit(X_train, y_train)
        acc = model.score(X_test, y_test)
        print('{} model prediction accuracy: {}'.format(model_name, acc))
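Accuracy alone says little about per-class behavior. As a quick follow-up (a minimal sketch, assuming the script above has just run so that mlp_model, X_test, and y_test are still in scope), sklearn's confusion_matrix and classification_report give per-class precision and recall:

from sklearn.metrics import classification_report, confusion_matrix

y_pred = mlp_model.predict(X_test)
print(confusion_matrix(y_test, y_pred))       # rows: true class, columns: predicted class
print(classification_report(y_test, y_pred))  # precision / recall / F1 for class 0 and class 1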
