scikit-learn機器學習(二)邏輯回歸進行二分類(垃圾郵件分類),二分類性能指標,畫ROC曲線,計算acc,recall,precision,f1


數據來自UCI機器學習倉庫中的垃圾信息數據集

數據可從http://archive.ics.uci.edu/ml/datasets/sms+spam+collection下載

 

轉成csv載入數據

import matplotlib
# Configure a CJK-capable font so the Chinese plot labels render, and keep
# the minus sign displayable under that font.
matplotlib.rcParams['font.sans-serif'] = [u'simHei']
matplotlib.rcParams['axes.unicode_minus'] = False
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
# Fixed: `sklearn.linear_model.logistic` is a private module that was
# removed in scikit-learn >= 0.24; import from the public package instead.
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, cross_val_score

# Column 0 is the label ('spam' / 'ham'), column 1 is the SMS text.
df = pd.read_csv('data/SMSSpamCollection.csv', header=None)
# Fixed: `df.head` printed the bound method object; call it to show rows.
print(df.head())

print("垃圾郵件個數:%s" % df[df[0]=='spam'][0].count())
print("正常郵件個數:%s" % df[df[0]=='ham'][0].count())

垃圾郵件個數:747
正常郵件個數:4825

 

創建TfidfVectorizer實例,將訓練文本和測試文本都進行轉換

# Separate labels (column 0) from message texts (column 1), hold out a
# test split, then fit TF-IDF on the training texts only and re-apply the
# same fitted vocabulary to the held-out texts.
y = df[0].values
X = df[1].values
X_train_raw, X_test_raw, y_train, y_test = train_test_split(X, y)
vectorizer = TfidfVectorizer()
X_train = vectorizer.fit_transform(X_train_raw)
X_test = vectorizer.transform(X_test_raw)

 

建立邏輯回歸模型訓練和預測

# Fit a logistic-regression classifier on the TF-IDF features, then show
# predictions for the first five held-out messages alongside their text.
LR = LogisticRegression()
LR.fit(X_train, y_train)
predictions = LR.predict(X_test)
for prediction, message in zip(predictions[:5], X_test_raw):
    print("預測為 %s ,信件為 %s" % (prediction, message))

 

預測為 ham ,信件為 Send to someone else :-)
預測為 ham ,信件為 Easy ah?sen got selected means its good..
預測為 ham ,信件為 Sorry da. I gone mad so many pending works what to do.
預測為 ham ,信件為 What not under standing.
預測為 spam ,信件為 SIX chances to win CASH! From 100 to 20,000 pounds txt> CSH11 and send to 87575. Cost 150p/day, 6days, 16+ TsandCs apply Reply HL 4 info

 

二元分類性能指標:混淆矩陣

# In[2] Binary-classification metric: confusion matrix
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
# Fixed: the original assigned the result back to the name
# `confusion_matrix`, shadowing the imported function and breaking any
# later call (or re-run of this cell); store it as `cm` instead.
cm = confusion_matrix(y_test, predictions)
print(cm)
plt.matshow(cm)
plt.title("混淆矩陣")
plt.colorbar()
plt.ylabel("真實值")
plt.xlabel("預測值")
plt.show()

[[1217    1]
 [  52  123]]

 

准確率,召回率,精准率,F1值

# In[3] Report precision / recall / f1-score / support per class
from sklearn.metrics import classification_report
print(classification_report(y_test, predictions))

from sklearn.metrics import roc_curve, auc
# Accuracy via 5-fold cross-validation on the training split.
scores = cross_val_score(LR, X_train, y_train, cv=5)
print("准確率為: ", scores)
print("平均准確率為: ", np.mean(scores))

# The 'precision'/'recall'/'f1' scorers need numeric labels; LabelEncoder
# maps alphabetically, so ham -> 0 and spam -> 1 (spam is the positive class).
from sklearn.preprocessing import LabelEncoder
class_le = LabelEncoder()
y_train_n = class_le.fit_transform(y_train)
# Fixed: reuse the encoder fitted on the training labels (`transform`, not
# `fit_transform`) so train and test labels share one consistent mapping.
y_test_n = class_le.transform(y_test)

# Precision
precision = cross_val_score(LR, X_train, y_train_n, cv=5, scoring='precision')
print("平均精准率為: ", np.mean(precision))
# Recall
recall = cross_val_score(LR, X_train, y_train_n, cv=5, scoring='recall')
print("平均召回率為: ", np.mean(recall))
# F1
f1 = cross_val_score(LR, X_train, y_train_n, cv=5, scoring='f1')
print("平均F1值為: ", np.mean(f1))
准確率為:  [0.96654719 0.95459976 0.95449102 0.9508982  0.96047904]
平均准確率為:  0.9574030433756144
平均精准率為:  0.9906631114805584
平均召回率為:  0.6956979405034325
平均F1值為:  0.8162874707978786

 

畫出ROC曲線,AUC為ROC曲線以下部分的面積

# In[4] ROC curve; y_test_n must hold the numeric labels.
# roc_curve wants the score of the positive class: column 1 of
# predict_proba corresponds to 'spam' (the second of LR.classes_).
predictions_pro = LR.predict_proba(X_test)
false_positive_rate, recall, thresholds = roc_curve(y_test_n,
                                                    predictions_pro[:, 1])
# AUC is the area under the ROC curve.
roc_auc = auc(false_positive_rate, recall)
plt.title("受試者操作特征曲線(ROC)")
plt.plot(false_positive_rate, recall, 'b', label='AUC = % 0.2f' % roc_auc)
plt.legend(loc='lower right')
# Diagonal reference line: the expected curve of a random classifier.
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('假陽性率')
plt.ylabel('召回率')
plt.show()
    

 

 所有代碼:

# -*- coding: utf-8 -*-
# Spam/ham SMS classification with TF-IDF features + logistic regression,
# followed by binary-classification metrics and an ROC curve.
import matplotlib
# CJK-capable font so Chinese plot labels render correctly.
matplotlib.rcParams['font.sans-serif'] = [u'simHei']
matplotlib.rcParams['axes.unicode_minus'] = False
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
# Fixed: `sklearn.linear_model.logistic` is a private module removed in
# scikit-learn >= 0.24; import from the public package instead.
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, cross_val_score

# Column 0 is the label ('spam' / 'ham'), column 1 is the SMS text.
df = pd.read_csv('data/SMSSpamCollection.csv', header=None)
# Fixed: call head() — `df.head` printed the bound method object.
print(df.head())

print("垃圾郵件個數:%s" % df[df[0]=='spam'][0].count())
print("正常郵件個數:%s" % df[df[0]=='ham'][0].count())

# In[1] Split, then fit TF-IDF on training texts only and re-apply the
# fitted vocabulary to the held-out texts.
X = df[1].values
y = df[0].values
X_train_raw, X_test_raw, y_train, y_test = train_test_split(X, y)
vectorizer = TfidfVectorizer()
X_train = vectorizer.fit_transform(X_train_raw)
X_test = vectorizer.transform(X_test_raw)

LR = LogisticRegression()
LR.fit(X_train, y_train)
predictions = LR.predict(X_test)
for i, prediction in enumerate(predictions[:5]):
    print("預測為 %s ,信件為 %s" % (prediction, X_test_raw[i]))


# In[2] Binary-classification metric: confusion matrix
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
# Fixed: store under `cm` — assigning back to `confusion_matrix` shadowed
# the imported function and broke any later call.
cm = confusion_matrix(y_test, predictions)
print(cm)
plt.matshow(cm)
plt.title("混淆矩陣")
plt.colorbar()
plt.ylabel("真實值")
plt.xlabel("預測值")
plt.show()

# In[3] Report precision / recall / f1-score / support per class
from sklearn.metrics import classification_report
print(classification_report(y_test, predictions))

from sklearn.metrics import roc_curve, auc
# Accuracy via 5-fold cross-validation on the training split.
scores = cross_val_score(LR, X_train, y_train, cv=5)
print("准確率為: ", scores)
print("平均准確率為: ", np.mean(scores))

# The 'precision'/'recall'/'f1' scorers need numeric labels; LabelEncoder
# maps alphabetically, so ham -> 0 and spam -> 1 (spam is positive).
from sklearn.preprocessing import LabelEncoder
class_le = LabelEncoder()
y_train_n = class_le.fit_transform(y_train)
# Fixed: reuse the fitted encoder (`transform`, not `fit_transform`) so
# train and test labels share one consistent mapping.
y_test_n = class_le.transform(y_test)

# Precision
precision = cross_val_score(LR, X_train, y_train_n, cv=5, scoring='precision')
print("平均精准率為: ", np.mean(precision))
# Recall
recall = cross_val_score(LR, X_train, y_train_n, cv=5, scoring='recall')
print("平均召回率為: ", np.mean(recall))
# F1
f1 = cross_val_score(LR, X_train, y_train_n, cv=5, scoring='f1')
print("平均F1值為: ", np.mean(f1))

# In[4] ROC curve; column 1 of predict_proba is the positive class 'spam'.
predictions_pro = LR.predict_proba(X_test)
false_positive_rate, recall, thresholds = roc_curve(y_test_n, predictions_pro[:, 1])
roc_auc = auc(false_positive_rate, recall)
plt.title("受試者操作特征曲線(ROC)")
plt.plot(false_positive_rate, recall, 'b', label='AUC = % 0.2f' % roc_auc)
plt.legend(loc='lower right')
# Diagonal reference: the expected curve of a random classifier.
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('假陽性率')
plt.ylabel('召回率')
plt.show()

 


免責聲明!

本站轉載的文章為個人學習借鑒使用,本站對版權不負任何法律責任。如果侵犯了您的隱私權益,請聯系本站郵箱yoyou2525@163.com刪除。



 
粵ICP備18138465號   © 2018-2025 CODEPRJ.COM