Naive Bayes Models in sklearn and Their Applications


1. Using naive Bayes models to classify flowers in the iris dataset

# Gaussian model

from sklearn.datasets import load_iris
iris = load_iris()
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()  # build a Gaussian naive Bayes model
pred = gnb.fit(iris.data,iris.target)  # train the model
y_pred = pred.predict(iris.data)   # predict classes
print(iris.data.shape[0],(iris.target != y_pred).sum())

Output:
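The print call reports the total number of samples together with how many of them the model misclassified on the training data; the same information can be expressed as a training accuracy with sklearn.metrics.accuracy_score (a small sketch, not part of the original code):

from sklearn.metrics import accuracy_score
# accuracy on the data the model was trained on (no train/test split yet)
print("Training accuracy: %.3f" % accuracy_score(iris.target, y_pred))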

# Multinomial model

from sklearn import datasets
iris = datasets.load_iris()
from sklearn.naive_bayes import MultinomialNB  
gnb = MultinomialNB()   # build a multinomial naive Bayes model
pred = gnb.fit(iris.data,iris.target)  # train the model
y_pred = pred.predict(iris.data)   # predict classes
print(iris.data.shape[0],(iris.target != y_pred).sum())

Output:

# Bernoulli model

from sklearn import datasets
iris = datasets.load_iris()
from sklearn.naive_bayes import BernoulliNB  
gnb = BernoulliNB()   # build a Bernoulli naive Bayes model
pred = gnb.fit(iris.data,iris.target)  # train the model
y_pred = pred.predict(iris.data)   # predict classes
print(iris.data.shape[0],(iris.target != y_pred).sum())

Output:
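The three variants make different assumptions about the features. GaussianNB models each continuous feature with a normal distribution, which suits the iris measurements; MultinomialNB assumes count-like features; BernoulliNB binarizes every feature at the threshold given by its binarize parameter (0.0 by default), so for iris, where all measurements are positive, every feature becomes 1 and the model is left with almost no information. If BernoulliNB is applied to such data at all, a more informative threshold can be supplied; the threshold below is an arbitrary illustration, not something from the original code:

from sklearn.naive_bayes import BernoulliNB
# binarize each feature at the global mean of the data -- purely an illustrative choice
bnb = BernoulliNB(binarize=iris.data.mean())
bnb.fit(iris.data, iris.target)
print(iris.data.shape[0], (iris.target != bnb.predict(iris.data)).sum())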

2. Use sklearn.model_selection.cross_val_score() to validate the models.

# Gaussian model validation

from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection  import cross_val_score
gnb = GaussianNB()
scores = cross_val_score(gnb,iris.data,iris.target,cv=10)  # 10-fold cross-validation of the Gaussian model
print("Accuracy:%.3f"%scores.mean())

Output:

# Multinomial model validation

from sklearn.naive_bayes import MultinomialNB 
from sklearn.model_selection  import cross_val_score
gnb = MultinomialNB()
scores = cross_val_score(gnb,iris.data,iris.target,cv=10)  # 10-fold cross-validation of the multinomial model
print("Accuracy:%.3f"%scores.mean())

Output:

# Bernoulli model validation

from sklearn.naive_bayes import BernoulliNB
from sklearn.model_selection  import cross_val_score
gnb = BernoulliNB()
scores = cross_val_score(gnb,iris.data,iris.target,cv=10)  # 10-fold cross-validation of the Bernoulli model
print("Accuracy:%.3f"%scores.mean())

Output:
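The three validation snippets above differ only in which estimator is passed to cross_val_score, so they can also be run side by side in a single loop that reports the per-fold spread as well (a small consolidation sketch, not in the original):

from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB
from sklearn.model_selection import cross_val_score

for clf in (GaussianNB(), MultinomialNB(), BernoulliNB()):
    scores = cross_val_score(clf, iris.data, iris.target, cv=10)   # 10-fold cross-validation
    print("%s: %.3f (+/- %.3f)" % (clf.__class__.__name__, scores.mean(), scores.std()))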

3. Spam email classification

Data preparation:

  • Read the email data with csv and split out each message's label and content.
  • Preprocess the message content: drop words shorter than 3 characters, remove words with no semantic content (stop words), etc.

Try using the nltk library:

pip install nltk

import nltk

nltk.download()

If this does not succeed: fall back to a word-frequency based preprocessing approach (see the sketch below).

(Since downloading the nltk data failed 5 times, the source code is just saved here for now, so there are no run screenshots.)
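A minimal sketch of such a word-frequency fallback, using only the standard library (the stop-word list here is a small hand-written example, not from the original):

import re
from collections import Counter

# tiny hand-written stop-word list, used only as an illustration
STOPS = {"the", "and", "for", "you", "are", "with", "this", "that", "have", "not"}

def simple_preprocessing(text):
    words = re.findall(r"[a-z]+", text.lower())                    # keep alphabetic tokens only
    words = [w for w in words if len(w) >= 3 and w not in STOPS]   # drop short and stop words
    return ' '.join(words)

def word_frequencies(text):
    return Counter(simple_preprocessing(text).split())             # word-frequency statistics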

Code 1

import nltk
# nltk.download() opens an interactive downloader; the resources needed here are:
# nltk.download('punkt'); nltk.download('stopwords'); nltk.download('wordnet')
nltk.download()

text = '''ham	"Go until jurong point, crazy.. Available only in bugis n great world la e buffet... Cine there got amore wat..."'''
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer

def preprocessing(text):
    # split into sentences, then into word tokens
    tokens=[word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]
    stops=stopwords.words('english')
    tokens=[token for token in tokens if token not in stops]          # drop stop words
    tokens=[token.lower() for token in tokens if len(token)>=3]       # lowercase, drop short words
    lmtzr= WordNetLemmatizer()
    tokens=[lmtzr.lemmatize(token) for token in tokens]               # reduce words to their lemma
    preprocessed_text=' '.join(tokens)
    return preprocessed_text

preprocessing(text)

  

Code 2

import csv
file_path=r'F:\Pycharm\11.22\SMSSpamCollectionjsn.txt'
sms=open(file_path,'r',encoding='utf-8')
sms_data=[]
sms_label=[]
csv_reader=csv.reader(sms,delimiter='\t')   # each line is "label<TAB>message"
for line in csv_reader:
    sms_label.append(line[0])    # message label ('ham' or 'spam')
    sms_data.append(line[1])     # message content
sms.close()
print(len(sms_label))
sms_label
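To check how the two classes are balanced, the labels read in above can simply be counted (a small check, not in the original code):

from collections import Counter
print(Counter(sms_label))   # counts of 'ham' vs 'spam' messages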

  

Code 3

def preprocessing(text):
    preprocessed_text = text   # placeholder: no real preprocessing applied yet
    return preprocessed_text

import csv
file_path=r'F:\Pycharm\11.22\SMSSpamCollection'
sms=open(file_path,'r',encoding='utf-8')
sms_data=[]
sms_label=[]
csv_reader=csv.reader(sms,delimiter='\t')
for line in csv_reader:
    sms_label.append(line[0])
    sms_data.append(preprocessing(line[1]))
sms.close()
sms_data

  

Code 4

from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(sms_data, sms_label, test_size=0.3, random_state=0, stratify=sms_label)

x_train
x_test

from sklearn.naive_bayes import MultinomialNB
# note: x_train still holds raw strings here; MultinomialNB needs numeric features,
# see the vectorization sketch below
clf=MultinomialNB().fit(x_train,y_train)
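MultinomialNB expects numeric feature vectors, so calling fit on the raw message strings in x_train will fail. A minimal sketch of the missing vectorization step, using TfidfVectorizer (an assumption; this step is not in the original code):

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB

tfidf = TfidfVectorizer(min_df=2, ngram_range=(1, 2), stop_words='english')
x_train_tfidf = tfidf.fit_transform(x_train)   # learn the vocabulary on the training split only
x_test_tfidf = tfidf.transform(x_test)         # reuse that vocabulary for the test split

clf = MultinomialNB().fit(x_train_tfidf, y_train)   # fit on the TF-IDF features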

  

Code 5

x_train

  

Code 6

x_test
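With the TF-IDF features from the sketch after Code 4, the fitted classifier can then be evaluated on the held-out split (again a sketch, assuming x_test_tfidf and clf from above):

from sklearn.metrics import classification_report, confusion_matrix

y_pred = clf.predict(x_test_tfidf)
print(confusion_matrix(y_test, y_pred))          # rows: true labels, columns: predicted labels
print(classification_report(y_test, y_pred))     # precision / recall / F1 per class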

  

