Complete walkthrough of implementing the KNN algorithm in Python
#1-1 KNN from scratch: the underlying algorithm
import numpy as np
import matplotlib.pyplot as plt #import the plotting module for data visualization
raw_data_X=[[3.393533211,2.331273381],
[3.110073483,1.781539638],
[1.343808831,3.368360954],
[3.582294042,4.679179110],
[2.280362439,2.866990263],
[7.423436942,4.696522875],
[5.745051997,3.533989803],
[9.172168622,2.511101045],
[7.792783481,3.424088941],
[7.939820817,0.791637231]]
raw_data_Y=[0,0,0,0,0,1,1,1,1,1]
print(raw_data_X)
print(raw_data_Y)
x_train=np.array(raw_data_X)
y_train=np.array(raw_data_Y) #preprocessing: convert the lists to NumPy arrays to serve as the training set
print(x_train.ndim)
print(y_train.ndim)
print(x_train)
print(y_train)
plt.figure(1)
plt.scatter(x_train[y_train==0,0],x_train[y_train==0,1],color="g")
plt.scatter(x_train[y_train==1,0],x_train[y_train==1,1],color="r") #plot the two classes as a scatter plot
x=np.array([8.093607318,3.365731514]) #define a new point whose class we want to predict
plt.scatter(x[0],x[1],color="b") #plot the new point to see where it falls among the training samples
#applying the kNN algorithm by hand
from math import sqrt
distance=[]
for x_i in x_train: #loop over training samples (avoid reusing the name x_train here, which would overwrite the training set)
    d=sqrt(np.sum((x_i-x)**2)) #Euclidean distance from each training sample to the new point x
    distance.append(d)
print(distance)
d1=np.argsort(distance) #indices that would sort distance in ascending order
print(d1)
k=6
n_k=[y_train[d1[i]] for i in range(k)] #labels of the k nearest neighbors
print(n_k)
from collections import Counter #import the Counter class
c=Counter(n_k).most_common(1)[0][0] #Counter counts how often each element appears, yielding (element, count) pairs
#most_common(x) returns the x most frequent (element, count) tuples, so [0][0] is the majority label among the neighbors
print(Counter(n_k))
y_predict=c
print(y_predict)
plt.show() #display the scatter plot
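The steps above can be wrapped into a reusable function. Below is a minimal sketch (the name knn_classify and its signature are my own illustration, not part of the original code) that repeats the same distance/sort/vote logic for an arbitrary query point:
def knn_classify(k,x_train,y_train,x):
    distances=[sqrt(np.sum((x_i-x)**2)) for x_i in x_train] #distance to every training sample
    nearest=np.argsort(distances) #training-sample indices, closest first
    top_k=[y_train[i] for i in nearest[:k]] #labels of the k nearest neighbors
    return Counter(top_k).most_common(1)[0][0] #majority vote
print(knn_classify(6,x_train,y_train,x)) #should agree with y_predict above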
#1-2 Calling KNN from scikit-learn
import matplotlib
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
iris=datasets.load_iris() #the iris dataset, shape (150,4)
x=iris.data
y=iris.target
data=datasets.load_digits() #the handwritten-digits dataset, shape (1797,64): 8x8 pixel grids with gray levels 0-16 (this overwrites the iris x and y above)
x=data.data
y=data.target
print(x.shape)
print(y.shape)
shuffle_index=np.random.permutation(len(x)) #one way to shuffle the indices by hand; unused here, since train_test_split below shuffles for us
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=666)
knn_classifier=KNeighborsClassifier(n_neighbors=4)
knn_classifier.fit(x_train,y_train)
y_predict=knn_classifier.predict(x_test)
p=(sum(knn_classifier.predict(x_test)==y_test)/len(x_test))*100 #fraction of correct predictions, as a percentage
print("Accuracy: %d%%" % p)
#display one digit from the dataset
s=x[666]
s=s.reshape(8,8)
plt.imshow(s,cmap=matplotlib.cm.binary)
plt.show()
from sklearn import metrics
print(metrics.accuracy_score(y_test,y_predict)) #prediction accuracy
print(metrics.accuracy_score(y_test,y_predict,normalize=False)) #number of correct predictions
print(metrics.confusion_matrix(y_test,y_predict)) #the confusion matrix itself
print(knn_classifier.score(x_test,y_test))
#Hyperparameter tuning: search for the best k and Minkowski exponent p (weights can be "uniform" or "distance"; here we fix weights="distance")
#use nested for loops to find the best combination of k and p
best_score=0.0
best_k=1
best_p=0
for k in range(1,11):
    for p in range(1,6):
        knn=KNeighborsClassifier(n_neighbors=k,weights="distance",p=p)
        knn.fit(x_train,y_train)
        score=knn.score(x_test,y_test)
        if score>best_score:
            best_score=score
            best_k=k
            best_p=p
print("best_k=%d" % best_k)
print("best_score=",best_score)
print("best_p=",best_p)
#Grid search for the best hyperparameters; candidates are scored by prediction accuracy using cross-validation (CV)
#import the grid-search class GridSearchCV from scikit-learn
param_grid=[
{
"weights":["uniform"],
"n_neighbors":[i for i in range(1,11)]
},
{
"weights":["distance"],
"n_neighbors":[i for i in range(1,11)],
"p":[i for i in range(1,5)]
}
]
k=KNeighborsClassifier()
from sklearn.model_selection import GridSearchCV
#set up the grid search (constructor arguments: 1. the estimator, 2. the list of hyperparameter grids, 3. n_jobs for the number of parallel workers, -1 meaning all cores, 4. verbose=2 to log the search progress)
grid_search=GridSearchCV(k,param_grid,n_jobs=-1,verbose=2)
grid_search.fit(x_train,y_train)
print(grid_search.best_estimator_)
print(grid_search.best_params_) #the best hyperparameter combination found
print(grid_search.best_score_) #the cross-validated accuracy of the best model
knn_best=grid_search.best_estimator_ #take the best classifier found by the search
y_pre=knn_best.predict(x_test)
print(y_pre)
print(knn_best.score(x_test,y_test))
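GridSearchCV also records the score of every candidate in its cv_results_ attribute. One quick way to inspect the top candidates (this uses pandas, which the original code does not import) is:
import pandas as pd
results=pd.DataFrame(grid_search.cv_results_) #one row per hyperparameter combination
print(results[["params","mean_test_score"]].sort_values("mean_test_score",ascending=False).head())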
#Feature scaling: map all features onto a common scale to reduce the bias caused by differing magnitudes
#Min-max normalization: x' = (x - min) / (max - min); strongly affected by outliers; suits data with clear bounds, such as exam scores
#Mean-variance (standard) normalization: x' = (x - mean) / std, giving mean 0 and variance 1; suits data without clear bounds that may contain extreme values, such as incomes
#min-max normalization by hand
x=np.random.randint(0,100,size=100)
x=(x-np.min(x))/(np.max(x)-np.min(x))
print(x)
x=np.random.randint(0,100,size=(50,2))
x=np.array(x,dtype=float)
x[:,0]=(x[:,0]-np.min(x[:,0]))/(np.max(x[:,0])-np.min(x[:,0])) #min-max normalize the first feature (column 0)
print(x[:,0])
print(np.mean(x[:,0]))
print(np.std(x[:,0]))
#mean-variance normalization by hand
x[:,1]=(x[:,1]-np.mean(x[:,1]))/np.std(x[:,1])
print(x[:,1])
print(np.mean(x[:,1]))
print(np.std(x[:,1]))
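Before switching to scikit-learn's scalers, note the fit/transform split they use: the statistics are learned from the training set only and then applied to both sets. A minimal sketch of a StandardScaler-like class (my own illustration, not scikit-learn's actual implementation):
class MyStandardScaler:
    def fit(self,x):
        #learn per-column mean and standard deviation from the training data only
        self.mean_=np.mean(x,axis=0)
        self.scale_=np.std(x,axis=0)
        return self
    def transform(self,x):
        #apply the learned statistics to any dataset
        return (x-self.mean_)/self.scale_
The real StandardScaler below exposes the same mean_ and scale_ attributes.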
#Using feature scaling in practice
#StandardScaler in scikit-learn: mean-variance normalization
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
iris=datasets.load_iris() #the iris dataset, shape (150,4)
x=iris.data
y=iris.target
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=666)
from sklearn.preprocessing import StandardScaler
s=StandardScaler()
s.fit(x_train)
print(s.mean_) #per-feature means learned from the training set
print(s.scale_) #per-feature standard deviations learned from the training set
#apply the same scaling to both the training and test sets
x_train=s.transform(x_train)
x_test=s.transform(x_test)
#train the model on the scaled data
k=KNeighborsClassifier(n_neighbors=3)
k.fit(x_train,y_train)
print(k.predict(x_test))
print(k.score(x_test,y_test))
#MinMaxScaler in scikit-learn: min-max normalization
from sklearn.preprocessing import MinMaxScaler
s=MinMaxScaler()
s.fit(x_train)
#scale both datasets (note: x_train and x_test were already standardized above, so MinMaxScaler here is fit on the standardized data; re-split the raw data for a clean comparison)
x_train=s.transform(x_train)
x_test=s.transform(x_test)
#train the model on the scaled data
k=KNeighborsClassifier(n_neighbors=3)
k.fit(x_train,y_train)
print(k.predict(x_test))
print(k.score(x_test,y_test))
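In practice the scaler and the classifier can be chained so that scaling is applied automatically and consistently. A minimal sketch using scikit-learn's Pipeline, starting again from a fresh split of the raw iris data (the step names "scaler" and "knn" are arbitrary labels):
from sklearn.pipeline import Pipeline
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=666) #fresh split of the raw data
pipe=Pipeline([("scaler",StandardScaler()),("knn",KNeighborsClassifier(n_neighbors=3))])
pipe.fit(x_train,y_train) #fitting standardizes x_train, then trains KNN on the scaled data
print(pipe.score(x_test,y_test)) #scoring standardizes x_test with the training-set statistics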
The results of running the code are shown below: