The k-Nearest Neighbors Algorithm: A Complete Python Implementation


1. The k-nearest neighbors (kNN) algorithm is one of the most classic and simple machine learning algorithms, and one of the best entry points into the field: it lets you grasp the overall framework and application of a machine learning algorithm quickly. It is fundamentally a classification algorithm, although it can also be used to solve regression problems.
2. The kNN algorithm has the following characteristics:
(1) The underlying idea is extremely simple (see the distance formula below).
(2) It requires very little mathematics.
(3) It solves suitable problems remarkably well.
(4) It illustrates many of the detailed issues that arise when applying machine learning algorithms.
(5) It gives a fairly complete picture of the machine learning workflow.
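Concretely, kNN predicts the label of a new sample by finding its k closest training points and taking a majority vote among their labels, where "closest" is usually measured with the Euclidean distance between the feature vectors a and b:

$$d(a,b)=\sqrt{\sum_{i=1}^{n}(a_i-b_i)^2}$$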
3. Python implementations of the kNN algorithm follow:

(1) Code for classification problems:
#1-1 Use a small hand-made data set to verify the algorithm
import numpy as np
import matplotlib.pyplot as plt #import the plotting/visualization module
raw_data_X=[[3.393533211,2.331273381],
[3.110073483,1.781539638],
[1.343808831,3.368360954],
[3.582294042,4.679179110],
[2.280362439,2.866990263],
[7.423436942,4.696522875],
[5.745051997,3.533989803],
[9.172168622,2.511101045],
[7.792783481,3.424088941],
[7.939820817,0.791637231]]
raw_data_Y=[0,0,0,0,0,1,1,1,1,1]
print(raw_data_X)
print(raw_data_Y)
x_train=np.array(raw_data_X)
y_train=np.array(raw_data_Y) #preprocessing: convert the lists to NumPy arrays to serve as the training set
print(x_train)
print(y_train)
plt.figure(1)
plt.scatter(x_train[y_train==0,0],x_train[y_train==0,1],color="g") #note: both classes must be plotted as (feature 0, feature 1)
plt.scatter(x_train[y_train==1,0],x_train[y_train==1,1],color="r") #scatter plot of the two classes
x=np.array([8.093607318,3.365731514]) #a new point whose class we want to predict
plt.scatter(x[0],x[1],color="b") #plot the new point to see where it lies among the training points
#applying the kNN algorithm by hand
from math import sqrt
distance=[]
for x_i in x_train: #use a loop variable distinct from x_train so the training set is not overwritten
    d=sqrt(np.sum((x_i-x)**2)) #Euclidean distance from each training point to x
    distance.append(d)
print(distance)
d1=np.argsort(distance) #indices that would sort distance in ascending order
print(d1)
k=6
n_k=[y_train[(d1[i])] for i in range(0,k)] #labels of the k nearest neighbors
print(n_k)
from collections import Counter #import the Counter class
c=Counter(n_k).most_common(1)[0][0] #Counter counts how often each element of a list occurs, storing the result as (element, count) pairs
#most_common(x) returns the x most frequent (element, count) tuples, so [0][0] extracts the majority label
y_predict=c
print(y_predict)
plt.show() #display the scatter plot
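As a quick standalone illustration of how Counter produces the majority vote (a minimal sketch, not part of the script above):

from collections import Counter
votes = Counter([0, 1, 1, 1, 0, 1])  #count the occurrences of each label
print(votes)                         #Counter({1: 4, 0: 2})
print(votes.most_common(1))          #[(1, 4)]: label 1 occurs 4 times
print(votes.most_common(1)[0][0])    #1, the majority label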
#steps for calling the kNN algorithm from scikit-learn
from sklearn.neighbors import KNeighborsClassifier
KNN_classifier=KNeighborsClassifier(n_neighbors=6)
#the raw data and the x_train, y_train arrays from section 1-1 are reused here
KNN_classifier.fit(x_train,y_train)
print(x)
x=x.reshape(1,-1) #predict expects a 2-D array, so reshape the single sample to shape (1, 2)
print(KNN_classifier.predict(x))
test_data1=[[3.93533211,2.33127381],
[3.10073483,1.78159638],
[1.34808831,3.36830954],
[3.58294042,4.67919110],
[2.28032439,2.86690263],
[7.42343942,4.69652875],
[5.74505997,3.53399803],
[9.17216622,2.51101045],
[7.79278481,3.42488941],
[7.93982087,0.79637231]]
test_data=np.array(test_data1)
test_target=[0,0,0,0,1,1,0,0,0,0]
y_pred=KNN_classifier.predict(test_data)
from sklearn import metrics #import the evaluation module
print(metrics.accuracy_score(y_true=test_target,y_pred=y_pred)) #overall accuracy; passing normalize=False instead would return the number of correct predictions
print(metrics.confusion_matrix(y_true=test_target,y_pred=y_pred)) #confusion matrix; the closer it is to a diagonal matrix, the better the predictions
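For example, passing normalize=False to accuracy_score returns the raw number of correct predictions rather than a ratio (a small sketch continuing the script above):

print(metrics.accuracy_score(y_true=test_target,y_pred=y_pred,normalize=False)) #count of correctly classified samples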


#1-2 Train on the iris data set that ships with scikit-learn
import numpy as np
#load the raw data and preprocess it
from sklearn.datasets import load_iris #import the iris data set
iris=load_iris()
print(iris)
print(len(iris["data"]))
from sklearn.model_selection import train_test_split #import the train/test splitting utility
train_data,test_data, train_target, test_target=train_test_split(iris.data,iris.target,test_size=0.1,random_state=1)
#build the kNN model
from sklearn.neighbors import KNeighborsClassifier
KNN_classifier=KNeighborsClassifier(n_neighbors=6)
KNN_classifier.fit(train_data,train_target) #fit the model on the training data
y_pred=KNN_classifier.predict(test_data) #predict on the test set

#validate the results
from sklearn import metrics #import the evaluation module
print(metrics.accuracy_score(y_true=test_target,y_pred=y_pred)) #overall accuracy; normalize=False would return the number of correct predictions
print(metrics.confusion_matrix(y_true=test_target,y_pred=y_pred)) #confusion matrix; the closer to diagonal, the better
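A single 10% hold-out split gives a rather noisy accuracy estimate. A sketch of an alternative (not in the original script) is k-fold cross-validation with scikit-learn's cross_val_score, which averages the accuracy over several splits:

from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
scores=cross_val_score(KNeighborsClassifier(n_neighbors=6),iris.data,iris.target,cv=5) #5-fold cross-validation
print(scores)        #accuracy on each of the 5 folds
print(scores.mean()) #mean accuracy across folds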


#1-3 Train on the handwritten-digits data set (digits) that ships with scikit-learn
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
import matplotlib
digits=datasets.load_digits() #load the handwritten-digits data set
print(digits.keys())
x=digits.data
print(x.shape)
y=digits.target
print(y.shape)
print(y[:100])
print(x[:10])
x1=x[666].reshape(8,8) #reshape the 64 features of sample 666 into an 8x8 image
print(x1)
plt.imshow(x1,cmap=matplotlib.cm.binary)
plt.show()
print(y[666])
from sklearn.model_selection import train_test_split #import the train/test splitting utility
x_train,x_test, y_train, y_test=train_test_split(digits.data,digits.target,test_size=0.1,random_state=0)
#build the kNN model
from sklearn.neighbors import KNeighborsClassifier
KNN_classifier=KNeighborsClassifier(n_neighbors=3) #use one consistent variable name (the original mixed KNN and KNN_classifier)
KNN_classifier.fit(x_train,y_train) #fit the model on the training data
y_pred=KNN_classifier.predict(x_test) #predict on the test set
print(y_pred)
print(KNN_classifier.score(x_test,y_test)) #score returns the accuracy directly
#1-4 validate the results
from sklearn import metrics #import the evaluation module
print(metrics.accuracy_score(y_true=y_test,y_pred=y_pred)) #overall accuracy; normalize=False would return the number of correct predictions
print(metrics.confusion_matrix(y_true=y_test,y_pred=y_pred)) #confusion matrix; the closer to diagonal, the better
from sklearn.model_selection import train_test_split #re-split the data with a larger test set for the hyperparameter search below
x_train,x_test, y_train, y_test=train_test_split(digits.data,digits.target,test_size=0.2,random_state=0)

#1-5 Search for the best kNN hyperparameters: the number of neighbors k and the weighting scheme; then, for distance weighting, the best Minkowski exponent p
best_method=""
best_score=0.0
best_k=0
s=[]
from sklearn.neighbors import KNeighborsClassifier
for method in ["uniform","distance"]:
    for k in range(1,11):
        KNN=KNeighborsClassifier(n_neighbors=k,weights=method)
        KNN.fit(x_train,y_train) #fit on the training data
        score=KNN.score(x_test,y_test) #accuracy on the test set
        s.append(score)
        if score>best_score:
            best_score=score
            best_k=k
            best_method=method
#數據驗證
print("best_method=",best_method)
print("best_k=",best_k)
print("best_score=",best_score)
plt.figure(2)
x=[i for i in range(1,21)] #20 scores in total: k=1..10 for each of the two weighting schemes
plt.plot(x,s,"r")
plt.show()
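The hyperparameter p searched below selects the Minkowski distance used by KNeighborsClassifier when weights="distance": p=1 gives the Manhattan distance, p=2 the Euclidean distance, and in general

$$d_p(a,b)=\left(\sum_{i=1}^{n}|a_i-b_i|^p\right)^{1/p}$$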

best_p=0
best_score=0.0
best_k=0
s=[]
from sklearn.neighbors import KNeighborsClassifier
for k in range(1,11):
    for p in range(1,6):
        KNN=KNeighborsClassifier(n_neighbors=k,weights="distance",p=p)
        KNN.fit(x_train,y_train) #fit on the training data
        score=KNN.score(x_test,y_test) #accuracy on the test set
        s.append(score)
        if score>best_score:
            best_score=score
            best_k=k
            best_p=p
#report the best combination
print("best_p=",best_p)
print("best_k=",best_k)
print("best_score=",best_score)
plt.figure(3)
x=[i for i in range(1,6)] #the five candidate values of p
for i in range(1,11):
    s1=s[(i*5-5):(5*i)] #the five scores for p=1..5 at a fixed k=i
    plt.plot(x,s1,label=i) #one curve per value of k
plt.legend(loc=2)
plt.show()

#1-6 Use scikit-learn's GridSearchCV to run a grid search over the hyperparameters
param_grid=[{
"weights":["uniform"],
"n_neighbors":[i for i in range(1,11)]
},
{"weights":["distance"],
"n_neighbors":[i for i in range(1,11)],
"p":[i for i in range(1,6)]
}
] #the candidate hyperparameter combinations are defined as a list of dicts, and each individual hyperparameter as a list of values
knn_clf=KNeighborsClassifier()
from sklearn.model_selection import GridSearchCV
grid_search=GridSearchCV(knn_clf,param_grid,n_jobs=-1,verbose=2)
grid_search.fit(x_train,y_train)
print(grid_search.best_estimator_)
print(grid_search.best_params_)
print(grid_search.best_score_)
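Since GridSearchCV refits the best parameter combination on the full training set by default (refit=True), best_estimator_ can be used directly as a tuned classifier; a short usage sketch:

best_knn=grid_search.best_estimator_
print(best_knn.score(x_test,y_test)) #accuracy of the tuned model on the held-out test set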


#1-7 Feature scaling with Scaler objects
import numpy as np
from sklearn import datasets
iris=datasets.load_iris()
x=iris.data
y=iris.target
print(x[:10])
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=666)

#1-7-1 Standardize x_train using the per-feature mean and standard deviation
from sklearn.preprocessing import StandardScaler
standardscaler=StandardScaler()
standardscaler.fit(x_train)
print(standardscaler.mean_) #vector of per-feature means
print(standardscaler.scale_) #vector of per-feature standard deviations
print(standardscaler.transform(x_train))
x_train=standardscaler.transform(x_train)
print(x_train)
x_test_standard=standardscaler.transform(x_test) #the test set must be scaled with the statistics learned from the training set
from sklearn.neighbors import KNeighborsClassifier
knn=KNeighborsClassifier(n_neighbors=3)
knn.fit(x_train,y_train)
print(knn.score(x_test_standard,y_test))
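StandardScaler implements mean-variance standardization: every feature column is transformed using the mean (mean_) and standard deviation (scale_) learned from x_train,

$$x'=\frac{x-\mu}{\sigma}$$

so that each feature has zero mean and unit variance on the training set.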

#1-7-2 Scale x_train with min-max normalization
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=666)
from sklearn.preprocessing import MinMaxScaler
minmaxscaler=MinMaxScaler()
minmaxscaler.fit(x_train)
x_train=minmaxscaler.transform(x_train)
print(x_train)
x_test_minmax=minmaxscaler.transform(x_test) #again, scale the test set with the training-set statistics
from sklearn.neighbors import KNeighborsClassifier
knn=KNeighborsClassifier(n_neighbors=3)
knn.fit(x_train,y_train)
print(x_test_minmax)
print(knn.score(x_test_minmax,y_test))
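MinMaxScaler instead rescales each feature with the minimum and maximum learned from x_train,

$$x'=\frac{x-x_{\min}}{x_{\max}-x_{\min}}$$

which maps the training values of every feature into the interval [0, 1].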
(2) Code for regression problems:
#1-1 Train and predict with the kNN regressor
import numpy as np
import matplotlib.pyplot as plt #import the visualization module
from sklearn import datasets
d=datasets.load_boston() #the Boston housing data set (removed in scikit-learn 1.2; see the alternative sketch at the end)
print(d.data)
print(d.DESCR)
print(d.feature_names)
print(d.data[:,5])
x=d.data[d.target<50]
y=d.target[d.target<50]
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y,random_state=666)
from sklearn.neighbors import KNeighborsRegressor
knn=KNeighborsRegressor()
knn.fit(x_train,y_train)
y_pre=knn.predict(x_test)
print(knn.score(x_test,y_test))
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score #library function for computing R^2 directly
print(mean_squared_error(y_test,y_pre))
print(mean_absolute_error(y_test,y_pre))
print(r2_score(y_test,y_pre)) #R^2, the same value returned by knn.score above
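For reference, the R² score printed above (and returned by knn.score) compares the model's squared error with that of always predicting the mean:

$$R^2=1-\frac{\sum_i (y_i-\hat{y}_i)^2}{\sum_i (y_i-\bar{y})^2}$$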
#1-2 Use grid search to find the best hyperparameter combination
param=[{"n_neighbors":[i for i in range(1,11)],
"weights":["uniform"],
},
{
"weights":["distance"],
"n_neighbors":[i for i in range(1,11)],
"p":[j for j in range(1,6)]
}
]
from sklearn.model_selection import GridSearchCV #use grid search to find the best hyperparameter combination for the kNN regressor
knn1=KNeighborsRegressor()
grid1=GridSearchCV(knn1,param,n_jobs=-1,verbose=2)
grid1.fit(x_train,y_train)
print(grid1.best_params_)
print(grid1.best_estimator_)
print(grid1.best_estimator_.score(x_test,y_test))
k=grid1.best_estimator_
print(k.predict(x_test))
print(k.score(x_test,y_test))
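Note that load_boston was removed from scikit-learn in version 1.2. On a recent installation, the same regression workflow can be reproduced with the California housing data set; the following is a minimal sketch (the variable names are illustrative, not from the original post):

from sklearn.datasets import fetch_california_housing #downloads the data on first use
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor
housing=fetch_california_housing()
x_train,x_test,y_train,y_test=train_test_split(housing.data,housing.target,random_state=666)
knn=KNeighborsRegressor()
knn.fit(x_train,y_train)
print(knn.score(x_test,y_test)) #R^2 on the test set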
The final results are shown in the output figures produced by these scripts. [Screenshots omitted.]