吳裕雄 — Python Machine Learning: Semi-Supervised Learning with the LabelSpreading Model
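
This post walks through scikit-learn's LabelSpreading model on the digits dataset: the samples are shuffled, only the first 10% keep their labels, the rest are marked as unlabeled, and the model propagates labels over a similarity graph built from the features. The transductive accuracy on the unlabeled portion is measured first with fixed parameters, and then as a function of the clamping factor alpha together with the RBF kernel width gamma and the number of neighbors k of the knn kernel.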


import numpy as np
import matplotlib.pyplot as plt

from sklearn import metrics
from sklearn import datasets
# LabelSpreading lives in the public sklearn.semi_supervised package
# (the old sklearn.semi_supervised.label_propagation module path is deprecated)
from sklearn.semi_supervised import LabelSpreading

def load_data():
    '''
    Load the digits dataset and split it into labeled and unlabeled parts
    '''
    digits = datasets.load_digits()
    ######   shuffle the samples   ########
    rng = np.random.RandomState(0)
    indices = np.arange(len(digits.data)) # array of sample indices
    rng.shuffle(indices) # shuffle the sample indices
    X = digits.data[indices]
    y = digits.target[indices]
    ###### build the set of unlabeled sample indices ####
    # only 10% of the samples keep their labels
    n_labeled_points = int(len(y)/10)
    # the remaining 90% of the samples are treated as unlabeled
    unlabeled_indices = np.arange(len(y))[n_labeled_points:]
    return X,y,unlabeled_indices
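
With load_digits this gives 1797 samples in total, so int(1797/10) = 179 samples keep their labels and the remaining 1618 are handed to the model as unlabeled.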

# semi-supervised learning with the LabelSpreading model
def test_LabelSpreading(*data):
    X,y,unlabeled_indices=data
    y_train=np.copy(y) # make a copy, since y is still needed later
    y_train[unlabeled_indices]=-1 # unlabeled samples are marked with -1
    clf=LabelSpreading(max_iter=100,kernel='rbf',gamma=0.1)
    clf.fit(X,y_train)
    ### compute the prediction accuracy on the unlabeled samples
    predicted_labels = clf.transduction_[unlabeled_indices] # predicted labels
    true_labels = y[unlabeled_indices] # true labels
    print("Accuracy:%f"%metrics.accuracy_score(true_labels,predicted_labels))
    # alternatively: print("Accuracy:%f"%clf.score(X[unlabeled_indices],true_labels))

# build the semi-supervised dataset
data=load_data()
# run test_LabelSpreading
test_LabelSpreading(*data)
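
As a side note that is not in the original post, the fitted estimator also exposes label_distributions_, the per-class probabilities that the propagation assigns to every sample. A minimal sketch, reusing load_data and the module-level data from above, of checking how confident the propagated labels are:

def inspect_label_distributions(*data):
    '''
    Minimal sketch (illustrative helper, not part of the original post):
    inspect the class probabilities that label spreading assigns to the
    samples it labelled itself.
    '''
    X, y, unlabeled_indices = data
    y_train = np.copy(y)
    y_train[unlabeled_indices] = -1 # unlabeled samples are marked with -1
    clf = LabelSpreading(max_iter=100, kernel='rbf', gamma=0.1)
    clf.fit(X, y_train)
    proba = clf.label_distributions_[unlabeled_indices] # shape (n_unlabeled, n_classes)
    confidence = proba.max(axis=1) # confidence of each propagated label
    print("mean confidence on unlabeled samples:%f" % confidence.mean())

inspect_label_distributions(*data)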

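Before sweeping the parameters it helps to recall what alpha does. LabelSpreading (following Zhou et al., "Learning with Local and Global Consistency") iterates, roughly,

    F(t+1) = alpha * S * F(t) + (1 - alpha) * Y

where Y holds the initial label assignments, S = D^(-1/2) W D^(-1/2) is the normalized affinity matrix of the sample graph, and alpha in (0, 1) is the clamping factor: a small alpha keeps every sample close to its initial label, while alpha close to 1 lets the labels diffuse almost freely over the graph. The two experiments below sweep alpha together with the parameter that shapes the graph (gamma for the rbf kernel, n_neighbors for the knn kernel).
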
def test_LabelSpreading_rbf(*data):
    '''
    Test how the performance of LabelSpreading with the rbf kernel
    changes with alpha and gamma
    '''
    X,y,unlabeled_indices=data
    # make a copy, since y is still needed later
    y_train=np.copy(y)
    # unlabeled samples are marked with -1
    y_train[unlabeled_indices]=-1

    fig=plt.figure()
    ax=fig.add_subplot(1,1,1)
    # alpha must lie strictly inside (0, 1) for LabelSpreading
    alphas=np.linspace(0.01,0.99,num=10,endpoint=True)
    gammas=np.logspace(-2,2,num=50)
    # colour set: a different colour for each curve
    colors=((1,0,0),(0,1,0),(0,0,1),(0.5,0.5,0),(0,0.5,0.5),(0.5,0,0.5),(0.4,0.6,0),(0.6,0.4,0),(0,0.6,0.4),(0.5,0.3,0.2))
    ## train and plot
    for alpha,color in zip(alphas,colors):
        scores=[]
        for gamma in gammas:
            clf=LabelSpreading(max_iter=100,gamma=gamma,alpha=alpha,kernel='rbf')
            clf.fit(X,y_train)
            scores.append(clf.score(X[unlabeled_indices],y[unlabeled_indices]))
        ax.plot(gammas,scores,label=r"$\alpha=%s$"%alpha,color=color)

    ### set up the figure
    ax.set_xlabel(r"$\gamma$")
    ax.set_ylabel("score")
    ax.set_xscale("log")
    ax.legend(loc="best")
    ax.set_title("LabelSpreading rbf kernel")
    plt.show()

# run test_LabelSpreading_rbf
test_LabelSpreading_rbf(*data)
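
A practical note (not from the original post): with kernel='knn' the affinity graph is sparse, since only the n_neighbors nearest neighbours of each sample get an edge, so the sweep below is considerably cheaper in time and memory on digits than the fully connected rbf graph used above.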

def test_LabelSpreading_knn(*data):
    '''
    Test how the performance of LabelSpreading with the knn kernel
    changes with alpha and n_neighbors
    '''
    X,y,unlabeled_indices=data
    # make a copy, since y is still needed later
    y_train=np.copy(y)
    # unlabeled samples are marked with -1
    y_train[unlabeled_indices]=-1

    fig=plt.figure()
    ax=fig.add_subplot(1,1,1)
    # alpha must lie strictly inside (0, 1) for LabelSpreading
    alphas=np.linspace(0.01,0.99,num=10,endpoint=True)
    Ks=[1,2,3,4,5,8,10,15,20,25,30,35,40,50]
    # colour set: a different colour for each curve
    colors=((1,0,0),(0,1,0),(0,0,1),(0.5,0.5,0),(0,0.5,0.5),(0.5,0,0.5),(0.4,0.6,0),(0.6,0.4,0),(0,0.6,0.4),(0.5,0.3,0.2))
    ## train and plot
    for alpha,color in zip(alphas,colors):
        scores=[]
        for K in Ks:
            clf=LabelSpreading(kernel='knn',max_iter=100,n_neighbors=K,alpha=alpha)
            clf.fit(X,y_train)
            scores.append(clf.score(X[unlabeled_indices],y[unlabeled_indices]))
        ax.plot(Ks,scores,label=r"$\alpha=%s$"%alpha,color=color)

    ### set up the figure
    ax.set_xlabel(r"$k$")
    ax.set_ylabel("score")
    ax.legend(loc="best")
    ax.set_title("LabelSpreading knn kernel")
    plt.show()

# run test_LabelSpreading_knn
test_LabelSpreading_knn(*data)

 

