【Homework 1】Hsuan-Tien Lin, Machine Learning Foundations (林軒田機器學習基石)


For the homework, I am only looking at the questions that require programming for now; the code is written in Python.

Q15~Q17 use the classic PLA algorithm, and the given dataset is guaranteed to be linearly separable.

All the code has to do is implement a simple PLA in which the speed in "W = W + speed*yX" is configurable (i.e., the learning rate).
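To make the update rule concrete, here is a minimal vectorized sketch in NumPy (the names pla_update, w, x, y and the toy numbers are my own illustration, and x is assumed to already carry the leading 1 for the bias component):

#encoding=utf8
import numpy as np

def pla_update(w, x, y, speed=1.0):
    """One PLA correction step; only applied when sign(w.x) != y."""
    return w + speed * y * x

# toy example: a single misclassified point
w = np.zeros(3)                      # weights, including the bias w0
x = np.array([1.0, 2.0, -1.0])       # x0 = 1 is the bias component
y = -1                               # true label
if np.sign(np.dot(w, x)) != y:       # note: sign(0) counts as a mistake here
    w = pla_update(w, x, y, speed=0.5)
print(w)                             # -> [-0.5 -1.   0.5]

The full listings below do the same thing with plain Python loops over the raw text file.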

Code 1

#encoding=utf8
import sys

if __name__ == '__main__':
    W = [ 0.0, 0.0, 0.0, 0.0, 0.0 ]
    halts = 0
    for line in open("train.dat").readlines():
        items = line.strip().split('\t')
        y = items[1].strip()
        X = items[0].strip().split(' ')
        X.insert(0,1)
        # guarantee the length of W and X
        if ( len(W)!=len(X) ):
            sys.exit(-1)
        # initial score 0
        score = 0.0
        # calculate W'X
        for i in range(0,len(X)):
            score = score + float(X[i]) * float(W[i])
        print "score" + str(score)
        # transfer score to sign
        sign = 1 if score>0.0 else -1
        if sign != int(y) :
            halts = halts + 1
            for i in range(0,len(X)):
                W[i] = float(W[i]) + float(y)*float(X[i])
    for w in W:
        print w
    print "halts:" + str(halts)

Code 2 (randomly shuffling the sample order)

#encoding=utf8
import sys
from random import Random

if __name__ == '__main__':
    # params
    TIMES = 2000
    sum_halts = 0
    SPEED = 0.5
    # read raw data
    raw_data = []
    for line in open("train.dat").readlines():
        raw_data.append(line.strip())
    # repeat the experiment TIMES times, reshuffling the data each time
    a = Random()
    for t in range(0,TIMES):
        W = [ 0.0, 0.0, 0.0, 0.0, 0.0 ]
        halts = 0
        # randomly shuffle data
        a.seed(t)
        a.shuffle(raw_data)
        # pla process
        for line in raw_data:
            items = line.strip().split('\t')
            y = items[1].strip()
            X = items[0].strip().split(' ')
            X.insert(0,1)
            # guarantee the length of W and X
            if ( len(W)!=len(X) ):
                sys.exit(-1)
            # initial score 0
            score = 0.0
            # calculate W'X
            for i in range(0,len(X)):
                score = score + float(X[i]) * float(W[i])
            # transfer score to sign
            sign = 1 if score>0.0 else -1
            if sign != int(y) :
                halts = halts + 1
                for i in range(0,len(X)):
                    W[i] = float(W[i]) + SPEED*float(y)*float(X[i])
        print "halts:" + str(halts)
        # accumulate sum of halts
        sum_halts = sum_halts + halts
    print "average halts:" + str(sum_halts/(TIMES-1))

The takeaway from these questions: changing the learning rate or shuffling the sample order can affect the number of updates needed for convergence.

One more detail: never forget to add the bias term W0 (the constant intercept); otherwise a constant residual error remains and the algorithm cannot converge.
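For instance, with NumPy the constant component can be prepended as a column of ones (a small sketch; the feature values here are made up):

#encoding=utf8
import numpy as np

# hypothetical raw features without the constant term
X_raw = np.array([[0.97, 0.10, 0.51, 0.76],
                  [0.08, 0.32, 0.18, 0.92]])
# prepend x0 = 1 so that w0 acts as the intercept
X = np.hstack([np.ones((X_raw.shape[0], 1)), X_raw])
print(X.shape)   # (2, 5) -- matches the 5-dimensional W used above

The listings above achieve the same effect with X.insert(0,1) on each parsed line.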

==============================================

Homework Q18~Q20 test the pocket PLA.

That is, the training data is no longer linearly separable (which is usually the case in practice), so PLA is modified into the pocket PLA.

I had not really understood what "pocket" means; after reading the discussion forum, I finally got it.

In short: "the pocket does not interfere with the normal run of PLA, and W is still updated in every round as usual; the pocket only needs to keep, among all the W's that have appeared so far, the one with the smallest error on train_data."
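That bookkeeping boils down to just a few lines. Below is a minimal sketch; the helpers pla_update and error_on_data are passed in as stand-ins for the real routines in the full program that follows:

def pocket_sketch(train_data, n_updates, pla_update, error_on_data):
    # ordinary PLA state
    W = [0.0, 0.0, 0.0, 0.0, 0.0]
    # the "pocket": best weights seen so far and their training error
    best_W, best_error = list(W), error_on_data(train_data, W)
    for _ in range(n_updates):
        W = pla_update(train_data, W)              # PLA updates W as usual
        curr_error = error_on_data(train_data, W)
        if curr_error < best_error:                # keep only the best W in the pocket
            best_W, best_error = list(W), curr_error
    return best_W                                  # return the pocket W, not the last W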

#encoding=utf8
from random import Random

def error_on_data(data, W):
    error_W = 0
    for line in data:
        items = line.strip().split('\t')
        y = items[1].strip()
        X = items[0].strip().split(' ')
        X.insert(0,1)
        # calculate scores of W
        score_W = 0.0
        for i in range(0,len(X)): score_W = score_W + float(X[i]) * float(W[i])
        # judge W 
        sign_W = 1 if score_W>0.0 else -1
        if sign_W != int(y) : error_W = error_W + 1
    return error_W

def pocket_algorithm(train_data, r):
    best_W = [ 0, 0, 0, 0, 0 ]
    best_error = error_on_data(train_data, best_W)
    W = [ 0, 0, 0, 0, 0 ]
    rounds = 0
    while rounds<100: # run until 100 PLA updates have been made
        line = train_data[r.randint(0,len(train_data)-1)]
        items = line.strip().split('\t')
        y = items[1].strip()
        X = items[0].strip().split(' ')
        X.insert(0,1)
        # initial score 0
        score = 0.0
        # calculate W'X
        for i in range(0,len(X)): score = score + float(X[i]) * float(W[i])
        # wrong judgement : transfer score to sign
        sign = 1 if score>0.0 else -1
        if sign != int(y) :
            rounds = rounds + 1
            for i in range(0,len(X)): W[i] = float(W[i]) + float(y)*float(X[i])
            # update best_W
            curr_error = error_on_data(train_data,W)
            print "curr_error:" + str(curr_error) + ",best_error:" + str(best_error)
            if curr_error<best_error:
                for i in range(0,len(best_W)): best_W[i]=W[i]
                best_error = curr_error
    return best_W
    #return W  # returning the last W (after the updates) instead of the pocket W

if __name__ == '__main__':
    # read raw data
    train_data = []
    for line in open("train2.dat").readlines(): train_data.append(line.strip())
    test_data = []
    for line in open("test2.dat").readlines(): test_data.append(line.strip())
    # iteratively pocket algorithm
    iterative_times = 100
    total_error_times = 0
    r = Random()
    for i in range(0,iterative_times):
        # each round initialize a random seed
        r.seed(i)
        # conduct one round pocket algorithm
        W = pocket_algorithm(train_data, r)
        # accumulate error counts
        error_times = error_on_data(test_data, W)
        total_error_times = total_error_times + error_times
    print str( (1.0*total_error_times)/(iterative_times*len(test_data)) )

This reference explains how the Pocket algorithm works:

https://class.coursera.org/ntumlone-002/forum/thread?thread_id=79

