The prediction result is one of 11 classes (1 to 11).
First, load the data: the training data, training labels, test data, and test labels:
import xlrd

traindata = []
testdata = []
trainlabel = []
testlabel = []

def importTrainContentdata():
    file = 'F:/goverment/myfinalcode/train_big.csv'
    fo = open(file)
    ls = []
    for line in fo:
        line = line.replace("\t", ",")
        line = line.replace("\n", ",")
        line = line.replace("\"", ",")
        ls.append(line.split(","))
    for i in ls:
        li = []
        for j in i:
            if j == '':
                continue
            li.append(float(j))
        traindata.append(li)

def importTestContentdata():
    file = 'F:/goverment/myfinalcode/test_big.csv'
    fo = open(file)
    ls = []
    for line in fo:
        line = line.replace("\t", ",")
        line = line.replace("\n", ",")
        line = line.replace("\"", ",")
        ls.append(line.split(","))
    for i in ls:
        li = []
        for j in i:
            if j == '':
                continue
            li.append(float(j))
        testdata.append(li)

# Load the training and test labels
def importTrainlabeldata():
    file = 'F:/goverment/myfinalcode/train_big_label.xls'
    wb = xlrd.open_workbook(file)
    ws = wb.sheet_by_name("Sheet1")
    for r in range(ws.nrows):
        col = []
        for c in range(1):
            col.append(ws.cell(r, c).value)
        trainlabel.append(col)

def importTestlabeldata():
    file = 'F:/goverment/myfinalcode/test_big_label.xls'
    wb = xlrd.open_workbook(file)
    ws = wb.sheet_by_name("Sheet1")
    for r in range(ws.nrows):
        col = []
        for c in range(1):
            col.append(ws.cell(r, c).value)
        testlabel.append(col)

if __name__ == "__main__":
    importTrainContentdata()
    importTestContentdata()
    importTrainlabeldata()
    importTestlabeldata()
The training and test data are CSV files, and the values are read in as strings. Each row has to be converted to float and collected into a list li, and all of these lists are then appended to traindata or testdata. Because CSV fields are separated by ",", characters such as "\t" are first replaced with ",", and then ls.append(line.split(",")) puts the split fields into ls. At that point the fields are still strings, so I converted them to float explicitly; I later found it also works without this conversion, presumably because scikit-learn converts them internally.
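As an aside, the same loading step could probably be done with Python's built-in csv module. This is only a minimal sketch under that assumption, not the code the post actually uses; the path and the skipping of empty fields mirror the original:

import csv

def load_csv_as_floats(path):
    # Read a CSV file and convert every non-empty field to float, one list per row.
    rows = []
    with open(path) as f:
        for fields in csv.reader(f):
            rows.append([float(x) for x in fields if x != ''])
    return rows

# e.g. traindata = load_csv_as_floats('F:/goverment/myfinalcode/train_big.csv')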
After that, I tried a number of classifiers, referring to
http://scikit-learn.org/stable/supervised_learning.html#supervised-learning
for parameter tuning, and then picked the best-performing classifier to push the accuracy as high as possible.
'''#19%
from sklearn import neighbors
knn = neighbors.KNeighborsClassifier(n_neighbors=75, leaf_size=51, weights='distance', p=2)
knn.fit(traindata, trainlabel)
predict = knn.predict(testdata)
'''

'''#this one did not work
from sklearn.neural_network import MLPClassifier
import numpy as np
traindata = np.array(traindata)   # TypeError: cannot perform reduce with flexible type
traindata = traindata.astype(float)
trainlabel = np.array(trainlabel)
trainlabel = trainlabel.astype(float)
testdata = np.array(testdata)
testdata = testdata.astype(float)
model = MLPClassifier(activation='relu', alpha=1e-05, batch_size='auto',
                      beta_1=0.9, beta_2=0.999, early_stopping=False,
                      epsilon=1e-08, hidden_layer_sizes=(5, 2),
                      learning_rate='constant', learning_rate_init=0.001,
                      max_iter=200, momentum=0.9, nesterovs_momentum=True,
                      power_t=0.5, random_state=1, shuffle=True,
                      solver='lbfgs', tol=0.0001, validation_fraction=0.1,
                      verbose=False, warm_start=False)
model.fit(traindata, trainlabel)
predict = model.predict(testdata)
'''

'''#19%
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(class_weight='balanced', max_features=68, splitter='best', random_state=5)
model.fit(traindata, trainlabel)
predict = model.predict(testdata)

#this one did not work
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB(alpha=0.052).fit(traindata, trainlabel)
#clf.fit(traindata, trainlabel)
predict = clf.predict(testdata)
'''

'''#17%
from sklearn.svm import SVC
clf = SVC(C=150, kernel='rbf', degree=51, gamma='auto', coef0=0.0,
          shrinking=False, probability=False, tol=0.001, cache_size=300,
          class_weight=None, verbose=False, max_iter=-1,
          decision_function_shape=None, random_state=None)
clf.fit(traindata, trainlabel)
predict = clf.predict(testdata)
'''

'''#0.5%
from sklearn.naive_bayes import GaussianNB
import numpy as np
gnb = GaussianNB()
traindata = np.array(traindata)   # TypeError: cannot perform reduce with flexible type
traindata = traindata.astype(float)
trainlabel = np.array(trainlabel)
trainlabel = trainlabel.astype(float)
testdata = np.array(testdata)
testdata = testdata.astype(float)
predict = gnb.fit(traindata, trainlabel).predict(testdata)
'''

'''#16%
from sklearn.naive_bayes import BernoulliNB
import numpy as np
gnb = BernoulliNB()
traindata = np.array(traindata)   # TypeError: cannot perform reduce with flexible type
traindata = traindata.astype(float)
trainlabel = np.array(trainlabel)
trainlabel = trainlabel.astype(float)
testdata = np.array(testdata)
testdata = testdata.astype(float)
predict = gnb.fit(traindata, trainlabel).predict(testdata)
'''

from sklearn.ensemble import RandomForestClassifier
# build a random forest multi-class classifier
forest = RandomForestClassifier(n_estimators=500, random_state=5, warm_start=False,
                                min_impurity_decrease=0.0, min_samples_split=15)
predict = forest.fit(traindata, trainlabel).predict(testdata)
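The parameters above were tuned by hand. As a hedged sketch of how that search could be automated (not something the original post did), scikit-learn's GridSearchCV can cross-validate over a small random-forest parameter grid; the grid values here are illustrative assumptions:

# A minimal sketch, assuming traindata / trainlabel are already loaded as above.
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
import numpy as np

param_grid = {
    'n_estimators': [100, 300, 500],
    'min_samples_split': [2, 15, 30],
}
search = GridSearchCV(RandomForestClassifier(random_state=5), param_grid, cv=3)
search.fit(np.array(traindata), np.ravel(trainlabel))   # ravel the one-element label rows
print(search.best_params_, search.best_score_)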
Output the accuracy. I also write the predictions to a txt file to make analysis easier.
s = len(predict)
f = open('F:/goverment/myfinalcode/predict.txt', 'w')
for i in range(s):
    f.write(str(predict[i]))
    f.write('\n')
f.write("done")
f.close()

k = 0
print(s)
for i in range(s):
    if testlabel[i][0] == predict[i]:   # testlabel rows are one-element lists, so compare the first element
        k = k + 1
print("Accuracy:", k * 1.0 / s)
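For reference, scikit-learn already provides this computation. A minimal equivalent sketch, assuming testlabel and predict are as above:

from sklearn.metrics import accuracy_score
import numpy as np

# np.ravel flattens the one-element-per-row label lists into a 1D array
print("Accuracy:", accuracy_score(np.ravel(testlabel), predict))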
Next, output the support (predicted probability) for every label.
print('Now outputting the support values')
attribute_proba = forest.predict_proba(testdata)
#print(forest.predict_proba(testdata))   # probability of each label
print(type(attribute_proba))

import xlwt
myexcel = xlwt.Workbook()
sheet = myexcel.add_sheet('sheet')
si = -1
sj = -1
for i in attribute_proba:
    si = si + 1
    for j in i:
        sj = sj + 1
        sheet.write(si, sj, str(j))
    sj = -1
myexcel.save("attribute_proba_small.xls")
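One detail worth knowing (not shown in the original code): the columns returned by predict_proba follow the order of forest.classes_, so a column index is not necessarily the label value itself. A minimal check:

# classes_ lists the label values in the same order as the predict_proba columns,
# e.g. a probability in column 0 belongs to label forest.classes_[0]
print(forest.classes_)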
The results are as follows:
But that is not enough: I also want to output the label indices and support values of the top-3 predictions.
I defined a class attri, where key holds the label index and weight holds the support value.
Then, for each record, I loop over its predicted probabilities (support values) three times. Each pass finds the largest remaining probability, stores its index and value, and sets that entry to 0 so the next pass picks out the next largest. After three passes the results are written to an Excel file.
'''Next, output the indices of the three largest probabilities for each record'''
class attri:
    def __init__(self):
        self.key = 0
        self.weight = 0.0

label = []
for i in attribute_proba:
    lis = []
    k = 0
    while k < 3:
        k = k + 1
        p = 1
        mm = 0
        sj = -1
        for j in i:
            sj = sj + 1
            if j > mm:
                mm = j
                p = sj
        # zero out the current maximum so the next pass finds the next largest
        # (I first wrote i[p-1], wondering whether indices start from 1, but debugging showed i[p] is correct)
        i[p] = 0
        a = attri()
        a.key = p
        a.weight = mm
        lis.append(a)
    label.append(lis)

print('Writing the top picks to Excel')
import xlwt
myexcel = xlwt.Workbook()
sheet = myexcel.add_sheet('sheet')
si = -2
sj = -1
for i in label:
    si = si + 2
    for j in i:
        sj = sj + 1
        sheet.write(si, sj, str(j.key))
        sheet.write(si + 1, sj, str(j.weight))
    sj = -1
myexcel.save("proba_big.xls")
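As an aside, the same top-3 extraction could likely be written more compactly with NumPy. This is only a sketch, not the code used above; it calls predict_proba again because the loop above zeroes entries in place, and np.take_along_axis needs a reasonably recent NumPy:

import numpy as np

proba = np.asarray(forest.predict_proba(testdata))        # fresh copy of the probabilities
top3_idx = np.argsort(proba, axis=1)[:, ::-1][:, :3]      # column indices of the 3 largest values per row
top3_val = np.take_along_axis(proba, top3_idx, axis=1)    # the corresponding probabilities
print(top3_idx[:5])
print(top3_val[:5])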
The results are as follows:
Self-study really is hard work. These are my learning results, and the accuracy can still be improved further. If this helped you, give it a like, hehe.