很方便的，懶得自己寫了。
-
% Initialization: clear the command window and workspace, then load the
% precomputed feature set from wdtFeature.mat.
clc
clear            % plain 'clear' removes workspace variables; 'clear all' also
                 % flushes cached functions/MEX files and is discouraged
load('wdtFeature');

% Training samples:  train_data  — matrix, one sample per row, one feature per column
% Training labels:   train_label — column vector
% Test samples:      test_data
% Test labels:       test_label
% NOTE(review): the loaded variables appear to store one sample per COLUMN,
% hence the transposes — confirm against how wdtFeature.mat was generated.
% Semicolons suppress echoing the full matrices to the command window.
train_data  = traindata';
train_label = trainlabel';
test_data   = testdata';
test_label  = testlabel';
-
% K近鄰分類器 (KNN)
% mdl = ClassificationKNN.fit(train_data,train_label,'NumNeighbors',1);
% predict_label = predict(mdl, test_data);
% accuracy = length(find(predict_label == test_label))/length(test_label)*100
%
% 94%
% 隨機森林分類器(Random Forest)
% nTree = 5
% B = TreeBagger(nTree,train_data,train_label);
% predict_label = predict(B,test_data);
%
% m=0;
% n=0;
% for i=1:50
% if predict_label{i,1}>0
% m=m+1;
% end
% if predict_label{i+50,1}<0
% n=n+1;
% end
% end
%
% s=m+n
% r=s/100

% result 50%
% **********************************************************************
% 朴素貝葉斯 (Naive Bayes)
% nb = NaiveBayes.fit(train_data, train_label);
% predict_label = predict(nb, test_data);
% accuracy = length(find(predict_label == test_label))/length(test_label)*100;
%
%
% % 結果 81%
% % **********************************************************************
% % 集成學習方法(Ensembles for Boosting, Bagging, or Random Subspace)
% ens = fitensemble(train_data,train_label,'AdaBoostM1' ,100,'tree','type','classification');
% predict_label = predict(ens, test_data);
%
% m=0;
% n=0;
% for i=1:50
% if predict_label(i,1)>0
% m=m+1;
% end
% if predict_label(i+50,1)<0
% n=n+1;
% end
% end
%
% s=m+n
% r=s/100

% 結果 97%
% **********************************************************************
% 鑒別分析分類器(discriminant analysis classifier)
% obj = ClassificationDiscriminant.fit(train_data, train_label);
% predict_label = predict(obj, test_data);
%
% m=0;
% n=0;
% for i=1:50
% if predict_label(i,1)>0
% m=m+1;
% end
% if predict_label(i+50,1)<0
% n=n+1;
% end
% end
%
% s=m+n
% r=s/100
% result 86%
% **********************************************************************
% Support Vector Machine (SVM) classifier.
% NOTE(review): svmtrain/svmclassify are the pre-R2018a toolbox API and were
% removed in R2018a; fitcsvm/predict are the documented replacements and are
% equivalent for this two-class problem.
SVMStruct = fitcsvm(train_data, train_label);
predict_label = predict(SVMStruct, test_data)

% Accuracy: count correct predictions by comparing directly against the true
% labels, instead of the original loop that was hard-coded to exactly 100 test
% samples ordered as 50 positives followed by 50 negatives (and to labels
% encoded as +1/-1). This yields the same s and r on that layout but also
% works for any test-set size or ordering.
s = sum(predict_label == test_label)    % number of correctly classified samples
r = s/numel(test_label)                 % fraction correct (classification accuracy)
-
-
% result 86%
原文鏈接:http://blog.csdn.net/u014114990/article/details/51067059