These are all notes I took myself while watching the course videos, for personal use only; I will take them down if anything infringes!
Also referenced: http://www.ai-start.com/ml2014/
In [8]:
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt

# iris.data has no header row, so pass header=None;
# otherwise the first sample would be consumed as column names
iris_data = pd.read_csv('iris.data', header=None)
iris_data.columns = ['sepal_length_cm', 'sepal_width_cm',
                     'petal_length_cm', 'petal_width_cm', 'class']
iris_data.head()
Out[8]:
In [9]:
from PIL import Image

img = Image.open('test.jpg')
plt.imshow(img)
plt.show()
In [10]:
iris_data.describe()
Out[10]:
Plot the distribution of each class
In [12]:
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sb

# pairplot cannot handle missing values, so drop them first
sb.pairplot(iris_data.dropna(), hue='class')
Out[12]:
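Since pairplot cannot take missing values, it is worth checking how many there actually are before relying on dropna(); a minimal sketch (the iris data should have none):

# count missing values per column; the dropna() above only matters if any count is non-zero
iris_data.isnull().sum()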
In [13]:
plt.figure(figsize=(10, 10))

# enumerate yields each column name together with its index
for column_index, column in enumerate(iris_data.columns):
    if column == 'class':
        continue
    plt.subplot(2, 2, column_index + 1)
    sb.violinplot(x='class', y=column, data=iris_data)
Split into training and test sets
In [15]:
# sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection
from sklearn.model_selection import train_test_split

all_inputs = iris_data[['sepal_length_cm', 'sepal_width_cm',
                        'petal_length_cm', 'petal_width_cm']].values
all_classes = iris_data['class'].values

(training_inputs, testing_inputs,
 training_classes, testing_classes) = train_test_split(all_inputs, all_classes,
                                                       train_size=0.75,
                                                       random_state=1)
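As a side note, train_test_split also accepts a stratify argument that keeps the class proportions identical in both splits; a minimal sketch of the same call with it:

# stratify=all_classes keeps the class ratio equal in the train and test splits
(training_inputs, testing_inputs,
 training_classes, testing_classes) = train_test_split(all_inputs, all_classes,
                                                       train_size=0.75,
                                                       stratify=all_classes,
                                                       random_state=1)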
Build the decision tree model
In [19]:
from sklearn.tree import DecisionTreeClassifier

# 1. criterion: 'gini' or 'entropy', the split-quality measure.
# 2. splitter: 'best' or 'random'. 'best' searches all features for the best split point;
#    'random' searches a subset of features (useful when the dataset is large).
# 3. max_features: None (all features), 'log2', 'sqrt', or an int N. With fewer than ~50
#    features, using all of them is usually fine.
# 4. max_depth: with little data or few features this can be left unset; with many samples
#    and many features, try limiting it (pre-pruning).
# 5. min_samples_split: if a node has fewer samples than this, it will not be split further.
#    Leave it alone for small datasets; for very large datasets, increase it (a stopping rule).
# 6. min_samples_leaf: the minimum number of samples in a leaf; a leaf smaller than this is
#    pruned together with its sibling. Ignore it for small datasets; for something like 100k
#    samples, try a value around 5.
# 7. min_weight_fraction_leaf: the minimum total sample weight in a leaf; leaves below it are
#    pruned together with their siblings. Defaults to 0, i.e. weights are ignored. If many
#    samples have missing values, or the class distribution is very skewed, sample weights
#    come into play and this value starts to matter.
# 8. max_leaf_nodes: capping the number of leaf nodes prevents overfitting. The default "None"
#    means no cap. With a cap, the algorithm builds the best tree within that many leaves.
#    With few features this can be ignored; with many features, consider capping it and tune
#    the exact value by cross-validation.
# 9. class_weight: per-class sample weights, mainly to stop classes with many training samples
#    from dominating the tree. You can set the weights yourself, or pass 'balanced' to have
#    them computed automatically, giving higher weight to smaller classes.
# 10. min_impurity_split: limits tree growth. If a node's impurity (Gini, information gain,
#     MSE, MAE) falls below this threshold, the node is not split further and becomes a leaf.
#     (Commonly used; newer scikit-learn deprecates it in favour of min_impurity_decrease.)
# (a parameter-construction sketch follows after this cell)
decision_tree_classifier = DecisionTreeClassifier()

# Train the classifier on the training set
decision_tree_classifier.fit(training_inputs, training_classes)

# Validate the classifier on the testing set using classification accuracy
decision_tree_classifier.score(testing_inputs, testing_classes)
Out[19]:
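To make the parameter list above concrete, here is a minimal sketch of passing several of those settings explicitly; the specific values are illustrative, not tuned:

decision_tree_classifier = DecisionTreeClassifier(
    criterion='gini',      # split-quality measure (1.)
    splitter='best',       # search all features for the best split point (2.)
    max_features=None,     # consider every feature at each split (3.)
    max_depth=3,           # pre-pruning: cap the depth of the tree (4.)
    min_samples_split=2,   # minimum samples needed to split a node (5.)
    min_samples_leaf=1,    # minimum samples required in a leaf (6.)
    random_state=1)
decision_tree_classifier.fit(training_inputs, training_classes)
decision_tree_classifier.score(testing_inputs, testing_classes)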
In [28]:
from sklearn.model_selection import cross_val_score
import numpy as np

decision_tree_classifier = DecisionTreeClassifier()

# cross_val_score returns a list of the scores, which we can visualize
# to get a reasonable estimate of our classifier's performance
cv_scores = cross_val_score(decision_tree_classifier, all_inputs, all_classes, cv=10)
print(cv_scores)

# kde=False plots a plain histogram of the fold scores
# (distplot is deprecated in newer seaborn; histplot is its replacement)
sb.distplot(cv_scores, kde=False)
plt.title('Average score: {}'.format(np.mean(cv_scores)))
Out[28]:
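To make what cross_val_score does concrete, here is a rough hand-rolled equivalent using StratifiedKFold, which is what cv=10 uses under the hood for classifiers (a sketch of the idea, not the library's internals):

from sklearn.model_selection import StratifiedKFold

skf = StratifiedKFold(n_splits=10)
manual_scores = []
for train_idx, test_idx in skf.split(all_inputs, all_classes):
    clf = DecisionTreeClassifier()
    clf.fit(all_inputs[train_idx], all_classes[train_idx])
    manual_scores.append(clf.score(all_inputs[test_idx], all_classes[test_idx]))
print(np.mean(manual_scores))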
In [29]:
decision_tree_classifier = DecisionTreeClassifier(max_depth=1)

cv_scores = cross_val_score(decision_tree_classifier, all_inputs, all_classes, cv=10)
print(cv_scores)

sb.distplot(cv_scores, kde=False)
plt.title('Average score: {}'.format(np.mean(cv_scores)))
Out[29]:
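A depth-1 stump clearly underfits; sweeping max_depth and plotting the mean CV score makes the effect visible. A small sketch (the depth range is illustrative):

depths = range(1, 11)
mean_scores = [np.mean(cross_val_score(DecisionTreeClassifier(max_depth=d),
                                       all_inputs, all_classes, cv=10))
               for d in depths]
plt.plot(depths, mean_scores)
plt.xlabel('max_depth')
plt.ylabel('mean CV accuracy')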
In [30]:
# GridSearchCV and StratifiedKFold also moved to sklearn.model_selection;
# StratifiedKFold now takes n_splits and receives the labels via split()
from sklearn.model_selection import GridSearchCV, StratifiedKFold

decision_tree_classifier = DecisionTreeClassifier()

parameter_grid = {'max_depth': [1, 2, 3, 4, 5],
                  'max_features': [1, 2, 3, 4]}

cross_validation = StratifiedKFold(n_splits=10)

grid_search = GridSearchCV(decision_tree_classifier,
                           param_grid=parameter_grid,
                           cv=cross_validation)

grid_search.fit(all_inputs, all_classes)
print('Best score: {}'.format(grid_search.best_score_))
print('Best parameters: {}'.format(grid_search.best_params_))
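Conceptually, GridSearchCV is just the nested loop below, cross-validating one parameter combination at a time and keeping the best (a sketch of the idea, not the actual implementation):

best_score, best_params = 0.0, None
for depth in parameter_grid['max_depth']:
    for n_feats in parameter_grid['max_features']:
        clf = DecisionTreeClassifier(max_depth=depth, max_features=n_feats)
        score = np.mean(cross_val_score(clf, all_inputs, all_classes, cv=10))
        if score > best_score:
            best_score = score
            best_params = {'max_depth': depth, 'max_features': n_feats}
print(best_score, best_params)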
In [31]:
# grid_scores_ was removed; cv_results_['mean_test_score'] holds the mean CV score
# for each parameter combination, in the same (max_depth, max_features) order
grid_visualization = np.array(grid_search.cv_results_['mean_test_score'])
grid_visualization.shape = (5, 4)

sb.heatmap(grid_visualization, cmap='Blues')
# heatmap draws row 0 at the top, so label the ticks in row order
plt.xticks(np.arange(4) + 0.5, grid_search.param_grid['max_features'])
plt.yticks(np.arange(5) + 0.5, grid_search.param_grid['max_depth'])
plt.xlabel('max_features')
plt.ylabel('max_depth')
Out[31]:
In [32]:
decision_tree_classifier = grid_search.best_estimator_
decision_tree_classifier
Out[32]:
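It is worth confirming the tuned tree on the held-out split from earlier; a quick check reusing the variables defined above:

# best_estimator_ was refit on all the data, so refit on the training split
# first to get a clean held-out estimate
decision_tree_classifier.fit(training_inputs, training_classes)
decision_tree_classifier.score(testing_inputs, testing_classes)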
In [33]:
import sklearn.tree as tree
# (the old "from sklearn.externals.six import StringIO" import was unused here
#  and sklearn.externals.six has since been removed)

# export the fitted tree as a Graphviz .dot file (http://www.graphviz.org/)
with open('iris_dtc.dot', 'w') as out_file:
    out_file = tree.export_graphviz(decision_tree_classifier, out_file=out_file)
Generate the image below with: dot -Tpng iris_dtc.dot -o test.png

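export_graphviz can also label the plot directly with feature and class names. The sketch below assumes the optional graphviz Python package is installed, and renders the tree inline instead of shelling out to dot:

import graphviz  # assumes the 'graphviz' Python package is installed

dot_data = tree.export_graphviz(decision_tree_classifier, out_file=None,
                                feature_names=['sepal_length_cm', 'sepal_width_cm',
                                               'petal_length_cm', 'petal_width_cm'],
                                class_names=decision_tree_classifier.classes_,
                                filled=True, rounded=True)
graphviz.Source(dot_data)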
In [34]:
from sklearn.ensemble import RandomForestClassifier

random_forest_classifier = RandomForestClassifier()

parameter_grid = {'n_estimators': [5, 10, 25, 50],
                  'criterion': ['gini', 'entropy'],
                  'max_features': [1, 2, 3, 4],
                  'warm_start': [True, False]}

cross_validation = StratifiedKFold(n_splits=10)

grid_search = GridSearchCV(random_forest_classifier,
                           param_grid=parameter_grid,
                           cv=cross_validation)

grid_search.fit(all_inputs, all_classes)
print('Best score: {}'.format(grid_search.best_score_))
print('Best parameters: {}'.format(grid_search.best_params_))

grid_search.best_estimator_
Out[34]:
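One useful thing to pull out of the tuned forest is its feature importances, which show how much each measurement contributed to the splits; a small sketch reusing the column names from the top of the notebook:

best_forest = grid_search.best_estimator_
feature_names = ['sepal_length_cm', 'sepal_width_cm',
                 'petal_length_cm', 'petal_width_cm']
for name, importance in zip(feature_names, best_forest.feature_importances_):
    print('{}: {:.3f}'.format(name, importance))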