Decision Trees 3: Gini Index (CART)


  

CART can perform both classification and regression.
Classification: the Gini value of a node is the splitting criterion.
Regression: the minimum variance of a node is the splitting criterion.

 

The more impure a node is, the larger its Gini value, just as its entropy is larger. For a dataset D, the Gini value is

    Gini(D) = 1 − Σ_i p_i²

where p_i is the proportion of samples in D belonging to class i, the same p_i introduced in the information entropy section.
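As a quick check of this formula, here is a minimal sketch (the helper name gini_value and the sample labels are illustrative, not from the original post):

import numpy as np

def gini_value(labels):
    """Gini value of a node: 1 - sum(p_i^2) over the class proportions p_i."""
    _, counts = np.unique(labels, return_counts=True)
    p = counts / counts.sum()
    return 1.0 - np.sum(p ** 2)

print(gini_value(['Y', 'Y', 'Y', 'N']))  # 1 - (0.75^2 + 0.25^2) = 0.375
print(gini_value(['Y', 'Y', 'Y', 'Y']))  # pure node -> 0.0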

 

For regression, the smaller the variance of the target values within the resulting nodes, the better: CART chooses the split that minimizes the sample-weighted variance of the child nodes, as in the sketch below.
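A minimal sketch of that criterion, assuming a single numeric feature and a toy continuous target (the names and data are illustrative):

import numpy as np

def weighted_child_variance(y, mask):
    """Sample-weighted variance of the two child nodes produced by a boolean split mask."""
    n = y.size
    var = 0.0
    for child in (y[mask], y[~mask]):
        if child.size:
            var += child.size / n * np.var(child)
    return var

# toy data: try each threshold and keep the one with the smallest weighted variance
x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
y = np.array([1.1, 1.0, 0.9, 3.2, 3.0])
best = min(((weighted_child_variance(y, x <= t), t) for t in x[:-1]),
           key=lambda p: p[0])
print(best)  # the threshold 3.0 gives the smallest weighted variance here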

 

For each candidate attribute, weight each child node's Gini value by its fraction of the samples and sum; then choose the attribute with the smallest weighted Gini value (0.3 in the worked example).
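A minimal sketch of this selection step, assuming made-up labels and attribute values rather than the figure's original data (the helpers gini_value and weighted_gini are illustrative):

import numpy as np

def gini_value(labels):
    _, counts = np.unique(labels, return_counts=True)
    p = counts / counts.sum()
    return 1.0 - np.sum(p ** 2)

def weighted_gini(feature, labels):
    """Sample-weighted Gini over the children created by splitting on `feature`."""
    n = labels.size
    return sum((feature == v).sum() / n * gini_value(labels[feature == v])
               for v in np.unique(feature))

labels = np.array(['Y', 'Y', 'Y', 'N', 'N', 'N'])
features = {'outlook': np.array([0, 0, 0, 1, 1, 1]),   # perfect split -> weighted Gini 0
            'windy':   np.array([0, 1, 0, 1, 0, 1])}   # mixed children -> weighted Gini ~0.444
best = min(features, key=lambda name: weighted_gini(features[name], labels))
print(best)  # 'outlook', the attribute with the smallest weighted Gini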

Code:

# Full implementation of the CART decision tree (Gini-based splitting):
import numpy as np
import operator

def creatDataSet():
    """
    outlook-> 0:sunny | 1:overcast | 2:rain
    temperature-> 0:hot | 1:mild | 2:cool
    humidity-> 0:high | 1:normal
    windy-> 0:false | 1:true
    """
    dataSet = np.array([[0, 0, 0, 0, 'N'],
                        [0, 0, 0, 1, 'N'],
                        [1, 0, 0, 0, 'Y'],
                        [2, 1, 0, 0, 'Y'],
                        [2, 2, 1, 0, 'Y'],
                        [2, 2, 1, 1, 'N'],
                        [1, 2, 1, 1, 'Y']])
    labels = np.array(['outlook', 'temperature', 'humidity', 'windy'])
    return dataSet, labels

def createTestSet():
    """
    outlook-> 0:sunny | 1:overcast | 2:rain
    temperature-> 0:hot | 1:mild | 2:cool
    humidity-> 0:high | 1:normal
    windy-> 0:false | 1:true
    """
    testSet = np.array([[0, 1, 0, 0],
                        [0, 2, 1, 0],
                        [2, 1, 1, 0],
                        [0, 1, 1, 1],
                        [1, 1, 0, 1],
                        [1, 0, 1, 0],
                        [2, 1, 0, 1]])
    return testSet

def dataset_entropy(dataset):
    """
    Compute the information entropy of the dataset.
    """
    classLabel = dataset[:, -1]
    labelCount = {}
    for i in range(classLabel.size):
        label = classLabel[i]
        labelCount[label] = labelCount.get(label, 0) + 1  # count every class label
    # entropy (step 1)
    cnt = 0
    for k, v in labelCount.items():
        cnt += -v / classLabel.size * np.log2(v / classLabel.size)
    return cnt

# Next: split the data, then find the best attribute.
def splitDataSet(dataset, featureIndex, value):
    subdataset = []
    # iterate over all samples
    for example in dataset:
        if example[featureIndex] == value:
            subdataset.append(example)
    return np.delete(subdataset, featureIndex, axis=1)

def classLabelPi(dataset):
    # sum of squared class proportions, sum(p_i^2); works for multi-way trees
    classLabel = dataset[:, -1]
    labelCount = {}
    for i in range(classLabel.size):
        label = classLabel[i]
        labelCount[label] = labelCount.get(label, 0) + 1
    valueList = list(labelCount.values())
    total = np.sum(valueList)
    pi = 0
    for i in valueList:
        pi += (i / total) ** 2
    return pi

def chooseBestFeature(dataset, labels):
    """
    Choose the best feature (returned as an index, not a name).
    Selection rule: the smallest weighted Gini value.
    """
    # number of features
    featureNum = labels.size
    bestFeatureIndex = None
    # total number of samples
    n = dataset.shape[0]
    # smallest weighted Gini seen so far
    minGini = 1
    for i in range(featureNum):
        gini = 0
        # enumerate every subset produced by splitting on feature i
        featureList = dataset[:, i]
        featureValues = set(featureList)
        for value in featureValues:
            subDataSet = splitDataSet(dataset, i, value)
            pi = subDataSet.shape[0] / n
            gini += pi * (1 - classLabelPi(subDataSet))
        if minGini > gini:
            minGini = gini
            bestFeatureIndex = i
    return bestFeatureIndex  # index of the best feature

def mayorClass(classList):
    # majority class of a node
    labelCount = {}
    for i in range(classList.size):
        label = classList[i]
        labelCount[label] = labelCount.get(label, 0) + 1
    sortedLabel = sorted(labelCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedLabel[0][0]

def createTree(dataset, labels):
    """
    Recursive tree construction (cf. Hunt's algorithm).
    """
    classList = dataset[:, -1]
    if len(set(dataset[:, -1])) == 1:
        return dataset[:, -1][0]  # pure node: return the class
    if labels.size == 0 or len(dataset[0]) == 1:
        # no features left to split on: return the majority class
        return mayorClass(classList)
    bestFeatureIndex = chooseBestFeature(dataset, labels)
    bestFeature = labels[bestFeatureIndex]
    dtree = {bestFeature: {}}  # represent the tree as nested dicts
    featureList = dataset[:, bestFeatureIndex]
    featureValues = set(featureList)
    for value in featureValues:
        subdataset = splitDataSet(dataset, bestFeatureIndex, value)
        sublabels = np.delete(labels, bestFeatureIndex)  # drop the used feature from labels
        dtree[bestFeature][value] = createTree(subdataset, sublabels)
    return dtree

def predict(tree, labels, testData):
    # classify a single test sample
    rootName = list(tree.keys())[0]
    rootValue = tree[rootName]
    featureIndex = list(labels).index(rootName)
    classLabel = None
    for key in rootValue.keys():
        if testData[featureIndex] == int(key):
            if type(rootValue[key]).__name__ == "dict":
                classLabel = predict(rootValue[key], labels, testData)  # recurse into the subtree
            else:
                classLabel = rootValue[key]
    return classLabel

def predictAll(tree, labels, testSet):
    classLabels = []
    for i in testSet:
        classLabels.append(predict(tree, labels, i))
    return classLabels

if __name__ == "__main__":
    dataset, labels = creatDataSet()
    # print(dataset_entropy(dataset))
    # s = splitDataSet(dataset, 0)
    # for item in s:
    #     print(item)
    tree = createTree(dataset, labels)
    testSet = createTestSet()
    print(predictAll(tree, labels, testSet))

Output: ['N', 'N', 'Y', 'N', 'Y', 'Y', 'N']
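One design note: the tree above grows multi-way splits (one branch per feature value), whereas canonical CART grows strictly binary trees. For comparison, here is a minimal sketch of the same data with scikit-learn's Gini-based CART (assumes scikit-learn is installed; predictions may differ from the hand-rolled tree because the splits are binary):

import numpy as np
from sklearn.tree import DecisionTreeClassifier

# the same encoded weather data as in creatDataSet() above
X = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [2, 1, 0, 0],
              [2, 2, 1, 0], [2, 2, 1, 1], [1, 2, 1, 1]])
y = np.array(['N', 'N', 'Y', 'Y', 'Y', 'N', 'Y'])

clf = DecisionTreeClassifier(criterion="gini")  # Gini impurity, binary splits
clf.fit(X, y)
print(clf.predict(np.array([[0, 1, 0, 0], [2, 1, 0, 1]])))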

 

