A Python implementation of a KNN-based text classifier for newsgroup 18828


As with the previous post, this is intended as introductory learning material.

1. Description of the KNN algorithm:

Step 1: Represent each text as a vector by computing the TF-IDF value of every feature word.

Step 2: When a new text arrives, build its vector from the same feature words.

Step 3: From the training set, select the k text vectors most similar to the new text's vector, measuring similarity by cosine similarity (see the formula sketch after these steps); k is tuned experimentally, and 20 is used here.

Step 4: Among the new text's k nearest neighbors, compute a weight for each category in turn (here, the sum of the neighbors' similarity scores per category).

Step 5: Compare the category weights and assign the new text to the category with the largest weight.
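
For reference, the quantities these steps rely on are the standard definitions, and they match the code below (N is the total number of training documents and df(w) is the number of documents containing word w):

    TF(w, d)     = (occurrences of w in d) / (total words in d)
    IDF(w)       = log10( N / df(w) )
    TF-IDF(w, d) = TF(w, d) * IDF(w)
    cos(A, B)    = (A . B) / (|A| * |B|)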

 

2. Computing document TF-IDF values and the vector representation

# -*- coding: utf-8 -*-
import time
from os import listdir
from math import log
from numpy import *
from numpy import linalg
from operator import itemgetter

###################################################
## Compute the IDF value of every word
###################################################
def computeIDF():
    fileDir = 'processedSampleOnlySpecial_2'
    wordDocMap = {}  # <word, set(docM,...,docN)>
    IDFPerWordMap = {}  # <word, IDF value>
    countDoc = 0.0
    cateList = listdir(fileDir)
    for i in range(len(cateList)):
        sampleDir = fileDir + '/' + cateList[i]
        sampleList = listdir(sampleDir)
        for j in range(len(sampleList)):
            sample = sampleDir + '/' + sampleList[j]
            for line in open(sample).readlines():
                word = line.strip('\n')
                if word in wordDocMap:
                    wordDocMap[word].add(sampleList[j]) # the set records the documents the word appears in
                else:
                    wordDocMap.setdefault(word, set())
                    wordDocMap[word].add(sampleList[j])
        print 'just finished %d round ' % i

    for word in wordDocMap.keys():
        countDoc = len(wordDocMap[word]) # number of documents in the set
        IDF = log(20000.0/countDoc)/log(10) # IDF = log10(N/df); 20000.0 avoids Python 2 integer division
        IDFPerWordMap[word] = IDF
 
    return IDFPerWordMap
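
As a quick sanity check of the IDF formula (using the author's round figure of 20000 total documents): a word that appears in 20 documents gets log10(20000/20) = 3.

>>> from math import log
>>> log(20000.0/20)/log(10)   # word seen in 20 of 20000 documents
3.0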

###################################################
## Write the IDF values to a file
###################################################    
def main():
    start = time.clock()
    IDFPerWordMap = computeIDF()
    end = time.clock()
    print 'runtime: ' + str(end-start)
    fw = open('IDFPerWord', 'w')
    for word, IDF in IDFPerWordMap.items():
        fw.write('%s %.6f\n' % (word, IDF))
    fw.close()
    
########################################################
## Generate document vectors for the training and test sets, in the form
## <cate, doc, (word1, tfidf1), (word2, tfidf2),...>, and save them to file
## @param indexOfSample index of the current train/test split (fold)
## @param trainSamplePercent fraction of each category used for training
########################################################
def computeTFMultiIDF(indexOfSample, trainSamplePercent):
    IDFPerWord = {} # <word, IDF value> read back from the file written above
    for line in open('IDFPerWord').readlines():
        (word, IDF) = line.strip('\n').split(' ')
        IDFPerWord[word] = float(IDF) # convert from string so it can be used in arithmetic
    
    fileDir = 'processedSampleOnlySpecial_2'
    trainFileDir = "docVector/" + 'wordTFIDFMapTrainSample' + str(indexOfSample)
    testFileDir = "docVector/" + 'wordTFIDFMapTestSample' + str(indexOfSample)

    tsTrainWriter = open(trainFileDir, 'w')
    tsTestWriter = open(testFileDir, 'w')

    cateList = listdir(fileDir)
    for i in range(len(cateList)):
        sampleDir = fileDir + '/' + cateList[i]
        sampleList = listdir(sampleDir)
        
        # integer bounds of this fold's test slice within the category
        testBeginIndex = int( indexOfSample * ( len(sampleList) * (1-trainSamplePercent) ) )
        testEndIndex = int( (indexOfSample+1) * ( len(sampleList) * (1-trainSamplePercent) ) )
        
        for j in range(len(sampleList)):
            TFPerDocMap = {} # <word, number of occurrences of word in this doc>
            sumPerDoc = 0  # total number of words in this doc
            sample = sampleDir + '/' + sampleList[j]
            for line in open(sample).readlines():
                sumPerDoc += 1
                word = line.strip('\n')
                TFPerDocMap[word] = TFPerDocMap.get(word, 0) + 1
            
            if (j >= testBeginIndex) and (j < testEndIndex): # half-open range so folds don't overlap
                tsWriter = tsTestWriter
            else:
                tsWriter = tsTrainWriter

            tsWriter.write('%s %s ' % (cateList[i], sampleList[j])) # write category cate and document doc

            for word, count in TFPerDocMap.items():
                TF = float(count)/float(sumPerDoc)
                tsWriter.write('%s %f ' % (word, TF * IDFPerWord[word])) # then every word of the doc with its TF-IDF value

            tsWriter.write('\n')

        print 'just finished %d round ' % i

        #if i==0: break # debug: stop after the first category

    tsTrainWriter.close()
    tsTestWriter.close()
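
The post never shows how these two functions are wired together; a minimal driver might look like this (the fold index 0 and the 0.9 training split are my assumptions, and the docVector/ directory must exist beforehand):

if __name__ == '__main__':
    main()                     # compute IDF values and write them to 'IDFPerWord'
    computeTFMultiIDF(0, 0.9)  # fold 0, 90% of each category kept for training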

3. Implementation of the KNN algorithm

def doProcess():
    trainFiles = 'docVector/wordTFIDFMapTrainSample0'
    testFiles = 'docVector/wordTFIDFMapTestSample0'
    kNNResultFile = 'docVector/KNNClassifyResult'

    trainDocWordMap = {}  # dict <key, value>: key = cate_doc, value = {word1: tfidf1, word2: tfidf2, ...}

    for line in open(trainFiles).readlines():
        lineSplitBlock = line.strip('\n').split(' ')
        trainWordMap = {}
        m = len(lineSplitBlock)-1  # the trailing space leaves an empty string at the end; stop before it
        for i in range(2, m, 2):  # extract each (word, tfidf) pair of the document vector into a dict
            trainWordMap[lineSplitBlock[i]] = lineSplitBlock[i+1]

        temp_key = lineSplitBlock[0] + '_' + lineSplitBlock[1]  # extract category cate and document doc from the vector
        trainDocWordMap[temp_key] = trainWordMap 

    testDocWordMap = {}

    for line in open(testFiles).readlines():
        lineSplitBlock = line.strip('\n').split(' ')
        testWordMap = {} 
        m = len(lineSplitBlock)-1
        for i in range(2, m, 2):
            testWordMap[lineSplitBlock[i]] = lineSplitBlock[i+1]

        temp_key = lineSplitBlock[0] + '_' + lineSplitBlock[1]
        testDocWordMap[temp_key] = testWordMap # <category_filename, <word, TFIDF>>

    # iterate over every test sample, compute its distance to all training samples, and classify
    count = 0
    rightCount = 0
    KNNResultWriter = open(kNNResultFile,'w')
    for item in testDocWordMap.items():
        classifyResult = KNNComputeCate(item[0], item[1], trainDocWordMap)  # classify by calling KNNComputeCate

        count += 1
        print 'this is %d round' % count

        classifyRight = item[0].split('_')[0]
        KNNResultWriter.write('%s %s\n' % (classifyRight,classifyResult))
        if classifyRight == classifyResult:
            rightCount += 1
        print '%s %s rightCount:%d' % (classifyRight,classifyResult,rightCount)

    accuracy = float(rightCount)/float(count)
    print 'rightCount : %d , count : %d , accuracy : %.6f' % (rightCount,count,accuracy)
    return accuracy
            


#########################################################
## @param cate_Doc the test document's <category_filename> key
## @param testDic the test document's vector, {word: TFIDF}
## @param trainMap the training set, <category_filename, <word, TFIDF>>
## @return sortedCateSimMap[0][0], the category with the largest summed similarity (i.e., smallest distance) to the test document
#########################################################
def KNNComputeCate(cate_Doc, testDic, trainMap):
    simMap = {} # <category_filename, similarity>; this dict is sorted by value below
    for item in trainMap.items():
        similarity = computeSim(testDic, item[1])  # call computeSim()
        simMap[item[0]] = similarity

    sortedSimMap = sorted(simMap.iteritems(), key=itemgetter(1), reverse=True) # <category_filename, similarity> sorted by value, largest first

    k = 20
    cateSimMap = {} # <category, summed similarity>
    for i in range(k):
        cate = sortedSimMap[i][0].split('_')[0]
        cateSimMap[cate] = cateSimMap.get(cate,0) + sortedSimMap[i][1]

    sortedCateSimMap = sorted(cateSimMap.iteritems(), key=itemgetter(1), reverse=True)

    return sortedCateSimMap[0][0]   
        
    
#################################################
## @param testDic a single test document vector, <word, tfidf>
## @param trainDic a single training document vector, <word, tfidf>
## @return the cosine similarity of the two vectors
#################################################
def computeSim(testDic, trainDic):
    testList = []  # tfidf values, in the test vector, of the words shared by both vectors
    trainList = []  # tfidf values, in the training vector, of the words shared by both vectors
    
    for word, weight in testDic.items():
        if trainDic.has_key(word):
            testList.append(float(weight)) # float() converts the string weights to numbers for the arithmetic below
            trainList.append(float(trainDic[word]))

    testVect = mat(testList)  # lists to matrices, for the dot product and numpy's norm function below
    trainVect = mat(trainList)
    num = float(testVect * trainVect.T)
    denom = linalg.norm(testVect) * linalg.norm(trainVect)
    #print 'denom:%f' % denom
    return float(num)/(1.0+float(denom)) # the +1.0 guards against a zero denominator when the vectors share no words
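
To see the similarity function in isolation, here is a toy call with two hand-made vectors (the words and weights are made up; the weights are strings, as they are when read back from the vector files). Running the whole pipeline is then just a call to doProcess():

if __name__ == '__main__':
    test = {'ball': '0.52', 'team': '0.31'}   # toy test vector
    train = {'ball': '0.48', 'game': '0.22'}  # toy training vector; only 'ball' is shared
    print computeSim(test, train)             # prints roughly 0.2 (0.2496 / 1.2496)
    doProcess()                               # run KNN over the saved docVector files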


Output:

(The original post shows a screenshot of the run here; doProcess() prints per-sample progress and a final line with rightCount, count, and accuracy.)

Several errors encountered while running the code:

Error 1:

After splitting a line with split(' '), the last element is an empty string (each line ends with a space before the newline). This is hard to spot unless you inspect the last element of the resulting list, and it leads to out-of-range errors.
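
A two-line demonstration with a made-up line:

>>> 'alt.atheism 49960 '.split(' ')   # the trailing space yields an empty final element
['alt.atheism', '49960', '']
>>> 'alt.atheism 49960 '.split()      # split() with no argument would drop it
['alt.atheism', '49960']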

Error 2:

Values read from a file and taken apart with strip()/split() are all strings, and strings cannot take part in arithmetic; they must first be converted with int(string) or float(string).
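
For example:

>>> tf, idf = '3', '2.5'     # values parsed from a file arrive as strings
>>> tf * idf
TypeError: can't multiply sequence by non-int of type 'str'
>>> float(tf) * float(idf)   # convert first, then compute
7.5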

Error 3:

sorted() applied to a dict's <key, value> pairs returns a list of (key, value) tuples. Not knowing this at first caused an indexing error.

The return form of sorted() is shown below:
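
Demonstrated on a small made-up dict (the keys mimic the category_filename form used above):

>>> from operator import itemgetter
>>> d = {'rec.sport.baseball_101': 0.83, 'sci.space_7': 0.12}
>>> pairs = sorted(d.iteritems(), key=itemgetter(1), reverse=True)
>>> pairs
[('rec.sport.baseball_101', 0.83), ('sci.space_7', 0.12)]
>>> pairs[0][0]   # a list of (key, value) tuples, so [0][0] is the top key
'rec.sport.baseball_101'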

