前言:自然语言处理入门(何晗著)第7章 词性标注
一 概念:
分词语料库,词性标注语料库,标注集
二 流程:
工程上通常在大型分词语料库上训练分词器,然后与在小型词性标注语料库上训练的词性标注模型灵活组合为一个异源的流水线式词法分析器
即先分别训练分词器以及词性标注模型,将分词结果运用到词性标注模型上,进行词性标注
用来训练分词器的材料和用来训练词性标注模型的材料不同
三 代码解析:
词性标注流程
a.分词
b.标注词性
from pyhanlp import *
import zipfile
import os
from pyhanlp.static import download, remove_file, HANLP_DATA_PATH


def test_data_path():
    """Return the test-data directory ($root/data/test), creating it if absent.

    The root directory comes from the pyhanlp configuration (HANLP_DATA_PATH).
    :return: absolute path of the test-data directory.
    """
    data_path = os.path.join(HANLP_DATA_PATH, 'test')
    if not os.path.isdir(data_path):
        os.mkdir(data_path)
    return data_path


def ensure_data(data_name, data_url):
    """Download *data_url* into the test-data directory unless already present.

    A ``.zip`` URL is downloaded with a ``.zip`` suffix, extracted in place,
    and the archive file is removed afterwards.
    :param data_name: name of the dataset (directory/file under the test path).
    :param data_url: URL to fetch the dataset from.
    :return: local path of the (extracted) dataset.
    """
    root_path = test_data_path()
    dest_path = os.path.join(root_path, data_name)
    if os.path.exists(dest_path):
        return dest_path
    if data_url.endswith('.zip'):
        dest_path += '.zip'
    download(data_url, dest_path)
    if data_url.endswith('.zip'):
        with zipfile.ZipFile(dest_path, "r") as archive:
            archive.extractall(root_path)
        remove_file(dest_path)
        dest_path = dest_path[:-len('.zip')]
    return dest_path


PKU98 = ensure_data("pku98", "http://file.hankcs.com/corpus/pku98.zip")
PKU199801 = os.path.join(PKU98, '199801.txt')
PKU199801_TRAIN = os.path.join(PKU98, '199801-train.txt')
PKU199801_TEST = os.path.join(PKU98, '199801-test.txt')
# Destination files for the trained models (initially empty / to be created).
POS_MODEL = os.path.join('C:\\Users\\Administrator\\Desktop\\cx', 'pos.bin')
NER_MODEL = os.path.join('C:\\Users\\Administrator\\Desktop\\cx', 'ner.bin')
ZHUXIAN = ensure_data("zhuxian", "http://file.hankcs.com/corpus/zhuxian.zip") + "/train.txt"

# HanLP perceptron classes exposed through the JVM bridge.
POSTrainer = JClass('com.hankcs.hanlp.model.perceptron.POSTrainer')
PerceptronSegmenter = JClass('com.hankcs.hanlp.model.perceptron.PerceptronSegmenter')
AbstractLexicalAnalyzer = JClass('com.hankcs.hanlp.tokenizer.lexical.AbstractLexicalAnalyzer')
PerceptronPOSTagger = JClass('com.hankcs.hanlp.model.perceptron.PerceptronPOSTagger')


def train_perceptron_pos(corpus):
    """Train a perceptron POS tagger on *corpus* and return the loaded tagger.

    The model is trained and persisted to ``POS_MODEL``; a tagging demo and a
    combined segmentation + POS analysis are printed along the way.
    :param corpus: path of the POS-annotated training corpus.
    :return: a ``PerceptronPOSTagger`` loaded from the trained model file.
    """
    trainer = POSTrainer()
    trainer.train(corpus, POS_MODEL)  # train and save the model to POS_MODEL
    # BUG FIX: the original discarded the trained-model object by rebinding it
    # to a hand-built path that duplicated the hard-coded POS_MODEL location
    # (single-argument os.path.join). Load from the shared constant instead.
    tagger = PerceptronPOSTagger(POS_MODEL)  # load the freshly trained model
    print(', '.join(tagger.tag("他", "的", "希望", "是", "希望", "上学")))  # predict
    analyzer = AbstractLexicalAnalyzer(PerceptronSegmenter(), tagger)  # build lexical analyzer
    print(analyzer.analyze("李狗蛋的希望是希望上学"))  # segmentation + POS tagging
    return tagger


posTagger = train_perceptron_pos(ZHUXIAN)  # train
analyzer = AbstractLexicalAnalyzer(PerceptronSegmenter(), posTagger)  # wrap into an analyzer
print(analyzer.analyze("陆雪琪的天琊神剑不做丝毫退避,直冲而上,瞬间,这两道奇光异宝撞到了一起。"))  # segment + tag