1、知識點
""" logging : scrapy: settings中設置LOG_LEVEL="WARNING" settings中設置LOG_FILE="./log.log" #設置日志保存的位置,設置后在終端不會顯示日志內容 import logging 實例化一個logger的方式在任何文件中使用logger輸出內容 logger = logging.getLogger(__name__) #實例化 普通項目中: import logging logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S', filename='myapp.log', filemode='w') #設置日志輸出格式 實例化一個logger = logging.getLogger(__name__) 在任何py文件中調用logger即可 """
2、scrapy項目中使用logging
# -*- coding: utf-8 -*-
import scrapy
import logging

# Module-level logger; named after this module so log lines are attributable.
logger = logging.getLogger(__name__)


class JulyeduSpider(scrapy.Spider):
    """Spider that scrapes the mentor list from the julyedu.com front page."""

    name = 'julyedu'
    allowed_domains = ['julyedu.com']
    start_urls = ['http://julyedu.com/']

    # This method name must not be changed: `parse` is Scrapy's default
    # callback for the start URLs.
    def parse(self, response):
        """Extract the teacher roster from the response.

        :param response: the downloaded page response
        :return: yields one dict per teacher with "name" and "content" keys
        """
        list_li = response.xpath("//div[@class='swiper-wrapper']//li")
        for li in list_li:
            # BUGFIX: build a fresh dict on every iteration. The original
            # reused one dict created before the loop, so every yielded item
            # was the same mutated object — pipelines that buffer items would
            # only ever see the last teacher's values.
            item = {}
            item["name"] = li.xpath(".//h3/text()").extract_first()
            item["content"] = li.xpath(".//p[@class='teacherBrief']/text()").extract_first()
            # Alternative source for the description field:
            # item["content"] = li.xpath(".//p[@class='teacherIntroduction']/text()").extract_first()
            # Hand the data to the pipelines; `yield` here accepts only
            # Request, BaseItem, dict or None.
            logger.warning(item)  # emit the scraped item to the log
            yield item
3、普通項目中
a)建立一個通用的log_a.py
# coding = utf-8 import logging logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S', filename='myapp.log', filemode='w') logger = logging.getLogger(__name__) if __name__ == '__main__': logger.info("this is a log ")
b)log_b.py文件使用通用的log_a.py
# coding = utf-8
# Reuse the logger that log_a.py already configured — no extra setup needed.
from log_a import logger

if __name__ == '__main__':
    logger.warning("b文件")