Let's use XPath to build a simple spider: it crawls every post in a given Tieba forum and downloads the images posted on each floor of those posts to the local disk.
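Before reading the full script, here is a minimal, self-contained sketch of the core technique it relies on: parsing HTML with lxml's etree.HTML and pulling out attribute values with an XPath expression. The HTML fragment below is made up for illustration (it is not real Tieba markup), and the selector is simplified to //a compared with the /div/div/div/a path used in the spider.

# -*- coding:utf-8 -*-
from lxml import etree

# A made-up HTML fragment standing in for a Tieba list page
html = """
<div class="t_con cleafix">
  <div><div><div>
    <a href="/p/123456">Example post title</a>
  </div></div></div>
</div>
<img class="BDE_Image" src="http://example.com/pic.jpg"/>
"""

# Parse the string into an HTML DOM tree
content = etree.HTML(html)

# Ending an XPath expression with @href / @src returns the
# attribute values themselves rather than the element nodes
print content.xpath('//div[@class="t_con cleafix"]//a/@href')
# ['/p/123456']
print content.xpath('//img[@class="BDE_Image"]/@src')
# ['http://example.com/pic.jpg']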
# -*- coding:utf-8 -*-

import urllib
import urllib2
from lxml import etree


def loadPage(url):
    """
    Purpose: send a request to the given url and fetch the server's response.
    url: the url to crawl
    """
    request = urllib2.Request(url)
    html = urllib2.urlopen(request).read()

    # Parse the HTML document into an HTML DOM tree
    content = etree.HTML(html)

    # xpath() returns the list of all matched results
    link_list = content.xpath('//div[@class="t_con cleafix"]/div/div/div/a/@href')
    # Alternative selector for the same links:
    #link_list = content.xpath('//a[@class="j_th_tit"]/@href')

    for link in link_list:
        # Combine into the full link of each post
        fulllink = "http://tieba.baidu.com" + link
        loadImage(fulllink)


# Extract every image link inside a post
def loadImage(link):
    headers = {"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
    request = urllib2.Request(link, headers = headers)
    html = urllib2.urlopen(request).read()

    # Parse
    content = etree.HTML(html)

    # Collect the image links posted by the author of each floor
    link_list = content.xpath('//img[@class="BDE_Image"]/@src')

    # Save each image
    for link in link_list:
        writeImage(link)


def writeImage(link):
    """
    Purpose: save the image data to the local disk.
    link: the image link
    """
    headers = {"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}

    request = urllib2.Request(link, headers = headers)
    # Raw image data
    image = urllib2.urlopen(request).read()

    # Use the last 10 characters of the link as the file name
    filename = link[-10:]

    # Write to a file on the local disk
    with open(filename, "wb") as f:
        f.write(image)
    print "Successfully downloaded " + filename


def tiebaSpider(url, beginPage, endPage):
    """
    Purpose: the Tieba spider scheduler, responsible for building the url of each list page.
    url : the fixed front part of the Tieba url
    beginPage : start page
    endPage : end page
    """
    for page in range(beginPage, endPage + 1):
        # Each Tieba list page shows 50 posts, so pn is the post offset
        pn = (page - 1) * 50
        fullurl = url + "&pn=" + str(pn)
        loadPage(fullurl)
    print "Thanks for using"


if __name__ == "__main__":
    kw = raw_input("Enter the name of the Tieba forum to crawl: ")
    beginPage = int(raw_input("Enter the start page: "))
    endPage = int(raw_input("Enter the end page: "))

    url = "http://tieba.baidu.com/f?"
    key = urllib.urlencode({"kw": kw})
    fullurl = url + key
    tiebaSpider(fullurl, beginPage, endPage)
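The script above targets Python 2: urllib2, raw_input, and print statements do not exist in Python 3. For readers on Python 3, here is a minimal sketch of the same fetch-and-save steps using urllib.request. The names fetch, write_image, and HEADERS are my own; the XPath selectors are copied from the script above and assume the Tieba page markup has not changed.

import urllib.request
from lxml import etree

HEADERS = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}

def fetch(url):
    # urllib2.Request / urllib2.urlopen became urllib.request in Python 3
    request = urllib.request.Request(url, headers=HEADERS)
    return urllib.request.urlopen(request).read()

def write_image(link):
    image = fetch(link)        # raw image bytes
    filename = link[-10:]      # last 10 characters as the file name, as above
    with open(filename, "wb") as f:
        f.write(image)
    print("Downloaded " + filename)

def load_page(url):
    content = etree.HTML(fetch(url))
    # Same selectors as the Python 2 version
    for link in content.xpath('//div[@class="t_con cleafix"]/div/div/div/a/@href'):
        post = etree.HTML(fetch("http://tieba.baidu.com" + link))
        for src in post.xpath('//img[@class="BDE_Image"]/@src'):
            write_image(src)

The remaining changes are mechanical: raw_input becomes input, urllib.urlencode becomes urllib.parse.urlencode, and print is a function; the paging logic in tiebaSpider carries over unchanged.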
Result: