Some pages use frames. With Selenium + PhantomJS, the content inside an iframe is not loaded along with the parent page: an iframe effectively loads a second page inside the first, so you have to switch into it with Selenium's switch_to.frame() method (the official docs mention switch_to_frame(), but the IDE flags it as deprecated in favor of switch_to.frame()).
For example:
driver.switch_to.frame('g_iframe')
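A slightly fuller sketch of the pattern, continuing the same example (the frame name 'g_iframe' is site-specific; switch_to.default_content() is the standard Selenium call for returning to the top-level document):

# switch into the iframe, grab its rendered HTML, then switch back out
driver.switch_to.frame('g_iframe')  # by frame name or id
html = driver.execute_script("return document.documentElement.outerHTML")
driver.switch_to.default_content()  # return to the top-level page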
1. Introduction
This example uses Selenium + PhantomJS to crawl news from LMTW (http://www.lmtw.com/search.php?show=title%2Ckeyboard%2Cwriter&searchget=1&keyboard=%E7%94%B5%E8%A7%86), fetching the articles that match a set of given keywords.
Given keywords: 數字 (digital); 融合 (convergence); 電視 (television)
The following fields are captured for each article:
1. Article title
2. Article link
3. Article date
4. Article source
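For reference, the search URLs are built by appending each URL-encoded keyword to the base search URL; a minimal Python 2 sketch of that step (the same logic appears in __init__ in section 4):

# coding=utf-8
import urllib

websearch_url = 'http://www.lmtw.com/search.php?show=title%2Ckeyboard%2Cwriter&searchget=1&keyboard='
keyword_list = ['數字', '融合', '電視']  # byte strings under the coding declaration
start_urls = [websearch_url + urllib.quote(k) for k in keyword_list]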
2. Site Information
(Screenshots of the search-result page and the article page appeared here in the original post.)
3. Data Extraction
Based on the site structure shown above, the data is extracted in five steps (a combined sketch follows the list):
1. Extract the result list
Code: Elements = doc('h2[class="r"]')
2. Extract the title
Code: title = element.find('a').text().encode('utf8').strip()
3. Extract the link
Code: url = element.find('a').attr('href')
4. Extract the date
Code: dateElements = doc('span[class="a"]')
5. Extract the source (from each article page)
Code: strSources = dochtml('div[class="from"]').text().encode('utf8').strip()
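Putting the list-page selectors together, here is a minimal sketch of the parsing step (it assumes PyQuery and an HTML string obtained from the driver, as in the complete code below; the helper name parse_list_page is mine, not from the original):

# coding=utf-8
from pyquery import PyQuery as pq

def parse_list_page(selenium_html):
    # selenium_html: outerHTML of the search-result iframe
    doc = pq(selenium_html)
    dateElements = doc('span[class="a"]')
    results = []
    for index, element in enumerate(doc('h2[class="r"]').items()):
        results.append({
            'title': element.find('a').text().encode('utf8').strip(),
            'url': element.find('a').attr('href'),
            'date': dateElements[index].text,  # pq indexing yields lxml elements
        })
    return results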
4. Complete Code
# coding=utf-8
import os
import re
from selenium import webdriver
import selenium.webdriver.support.ui as ui
import time
import datetime
from pyquery import PyQuery as pq
import LogFile
import mongoDB
import urllib


class lmtwSpider(object):
    def __init__(self, driver, log, keyword_list, websearch_url, valid, filter):
        '''
        :param driver: webdriver instance
        :param log: logger
        :param keyword_list: list of keywords to search for
        :param websearch_url: base search URL
        :param valid: collection window. 0: collect today's articles; 1: collect yesterday's
        :param filter: filter term; articles whose titles contain it are discarded
        '''
        self.log = log
        self.driver = driver
        self.webSearchUrl_list = websearch_url.split(';')
        self.keyword_list = keyword_list
        self.db = mongoDB.mongoDbBase()
        self.valid = valid
        self.start_urls = []
        # filter term
        self.filter = filter
        # build one search URL per keyword
        for keyword in keyword_list:
            url = websearch_url + urllib.quote(keyword)
            self.start_urls.append(url)

    def compare_to_days(self, leftdate, rightdate):
        '''
        Compare two date strings; return how many days leftdate is ahead of rightdate
        :param leftdate: format: 2017-04-15
        :param rightdate: format: 2017-04-15
        :return: number of days
        '''
        l_time = time.mktime(time.strptime(leftdate, '%Y-%m-%d'))
        r_time = time.mktime(time.strptime(rightdate, '%Y-%m-%d'))
        result = int(l_time - r_time) / 86400
        return result

    def date_isValid(self, strDateText):
        '''
        Check whether a date string falls inside the configured collection window
        :param strDateText: e.g. '2017-06-20 10:22 '
        :return: (True, date string) if valid; (False, '') otherwise
        '''
        currentDate = time.strftime('%Y-%m-%d')
        datePattern = re.compile(r'\d{4}-\d{2}-\d{2}')
        strDate = re.findall(datePattern, strDateText)
        if len(strDate) == 1:
            if self.valid == 0 and self.compare_to_days(currentDate, strDate[0]) == 0:
                return True, currentDate
            elif self.valid == 1 and self.compare_to_days(currentDate, strDate[0]) == 1:
                return True, str(datetime.datetime.now() - datetime.timedelta(days=1))[0:10]
            elif self.valid == 2 and self.compare_to_days(currentDate, strDate[0]) == 2:
                return True, str(datetime.datetime.now() - datetime.timedelta(days=2))[0:10]
        return False, ''

    def log_print(self, msg):
        '''
        Console logging helper
        :param msg: message to log
        '''
        print '%s: %s' % (time.strftime('%Y-%m-%d %H-%M-%S'), msg)

    def scrapy_date(self):
        try:
            strsplit = '------------------------------------------------------------------------------------'
            keywordIndex = 0
            for link in self.start_urls:
                self.driver.get(link)
                # the result list is rendered inside an iframe; switch into it first
                self.driver.switch_to.frame('iframepage')
                keyword = self.keyword_list[keywordIndex]
                keywordIndex += 1
                selenium_html = self.driver.execute_script("return document.documentElement.outerHTML")
                doc = pq(selenium_html)
                infoList = []
                self.log.WriteLog(strsplit)
                self.log_print(strsplit)
                Elements = doc('h2[class="r"]')
                dateElements = doc('span[class="a"]')
                index = 0
                for element in Elements.items():
                    date = dateElements[index].text
                    index += 1
                    flag, strDate = self.date_isValid(date)
                    if flag:
                        title = element.find('a').text().encode('utf8').strip()
                        # skip titles containing the filter term
                        if title.find(self.filter) > -1:
                            continue
                        if title.find(keyword) > -1:
                            url = element.find('a').attr('href')
                            dictM = {'title': title, 'date': strDate,
                                     'url': url, 'keyword': keyword, 'introduction': title}
                            infoList.append(dictM)
                if len(infoList) > 0:
                    # visit each article page to pick up the source field
                    for item in infoList:
                        item['sourceType'] = 1
                        url = item['url']
                        self.driver.get(url)
                        # self.driver.switch_to.frame('iframepage')
                        htext = self.driver.execute_script("return document.documentElement.outerHTML")
                        dochtml = pq(htext)
                        strSources = dochtml('div[class="from"]').text().encode('utf8').strip()
                        # +9 skips the UTF-8 bytes of the '來源:' label; cut at the first space
                        txtsource = strSources[strSources.find('來源:') + 9:]
                        item['source'] = txtsource[0:txtsource.find(' ')]
                        self.log.WriteLog('title:%s' % item['title'])
                        self.log.WriteLog('url:%s' % item['url'])
                        self.log.WriteLog('date:%s' % item['date'])
                        self.log.WriteLog('source:%s' % item['source'])
                        self.log.WriteLog('kword:%s' % item['keyword'])
                        self.log.WriteLog(strsplit)
                        self.log_print('title:%s' % item['title'])
                        self.log_print('url:%s' % item['url'])
                        self.log_print('date:%s' % item['date'])
                        self.log_print('source:%s' % item['source'])
                        self.log_print('kword:%s' % item['keyword'])
                        self.log_print(strsplit)
                    self.db.SaveInformations(infoList)
        except Exception, e:
            self.log.WriteLog('lmtwSpider:' + e.message)
        finally:
            pass
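The post does not show the wiring that creates the driver and starts the spider. A hypothetical entry point, reusing the imports above (the LogFile constructor and the filter term are assumptions, not from the source):

if __name__ == '__main__':
    driver = webdriver.PhantomJS()
    log = LogFile.LogFile()  # hypothetical constructor; only WriteLog() is used above
    searchurl = 'http://www.lmtw.com/search.php?show=title%2Ckeyboard%2Cwriter&searchget=1&keyboard='
    # valid=0: collect today's articles; the filter term here is a placeholder
    spider = lmtwSpider(driver, log, ['數字', '融合', '電視'], searchurl, 0, '招聘')
    spider.scrapy_date()
    driver.quit()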
