從網頁爬取文本信息:
eg:從http://computer.swu.edu.cn/s/computer/kxyj2xsky/中爬取講座信息(講座時間和講座名稱)
注:如果要爬取的內容是多頁的話,網址一定要從第二頁開始,因為有的首頁和其他頁的網址有些區別
代碼
"""Scrape lecture records (date + title) from the SWU CS department news pages.

The listing is paginated: pages 2..4 share the URL pattern ``index_<n>.html``
(the first page uses ``index.html`` instead — see the note above the code).

NOTE(review): the original script also opened an unused pymysql connection
with a hard-coded password and fetched an unrelated demo page; both were
dead code (and a credential leak) and have been removed.
"""
import re

import requests


def get_text(url):
    """Fetch *url* and return its decoded HTML text.

    Raises requests.HTTPError on a non-2xx response.
    """
    r = requests.get(url)
    r.raise_for_status()
    # Use the encoding guessed from the body; the header-declared
    # encoding is often wrong for these pages.
    r.encoding = r.apparent_encoding
    return r.text


# Compiled once at module level instead of on every parse_html() call.
# Matches listing rows such as:
#   <li><span class="fr">[2019-07-01]</span>...計信院前沿學術報告(2019.7.1)</a></li>
# Group 1: the YYYY-MM-DD date; group 2: the lecture title.
LECTURE_RE = re.compile(
    r'<li><span\sclass="fr">\[(\d{4}-\d{2}-\d{2})\]</span>.*? (.*?)</a></li>',
    re.S,
)


def parse_html(url, list):
    """Append every (date, title) tuple found at *url* to *list*; return it.

    NOTE(review): the parameter name ``list`` shadows the builtin; it is
    kept unchanged for backward compatibility with existing callers.
    """
    demo = get_text(url)
    list.extend(LECTURE_RE.findall(demo))
    return list


def main():
    """Crawl listing pages 2-4 and print every scraped record plus a total."""
    records = []
    for page in range(2, 5):
        page_url = (
            'http://computer.swu.edu.cn/s/computer/kxyj2xsky/index_'
            + str(page) + '.html'
        )
        records = parse_html(page_url, records)
    count = 0
    for record in records:
        count = count + 1
        print(record)
    print("一共有" + str(count) + "條數據!")


if __name__ == "__main__":
    main()
輸出

數據庫連接成功! --------------------------------------------------- ('2018-11-20', '計信院前沿學術報告(2018.11-23)') ('2018-11-19', '計信院前沿學術報告(2018.11-20)') ('2018-11-15', '計信院前沿學術報告(2018.11-22)') ('2018-11-12', '計信院前沿學術報告(2018.11-14)') ('2018-11-02', '第三屆全國形式化方法與應用會議暨形式化專委年會(FMAC 2018)即將開幕') ('2018-11-01', '計信院前沿學術報告(2018.11-06)') ('2018-10-25', '計信院前沿學術報告(2018.10-31)') ('2018-10-17', '計信院前沿學術報告(2018.10-19)') ('2018-10-10', '計信院前沿學術報告(2018.10-17)') ('2018-09-26', '計信院前沿學術報告(2018.09-29)') ('2018-09-12', '計信院前沿學術報告(2018.09-18)') ('2018-09-03', '計信院前沿學術報告(2018.09-04)') ('2018-07-05', '計信院前沿學術報告(2018.07-05)') ('2018-06-28', '計信院前沿學術報告(2018.07-02)') ('2018-06-20', '第7屆華人學者知識表示與推理學術研討會') ('2018-06-19', '計信院前沿學術報告(2018-06-20)') ('2018-05-15', '計信院前沿學術報告(2018-05-16)') ('2018-05-07', '計信院前沿學術報告(2018-05-10)') ('2018-05-02', '西南大學第三屆青年學者含弘科技論壇 計算機與信息科學學院分論壇 學術報告') ('2018-04-16', '計信院前沿學術報告(2018-04-23)') ('2018-04-09', '計信院前沿學術報告(2018-04-16)') ('2018-04-04', '第四屆可信軟件系統工程(國際)春季學校Spring School on Engineering Trustworthy Software Systems') ('2018-04-02', '計信院前沿學術報告(2018-04-08)') ('2018-04-02', '計信院前沿學術報告(2018-04-02)') ('2018-03-27', '計信院前沿學術報告(2018-03-30)') ('2018-01-09', '理論計算機科學2018寒假講習班') ('2018-01-09', '計信院前沿學術報告(2018-01-11)') ('2018-01-03', '計信院前沿學術報告(2018-01-05)') ('2017-12-27', '出國訪學(留學)經驗交流') ('2017-12-27', '計信院前沿學術報告(2017-12-28)') ('2017-12-25', '計信院前沿學術報告(2017-12-28)') ('2017-12-18', '出國訪學(留學)經驗交流') ('2017-12-18', '西南大學第二屆青年學者含弘科技論壇 計算機與信息科學學院分論壇 學術報告(二)') ('2017-12-18', '西南大學第二屆青年學者含弘科技論壇 計算機與信息科學學院分論壇 學術報告(一)') ('2017-12-15', '計信院前沿學術報告(2017-12-28)') ('2017-12-15', '出國訪學(留學)經驗交流') ('2017-12-11', '計信院前沿學術報告(2017-12-13)') ('2017-11-28', '計信院前沿學術報告(2017-11-28)') ('2017-11-22', 'Third Joint Research Workshop') ('2017-11-06', '計信院前沿學術報告(2017-11-11)') ('2017-11-06', '計信院前沿學術報告(2017-11-10)') ('2017-11-06', '計信院前沿學術報告(2017-11-09)') ('2017-10-29', '計信院前沿學術報告(2017-10-30)') ('2017-10-25', '計信院前沿學術報告(2017-10-31)') ('2017-10-19', '計信院前沿學術報告(2017-10-23)') ('2017-10-17', '卑爾根-重慶網絡化系統暑期學校') 
('2017-10-12', '首屆“西南大學重要學術成果”候選成果系列報告') ('2017-09-18', '出國訪學(留學)經驗交流') ('2017-09-14', '計信院前沿學術報告2017-09-15') ('2017-09-06', '出國訪學(留學)經驗交流') ('2017-07-05', '出國訪學(留學)經驗交流') ('2017-06-21', '計信院前沿學術報告(2017-06-27)') ('2017-06-21', '計信院前沿學術報告(2017-06-26)') ('2017-06-12', '計信院出國訪學(留學)經驗交流') ('2017-05-27', '計信院前沿學術報告(2017-06-02)') ('2017-05-24', '計信院學術研討會(2017-05-28)') ('2017-05-24', '計信院前沿學術報告(2017-05-26)') ('2017-05-03', '西南大學青年學者含弘科技論壇') ('2017-04-13', '可信軟件系統工程(國際)春季學校') ('2017-04-10', '西南大學計信院前沿學術報告') ('2017-03-31', '西南大學計信院前沿學術報告') ('2017-03-31', '西南大學教師參加國際學術會議專題報告會') ('2017-03-29', '西南大學計信院前沿學術報告') ('2017-03-29', '西南大學計信院前沿學術報告') ('2017-03-29', '西南大學計信院前沿學術報告') ('2017-03-28', '出國訪學(留學)經驗交流') ('2017-03-16', '出國訪學(留學)經驗交流') ('2017-03-15', '出國訪學(留學)經驗交流') ('2017-01-10', '西南大學計信院前沿學術報告') ('2017-01-05', '學術講座') ('2017-01-04', '西南大學計信院前沿學術報告') ('2016-12-20', '理論計算機科學與形式化方法研討會') ('2016-12-20', '西南大學計信院前沿學術報告') ('2016-12-14', '西南大學計信院前沿學術報告') ('2016-12-12', '西南大學計信院前沿學術報告') 一共有75條數據! Process finished with exit code 0
從網頁中爬取圖片
eg:從https://maoyan.com/board/4?offset=10中爬取圖片,存到位置C:\Users\22725\Desktop\temp
format 格式控制函數 format函數更常見的用法其實是str.format()
示例:
>>>"{} {}".format("hello", "world") # 不設置指定位置,按默認順序 'hello world' >>> "{0} {1}".format("hello", "world") # 設置指定位置 'hello world' >>> "{1} {0} {1}".format("hello", "world") # 設置指定位置 'world hello world'
代碼:
"""Download movie poster images from the Maoyan top-100 board pages.

Scrapes the lazy-loaded poster URLs from the first two board pages and
saves each image under SAVE_DIR, named by the MD5 digest of its bytes,
so identical images are written only once.

NOTE(review): the original script imported pymysql but never used it;
that dead third-party dependency has been removed.
"""
import os
import re
from hashlib import md5

import requests

# Destination folder for the downloaded posters.
# Forward slashes are accepted by Windows paths too.
SAVE_DIR = 'C:/Users/22725/Desktop/temp'

# Compiled once at module level instead of on every parse_html() call.
# Matches the lazy-loaded poster URL inside: <img data-src="..." alt=...
IMAGE_RE = re.compile(r'img\sdata-src="(.*?)"\salt', re.S)


def get_text(url):
    """Fetch *url* and return its decoded HTML text (raises on HTTP errors)."""
    r = requests.get(url)
    r.raise_for_status()
    # Prefer the encoding guessed from the body over the header value.
    r.encoding = r.apparent_encoding
    return r.text


def parse_html(url, list):
    """Append every poster-image URL found at *url* to *list*; return it.

    NOTE(review): the parameter name ``list`` shadows the builtin; it is
    kept unchanged for backward compatibility with existing callers.
    """
    demo = get_text(url)
    list.extend(IMAGE_RE.findall(demo))
    return list


def save_image(content):
    """Write raw image bytes *content* into SAVE_DIR, skipping duplicates.

    The file name is the MD5 hex digest of the bytes, so the same image
    always maps to the same path and is only written the first time.
    (The original comments on this pair of functions were swapped —
    this one saves, download_image() fetches.)
    """
    file_path = '{0}/{1}.{2}'.format(SAVE_DIR, md5(content).hexdigest(), 'jpg')
    if not os.path.exists(file_path):
        # 'with' closes the file even on error; no explicit close() needed.
        with open(file_path, 'wb') as f:
            f.write(content)


def download_image(url):
    """Download the image at *url* and persist it via save_image()."""
    r = requests.get(url)
    r.raise_for_status()
    save_image(r.content)


def main():
    """Scrape board pages at offsets 0 and 10, list the links, download all."""
    links = []
    for i in range(0, 2):  # half-open range [0, 2) -> offsets 0 and 10
        links = parse_html('https://maoyan.com/board/4?offset=' + str(10 * i), links)
    count = 0
    for link in links:
        count = count + 1
        print(link)  # echo each image link
    print("一共有" + str(count) + "條數據!")
    for link in links:
        download_image(link)
    print("下載成功")


if __name__ == "__main__":
    main()
輸出:

C:\Users\22725\PycharmProjects\A\venv\Scripts\python.exe C:/Users/22725/.PyCharmCE2019.1/config/scratches/scratch.py https://p1.meituan.net/movie/20803f59291c47e1e116c11963ce019e68711.jpg@160w_220h_1e_1c https://p0.meituan.net/movie/283292171619cdfd5b240c8fd093f1eb255670.jpg@160w_220h_1e_1c https://p0.meituan.net/movie/289f98ceaa8a0ae737d3dc01cd05ab052213631.jpg@160w_220h_1e_1c https://p1.meituan.net/movie/6bea9af4524dfbd0b668eaa7e187c3df767253.jpg@160w_220h_1e_1c https://p1.meituan.net/movie/b607fba7513e7f15eab170aac1e1400d878112.jpg@160w_220h_1e_1c https://p0.meituan.net/movie/da64660f82b98cdc1b8a3804e69609e041108.jpg@160w_220h_1e_1c https://p0.meituan.net/movie/46c29a8b8d8424bdda7715e6fd779c66235684.jpg@160w_220h_1e_1c https://p0.meituan.net/movie/223c3e186db3ab4ea3bb14508c709400427933.jpg@160w_220h_1e_1c https://p1.meituan.net/movie/ba1ed511668402605ed369350ab779d6319397.jpg@160w_220h_1e_1c https://p0.meituan.net/movie/b0d986a8bf89278afbb19f6abaef70f31206570.jpg@160w_220h_1e_1c https://p1.meituan.net/movie/18e3191039d5e71562477659301f04aa61905.jpg@160w_220h_1e_1c https://p1.meituan.net/movie/14a7b337e8063e3ce05a5993ed80176b74208.jpg@160w_220h_1e_1c https://p1.meituan.net/movie/6bc004d57358ee6875faa5e9a1239140128550.jpg@160w_220h_1e_1c https://p0.meituan.net/movie/ae7245920d95c03765fe1615f3a1fe3865785.jpg@160w_220h_1e_1c https://p1.meituan.net/movie/6a964e9cee699267053bd6a4bf6f2671195394.jpg@160w_220h_1e_1c https://p0.meituan.net/movie/43d259ecbcd53e8bbe902632772281d6327525.jpg@160w_220h_1e_1c https://p0.meituan.net/movie/2bbaa395c825920fac397706369746e4300686.jpg@160w_220h_1e_1c https://p1.meituan.net/movie/ad974d3527879f00be2eec29135118163728582.jpg@160w_220h_1e_1c https://p0.meituan.net/movie/932bdfbef5be3543e6b136246aeb99b8123736.jpg@160w_220h_1e_1c https://p1.meituan.net/movie/aacb9ed2a6601bfe515ef0970add1715623792.jpg@160w_220h_1e_1c 一共有20條數據! 下載成功 Process finished with exit code 0