import requests
import time
import random

# Target pages to request in rotation
url = ['http://cq.srx123.com/',
       'http://cq.srx123.com/article.php',
       'http://cq.srx123.com/yszc.php?act=k',
       'http://cq.srx123.com/download.php']

# User-Agent strings to alternate between
head = ['Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0']

# Optional pool of proxy IPs (currently disabled)
# DaiLi = ['58.218.92.68:2360', '58.218.92.72:6156', '58.218.92.78:5424',
#          '58.218.92.74:4716', '58.218.92.74:9387', '58.218.92.78:2863',
#          '58.218.92.68:8890', '58.218.92.77:2867', '58.218.92.77:8749',
#          '58.218.92.73:7463', '58.218.92.78:3749', '58.218.92.68:9321',
#          '58.218.92.75:4647', '58.218.92.73:6601', '58.218.92.74:4077',
#          '58.218.92.69:4815', '58.218.92.68:3761', '58.218.92.78:3447']

ShuLiang = 1  # running count of requests made
for i in range(len(url)):
    for Tou in range(len(head)):
        headers = {"User-Agent": head[Tou]}  # build the request headers
        # for Dai in range(len(DaiLi)):
        #     proxies = {"http": "http://" + DaiLi[Dai]}  # build the proxy in the format requests expects
        # When using a proxy, also pass proxies=proxies to requests.get()
        response = requests.get(url[i], headers=headers, timeout=10)
        if response.status_code == 200:
            print('Request ' + str(ShuLiang) + ' succeeded')
            ShuLiang += 1
            DengDai = random.randint(0, 99)  # wait a random number of seconds before the next request
            print(DengDai)
            time.sleep(DengDai)
        else:
            print('Request failed')
            ShuLiang += 1
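For reference, here is a minimal sketch of how the commented-out proxy rotation above could be wired in. It assumes the DaiLi list of host:port strings from the listing and simply picks one entry at random per request; the two addresses shown are sample entries from that pool, not verified live proxies.

import random
import requests

DaiLi = ['58.218.92.68:2360', '58.218.92.72:6156']  # sample entries from the pool above

proxy = random.choice(DaiLi)            # pick a proxy at random for this request
proxies = {"http": "http://" + proxy}   # requests expects a scheme-to-URL mapping
response = requests.get('http://cq.srx123.com/',
                        headers={"User-Agent": "Mozilla/5.0"},
                        proxies=proxies,
                        timeout=10)
print(response.status_code, 'via proxy', proxy)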
Recently, for one of our major courses, the teacher assigned us a website-operation project whose grade is based on the site's traffic, so I put together the Python program above (partial code) to generate visits.
The program works, but it still has some problems: if it hits the site too quickly, the server may treat it as a DDoS attack and block it, or the traffic counter may discard the visits as arriving too fast, and so on.
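One way to soften that, sketched below, is to back off exponentially once the server starts pushing back instead of continuing at a fixed pace. This is a general retry pattern, not something from the original program; the retry count and delays are assumptions and have not been tested against this site.

import time
import requests

def fetch_with_backoff(target, headers, retries=5):
    # Retry a GET with exponentially growing pauses when the server rejects us.
    delay = 1
    for attempt in range(retries):
        try:
            response = requests.get(target, headers=headers, timeout=10)
            if response.status_code == 200:
                return response
            # Non-200 answers (e.g. 403 or 429) often mean "slow down"
        except requests.RequestException:
            pass  # treat timeouts and connection resets the same as a rejection
        time.sleep(delay)
        delay *= 2  # 1s, 2s, 4s, 8s, 16s
    return None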
A personal suggestion: before running something like this, check whether the target site allows crawler access at all, and mind the usual ethics of web scraping; you can read the site's stated crawler restrictions (its robots.txt) and limit your requests to the paths it permits.
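Python's standard library can perform that check directly. Below is a small sketch using urllib.robotparser to ask whether a given page may be fetched; it assumes the site serves a robots.txt at the usual location, and '*' stands for a generic crawler user agent.

from urllib.robotparser import RobotFileParser

rp = RobotFileParser()
rp.set_url('http://cq.srx123.com/robots.txt')  # the site's crawler policy file
rp.read()

# Ask whether a generic crawler may fetch a specific page
allowed = rp.can_fetch('*', 'http://cq.srx123.com/article.php')
print('allowed' if allowed else 'disallowed')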