Case 1: Scraping user reviews of a Huawei phone from JD.com
This case study follows a video tutorial by a Bilibili creator, whose lessons started me on my web-scraping journey: https://www.bilibili.com/video/BV1Yt4y1Y7nt?t=3456. It scrapes user review data from the JD.com product page of a Huawei phone, so that the phone can be analysed from the users' perspective across dimensions such as performance, quality, and price.
import requests
import json

class Jdcomment_spider():
    def __init__(self, file_name='jingdong_comment'):
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36'}
        # Open the output file
        self.fp = open(f'./{file_name}.txt', 'w', encoding='utf-8')
        print(f'Spider started, opened {file_name}.txt!')

    def parse_one_page(self, url):
        # URL of the JD review API; the callback suffix must be stripped so the response is plain JSON, e.g.:
        # url = 'https://club.jd.com/comment/productPageComments.action?productId=10025237646790&score=0&sortType=5&page=0&pageSize=10&isShadowSku=0&fold=1'
        response = requests.get(url, headers=self.headers)
        # print(response.text)
        # print(response.request.headers)  # shows what the server is told about the client (python-requests)
        # Method 1: convert the JSON string into a dict
        js_data = json.loads(response.text, strict=False)
        # print(type(js_data))
        # Method 2:
        # js_data = response.json()
        # Extract the fields
        comment_list = js_data['comments']
        # print(comment_list)
        for comment in comment_list:
            # review id
            goods_id = comment.get('id')
            # reviewer nickname
            nickname = comment.get('nickname')
            print(nickname)
            # rating
            score = comment.get('score')
            # product model
            productSize = comment.get('productSize')
            # product colour
            productColor = comment.get('productColor')
            # review time
            creationTime = comment.get('creationTime')
            # review text
            content = comment.get('content')
            # replace newlines with spaces (or split the text instead)
            content = content.replace('\n', ' ')
            print(content)
            # Write one tab-separated record
            self.fp.write(f'{goods_id}\t{nickname}\t{score}\t{productSize}\t{productColor}\t{creationTime}\t{content}\n')

    def parse_max_page(self):
        for page_num in range(70):
            print(f'Crawling page {page_num}')
            url = f'https://club.jd.com/comment/productPageComments.action?productId=10025237646790&score=0&sortType=5&page={page_num}&pageSize=10&isShadowSku=0&fold=1'
            self.parse_one_page(url=url)

    def close_files(self):
        self.fp.close()
        print('Spider finished, file closed!')

if __name__ == '__main__':
    # Instantiate the spider
    jd_spider = Jdcomment_spider()
    # Crawl every page, then close the file
    jd_spider.parse_max_page()
    jd_spider.close_files()
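Each review is written as one tab-separated line, so the resulting file drops straight into pandas for the analysis mentioned above. A minimal sketch of that next step (the English column names are my own labels; it assumes the default jingdong_comment.txt output and the field order the spider writes):

import pandas as pd

# Field order matches the f-string the spider writes above
cols = ['review_id', 'nickname', 'score', 'product_size', 'product_color', 'creation_time', 'content']
df = pd.read_csv('./jingdong_comment.txt', sep='\t', names=cols, quoting=3)  # quoting=3 = csv.QUOTE_NONE

print(df['score'].mean())                       # average rating
print(df['score'].value_counts().sort_index())  # rating distribution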
Case 2: Downloading academic papers from Baidu Scholar with a scraper
This case study retrieves and downloads papers by combining Baidu Scholar with Sci-Hub. The main steps: build the Baidu Scholar URL for a keyword search; for each paper page linked from the results, read the page source and locate the DOI (extracted with a regular expression); then append the DOI to a Sci-Hub search URL to obtain the PDF download address.
# Imports
import requests
import re

# Build the Baidu Scholar search URL for a keyword
def get_url(key):
    url = 'https://xueshu.baidu.com/s?wd=' + key + '&rsv_bp=0&tn=SE_baiduxueshu_c1gjeupa&rsv_spt=3&ie=utf-8&f=8&rsv_sug2=0&sc_f_para=sc_tasktype%3D%7BfirstSimpleSearch%7D'
    return url

# Request headers (User-Agent plus session cookies) to get past the anti-scraping checks
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
           'cookie':'PSTM=1566269439; BIDUPSID=3E682072B0A8C093085B76FBCE0C034D; MCITY=-%3A; BAIDUID=320C35B2412D12FCFA87BEAAE26FAC75:FG=1; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; __yjs_duid=1_ecde685c2a213f89118f49f95351d0131616728035849; BDSFRCVID_BFESS=Bc-OJeC624mAqbveVwGaU7iYMxe-PnbTH6aoaUI4HUrI-lBClSPbEG0P_f8g0Ku-jgOsogKKyeOTHu8F_2uxOjjg8UtVJeC6EG0Ptf8g0M5; H_BDCLCKID_SF_BFESS=tJkt_K-5JKvjD4-k247Hhn8thmT22-us0DAL2hcH0KLKMb6qKt5-bqKWQUQPt-ckB6b-sxJ8Kfb1MRjv-qozjMkAK4uL2UjmaN7T3q5TtUJreCnTDMRhqtIsXpbyKMniMCT9-pnafpQrh459XP68bTkA5bjZKxtq3mkjbPbDfn028DKu-n5jHj5XeH0D3q; delPer=0; PSINO=5; BD_HOME=0; BD_CK_SAM=1; antispam_key_id=45; antispam_site=ae_xueshu_paper; BDRCVFR[A88o6x7IGkt]=mk3SLVN4HKm; ab_sr=1.0.0_MWE3NGYyMTgxMjY0ZGM2NTcxNDAwMjVjZmNiOWU3YzIwNDA4OWNmZmNlNmM4NWUyZmZkNDVmN2E1OTZjOGZkMWFiNGFjYTU4Yzg4NTEyMDRkYTkzZTJlYTg3OTU0NTdl; antispam_data=fba5ca43ae000a429d092bba6e092ef3cf7c4c117f92e52dfe3260bb55855d667471475559fa8e05dc8e013a6316afd57f176fcab9710d0fe3eefb2f7799e44a25af15c58c1aae998deb0b9cf008b74e9e8d346b4156cdb351b74869e25b2990; antispam_sign=adb02c19; BA_HECTOR=aka0a52gah2504agvt1g68d0h0r; H_PS_PSSID=; Hm_lvt_43115ae30293b511088d3cbe41ec099c=1617162379,1617179509,1617179667; Hm_lpvt_43115ae30293b511088d3cbe41ec099c=1617179667; Hm_lvt_f28578486a5410f35e6fbd0da5361e5f=1617162379,1617179509,1617179667; Hm_lpvt_f28578486a5410f35e6fbd0da5361e5f=1617179667; BDRCVFR[w2jhEs_Zudc]=mk3SLVN4HKm; BDSVRTM=173'}

# Extract each paper's DOI
def get_paper_link(headers, key):
    response = requests.get(url=get_url(key), headers=headers)
    data = response.text
    paper_link = re.findall(r'<h3 class=\"t c_font\">\n + \n + <a href=\"(.*)\"', data)  # the capture group is the paper's URL
    doi_list = []  # collect the DOIs here
    for link in paper_link:
        full_link = 'http:' + link
        response2 = requests.get(url=full_link, headers=headers)
        res_data = response2.text
        try:
            paper_doi = re.findall(r'\'doi\'}\">\n +(.*?)\n ', res_data)
            if str(10) in paper_doi[0]:  # DOIs start with '10.'
                doi_list.append(paper_doi)
        except:
            pass
    return doi_list

# Build the Sci-Hub download link and save the PDF
def doi_download(headers, key):
    doi_list = get_paper_link(headers, key)
    for doi in doi_list:
        doi_link = "https://sci-hub.tf/" + doi[0]
        print(doi_link)

        if 'https:' not in doi_link:
            doi_link = 'https:' + doi_link
        res = requests.get(url=doi_link, headers=headers)
        down_link = re.findall('<iframe.*?src="(.*?)" id=.*?<\/iframe>', res.text)[0]  # the embedded PDF viewer holds the file URL
        print(down_link)
        r = requests.get(url=down_link, headers=headers)
        path = doi_link.split('/')[-1] + '.pdf'
        with open(path, 'wb') as f:
            f.write(r.content)
        print('Download finished: ' + doi_link.split('/')[-1])

key = input('Enter keywords for the paper to download: ')
doi_download(headers=headers, key=key)
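One caveat: the two regexes above are tied to the exact whitespace of Baidu Scholar's current markup, so any layout change breaks them. A more tolerant approach is to match the DOI string itself. This is a sketch, not part of the original tutorial; the pattern is my assumption of what typical DOIs ('10.<registrant>/<suffix>') look like:

import re

# Matches the common '10.xxxx/suffix' DOI shape anywhere in the page source
DOI_RE = re.compile(r'\b(10\.\d{4,9}/[-._;()/:A-Za-z0-9]+)\b')

def find_dois(html):
    # Deduplicate while preserving the order of first appearance
    seen, dois = set(), []
    for doi in DOI_RE.findall(html):
        if doi not in seen:
            seen.add(doi)
            dois.append(doi)
    return dois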
Case 3: Collecting and analysing job listings from Zhaopin
This case study collects job-listing data for the Shanghai area from the Zhaopin recruitment site: position name, salary range, district, required years of experience, education requirement, and the hiring company's name, type, and size. It then runs some simple processing and analysis on the results.
import requests
import re
import openpyxl
import time

head = {
    'cookie': "x-zp-client-id=780a3405-a5d4-4890-92e1-4664829ca846; sts_deviceid=17861c0ec7262d-0f5a8f3ca09458-7373e61-2073600-17861c0ec73103; adfcid2=none; adfbid2=0; LastCity=%E4%B8%8A%E6%B5%B7; LastCity%5Fid=538; FSSBBIl1UgzbN7N443S=25F.0sssQmVDwwCXGqZv2j9BDZfsjKtlMZTD.MdZsUai9uY_xSd8vUXpVziT_BAC; locationInfo_search={%22code%22:%22576%22%2C%22name%22:%22%E5%A4%AA%E5%8E%9F%22%2C%22message%22:%22%E5%8C%B9%E9%85%8D%E5%88%B0%E5%B8%82%E7%BA%A7%E7%BC%96%E7%A0%81%22}; _uab_collina=161654777909311187692832; at=081be913be4f467dadafa0c178630d50; rt=fc2e0c878e01427b82b32427e4db0ac2; selectCity_search=538; ssxmod_itna=eqjxgDyD0Q3WwxGq0dD=wOEBCGOY3Kat7DRAmx0yGueGzDAxn40iDt=oHPhzFSAY4+WDMm20EaxajfRXWKKeOcGbLQn4qGLDmKDySW3w7DxOq0rD74irDDxD3DbRdDSDWKD9zqi3DEnKGfDDoDYf6uDitD4qDBGhdDKqGg8wGtWA=4g4rMGmUtYCGx8qDMmeGXCBWQOeaaaAXWtqGyIPGu0uU9IqbDCO+bfYpGvDp4IAwh57hbmG53SDhr+7mb+DwtfBhGODxw0Dp0xDfxQABQeD; ssxmod_itna2=eqjxgDyD0Q3WwxGq0dD=wOEBCGOY3Kat7DRADnKSiW5Dsp+DLnaKou/F+dsBtYThjTV43uGLtKrBRhQD6DQ+deiat6hcwV9Zm09TiXhnKnxDO92GOpQwCMOop=Caxz9uc1/WoRt0yhjfeRx7UxYS3xA3m2xWD7QPCxGcDiQPeD==; urlfrom=121122523; urlfrom2=121122523; adfbid=0; sts_sg=1; sts_sid=17872e8425f2d9-0783ac3ef1514f-7373e61-2073600-17872e8426044b; sts_chnlsid=121122523; zp_src_url=https%3A%2F%2Fwww.baidu.com%2Fbaidu.php%3Fsc.Kf0000K5cNxA6dzipIkSprEwAQaOYa8tX4WYiAxDWD7I29PDuPkPYZM9hefcGb51rnV7AXHQrtltBH905i_cjRrjyPVYD9Dko0iYc7c3dh3W2rhUXbyDD-pqP_45d4QlbcX_MOIflgmOJ_cm8Pe-FSnpRnSJzRVxWYIuQg_VotdiIxV7tPSoZXhX5kA6e_IvJm6mVHyUZDGcES8kLzQBdcDN9unE.DD_NR2Ar5Od669BCXgjRzeASFDZtwhUVHf632MRRt_Q_DNKnLeMX5DkgboozuPvHWdsHRy2J7jZZOlsfRymoM4EQ9JuIWxDBaurGtIKnLxKfYt_U_DY2yQvTyjtLsqT7jHzlRL5spy59OPt5gKfYtVKnv-WF_tU2lSMkl32AM-9I7fH7fmCuX8a9G4myIrP-SJFWZWlkLfYXLDkexd8WoLurAOtxbOveMmOUSENOoRojPakgkvUSkf.U1Yk0ZDqd_xKJVgfkoWPSPx8YnQNYnp30ZKGm1Ys0Zfqd_xKJVgfkoWPSPx8YnQNYnp30A-V5HczPfKM5yqbXWD0Iybqmh7GuZR0TA-b5Hcv0APGujYznHf0UgfqnH0krNtknjDLg1csPH7xnH0YP7tknjc1g1nvnjD0pvbqn0KzIjYvPW00mhbqnHR3g1csP7tznHIxPH010AdW5HDsnj7xnH63rjRdrj6dP7tznjRkg1Dsn-tkg100TgKGujYs0Z7Wpyfqn0KzuLw9u1Ys0A7B5HKxn0K-ThTqn0KsTjYs0A4vTjYsQW0snj0snj0s0AdYTjYs0AwbUL0qn0KzpWYk0Aw-IWdsmsKhIjYs0ZKC5H00ULnqn0KBI1Ykn0K8IjYs0ZPl5fK9TdqGuAnqTZnVmvY0pywW5R9affKYmgFMugfqPWPxn7tkPH00IZN15H6kPH6Ln10LPHm1njTdPWRLrH00ThNkIjYkPWDvrjndPHcdnHfk0ZPGujdWuHnYm1bLnH0snj9bn1NW0AP1UHY3P1uKnj9jwbNAfHPKnDFK0A7W5HD0TA3qn0KkUgfqn0KkUgnqn0KlIjYs0AdWgvuzUvYqn7tsg1KxnH0YP-tsg100uA78IyF-gLK_my4GuZnqn7tsg1KxnH63nHm4rNtsg100TA7Ygvu_myTqn0Kbmv-b5H00ugwGujYVnfK9TLKWm1Ys0ZNspy4Wm1Ys0Z7VuWYs0AuWIgfqn0KGTvP_5H00XMK_Ignqn0K9uAu_myTqnfK_uhnqn0KbmvPb5H0knRR1rHPanbfkwbP7fWD1wWT1PY7Dn1TvnDNjwjnz0Zwzmyw-5HTvnjcsn6KBuA-b5HnznDn1PRczPjDzwjDzwbPAfH6LfbNKfRRzPbuKf1Td0AqW5HD0mMfqn0KEmgwL5H00ULfqnfKETMKY5HDWnan1c1cWnWR3rHc1nWfWnWDsnanznH0sQW0snj0snankc1cWnanVc108nj0snj0sc1D8nj0snj0s0Z91IZRqnWTdP1fLPsKkgLmqna34PdtsQW0sg108njKxna34n7tsQW61g108n1Pxna3zn7tknW60mMPxTZFEuA-b5H00pgPxmLK95H00mL0qn0K-TLfqn0KWThnqPHcvrjT%26xst%3Dm1YsnH77n1b1fWFDnRujwRcknYmLn1IKwjnLPWK7fYf1n67B5HnznDn1PRczPjDzwjDzwbPAfH6LfbNKfRRzPbuKf1Td0gnqnHf3PH6LnWcdPHTLP1DkP1fLPj9xnWcdg10KI1LyktAJdIjA8nL3dSefsVgfko6KTHLyktAJdIjA8nL3dSefsVgfko6KIHYzP1RLPjTL0gfqnHmkPW61PHRzPf7VTHYs0W0aQf7WpjdhmdqsmsD1PWmzrjb4Pj0z%26word%3D%26ck%3D6335.3.86.234.150.183.138.217%26shh%3Dwww.baidu.com%26sht%3Dbaidu%26us%3D1.0.1.0.1.301.0%26wd%3D%26bc%3D110101; ZP_OLD_FLAG=false; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%221098833668%22%2C%22first_id%22%3A%2217861c0ed597c3-038f16a76b7241-7373e61-2073600-17861c0ed5aae6%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_utm_source%22%3A%22baiduPC%22%2C%22%24latest_utm_medium%22%3A%22CPC%22%2C%22%24latest_utm_campaign%22%3A%22pp%22%2C%22%24latest_utm_content%22%3A%22tj%22%2C%22%24latest_utm_term%22%3A%2228701167%22%7D%2C%22%24device_id%22%3A%2217861c0ed597c3-038f16a76b7241-7373e61-2073600-17861c0ed5aae6%22%7D; Hm_lvt_38ba284938d5eddca645bb5e02a02006=1616547737,1616835569; sts_evtseq=2; ZL_REPORT_GLOBAL={%22/resume/new%22:{%22actionid%22:%220581e348-6ff3-47a2-9c46-adc4b33a299e%22%2C%22funczone%22:%22addrsm_ok_rcm%22}}; acw_tc=2760828b16168355886785280e5895223771a1826f480f902918aba10f19b7; d4d6cd0b4a19fa72b8cc377185129bb7=f2d20fdf-aca1-47d5-9419-68a1546651e1; zpfe_probe_token=7e4632aes7eed04bb5852d2b16abd20f2480; Hm_lpvt_38ba284938d5eddca645bb5e02a02006=1616835599; FSSBBIl1UgzbN7N443T=5yfdFrb7rjkmpYEstB19g1UVEj9YDb4LJm9L80MbHBMebRO_SeWcfjNo5j.peEtPAeKPKHUGCLZe28BXFiOi3vQYUGrsICs34JIGqAiY72SywAP0Gs.QTTm8iMdbgqKIKKHnuiMJUeztKS64vZRt6g2PeHj1hkinnFErcamuUhf7EBxP34L9oXRYLCdIRAixDuMhcTOwhxVurVUlQmvGOZ1tiflELTVw..OLkO8esROa.LEKP8AvE_ANpsReRVNz4RveDVngNgZTZ1Zq0fYffsv76AJkEmQuylNIF14LjxqNdVHjOHzBu7TFrL08ID1U3_515gBgJ5Gd3cw_6g2aXhXJh3WXlAFmWZe9NLyS.eplb6F9BK.J59jxPd.4XEzRq0La",
    'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36"
}

def reqdata():
    position_name_data = []
    wage_range_data = []
    region_data = []
    working_years_data = []
    education_requirements_data = []
    enterprise_name_data = []
    enterprise_nature_data = []
    enterprise_scale_data = []
    all_list = []
    # One search URL per Shanghai district code (re=...) per results page (p=1..9)
    region_codes = [2029, 2036, 2035, 2026, 2019, 2030, 2023, 2032, 2028, 2031, 2024, 2034, 2033, 2021, 2027, 2022]
    for num in range(1, 10):
        for code in region_codes:
            all_list.append('https://sou.zhaopin.com/?jl=538&p={}&re={}'.format(num, code))
    for url in all_list:
        time.sleep(1)
        print('----downloading----', url)
        res = requests.get(url, headers=head).text.replace('\xa0', '')  # strip non-breaking spaces
        # Position name
        position_name = re.findall('(?<=jobname"><span title=")(.+?)(?=" class="iteminfo)', res)
        for a in position_name:
            position_name_data.append(a)
        # Salary range
        wage_r = re.findall('(?<=<p class="iteminfo__line2__jobdesc__salary">)(.+?)(?=<!----></p>)', res, re.S)
        wage_range = []
        for i in wage_r:
            j = i.strip()
            wage_range.append(j)
        for b in wage_range:
            wage_range_data.append(b)
        # District
        region = re.findall('(?<=<li class="iteminfo__line2__jobdesc__demand__item">)(.+?)(?=</li> <li class=")', res)[::2]
        for c in region:
            region_data.append(c)
        # Years of experience
        working_years = re.findall('(?<=<li class="iteminfo__line2__jobdesc__demand__item">)(.+?)(?=</li> <li class=")', res)[1::2]
        for d in working_years:
            working_years_data.append(d)
        # Education requirement
        education_requirements = re.findall('(?<=<li class="iteminfo__line2__jobdesc__demand__item">)(.+?)(?=</li>)', res)[2::3]
        for e in education_requirements:
            education_requirements_data.append(e)
        # Company name (two-step: first grab the alt-text spans, then the title attribute)
        erro_name = re.findall('(?<=" alt=")(.+?)(?=</span>)', res)
        enterprise_name = re.findall('(?<=<span title=")(.+?)(?=" class="iteminfo)', str(erro_name))
        for f in enterprise_name:
            enterprise_name_data.append(f)
        # Company type
        enterprise_nature = re.findall('(?<=<span class="iteminfo__line2__compdesc__item">)(.+?)(?=</span>)', res)[::2]
        for g in enterprise_nature:
            enterprise_nature_data.append(g)
        # Company size
        enterprise_scale = re.findall('(?<=<span class="iteminfo__line2__compdesc__item">)(.+?)(?=</span>)', res)[1::2]
        for h in enterprise_scale:
            enterprise_scale_data.append(h)
    return position_name_data, wage_range_data, region_data, working_years_data, education_requirements_data, enterprise_name_data, enterprise_nature_data, enterprise_scale_data

if __name__ == '__main__':
    (a1, a2, a3, a4, a5, a6, a7, a8) = reqdata()
    work = openpyxl.Workbook()
    wke = work.create_sheet(index=0, title='招聘信息')
    wke.cell(row=1, column=1).value = '職位名稱'
    wke.cell(row=1, column=2).value = '薪資范圍'
    wke.cell(row=1, column=3).value = '地區'
    wke.cell(row=1, column=4).value = '工作年限'
    wke.cell(row=1, column=5).value = '學歷要求'
    wke.cell(row=1, column=6).value = '企業名稱'
    wke.cell(row=1, column=7).value = '企業性質'
    wke.cell(row=1, column=8).value = '企業規模'
    # zip stops at the shortest list; data rows start at row 2, below the header
    for b1, b2, b3, b4, b5, b6, b7, b8, i in zip(a1, a2, a3, a4, a5, a6, a7, a8, range(2, 5000)):
        wke.cell(row=i, column=1).value = b1
        wke.cell(row=i, column=2).value = b2
        wke.cell(row=i, column=3).value = b3
        wke.cell(row=i, column=4).value = b4
        wke.cell(row=i, column=5).value = b5
        wke.cell(row=i, column=6).value = b6
        wke.cell(row=i, column=7).value = b7
        wke.cell(row=i, column=8).value = b8
    work.save('./招聘信息匯總表.xlsx')
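Filling the worksheet cell by cell works, but pandas can do the same export in a few lines. A sketch under the assumption that reqdata() returns the eight lists in the column order above (the lists are trimmed to the shortest one, which is also what the zip loop above effectively does):

import pandas as pd

columns = ['職位名稱', '薪資范圍', '地區', '工作年限', '學歷要求', '企業名稱', '企業性質', '企業規模']
data = dict(zip(columns, reqdata()))
n = min(len(v) for v in data.values())          # align the columns
df = pd.DataFrame({k: v[:n] for k, v in data.items()})
df.to_excel('./招聘信息匯總表.xlsx', sheet_name='招聘信息', index=False)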
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl

# Tally how many listings fall in each Shanghai district
regions = list(pd.read_excel(r'招聘信息匯總表.xlsx')['地區'])
districts = ['上海','上海-松江區','上海-徐匯區','上海-長寧區','上海-普陀區','上海-虹口區','上海-崇明區','上海-楊浦區','上海-金山區','上海-黃浦區','上海-閔行區','上海-寶山區','上海-嘉定區','上海-浦東新區','上海-青浦區','上海-靜安區','上海-奉賢區']
counts = [regions.count(district) for district in districts]

# Enable Chinese glyphs in the plot
mpl.rcParams['font.family'] = 'SimHei'
# Figure size in inches at 100 dpi
plt.figure(figsize=(9, 6), dpi=100)
plt.axes(aspect='equal')  # keep the pie a true circle
color = ['red','y','c','b','cyan','#FF69B4','#FFB6C1','#6B4226','yellow','#E47833','greenyellow','#545454','#FF00FF','#32CD99','#00FFFF','#545454','#B5A642']
# Draw the pie chart
# x: the counts; labels: slice labels; colors: custom colours
# autopct: percentage format, two decimals; shadow: adds a 3D look
# startangle: rotates the pie; labeldistance: label distance from the centre
plt.pie(counts, labels=districts, colors=color, autopct='%.2f%%', shadow=True, startangle=30, labeldistance=1.1)
plt.title('職位地區分布餅狀圖', fontsize=15)
plt.savefig(fname="職位地區分布餅狀圖.png")
plt.show()
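An alternative worth noting: pandas can produce the label/count pairs in one call, which also picks up any district missing from the hand-written list. A sketch assuming the same Excel file and '地區' column:

import pandas as pd
import matplotlib.pyplot as plt

region_counts = pd.read_excel(r'招聘信息匯總表.xlsx')['地區'].value_counts()
# index and values stay aligned, so labels can never drift from their counts
plt.pie(region_counts.values, labels=region_counts.index, autopct='%.2f%%')
plt.show()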
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter

plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']  # needed so Chinese labels render correctly
df = pd.read_excel(r"招聘信息匯總表.xlsx")
counts = df['工作年限'].value_counts().sort_index()
skill = [f'{m}' for m in counts.index]  # x labels, kept in the same order as the counts

plt.figure(figsize=(12, 5), dpi=100)
# Set the plot style
plt.style.use('ggplot')
plt.plot(skill[::-1], counts[::-1], linewidth=8, color='y', marker='o',
         markerfacecolor='blue', markersize=12)

# Relabel the y axis as percentages: tick position n is shown as n*5%
def to_percent(temp, position):
    return '%1.0f' % (5 * position) + '%'
plt.gca().yaxis.set_major_formatter(FuncFormatter(to_percent))

plt.title('工作年限要求折線圖')
plt.xlabel('工作年限')
plt.ylabel('所占百分比')
plt.savefig(fname="工作年限要求折線圖.png")
plt.show()
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.ticker import FuncFormatter

plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']  # needed so Chinese labels render correctly
df = pd.read_excel(r"招聘信息匯總表.xlsx")
skill_count = df['學歷要求'].value_counts()
skill = [f'{m}' for m in skill_count.index]  # the distinct education levels
counts = skill_count.values.tolist()  # the matching listing counts
# Enable Chinese glyphs in the plot
mpl.rcParams['font.family'] = 'SimHei'
# Figure size in inches at 100 dpi
plt.figure(figsize=(9, 6), dpi=100)
# Draw a horizontal bar chart
plt.barh(skill[::-1], counts[::-1], height=0.5, color='#4169E1')
plt.title('學歷要求柱狀圖')
plt.xlabel('人數所占百分比')

# Relabel the x axis as percentages (the 0.02 factor is tied to this sample size)
def to_percent(temp, position):
    return '%1.0f' % (0.02 * temp) + '%'
plt.gca().xaxis.set_major_formatter(FuncFormatter(to_percent))
plt.yticks(['學歷不限','初中及以下','中專/中技','高中','大專','本科','碩士','MBA/EMBA','博士'])
plt.savefig(fname="學歷要求柱狀圖.png")
plt.show()
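Both to_percent formatters above hard-code scale factors (5 and 0.02) that are only correct for one particular sample size. A sketch that sidesteps the formatter entirely by computing the shares up front with value_counts(normalize=True) (same file and column assumed):

import pandas as pd
import matplotlib.pyplot as plt

# Percentage of listings per education level, computed directly from the data
edu_share = pd.read_excel(r'招聘信息匯總表.xlsx')['學歷要求'].value_counts(normalize=True) * 100
plt.barh(edu_share.index[::-1], edu_share.values[::-1], height=0.5, color='#4169E1')
plt.xlabel('人數所占百分比')
plt.show()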