Scraping Job Listings from 51job (前程無憂) with Python


I. Background

Fresh graduates are often unsure how pay and benefits differ from one position to another. To have a reference for comparing offers after graduation, I decided to collect job-listing data and shop around.

 

1. Data source

前程無憂 / 51job (https://www.51job.com/)

2. Content scraped

The scraped fields include the job title, company name, location, salary, education requirement, and posting date.

 

II. Scraping Steps

1. Required packages

import urllib.request
import xlwt
import re
import urllib.parse
import time
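The first four imports are from the standard library; the packages used later for Excel output and for the charts are third-party. Assuming a standard pip setup (the command below is not part of the original post), they can be installed with:

# pip install xlwt xlrd pandas matplotlib "pyecharts<1"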

 

 

2. Open the 51job site and search for the position you are interested in

 

3. Open the browser's developer tools

 

4. Simulate a browser

header={
    'Host':'search.51job.com',
    'Upgrade-Insecure-Requests':'1',
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
}
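Note that in the script below this header dict is defined but the request is made with urlopen on the bare URL, so the headers are never actually sent. If you want the crawler to identify itself as a browser, one way (a sketch of the presumed intent, not the original author's code) is to wrap the URL in urllib.request.Request:

# Sketch (assumption): attach the header dict so the request carries the browser User-Agent.
url = 'https://search.51job.com/list/000000,000000,0000,00,9,99,python,2,1.html'  # example search URL, built the same way as in getfront below
req = urllib.request.Request(url, headers=header)
page = urllib.request.urlopen(req).read().decode('gbk')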

 

 

5. To drive the crawl, I wrote a function that takes the position keyword you enter and fetches the matching listings.

# page is the page number, item is the keyword entered by the user
def getfront(page, item):
    # URL-encode the keyword (percent-encoding, so non-ASCII text is safe in the URL)
    result = urllib.parse.quote(item)
    ur1 = result+',2,'+str(page)+'.html'
    ur2 = 'https://search.51job.com/list/000000,000000,0000,00,9,99,'
    res = ur2+ur1                               # assemble the search URL
    a = urllib.request.urlopen(res)
    # read the page source and decode it (the site serves GBK)
    html = a.read().decode('gbk')
    return html

def getInformation(html):
    # re.S lets '.' also match newlines, so the pattern can span multiple lines
    reg = re.compile(r'class="t1 ">.*? <a target="_blank" title="(.*?)" href="(.*?)".*? <span class="t2"><a target="_blank" title="(.*?)" href="(.*?)".*?<span class="t3">(.*?)</span>.*?<span class="t4">(.*?)</span>.*?<span class="t5">(.*?)</span>.*?',re.S)
    items = re.findall(reg, html)
    return items

Besides the basic fields, the regex also captures the URL behind each job-title link and each company link.
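A minimal way to try the two functions above, assuming the old GBK-encoded list pages are still being served, is:

# Sketch: fetch page 1 for a keyword and inspect the first parsed record.
html = getfront(1, u'數據分析')      # the keyword is URL-encoded inside getfront
jobs = getInformation(html)
if jobs:
    # each record is (job title, job URL, company, company URL, location, salary, date)
    print(jobs[0])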

 

6. Save the scraped data to an Excel file, which keeps it clear and easy to inspect.

# create a new workbook
excel1 = xlwt.Workbook()
# add a sheet; cell_overwrite_ok allows cells to be rewritten
sheet1 = excel1.add_sheet('Job', cell_overwrite_ok=True)

sheet1.write(0, 0, '序號')
sheet1.write(0, 1, '職位')
sheet1.write(0, 2, '公司名稱')
sheet1.write(0, 3, '公司地點')
sheet1.write(0, 4, '公司性質')
sheet1.write(0, 5, '薪資')
sheet1.write(0, 6, '學歷要求')
sheet1.write(0, 7, '工作經驗')
sheet1.write(0, 8, '公司規模')
sheet1.write(0, 9, '公司類型')
sheet1.write(0, 10, '公司福利')
sheet1.write(0, 11, '發布時間')
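The twelve write calls can equally be driven by a loop over the column names; a behaviour-equivalent sketch:

# Sketch: write the header row from a list of column names.
columns = ['序號', '職位', '公司名稱', '公司地點', '公司性質', '薪資',
           '學歷要求', '工作經驗', '公司規模', '公司類型', '公司福利', '發布時間']
for col, name in enumerate(columns):
    sheet1.write(0, col, name)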

 

The crawling code is as follows:

number = 1
item = input()

for j in range(1, 1000):
    try:
        print("正在爬取第"+str(j)+"頁數據...")
        # fetch the page source for page j
        html = getfront(j, item)

        for i in getInformation(html):
            try:
                # URL of the job-detail page
                url1 = i[1]
                res1 = urllib.request.urlopen(url1).read().decode('gbk')
                company = re.findall(re.compile(r'<div class="com_tag">.*?<p class="at" title="(.*?)"><span class="i_flag">.*?<p class="at" title="(.*?)">.*?<p class="at" title="(.*?)">.*?',re.S),res1)

                job_need = re.findall(re.compile(r'<p class="msg ltype".*?>.*?&nbsp;&nbsp;<span>|</span>&nbsp;&nbsp;(.*?)&nbsp;&nbsp;<span>|</span>&nbsp;&nbsp;(.*?)&nbsp;&nbsp;<span>|</span>&nbsp;&nbsp;.*?</p>',re.S),res1)

                welfare = re.findall(re.compile(r'<span class="sp4">(.*?)</span>',re.S),res1)
                print(i[0], i[2], i[4], i[5], company[0][0], job_need[2][0],
                      job_need[1][0], company[0][1], company[0][2], welfare, i[6])

                sheet1.write(number, 0, number)
                sheet1.write(number, 1, i[0])
                sheet1.write(number, 2, i[2])
                sheet1.write(number, 3, i[4])
                sheet1.write(number, 4, company[0][0])
                sheet1.write(number, 5, i[5])
                sheet1.write(number, 6, job_need[1][0])
                sheet1.write(number, 7, job_need[2][0])
                sheet1.write(number, 8, company[0][1])
                sheet1.write(number, 9, company[0][2])
                sheet1.write(number, 10, "  ".join(str(w) for w in welfare))
                sheet1.write(number, 11, i[6])

                number += 1
                excel1.save("51job.xls")
                # pause between requests so heavy crawling is not mistaken for an attack and the IP banned
                time.sleep(0.3)
            except:
                pass
    except:
        pass

 

The results look like this:

 

III. Data Cleaning and Processing

1. Open the file

#coding:utf-8
import pandas as pd
import re

# the xlrd package also needs to be installed to read the .xls file

data = pd.read_excel(r'51job.xls', sheet_name='Job')
result = pd.DataFrame(data)

 

Cleaning approach:

1. Drop any row that contains missing values.

a = result.dropna(axis=0, how='any')
# show all rows instead of truncating the output
pd.set_option('display.max_rows', None)
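To check how many incomplete rows were dropped, a quick comparison (not in the original) is:

print(len(result), '->', len(a))   # row count before and after dropna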

 

2. Remove listings whose job title is unrelated to the searched keyword.

b = u'數據'
number = 1
li = a['職位']
for i in range(0, len(li)):
    try:
        if b in li[i]:
            #print(number, li[i])
            number += 1
        else:
            a = a.drop(i, axis=0)
    except:
        pass
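The same filter can be written without an explicit loop using pandas string matching; a sketch, assuming the keyword is still '數據':

# Vectorized alternative: keep only rows whose job title contains the keyword.
a = a[a['職位'].astype(str).str.contains(u'數據', na=False)]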

 

3. Remove rows where fields are shifted, for example text such as '招2人' (number of openings) appearing in the education column.

b2 = u'人'   # '人' marks misplaced "number of openings" text (reconstructed; the original snippet shows an empty string here)
li2 = a['學歷要求']
for i in range(0, len(li2)):
    try:
        if b2 in li2[i]:
            #print(number, li2[i])
            number += 1
            a = a.drop(i, axis=0)
    except:
        pass

 

 

4. Normalize the inconsistent salary units (convert everything to 萬/月).

b3 = u'萬/年'
b4 = u'千/月'
li3 = a['薪資']

# the commented-out print calls are for debugging

for i in range(0, len(li3)):
    try:
        if b3 in li3[i]:
            x = re.findall(r'\d*\.?\d+', li3[i])
            #print(x)
            # convert to float, divide by 12 months, keep two decimals
            min_ = format(float(x[0])/12, '.2f')
            max_ = format(float(x[1])/12, '.2f')
            li3[i] = min_+'-'+max_+u'萬/月'
        if b4 in li3[i]:
            x = re.findall(r'\d*\.?\d+', li3[i])
            #print(x)
            # 1 萬 = 10 千, so divide by 10
            min_ = format(float(x[0])/10, '.2f')
            max_ = format(float(x[1])/10, '.2f')
            li3[i] = min_+'-'+max_+u'萬/月'
        print(i, li3[i])
    except:
        pass
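As a worked example of the conversion: '6-8千/月' gives x = ['6', '8'] and becomes '0.60-0.80萬/月', while '10-15萬/年' becomes '0.83-1.25萬/月'. A standalone sketch of the same arithmetic:

# Standalone sketch of the salary normalization used above.
def to_wan_per_month(text):
    x = re.findall(r'\d*\.?\d+', text)
    if u'萬/年' in text:
        return format(float(x[0])/12, '.2f') + '-' + format(float(x[1])/12, '.2f') + u'萬/月'
    if u'千/月' in text:
        return format(float(x[0])/10, '.2f') + '-' + format(float(x[1])/10, '.2f') + u'萬/月'
    return text

print(to_wan_per_month(u'6-8千/月'))    # 0.60-0.80萬/月
print(to_wan_per_month(u'10-15萬/年'))  # 0.83-1.25萬/月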

 

After cleaning, save the result to a new Excel file.

a.to_excel('51job2.xlsx', sheet_name='Job', index=False)

 

IV. Data Visualization

Visualization makes the data much easier to read at a glance and supports the analysis; you could even argue it is the most important part of data mining.

1. Required packages (note that the imports below use the pre-1.0 pyecharts API; pyecharts 1.x renamed the modules and changed the call signatures)

# -*- coding: utf-8 -*-
import pandas as pd
import re
from pyecharts import Funnel, Pie, Geo
import matplotlib.pyplot as plt

 

2. Open the file

file = pd.read_excel(r'51job2.xlsx', sheet_name='Job')
f = pd.DataFrame(file)
pd.set_option('display.max_rows', None)

3. Build separate lists for salary, work experience, education requirement, and company location.

add = f['公司地點']
sly = f['薪資']
edu = f['學歷要求']
exp = f['工作經驗']
address = []
salary = []
education = []
experience = []
for i in range(0, len(f)):
    try:
        # keep only the city part of strings like '上海-浦東新區'
        a = add[i].split('-')
        address.append(a[0])
        #print(address[i])
        # pull the numeric lower/upper bounds out of the salary string
        s = re.findall(r'\d*\.?\d+', sly[i])
        s1 = float(s[0])
        s2 = float(s[1])
        salary.append([s1, s2])
        #print(salary[i])
        education.append(edu[i])
        #print(education[i])
        experience.append(exp[i])
        #print(experience[i])
    except:
        pass

4. Experience-salary chart and education-salary chart

# list of minimum salaries
min_s = []
# list of maximum salaries
max_s = []
for i in range(0, len(experience)):
    min_s.append(salary[i][0])
    max_s.append(salary[i][1])

# relate work experience to salary
my_df = pd.DataFrame({'experience': experience, 'min_salay': min_s, 'max_salay': max_s})
data1 = my_df.groupby('experience').mean()['min_salay'].plot(kind='line')
plt.show()

# relate education to salary
my_df2 = pd.DataFrame({'education': education, 'min_salay': min_s, 'max_salay': max_s})
data2 = my_df2.groupby('education').mean()['min_salay'].plot(kind='line')
plt.show()

5. Education requirement ring chart

def get_edu(list):
    # count how many listings require each education level
    education2 = {}
    for i in set(list):
        education2[i] = list.count(i)
    return education2
dir1 = get_edu(education)

# print(dir1)

attr = dir1.keys()
value = dir1.values()
pie = Pie("學歷要求")
pie.add("", attr, value, center=[50, 50], is_random=False, radius=[30, 75], rosetype='radius',
        is_legend_show=False, is_label_show=True, legend_orient='vertical')
pie.render('學歷要求玫瑰圖.html')

 

6. Geographic distribution of big-data job demand

def get_address(list):
    address2 = {}
    for i in set(list):
        address2[i] = list.count(i)

    # drop the '異地招聘' placeholder, which is not a real location
    address2.pop('異地招聘')

    # further locations can be removed the same way if needed:
    #address2.pop('山東')
    #address2.pop('怒江')
    #address2.pop('池州')

    return address2

dir2 = get_address(address)

#print(dir2)

geo = Geo("大數據人才需求分布圖", title_color="#2E2E2E",
          title_text_size=24, title_top=20, title_pos="center", width=1300, height=600)

attr2 = dir2.keys()
value2 = dir2.values()

geo.add("", attr2, value2, type="effectScatter", is_random=True, visual_range=[0, 1000], maptype='china', symbol_size=8, effect_scale=5, is_visualmap=True)

geo.render('大數據城市需求分布圖.html')

 

 

7. Work experience requirement funnel chart

def get_experience(list):
    experience2 = {}
    for i in set(list):
        experience2[i] = list.count(i)
    return experience2

dir3 = get_experience(experience)

#print(dir3)

attr3 = dir3.keys()
value3 = dir3.values()
funnel = Funnel("工作經驗漏斗圖", title_pos='center')

funnel.add("", attr3, value3, is_label_show=True, label_pos="inside", label_text_color="#fff", legend_orient='vertical', legend_pos='left')

funnel.render('工作經驗要求漏斗圖.html')

 

 

Complete code:

import urllib.request
import xlwt
import re
import urllib.parse
import time

header={
    'Host':'search.51job.com',
    'Upgrade-Insecure-Requests':'1',
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
}

# page is the page number, item is the keyword entered by the user
def getfront(page, item):
    # URL-encode the keyword so non-ASCII text is safe in the URL
    result = urllib.parse.quote(item)
    ur1 = result+',2,'+str(page)+'.html'
    ur2 = 'https://search.51job.com/list/000000,000000,0000,00,9,99,'
    res = ur2+ur1                               # assemble the search URL
    a = urllib.request.urlopen(res)
    # read the page source and decode it (the site serves GBK)
    html = a.read().decode('gbk')
    return html

def getInformation(html):
    # re.S lets '.' also match newlines, so the pattern can span multiple lines
    reg = re.compile(r'class="t1 ">.*? <a target="_blank" title="(.*?)" href="(.*?)".*? <span class="t2"><a target="_blank" title="(.*?)" href="(.*?)".*?<span class="t3">(.*?)</span>.*?<span class="t4">(.*?)</span>.*?<span class="t5">(.*?)</span>.*?',re.S)
    items = re.findall(reg, html)
    return items

# create a new workbook and a sheet whose cells may be rewritten
excel1 = xlwt.Workbook()
sheet1 = excel1.add_sheet('Job', cell_overwrite_ok=True)

sheet1.write(0, 0, '序號')
sheet1.write(0, 1, '職位')
sheet1.write(0, 2, '公司名稱')
sheet1.write(0, 3, '公司地點')
sheet1.write(0, 4, '公司性質')
sheet1.write(0, 5, '薪資')
sheet1.write(0, 6, '學歷要求')
sheet1.write(0, 7, '工作經驗')
sheet1.write(0, 8, '公司規模')
sheet1.write(0, 9, '公司類型')
sheet1.write(0, 10, '公司福利')
sheet1.write(0, 11, '發布時間')

number = 1
item = input()

for j in range(1, 1000):
    try:
        print("正在爬取第"+str(j)+"頁數據...")
        # fetch the page source for page j
        html = getfront(j, item)

        for i in getInformation(html):
            try:
                # URL of the job-detail page
                url1 = i[1]
                res1 = urllib.request.urlopen(url1).read().decode('gbk')
                company = re.findall(re.compile(r'<div class="com_tag">.*?<p class="at" title="(.*?)"><span class="i_flag">.*?<p class="at" title="(.*?)">.*?<p class="at" title="(.*?)">.*?',re.S),res1)
                job_need = re.findall(re.compile(r'<p class="msg ltype".*?>.*?&nbsp;&nbsp;<span>|</span>&nbsp;&nbsp;(.*?)&nbsp;&nbsp;<span>|</span>&nbsp;&nbsp;(.*?)&nbsp;&nbsp;<span>|</span>&nbsp;&nbsp;.*?</p>',re.S),res1)
                welfare = re.findall(re.compile(r'<span class="sp4">(.*?)</span>',re.S),res1)
                print(i[0], i[2], i[4], i[5], company[0][0], job_need[2][0],
                      job_need[1][0], company[0][1], company[0][2], welfare, i[6])

                sheet1.write(number, 0, number)
                sheet1.write(number, 1, i[0])
                sheet1.write(number, 2, i[2])
                sheet1.write(number, 3, i[4])
                sheet1.write(number, 4, company[0][0])
                sheet1.write(number, 5, i[5])
                sheet1.write(number, 6, job_need[1][0])
                sheet1.write(number, 7, job_need[2][0])
                sheet1.write(number, 8, company[0][1])
                sheet1.write(number, 9, company[0][2])
                sheet1.write(number, 10, "  ".join(str(w) for w in welfare))
                sheet1.write(number, 11, i[6])

                number += 1
                excel1.save("51job.xls")
                # pause between requests so heavy crawling is not mistaken for an attack and the IP banned
                time.sleep(0.3)
            except:
                pass
    except:
        pass

#coding:utf-8
import pandas as pd
import re

# the xlrd package also needs to be installed to read the .xls file

data = pd.read_excel(r'51job.xls', sheet_name='Job')
result = pd.DataFrame(data)

# 1. drop rows with missing values; show all rows instead of truncating the output
a = result.dropna(axis=0, how='any')
pd.set_option('display.max_rows', None)

# 2. drop listings whose job title is unrelated to the searched keyword
b = u'數據'
number = 1
li = a['職位']
for i in range(0, len(li)):
    try:
        if b in li[i]:
            #print(number, li[i])
            number += 1
        else:
            a = a.drop(i, axis=0)
    except:
        pass

# 3. drop rows with shifted fields, e.g. '招2人' appearing in the education column
b2 = u'人'   # reconstructed; the original snippet shows an empty string here
li2 = a['學歷要求']
for i in range(0, len(li2)):
    try:
        if b2 in li2[i]:
            #print(number, li2[i])
            number += 1
            a = a.drop(i, axis=0)
    except:
        pass

# 4. normalize salary units to 萬/月
b3 = u'萬/年'
b4 = u'千/月'
li3 = a['薪資']

for i in range(0, len(li3)):
    try:
        if b3 in li3[i]:
            x = re.findall(r'\d*\.?\d+', li3[i])
            # convert to float, divide by 12 months, keep two decimals
            min_ = format(float(x[0])/12, '.2f')
            max_ = format(float(x[1])/12, '.2f')
            li3[i] = min_+'-'+max_+u'萬/月'
        if b4 in li3[i]:
            x = re.findall(r'\d*\.?\d+', li3[i])
            # 1 萬 = 10 千, so divide by 10
            min_ = format(float(x[0])/10, '.2f')
            max_ = format(float(x[1])/10, '.2f')
            li3[i] = min_+'-'+max_+u'萬/月'
        print(i, li3[i])
    except:
        pass

a.to_excel('51job2.xlsx', sheet_name='Job', index=False)

# -*- coding: utf-8 -*-
import pandas as pd
import re
from pyecharts import Funnel, Pie, Geo
import matplotlib.pyplot as plt

file = pd.read_excel(r'51job2.xlsx', sheet_name='Job')
f = pd.DataFrame(file)
pd.set_option('display.max_rows', None)

add = f['公司地點']
sly = f['薪資']
edu = f['學歷要求']
exp = f['工作經驗']
address = []
salary = []
education = []
experience = []
for i in range(0, len(f)):
    try:
        # keep only the city part of strings like '上海-浦東新區'
        a = add[i].split('-')
        address.append(a[0])
        # pull the numeric lower/upper bounds out of the salary string
        s = re.findall(r'\d*\.?\d+', sly[i])
        s1 = float(s[0])
        s2 = float(s[1])
        salary.append([s1, s2])
        education.append(edu[i])
        experience.append(exp[i])
    except:
        pass

# lists of minimum and maximum salaries
min_s = []
max_s = []
for i in range(0, len(experience)):
    min_s.append(salary[i][0])
    max_s.append(salary[i][1])

# relate work experience to salary
my_df = pd.DataFrame({'experience': experience, 'min_salay': min_s, 'max_salay': max_s})
data1 = my_df.groupby('experience').mean()['min_salay'].plot(kind='line')
plt.show()

# relate education to salary
my_df2 = pd.DataFrame({'education': education, 'min_salay': min_s, 'max_salay': max_s})
data2 = my_df2.groupby('education').mean()['min_salay'].plot(kind='line')
plt.show()

def get_edu(list):
    # count how many listings require each education level
    education2 = {}
    for i in set(list):
        education2[i] = list.count(i)
    return education2
dir1 = get_edu(education)

attr = dir1.keys()
value = dir1.values()
pie = Pie("學歷要求")
pie.add("", attr, value, center=[50, 50], is_random=False, radius=[30, 75], rosetype='radius',
        is_legend_show=False, is_label_show=True, legend_orient='vertical')
pie.render('學歷要求玫瑰圖.html')

def get_address(list):
    address2 = {}
    for i in set(list):
        address2[i] = list.count(i)
    # drop the '異地招聘' placeholder, which is not a real location
    address2.pop('異地招聘')
    # further locations can be removed the same way if needed:
    #address2.pop('山東')
    #address2.pop('怒江')
    #address2.pop('池州')
    return address2

dir2 = get_address(address)

geo = Geo("大數據人才需求分布圖", title_color="#2E2E2E",
          title_text_size=24, title_top=20, title_pos="center", width=1300, height=600)

attr2 = dir2.keys()
value2 = dir2.values()

geo.add("", attr2, value2, type="effectScatter", is_random=True, visual_range=[0, 1000], maptype='china', symbol_size=8, effect_scale=5, is_visualmap=True)

geo.render('大數據城市需求分布圖.html')

def get_experience(list):
    experience2 = {}
    for i in set(list):
        experience2[i] = list.count(i)
    return experience2

dir3 = get_experience(experience)

attr3 = dir3.keys()
value3 = dir3.values()
funnel = Funnel("工作經驗漏斗圖", title_pos='center')

funnel.add("", attr3, value3, is_label_show=True, label_pos="inside", label_text_color="#fff", legend_orient='vertical', legend_pos='left')

funnel.render('工作經驗要求漏斗圖.html')

 

V. Summary

Because my fundamentals are still weak, this crawler took quite a while, but the result turned out well. The Excel file and the visual analysis give a clear, intuitive picture of what the positions require, which is basically the outcome I wanted. pyecharts offers many more chart types, though, so I need to keep exploring them and strengthening my own skills.

