Lagou (拉勾网) Data Collection and Visualization


Overview

This article uses Python to scrape the 'python' job postings on Lagou and then visualizes the data with Python, covering both web scraping and data analysis.

The scraper

First, open the Lagou homepage in a browser, search for python, and analyze the network requests with the browser's developer tools. The listing loads its data dynamically, so by comparing the data shown on the page with the network traffic you can find the real URL the query is sent to, and a closer look also reveals Lagou's anti-scraping measures. The data is submitted with a POST request, as shown below.


(screenshot: the POST request in the developer tools)

Looking a little further, there is also a GET request whose response contains company IDs. Comparing the two shows they are related: the POST response also carries the company IDs, and there are exactly 15 of them, one per job on the page.
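A minimal sketch of that relationship, assuming the response JSON is organised as content -> positionResult -> result (which matches the fields the regex in the full script below captures); in practice the complete header set from the script below, including the Cookie, is needed for the requests to be accepted:

import requests

post_url = 'https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false&isSchoolJob=0'
headers = {  # trimmed for illustration; the full script below sends the complete header set
    'User-Agent': 'Mozilla/5.0',
    'Referer': 'https://www.lagou.com/jobs/list_python?labelWords=&fromSearch=true&suginput=',
}
form = {'first': 'false', 'kd': 'Python', 'pn': '1'}  # pn = page number
resp = requests.post(post_url, data=form, headers=headers)

# Assumption: one dict per job under content -> positionResult -> result,
# each carrying a companyId -- 15 of them per page.
jobs = resp.json()['content']['positionResult']['result']
company_ids = ','.join(str(job['companyId']) for job in jobs)

# The GET request seen in the developer tools takes exactly those IDs.
approve_url = 'https://www.lagou.com/c/approve.json?companyIds=' + company_ids
print(requests.get(approve_url, headers=headers).json())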


(screenshots: the approve.json GET request and its response of company IDs)

Scraper code

'''
date: 2018-03-15
by: 北冥神君
purpose: scrape Python job postings from Lagou
'''

# Imports
import requests  # HTTP requests
import re        # regular expressions
import time      # pauses between requests
import random    # randomised delays
import pandas as pd  # DataFrame / CSV output (used further down)

# URL the POST request goes to
url = 'https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false&isSchoolJob=0'

# Headers copied from the browser to get past the anti-scraping checks
header1 = {'Host': 'www.lagou.com',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:58.0) Gecko/20100101 Firefox/58.0',
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Accept-Encoding': 'gzip, deflate, br',
        'Referer': 'https://www.lagou.com/jobs/list_python?labelWords=&fromSearch=true&suginput=',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'X-Requested-With': 'XMLHttpRequest',
        'X-Anit-Forge-Token': 'None',
        'X-Anit-Forge-Code': '0',
        'Content-Length': '26',
        'Cookie': 'Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1519816933,1519816935,1521079570,1521079575; _ga=GA1.2.129319102.1515420746; user_trace_token=20180108221226-f4036578-f47d-11e7-a021-5254005c3644; LGUID=20180108221226-f40369cf-f47d-11e7-a021-5254005c3644; index_location_city=%E5%85%A8%E5%9B%BD; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1521081597; LGSID=20180315100701-8cabf3af-27f5-11e8-b1fc-525400f775ce; LGRID=20180315103956-2609450b-27fa-11e8-b1ed-5254005c3644; _gid=GA1.2.2023749020.1521079570; JSESSIONID=ABAAABAAAIAACBI02527B187B701F2E661E90B666E236AF; hideSliderBanner20180305WithTopBannerC=1; TG-TRACK-CODE=search_code; SEARCH_ID=c9472cb5ce184e00bf8dcd8989fdc892; _gat=1; X_HTTP_TOKEN=d5fd7e2b382eab92942c6aee48b65dfa',
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache'}
header2 = {'Host': 'www.lagou.com',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:58.0) Gecko/20100101 Firefox/58.0',
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
            'Accept-Encoding': 'gzip, deflate, br',
            'Referer': 'https://www.lagou.com/jobs/list_python?labelWords=&fromSearch=true&suginput=',
            'X-Requested-With': 'XMLHttpRequest',
            'X-Anit-Forge-Token': 'None',
            'X-Anit-Forge-Code': '0',
            'Cookie': 'Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1519816933,1519816935,1521079570,1521079575; _ga=GA1.2.129319102.1515420746; user_trace_token=20180108221226-f4036578-f47d-11e7-a021-5254005c3644; LGUID=20180108221226-f40369cf-f47d-11e7-a021-5254005c3644; index_location_city=%E5%85%A8%E5%9B%BD; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1521081597; LGSID=20180315100701-8cabf3af-27f5-11e8-b1fc-525400f775ce; LGRID=20180315103956-2609450b-27fa-11e8-b1ed-5254005c3644; _gid=GA1.2.2023749020.1521079570; JSESSIONID=ABAAABAAAIAACBI02527B187B701F2E661E90B666E236AF; hideSliderBanner20180305WithTopBannerC=1; TG-TRACK-CODE=search_code; SEARCH_ID=c9472cb5ce184e00bf8dcd8989fdc892; _gat=1; X_HTTP_TOKEN=d5fd7e2b382eab92942c6aee48b65dfa',
            'Connection': 'keep-alive',
            'Pragma': 'no-cache',
            'Cache-Control': 'no-cache'}

for n in range(1, 31):  # 30 pages of results
    # Form data for the POST request
    form = {'first': 'false',
            'kd': 'Python',
            'pn': str(n)}

    time.sleep(random.randint(5, 10))  # pause for 5-10 seconds between pages

    # Submit the request
    html = requests.post(url, data=form, headers=header1)

    # Extract the job fields with a regex over the response text
    data = re.findall(
        '{"companyId":.*?,"positionName":"(.*?)","workYear":"(.*?)","education":"(.*?)","jobNature":"(.*?)","financeStage":"(.*?)","companyLogo":".*?","industryField":".*?","city":"(.*?)","salary":"(.*?)","positionId":.*?,"positionAdvantage":"(.*?)","companyShortName":"(.*?)","district"',
        html.text)
    print(data)
    # Extract the company IDs
    companyId = re.findall(
        '{"companyId":(.*?),.*?,"district"',
        html.text)
    print(companyId)
    companyIds = ','.join(companyId)
    print(companyIds)
    urlcompanyUrl = 'https://www.lagou.com/c/approve.json?companyIds='+companyIds
    print(urlcompanyUrl)
    # GET the company approval data (same anti-scraping headers)
    get_company = requests.get(url=urlcompanyUrl,headers = header2)
    print(get_company.text)

    # Convert to a DataFrame

    data = pd.DataFrame(data)
    print(data)

    # Append to a local CSV (no header row)
    data.to_csv(r'LaGouData.csv', header=False, index=False, mode='a+')

Data analysis

  • Education requirements
data['学历要求'].value_counts().plot(kind='barh',rot=0)
plt.show()
  • Work experience
data['工作经验'].value_counts().plot(kind='bar',rot=0,color='b')
plt.show()

  • Work location
data['工作地点'].value_counts().plot(kind='pie',autopct='%1.2f%%',explode = np.linspace(0,0.4,19))
plt.show()
  • Salary
data['工资'].value_counts().plot(kind='pie',autopct='%1.2f%%')
plt.show()
  • Word cloud analysis
final = ''
stopwords = ['PYTHON', 'python', 'Python', '工程师', '（', '）', '/']  # stopwords to filter out
for n in range(data.shape[0]):

    seg_list = list(jieba.cut(data['岗位职称'][n]))

    for seg in seg_list:
        if seg not in stopwords:
            final = final + seg + ' '
(word cloud of the job titles)

Data analysis code

import pandas as pd  # DataFrame operations
import numpy as np
import matplotlib.pyplot as plt  # plotting
import jieba  # Chinese word segmentation
import matplotlib as mpl  # font configuration

# A font with Chinese glyphs is needed for the Chinese labels, otherwise they render
# as empty boxes; the fallbacks below are common macOS/Windows choices.
mpl.rcParams["font.sans-serif"] = ["PingFang SC", "Arial Unicode MS", "SimHei"]
mpl.rcParams['axes.unicode_minus'] = False
# Plotting style
plt.rcParams["axes.labelsize"] = 16.
plt.rcParams["xtick.labelsize"] = 14.
plt.rcParams["ytick.labelsize"] = 14.
plt.rcParams["legend.fontsize"] = 12.
plt.rcParams["figure.figsize"] = [15., 15.]

# Load the data. The scraper saved the CSV without a header row, so column names are
# supplied here in the order of the regex capture groups (the four names not used in
# this article are placeholder translations of the corresponding JSON fields).
data = pd.read_csv('/Users/tencenting/PycharmProjects/qm/venv/LaGouData.csv', encoding='utf-8',
                   header=None,
                   names=['岗位职称', '工作经验', '学历要求', '职位性质',
                          '融资阶段', '工作地点', '工资', '职位优势', '公司简称'])
print(data.head())
print(data.tail())

data['学历要求'].value_counts().plot(kind='barh',rot=0)
plt.show()

data['工作经验'].value_counts().plot(kind='bar',rot=0,color='b')
plt.show()

data['工资'].value_counts().plot(kind='pie',autopct='%1.2f%%')
plt.show()
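
# As a rough numeric check on the salary column (not part of the original script):
# parse strings like '10k-20k' into a midpoint in k; the format is an assumption,
# and rows that do not match simply become NaN.
import re

def salary_midpoint(s):
    nums = [int(x) for x in re.findall(r'(\d+)k', str(s), flags=re.I)]
    return sum(nums) / len(nums) if nums else None

print(data['工资'].map(salary_midpoint).describe())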

#data['工作地点'].value_counts().plot(kind='pie',autopct='%1.2f%%',shadow =False)
# explode needs one entry per pie slice, i.e. per distinct city (19 in this data set)
data['工作地点'].value_counts().plot(kind='pie',autopct='%1.2f%%',explode = np.linspace(0,0.4,19))
plt.show()

final = ''
stopwords = ['PYTHON', 'python', 'Python', '工程师', '（', '）', '/']  # stopwords to filter out
for n in range(data.shape[0]):

    seg_list = list(jieba.cut(data['岗位职称'][n]))

    for seg in seg_list:
        if seg not in stopwords:
            final = final + seg + ' '
# final now holds the space-separated vocabulary
print(final)
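
The original post shows the resulting word cloud as an image. A minimal sketch of how final could be rendered, assuming the third-party wordcloud package is installed and that font_path points at a local font containing Chinese glyphs (the path below is an assumption):

from wordcloud import WordCloud  # assumption: the wordcloud package is installed

wc = WordCloud(font_path='/System/Library/Fonts/PingFang.ttc',  # any font with Chinese glyphs
               background_color='white', width=800, height=600).generate(final)
plt.imshow(wc, interpolation='bilinear')
plt.axis('off')
plt.show()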

Summary of the analysis

Python jobs are concentrated in Beijing, Shenzhen, Shanghai, Chengdu, Guangzhou, Hangzhou and Wuhan, with Beijing accounting for the most postings. Most ads ask for 3-5 or 1-3 years of experience and a bachelor's degree, salaries mostly fall between 8k and 30k, and the word analysis shows that the majority of positions are development-oriented.
