(Beginner) Scraping Zhilian Zhaopin (智联招聘) Job Listings with Python and Generating a Word Cloud

First, scrape the job tags.


1. Open the job search page



2. Find the URL that actually returns the listing data (the "web" variable in the code below)


import re
import urllib.request as urlrequest
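# API URL copied from the browser's network panel; the at / rt / x-zp-page-request-id
# query parameters look session-specific, so a stale URL may need to be refreshed there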
web = 'https://fe-api.zhaopin.com/c/i/sou?pageSize=90&cityId=635&salary=0,0&workExperience=-1&education=-1&companyType=-1&employmentType=-1&jobWelfareTag=-1&kw=%E4%BA%A7%E5%93%81%E7%BB%8F%E7%90%86&kt=3&=0&at=2ae1997aa6354964ac078481df22884c&rt=69444b28499d413094f94e06a6b4b161&_v=0.17798550&userCode=1036018570&x-zp-page-request-id=02072f79a0a64e619abfc5b21dc61a82-1558333624421-738392'
crawl_content = urlrequest.urlopen(web).read().decode('utf8')   # fetch the search results (the endpoint returns JSON)
#print(crawl_content)
sr = str(crawl_content)                                         # keep the response as a string for the regexes below


with open("tag.txt","w",encoding='utf8') as outputfile:   #创建txt 为爬取的数据做准备
    pattern1=re.compile(r'"extractSkillTag":\[.*?]')      #正则匹配想要的内容1
    rs1=re.findall(pattern1,sr)                            
    i1=0
    for tag in rs1:                                      #输出每一个指定爬取的内容 并存档
        #print(rs1[i1])
        outputfile.write(rs1[i1]+"\n")
        i1=i1+1
        

    pattern2=re.compile(r'\\"skill\\":\[.*?]')          #正则匹配想要的内容2
    rs2=re.findall(pattern2,sr)
    i2=0
    for skill in rs2:
        #print(rs2[i2].replace("\\",""))
        outputfile.write(rs2[i2]+"\n")
        i2=i2+1
        
    pattern3=re.compile(r'"extractNormalizedTag":\[.*?]')  #正则匹配想要的内容3
    rs3=re.findall(pattern3,sr)
    i3=0
    for tag in rs3:
        #print(rs3[i3])
        outputfile.write(rs3[i3]+"\n")
        i3=i3+1
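Since the fe-api endpoint returns JSON, an alternative to the regexes above is to parse the response with the json module and pull the tag fields out directly. A minimal sketch, with collect_key as a hypothetical helper, assuming the response still contains extractSkillTag / skill / extractNormalizedTag keys somewhere in its nested structure:

import json
import urllib.request as urlrequest

def collect_key(obj, key, found):
    """Recursively collect every value stored under `key` in a nested JSON structure."""
    if isinstance(obj, dict):
        for k, v in obj.items():
            if k == key:
                found.append(v)
            collect_key(v, key, found)
    elif isinstance(obj, list):
        for item in obj:
            collect_key(item, key, found)

data = json.loads(urlrequest.urlopen(web).read().decode('utf8'))   # `web` is the API URL defined above
tags = []
for key in ("extractSkillTag", "skill", "extractNormalizedTag"):
    collect_key(data, key, tags)
print(tags[:5])                                                    # inspect a few of the collected tag lists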
        

Next, scrape the job requirements from each job detail page.



1. Locate where the content sits in the HTML


import re
import urllib.request as urlrequest
from bs4 import BeautifulSoup
web = 'https://fe-api.zhaopin.com/c/i/sou?pageSize=90&cityId=635&salary=0,0&workExperience=-1&education=-1&companyType=-1&employmentType=-1&jobWelfareTag=-1&kw=%E4%BA%A7%E5%93%81%E7%BB%8F%E7%90%86&kt=3&=0&at=2ae1997aa6354964ac078481df22884c&rt=69444b28499d413094f94e06a6b4b161&_v=0.17798550&userCode=1036018570&x-zp-page-request-id=02072f79a0a64e619abfc5b21dc61a82-1558333624421-738392'
crawl_content = urlrequest.urlopen(web).read().decode('utf8')                  # fetch the job search page
#print(crawl_content)
p = re.compile(r'https://jobs.zhaopin.com/.*?\.htm')                           # collect the links to the individual job detail pages
rs=p.findall(crawl_content)
with open("jobs_data.txt","w",encoding='utf-8') as outputfile:              #创建文件txt 以便存入数据
    for item in rs:                                                         #循环每一个岗位详情链接
        url=item                                                            
        wp = urlrequest.urlopen(url).read()                
        soup = BeautifulSoup(wp,'html.parser')                 
        #print(soup.prettify())
        rq=soup.find(class_="describtion__detail-content").get_text()      #解析html 然后找到想要内容的位置
        #print(rq)
        p = re.compile(r'岗位要求:.*|岗位职责:.*|.*要求:.*|职位描述:.*')  #匹配想要的内容
        pp=p.findall(rq)
        pa = re.compile(r'【你要做的工作】.*')
        pa1 = pa.findall(rq)
        if pa1:
            pa2 = pa1[0]                                    # keep the matched section as a string

            pattern = re.compile(r'【我们想要的你】')
            pp2 = pattern.sub('\n【我们想要的你】', pa2)      # put the requirements heading on its own line

            pattern = re.compile(r'【我们给你的】.*')
            pp2 = pattern.sub('', pp2)                      # drop the "what we offer you" part

            pattern = re.compile(r'\d、|\d\.')
            bb = pattern.sub('\n\t - ', pp2)                # put each numbered item on its own line as a bullet
            outputfile.write(bb + "\n")
        if pp:
            pp1 = pp[0]                                     # keep the matched section as a string

            pattern = re.compile(r'任职要求:|任职资格:|岗位职责:')
            pp2 = pattern.sub('\n任职要求:', pp1)            # normalize the heading and put it on its own line

            pattern = re.compile(r'\d、|\d\.')
            bb = pattern.sub('\n\t - ', pp2)                # put each numbered item on its own line as a bullet
            outputfile.write(bb + "\n")
        else:
            # no recognizable heading was found: keep the whole description
            p1 = re.compile(r'\d、|\d\.')
            bb = p1.sub('\n\t - ', rq)
            outputfile.write(bb + "\n")
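The loop above hits one detail page after another with urllib's default headers, which some sites throttle or block. A minimal sketch of a slightly politer fetch helper, assuming only that a browser-like User-Agent is acceptable (the header value and the one-second delay are illustrative choices):

import time
import urllib.request as urlrequest

def fetch(url, delay=1.0):
    """Fetch a URL with a browser-like User-Agent and pause briefly afterwards."""
    req = urlrequest.Request(url, headers={"User-Agent": "Mozilla/5.0"})
    html = urlrequest.urlopen(req).read()
    time.sleep(delay)                       # be polite: wait before the next request
    return html

# inside the loop above you would then use:
#     wp = fetch(url)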


Generate the word cloud


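Before building the cloud, the text has to be split into individual words, which is what jieba.cut does in the script below. A quick way to see how the segmentation behaves (the sample phrase is just an illustration):

import jieba

# print the phrase with "/" between the words jieba recognizes
print("/".join(jieba.cut("负责产品需求分析和原型设计")))

The full word-cloud script: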
import jieba
from wordcloud import WordCloud
import matplotlib.pyplot as plt
with open("tag.txt", encoding='utf-8') as f:           # open and read the scraped tag file
    text = f.read()
cw = jieba.cut(text)                                   # segment the text into words
result = " ".join(cw)                                  # join the words with spaces
wc = WordCloud(                                        # build the word cloud
    font_path="simhei.ttf",                            # a font with Chinese glyphs is needed to render the characters
    background_color='white',                          # background color
    width=1000,
    height=1000,
    max_words=500,
    stopwords=["经理","产品","extractSkillTag","skill","extractNormalizedTag"]   # drop filler words and the JSON key names
)
wc.generate(result)
wc.to_file('tag_ph.png')                               # save the image

# display the image
plt.figure('tag_ph')                                   # window title
plt.imshow(wc)                                         # imshow draws the word cloud on a 2D axis; plt.show() below actually displays it
plt.axis('off')                                        # hide the axes
plt.show()
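If the plain square cloud looks dull, WordCloud can also draw the words inside a silhouette through its mask parameter. A minimal sketch, assuming a silhouette image shape.png exists on disk (the filename is a placeholder); white areas of the mask are left empty:

import numpy as np
from PIL import Image
from wordcloud import WordCloud

mask = np.array(Image.open("shape.png"))    # convert the silhouette image into an array mask
wc = WordCloud(
    font_path="simhei.ttf",
    background_color='white',
    max_words=500,
    mask=mask,                              # words are drawn only inside the non-white region
)
wc.generate(result)                         # `result` is the space-joined text from the script above
wc.to_file('tag_shape.png')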
