Two ways to scrape Boss Zhipin job postings

Last time I left a loose end: scraping the target site in two different ways. The first uses selenium + chromedriver. It's slow, but it gets past all kinds of anti-scraping measures, and whatever check the site throws up is rendered as an ordinary page you can inspect. That makes it a good probing tool: figure out the site's anti-scraping rules with it first, then do the real crawl with a plain crawler. This afternoon I had some spare time and ran the test against Boss Zhipin.

Method 1: selenium + chromedriver

Since the code isn't long, here it is in full:

import pymysql
import re
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from lxml import etree
import time
from datetime import datetime,timedelta

class LagouSpider(object):
    path = r'I:\code\selenimu_chromedriver\chromedriver_win32\chromedriver.exe'
    def __init__(self):
        self.dicts={}
        self.driver=webdriver.Chrome(executable_path=LagouSpider.path)
        self.url='https://www.zhaopin_spider.com/jobs/list_%E6%95%B0%E6%8D%AE%E6%8C%96%E6%8E%98?labelWords=&fromSearch=true&suginput='
    def run(self):
        self.driver.get(self.url)
        time.sleep(50)  # log in manually in the chromedriver window during this pause, so the crawl isn't interrupted by a login prompt later
        while True:
            # explicit wait for the 'next page' element, so a fast crawl doesn't fail to find it
            WebDriverWait(driver=self.driver,timeout=15).until(EC.presence_of_element_located((By.XPATH,"//div[@class='pager_container']/span[last()]")))
            next_page=self.driver.find_element_by_xpath("//div[@class='pager_container']/span[last()]")
            # clicking until the button is disabled drops the last page of data;
            # a for loop over the total page count avoids the loss (see the sketch after this script)
            if 'pager_next_disabled' not in next_page.get_attribute('class'):
                source = self.driver.page_source
                self.parse_list_source(source)
                next_page.click()
            else:
                break

    def parse_list_source(self,source):
        html=etree.HTML(source)
        list_url=html.xpath("//div[@class='list_item_top']/div[@class='position']/div[@class='p_top']/a/@href")
        for link in list_url:
            self.request_detail_source(link)
            print(link)
            time.sleep(3)

    def request_detail_source(self,url):
        self.driver.execute_script("window.open('%s')"%url)  # open the detail page in a new window
        self.driver.switch_to.window(self.driver.window_handles[1])  # switch to that window
        source=self.driver.page_source
        source=etree.HTML(source)
        salary = source.xpath("//dd[@class='job_request']/p[1]/span[@class='salary']/text()")
        # this if/else dodges the verification page: if no 'salary' element is found, close the window and move on; the downside is that one posting's data is lost
        if len(salary) ==0:
            self.driver.close()
            self.driver.switch_to.window(self.driver.window_handles[0])
        else:
            salary = salary[0].strip()
            adress = source.xpath("//dd[@class='job-address clearfix']/div[@class='work_addr']/a/text()")
            adress.pop(-1)  # drop the last element (a trailing link label rather than part of the address)
            adress = "".join(adress)
            company = source.xpath("//h2[@class='fl']/em[@class='fl-cn']/text()")[0].strip()
            experience=source.xpath("//dd[@class='job_request']/p[1]/span[3]/text()")[0].strip('/').strip()
            education=source.xpath("//dd[@class='job_request']/p[1]/span[4]/text()")[0].strip('/').strip()
            tempting = source.xpath("//dd[@class='job-advantage']/p/text()")[0]
            # split `requirement` into responsibility and demand; the text has no fixed format, so some data may be lost
            requirement = source.xpath("//dl[@id='job_detail']/dd[@class='job_bt']/div//text()")
            # an element-by-element strip like the commented-out loop below is clumsy, so it isn't used
            #temporary_variable = 0
            # for i in requirement:
            #     i = str(i).strip()
            #     requirement[temporary_variable] = i
            #     temporary_variable += 1
            requirement = "".join(requirement)
            requirement=re.sub('\n','',requirement).strip()
            requirement=re.sub(r"\xa0","",requirement)
            a = re.split(r'(工作要求|任职要求|任职资格|岗位要求|岗位职责|工作职责|岗位职能|我们期望|工作内容)', requirement, 3)
            demand = ''
            responsibility = ''
            if len(a) == 5:
                if a[1] in ("岗位职责","工作职责","岗位职能",'工作内容','关于职位'):  # membership test with `in` rather than chained `or`s
                    responsibility = a[1] + a[2]
                else:
                    demand = a[1] + a[2]
                if a[3] in ("岗位职责","工作职责","岗位职能",'工作内容','关于职位'):  # same test for the second keyword
                    responsibility = a[3] + a[4]
                else:
                    demand = a[3] + a[4]
            else:
                pass
            # normalize the three publish-time formats into a single date
            # (time of day is dropped so the stored format is uniform)
            times = source.xpath("//p[@class='publish_time']/text()")[0]
            # formats seen: 'HH:MM' (posted today), 'N天前' (N days ago), 'YYYY-MM-DD'
            start_time = re.match(r'(\d\d:\d\d)|(\d天前)|(\d*-\d*-\d*)', times).group()
            if len(start_time) == 3:  # 'N天前': subtract N days from today
                now = datetime.now()
                start_time = str(now - timedelta(days=int(start_time[0])))
            elif len(start_time) == 5:  # 'HH:MM': posted today
                start_time = str(datetime.now())
            else:  # already 'YYYY-MM-DD', keep as-is
                pass
            #print("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s"%(salary,adress,company,experience,tempting,responsibility,demand,start_time))
            # write to the database; a parameterized query avoids quoting and injection problems
            conn = pymysql.connect(host="localhost", port=3306, user='root', password='as123456',
                                   database='zhaopin_spider', charset='utf8')
            cursor = conn.cursor()  # create a cursor object
            sql = "INSERT INTO data_analysis(salary,adress,company,experience,tempting,responsibility,demand,start_time,url)" \
                  " VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)"
            cursor.execute(sql, (salary, adress, company, experience, tempting,
                                 responsibility, demand, start_time, str(url)))
            conn.commit()
            self.driver.close()
            self.driver.switch_to.window(self.driver.window_handles[0])

if __name__=='__main__':
    lagou=LagouSpider()
    lagou.run()
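As the comment in run() notes, the while/next-button loop drops the last page. A safer variant reads the total page count from the pager first and iterates with a for loop, so the last page is parsed too. A minimal sketch of a replacement run(), assuming the pager shows the total in a span with class totalNum (that selector is a guess; verify it against the real pager markup):

    def run(self):
        self.driver.get(self.url)
        time.sleep(50)  # log in manually during this pause
        WebDriverWait(driver=self.driver, timeout=15).until(
            EC.presence_of_element_located((By.XPATH, "//div[@class='pager_container']/span[last()]")))
        # total page count; the 'totalNum' class is an assumption about the pager markup
        total = int(self.driver.find_element_by_xpath(
            "//div[@class='pager_container']/span[@class='totalNum']").text)
        for page in range(1, total + 1):
            self.parse_list_source(self.driver.page_source)  # the last page gets parsed too
            if page < total:  # click 'next' on every page except the last
                self.driver.find_element_by_xpath(
                    "//div[@class='pager_container']/span[last()]").click()
                time.sleep(2)  # crude wait for the next page to render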

Method 2: plain HTTP requests

Judging from the test above, Boss Zhipin has set up essentially no anti-scraping measures; you don't even need to send cookies, which keeps things simple.
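A one-off probe is enough to confirm that claim: a bare GET with nothing but a browser-like User-Agent should return the job list intact. A quick sketch, using the same search URL and list selector as the crawler below:

import requests
from lxml import etree

# one-off probe: no cookies, no session, just a User-Agent header
url = 'https://www.zhipin.com/c100010000/?query=%E6%95%B0%E6%8D%AE%E6%8C%96%E6%8E%98&page=1'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                         '(KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'}
response = requests.get(url, headers=headers)
print(response.status_code)  # 200 means no block was triggered
links = etree.HTML(response.text).xpath("//div[@class='info-primary']/h3[@class='name']/a/@href")
print(len(links))  # a non-zero count means the listing page came back intact

With that confirmed, here is the full crawler; the code again isn't long: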


import requests
from lxml import etree
class ZhiPin(object):

   def __init__(self):
       self.header={
           "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134",
           "Accept-Language": "zh-Hans-CN,zh-Hans;q=0.5",
           "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
           "Upgrade-Insecure-Requests": "1",
           #"Accept-Encoding": "gzip, deflate, br",
           "Host": "www.zhipin.com",
           #"Connection": "Keep-Alive"
           }
       self.add_url='https://www.zhipin.com'
       self.start_url = 'https://www.zhipin.com/c100010000/?query=%E6%95%B0%E6%8D%AE%E6%8C%96%E6%8E%98&page=1&ka=page-2'

   def run(self):
       response = requests.get(self.start_url, headers=self.header)
       data = etree.HTML(response.text)  # etree.HTML parses the raw HTML string
       self.get_urls(data)
       self.get_lists_page(data)

   def get_lists_page(self,data):
       next_page_url = data.xpath("//div[@class='job-list']/div[@class='page']/a[@class='next']/@href")
       if len(next_page_url) != 0:
           next_page_url=next_page_url[0]
           print(next_page_url)
           if '/c100010000' in next_page_url:
               next_page_url = self.add_url + next_page_url
               self.start_url = next_page_url
               self.run()  # recurse with the updated start URL; fine for a few hundred pages
           else:
               # an unexpected next-page URL usually means a verification page; log it instead of recursing
               print('unexpected next-page URL, possibly a verification page: %s' % next_page_url)
       else:
           print('crawling finished')  # no next-page link: this was the last page


   def get_urls(self,data):
       urls=data.xpath("//div[@class='info-primary']/h3[@class='name']/a/@href")
       for url in urls:
           url=self.add_url+url
           self.get_detail_page(url)

   def get_detail_page(self,url):
       print(url)  # placeholder: only prints the URL for now; a fleshed-out sketch follows below

if __name__=="__main__":
   zhipin=ZhiPin()
   zhipin.run()

Note that the code above doesn't scrape the actual content of the detail pages; it only steps into them as a test. If you need the fields themselves, you'll have to write that part; a rough sketch follows.
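For reference, here is one shape a fleshed-out get_detail_page could take, written as a standalone function. All of the XPath selectors are assumptions about the detail-page markup, not verified against the live site, so check them in a browser first:

import requests
from lxml import etree

def get_detail_page(url, headers):
    # fetch one job detail page and pull out a few fields;
    # every selector below is a hypothetical guess at the markup
    response = requests.get(url, headers=headers)
    data = etree.HTML(response.text)
    title = data.xpath("//div[@class='name']/h1/text()")         # hypothetical selector
    salary = data.xpath("//span[@class='salary']/text()")        # hypothetical selector
    description = data.xpath("//div[@class='job-sec']//text()")  # hypothetical selector
    return {
        'url': url,
        'title': title[0].strip() if title else '',
        'salary': salary[0].strip() if salary else '',
        'description': ''.join(s.strip() for s in description),
    }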
