Scraping data behind a captcha-protected login page with selenium + tesserocr

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from PIL import Image,ImageEnhance
import tesserocr
from selenium.common.exceptions import NoSuchElementException
import pymysql

# request headers in requests style; note that webdriver.Chrome() does not consume this dict (see the sketch below)
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36'
}
web_url = "URL of the page to scrape"
browser = webdriver.Chrome()
wait = WebDriverWait(browser, 10)

image_save = "E:\\yanzheng.png"      # full-page screenshot, later overwritten with the cropped captcha
image_deal_save = "E:\\change.png"   # contrast-boosted, binarised and denoised captcha used for OCR
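
The headers dict above follows the requests convention, but webdriver.Chrome() never reads it. If the goal is to browse with that User-Agent, a minimal sketch (assuming ChromeOptions, and not part of the original flow) is to pass it as a command-line argument when the driver is created:

options = webdriver.ChromeOptions()
options.add_argument('user-agent=' + headers['User-Agent'])
browser = webdriver.Chrome(options=options)  # instead of the plain webdriver.Chrome() above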

def remove_noise(image, x, y, width, height):
    # Whiten the pixel at (x, y) if fewer than 5 of the pixels in its 3x3 neighbourhood
    # (the pixel itself included) are black.
    # Note: getpixel takes its coordinates as a tuple
    loc = image.getpixel((x, y))
    # 255 is white, so there is nothing to remove
    if loc == 255:
        return

    loc_x = x
    loc_y = y
    black_num = 0
    for x in range(loc_x - 1, loc_x + 2):
        for y in range(loc_y - 1, loc_y + 2):
            if x >= 0 and y >= 0 and x < width and y < height:
                if image.getpixel((x, y)) == 0:
                    black_num = black_num + 1
    if black_num < 5:
        image.putpixel((loc_x, loc_y), 255)
    return
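
The rule above whitens a pixel when fewer than 5 of the 9 pixels in its 3x3 neighbourhood (itself included) are black. A quick, throwaway check of that behaviour on a hypothetical 3x3 test image:

img = Image.new('1', (3, 3), 255)  # all-white test image
img.putpixel((1, 1), 0)            # a single isolated black pixel
remove_noise(img, 1, 1, 3, 3)      # only 1 black pixel in the neighbourhood (< 5), so it is cleared
print(img.getpixel((1, 1)))        # prints 255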


def deal_yanzheng():
    # The captcha is grabbed by screenshotting the page: every request to the captcha URL returns a different image and reusing cookies does not help, so the rendered element is cut out of a screenshot instead.
    browser.get_screenshot_as_file(image_save)
    location = browser.find_element_by_id("yzm").location
    size = browser.find_element_by_id("yzm").size

    left = location['x']
    top = location['y']
    right = location['x']+size['width']
    bottom = location['y']+size['height']

    image = Image.open(image_save).crop((left, top, right, bottom))
    image.save(image_save)

    image = Image.open(image_save)
    # boost the contrast, then binarise ('1' mode: pure black and white pixels)
    enhancer = ImageEnhance.Contrast(image)
    image = enhancer.enhance(4)

    image.convert('1').save(image_deal_save)

    image = Image.open(image_deal_save)
    width = image.size[0]
    height = image.size[1]

    for x in range(width):
        for y in range(height):
            remove_noise(image, x, y, width, height)

    image.save(image_deal_save)

    image = image.convert("RGB")
    # The OCR output contains newline characters that have to be stripped out
    result = tesserocr.image_to_text(image).replace("\n", "")

    if isinstance(result, str) and len(result) == 4 and result.isalnum():
        return result
    else:
        return
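
deal_yanzheng() relies on tesserocr.image_to_text() and simply rejects anything that is not 4 alphanumeric characters. If recognition is flaky, one possible refinement (a sketch using tesserocr's PyTessBaseAPI; the whitelist and single-line mode are assumptions about the captcha, not taken from the original) is to restrict the characters tesseract may output:

from tesserocr import PyTessBaseAPI, PSM

def ocr_captcha(pil_image):
    # single-line segmentation plus an alphanumeric whitelist often helps with short captchas
    with PyTessBaseAPI(psm=PSM.SINGLE_LINE) as api:
        api.SetVariable("tessedit_char_whitelist",
                        "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
        api.SetImage(pil_image)
        return api.GetUTF8Text().strip()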


def input_yanzheng():
    res = deal_yanzheng()
    while res is None:
        # click the captcha image to load a new one, then try recognition again
        browser.find_element_by_id("yzm").click()
        res = deal_yanzheng()

    input3 = browser.find_element_by_id("code")
    input3.clear()
    input3.send_keys(res)
    browser.find_element_by_class_name("login_btn").click()

    try:
        # if the page reports a wrong captcha, refresh the image and recurse to try again
        codeError = browser.find_element_by_id("codeError").text
        if codeError == "验证码错误!":
            browser.find_element_by_id("yzm").click()
            input_yanzheng()
    except NoSuchElementException:
        pass
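
Both the while loop and the recursive retry in input_yanzheng() can in principle run forever if the captcha is never recognised correctly. A hedged variant that caps the number of attempts (the limit of 5 and the boolean return value are illustrative choices, not from the original):

def input_yanzheng_limited(max_attempts=5):
    for _ in range(max_attempts):
        res = deal_yanzheng()
        if res is None:
            browser.find_element_by_id("yzm").click()  # load a fresh captcha and try again
            continue
        input3 = browser.find_element_by_id("code")
        input3.clear()
        input3.send_keys(res)
        browser.find_element_by_class_name("login_btn").click()
        try:
            error_text = browser.find_element_by_id("codeError").text
        except NoSuchElementException:
            return True  # no error message element, assume the login went through
        if error_text == "验证码错误!":
            browser.find_element_by_id("yzm").click()  # refresh the captcha before the next attempt
    return False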


def find_statistic():
    # Note: webdriver offers both find_elements_by_class_name and find_element_by_class_name;
    # the singular form cannot return a list of matches.
    # When locating an element that carries several classes, prefix each class with a dot,
    # otherwise Selenium rejects it as a "compound class name". Two forms work:
    # 1. "[panel panel-header]"  2. ".panel.panel-header" (no space in between).
    # The page needs time to load, so use wait.until. Clicking panel-header can raise
    # "Other element would receive the click", i.e. the element cannot be clicked, because:
    # 1. it sits under an animated overlay that disappears after a few seconds - wait for it
    #    with wait.until(EC.element_to_be_clickable());
    # 2. the click is a JS-bound event (on the element or a child), so it has to be fired
    #    with a JavaScript click;
    # 3. the element is inside an iframe.
    # As it happens, all three cases show up in this project.
    # XPath can select by position, e.g. the fourth element whose class contains panel-header;
    # note that XPath indices start at 1, and a browser XPath plugin is worth installing.
    # Note: every EC.xxxx() condition takes a locator tuple.
    expand = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="nav"]/div[4]/div[1]')))
    browser.execute_script("arguments[0].click();", expand)
    store = browser.find_element_by_xpath('//*[@id="nav"]/div[4]/div[2]/ul/li[4]/div/a')
    browser.execute_script("arguments[0].click();", store)
    browser.switch_to.frame('iframeName')
    sta = browser.find_element_by_xpath('//*[@id="tabsId"]/div[1]/div[3]/ul/li[11]')
    # After this click the browser opens a new window; use window_handles to list all windows, then switch with switch_to.window()
    browser.execute_script("arguments[0].click();", sta)
    # print(browser.window_handles)
    browser.switch_to.window(browser.window_handles[1])
    browser.maximize_window()
    # Open the data page and read the data. For some reason this page can only be reached by navigating through the menu above; saving the login cookie and visiting the URL directly does not work.
    browser.get("URL of the data page")

if __name__ == "__main__":
    try:
        browser.get(web_url)
        # Be sure to maximize the window, otherwise the captcha screenshot will be incomplete
        browser.maximize_window()

        input1 = browser.find_element_by_id("login-email-address")
        input1.send_keys("admin")
        input2 = browser.find_element_by_id("login-password")
        input2.send_keys("3g2win.com")

        input_yanzheng()
        find_statistic()


    finally:
        # note: close() only closes the current window; browser.quit() would also end the driver session
        browser.close()
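
pymysql is imported at the top but never used in this listing, so presumably the statistics page is parsed and written to MySQL in a later step. A minimal sketch of what that could look like (the connection parameters, the statistics table, its columns and the tr/td layout of the data page are all assumptions, not taken from the target site):

def save_rows():
    rows = browser.find_elements_by_tag_name("tr")  # hypothetical: one <tr> per record on the data page
    conn = pymysql.connect(host="localhost", user="root", password="secret",
                           db="spider", charset="utf8mb4")
    try:
        with conn.cursor() as cursor:
            for row in rows:
                cells = [td.text for td in row.find_elements_by_tag_name("td")]
                if len(cells) >= 2:
                    cursor.execute("INSERT INTO statistics (col1, col2) VALUES (%s, %s)", cells[:2])
        conn.commit()
    finally:
        conn.close()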
