Shanghai Library Genealogy Database - Data Crawler


from selenium import webdriver
import requests
from bs4 import BeautifulSoup
import csv
import ssl
import re

# Disable HTTPS certificate verification for the built-in urllib HTTPS stack.
ssl._create_default_https_context = ssl._create_unverified_context
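The monkey-patch above only affects Python's built-in urllib HTTPS handling; the requests session created later in get_current_data does its own certificate verification. If certificate errors were to appear there as well, a per-session switch (a standard requests feature, shown here only as a minimal sketch) would be the narrower fix:

import requests

session = requests.Session()
session.verify = False    # skip TLS certificate checks for every request made on this session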

def get_newURL(surname):
    # Open the genealogy search page in PhantomJS, submit the surname,
    # and record the URL of the result window that opens.
    browser = webdriver.PhantomJS(executable_path='/Users/viemaxwei/Downloads/phantomjs-2.1.1-macosx/bin/phantomjs')
    browser.get("http://search.library.sh.cn/jiapu/bSearch.htm")
    input_str = browser.find_element_by_name('expr')
    input_str.send_keys(surname)
    browser.find_element_by_xpath('//*[@value="检索"]').click()   # "检索" = the search button
    browser.switch_to.window(browser.window_handles[1])           # results open in a new window
    # time.sleep(1)
    global newurl
    newurl = browser.current_url
    browser.close()
    # return newURL
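get_newURL as written targets an older Selenium release: PhantomJS support and the find_element_by_* helpers were removed in Selenium 4. Purely as a sketch of how the same steps might look on a current Selenium install with headless Firefox (this is not part of the original script, and the query string is just an example):

from selenium import webdriver
from selenium.webdriver.common.by import By

options = webdriver.FirefoxOptions()
options.add_argument("-headless")                          # run without opening a window
browser = webdriver.Firefox(options=options)
browser.get("http://search.library.sh.cn/jiapu/bSearch.htm")
browser.find_element(By.NAME, "expr").send_keys("王氏")     # example query string
browser.find_element(By.XPATH, '//*[@value="检索"]').click()
browser.switch_to.window(browser.window_handles[1])
print(browser.current_url)
browser.quit()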


def get_current_data(url):
    # Download the result page with requests and collect the text of every <td>
    # cell into the global list `data`.
    session = requests.Session()
    headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome",
               "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"}
    html = session.get(url, headers=headers)
    html.encoding = "GBK"                     # the result pages are GBK encoded
    html_code = html.text
    bs = BeautifulSoup(html_code, "lxml")
    content_list = bs.find_all("td")
    content_list_1 = bs.find("h3")            # lowercase tag name so lxml-parsed markup matches
    global data
    data = []
    try:
        for content in content_list:
            content = content.get_text()
            data.append(content)
        # drop empty and placeholder cells
        data = [i for i in data if i not in ('', '*', ' ')]
    except:
        content = content_list_1
        data.append(content)
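get_current_data simply gathers the text of every <td> cell on the result page. That parsing step can be sanity-checked offline against a made-up HTML fragment (the real page markup is only assumed here):

from bs4 import BeautifulSoup

sample_html = "<table><tr><td>王氏家谱</td><td></td><td>*</td><td>10 条</td></tr></table>"  # fabricated fragment
bs = BeautifulSoup(sample_html, "lxml")
cells = [td.get_text() for td in bs.find_all("td")]
cells = [c for c in cells if c not in ('', '*', ' ')]     # same filtering as get_current_data
print(cells)                                              # ['王氏家谱', '10 条']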

def get_next_page(new_url):
    # Open the current result page in Firefox, click the next-page button,
    # and record the URL it navigates to.
    global browser_1
    browser_1 = webdriver.Firefox(executable_path='/Users/viemaxwei/Downloads/geckodriver')
    # browser_1 = webdriver.PhantomJS()
    browser_1.get(new_url)
    browser_1.find_element_by_xpath("//*[@value='下页']").click()   # "下页" = next page
    browser_1.switch_to.window(browser_1.window_handles[0])
    global url_new
    url_new = browser_1.current_url
    browser_1.close()

# def close_all_browsers():
#     browser.quit()
#     browser_1.quit()
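get_next_page clicks the "下页" (next page) button immediately after loading the URL, and the commented-out time.sleep suggests timing was a concern. An explicit wait is the usual Selenium way to make such a click robust; the helper below is only a sketch of that idea, reusing the same XPath:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def click_next_page(driver, timeout=10):
    # wait until the next-page button is present and clickable, then click it
    button = WebDriverWait(driver, timeout).until(
        EC.element_to_be_clickable((By.XPATH, "//*[@value='下页']"))
    )
    button.click()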

with open("/Users/viemaxwei/Downloads/surname.csv", "rt") as sur:
    cin = csv.reader(sur)
    surname = [i for i in cin]
    surname_dict = dict(surname)          # each two-field row becomes a key -> surname pair

surname_set = {}                          # surname -> list of scraped cells
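The rows of surname.csv are passed straight to dict(), so each row is assumed to hold exactly two fields, a key and a surname; the file itself is not shown in the post. A hypothetical file in that shape could be produced like this (the surnames are placeholders only):

import csv

sample_rows = [["1", "王"], ["2", "李"], ["3", "张"]]   # placeholder key/surname pairs
with open("surname_example.csv", "w", newline="") as f:
    csv.writer(f).writerows(sample_rows)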

for t in surname_dict:
    get_newURL(surname_dict[t] + "氏")    # append "氏" (clan suffix) to form the search query
    print("Now searching records for the " + surname_dict[t] + " clan")
    print("-------------------------------------")
    get_current_data(newurl)
    all_data = data.copy()
    # work out how many result pages need to be crawled
    total = all_data[1]
    pattern = re.compile('[0-9]+')
    match = pattern.search(total)
    total_true = int(match.group())
    sheets = total_true // 10 + 1          # assumes 10 records per result page
    print("Fetching page 1 of %d..." % sheets)
    get_next_page(newurl)
    print("Fetching page 2 of %d..." % sheets)
    get_current_data(url_new)
    all_data.extend(data)
    count = 2
    while True:
        try:
            get_next_page(url_new)
            get_current_data(url_new)
            count += 1
            print("Fetching page %d of %d..." % (count, sheets))
            all_data.extend(data)
        except:
            # no next-page button left (or the click failed): save what we have and stop
            surname_set[surname_dict[t]] = all_data
            browser_1.close()
            break
    print("Finished crawling the " + surname_dict[t] + " clan !!!")
    print("--------------------------")
    continue
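The loop above only accumulates everything in the in-memory surname_set dict; the post never shows the results being written out. As one possible follow-up (a sketch only, with a made-up output path), the scraped cells could be dumped to a CSV keyed by surname:

import csv

# Hypothetical: write each surname's scraped cells to /tmp/jiapu_results.csv
with open("/tmp/jiapu_results.csv", "w", newline="") as out:
    writer = csv.writer(out)
    for name, rows in surname_set.items():
        for cell in rows:
            writer.writerow([name, cell])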
