爬虫相关:selenium+phantomjs+lxml练手小程序

当爬取动态页面(很多数据通过js获取的)的时候,如果仅想使用原生态的urllib,requests或者scrapy等技术,就无能为力了,此时需要借助于渲染js的方法。这种情况下,有一个比较好用的工具组合,那就是selenium与phantomjs的组合。

以下程序权当练手且防止遗忘,以供以后参考。

注意点:程序结束前一定要调用 driver.quit() 关闭 phantomjs(最好放在 try/finally 里,保证发生异常时也能执行),否则 phantomjs 进程会一直驻留,占用大量内存。

片段1

#coding=utf-8
from selenium import webdriver
from selenium.webdriver.remote.webelement import WebElement
from lxml import etree

driver = webdriver.PhantomJS(executable_path="phantomjs")
print "0"
driver.get("http://isplus.live.joins.com/photo/isplusgallery/isplus_photoslide.asp?gid=656")
# driver.get("http://www.baidu.com")
# driver.get("http://www.csdn.net")
# print driver.page_source

photo_pics = driver.find_elements_by_xpath("//span[@class='photo']/a/img")
# photos = driver.find_elements_by_tag_name()
# title = driver.title
# driver.save_screenshot("isplus.png")
# print title
print len(photo_pics)
# for photo in photo_pics:
#     print photo.get_attribute("src")
print "------------------"
# driver.execute_script("load_isplusgallery_film('next')")
# photo_pics = driver.find_elements_by_xpath("//span[@class='photo']/a/img")
# print len(photo_pics)
# for photo in photo_pics:
#     print photo.get_attribute("src")
#
channel = driver.find_element_by_xpath("//div[@id='gnb_isplus']/ul/li[@class='on']/a").text
print "channel:",channel

img = driver.find_element_by_xpath("//span[@id='img_area']/strong/em/img")
print img.get_attribute("src")

img_desc_span = driver.find_element_by_xpath("//div[@class='slideview slideview_v3']/div[@id='isplus_slide_bd']/span[@id='img_area']/a[@class='txt']")
# img_desc = driver.find_element_by_xpath("//span[@id='img_area']/a[@class='txt']").text
# print "图片描述:" + str(img_desc)
print isinstance(img_desc_span, WebElement)
print type(img_desc_span)
print img_desc_span.tag_name
print img_desc_span.text

# for i in range(1,100):
#     # driver.execute_script("getArticleContent('" + str(i + 1) + "')")
#     driver.find_element_by_id("btn_next").click()
#     img = driver.find_element_by_xpath("//span[@id='img_area']/strong/em/img")
#     print img.get_attribute("src")

pic_num = int(driver.find_element_by_id("subject_allcnt").text)
print pic_num

driver.quit()
print "3"

片段2

#coding=utf-8
from selenium import webdriver
from selenium.webdriver.remote.webelement import WebElement
from lxml import etree

driver = webdriver.PhantomJS(executable_path="phantomjs")
driver.get("http://isplus.live.joins.com/photo/isplusgallery/isplus_photoslide.asp?gid=665")
# driver.get("http://www.baidu.com")
# driver.get("http://www.csdn.net")
html = driver.page_source

html = etree.HTML(html.encode("utf-8"))
print type(html)

# photos_src = html.xpath("//span[@class='photo']/a/img/@src")
# print photos_src
#
# pic_desc = html.xpath("//div[@class='slideview slideview_v3']/div[@id='isplus_slide_bd']/span[@id='img_area']/a[@class='txt']/text()")
# print pic_desc[0].encode("utf-8")

# 正文图片url(大图url)
pictureUrl = []
# 图集图片url(缩略图url)
slidePictureUrl = []
# 图集图片描述
slidePictureDesc = []

imgUrls = html.xpath("//span[@id='img_area']/strong/em/img/@src")
for imgUrl in imgUrls:
    slide_img = imgUrl + ".tn_120.jpg"
    slidePictureUrl.append(slide_img)
img_desc = html.xpath("//div[@class='slideview slideview_v3']/div[@id='isplus_slide_bd']/span[@id='img_area']/a[@class='txt']/text()")
slidePictureDesc.append(img_desc)
print slidePictureDesc

pictureUrl.append(imgUrl)
driver.quit()

你可能感兴趣的:(爬虫相关:selenium+phantomjs+lxml练手小程序)