Small Crawler Scripts

# coding: utf-8

import requests
from lxml import etree

# Build the target URLs: pages 2-4, plus the first (unnumbered) listing page
start_url = "https://www.liuxue86.com/zhufuyu/chunjiezhufuyu/{}/html"
target_url = [start_url.format(x) for x in range(2, 5)]
target_url.append("https://www.liuxue86.com/zhufuyu/chunjiezhufuyu/")

# Set the request headers
headers = {
    'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.101 Safari/537.36"
}

# Send a request to each listing page
for url in target_url:
    response = requests.get(url, headers=headers)
    # Parse the returned HTML
    html = etree.HTML(response.text)
    # Extract the article links from the listing
    count_link = html.xpath("//ul[@class='grid_list']/li/a/@href")
    for link_url in count_link:
        response1 = requests.get(link_url, headers=headers)
        response1.encoding = 'utf-8'
        html2 = etree.HTML(response1.text)
        # Every <p> after the first one in the article body holds a greeting
        content = html2.xpath("//div[@id='article-content']/p[position()>1]")
        for i in content:
            # string(.) flattens the paragraph's text, nested tags included
            cont = i.xpath("string(.)")
            print(cont)
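
As written, each requests.get call will block indefinitely on a slow server and raise an unhandled exception on a dead link, killing the whole crawl. Below is a minimal hardening sketch; fetch_html is a hypothetical helper and the 10-second timeout is an arbitrary choice, neither is part of the original script:

# Hedged sketch: fetch and parse one page, skipping failures.
# fetch_html and the timeout value are illustrative, not from the original.
import requests
from lxml import etree

def fetch_html(url, headers=None, timeout=10):
    try:
        resp = requests.get(url, headers=headers, timeout=timeout)
        resp.raise_for_status()  # turn 4xx/5xx responses into exceptions
        resp.encoding = 'utf-8'
        return etree.HTML(resp.text)
    except requests.RequestException as exc:
        print("skipping {}: {}".format(url, exc))
        return None

Each requests.get(...) above could then be routed through this helper, e.g. html = fetch_html(url, headers=headers), skipping the page whenever it returns None.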



# coding: utf-8
'''
2. Fetch all Korean reading materials from http://kr.tingroom.com/yuedu/
'''
import requests
from lxml import etree

# Fetch the listing page of Korean reading articles
response = requests.get('http://kr.tingroom.com/yuedu/hysjyd/')
content = etree.HTML(response.text)

# Collect the per-article links from the list
urls = content.xpath("//ul[@class='e2']/li/a/@href")
print(urls)

# Visit each article and print its full text
for url in urls:
    response = requests.get(url)
    response.encoding = 'utf-8'
    content = etree.HTML(response.text)
    txt = content.xpath("//div[@id='article']")[0].xpath("string(.)")
    print(txt)
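
Both scripts lean on the XPath string(.) function, which concatenates every text node under the context element, so nested tags such as <b> or <a> do not break the extracted text. A self-contained illustration (the HTML snippet below is made up):

# Toy example of string(.); the markup is invented for illustration.
from lxml import etree

doc = etree.HTML("<div id='article'><p>an <b>nyeong</b></p><p>haseyo</p></div>")
node = doc.xpath("//div[@id='article']")[0]
print(node.xpath("string(.)"))  # prints "an nyeonghaseyo"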
