# -*- coding: utf-8 -*-
import requests
import os
import csv
import time
from lxml import etree
def spyder(myUrl):
    """Fetch one news-list page and save each article's title, date and link.

    Args:
        myUrl: URL of a people.com.cn list page (GB2312-encoded HTML).

    Side effects: prints each row and appends it to the CSV via save_file().
    """
    # Pages are served in GB2312; ignore undecodable bytes rather than crash.
    myWebContent = requests.get(myUrl).content.decode("GB2312", 'ignore')
    myHTMLetree = etree.HTML(myWebContent)
    for item in myHTMLetree.xpath("//li"):
        links = item.xpath('./a/@href')
        titles = item.xpath("./a")
        dates = item.xpath("./em")
        # Skip <li> elements that are not article rows (missing link/title/date);
        # the original indexed [0] unconditionally and could raise IndexError.
        if not (links and titles and dates):
            continue
        # xpath() returns a list of attribute strings — take the first element
        # directly instead of str(list) + replace('[','')... string surgery.
        herf_ = 'http://politics.people.com.cn' + links[0]
        row = [titles[0].text, dates[0].text, herf_]
        print(row)
        save_file(row)
def save_file(row_info, path="f:/测试/"):
    """Append one CSV row to <path>people.txt, creating the directory if needed.

    Args:
        row_info: sequence of values written as a single CSV row.
        path: output directory, must end with a path separator.
              Defaults to the original hard-coded location for compatibility.
    """
    # makedirs with exist_ok avoids both the race between the exists() check
    # and mkdir, and the failure when intermediate directories are missing.
    os.makedirs(path, exist_ok=True)
    # newline='' is required by the csv module to avoid blank lines on Windows.
    with open(path + 'people' + '.txt', "a", encoding="utf-8", newline='') as fp:
        csv.writer(fp).writerow(row_info)
    # No explicit close: the with-block closes fp. (The original `fp.close`
    # lacked parentheses and was a no-op anyway.)
if __name__ == '__main__':
    # Crawl list pages index1.html .. index9999.html sequentially.
    for i in range(1, 10000):
        url = 'http://politics.people.com.cn/GB/1024/index' + str(i) + '.html'
        time.sleep(2)  # throttle: be polite to the server between requests
        # spyder() returns None and does its own printing, so the original
        # print(spyder(url)) only emitted a spurious "None" per page.
        spyder(url)