今天我们来爬取一下极客学院的课程,这次我们用requests和xpath,小伙伴们看好了,这真是一对神奇组合,棒棒哒!
爬取前我们先看一下我们的目标:
1.抓取极客学院的课程
2.抓取每一门课程的课程名称、简介、时长、等级和学习人数
# Demo 1: fetch one course-listing page and dump the raw HTML.
import requests

page = 1
url = 'http://www.jikexueyuan.com/course/?pageNum=' + str(page)
# A timeout keeps the script from hanging forever on a dead server.
html = requests.get(url, timeout=10)
print(html.text)
# Demo 2: parse the downloaded page with lxml and select all course nodes.
from lxml import etree
import requests

page = 1
url = 'http://www.jikexueyuan.com/course/?pageNum=' + str(page)
html = requests.get(url, timeout=10)
selector = etree.HTML(html.text)
# Each <li> under the lesson list is one course card.
content_field = selector.xpath('//div[@class="lesson-list"]/ul/li')
print(content_field)
div[@class="lesson-infor"]/h2[@class="lesson-info-h2"]/a/text() —— 这条 xpath 的含义是:取 lesson-infor 这个 div 盒子下的 h2 标签里 a 标签内的文字,也就是课程名称。
# Demo 3: for every course card, extract the name, summary, duration,
# level and learner count.  (The original paste lost the loop body's
# indentation; restored here.)
for each in content_field:
    # All xpaths below are relative to the current <li> course card.
    title = each.xpath('div[@class="lesson-infor"]/h2[@class="lesson-info-h2"]/a/text()')[0]
    content = each.xpath('div[@class="lesson-infor"]/p/text()')[0].strip()
    classtime = each.xpath('div[@class="lesson-infor"]/div/div/dl/dd[@class="mar-b8"]/em/text()')[0]
    classlevel = each.xpath('div[@class="lesson-infor"]/div/div/dl/dd[@class="zhongji"]/em/text()')[0]
    learnnum = each.xpath('div[@class="lesson-infor"]/div[@class="timeandicon"]/div/em/text()')[0]
    print(title)
    print(content)
    print(classtime)
    print(classlevel)
    print(learnnum)
# -*- coding: utf-8 -*-
# Combined demo: fetch listing page 1 and print every course's
# name, summary, duration, level and learner count.
from lxml import etree
import requests

page = 1
url = 'http://www.jikexueyuan.com/course/?pageNum=' + str(page)
# A timeout keeps the script from hanging forever on a dead server.
html = requests.get(url, timeout=10)
selector = etree.HTML(html.text)
# Each <li> under the lesson list is one course card.
content_field = selector.xpath('//div[@class="lesson-list"]/ul/li')
for each in content_field:
    # All xpaths below are relative to the current <li>.
    title = each.xpath('div[@class="lesson-infor"]/h2[@class="lesson-info-h2"]/a/text()')[0]
    content = each.xpath('div[@class="lesson-infor"]/p/text()')[0].strip()
    classtime = each.xpath('div[@class="lesson-infor"]/div/div/dl/dd[@class="mar-b8"]/em/text()')[0]
    classlevel = each.xpath('div[@class="lesson-infor"]/div/div/dl/dd[@class="zhongji"]/em/text()')[0]
    learnnum = each.xpath('div[@class="lesson-infor"]/div[@class="timeandicon"]/div/em/text()')[0]
    print(title)
    print(content)
    print(classtime)
    print(classlevel)
    print(learnnum)
#_*_coding:utf-8_*_
from lxml import etree
import requests
import sys
# Python 2-only hack: force the interpreter's default string encoding to
# UTF-8 so that writing the scraped unicode text to a file does not raise
# UnicodeEncodeError.  reload() and sys.setdefaultencoding() do not exist
# on Python 3 -- NOTE(review): drop these two lines when porting to Py3
# and pass encoding='utf-8' to open() instead.
reload(sys)
sys.setdefaultencoding("utf-8")
# Append the scraped course information to info.txt.
def saveinfo(classinfo):
    """Append one course record to info.txt.

    classinfo is a dict with 'title', 'content', 'classtime',
    'classlevel' and 'learnnum' string values.  Each field is written
    as a "key:value" line; records are separated by a blank line.
    """
    # "with" guarantees the file is closed even if a write fails
    # (the original opened/closed the file manually and leaked the
    # handle on error).
    with open('info.txt', 'a') as f:
        f.write('title:' + classinfo['title'] + '\n')
        f.write('content:' + classinfo['content'] + '\n')
        f.write('classtime:' + classinfo['classtime'] + '\n')
        f.write('classlevel:' + classinfo['classlevel'] + '\n')
        f.write('learnnum:' + classinfo['learnnum'] + '\n\n')
# Crawler core.
def spider(url):
    """Download one course-listing page and parse every course on it.

    Returns a list of dicts, one per course, with 'title', 'content',
    'classtime', 'classlevel' and 'learnnum' string values.
    """
    # A timeout keeps the crawl from hanging forever on a dead server.
    html = requests.get(url, timeout=10)
    selector = etree.HTML(html.text)
    # Each <li> under the lesson list is one course card; the xpaths
    # inside the loop are relative to it.
    content_field = selector.xpath('//div[@class="lesson-list"]/ul/li')
    info = []
    for each in content_field:
        classinfo = {}
        classinfo['title'] = each.xpath('div[@class="lesson-infor"]/h2[@class="lesson-info-h2"]/a/text()')[0]
        classinfo['content'] = each.xpath('div[@class="lesson-infor"]/p/text()')[0].strip()
        # The duration text contains embedded whitespace; split/join
        # collapses it to a compact string.
        classTime = each.xpath('div[@class="lesson-infor"]/div/div/dl/dd[@class="mar-b8"]/em/text()')[0].split()
        classinfo['classtime'] = ''.join(classTime)
        classinfo['classlevel'] = each.xpath('div[@class="lesson-infor"]/div/div/dl/dd[@class="zhongji"]/em/text()')[0]
        classinfo['learnnum'] = each.xpath('div[@class="lesson-infor"]/div[@class="timeandicon"]/div/em/text()')[0]
        info.append(classinfo)
    return info
if __name__ == '__main__':
    # print(...) with a single argument behaves identically on Python 2
    # (parenthesized expression) and Python 3.
    print(u'开始爬取内容。。。')
    page = []
    # Build the URL of every listing page (pages 1..10).
    for i in range(1, 11):
        newpage = 'http://www.jikexueyuan.com/course/?pageNum=' + str(i)
        print(u"第%d页" % i)
        print(u'正在处理页面:' + newpage)
        page.append(newpage)
    # Crawl each page and persist every course record.  Distinct loop
    # variables here -- the original reused "each" for both pages and
    # course records.
    for pageurl in page:
        info = spider(pageurl)
        for classinfo in info:
            saveinfo(classinfo)