1. A regular expression matches against the raw text and returns every substring that satisfies the pattern.
xpath (via lxml.etree.HTML) turns a string into element objects; it checks whether the content looks like markup, but it cannot verify that the tags are genuine.
BeautifulSoup is a third-party Python library that, like xpath, is used to parse HTML; by comparison xpath is usually faster, because its underlying implementation (lxml) is written in C.
2. The three also differ in syntax: a regular expression uses metacharacters and matches the whole text against the pattern, whereas xpath and bs4 filter the parsed source by condition, locating the target tags through their attributes and then extracting the desired content from those tags (a short comparison sketch follows below).
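To make the comparison concrete, here is a minimal sketch that extracts the same list items with all three approaches; the HTML snippet is made up purely for illustration:

import re
from bs4 import BeautifulSoup
import lxml.etree as etree

html = '<ul id="ul"><li>NO.1</li><li>NO.2</li><li>NO.3</li></ul>'

# Regex: match the raw text directly, with no knowledge of the tag structure.
print(re.findall(r'<li>(.*?)</li>', html))        # ['NO.1', 'NO.2', 'NO.3']

# XPath: parse the string into an element tree, then select by structure.
print(etree.HTML(html).xpath('//li/text()'))      # ['NO.1', 'NO.2', 'NO.3']

# BeautifulSoup: same idea as XPath, but with a Python-level API.
soup = BeautifulSoup(html, 'lxml')
print([li.get_text() for li in soup.find_all('li')])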
XPath (XML Path Language) is a small query language.
Supported input:
XML documents
HTML documents
Navigation is done through elements and attributes.
import lxml.etree as etree

# 1). Convert the HTML text into a form that XPath can parse/match.
# (The tags below are reconstructed from the XPath queries that follow;
#  the href values are placeholders.)
html = """
<html>
 <head><title>xpath测试</title></head>
 <body>
  <div id="content">
   <ul id="ul">
    <li>NO.1</li>
    <li>NO.2</li>
    <li>NO.3</li>
   </ul>
  </div>
  <div id="url">
   <a href="http://example.com/one">one</a>
   <a href="http://example.com/two">two</a>
  </div>
 </body>
</html>
"""
selector = etree.HTML(html)

# 2). Query with XPath.
# //                   : scan the whole document
# //div                : every div element
# //div[@id="content"] : the div whose id attribute is "content"
li_texts = selector.xpath('//div[@id="content"]/ul[@id="ul"]/li/text()')
print(li_texts)
print(type(li_texts))

# Goal: get the href attribute of every a tag inside the div whose id is "url".
hrefs = selector.xpath('//div[@id="url"]/a/@href')
print(hrefs)
Storing data in CSV format: CSV is a common interchange format for spreadsheets and databases. Each record is one line, with the fields separated by a delimiter such as a colon or, in CSV proper, a comma:
xxx:xxx:xxx:xxx
xxx,xxx,xxx,xxx
# Read a CSV file
import csv
with open('some.csv', 'r', newline='') as f:   # newline='' lets the csv module handle line endings itself
    reader = csv.reader(f)
    for row in reader:
        pass  # do something with row, e.g. row[0], row[1]

# Write a CSV file
import csv
with open('some.csv', 'w', newline='') as f:   # newline='' avoids blank lines between rows on some platforms
    writer = csv.writer(f)
    writer.writerows(someiterable)              # someiterable: any iterable of row sequences
import csv

with open('doc/example.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    # Write each row of the list to the CSV file, comma separated.
    writer.writerows([['1', '2', '3'], ['4', '5', '6']])

with open('doc/example.csv', 'r', newline='') as f:
    reader = csv.reader(f)
    for row in reader:
        print(row)
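When the file has a header row, the csv module's DictWriter / DictReader map every row to a dict keyed by the column names. A minimal sketch, assuming a made-up file doc/example_dict.csv and made-up field names:

import csv

fieldnames = ['name', 'num']
with open('doc/example_dict.csv', 'w', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=fieldnames)
    writer.writeheader()                               # write the header row
    writer.writerow({'name': 'python', 'num': '100'})  # each row is a dict

with open('doc/example_dict.csv', 'r', newline='') as f:
    for row in csv.DictReader(f):
        print(row['name'], row['num'])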
Installation
jieba: a module for segmenting (tokenizing) Chinese text;
wordcloud: generates word-cloud images;
pillow: the image-processing library for Python 3;
numpy: numerical arrays, used here to turn the mask image into array data;
matplotlib: plotting;
pip install jieba wordcloud pillow numpy matplotlib
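A quick way to check that everything installed correctly (a minimal sketch, assuming the pip command above has been run):

# Print the version of each library listed above; an ImportError means
# the corresponding package is still missing.
import jieba, numpy, matplotlib, PIL, wordcloud
for mod in (jieba, numpy, matplotlib, PIL, wordcloud):
    print(mod.__name__, getattr(mod, '__version__', 'unknown'))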
Drawing an English word cloud

import re
from PIL import Image
from wordcloud import WordCloud
import numpy as np

# Processing English text for a word cloud is comparatively simple.
# 1). Split and clean the English tokens.
data = []
with open('/tmp/passwd') as f:
    for line in f:
        result1 = re.split(r'\s|:|/', line)
        # Keep an item only if it is non-empty and contains no whitespace or digits.
        result2 = [item for item in result1 if not re.findall(r'\s+|\d+', item) and item]
        data.extend(result2)

# 2). Open the mask image and get its pixel data.
imgObj = Image.open('./doc/wordcloud.jpg')
img_mask = np.array(imgObj)

# 3). Create the word-cloud object and set its attributes.
wcObj = WordCloud(
    mask=img_mask,
    background_color="snow",
    min_font_size=5,
    max_font_size=50,
    width=1000,
    height=1000,
)

# 4). Generate the image.
# generate() expects a single string; join the words with commas so they are treated as separate tokens.
wcObj.generate(",".join(data))
wcObj.to_file('doc/wcObj.png')
Drawing a Chinese word cloud

import jieba
from PIL import Image
from wordcloud import WordCloud
import numpy as np

def gen_wordcloud(text, filename):
    # 1). Tell jieba about words it would otherwise segment incorrectly.
    jieba.suggest_freq(('微博'), True)
    jieba.suggest_freq(('热搜'), True)
    # 2). The hard part is segmenting Chinese text; jieba.lcut returns a list of words.
    result = jieba.lcut(text)
    print(result)

    # 3). Open the mask image and get its pixel data.
    imgObj = Image.open('./doc/wordcloud.jpg')
    img_mask = np.array(imgObj)

    # 4). Create the word-cloud object and set its attributes.
    wcObj = WordCloud(
        mask=img_mask,                 # image mask that shapes the cloud
        background_color="snow",       # background colour
        font_path="/usr/share/fonts/wqy-zenhei/wqy-zenhei.ttc",  # a CJK font is required for Chinese text (fc-list :lang=zh)
        min_font_size=5,               # smallest font size in the image
        max_font_size=50,              # largest font size in the image
        width=1000,                    # image width
        height=1000,                   # image height
    )

    # 5). Generate the image.
    # generate() expects a single string; join the segmented words with commas.
    wcObj.generate(",".join(result))
    wcObj.to_file(filename)

if __name__ == '__main__':
    text = "马云曾公开表态称对钱没兴趣称其从来没碰过钱上了微博热搜"
    filename = 'doc/wcObj.png'
    gen_wordcloud(text, filename)
1. Scraping the course information
import requests
import lxml.etree as etree

def get_content(url):
    # Fetch the page content.
    try:
        user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.109 Safari/537.36"
        response = requests.get(url, headers={'User-Agent': user_agent})
        response.raise_for_status()   # raise an exception if the status code is not 200
        response.encoding = response.apparent_encoding  # detect the page encoding so response.text knows how to decode
    except Exception as e:
        print("爬取错误")
    else:
        print(response.url)
        print("爬取成功!")
        return response.content

def parser_content(html):
    # Parse the page and extract the required information.
    selector = etree.HTML(html)   # convert the page into something XPath can query
    # Pull the fields out of each course card.
    courseDetails = selector.xpath('//div[@class="course-card-container"]')
    for courseDetail in courseDetails:
        name = courseDetail.xpath('.//h3[@class="course-card-name"]/text()')[0]
        studentNum = courseDetail.xpath('.//span/text()')[1]
        courseInfo = courseDetail.xpath('.//p[@class="course-card-desc"]/text()')[0]
        print(name, studentNum, courseInfo)
        courseUrl = 'http://www.imooc.com' + courseDetail.xpath('.//a/@href')[0]
        print(courseUrl)
        courseImgUrl = 'http:' + courseDetail.xpath('.//img/@src')[0]

if __name__ == '__main__':
    url = 'http://www.imooc.com/course/list'
    html = get_content(url)
    parser_content(html)
The same spider, extended to store the scraped data in CSV and JSON format:

import requests
import lxml.etree as etree

def get_content(url):
    # Fetch the page content.
    try:
        user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.109 Safari/537.36"
        response = requests.get(url, headers={'User-Agent': user_agent})
        response.raise_for_status()   # raise an exception if the status code is not 200
        response.encoding = response.apparent_encoding  # detect the page encoding so response.text knows how to decode
    except Exception as e:
        print("爬取错误")
    else:
        print(response.url)
        print("爬取成功!")
        return response.content

def parser_content(html):
    # Parse the page and extract the required information.
    selector = etree.HTML(html)   # convert the page into something XPath can query
    courseinfos = []              # holds the scraped records
    courseDetails = selector.xpath('//div[@class="course-card-container"]')
    for courseDetail in courseDetails:
        name = courseDetail.xpath('.//h3[@class="course-card-name"]/text()')[0]
        studentNum = courseDetail.xpath('.//span/text()')[1]
        courseInfo = courseDetail.xpath('.//p[@class="course-card-desc"]/text()')[0]
        # print(name, studentNum, courseInfo)
        courseUrl = 'http://www.imooc.com' + courseDetail.xpath('.//a/@href')[0]
        # print(courseUrl)
        courseImgUrl = 'http:' + courseDetail.xpath('.//img/@src')[0]
        courseinfos.append((name, studentNum, courseInfo, courseUrl, courseImgUrl))
    return courseinfos

# Save the data in CSV format.
def save_csv(courseInfo):
    import csv
    with open('mooc.csv', 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerows(courseInfo)
    print('csv文件保存成功')

# Save the data in JSON format.
def save_json(courseInfo):
    import json
    with open('mooc.json', 'w', encoding='utf-8') as f:
        for item in courseInfo:
            item = {
                'name': item[0],
                'studentNum': item[1],
                'courseInfo': item[2],
                'courseUrl': item[3],
                'courseImgUrl': item[4]
            }
            # ensure_ascii=False: write non-ASCII characters (e.g. Chinese) as-is instead of escaping them.
            # indent=4: indent by 4 spaces for readability.
            jsonitem = json.dumps(item, ensure_ascii=False, indent=4)
            f.write(jsonitem + '\n')
    print('json文件保存成功')

if __name__ == '__main__':
    url = 'http://www.imooc.com/course/list'
    html = get_content(url)
    courseInfos = parser_content(html)
    print(courseInfos)
    save_json(courseInfos)
    save_csv(courseInfos)
A first version of the analysis step: read mooc.csv and collect the course names and descriptions into one string for the word cloud:

import csv
import re

def anlyseCourse(filename):
    wordcloudString = ''
    with open(filename) as f:
        reader = csv.reader(f)
        # Keep only Chinese characters, letters and digits from the name and description columns.
        pattern = re.compile(r'[\u4e00-\u9fa5]+|[a-zA-Z0-9]+')
        for item in reader:
            name = ''.join(re.findall(pattern, item[0]))
            detail = ''.join(re.findall(pattern, item[2]))
            wordcloudString += name
            wordcloudString += detail
    # Drop generic filler words before building the word cloud.
    print(re.sub(r'学习|使用|入门|基础|实现|掌握|教程', '', wordcloudString))
The complete script: scrape all pages, store the data, and draw the word cloud.

import csv
import re
import jieba
import numpy as np
import requests
import lxml.etree as etree
from PIL import Image
from wordcloud import WordCloud
def get_content(url):
    # Fetch the page content.
    try:
        user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.109 Safari/537.36"
        response = requests.get(url, headers={'User-Agent': user_agent})
        response.raise_for_status()   # raise an exception if the status code is not 200
        response.encoding = response.apparent_encoding  # detect the page encoding so response.text knows how to decode
    except Exception as e:
        print("爬取错误")
    else:
        print(response.url)
        print("爬取成功!")
        return response.content
def parser_content(html):
    # Parse the page and extract the required information.
    selector = etree.HTML(html)   # convert the page into something XPath can query
    courseinfos = []              # holds the scraped records
    courseDetails = selector.xpath('//div[@class="course-card-container"]')
    for courseDetail in courseDetails:
        name = courseDetail.xpath('.//h3[@class="course-card-name"]/text()')[0]
        studentNum = courseDetail.xpath('.//span/text()')[1]
        courseInfo = courseDetail.xpath('.//p[@class="course-card-desc"]/text()')[0]
        courseUrl = 'http://www.imooc.com' + courseDetail.xpath('.//a/@href')[0]
        courseImgUrl = 'http:' + courseDetail.xpath('.//img/@src')[0]
        courseinfos.append((name, studentNum, courseInfo, courseUrl, courseImgUrl))
    return courseinfos
# Save the data in CSV format.
def save_csv(courseInfo):
    with open('mooc.csv', 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerows(courseInfo)
    print('csv文件保存成功')

# Save the data in JSON format.
def save_json(courseInfo):
    import json
    with open('mooc.json', 'w', encoding='utf-8') as f:
        for item in courseInfo:
            item = {
                'name': item[0],
                'studentNum': item[1],
                'courseInfo': item[2],
                'courseUrl': item[3],
                'courseImgUrl': item[4]
            }
            # ensure_ascii=False: write non-ASCII characters (e.g. Chinese) as-is instead of escaping them.
            # indent=4: indent by 4 spaces for readability.
            jsonitem = json.dumps(item, ensure_ascii=False, indent=4)
            f.write(jsonitem + '\n')
    print('json文件保存成功')
def moocSpider():
    # 1). Scrape the first page of course information.
    url = "http://www.imooc.com/course/list"
    html = get_content(url=url)
    courseInfos = parser_content(html)   # list holding the courses from the first page

    # 2). While there is a "下一页" (next page) link, keep scraping;
    #     once there is none, break out of the loop and save the data to file.
    while True:
        # Does the current page have a next page?
        selector = etree.HTML(html)
        nextPage = selector.xpath('//a[contains(text(), "下一页")]/@href')
        print(nextPage)
        # Only scrape the first 2 pages while testing.
        if nextPage and ('3' not in nextPage[0]):
        # if nextPage:
            url = "http://www.imooc.com" + nextPage[0]
            html = get_content(url=url)
            otherCourseInfo = parser_content(html)
            courseInfos += otherCourseInfo   # append the courses of the other pages
        else:
            print("全部爬取结束......")
            break

    # print(courseInfos)
    save_csv(courseInfos)
    save_json(courseInfos)
# 1). The course list spans many pages; both URL patterns work:
#       http://www.imooc.com/course/list?page=1
#       http://www.imooc.com/course/list?page=28
#       http://www.imooc.com/course/list/2
#       http://www.imooc.com/course/list/28
#
# 2). When does scraping stop? When there is no next page:
#       while more pages remain, the page contains a "下一页" link whose href points to the next page;
#       on the last page the "下一页" element no longer carries a usable href.
def anlyseCourse(filename):
    wordcloudString = ''
    with open(filename) as f:
        reader = csv.reader(f)
        # Keep only Chinese characters, letters and digits from the name and description columns.
        pattern = re.compile(r'[\u4e00-\u9fa5]+|[a-zA-Z0-9]+')
        for item in reader:
            name = ''.join(re.findall(pattern, item[0]))
            detail = ''.join(re.findall(pattern, item[2]))
            wordcloudString += name
            wordcloudString += detail
    # Drop generic filler words before building the word cloud.
    return re.sub(r'(学习|使用|入门|基础|实现|掌握|教程)', '', wordcloudString)
def gen_wordcloud(text, filename):
    # 1). Segment the Chinese text into words.
    result = jieba.lcut(text)

    # 2). Open the mask image and get its pixel data.
    imgObj = Image.open('wordcloud.jpg')
    img_mask = np.array(imgObj)

    # 3). Create the word-cloud object and set its attributes.
    wcObj = WordCloud(
        mask=img_mask,                 # image mask that shapes the cloud
        background_color="snow",       # background colour
        font_path="/usr/share/fonts/wqy-zenhei/wqy-zenhei.ttc",  # a CJK font is required for Chinese text (fc-list :lang=zh)
        min_font_size=5,               # smallest font size in the image
        max_font_size=50,              # largest font size in the image
        width=1000,                    # image width
        height=1000,                   # image height
    )

    # 4). Generate the image.
    # generate() expects a single string; join the segmented words with commas.
    wcObj.generate(",".join(result))
    wcObj.to_file(filename)
    print("生成图片%s成功......." % (filename))
if __name__ == '__main__':
    # Scrape the course data.
    moocSpider()
    # Analyse the scraped data and draw the word cloud.
    text = anlyseCourse('mooc.csv')
    filename = 'mooc.png'
    gen_wordcloud(text, filename)