Steps for a simple page scrape:
1) Inspect the page elements
2) Open the link with urllib2 (in Python 3: from urllib import request)
3) Read the response text
4) Use re to match the target content and store each match in a list
Code:
import re
from urllib import request

def spider_mm():
    # Open the listing page and decode the response
    req = request.urlopen('http://www.umei.cc/meinvtupian/')
    buf = req.read().decode('utf-8')
    # print(buf)
    # Non-greedy match, so a line containing several images yields several urls
    list_url = re.findall(r'src=.+?\.jpg', buf)
    # print(list_url)
    # Strip the leading src=" from every match
    for i in range(len(list_url)):
        list_url[i] = re.sub(r'src="', '', list_url[i])
        print(list_url[i])
    i = 0
    for url in list_url:  # fetch each image and save it locally
        f = open('D:\\mooc\\' + str(i) + '.jpg', 'wb+')
        # req = request.urlopen('http:' + url)  # prepend http: when the url is protocol-relative
        req = request.urlopen(url)
        buf = req.read()
        f.write(buf)
        f.close()
        i += 1

spider_mm()
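One fragile spot in the loop above: if any matched url is protocol-relative (starts with //) or simply dead, urlopen raises and the whole run stops. A minimal hardened sketch of the same download step, assuming the same list_url; the url normalization and the skip-on-error behavior are my additions, not part of the original:

from urllib import request

def download_images(list_url, folder='D:\\mooc\\'):
    for i, url in enumerate(list_url):
        if url.startswith('//'):   # protocol-relative link: prepend a scheme first
            url = 'http:' + url
        try:
            buf = request.urlopen(url).read()
        except Exception as e:     # skip broken links instead of crashing the loop
            print('skipping', url, e)
            continue
        with open(folder + str(i) + '.jpg', 'wb') as f:
            f.write(buf)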
The second example scrapes the images from a single news page: http://news.jxufe.cn/news-show-94934.html
import urllib.request
import re
import os
# Import the time module: used for timestamp-based image names and for sleeping between requests
import time

# Open the page we want to scrape
ur = urllib.request.urlopen("http://news.jxufe.cn/news-show-94934.html")
# Read the raw response bytes
content = ur.read()
# Decode the bytes with the page's encoding
mystr = content.decode("utf8")
# Close the response object
ur.close()
# The images on this page appear in two different markup forms (the original
# example snippets were lost from these notes), so we look for one regular
# expression that matches both.
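Since the two concrete forms are missing above, the following continuation is only a hypothetical sketch: it assumes both forms reduce to a src="...jpg" attribute, and the img_ filename prefix is likewise an assumption. It carries on from the mystr variable read above:

# Assumed common pattern: any src attribute that ends in .jpg
pic_urls = re.findall(r'src="([^"]+?\.jpg)"', mystr)
for url in pic_urls:
    # timestamp-based filename, which is what the time import above is for
    name = 'D:\\mooc\\img_' + str(int(time.time() * 1000)) + '.jpg'
    with open(name, 'wb') as f:
        f.write(urllib.request.urlopen(url).read())
    time.sleep(0.5)  # pause between requests (also keeps the timestamps unique)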
Note: create a mooc folder on the D drive first; every scraped image is saved into it.
The crawler below supports two modes (the keyword-encoding step is sketched right after this list):
1. Enter a keyword and crawl the Baidu image results for that keyword
2. Enter a page url directly
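In keyword mode the search word must be percent-encoded before it is appended to the Baidu url. A quick illustration of what urllib.parse.quote produces for an ASCII and for a Chinese keyword:

import urllib.parse

# ASCII keywords pass through unchanged
print(urllib.parse.quote('iu', safe='/'))    # iu
# Non-ASCII keywords are percent-encoded as UTF-8 bytes
print(urllib.parse.quote('美女', safe='/'))  # %E7%BE%8E%E5%A5%B3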
# -*- coding: utf-8 -*-
"""Download Baidu images for a given search keyword."""
import re
import urllib.parse

import requests


def get_onepage_urls(onepageurl):
    """Return all image urls on one result page plus the url of the next page."""
    if not onepageurl:
        print('Reached the last page, stopping')
        return [], ''
    try:
        html = requests.get(onepageurl)
        html.encoding = 'utf-8'
        html = html.text
    except Exception as e:
        print(e)
        pic_urls = []
        fanye_url = ''
        return pic_urls, fanye_url
    pic_urls = re.findall('"objURL":"(.*?)",', html, re.S)
    # Capture the href of the "下一页" (next page) link
    fanye_urls = re.findall(re.compile(r'<a href="(.*)" class="n">下一页</a>'), html, flags=0)
    fanye_url = 'http://image.baidu.com' + fanye_urls[0] if fanye_urls else ''
    return pic_urls, fanye_url
def down_pic(pic_urls):
    """Download every image in the given list of urls."""
    for i, pic_url in enumerate(pic_urls):
        try:
            pic = requests.get(pic_url, timeout=15)
            string = 'D:\\mooc\\' + str(i + 1) + '.jpg'
            with open(string, 'wb') as f:
                f.write(pic.content)
            print('Downloaded image %s: %s' % (str(i + 1), str(pic_url)))
        except Exception as e:
            print('Failed to download image %s: %s' % (str(i + 1), str(pic_url)))
            print(e)
            continue
if __name__ == '__main__':
    # To scrape a fixed page instead, start from a url such as:
    # url_init = r'http://image.baidu.com/search/index?tn=baiduimage&word= keyword'
    # Enter the keyword whose images you want to crawl
    keyword = 'iu'  # change this to any keyword, exactly as you would search on Baidu Images
    url_init_first = r'http://image.baidu.com/search/flip?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fm=result&fr=&sf=1&fmq=1497491098685_R&pv=&ic=0&nc=1&z=&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&ctd=1497491098685%5E00_1519X735&word='
    url_init = url_init_first + urllib.parse.quote(keyword, safe='/')
    all_pic_urls = []
    onepage_urls, fanye_url = get_onepage_urls(url_init)
    all_pic_urls.extend(onepage_urls)

    fanye_count = 0  # number of result pages crawled so far
    while True:
        onepage_urls, fanye_url = get_onepage_urls(fanye_url)
        fanye_count += 1
        # print('Page %s' % str(fanye_count))
        if fanye_url == '' and onepage_urls == []:
            break
        all_pic_urls.extend(onepage_urls)

    down_pic(list(set(all_pic_urls)))
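As written, the loop keeps following next-page links until Baidu stops returning one, which can mean thousands of downloads; note also that list(set(...)) removes duplicates but scrambles the crawl order, so the numbered filenames do not follow page order. A minimal sketch of a page cap for the same loop; max_pages is my addition and not in the original script:

max_pages = 5  # assumed limit: stop after this many result pages
while fanye_count < max_pages:
    onepage_urls, fanye_url = get_onepage_urls(fanye_url)
    fanye_count += 1
    if fanye_url == '' and onepage_urls == []:
        break
    all_pic_urls.extend(onepage_urls)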