Example code:
from urllib.request import urlretrieve
from urllib.request import urlopen
from bs4 import BeautifulSoup
import random

urls = []

def create_url(id):
    # Build the comments URL for a given page id and collect it.
    url = "http://jandan.net/ooxx/page-" + str(id) + "#comments"
    urls.append(url)

nums = []
page = int(input("Enter the number of pages to crawl: "))
while page:
    # 107 is assumed to be (roughly) the newest page number on the site,
    # so counting down from it yields the most recent pages.
    num = 107 - page
    nums.append(num)
    page = page - 1

for i in nums:
    create_url(i)

for url in urls:
    html = urlopen(url)
    bsObj = BeautifulSoup(html, "html.parser")
    imageLocation = bsObj.find_all("img")
    # Random starting value so filenames from different runs are unlikely to collide.
    temp = random.uniform(10, 20)
    for img in imageLocation:
        # The src attributes on the page are protocol-relative ("//..."), so prepend "http:".
        location = "http:" + img.get('src')
        print(location)
        temp += 3
        urlretrieve(location, '%s.jpg' % temp)
    print(imageLocation)
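A possible refinement (a sketch under stated assumptions, not part of the original): instead of seeding filenames with a random float, keep a running counter and write the files into a dedicated folder, skipping any <img> tag that lacks a usable src attribute. The folder name "ooxx_images" and the helper name save_images are illustrative choices, not from the original code.

import os
from urllib.request import urlretrieve

def save_images(image_tags, out_dir="ooxx_images"):
    # "ooxx_images" is an assumed folder name; create it if it does not exist yet.
    os.makedirs(out_dir, exist_ok=True)
    count = 0
    for img in image_tags:
        src = img.get('src')
        if not src:
            # Some <img> tags may have no src attribute; skip them.
            continue
        # Protocol-relative links ("//...") need a scheme prepended.
        location = "http:" + src if src.startswith("//") else src
        count += 1
        urlretrieve(location, os.path.join(out_dir, "%d.jpg" % count))
    return count

Called with the result of bsObj.find_all("img"), this would replace the inner download loop and give predictable, sequentially numbered filenames.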