import requests
from bs4 import BeautifulSoup
import traceback
import re

def getHTMLText(url):
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except:
        return ""
def getStockList(lst, stockURL):
    html = getHTMLText(stockURL)
    soup = BeautifulSoup(html, 'html.parser')
    a = soup.find_all('a')
    for i in a:
        try:
            href = i.attrs['href']
            # Regex: starts with 's', then 'h' or 'z', followed by 6 digits
            lst.append(re.findall(r"[s][hz]\d{6}", href)[0])
        except:
            continue
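To see what the pattern matches, here is a minimal, self-contained sketch on made-up hrefs (the URLs are illustrative, not taken from the real page):

import re

hrefs = ['http://quote.eastmoney.com/sh600000.html',   # matches: sh600000
         'http://quote.eastmoney.com/sz000001.html',   # matches: sz000001
         'http://quote.eastmoney.com/about.html']      # no match
for href in hrefs:
    print(re.findall(r"[s][hz]\d{6}", href))
# ['sh600000'] / ['sz000001'] / []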
def getStockInfo(lst, stockURL, fpath):
    for stock in lst:
        url = stockURL + stock + ".html"
        html = getHTMLText(url)
        try:
            if html == "":
                continue
            infoDict = {}
            soup = BeautifulSoup(html, 'html.parser')
            stockInfo = soup.find('div', attrs={'class': 'stock-bets'})
            name = stockInfo.find_all(attrs={'class': 'bets-name'})[0]
            # '股票名称' = stock name; split() drops trailing whitespace-separated parts
            infoDict.update({'股票名称': name.text.split()[0]})
            keyList = stockInfo.find_all('dt')
            valueList = stockInfo.find_all('dd')
            for i in range(len(keyList)):
                key = keyList[i].text
                val = valueList[i].text
                infoDict[key] = val  # add the key/value pair to the dict
            with open(fpath, 'a', encoding='utf-8') as f:
                f.write(str(infoDict) + '\n')
        except:
            traceback.print_exc()  # print the error information
            continue
def main():
    stock_list_url = 'http://quote.eastmoney.com/stock_list.html'
    stock_info_url = 'https://www.laohu8.com/quotes'
    output_file = 'E://BaiduStockInfo.txt'
    slist = []
    getStockList(slist, stock_list_url)
    getStockInfo(slist, stock_info_url, output_file)

main()
Output:
The website issue has not been resolved yet, so there are no results.
def getHTMLText(url, code='utf-8'):  # code defaults to utf-8
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        # r.encoding = r.apparent_encoding
        r.encoding = code  # assign the encoding directly instead of detecting it, for speed
        return r.text
    except:
        return ""
def getStockInfo(lst, stockURL, fpath):
    count = 0
    for stock in lst:
        url = stockURL + stock + ".html"
        html = getHTMLText(url)
        try:
            if html == "":
                continue
            infoDict = {}
            soup = BeautifulSoup(html, 'html.parser')
            stockInfo = soup.find('div', attrs={'class': 'stock-bets'})
            name = stockInfo.find_all(attrs={'class': 'bets-name'})[0]
            # '股票名称' = stock name; split() drops trailing whitespace-separated parts
            infoDict.update({'股票名称': name.text.split()[0]})
            keyList = stockInfo.find_all('dt')
            valueList = stockInfo.find_all('dd')
            for i in range(len(keyList)):
                key = keyList[i].text
                val = valueList[i].text
                infoDict[key] = val  # add the key/value pair to the dict
            with open(fpath, 'a', encoding='utf-8') as f:
                f.write(str(infoDict) + '\n')
                count = count + 1
                # Print the current progress as a percentage.
                # \r moves the cursor back to the start of the line, so the next
                # print overwrites this one (a single updating line, no newline).
                # .2f: float with two decimal places.
                print('\rProgress: {:.2f}%'.format(count * 100 / len(lst)), end='')
        except:
            traceback.print_exc()  # print the error information
            continue
Step 1: create the project and the Spider template
Step 2: write the Spider (stocks.py)
Step 3: write the Item Pipelines (pipelines.py)
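A likely shape for Step 1, assuming the project name BaiduStocks implied by the pipeline path below (the domain passed to genspider is illustrative):

scrapy startproject BaiduStocks
cd BaiduStocks
scrapy genspider stocks quote.eastmoney.com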
Running the Scrapy crawler
stocks.py
# -*- coding: utf-8 -*-
import scrapy
import re


class StockSpider(scrapy.Spider):
    name = 'stocks'
    start_urls = ['http://quote.eastmoney.com/stockslist.html']

    def parse(self, response):
        for href in response.css('a::attr(href)').extract():
            try:
                stock = re.findall(r"[s][hz]\d{6}", href)[0]
                url = 'https://gupiao.baidu.com/stock/' + stock + '.html'
                yield scrapy.Request(url, callback=self.parse_stock)
            except:
                continue

    def parse_stock(self, response):
        infoDict = {}
        stockInfo = response.css('.stock-bets')
        name = stockInfo.css('.bets-name').extract()[0]
        keyList = stockInfo.css('dt').extract()
        valueList = stockInfo.css('dd').extract()
        for i in range(len(keyList)):
            # keyList[i] is the full '<dt ...>key</dt>' HTML string; '>.*'
            # grabs everything from the first '>', and [1:-5] strips the
            # leading '>' and the trailing '</dt>'.
            key = re.findall(r'>.*', keyList[i])[0][1:-5]
            try:
                val = re.findall(r'\d+\.?.*', valueList[i])[0][0:-5]
            except:
                val = '--'
            infoDict[key] = val
        infoDict.update(
            {'股票名称': re.findall(r'\s.*\(', name)[0].split()[0] +
                         re.findall(r'>.*<', name)[0][1:-1]})
        yield infoDict
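The [1:-5] slicing is easier to see on a concrete <dt> string (the tag content here is made up for illustration):

import re

dt = '<dt class="tips-fieldnameL">振幅</dt>'
m = re.findall(r'>.*', dt)[0]   # '>振幅</dt>'
print(m[1:-5])                  # '振幅'  (drop the leading '>' and the trailing '</dt>')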
pipelines.py
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


class BaidustocksPipeline(object):
    def process_item(self, item, spider):
        return item


class BaidustocksInfoPipeline(object):
    def open_spider(self, spider):
        self.f = open('BaiduStockInfo.txt', 'w', encoding='utf-8')

    def close_spider(self, spider):
        self.f.close()

    def process_item(self, item, spider):
        try:
            line = str(dict(item)) + '\n'
            self.f.write(line)
        except:
            pass
        return item
settings.py
Modify this section of the file as follows:
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'BaiduStocks.pipelines.BaidustocksInfoPipeline': 300,
}
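The integer 300 is the pipeline's priority (0-1000; lower values run earlier when several pipelines are enabled). With the pipeline registered, the spider is started from the project's root directory with:

scrapy crawl stocks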
Output:
The website issue has not been resolved yet, so there are no results.
Features and characteristics of the Scrapy crawler:
That wraps up the video-study part!