The main functions of the re library:
re.search()    search a string for the first location that matches the regular expression and return a match object
re.match()     match the regular expression from the beginning of a string and return a match object
re.findall()   search a string and return all matching substrings as a list
re.split()     split a string at the regular-expression matches and return a list
re.finditer()  search a string and return an iterable of match results, where each element is a match object
re.sub()       replace every substring in a string that matches the regular expression and return the resulting string
re.search(pattern,string,flags=0)
Example:
import re
match = re.search(r'[1-9]\d{5}', 'BIT 100081')
if match:
    print(match.group(0))   # '100081'
re.match(pattern,string,flags=0)
Example:
import re
match = re.match(r'[1-9]\d{5}', 'BIT 100081')
if match:
    print(match.group(0))   # prints nothing: the string does not start with a zip code, so the match fails
match = re.match(r'[1-9]\d{5}', '100081 BIT')
if match:
    print(match.group(0))   # '100081'
re.findall(pattern,string,flags=0)
Example:
import re
ls = re.findall(r'[1-9]\d{5}', 'BIT100081 TSU100084')
print(ls)   # ['100081', '100084']
re.split(pattern,string,maxsplit=0,flags=0)
Example:
import re
ls = re.split(r'[1-9]\d{5}', 'BIT100081 TSU100084')
print(ls)    # ['BIT', ' TSU', '']
ls2 = re.split(r'[1-9]\d{5}', 'BIT100081 TSU100084', maxsplit=1)
print(ls2)   # ['BIT', ' TSU100084']
re.finditer(pattern,string,flags=0)
Example:
import re
for m in re.finditer(r'[1-9]\d{5}', 'BIT100081 TSU100084'):
    if m:
        print(m.group(0))   # prints 100081, then 100084
re.sub(pattern,repl,string,count=0,flags=0)
Example:
import re
rst = re.sub(r'[1-9]\d{5}', ':zipcode', 'BIT 100081,TSU 100084')
print(rst)   # 'BIT :zipcode,TSU :zipcode'
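The count parameter in the signature limits how many matches are replaced (0 means replace all). A minimal sketch reusing the example above:

import re
rst = re.sub(r'[1-9]\d{5}', ':zipcode', 'BIT 100081,TSU 100084', count=1)
print(rst)   # 'BIT :zipcode,TSU 100084'  (only the first match is replaced)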
Another way to use the re library
The compiled pattern object provides the same methods as the main functions of the re library.
import re

# Functional usage: a one-off operation
rst = re.search(r'[1-9]\d{5}', 'BIT 100081')

# Object-oriented usage: compile once, then run many operations on the compiled object
pat = re.compile(r'[1-9]\d{5}')
rst = pat.search('BIT 100081')
re.compile(pattern,flags=0)
regex = re.compile(r'[1-9]\d{5}')
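For illustration, here is a minimal sketch (reusing the zip-code pattern above) of calling methods on the compiled object directly; they mirror the module-level functions listed in the table at the top of this section:

import re

regex = re.compile(r'[1-9]\d{5}')            # compile the pattern once
print(regex.search('BIT 100081').group(0))   # '100081'
print(regex.findall('BIT100081 TSU100084'))  # ['100081', '100084']
print(regex.sub(':zipcode', 'BIT 100081'))   # 'BIT :zipcode'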
The match object of the re library
import re
match = re.search(r'[1-9]\d{5}', 'BIT 100081')
if match:
    print(match.group(0))   # '100081'
    print(type(match))      # <class 're.Match'> (older Python versions show <class '_sre.SRE_Match'>)
Attributes of the match object
.string    the text that was searched
.re        the pattern object (regular expression) used in the match
.pos       the start position of the regular-expression search in the text
.endpos    the end position of the regular-expression search in the text
Methods of the match object
.group(0)  return the matched string
.start()   the start position of the match in the original string
.end()     the end position of the match in the original string
.span()    return the tuple (.start(), .end())
import re
m = re.search(r'[1-9]\d{5}', 'BIT100081 TSU100084')
print(m.string)    # BIT100081 TSU100084
print(m.re)        # re.compile('[1-9]\\d{5}')
print(m.pos)       # 0
print(m.endpos)    # 19
print(m.group(0))  # '100081' (only the first match is returned; use re.finditer() to get all matches)
print(m.start())   # 3
print(m.end())     # 9
print(m.span())    # (3, 9)
Greedy matching and minimal matching in the re library
The re library uses greedy matching by default, i.e. it returns the longest substring that matches.
import re
match = re.search(r'PY.*N', 'PYANBNCNDN')
print(match.group(0))   # PYANBNCNDN
Minimal (non-greedy) matching:
import re
match = re.search(r'PY.*?N', 'PYANBNCNDN')
print(match.group(0))   # PYAN
Minimal matching operators
*?      zero or more repetitions of the preceding character, matching as few as possible
+?      one or more repetitions of the preceding character, matching as few as possible
??      zero or one repetition of the preceding character, matching as few as possible
{m,n}?  m to n repetitions of the preceding character (n included), matching as few as possible
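For illustration, a minimal sketch (with made-up sample strings) showing that the other minimal-match operators behave the same way as *? above:

import re

print(re.search(r'PY.+?N', 'PYANBNCNDN').group(0))   # 'PYAN'  (+? stops at the first N)
print(re.search(r'\d{2,4}?', '123456').group(0))     # '12'    ({2,4}? takes the minimum of 2 digits)
print(re.search(r'PYA??', 'PYA').group(0))           # 'PY'    (?? prefers zero occurrences of A)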
re library example: a focused crawler for Taobao product price comparison
Functional description: for a given search keyword, fetch the Taobao search-result pages (https://s.taobao.com/search?q=...), extract the name and price of every listed product, and print them; the technical route is requests + re.
Program structure:
Step 1: submit the search request and fetch the result page (getHTMLText)
Step 2: parse the returned page with regular expressions and extract the product prices and names (parsePage)
Step 3: print the product information to the screen (printGoodsList)
import requests
import re

def getHTMLText(url):
    # User-Agent from the browser's request headers; it identifies the client making the request (see below for how to obtain it)
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'}
    try:
        # cookie from the browser's request headers; it carries your own login information (see below for how to obtain it)
        coo = ''
        cookies = {}
        for line in coo.split(';'):   # disguise the request as coming from a logged-in browser
            name, value = line.strip().split('=', 1)
            cookies[name] = value
        r = requests.get(url, cookies=cookies, headers=headers, timeout=30)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except:
        return ""

# Parse the fetched page and extract the price and name of each product
def parsePage(ilt, html):
    try:
        plt = re.findall(r'\"view_price\"\:\"[\d\.]*\"', html)
        tlt = re.findall(r'\"raw_title\"\:\".*?\"', html)
        for i in range(len(plt)):
            price = eval(plt[i].split(':')[1])
            title = eval(tlt[i].split(':')[1])
            ilt.append([price, title])
    except:
        print("")

def printGoodsList(ilt):
    tplt = "{:4}\t{:8}\t{:16}"
    print(tplt.format("序号", "价格", "商品名称"))   # column headers: No., price, product name
    count = 0
    for g in ilt:
        count = count + 1
        print(tplt.format(count, g[0], g[1]))

def main():
    goods = '书包'   # search keyword ("schoolbag")
    depth = 2        # crawl depth: 2 means two result pages are fetched
    start_url = 'https://s.taobao.com/search?q=' + goods
    infoList = []
    for i in range(depth):
        try:
            url = start_url + '&s=' + str(44*i)   # each result page holds 44 items, selected via the s parameter
            html = getHTMLText(url)
            parsePage(infoList, html)
        except:
            continue
    printGoodsList(infoList)

main()
Note that Taobao itself has anti-crawler measures, so when fetching the page with the requests library's get() method you must include your local cookie information; otherwise Taobao returns an error page and no data can be extracted.
The coo variable in the code has to be filled with the cookie from your own browser. To obtain it, press F12 in the browser, open the Network tab in the window that appears, search for "书包", locate the request URL (usually the first entry), click the request and find the Request Headers section under Headers on the right; copy the User-Agent and cookie fields from there into the corresponding places in the code.
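To make the expected format concrete, here is a minimal sketch (with made-up placeholder cookie names and values) of how the coo string is turned into the cookies dict inside getHTMLText:

coo = 'thw=cn; t=0123456789abcdef; _tb_token_=abc123'   # placeholder values only, not a real cookie
cookies = {}
for line in coo.split(';'):
    name, value = line.strip().split('=', 1)
    cookies[name] = value
print(cookies)   # {'thw': 'cn', 't': '0123456789abcdef', '_tb_token_': 'abc123'}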
re library example: a focused stock-data crawler
Functional description: obtain the codes of the listed Shanghai and Shenzhen stocks, fetch each stock's information page, and save the stock name together with the other listed fields to a local file; the technical route is requests + bs4 + re.
Candidate data sites: East Money (https://quote.eastmoney.com/stocklist.html) supplies the list of stock codes, and Baidu Stocks (https://gupiao.baidu.com/stock/) supplies each stock's detail page, whose URL is simply the stock code appended to a fixed prefix.
Program structure:
Step 1: extract the stock codes from the list page (getStockList)
Step 2: for each code, fetch the detail page and extract the stock name and the other fields (getStockInfo)
Step 3: append each stock's information to the output file
Initial version of the code (error)
import requests
from bs4 import BeautifulSoup
import traceback
import re

def getHTMLText(url):
    try:
        r = requests.get(url)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except:
        return ""

def getStockList(lst, stockURL):
    html = getHTMLText(stockURL)
    soup = BeautifulSoup(html, 'html.parser')
    a = soup.find_all('a')
    for i in a:
        try:
            href = i.attrs['href']
            lst.append(re.findall(r"[s][hz]\d{6}", href)[0])
        except:
            continue

def getStockInfo(lst, stockURL, fpath):
    for stock in lst:
        url = stockURL + stock + ".html"
        html = getHTMLText(url)
        try:
            if html == "":
                continue
            infoDict = {}
            soup = BeautifulSoup(html, 'html.parser')
            stockInfo = soup.find('div', attrs={'class': 'stock-bets'})
            name = stockInfo.find_all(attrs={'class': 'bets-name'})[0]
            infoDict.update({'股票名称': name.text.split()[0]})
            keyList = stockInfo.find_all('dt')
            valueList = stockInfo.find_all('dd')
            for i in range(len(keyList)):
                key = keyList[i].text
                val = valueList[i].text
                infoDict[key] = val
            with open(fpath, 'a', encoding='utf-8') as f:
                f.write(str(infoDict) + '\n')
        except:
            traceback.print_exc()
            continue

def main():
    stock_list_url = 'https://quote.eastmoney.com/stocklist.html'
    stock_info_url = 'https://gupiao.baidu.com/stock/'
    output_file = 'D:/BaiduStockInfo.txt'
    slist = []
    getStockList(slist, stock_list_url)
    getStockInfo(slist, stock_info_url, output_file)

main()
Code optimization (error)
Speed improvement: optimizing encoding detection. r.apparent_encoding guesses the encoding by analysing the page content, which is slow when it runs for every page; because the encodings of the target pages are known in advance, they are passed to getHTMLText directly instead (the stock-list page is GB2312, the info pages keep the utf-8 default).
import requests
from bs4 import BeautifulSoup
import traceback
import re

def getHTMLText(url, code="utf-8"):
    try:
        r = requests.get(url)
        r.raise_for_status()
        r.encoding = code
        return r.text
    except:
        return ""

def getStockList(lst, stockURL):
    html = getHTMLText(stockURL, "GB2312")
    soup = BeautifulSoup(html, 'html.parser')
    a = soup.find_all('a')
    for i in a:
        try:
            href = i.attrs['href']
            lst.append(re.findall(r"[s][hz]\d{6}", href)[0])
        except:
            continue

def getStockInfo(lst, stockURL, fpath):
    count = 0
    for stock in lst:
        url = stockURL + stock + ".html"
        html = getHTMLText(url)
        try:
            if html == "":
                continue
            infoDict = {}
            soup = BeautifulSoup(html, 'html.parser')
            stockInfo = soup.find('div', attrs={'class': 'stock-bets'})
            name = stockInfo.find_all(attrs={'class': 'bets-name'})[0]
            infoDict.update({'股票名称': name.text.split()[0]})
            keyList = stockInfo.find_all('dt')
            valueList = stockInfo.find_all('dd')
            for i in range(len(keyList)):
                key = keyList[i].text
                val = valueList[i].text
                infoDict[key] = val
            with open(fpath, 'a', encoding='utf-8') as f:
                f.write(str(infoDict) + '\n')
                count = count + 1
                print("\r当前进度: {:.2f}%".format(count*100/len(lst)), end="")
        except:
            count = count + 1
            print("\r当前进度: {:.2f}%".format(count*100/len(lst)), end="")
            continue

def main():
    stock_list_url = 'https://quote.eastmoney.com/stocklist.html'
    stock_info_url = 'https://gupiao.baidu.com/stock/'
    output_file = 'D:/BaiduStockInfo.txt'
    slist = []
    getStockList(slist, stock_list_url)
    getStockInfo(slist, stock_info_url, output_file)

main()
Successfully tested code
Because accessing the East Money link produced errors, a different site is used to obtain the stock list; the full code is as follows:
import requests
import re
import traceback
from bs4 import BeautifulSoup
import bs4

def getHTMLText(url):
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except:
        return ""

def getStockList(lst, stockListURL):
    html = getHTMLText(stockListURL)
    soup = BeautifulSoup(html, 'html.parser')
    a = soup.find_all('a')
    lst = []
    for i in a:
        try:
            href = i.attrs['href']
            lst.append(re.findall(r"[S][HZ]\d{6}", href)[0])
        except:
            continue
    lst = [item.lower() for item in lst]   # convert the extracted codes to lower case
    return lst

def getStockInfo(lst, stockInfoURL, fpath):
    count = 0
    for stock in lst:
        url = stockInfoURL + stock + ".html"
        html = getHTMLText(url)
        try:
            if html == "":
                continue
            infoDict = {}
            soup = BeautifulSoup(html, 'html.parser')
            stockInfo = soup.find('div', attrs={'class': 'stock-bets'})
            if isinstance(stockInfo, bs4.element.Tag):   # make sure the div was actually found
                name = stockInfo.find_all(attrs={'class': 'bets-name'})[0]
                infoDict.update({'股票名称': name.text.split('\n')[1].replace(' ', '')})
                keylist = stockInfo.find_all('dt')
                valuelist = stockInfo.find_all('dd')
                for i in range(len(keylist)):
                    key = keylist[i].text
                    val = valuelist[i].text
                    infoDict[key] = val
                with open(fpath, 'a', encoding='utf-8') as f:
                    f.write(str(infoDict) + '\n')
            count = count + 1
            print("\r当前速度:{:.2f}%".format(count*100/len(lst)), end="")
        except:
            count = count + 1
            print("\r当前速度:{:.2f}%".format(count*100/len(lst)), end="")
            traceback.print_exc()
            continue

def main():
    fpath = 'D://gupiao.txt'
    stock_list_url = 'https://hq.gucheng.com/gpdmylb.html'
    stock_info_url = 'https://gupiao.baidu.com/stock/'
    slist = []
    stock_list = getStockList(slist, stock_list_url)
    getStockInfo(stock_list, stock_info_url, fpath)

main()