查看某模块路径
# Show metadata plus the list of files installed by the "selenium" package.
pip show --files selenium
文件编码转换
# Recursively convert file NAMES under ydcz_1/ from GBK to UTF-8.
# --notest applies the rename for real (convmv renames; it does not re-encode file contents).
convmv -f GBK -t UTF-8 --notest -r ydcz_1/
查找当前目录下,结尾为.php的文件中,包含字符串“zfb_box”的文件
# Grep *.php files under the current directory for the string "zfb_box".
# Fix: the original opened the pattern with a full-width quote (“zfb_box") — a shell syntax error.
find ./ -name "*.php" | xargs grep "zfb_box"
百度搜索结果URL提取主域名
# Extract the registrable domain from the URL held in CSV column 3.
# Fix: the original terminated the last egrep pattern with a full-width quote (”),
# leaving the double-quoted string unterminated.
cat xinxi_jieguo|awk -F"," '{print $3}'|egrep -o 'http://[^/]*?/' |egrep -o '([a-z0-9_-]{1,32}\.)+([a-z0-9_-]{1,32})((\.[a-z]{2,4})(.[a-z]{1,2})?)'|egrep -o "\.[^\.]*?\.(com\.cn|com|cn|net|org|cc|hk|tv|info|de|tw|wang|kr)$"
统计每个域名流量分发
# Count hits per first-level path for *.1688.com URLs (CSV column 3), sorted descending.
# Fix: escape the dot in "1688.com" so it matches a literal '.' rather than any character.
cat xinxi_jieguo|awk -F"," '{print $3}'|egrep '\.1688\.com'|egrep -o 'http://[^/]*?/[^/]*?/' | sort|uniq -c|sort -nr
两文件按列合并
# Merge two files line-by-line into space-separated columns.
paste -d " " 4+.txt out.txt > hebing.txt
awk计算重复次数
# Count occurrences of each distinct value in column 1.
cat urldata|awk '{a[$1]++}END{for(i in a){print i,a[i]}}'
按列求和
# Sum column 2 over the lines of "test" matching /aaa/.
# Fix: the original closed the awk program with a full-width quote (’) — a shell syntax error.
awk '/aaa/ {sum += $2};END {print sum}' test
按列求和2
# Group by column 1 and print the per-group sum of column 2.
cat test.txt | awk '{s[$1]+=$2}END{for(i in s){print i,s[i]}}'
按行数将一个文件分割成多个,sitemap使用
# Split "file" into chunks of 8142 lines each (e.g. to respect sitemap size limits).
# Fix: "-8142" is the obsolete BSD syntax; POSIX split takes a line count via -l.
split -l 8142 file outfile
查找目录下包含某个字符的文件
# Case-insensitively grep every file under the current directory for "IBM".
# (-r is redundant here: xargs already hands grep individual file names.)
find .|xargs grep -ri "IBM"
python下载图片
#! /usr/bin/env python
#coding=utf-8
"""Download images listed in logo_url.txt (CSV lines: <_>,<id>,<url>) into the cwd."""
import os
try:
    from urllib import urlretrieve          # Python 2
except ImportError:
    from urllib.request import urlretrieve  # Python 3

# Fixes vs. original: "import url lib" typo; dropped the mkdir of os.getcwd(),
# which always exists; narrowed the bare except; no longer shadows builtin id().
filepath = os.getcwd()
x = 1
print(u'爬虫准备就绪...')
for line in open('logo_url.txt'):
    fields = line.strip().split(',')
    img_id = fields[1]
    imgurl = fields[2]
    temp = '%s.jpg' % img_id
    print(u'正在下载第%s张图片' % x)
    print(imgurl)
    try:
        urlretrieve(imgurl, temp)
        x += 1
    except Exception:  # best-effort: skip unreachable URLs and keep downloading
        continue
print(u'图片下载完毕,保存路径为' + filepath)
MD5生成
import hashlib

def md5_hex(src):
    """Return the hexadecimal MD5 digest of *src*.

    Accepts str (encoded as UTF-8 first) or bytes.  Generalizes the original
    fragment, which called m2.update() on an undefined ``src`` variable and
    would fail on Python 3 for text input.
    """
    if isinstance(src, str):
        src = src.encode('utf-8')
    m2 = hashlib.md5()
    m2.update(src)
    return m2.hexdigest()
读取CSV
# coding: utf-8
import csv

def read_csv(path='csv_test.csv'):
    """Print and return every row of *path* as a list of field lists.

    Fixes the original snippet, which used the Python-2-only ``file()``
    builtin and closed the handle manually instead of via a context manager.
    """
    with open(path, 'r') as csvfile:
        rows = list(csv.reader(csvfile))
    for line in rows:
        print(line)
    return rows
写入CSV
# coding: utf-8
import csv

def write_csv(path='csv_test.csv',
              header=(u'姓名', u'年龄', u'电话'),
              rows=((u'小河', u'25', u'1234567'), (u'小芳', u'18', u'789456'))):
    """Write *header* followed by *rows* to *path* as CSV (defaults reproduce
    the original demo data).

    Fixes the original snippet, which used the Python-2-only ``file()``
    builtin; newline='' is the documented way to avoid blank lines on Windows.
    """
    with open(path, 'w', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(list(header))
        writer.writerows([list(r) for r in rows])
删除中文字符串
#coding: utf-8
import sys
import re
# Demo script (Python 2, flattened onto one line by the scrape): builds a mixed
# en/zh/ja/ko sample string, then prints regex matches for non-ASCII bytes
# ([\x80-\xff]), CJK ideographs (U+4E00-U+9FA5), Hangul (U+AC00-U+D7FF),
# Katakana (U+30A0-U+30FF), Hiragana (U+3040-U+309F) and CJK punctuation.
# NOTE(review): relies on reload(sys) + sys.setdefaultencoding('utf8') and the
# Py2 unicode() builtin — Python-2-only; kept byte-identical below.
reload(sys) sys.setdefaultencoding('utf8') s = """ en: Regular expression is a powerful tool for manipulating text. zh: 汉语是世界上最优美的语言,正则表达式是一个很有用的工具 jp: 正規表現は非常に役に立つツールテキストを操作することです。 jp-char: あアいイうウえエおオ kr:정규 표현식은 매우 유용한 도구 텍스트를 조작하는 것입니다. """ print "原始utf8字符" #utf8 print "--------" print repr(s) print "--------\n" #非ansi re_words=re.compile(r"[\x80-\xff]+") m = re_words.search(s,0) print "非ansi字符" print "--------" print m print m.group() print "--------\n" #unicode s = unicode(s) print "原始unicode字符" print "--------" print repr(s) print "--------\n" #unicode chinese re_words = re.compile(u"[\u4e00-\u9fa5]+") m = re_words.search(s,0) print "unicode 中文" print "--------" print m print m.group() res = re.findall(re_words, s) # 查询出所有的匹配字符串 if res: print "There are %d parts:\n"% len(res) for r in res: print "\t",r print print "--------\n" #unicode korean re_words=re.compile(u"[\uac00-\ud7ff]+") m = re_words.search(s,0) print "unicode 韩文" print "--------" print m print m.group() print "--------\n" #unicode japanese katakana re_words=re.compile(u"[\u30a0-\u30ff]+") m = re_words.search(s,0) print "unicode 日文 片假名" print "--------" print m print m.group() print "--------\n" #unicode japanese hiragana re_words=re.compile(u"[\u3040-\u309f]+") m = re_words.search(s,0) print "unicode 日文 平假名" print "--------" print m print m.group() print "--------\n" #unicode cjk Punctuation re_words=re.compile(u"[\u3000-\u303f\ufb00-\ufffd]+") m = re_words.search(s,0) print "unicode 标点符号" print "--------" print m print m.group() print "--------\n"
通过scrapinghub提取代理ip,抓取海外网页
import requests
from requests.auth import HTTPProxyAuth

# Fetch a page through the Scrapinghub Crawlera proxy.
url = "http://m.baidu.com/s?word=seo"
headers = {}
proxy_host = "paygo.crawlera.com"
proxy_auth = HTTPProxyAuth("88aa8b802a7f4626b659dae926ee445b", "")
# Bug fix: the original hard-coded proxy.crawlera.com and called .format() on a
# string with no placeholder, so proxy_host was silently ignored.
proxies = {"http": "http://{0}:8010/".format(proxy_host)}
if url.startswith("https:"):
    # Crawlera wants a plain-http URL plus this header for https targets.
    url = "http://" + url[8:]
    headers["X-Crawlera-Use-HTTPS"] = "1"
r = requests.get(url, headers=headers, proxies=proxies, auth=proxy_auth)
print(r.headers['x-crawlera-slave'])
mysql查询数据
def sql_r_num(sql):
    """Execute *sql* on the module-level connection ``con`` and return the
    first column of the first row (None if the query returns no rows).
    """
    cur = con.cursor()
    try:
        cur.execute(sql)
        row = cur.fetchone()
        # Original indexed row[0] unconditionally and crashed on empty results.
        return row[0] if row else None
    finally:
        cur.close()  # original leaked the cursor

user_nums = sql_r_num("select max(id) from ask_users")
两个时间点之间,随机生成日期
import time
import random

def random_date(start_tuple=(2018, 1, 1, 0, 0, 0, 0, 0, 0),
                end_tuple=(2019, 4, 14, 0, 0, 0, 0, 0, 0)):
    """Return a random 'YYYY-MM-DD HH:MM:SS' string between two struct_time
    style 9-tuples (defaults: 2018-01-01 00:00:00 .. 2019-04-14 00:00:00).

    Fixes vs. original: the range is parameterized instead of hard-coded, the
    float results of mktime() are cast to int before randint() (floats are an
    error on Python 3), and the misleading 1976/1990 comments are gone.
    """
    start = int(time.mktime(start_tuple))  # start of range, epoch seconds
    end = int(time.mktime(end_tuple))      # end of range, epoch seconds
    t = random.randint(start, end)         # uniform random moment in range
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
写入mysql
import sys, time, os, smtplib
import MySQLdb as mdb

# Daily import of SEO metrics; the sql_* value lists are built elsewhere.
mysql_time = time.strftime('%Y-%m-%d', time.localtime(time.time()))
# Bug fix: the original connect() call used full-width quotes around
# "123456", "seo_data" and "utf8" — a SyntaxError.  (Credentials belong in
# config, not source; left inline to preserve the snippet's behavior.)
con = mdb.connect("23.236.79.228", "root", "123456", "seo_data",
                  charset="utf8", unix_socket='/tmp/mysql.sock')
cur = con.cursor()
mysql_haosou_uv = '''INSERT INTO haosou_pc_uv VALUES ("%s",%s)''' % (mysql_time, ','.join(sql_haosou_uv))
mysql_bd_pc_spider = '''INSERT INTO bd_pc_spider VALUES ("%s",%s)''' % (mysql_time, ','.join(sql_bd_pc_spider))
mysql_bd_m_spider = '''INSERT INTO bd_m_spider VALUES ("%s",%s)''' % (mysql_time, ','.join(sql_bd_m_spider))
mysql_bd_pc_uv = '''INSERT INTO bd_pc_uv VALUES ("%s",%s)''' % (mysql_time, ','.join(sql_bd_pc_uv))
mysql_bd_m_uv = '''INSERT INTO bd_m_uv VALUES ("%s",%s)''' % (mysql_time, ','.join(sql_bd_m_uv))
sql_list = [mysql_haosou_uv, mysql_bd_pc_spider, mysql_bd_m_spider, mysql_bd_pc_uv, mysql_bd_m_uv]
for sql in sql_list:
    print('Import:%s' % sql)
    try:
        cur.execute(sql)
        con.commit()
        print('done')
    except Exception:  # narrowed from bare except; still roll back on failure
        con.rollback()
判断字符串是否全部为中文
#coding:utf-8
import sys

def check_contain_chinese(check_str):
    """Return 1 if every character of *check_str* is a CJK ideograph
    (U+4E00..U+9FFF), else 0.  Accepts UTF-8 bytes or unicode text.

    Fixes vs. original: drops the reload(sys)+setdefaultencoding('utf8') hack
    (a well-known Python 2 footgun) and only decodes when given bytes, so the
    function also works on Python 3.
    """
    if isinstance(check_str, bytes):
        check_str = check_str.decode('utf-8')
    return 1 if all(u'\u4e00' <= ch <= u'\u9fff' for ch in check_str) else 0
文本格式化
a = re.sub(r'<(?!p|img|/p)[^<>]*?>','',content).strip() #将除p和img之外的标签清空,且去除正文开头结尾的换行,并把单引号换成双引号
b = re.sub(r'>]*?>','>',a) #格式化p标签 newcontent = re.sub(r'alt="[^"]*?"','alt="%s"' % title,b).lower()
提取字符串中文并计算字数
text = re.sub("[\s+\.\!\/_,$%^*(+\"\']+|[+——!,::。?、~@#¥%……&*()“”《》]+".decode("utf8"), "".decode("utf8"),newcontent) #去除中英文标点符号 text2 = re.sub('<[^>]*?>','',text) #去除所有标签 words_number = len(text2)
unicode字符串转义(Python中,如何将反斜杠u类型(\uXXXX)的字符串,转换为对应的unicode的字符)
# '\uXXXX' escape sequences stored as plain text (literal backslash + u).
slashUStr = "\\u0063\\u0072\\u0069\\u0066\\u0061\\u006E\\u0020\\u5728\\u8DEF\\u4E0A"
# Fix: str.decode("unicode-escape") is Python-2-only; codecs.decode accepts a
# str on both Python 2 and 3.
import codecs
decodedUniChars = codecs.decode(slashUStr, "unicode-escape")
print("decodedUniChars=", decodedUniChars)
json与dict转化
import json

# Fix: the stdlib json module replaces the third-party simplejson dependency
# (same loads/dumps API).  The original referenced an undefined ``user``; a
# sample payload is provided so the snippet actually runs.
user = '{"name": "seo", "id": 1}'
# JSON string -> dict
json_2_dict = json.loads(user)
print(json_2_dict)
# dict -> JSON string
dict_2_jsonstr = json.dumps(json_2_dict)
print(dict_2_jsonstr)
识别客户端类型
# Classify a User-Agent string: returns 'wap' for mobile clients, 'pc' otherwise.
# The regexes are a port of the detectmobilebrowsers.com detector; reg_b scans
# the whole UA, reg_v only its first four characters (vendor prefixes).
# NOTE(review): inside these raw strings "\\d", "\\-", "\\/" denote a literal
# backslash followed by the character, not regex escapes — this looks
# over-escaped relative to the upstream pattern; confirm before trusting the
# matches.  Flattened formatting kept byte-identical below.
def getUA(ua): reg_b = re.compile(r"(android|bb\\d+|meego).+mobile|avantgo|bada\\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\\.(browser|link)|vodafone|wap|windows ce|xda|xiino", re.I|re.M) reg_v = re.compile(r"1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\\-(n|u)|c55\\/|capi|ccwa|cdm\\-|cell|chtm|cldc|cmd\\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\\-s|devi|dica|dmob|do(c|p)o|ds(12|\\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\\-|_)|g1 u|g560|gene|gf\\-5|g\\-mo|go(\\.w|od)|gr(ad|un)|haie|hcit|hd\\-(m|p|t)|hei\\-|hi(pt|ta)|hp( i|ip)|hs\\-c|ht(c(\\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\\-(20|go|ma)|i230|iac( |\\-|\\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\\/)|klon|kpt |kwc\\-|kyo(c|k)|le(no|xi)|lg( g|\\/(k|l|u)|50|54|\\-[a-w])|libw|lynx|m1\\-w|m3ga|m50\\/|ma(te|ui|xo)|mc(01|21|ca)|m\\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\\-2|po(ck|rt|se)|prox|psio|pt\\-g|qa\\-a|qc(07|12|21|32|60|\\-[2-7]|i\\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\\-|oo|p\\-)|sdk\\/|se(c(\\-|0|1)|47|mc|nd|ri)|sgh\\-|shar|sie(\\-|m)|sk\\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\\-|v\\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\\-|tdg\\-|tel(i|m)|tim\\-|t\\-mo|to(pl|sh)|ts(70|m\\-|m3|m5)|tx\\-9|up(\\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\\-| 
)|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\\-|your|zeto|zte\\-", re.I|re.M) b = reg_b.search(ua) v = reg_v.search(ua[0:4]) if b or v: return 'wap' else: return 'pc'
日期遍历
import datetime
import time

def date_range(start, end, only_monday=False, input_format='%y%m%d', output_format='%y%m%d'):
    """Return the list of date strings from *start* to *end* inclusive.

    e.g. date_range(140130, 140202) -> ['140130', '140131', '140201', '140202']
    With only_monday=True, only dates falling on a Monday are kept.

    Fixes vs. original: "import datatime" typo (the function always raised
    NameError on datetime).
    """
    start_dt = datetime.datetime.strptime(str(start), input_format)
    end_dt = datetime.datetime.strptime(str(end), input_format)
    one_day = datetime.timedelta(days=1)
    result = []
    d = start_dt
    while d <= end_dt:
        # strftime('%w') == '1' means Monday
        if not only_monday or d.strftime('%w') == '1':
            result.append(d.strftime(output_format))
        d += one_day
    return result
查看某模块路径
# Show metadata plus the list of files installed by the "selenium" package.
pip show --files selenium
文件编码转换
# Recursively convert file NAMES under ydcz_1/ from GBK to UTF-8.
# --notest applies the rename for real (convmv renames; it does not re-encode file contents).
convmv -f GBK -t UTF-8 --notest -r ydcz_1/
查找当前目录下,结尾为.php的文件中,包含字符串“zfb_box”的文件
# Grep *.php files under the current directory for the string "zfb_box".
# Fix: the original opened the pattern with a full-width quote (“zfb_box") — a shell syntax error.
find ./ -name "*.php" | xargs grep "zfb_box"
百度搜索结果URL提取主域名
# Extract the registrable domain from the URL held in CSV column 3.
# Fix: the original terminated the last egrep pattern with a full-width quote (”),
# leaving the double-quoted string unterminated.
cat xinxi_jieguo|awk -F"," '{print $3}'|egrep -o 'http://[^/]*?/' |egrep -o '([a-z0-9_-]{1,32}\.)+([a-z0-9_-]{1,32})((\.[a-z]{2,4})(.[a-z]{1,2})?)'|egrep -o "\.[^\.]*?\.(com\.cn|com|cn|net|org|cc|hk|tv|info|de|tw|wang|kr)$"
统计每个域名流量分发
# Count hits per first-level path for *.1688.com URLs (CSV column 3), sorted descending.
# Fix: escape the dot in "1688.com" so it matches a literal '.' rather than any character.
cat xinxi_jieguo|awk -F"," '{print $3}'|egrep '\.1688\.com'|egrep -o 'http://[^/]*?/[^/]*?/' | sort|uniq -c|sort -nr
两文件按列合并
# Merge two files line-by-line into space-separated columns.
paste -d " " 4+.txt out.txt > hebing.txt
awk计算重复次数
# Count occurrences of each distinct value in column 1.
cat urldata|awk '{a[$1]++}END{for(i in a){print i,a[i]}}'
按列求和
# Sum column 2 over the lines of "test" matching /aaa/.
# Fix: the original closed the awk program with a full-width quote (’) — a shell syntax error.
awk '/aaa/ {sum += $2};END {print sum}' test
按列求和2
# Group by column 1 and print the per-group sum of column 2.
cat test.txt | awk '{s[$1]+=$2}END{for(i in s){print i,s[i]}}'
按行数将一个文件分割成多个,sitemap使用
# Split "file" into chunks of 8142 lines each (e.g. to respect sitemap size limits).
# Fix: "-8142" is the obsolete BSD syntax; POSIX split takes a line count via -l.
split -l 8142 file outfile
查找目录下包含某个字符的文件
# Case-insensitively grep every file under the current directory for "IBM".
# (-r is redundant here: xargs already hands grep individual file names.)
find .|xargs grep -ri "IBM"
python下载图片
#! /usr/bin/env python
#coding=utf-8
"""Download images listed in logo_url.txt (CSV lines: <_>,<id>,<url>) into the cwd."""
import os
try:
    from urllib import urlretrieve          # Python 2
except ImportError:
    from urllib.request import urlretrieve  # Python 3

# Fixes vs. original: "import url lib" typo; dropped the mkdir of os.getcwd(),
# which always exists; narrowed the bare except; no longer shadows builtin id().
filepath = os.getcwd()
x = 1
print(u'爬虫准备就绪...')
for line in open('logo_url.txt'):
    fields = line.strip().split(',')
    img_id = fields[1]
    imgurl = fields[2]
    temp = '%s.jpg' % img_id
    print(u'正在下载第%s张图片' % x)
    print(imgurl)
    try:
        urlretrieve(imgurl, temp)
        x += 1
    except Exception:  # best-effort: skip unreachable URLs and keep downloading
        continue
print(u'图片下载完毕,保存路径为' + filepath)
MD5生成
import hashlib

def md5_hex(src):
    """Return the hexadecimal MD5 digest of *src*.

    Accepts str (encoded as UTF-8 first) or bytes.  Generalizes the original
    fragment, which called m2.update() on an undefined ``src`` variable and
    would fail on Python 3 for text input.
    """
    if isinstance(src, str):
        src = src.encode('utf-8')
    m2 = hashlib.md5()
    m2.update(src)
    return m2.hexdigest()
读取CSV
# coding: utf-8
import csv

def read_csv(path='csv_test.csv'):
    """Print and return every row of *path* as a list of field lists.

    Fixes the original snippet, which used the Python-2-only ``file()``
    builtin and closed the handle manually instead of via a context manager.
    """
    with open(path, 'r') as csvfile:
        rows = list(csv.reader(csvfile))
    for line in rows:
        print(line)
    return rows
写入CSV
# coding: utf-8
import csv

def write_csv(path='csv_test.csv',
              header=(u'姓名', u'年龄', u'电话'),
              rows=((u'小河', u'25', u'1234567'), (u'小芳', u'18', u'789456'))):
    """Write *header* followed by *rows* to *path* as CSV (defaults reproduce
    the original demo data).

    Fixes the original snippet, which used the Python-2-only ``file()``
    builtin; newline='' is the documented way to avoid blank lines on Windows.
    """
    with open(path, 'w', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(list(header))
        writer.writerows([list(r) for r in rows])
删除中文字符串
#coding: utf-8
import sys
import re
# Demo script (Python 2, flattened onto one line by the scrape): builds a mixed
# en/zh/ja/ko sample string, then prints regex matches for non-ASCII bytes
# ([\x80-\xff]), CJK ideographs (U+4E00-U+9FA5), Hangul (U+AC00-U+D7FF),
# Katakana (U+30A0-U+30FF), Hiragana (U+3040-U+309F) and CJK punctuation.
# NOTE(review): relies on reload(sys) + sys.setdefaultencoding('utf8') and the
# Py2 unicode() builtin — Python-2-only; kept byte-identical below.
reload(sys) sys.setdefaultencoding('utf8') s = """ en: Regular expression is a powerful tool for manipulating text. zh: 汉语是世界上最优美的语言,正则表达式是一个很有用的工具 jp: 正規表現は非常に役に立つツールテキストを操作することです。 jp-char: あアいイうウえエおオ kr:정규 표현식은 매우 유용한 도구 텍스트를 조작하는 것입니다. """ print "原始utf8字符" #utf8 print "--------" print repr(s) print "--------\n" #非ansi re_words=re.compile(r"[\x80-\xff]+") m = re_words.search(s,0) print "非ansi字符" print "--------" print m print m.group() print "--------\n" #unicode s = unicode(s) print "原始unicode字符" print "--------" print repr(s) print "--------\n" #unicode chinese re_words = re.compile(u"[\u4e00-\u9fa5]+") m = re_words.search(s,0) print "unicode 中文" print "--------" print m print m.group() res = re.findall(re_words, s) # 查询出所有的匹配字符串 if res: print "There are %d parts:\n"% len(res) for r in res: print "\t",r print print "--------\n" #unicode korean re_words=re.compile(u"[\uac00-\ud7ff]+") m = re_words.search(s,0) print "unicode 韩文" print "--------" print m print m.group() print "--------\n" #unicode japanese katakana re_words=re.compile(u"[\u30a0-\u30ff]+") m = re_words.search(s,0) print "unicode 日文 片假名" print "--------" print m print m.group() print "--------\n" #unicode japanese hiragana re_words=re.compile(u"[\u3040-\u309f]+") m = re_words.search(s,0) print "unicode 日文 平假名" print "--------" print m print m.group() print "--------\n" #unicode cjk Punctuation re_words=re.compile(u"[\u3000-\u303f\ufb00-\ufffd]+") m = re_words.search(s,0) print "unicode 标点符号" print "--------" print m print m.group() print "--------\n"
通过scrapinghub提取代理ip,抓取海外网页
import requests
from requests.auth import HTTPProxyAuth

# Fetch a page through the Scrapinghub Crawlera proxy.
url = "http://m.baidu.com/s?word=seo"
headers = {}
proxy_host = "paygo.crawlera.com"
proxy_auth = HTTPProxyAuth("88aa8b802a7f4626b659dae926ee445b", "")
# Bug fix: the original hard-coded proxy.crawlera.com and called .format() on a
# string with no placeholder, so proxy_host was silently ignored.
proxies = {"http": "http://{0}:8010/".format(proxy_host)}
if url.startswith("https:"):
    # Crawlera wants a plain-http URL plus this header for https targets.
    url = "http://" + url[8:]
    headers["X-Crawlera-Use-HTTPS"] = "1"
r = requests.get(url, headers=headers, proxies=proxies, auth=proxy_auth)
print(r.headers['x-crawlera-slave'])
mysql查询数据
def sql_r_num(sql):
    """Execute *sql* on the module-level connection ``con`` and return the
    first column of the first row (None if the query returns no rows).
    """
    cur = con.cursor()
    try:
        cur.execute(sql)
        row = cur.fetchone()
        # Original indexed row[0] unconditionally and crashed on empty results.
        return row[0] if row else None
    finally:
        cur.close()  # original leaked the cursor

user_nums = sql_r_num("select max(id) from ask_users")
两个时间点之间,随机生成日期
import time
import random

def random_date(start_tuple=(2018, 1, 1, 0, 0, 0, 0, 0, 0),
                end_tuple=(2019, 4, 14, 0, 0, 0, 0, 0, 0)):
    """Return a random 'YYYY-MM-DD HH:MM:SS' string between two struct_time
    style 9-tuples (defaults: 2018-01-01 00:00:00 .. 2019-04-14 00:00:00).

    Fixes vs. original: the range is parameterized instead of hard-coded, the
    float results of mktime() are cast to int before randint() (floats are an
    error on Python 3), and the misleading 1976/1990 comments are gone.
    """
    start = int(time.mktime(start_tuple))  # start of range, epoch seconds
    end = int(time.mktime(end_tuple))      # end of range, epoch seconds
    t = random.randint(start, end)         # uniform random moment in range
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
写入mysql
import sys, time, os, smtplib
import MySQLdb as mdb

# Daily import of SEO metrics; the sql_* value lists are built elsewhere.
mysql_time = time.strftime('%Y-%m-%d', time.localtime(time.time()))
# Bug fix: the original connect() call used full-width quotes around
# "123456", "seo_data" and "utf8" — a SyntaxError.  (Credentials belong in
# config, not source; left inline to preserve the snippet's behavior.)
con = mdb.connect("23.236.79.228", "root", "123456", "seo_data",
                  charset="utf8", unix_socket='/tmp/mysql.sock')
cur = con.cursor()
mysql_haosou_uv = '''INSERT INTO haosou_pc_uv VALUES ("%s",%s)''' % (mysql_time, ','.join(sql_haosou_uv))
mysql_bd_pc_spider = '''INSERT INTO bd_pc_spider VALUES ("%s",%s)''' % (mysql_time, ','.join(sql_bd_pc_spider))
mysql_bd_m_spider = '''INSERT INTO bd_m_spider VALUES ("%s",%s)''' % (mysql_time, ','.join(sql_bd_m_spider))
mysql_bd_pc_uv = '''INSERT INTO bd_pc_uv VALUES ("%s",%s)''' % (mysql_time, ','.join(sql_bd_pc_uv))
mysql_bd_m_uv = '''INSERT INTO bd_m_uv VALUES ("%s",%s)''' % (mysql_time, ','.join(sql_bd_m_uv))
sql_list = [mysql_haosou_uv, mysql_bd_pc_spider, mysql_bd_m_spider, mysql_bd_pc_uv, mysql_bd_m_uv]
for sql in sql_list:
    print('Import:%s' % sql)
    try:
        cur.execute(sql)
        con.commit()
        print('done')
    except Exception:  # narrowed from bare except; still roll back on failure
        con.rollback()
判断字符串是否全部为中文
#coding:utf-8
import sys

def check_contain_chinese(check_str):
    """Return 1 if every character of *check_str* is a CJK ideograph
    (U+4E00..U+9FFF), else 0.  Accepts UTF-8 bytes or unicode text.

    Fixes vs. original: drops the reload(sys)+setdefaultencoding('utf8') hack
    (a well-known Python 2 footgun) and only decodes when given bytes, so the
    function also works on Python 3.
    """
    if isinstance(check_str, bytes):
        check_str = check_str.decode('utf-8')
    return 1 if all(u'\u4e00' <= ch <= u'\u9fff' for ch in check_str) else 0
文本格式化
a = re.sub(r'<(?!p|img|/p)[^<>]*?>','',content).strip() #将除p和img之外的标签清空,且去除正文开头结尾的换行,并把单引号换成双引号
b = re.sub(r'>]*?>','>',a) #格式化p标签 newcontent = re.sub(r'alt="[^"]*?"','alt="%s"' % title,b).lower()
提取字符串中文并计算字数
text = re.sub("[\s+\.\!\/_,$%^*(+\"\']+|[+——!,::。?、~@#¥%……&*()“”《》]+".decode("utf8"), "".decode("utf8"),newcontent) #去除中英文标点符号 text2 = re.sub('<[^>]*?>','',text) #去除所有标签 words_number = len(text2)
unicode字符串转义(Python中,如何将反斜杠u类型(\uXXXX)的字符串,转换为对应的unicode的字符)
# '\uXXXX' escape sequences stored as plain text (literal backslash + u).
slashUStr = "\\u0063\\u0072\\u0069\\u0066\\u0061\\u006E\\u0020\\u5728\\u8DEF\\u4E0A"
# Fix: str.decode("unicode-escape") is Python-2-only; codecs.decode accepts a
# str on both Python 2 and 3.
import codecs
decodedUniChars = codecs.decode(slashUStr, "unicode-escape")
print("decodedUniChars=", decodedUniChars)
json与dict转化
import json

# Fix: the stdlib json module replaces the third-party simplejson dependency
# (same loads/dumps API).  The original referenced an undefined ``user``; a
# sample payload is provided so the snippet actually runs.
user = '{"name": "seo", "id": 1}'
# JSON string -> dict
json_2_dict = json.loads(user)
print(json_2_dict)
# dict -> JSON string
dict_2_jsonstr = json.dumps(json_2_dict)
print(dict_2_jsonstr)
识别客户端类型
# Classify a User-Agent string: returns 'wap' for mobile clients, 'pc' otherwise.
# The regexes are a port of the detectmobilebrowsers.com detector; reg_b scans
# the whole UA, reg_v only its first four characters (vendor prefixes).
# NOTE(review): inside these raw strings "\\d", "\\-", "\\/" denote a literal
# backslash followed by the character, not regex escapes — this looks
# over-escaped relative to the upstream pattern; confirm before trusting the
# matches.  Flattened formatting kept byte-identical below.
def getUA(ua): reg_b = re.compile(r"(android|bb\\d+|meego).+mobile|avantgo|bada\\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\\.(browser|link)|vodafone|wap|windows ce|xda|xiino", re.I|re.M) reg_v = re.compile(r"1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\\-(n|u)|c55\\/|capi|ccwa|cdm\\-|cell|chtm|cldc|cmd\\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\\-s|devi|dica|dmob|do(c|p)o|ds(12|\\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\\-|_)|g1 u|g560|gene|gf\\-5|g\\-mo|go(\\.w|od)|gr(ad|un)|haie|hcit|hd\\-(m|p|t)|hei\\-|hi(pt|ta)|hp( i|ip)|hs\\-c|ht(c(\\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\\-(20|go|ma)|i230|iac( |\\-|\\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\\/)|klon|kpt |kwc\\-|kyo(c|k)|le(no|xi)|lg( g|\\/(k|l|u)|50|54|\\-[a-w])|libw|lynx|m1\\-w|m3ga|m50\\/|ma(te|ui|xo)|mc(01|21|ca)|m\\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\\-2|po(ck|rt|se)|prox|psio|pt\\-g|qa\\-a|qc(07|12|21|32|60|\\-[2-7]|i\\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\\-|oo|p\\-)|sdk\\/|se(c(\\-|0|1)|47|mc|nd|ri)|sgh\\-|shar|sie(\\-|m)|sk\\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\\-|v\\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\\-|tdg\\-|tel(i|m)|tim\\-|t\\-mo|to(pl|sh)|ts(70|m\\-|m3|m5)|tx\\-9|up(\\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\\-| 
)|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\\-|your|zeto|zte\\-", re.I|re.M) b = reg_b.search(ua) v = reg_v.search(ua[0:4]) if b or v: return 'wap' else: return 'pc'
日期遍历
import datetime
import time

def date_range(start, end, only_monday=False, input_format='%y%m%d', output_format='%y%m%d'):
    """Return the list of date strings from *start* to *end* inclusive.

    e.g. date_range(140130, 140202) -> ['140130', '140131', '140201', '140202']
    With only_monday=True, only dates falling on a Monday are kept.

    Fixes vs. original: "import datatime" typo (the function always raised
    NameError on datetime).
    """
    start_dt = datetime.datetime.strptime(str(start), input_format)
    end_dt = datetime.datetime.strptime(str(end), input_format)
    one_day = datetime.timedelta(days=1)
    result = []
    d = start_dt
    while d <= end_dt:
        # strftime('%w') == '1' means Monday
        if not only_monday or d.strftime('%w') == '1':
            result.append(d.strftime(output_format))
        d += one_day
    return result
转载自:https://www.gogochuang.com/post/58.html