Earlier, JoinQuant (聚宽) did not yet provide historical data for all funds, so I used code found online to scrape the data from Eastmoney (东方财富) with a crawler. There is plenty of such code on the web, but it also contains plenty of errors and pitfalls.
## Reference code: https://www.jianshu.com/p/d79d3cd62560
## Modifications: dates are parsed as datetime, the data is stored in a DataFrame, and the values are float. The page parameter described in the original post is wrong: it is actually the current page number, and a page holds at most 40 records, so multiple pages have to be combined.
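## For reference, a single page of the history API can be requested like this (illustrative sketch using the
## same F10DataApi endpoint as get_fund_data below; 'page' is the 1-based page index, the API returns at most
## 40 records per page, and repeated calls without the rotating headers set up below may get blocked):
import requests
rsp = requests.get('http://fund.eastmoney.com/f10/F10DataApi.aspx',
                   params={'type': 'lsjz', 'code': '163402', 'page': 1, 'per': 40,
                           'sdate': '2019-01-01', 'edate': '2019-12-31'})
print(rsp.text[:200]) # a js snippet containing an HTML table plus the total page count ("pages:N")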
import requests
import pandas as pd
from bs4 import BeautifulSoup
import random
import datetime
import re
import numpy as np
import json
from six import StringIO
from six import BytesIO
## Fetch a proxy from the proxy pool, see https://github.com/1again/SmartProxyPool
def get_proxy():
data_json = requests.get("http://proxy.1again.cc:35050/api/v1/proxy/?region=中国").text
data = json.loads(data_json)
return data['data']['proxy']
def get_url(url, params=None, proxies=None,header=None):
rsp = requests.get(url, params=params, proxies={"http": proxies},headers=header)
rsp.raise_for_status()
return rsp.text
## Fetch the list of all funds
url = 'http://fund.eastmoney.com/js/fundcode_search.js'
html = get_url(url,proxies=None)
soup = BeautifulSoup(html, 'html.parser')
# The js file body is "var r = [...];"; strip('var ') removes the leading "var r ", so this
# executes "securities= [...];" and binds the full fund list to the name securities
exec('securities'+str(soup).strip('var '))
funds = pd.DataFrame(data=securities, index=None, columns=['code','2','name','type','5'])
# Fund 'type' values: 股票型 (equity), 混合型 (hybrid), 债券型 (bond), 指数型 (index), 保本型 (capital-protected), 理财型 (wealth-management), 货币型 (money-market), 混合-FOF, ...
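## Quick usage sketch (illustrative, not required for the rest of the script): inspect the fund list
## and filter it by the type strings above
print(funds['type'].value_counts())
print(funds[funds['type'] == '股票型'].head())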
def get_header():
    # Pool of user agents to rotate through
user_agent_list = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 SE 2.X MetaSr 1.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Maxthon/4.4.3.4000 Chrome/30.0.1599.101 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 UBrowser/4.0.3214.0 Safari/537.36'
]
    # Pool of referers to rotate through
referer_list = [
'http://fund.eastmoney.com/110022.html',
'http://fund.eastmoney.com/110023.html',
'http://fund.eastmoney.com/110024.html',
'http://fund.eastmoney.com/110025.html',
'https://www.baidu.com/s?wd=%E5%A4%A9%E5%A4%A9%E5%9F%BA%E9%87%91%E7%BD%91'
]
    # Pick a random User-Agent and Referer
header = {'User-Agent': random.choice(user_agent_list),'Referer': random.choice(referer_list)}
return header
def get_fund_data(code, start='', end='',proxy_list=0):
    url = 'http://fund.eastmoney.com/f10/F10DataApi.aspx'
    # type=lsjz requests the historical NAV table; the API returns at most 40 records per page
    params = {'type': 'lsjz', 'code': code, 'page': 1, 'per': 40, 'sdate': start, 'edate': end}
if proxy_list==0:
html = get_url(url,params,proxies=None,header=get_header())
else:
while(1):
try:
proxy_random=random.choice(proxy_list)
html = get_url(url,params,proxies=proxy_random,header=get_header())
break
except:
continue
soup = BeautifulSoup(html, 'html.parser')
    records = pd.DataFrame(data=None, index=None, columns=['Code','NetAssetValue','AccumulatedNetValue','ChangePercent'])
    # Each data row has 7 <td> cells; the first four are date, unit NAV, accumulated NAV and daily change %
tab = soup.findAll('tbody')[0]
for tr in tab.findAll('tr'):
if tr.findAll('td') and len((tr.findAll('td'))) == 7:
date=datetime.datetime.strptime(str(tr.select('td:nth-of-type(1)')[0].getText().strip()),'%Y-%m-%d')
if tr.select('td:nth-of-type(2)')[0].getText().strip()=='':
nav=0
else:
nav=float(tr.select('td:nth-of-type(2)')[0].getText().strip())
if tr.select('td:nth-of-type(3)')[0].getText().strip()=='':
aav=0
else:
aav=float(tr.select('td:nth-of-type(3)')[0].getText().strip())
if tr.select('td:nth-of-type(4)')[0].getText().strip('%')=='':
cpt=0
else:
cpt=float(tr.select('td:nth-of-type(4)')[0].getText().strip('%'))
records.loc[date,:]=[code,nav,aav,cpt]
    # The response reports the total page count as "pages:N"; fetch and merge any remaining pages
    reg=re.compile(r"(?<=pages:)\d+")
match=reg.search(str(soup))
pages=int(match.group(0))
if pages >1:
for p in range (2,pages+1):
params = {'type': 'lsjz', 'code': code, 'page': p, 'per': 40, 'sdate': start, 'edate': end}
            # Also send a random header for the remaining pages, and reuse the proxy pool if one was given
            html = get_url(url, params, proxies=(None if proxy_list==0 else random.choice(proxy_list)), header=get_header())
soup = BeautifulSoup(html, 'html.parser')
tab = soup.findAll('tbody')[0]
for tr in tab.findAll('tr'):
if tr.findAll('td') and len((tr.findAll('td'))) == 7:
date=datetime.datetime.strptime(str(tr.select('td:nth-of-type(1)')[0].getText().strip()),'%Y-%m-%d')
if tr.select('td:nth-of-type(2)')[0].getText().strip()=='':
nav=0
else:
nav=float(tr.select('td:nth-of-type(2)')[0].getText().strip())
if tr.select('td:nth-of-type(3)')[0].getText().strip()=='':
aav=0
else:
aav=float(tr.select('td:nth-of-type(3)')[0].getText().strip())
if tr.select('td:nth-of-type(4)')[0].getText().strip('%')=='':
cpt=0
else:
cpt=float(tr.select('td:nth-of-type(4)')[0].getText().strip('%'))
records.loc[date,:]=[code,nav,aav,cpt]
return records
## Example: get_fund_data('163402','2018-09-18','2019-09-18')
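## Usage sketch for the function above: one year of history for fund 163402 fetched without a proxy;
## the result is indexed by date with columns Code, NetAssetValue, AccumulatedNetValue, ChangePercent
sample = get_fund_data('163402', '2018-09-18', '2019-09-18')
print(sample.head())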
# Build the proxy list automatically: keep requesting candidates from the pool and only keep
# those that can successfully fetch a known-good URL
proxy_list=[]
test_url = 'http://fund.eastmoney.com/js/fundcode_search.js'
while(len(proxy_list)<5):
    try:
        data_json = requests.get("http://proxy.1again.cc:35050/api/v1/proxy/?region=中国").text
        data = json.loads(data_json)
        proxy=data['data']['proxy']
        rsp = requests.get(test_url, params=None, proxies={"http": proxy},headers=get_header(),timeout=5)
        rsp.raise_for_status()
        proxy_list.append(proxy)
    except:
        print(len(proxy_list))  # progress: number of working proxies collected so far
# None entries mean "connect directly without a proxy"
proxy_list.append(None)
proxy_list.append(None)
proxy_list.append(None)
## Study periods:
## '2014-06-30' '2015-06-01'  sharp rally
## '2015-06-01' '2016-03-10'  sharp decline
## '2016-03-10' '2018-01-18'  slow rise
## '2018-01-18' '2019-01-18'  slow decline
## '2019-01-18' '2020-01-18'  slow rise
## '2020-01-18' '2020-03-16'  pandemic swings
d1='2014-06-30'
d2='2015-06-01'
d3='2016-03-10'
d4='2018-01-18'
d5='2019-01-18'
d6='2020-01-17'
d7='2020-03-16'
funds_list=list(funds[(funds['type']!='理财型') &(funds['type']!='货币型') & (funds['type']!='混合-FOF')]['code'])
start_list=[]
history_price= pd.DataFrame(data=None, index=None, columns=['code','p1','p2','p3','p4','p5','p6','p7'])
total=len(funds_list)
div=round(total/10)
## Manually configured proxy list; comment this line out to use the automatically fetched list above
proxy_list=['127.0.0.1:1080',None,]
i=0
for f in funds_list:
    print('%4.2f %%\r' %(float(i/total*100)),end="") # print progress as a percentage
history_price.loc[f,'code']=str(f)
history_price.loc[f,'p1']=get_fund_data(f,d1,d1,proxy_list)['AccumulatedNetValue'].values
history_price.loc[f,'p2']=get_fund_data(f,d2,d2,proxy_list)['AccumulatedNetValue'].values
history_price.loc[f,'p3']=get_fund_data(f,d3,d3,proxy_list)['AccumulatedNetValue'].values
history_price.loc[f,'p4']=get_fund_data(f,d4,d4,proxy_list)['AccumulatedNetValue'].values
history_price.loc[f,'p5']=get_fund_data(f,d5,d5,proxy_list)['AccumulatedNetValue'].values
history_price.loc[f,'p6']=get_fund_data(f,d6,d6,proxy_list)['AccumulatedNetValue'].values
history_price.loc[f,'p7']=get_fund_data(f,d7,d7,proxy_list)['AccumulatedNetValue'].values
i=i+1
## Clean the data
washed_price=pd.DataFrame(data=None, index=history_price.index, columns=['code','p1','p2','p3','p4','p5','p6','p7'])
washed_price['code']=history_price['code']
for i in range(0,len(history_price)):
for j in range(1,8):
if np.size(history_price.iloc[i,j])==0:
washed_price.iloc[i,j]=0
else:
washed_price.iloc[i,j]=float(history_price.iloc[i,j])
washed_price=washed_price.drop(washed_price[washed_price['p7']==0].index)
washed_price[washed_price[:]==0]=np.nan
washed_price=washed_price.fillna(method='bfill',axis=1) # backfill a missing price from the next time point
## Compute the scores
point=pd.DataFrame(data=None, index=history_price.index, columns=['p1','p2','p3','p4','p5','p6','p_4year','fall','rise','total'])
point['p1']=(washed_price['p2']-washed_price['p1'])/washed_price['p1']
point['p2']=(washed_price['p3']-washed_price['p2'])/washed_price['p2']
point['p3']=(washed_price['p4']-washed_price['p3'])/washed_price['p3']
point['p4']=(washed_price['p5']-washed_price['p4'])/washed_price['p4']
point['p5']=(washed_price['p6']-washed_price['p5'])/washed_price['p5']
point['p6']=(washed_price['p7']-washed_price['p6'])/washed_price['p6']
point['p_4year']=(washed_price['p7']-washed_price['p3'])/washed_price['p3']
point['fall']=point['p2']+point['p4']
point['rise']=point['p1']+point['p3']+point['p5']
point['total']=point['fall']*3+point['rise']+point['p_4year']*5 # weights: declines x3, rallies x1, four-year return x5
point=point.fillna(0)
## View the scores
show=point[point['fall']>-0.2].sort_values(by=['total'],ascending=False)
show
The idea of the analysis is to fetch each fund's accumulated NAV at the seven dates above (money-market, wealth-management and FOF funds are filtered out), which define six consecutive intervals plus one longer four-year interval from d3 to d7. Judged against the index, the market trend within each interval is consistent. The focus is on a fund's ability to make money while the market rises and to limit losses while it falls; the basic metric is the interval return, and assigning different weights to the returns of the different intervals gives each fund a score.
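For example, with the weights used above (decline intervals ×3, rally intervals ×1, the four-year interval ×5), a hypothetical fund returning +40%, +20% and +10% in the three rally intervals, -15% and -10% in the two decline intervals, and +35% over the four years would score 0.70 + 3×(-0.25) + 5×0.35 = 1.70 (illustrative numbers only; note that the pandemic interval p6 is computed but not weighted into the total).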
The drawdown figure roughly separates the fund types. For example, the code keeps only funds whose combined return over the two market-decline intervals is above -0.2, which selects funds with better risk control, although their returns are correspondingly lower.
After picking funds by score, it is worth checking their cumulative NAV curves online. Some funds show a sudden jump in return at a single point in time, clearly a one-off event; they may score highly, but the result does not generalize.