github:链接
论文趋势分析专栏:链接
# 导入所需的package
import seaborn as sns #用于画图
from bs4 import BeautifulSoup #用于爬取arxiv的数据
import re #用于正则表达式,匹配字符串的模式
import requests #用于网络连接,发送网络请求,使用域名获取对应信息
import json #读取数据,我们的数据为json格式的
import pandas as pd #数据处理,数据分析
import matplotlib.pyplot as plt #画图工具
def readArxivFile(path, columns=('id', 'submitter', 'authors', 'title', 'comments', 'journal-ref', 'doi',
                                 'report-no', 'categories', 'license', 'abstract', 'versions',
                                 'update_date', 'authors_parsed'), count=None):
    '''
    Read an arXiv metadata snapshot (JSON-lines: one JSON object per line)
    into a pandas DataFrame.

    path: path to the JSON-lines file
    columns: fields to keep from each record
    count: maximum number of records to read (None reads the whole file)
    '''
    # Tuple default instead of a list: a mutable default argument is shared
    # across calls and is a classic Python pitfall.
    data = []
    # The snapshot is UTF-8 text; specify the encoding explicitly instead of
    # depending on the platform default.
    with open(path, 'r', encoding='utf-8') as f:
        for idx, line in enumerate(f):
            if idx == count:  # never true when count is None -> read everything
                break
            record = json.loads(line)
            data.append({col: record[col] for col in columns})
    return pd.DataFrame(data)
# Load only the columns we need from the first 200k records of the snapshot.
# Raw strings (r'...') for the Windows paths: the original non-raw literals
# relied on '\c', '\G', '\d', '\A' not being recognized escapes, which emits
# a SyntaxWarning on modern Python and breaks silently if a path component
# ever starts with a real escape character (e.g. '\t', '\n').
data = readArxivFile(r'D:\code\Github\data\AcademicTrendsAnalysis/arxiv-metadata-oai-snapshot.json',
                     ['id', 'categories', 'authors_parsed'],
                     200000)
# Keep only the first (primary) category of each paper.
data['categories'] = data.categories.str.split(' ', expand=True)[0]
# Left-join human-readable category metadata onto each record.
category = pd.read_csv(r'D:\code\Github\data\AcademicTrendsAnalysis\categories.csv')
data = data.merge(category, how='left')
data.head(3)
id | categories | authors_parsed | group_name | archive_name | archive_id | category_name | category_description | |
---|---|---|---|---|---|---|---|---|
0 | 0704.0001 | hep-ph | [[Balázs, C., ], [Berger, E. L., ], [Nadolsky,... | Physics | High Energy Physics - Phenomenology | hep-ph | High Energy Physics - Phenomenology | Description coming soon |
1 | 0704.0002 | math.CO | [[Streinu, Ileana, ], [Theran, Louis, ]] | Mathematics | Mathematics | Mathematics | Combinatorics | Discrete mathematics, graph theory, enumeratio... |
2 | 0704.0003 | physics.gen-ph | [[Pan, Hongjun, ]] | Physics | Physics | physics | General Physics | Description coming soon |
# Restrict the dataset to papers whose category is Machine Learning.
data_ML = data.loc[data['category_name'] == 'Machine Learning']
data_ML.shape
(596, 8)
import networkx as nx

# Build an undirected co-author graph from the first 10 ML papers:
# the first author of each paper is linked to every co-author.
G = nx.Graph()
for paper in data_ML.iloc[:10].itertuples():
    # authors_parsed entries are name-part lists; the last element is empty,
    # so drop it and join the remaining parts into a display name.
    names = [' '.join(parts[:-1]) for parts in paper[3]]
    first_author = names[0]
    for coauthor in names[1:]:
        G.add_edge(first_author, coauthor)
nx.draw(G, with_labels=True)
# Rebuild the co-author graph from ALL ML papers, then print the three
# best-connected authors (highest node degree).
G = nx.Graph()
for paper in data_ML.itertuples():
    names = [' '.join(parts[:-1]) for parts in paper[3]]
    for coauthor in names[1:]:
        G.add_edge(names[0], coauthor)

top_three = sorted(dict(G.degree()).items(), key=lambda item: item[1], reverse=True)[:3]
for name, degree in top_three:
    print(name, ':', degree)
Beygelzimer Alina : 10
Aran Oya : 9
Liu Han : 8
# Inspect the neighbors (direct co-authors) of the most-connected author.
G['Beygelzimer Alina']
AtlasView({'Langford John': {}, 'Dasgupta Sanjoy': {}, 'Lifshits Yuri': {}, 'Sorkin Gregory': {}, 'Strehl Alex': {}, 'Li Lihong': {}, 'Reyzin Lev': {}, 'Schapire Robert E.': {}, 'Hsu Daniel': {}, 'Zhang Tong': {}})
def func(x):
    '''
    Flatten a parsed-authors list into one comma-terminated string.

    x: list of author name-part lists, e.g. [['Streinu', 'Ileana', ''], ...]
    Returns each author's parts space-joined, each followed by a comma,
    e.g. 'Streinu Ileana ,Theran Louis ,' (empty string for empty input).
    '''
    # One-pass str.join instead of repeated `s += ...`, which builds a new
    # string on every iteration (quadratic in the worst case).
    return ''.join(' '.join(parts) + ',' for parts in x)
# Keep only papers that list 'Beygelzimer Alina' among the authors.
data_temp = data_ML.authors_parsed.apply(func=func)
data_ML_beya = data_temp[data_temp.str.contains('Beygelzimer Alina')]

# Build an undirected graph linking the first author of each of these
# papers to every co-author.
import networkx as nx
G = nx.Graph()
for row in data_ML_beya.to_frame().itertuples():
    # `func` terminates every author with ',', so a plain split(',') yields a
    # trailing '' element that used to create a spurious empty-string node;
    # filter the empties out.
    names = [name for name in row[1].split(',') if name]
    first_author = names[0]
    for coauthor in names[1:]:
        G.add_edge(first_author, coauthor)
nx.draw(G, with_labels=True)
# Use a Chinese-capable font (SimHei) so the axis labels render correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']
# degree_histogram[d] = number of nodes with degree d; plot it as a bar chart.
degree_hist = nx.degree_histogram(G)
plt.bar(range(len(degree_hist)), degree_hist)
plt.xlabel('合作作者的数量')
plt.ylabel('人数')
plt.title('机器学习领域论文论文作者与其他人合作的数量')
Text(0.5, 1.0, '机器学习领域论文论文作者与其他人合作的数量')
# Collect every node's degree (number of co-author links), largest first.
degree_sequence = sorted((deg for _, deg in G.degree()), reverse=True)
dmax = max(degree_sequence)

# Draw the largest connected component with a force-directed (spring) layout.
largest_cc = max(nx.connected_components(G), key=len)
Gcc = G.subgraph(largest_cc)
pos = nx.spring_layout(Gcc)
plt.axis("off")
nx.draw_networkx_nodes(Gcc, pos, node_size=20)
nx.draw_networkx_edges(Gcc, pos, alpha=0.4)
plt.show()