GitHub repository: link
# Import the required packages
import seaborn as sns              # plotting
from bs4 import BeautifulSoup      # scraping arXiv pages
import re                          # regular expressions for string pattern matching
import requests                    # sending HTTP requests
import json                        # reading the data, which is in JSON format
import pandas as pd                # data processing and analysis
import matplotlib.pyplot as plt    # plotting
def readArxivFile(path, columns=['id', 'submitter', 'authors', 'title', 'comments', 'journal-ref', 'doi',
                                 'report-no', 'categories', 'license', 'abstract', 'versions',
                                 'update_date', 'authors_parsed'], count=None):
    '''
    Read the arXiv metadata file.
    path: file path
    columns: columns to keep
    count: number of lines to read (None reads the whole file)
    '''
    data = []
    with open(path, 'r') as f:
        for idx, line in enumerate(f):
            if idx == count:
                break
            d = json.loads(line)
            d = {col: d[col] for col in columns}
            data.append(d)
    data = pd.DataFrame(data)
    return data
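Since the snapshot is line-delimited JSON, pandas can also read it directly; a minimal alternative sketch (assuming pandas >= 1.1 for the nrows argument, and that loading whole rows before column selection fits in memory; the hand-rolled loop above is friendlier to memory on the full file):

path = r'D:\code\Github\data\AcademicTrendsAnalysis\arxiv-metadata-oai-snapshot.json'
sample = pd.read_json(path, lines=True, nrows=100_000)  # 100_000 is an arbitrary sample size
sample = sample[['id', 'abstract', 'categories', 'comments']]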
data = readArxivFile(r'D:\code\Github\data\AcademicTrendsAnalysis\arxiv-metadata-oai-snapshot.json',
                     ['id', 'abstract', 'categories', 'comments'])
df_categories = pd.read_csv(r'D:\code\Github\data\AcademicTrendsAnalysis\categories.csv')
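The merges and group-bys further down assume categories.csv maps each arXiv category code to a category name and a top-level group; a quick hedged check (column names inferred from how the table is used below, adjust if your file differs):

expected_cols = {'categories', 'category_name', 'group_name'}
missing = expected_cols - set(df_categories.columns)
assert not missing, f'categories.csv is missing columns: {missing}'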
The built-in .str.extract(pat) is faster and more efficient than using apply for this.
# Convert the comments column to the pandas string dtype
data.comments = data.comments.astype('string')
pat = r'(\d+) pages'
data['pages'] = data.comments.str.extract(pat=pat)
data.pages.dropna(how = 'any').astype(int).describe().astype(int)
count 1089208
mean 17
std 22
min 0
25% 8
50% 13
75% 22
max 11232
Name: pages, dtype: int32
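To back up the claim that .str.extract beats apply, here is a rough timing sketch (illustrative only; absolute numbers depend on hardware and data size):

import timeit

regex = re.compile(r'(\d+) pages')

def apply_pages(c):
    # per-row Python-level regex search, the slower baseline
    if not isinstance(c, str):
        return None
    m = regex.search(c)
    return m.group(1) if m else None

t_extract = timeit.timeit(lambda: data.comments.str.extract(r'(\d+) pages'), number=1)
t_apply = timeit.timeit(lambda: data.comments.apply(apply_pages), number=1)
print(f'str.extract: {t_extract:.2f}s  apply: {t_apply:.2f}s')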
A quick look at the rows where the extracted page count is 0 shows they mostly come from comments such as "4+0 pages" or "0 pages, 10 figures", where the regex captures a literal 0:
data.loc[data.pages == '0'].head()
|  | id | abstract | categories | comments | pages |
| --- | --- | --- | --- | --- | --- |
| 166453 | 1001.1165 | In this paper, we obtain an exact formula fo... | cond-mat.str-el cond-mat.supr-con | 4.0 pages + supplementary material, published ... | 0 |
| 169447 | 1001.4159 | We propose a simple three-body model of an a... | quant-ph physics.atom-ph | 0 pages, 10 figures, to be published in Phys. ... | 0 |
| 345355 | 1205.6354 | A Coulomb impurity placed in an undoped Weyl... | cond-mat.mes-hall hep-th nucl-th | 4+0 pages, extra references added | 0 |
| 375597 | 1210.1803 | A Coulomb impurity placed in an undoped Weyl... | cond-mat.mes-hall hep-th | 4+0 pages, 1 figure, generalization of arXiv:1... | 0 |
| 410837 | 1302.6497 | B. Szegedy [Edge coloring models and reflect... | math.CO math.AG | Some typos and inconsistencies have been fixed... | 0 |
Use .str.split() to split the space-separated categories, then join the papers with the category table:
data['categories'] = data.categories.str.split(' ', expand=True)[0]
data_merge = data.merge(df_categories, how='left', on='categories').drop_duplicates(['id', 'group_name'], keep='first')
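A toy illustration (made-up rows, not the arXiv data) of what the split-then-merge does: only the first, primary category is kept, so each paper joins to exactly one group.

toy = pd.DataFrame({'id': ['a', 'b'], 'categories': ['cs.CL cs.LG', 'math.CO']})
toy['categories'] = toy.categories.str.split(' ', expand=True)[0]  # 'cs.CL', 'math.CO'
toy_cats = pd.DataFrame({'categories': ['cs.CL', 'math.CO'],
                         'group_name': ['Computer Science', 'Mathematics']})
toy.merge(toy_cats, how='left', on='categories')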
Delete data to free memory:
del data
import gc
gc.collect()
2052
data_merge.pages = data_merge.pages.astype('string').astype('Int64')
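The two-step conversion goes through the nullable string dtype and then the nullable Int64 dtype because the extracted column still contains missing values; a toy sketch of why plain int64 would not work:

s = pd.Series(['12', None], dtype='string')
s.astype('Int64')   # nullable integers: 12 and <NA>
# s.astype(int)     # would raise, since NumPy int64 has no representation for missing values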
plt.rcParams['font.sans-serif']=['fangsong']
data_merge.groupby('group_name')['pages'].mean().sort_values().plot(kind = 'barh')
plt.title('Average page count by paper group')
data_merge.query('group_name == "Computer Science"').groupby('category_name')['pages'].mean().sort_values().head(15).plot(kind = 'barh')
plt.title('Average page count across Computer Science subfields')
pat = r'(\d+) figures'
data_merge['figure'] = data_merge.comments.str.extract(pat)
data_merge.figure.dropna(how = 'any').astype(int).describe().astype(int)
count 647810
mean 7
std 10
min 0
25% 4
50% 6
75% 9
max 4989
Name: figure, dtype: int32
data_merge.figure = data_merge.figure.astype('string').astype('Int64')
data_merge.groupby('group_name')['figure'].mean().sort_values().plot(kind = 'barh')
plt.title('Average number of figures by paper group')
data_merge.query('group_name == "Computer Science"').groupby('category_name')['figure'].mean().sort_values().head(15).plot(kind = 'barh')
plt.title('Average number of figures across Computer Science subfields')
data_with_code = data_merge[
    data_merge.comments.str.contains('github', na=False) |
    data_merge.abstract.str.contains('github', na=False)
].copy()  # copy so the columns added below do not trigger SettingWithCopyWarning
data_with_code['text'] = data_with_code['abstract'].fillna('') + data_with_code['comments'].fillna('')
# Use a regular expression to match GitHub links
pattern = r'[a-zA-Z]+://github[^\s]*'
data_with_code['code_flag'] = data_with_code['text'].str.findall(pattern).apply(lambda x: 0 if len(x) < 1 else 1)
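A quick sanity check of the URL pattern on made-up strings (note it only catches links written with a scheme such as https://; bare github.com mentions are missed):

samples = [
    'Code is available at https://github.com/someuser/somerepo',  # hypothetical link
    'Code: github.com/someuser/somerepo (no scheme)',
    'No repository mentioned.',
]
print([re.findall(r'[a-zA-Z]+://github[^\s]*', s) for s in samples])
# [['https://github.com/someuser/somerepo'], [], []]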
data_with_code.groupby(['group_name'])['code_flag'].count().sort_values().plot(kind='barh')
plt.title('Number of papers with open code, by group')
data_with_code.query('group_name == "Computer Science"').groupby('category_name')['code_flag'].count().sort_values().head(15).plot(kind = 'barh')
plt.title('Number of papers with open code across Computer Science subfields')