# 1. 构建你的检索式(以 AD 为例),在搜索结果中选择并导出所需文献
# 2. 创建文档
import requests
import re
import os
import urllib.request
import openpyxl
# Browser-style request headers: makes the download requests look like a
# normal browser session so the servers accept them.
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36',
}
def getPaperPdf(url):
    """Download the PDF for one paper from a sci-hub result page.

    Fetches *url*, scrapes an embedded ``....pdf`` link out of the HTML,
    and streams the file into a local ``papers/`` directory.

    Parameters
    ----------
    url : str
        A sci-hub page URL of the form ``<mirror>/<doi>``.

    Raises
    ------
    ValueError
        If the page contains no usable PDF link. Network errors from
        requests/urllib also propagate; the caller treats any exception
        as "this mirror failed".
    """
    # Raw string: '\.' inside a plain literal is an invalid escape and
    # raises a SyntaxWarning on modern Python.
    pattern = r'/.*?\.pdf'
    content = requests.get(url, headers=headers)
    download_url = re.findall(pattern, content.text)
    # Index 1 is the protocol-less "//host/path.pdf" link on sci-hub
    # result pages; guard instead of crashing with a bare IndexError.
    if len(download_url) < 2:
        raise ValueError("no pdf link found on page: " + url)
    pdf_url = "https:" + download_url[1]
    print(pdf_url)
    path = r"papers"
    os.makedirs(path, exist_ok=True)
    # Wrap the request so our browser-style headers are sent.
    req = urllib.request.Request(pdf_url, headers=headers)
    file_name = pdf_url.split('/')[-2] + '%' + pdf_url.split('/')[-1]
    # Context managers guarantee the response and the output file are
    # closed even if the transfer dies halfway (the original leaked both).
    with urllib.request.urlopen(req, timeout=5) as u, \
            open(path + '/' + file_name, 'wb') as f:
        block_sz = 8192
        while True:
            buffer = u.read(block_sz)
            if not buffer:
                break
            f.write(buffer)
    print("Successful to download" + " " + file_name)
# Put the spreadsheet next to this script (or keep an absolute path) and
# change the file name below to your own .xlsx file; downloaded papers
# land in a "papers" folder created under the working directory.
# Raw string: '\p' and '\d' in a plain literal are invalid escape
# sequences (SyntaxWarning on modern Python).
wb = openpyxl.load_workbook(r'D:\pythonProject\doi.xlsx')
# The DOI numbers live on the sheet named "Sheet1".
Sheet1 = wb['Sheet1']
# Read the column that holds the DOI values; change 'K' if your sheet
# keeps them in a different column.
col_range = Sheet1['K']
# Reading rows instead also works: row_range = Sheet1[2:6]
fails = []  # DOIs that every mirror rejected; reported at the end.

# The CSV exported from PubMed is saved as .xlsx in this directory;
# column K holds the DOI, and each DOI is fetched through sci-hub below.
# Log in to a sci-hub site manually first to check the mirrors are still
# alive; update the URLs here if they have moved.
mirrors = ["https://sci-hub.ren/", "https://sci-hub.se/"]
for col in col_range:  # iterate the cells of the DOI column
    doi = col.value
    print(doi)
    if not doi:
        continue  # blank cells would crash the URL concatenation below
    for i, mirror in enumerate(mirrors, start=1):
        paper_url = mirror + doi
        print(paper_url)
        try:
            getPaperPdf(paper_url)  # download the PDF via this mirror
            break
        except Exception:
            print("Failed to get pdf %d" % i)
    else:
        # Every mirror failed: remember the DOI so it can be fetched by
        # hand later (the original never populated this list).
        fails.append(doi)
# DOIs that could not be downloaded automatically.
print(fails)
# 因为是通过 sci-hub 下载的,代码运行结束后会有很多无法下载的 doi 号,
# 又无法通过 pubmed 官网进行爬取,
# 所以只能手动检查一遍,到 pubmed 上手动下载获取。
# 最后完整代码如下:
import requests
import re
import os
import urllib.request
import openpyxl
# Browser-style request headers: makes the download requests look like a
# normal browser session so the servers accept them.
headers = {
    # NOTE: "536.36" here was a typo — restored to "537.36" so this copy
    # matches the real Chrome 63 user-agent used earlier in the file.
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36',
}
def getPaperPdf(url):
    """Download the PDF for one paper from a sci-hub result page.

    Fetches *url*, scrapes an embedded ``....pdf`` link out of the HTML,
    and streams the file into a local ``papers/`` directory.

    Parameters
    ----------
    url : str
        A sci-hub page URL of the form ``<mirror>/<doi>``.

    Raises
    ------
    ValueError
        If the page contains no usable PDF link. Network errors from
        requests/urllib also propagate; the caller treats any exception
        as "this mirror failed".
    """
    # Raw string: '\.' inside a plain literal is an invalid escape and
    # raises a SyntaxWarning on modern Python.
    pattern = r'/.*?\.pdf'
    content = requests.get(url, headers=headers)
    download_url = re.findall(pattern, content.text)
    # Index 1 is the protocol-less "//host/path.pdf" link on sci-hub
    # result pages; guard instead of crashing with a bare IndexError.
    if len(download_url) < 2:
        raise ValueError("no pdf link found on page: " + url)
    pdf_url = "https:" + download_url[1]
    print(pdf_url)
    path = r"papers"
    os.makedirs(path, exist_ok=True)
    # Wrap the request so our browser-style headers are sent.
    req = urllib.request.Request(pdf_url, headers=headers)
    file_name = pdf_url.split('/')[-2] + '%' + pdf_url.split('/')[-1]
    # Context managers guarantee the response and the output file are
    # closed even if the transfer dies halfway (the original leaked both).
    with urllib.request.urlopen(req, timeout=5) as u, \
            open(path + '/' + file_name, 'wb') as f:
        block_sz = 8192
        while True:
            buffer = u.read(block_sz)
            if not buffer:
                break
            f.write(buffer)
    print("Successful to download" + " " + file_name)
# Put the spreadsheet next to this script (or keep an absolute path) and
# change the file name below to your own .xlsx file; downloaded papers
# land in a "papers" folder created under the working directory.
# Raw string: '\p' and '\d' in a plain literal are invalid escape
# sequences (SyntaxWarning on modern Python).
wb = openpyxl.load_workbook(r'D:\pythonProject\doi.xlsx')
# The DOI numbers live on the sheet named "Sheet1".
Sheet1 = wb['Sheet1']
# Read the column that holds the DOI values; change 'K' if your sheet
# keeps them in a different column.
col_range = Sheet1['K']
# Reading rows instead also works: row_range = Sheet1[2:6]
fails = []  # DOIs that no mirror could serve; printed at the end.

# Try each sci-hub mirror in turn. Extra fallbacks that used to live in
# commented-out copies of this loop (sci-hub.st, sci-hub.shop,
# libgen.ggfwzs.net, sci-hub.do) can simply be appended to this list.
mirrors = ["https://sci-hub.ren/", "https://sci-hub.se/"]
for col in col_range:  # iterate the cells of the DOI column
    doi = col.value
    print(doi)
    if not doi:
        continue  # blank cells would crash the URL concatenation below
    for i, mirror in enumerate(mirrors, start=1):
        paper_url = mirror + doi
        print(paper_url)
        try:
            getPaperPdf(paper_url)  # download the PDF via this mirror
            break
        except Exception:
            print("Failed to get pdf %d" % i)
    else:
        # All mirrors failed: remember the DOI for manual retrieval
        # (the original only appended inside commented-out code, so it
        # always printed an empty list here).
        fails.append(doi)
# DOIs that could not be downloaded automatically.
print(fails)