The goal: read every *.pdf invoice in a folder and write the extracted fields into an Excel workbook; writing to a txt file also works (the commented-out code for that is kept below).
The full code follows; the articles it draws on are linked in the comments at the top of the code.
I started with pdfplumber, but it did not work well and its recognition rate was low; I then switched to pdfminer, which is noticeably better, although some invoices still fail to be recognized.
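The script assumes pdfminer3k and openpyxl are installed (package names taken from the note and imports below; adjust to your own environment):
pip install pdfminer3k openpyxl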
# 1. On Python 3.x install pdfminer3k, not pdfminer (the latter only supports Python 2.x); the import statements are still written as `import pdfminer`.
# 2. Using pdfminer to parse PDF files in Python 3.6 (大泡泡's CSDN blog): https://blog.csdn.net/u011389474/article/details/60139786
# revised by wolf @ 2020-12-25 15:20:00
# https://github.com/jsvine/pdfplumber
# https://blog.csdn.net/weixin_48629601/article/details/107224376
# import pdfplumber as pdf
# infilePath=os.path.join(os.getcwd(),'pdf\\191.pdf')
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfinterp import PDFTextExtractionNotAllowed
from pdfminer.layout import LTTextBoxHorizontal, LAParams
from pdfminer.pdfparser import PDFParser, PDFDocument
from pdfminer.converter import PDFPageAggregator
from openpyxl import Workbook
import os, re
def from_pdf_to_txt(read_file, page_start=0, page_end=0):
"""
:param read_file: str. 注意后缀名是".pdf"
:param write_file: str. 注意后缀名填".txt"
:param page_start: int
:param page_end: int
:return:
"""
# 以二进制读模式打开
origin_pdf_file = open(read_file, 'rb')
# 用文件对象来创建一个pdf文档分析器
parser = PDFParser(origin_pdf_file)
# 创建一个pdf文档
doc = PDFDocument()
# 连接分析器与文档对象,这个语句比较有意思,相互set对方进去
parser.set_document(doc)
doc.set_parser(parser)
# 提供初始化密码.如果pdf没有密码,就传入一个空参数
doc.initialize()
# 检测文档是否提供txt转换,不提供就忽略
if not doc.is_extractable:
# 如果pdf不支持提取,则直接报错
raise PDFTextExtractionNotAllowed
else:
# 创建pdf资源管理器 来管理共享资源
srcmgr = PDFResourceManager()
# 创建一个pdf设备对象
device = PDFPageAggregator(srcmgr, laparams=LAParams())
# 创建一个pdf解释器对象
interpreter = PDFPageInterpreter(srcmgr, device)
# 循环遍历列表,每次处理一个page的内容
pages = list(doc.get_pages())
if page_end == 0:
page_end = len(pages)
results=''
for i in range(page_start, page_end):
interpreter.process_page(pages[i])
# 接受该页面的LTPage对象
layout = device.get_result()
# 这里返回的是一个LTPage对象,里面存放着这个page解析出的各种对象
# 一般包括LTTextBox,LTFigure,LTImage,LTTextBoxHorizontal等等
# 想要获取文本就取它的text属性,即x.get_text()
# 获取text属性
for x in layout:
if isinstance(x, LTTextBoxHorizontal):
# with open(write_file, 'a', encoding='utf-8') as f:
string=x.get_text().replace('\n','')
string=string.replace(":",'')
string=string.replace(":",'')
results = results+string.replace(' ','')
# f.write(results)
# 最后关闭原始pdf文件
origin_pdf_file.close()
return results
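# A minimal usage sketch (the path below is hypothetical, for illustration only):
# text = from_pdf_to_txt('g:/发票/191.pdf', 0, 1)   # parse only the first page
# print(text)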
def re_text(bt, text):
    # Return the first match of pattern bt in text, or '' if nothing matches
    reText = ''
    m1 = re.search(bt, text)
    if m1 is not None:
        reText = m1[0]
    return reText
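# Example with made-up text: since from_pdf_to_txt() strips spaces and colons, a field name and
# its value end up adjacent (e.g. '...发票号码12345678开票日期...'), so a lookbehind pattern such as
# re_text(r'(?<=发票号码)\d+', text) returns '12345678', or '' when the field is missing.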
# def pdf_text(pdffilepath):
#     with pdf.open(pdffilepath) as file:
#         firstPage = file.pages[0]
#         pdftext = firstPage.extract_text()
#         pdftext = pdftext.replace(' ', '')
#         pdftext = pdftext.replace(':', '')
#         pdftext = pdftext.replace(':', '')
#         return pdftext
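# Main script: walk the invoice folder, extract the first page of every PDF with from_pdf_to_txt(),
# pull the invoice code, number, date, check code and total out of the text with lookbehind regexes,
# and write one row per invoice into 发票.xlsx.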
if __name__=='__main__':
    xlfilename = '发票.xlsx'
    xlfilepath = os.path.join(os.getcwd(), xlfilename)
    # Folder that holds the PDF invoices
    dirpath = 'g:/发票'
    wb = Workbook()
    ws = wb.active
    # Header row
    ws['A1'] = '文件名'
    ws['B1'] = '发票代码'
    ws['C1'] = '发票号码'
    ws['D1'] = '开票日期'
    ws['E1'] = '校验码'
    ws['F1'] = '合计金额'
    # ws.title='统计'
    # txtfile='save.txt'
    # ofile=open(txtfile,'w',encoding='utf-8')
    # ofile.write(text)
    receiptsCount = 0
    for root, subdirs, filenames in os.walk(dirpath):
        for filename in filenames:
            # (filename,extension)=os.path.splitext(file)
            # if extension in ['.pdf']:
            if filename.endswith('.pdf'):
                filepath = os.path.join(root, filename)
                receiptsCount += 1
                print(filepath + ' is extracting')
                # Parse only the first page
                pdftext = from_pdf_to_txt(filepath, 0, 1)
                # pdftext=pdf_text(filepath)
                bt = r'(?<=发票代码)\d+'
                rt1 = re_text(bt, pdftext)
                bt = r'(?<=发票号码)\d+'
                rt2 = re_text(bt, pdftext)
                bt = r'(?<=开票日期)\d+年\d+月\d+日'
                rt3 = re_text(bt, pdftext)
                bt = r'(?<=校验码)\d+'
                rt4 = re_text(bt, pdftext)
                bt = r'(?<=合计¥)\d+\.\d+'
                rt5 = re_text(bt, pdftext)
                # Row 1 is the header, so data starts at row 2
                rownum = receiptsCount + 1
                ws['A' + str(rownum)] = filepath
                ws['B' + str(rownum)] = rt1
                ws['C' + str(rownum)] = rt2
                ws['D' + str(rownum)] = rt3
                ws['E' + str(rownum)] = rt4
                ws['F' + str(rownum)] = rt5
    wb.save(filename=xlfilepath)
    print('done')
    # ofile.close()
# # Reference snippets (not used by the script above; they assume `import pdfplumber`):
# # Extract a single table with pdfplumber
# with pdfplumber.open('基于python的网页爬虫.pdf') as pdf:
#     first_page = pdf.pages[0]
#     print(first_page.extract_table())
# # Extract multiple tables with pdfplumber
# with pdfplumber.open('基于python的网页爬虫.pdf') as pdf:
#     first_page = pdf.pages[0]
#     for table in first_page.extract_tables():
#         print(table)
# # Extract a financial-report table with pdfplumber; table_settings configures how the table is detected
# with pdfplumber.open('基于python的网页爬虫.pdf') as pdf:
#     first_page = pdf.pages[0]
#     table = first_page.extract_table(
#         table_settings={
#             'vertical_strategy': 'text',
#             'horizontal_strategy': 'text'
#         }
#     )
#     new_table = []
#     for row in table:
#         new_row = []
#         # If the row is not empty
#         if not ''.join([str(item) for item in row]) == '':
#             # Merge the first three cells into one
#             new_row.append(''.join([str(item) if item else '' for item in row[:3]]))
#             new_row += row[3:]
#             new_table.append(new_row)
#     print(new_table)