1. Reading text from a Word document
Python can process Word documents with the python-docx module. The approach is object-oriented: python-docx treats the paragraphs, text, fonts, and so on in a Word document as objects, and manipulating those objects is how you work with the document's content.
1 The Document object represents a Word document.
2 The Paragraph object represents one paragraph in the document.
3 The text attribute of a Paragraph object holds that paragraph's text.
First install the module by running pip install python-docx at the command prompt, then import it. The code is as follows:
import docx
# Open the document
file = docx.Document("D:\\App\\Relevance reconsidered.docx")
print("Number of paragraphs: " + str(len(file.paragraphs)))
# Print the content of each paragraph
for para in file.paragraphs:
    print(para.text)
# Print each paragraph number together with its content
for i in range(len(file.paragraphs)):
    print("Paragraph " + str(i) + ": " + file.paragraphs[i].text)
Example: read the text from a Word document, then use Python's jieba segmentation package for stopword removal and word-frequency counting:
import docx
import jieba

# Open the document
file = docx.Document("D:\\App\\Relevance reconsidered.docx")
print("Number of paragraphs: " + str(len(file.paragraphs)))

# Collect the text of the whole document
text = ""
for para in file.paragraphs:
    text += para.text
# print(text)

# Segment the text
words = jieba.cut(text, cut_all=False)

# Load the stopword list (one word per line; adjust the path as needed, file assumed UTF-8)
stopwords = []
for word in open(r"D:\App\stop_word.txt", 'r', encoding='utf-8'):
    stopwords.append(word.strip())

# Filter out stopwords and whitespace tokens
stayed_words = []
for word in words:
    if word.strip() and word not in stopwords:
        stayed_words.append(word)

# Count how often each word appears
word_freq = {}
for word in stayed_words:
    if word in word_freq:
        word_freq[word] += 1
    else:
        word_freq[word] = 1

# Sort by frequency, highest first
freq_word = []
for word, freq in word_freq.items():
    freq_word.append((word, freq))
freq_word.sort(key=lambda x: x[1], reverse=True)

# Print the 10 most frequent words
max_number = 10
for word, freq in freq_word[:max_number]:
    print(word, freq)
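As a side note, the counting and sorting above can also be done with collections.Counter from the standard library. A minimal sketch, using a short hypothetical sample string in place of the filtered document text:
from collections import Counter
import jieba
# Hypothetical sample text; in practice this would be the stopword-filtered document text
sample = "relevance is a key concept and relevance is studied widely"
# Segment, drop whitespace tokens, and count in one pass
words = [w for w in jieba.cut(sample) if w.strip()]
word_freq = Counter(words)
# most_common(10) returns the 10 most frequent (word, count) pairs, already sorted
for word, freq in word_freq.most_common(10):
    print(word, freq)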
2. Reading text from a txt file
f = open("D:\\App\\Relevance reconsidered.txt", "r")
line = f.readline()
while line:
    print(line)
    line = f.readline()
f.close()
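The same file can also be read with a with statement, which closes the file automatically. A minimal sketch, assuming the same path and a UTF-8 encoded file:
with open("D:\\App\\Relevance reconsidered.txt", "r", encoding="utf-8") as f:
    for line in f:
        print(line.rstrip("\n"))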
Example: read the text from a txt file, then use Python's jieba segmentation package for stopword removal and word-frequency counting:
import jieba

# Read the whole file into one string
f = open("D:\\App\\Relevance reconsidered.txt", "r")
text = ""
line = f.readline()
while line:
    text += line
    line = f.readline()
f.close()
# print(text)

# Load the stopword list (one word per line; adjust the path as needed, file assumed UTF-8)
stopwords = []
for word in open(r"D:\App\stop_word.txt", 'r', encoding='utf-8'):
    stopwords.append(word.strip())

# Segment the text and filter out stopwords and whitespace tokens
words = jieba.cut(text, cut_all=False)
stayed_words = []
for word in words:
    if word.strip() and word not in stopwords:
        stayed_words.append(word)
# print(stayed_words)

# Count how often each word appears
word_freq = {}
for word in stayed_words:
    if word in word_freq:
        word_freq[word] += 1
    else:
        word_freq[word] = 1

# Sort by frequency, highest first
freq_word = []
for word, freq in word_freq.items():
    freq_word.append((word, freq))
freq_word.sort(key=lambda x: x[1], reverse=True)

# Print the 10 most frequent words
max_number = 10
for word, freq in freq_word[:max_number]:
    print(word, freq)
3. Reading text from a PDF file
First install the pdfminer3k library with pip install pdfminer3k.
Then import the pdfminer module and parse a local PDF file:
from pdfminer.pdfparser import PDFParser, PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfinterp import PDFTextExtractionNotAllowed
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTTextBoxHorizontal, LAParams

path = "D:\\App\\information.pdf"

def parse():
    fp = open(path, 'rb')      # Open the file in binary read mode
    praser = PDFParser(fp)     # Create a PDF parser from the file object
    doc = PDFDocument()        # Create a PDF document object
    praser.set_document(doc)   # Connect the parser and the document
    doc.set_parser(praser)
    doc.initialize()           # Pass a password here if the PDF is encrypted
    if not doc.is_extractable:
        raise PDFTextExtractionNotAllowed

    # Create a PDF resource manager to manage shared resources
    rsrcmgr = PDFResourceManager()
    # Create the layout parameters and the page aggregator (the "device")
    laparams = LAParams()
    device = PDFPageAggregator(rsrcmgr, laparams=laparams)
    # Create a PDF page interpreter
    interpreter = PDFPageInterpreter(rsrcmgr, device)

    # Process the document one page at a time; doc.get_pages() yields the pages
    for page in doc.get_pages():
        interpreter.process_page(page)
        # Get the LTPage object for this page; it holds the objects parsed from
        # the page, such as LTTextBox, LTFigure, LTImage, LTTextBoxHorizontal.
        # To get the text, read the text attribute of the text-box objects.
        layout = device.get_result()
        for x in layout:
            if isinstance(x, LTTextBoxHorizontal):
                results = x.get_text()
                print(results)
    fp.close()

if __name__ == '__main__':
    parse()
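The code above uses the API of the older pdfminer/pdfminer3k releases. If the newer pdfminer.six package is installed instead, the same extraction can be done through its high-level helper. A minimal sketch, assuming the same local path:
# Requires pdfminer.six (pip install pdfminer.six) rather than pdfminer3k
from pdfminer.high_level import extract_text
text = extract_text("D:\\App\\information.pdf")
print(text)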
Using the same pdfminer module to parse an online PDF file:
import random
from io import BytesIO
from urllib.request import urlopen
from urllib.request import Request
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTTextBoxHorizontal, LAParams
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfinterp import PDFTextExtractionNotAllowed
from pdfminer.pdfparser import PDFParser, PDFDocument

# Example User-Agent strings; one is picked at random for each request
user_agent = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64)",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)",
]

def parse(_path):
    # Pick a random User-Agent from the list above
    request = Request(url=_path, headers={'User-Agent': random.choice(user_agent)})
    # Download the online PDF and wrap it in a seekable in-memory buffer
    fp = BytesIO(urlopen(request).read())
    praser_pdf = PDFParser(fp)     # Create a PDF parser from the buffer
    doc = PDFDocument()            # Create a PDF document object
    praser_pdf.set_document(doc)   # Connect the parser and the document
    doc.set_parser(praser_pdf)
    doc.initialize()               # Pass a password here if the PDF is encrypted
    if not doc.is_extractable:
        raise PDFTextExtractionNotAllowed

    # Create a PDF resource manager to manage shared resources
    rsrcmgr = PDFResourceManager()
    # Create the layout parameters
    laparams = LAParams()
    # Create the page aggregator
    device = PDFPageAggregator(rsrcmgr, laparams=laparams)
    # Create a PDF page interpreter
    interpreter = PDFPageInterpreter(rsrcmgr, device)

    # Process the document one page at a time; doc.get_pages() yields the pages
    for page in doc.get_pages():
        # Let the page interpreter read the page
        interpreter.process_page(page)
        # Get the parsed content from the aggregator
        layout = device.get_result()
        # layout is an LTPage object holding the objects parsed from the page,
        # such as LTTextBox, LTFigure, LTImage, LTTextBoxHorizontal.
        for out in layout:
            # Only text boxes have text; images and figures do not
            # (alternatively: if hasattr(out, "get_text"))
            if isinstance(out, LTTextBoxHorizontal):
                results = out.get_text()
                print("results: " + results)

if __name__ == '__main__':
    url = "http://www.caac.gov.cn/XXGK/XXGK/TJSJ/201708/P020170821330916187824.pdf"
    parse(url)
4. Reading text from an XML file
import xml.dom.minidom
# Open the XML document
dom = xml.dom.minidom.parse('C:/Users/asus/Desktop/1.xml')
# Get the document (root) element
root = dom.documentElement
# Collect the text of every <url> element into one string
urls = dom.getElementsByTagName('url')
corpus = ""
for url in urls:
    corpus = corpus + url.firstChild.data + ";"
# Check whether a given URL is already in the collected text
text = "https://www.drugs.com/sfx/nytol-quickcaps-side-effects.html"
if corpus.find(text) != -1:
    print("Already exists")
5. Reading data from an Excel file
- Read the header (title) cell of an Excel file
import openpyxl
filename = r'D:/Pythonworkspace/Articles/data/excel/Journal of Propulsion and Power.xlsx'
wb = openpyxl.load_workbook(filename)
# wb.sheetnames lists the sheet names (get_sheet_names() is deprecated)
sheetnames = wb.sheetnames
ws = wb[sheetnames[0]]
# Print the title cell
print(ws['A1'].value)
- Read the data of one column of an Excel file
import xlrd
# Note: xlrd 2.0+ no longer reads .xlsx files; use xlrd < 2.0 or openpyxl instead
filename = r'D:/Pythonworkspace/Articles/data/excel/Journal of Thermophysics and Heat Transfer.xlsx'
data = xlrd.open_workbook(filename)
table = data.sheet_by_name(u'sheet0')
# col_values(10) returns the values of the 11th column (0-based index)
the_list = set(table.col_values(10))
the_text = str(the_list)
print(the_text)
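Since xlrd 2.0 dropped support for .xlsx files, the same column can also be read with openpyxl. A minimal sketch, assuming the data sits in column K (the 11th column) of the first sheet:
import openpyxl
filename = r'D:/Pythonworkspace/Articles/data/excel/Journal of Thermophysics and Heat Transfer.xlsx'
wb = openpyxl.load_workbook(filename)
ws = wb[wb.sheetnames[0]]
# ws['K'] returns the cells of column K; collect their distinct values
the_list = {cell.value for cell in ws['K']}
print(the_list)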