Complete program
import datetime
from requests_toolbelt import MultipartEncoder
from urllib import parse
import xlwt
import time
import pymssql
import requests
import json
from fake_useragent import UserAgent
import os  # the os module is needed for the path handling below
from apscheduler.schedulers.blocking import BlockingScheduler
import pytz
import logging
timezone = pytz.timezone('Asia/Shanghai')
scheduler = BlockingScheduler()
requests.adapters.DEFAULT_RETRIES = 5  # increase the number of connection retries
s = requests.session()
s.keep_alive = False
ua=UserAgent()
# Configure the log format; level=INFO so that logger.info() messages are actually written
logging.basicConfig(filename='downLoad.log', level=logging.INFO,
                    format='%(asctime)s - %(levelname)s - %(message)s', encoding='utf-8')

def read_db_config():
    # Read the database configuration file (config.json sits one level above the script's directory)
    BASE_DIR = os.path.abspath(__file__)
    a = BASE_DIR.split("\\")[:-2]
    fat = '/'.join(a)
    with open(fat + '/config.json') as file:
        config = json.load(file)
    return config

class downLoad():
    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.sheet_name = 'report_' + time.strftime("%Y-%m-%d")
        self.filename = 'report_' + time.strftime("%Y-%m-%d") + '.xls'
        self.out_path = 'd:/data/任务表' + '.xls'
        # Group robot WebHook address, explained below
        self.url = "XXXXXXXXX"
        self.file_path = self.out_path

    def selectData(self):
        config = read_db_config()
        with pymssql.connect(**config) as connect:
            # print(config)  # debug: verify the connection parameters
            with connect.cursor() as cursor:
                sql = """
                    SELECT
                        task_area,
                        opinion_note,
                        task_title,
                        start_time
                    FROM
                        [dbo].[p_task_temp]
                    WHERE
                        start_time BETWEEN DATEADD(hh, -1, GETDATE())
                        AND GETDATE()
                    ORDER BY start_time ASC
                """
                cursor.execute(sql)
                # Fetch all rows; nothing to report if the last hour produced no tasks
                results = cursor.fetchall()
                if len(results) == 0:
                    return
                # Field names of the query result
                fields = cursor.description
                workbook = xlwt.Workbook(encoding='utf-8')  # the workbook is the container the sheet lives in
                sheet = workbook.add_sheet(self.sheet_name, cell_overwrite_ok=True)
                # Write the header row, mapping column names to Chinese captions
                for field in range(0, len(fields)):
                    fieldname = fields[field][0]
                    if fieldname == 'task_area':
                        fieldname = '序号'
                    if fieldname == 'task_title':
                        fieldname = '标题'
                    if fieldname == 'start_time':
                        fieldname = '开始时间'
                    if fieldname == 'opinion_note':
                        fieldname = '处理意见'
                    sheet.write(0, field, fieldname)
                # Write the data rows
                for row in range(1, len(results) + 1):
                    for col in range(0, len(fields)):
                        sheet.write(row, col, u'%s' % results[row - 1][col])
                workbook.save(self.out_path)
                self.logger.info("下载成功!")
                # Push the file to the WeChat Work group
                self.QYWXSendGroupFile(self.file_path, self.url)

    def QYWXSendGroupFile(self, filepath, url):
        """
        Send a file through the WeChat Work group robot
        :param filepath: path of the file to send
        :param url: group robot WebHook (configuration item)
        :return: True on success, False on failure
        """
        headers = {
            "content-type": "application/json"
        }
        # A file has to be uploaded first to obtain its media_id
        media_id = self.UploadFile(filepath, url)
        msg = {"msgtype": "file", "file": {"media_id": media_id}}
        # Send the message
        try:
            requests.post(url, headers=headers, json=msg)
            return True
        except Exception as e:
            print("企业微信机器人发送文件失败,详细信息:" + str(e))
            return False

    def UploadFile(self, filepath, webHookUrl):
        """
        Upload a file for the WeChat Work robot; sending a file requires uploading it first,
        and the file size must be between 5 B and 20 MB
        :param filepath: path of the file to upload
        :param webHookUrl: group robot WebHook
        :return: media_id, or an empty string on failure
        """
        # Extract the robot key from the WebHook URL and build the upload endpoint
        params = parse.parse_qs(parse.urlparse(webHookUrl).query)
        webHookKey = params['key'][0]
        upload_url = f'https://qyapi.weixin.qq.com/cgi-bin/webhook/upload_media?key={webHookKey}&type=file'
        headers = {"Accept": "application/json, text/plain, */*", "Accept-Encoding": "gzip, deflate",
                   "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.100 Safari/537.36"}
        filename = os.path.basename(filepath)
        try:
            multipart = MultipartEncoder(
                fields={'filename': filename, 'filelength': '', 'name': 'media',
                        'media': (filename, open(filepath, 'rb'), 'application/octet-stream')},
                boundary='-------------------------acebdf13572468')
            headers['Content-Type'] = multipart.content_type
            resp = requests.post(upload_url, headers=headers, data=multipart)
            json_res = resp.json()
            if json_res.get('media_id'):
                return json_res.get('media_id')
        except Exception as e:
            print("企业微信机器人上传文件失败,详细信息:" + str(e))
        return ""

    def run(self):
        # If the output file already exists, delete it first
        if os.path.exists(self.out_path):
            os.remove(self.out_path)
        self.selectData()

# For a one-off local test:
# if __name__ == "__main__":
#     env = downLoad()
#     env.run()

# Run at the top of every hour
@scheduler.scheduled_job('cron', year="*", month="*", day="*", hour="*", minute="0", second="0")
def request_update_status():
    env = downLoad()
    env.run()

scheduler.start()
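
One detail worth noting: the script builds a pytz timezone for Asia/Shanghai at the top but never hands it to the scheduler, so the cron trigger fires according to the host's local clock. If the machine is not set to Beijing time, the hourly trigger can be wired to the intended timezone along the lines of the sketch below; this is only an illustrative, self-contained variant (the hourly_job name and the print body are placeholders, and it assumes APScheduler 3.x, whose scheduler constructor accepts a timezone argument). In the real script the job body would remain downLoad().run().

import datetime
import pytz
from apscheduler.schedulers.blocking import BlockingScheduler

timezone = pytz.timezone('Asia/Shanghai')

# Passing the timezone explicitly makes the cron trigger evaluate minute=0
# against Beijing time instead of the host's local clock.
scheduler = BlockingScheduler(timezone=timezone)

@scheduler.scheduled_job('cron', minute='0', second='0')
def hourly_job():
    # Placeholder body: the real script would call downLoad().run() here.
    print("fired at", datetime.datetime.now(timezone))

scheduler.start()  # blocks the current thread, exactly like the script above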
config.json example (pymssql connects to SQL Server, whose default port is 1433):

{
    "server": "database server address, e.g. 127.0.0.1",
    "user": "database user name, e.g. root",
    "password": "database password",
    "database": "database name, e.g. dataBase",
    "port": 1433,
    "charset": "utf8"
}
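
Before relying on the hourly push, it can save time to confirm that the robot key itself works. A minimal sketch that sends a plain text message through the standard WeChat Work robot endpoint; the webhook variable is a placeholder, and YOUR_KEY must be replaced with the key= parameter from your own robot's webhook address:

import requests

# YOUR_KEY is a placeholder: copy the key= parameter from your robot's webhook URL.
webhook = "https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=YOUR_KEY"

payload = {"msgtype": "text", "text": {"content": "robot connectivity test"}}
resp = requests.post(webhook, json=payload, timeout=10)

# A valid key returns {"errcode": 0, "errmsg": "ok"}.
print(resp.json())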