import requests
import json
import re
from urllib import parse
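# Overall flow: log in to the mabangerp ERP, run an order search for a paid-time window,
# page through the results with aiohttp to collect platform order IDs (step1), request an
# export-by-template for each batch and collect the download URLs (step2), download the
# generated spreadsheets (step3), then post-process the exported files with pandas below.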
requests = requests.Session()  # reuse one HTTP session everywhere below (note: this rebinds the module name "requests" to a Session object)
url0="https://900458.private.mabangerp.com/index.htm"
url = "https://900458.private.mabangerp.com/index.php?mod=main.doLogin"
headers0 = {
'Host': '900458.private.mabangerp.com',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.9',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
'Upgrade-Insecure-Requests': '1',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive'
}
payload = "isMallRpcFinds=&username=111111111&password=jieyi123"
headers = {
'Origin': 'https://900458.private.mabangerp.com',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'X-Requested-With': 'XMLHttpRequest',
'Connection': 'keep-alive'
}
requests.request("GET", url0, headers=headers0)  # load the login page first so its session cookies are set
response = requests.request("POST", url, headers=headers, data = payload)
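# Search orders paid between t_start and t_end. The JSON reply carries pageCount and
# pageHtml; pageHtml embeds an encoded "data" token that the export requests below send
# back as PrintCenterOrderIdssql.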
t_start='2020-07-01'
t_end='2020-10-01'
url = "https://900458.private.mabangerp.com/index.php?mod=order.orderSearch"
payload="PrintCenterOrderIdssql=&fbaFlag=&Order.orderStatus=&OrderCurrency.beforeStatus=&printCount=&labelMultipleChoiceWhere=cross&TextVal=weight&TextZx=&TextZd=&TextFee=OrderFee&minOrderFee=&maxOrderFee=&queryTime=paidTime&startTime1="+t_start+"+00%3A00%3A00&endTime1="+t_end+"+00%3A00%3A00&itemCount=&queryTime2=&OrderSearch.fuzzySearchKey=&OrderSearch.fuzzySearchKey1=&grid=&providerName=&OrderItem.developerId=&smtSearchVal=&orderhighfastsearch=&parentCategoryId=&categoryId=&OrderItem.stockStatus=&orderSearchHistory=&orderPageKey=00542f581084128b7dced69b48849733&goPaypalRefundStatus=1&page=1&rowsPerPage=100&Order_isCloud=2&m=order&OrderPlus.isNewOrder=2&a=orderalllist&isNewOrderPage=1&post_tableBase=1&showError=undefined&pageListC=undefined&1=1"
# payload = "PrintCenterOrderIdssql=&fbaFlag=&Order.orderStatus=&OrderCurrency.beforeStatus=&printCount=&labelMultipleChoiceWhere=cross&TextVal=weight&TextZx=&TextZd=&TextFee=OrderFee&minOrderFee=&maxOrderFee=&queryTime=createDate&startTime1="+t_start+"+00%3A00%3A00&endTime1="+t_end+"+00%3A00%3A00&itemCount=&queryTime2=&OrderSearch.fuzzySearchKey=&OrderSearch.fuzzySearchKey1=&grid=&providerName=&OrderItem.developerId=&smtSearchVal=&orderhighfastsearch=&parentCategoryId=&categoryId=&OrderItem.stockStatus=&orderSearchHistory=&orderPageKey=11d7ca827bdc33f37e8f2f5f50fb0973&goPaypalRefundStatus=1&page=1&rowsPerPage=100&Order_isCloud=2&m=order&OrderPlus.isNewOrder=2&a=orderalllist&isNewOrderPage=1&post_tableBase=1&showError=undefined&pageListC=undefined&1=1"
response = requests.request("POST", url, headers=headers, data = payload)
rj = response.json()
print(divmod(rj['pageCount'], 5000))  # (full pages of 5000, remainder)
content = rj["pageHtml"]
# print(content)
# r='{"data":"(.*?)"}'  # variant for orders awaiting allocation
# pull the encoded search "data" token out of pageHtml; the export requests below reuse it
r = '"data":"(.*?)","page"'
r = re.compile(r)
# print(parse.quote(r.findall(content)[0]))
pd = parse.quote(r.findall(content)[0])  # URL-encode the token for the form payloads below
dm = divmod(rj['pageCount'], 5000)  # each export page returns up to 5000 orders
totalpage = dm[0] + 1 if dm[1] > 0 else dm[0]
print(totalpage)
allrecords=[]
cookies=requests.cookies.get_dict()
print(cookies)
import asyncio
import aiohttp
import time
import csv
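# step1: fetch every export page concurrently (exportsearchnum=5000 orders per page) and
# collect each page's hasPlatformOrderId list into allrecords.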
def step1(cookies, pd, totalpage):
# start=time.time()
async def post(url,headers,data,cookies):
# conn = aiohttp.TCPConnector(limit=2)
conn = aiohttp.TCPConnector(limit_per_host=1)
session=aiohttp.ClientSession(headers=headers,cookies=cookies,connector=conn)
response=await session.post(url,data=data)
# dlist.append(await response.json()['gourl'])
# print(json.loads(str(await response.text()))['gourl'])
# await asyncio.sleep(1)
        # hasPlatformOrderId in the JSON reply carries this page's platform order IDs
        allrecords.append(json.loads(await response.text())['hasPlatformOrderId'])
# await response.text()
await session.close()
# return response
async def request(pd,i):
data = "PrintCenterOrderIdssql="+pd+"&fbaFlag=&Order.orderStatus=2&OrderCurrency.beforeStatus=&printCount=&TextVal=weight&TextZx=&TextZd=&TextFee=OrderFee&minOrderFee=&maxOrderFee=&queryTime=&itemCount=&queryTime2=&OrderSearch.fuzzySearchKey=&OrderSearch.fuzzySearchKey1=&grid=&providerName=&OrderItem.developerId=&smtSearchVal=&orderhighfastsearch=&parentCategoryId=&categoryId=&OrderItem.stockStatus=&orderSearchHistory=&page="+str(i)+"&ordersearchtype=exportsearch&exportsearchnum=5000&p_tableBase=&employeeId=308469"
headers = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Referer': 'https://private-amz.mabangerp.com/index.php?mod=order.exportOrderByTemplate&isCloud=2&tableBase=&os=99&orderItemOrderBy=platformSku+asc%2CisCombo+asc%2CstockId+asc%2Cid+asc&cMKey=0bac0855cad15bb5eb5fbf3117311c5e&platformOrderIdsMkey=EXPORT_ORDER_BY_TEMPLATE_INTERFACE_308469&platformOrderIdssqlMkey=EXPORT_ORDER_BY_TEMPLATE_SQL_INTERFACE_308469&lang=cn',
'Origin': 'https://private-amz.mabangerp.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
}
url = "https://print-private.mabangerp.com/index.php?mod=order.orderSearch"
        try:
            await post(url, headers, data, cookies)
        except Exception:
            # one blind retry if the first attempt fails
            await post(url, headers, data, cookies)
    tasks = [asyncio.ensure_future(request(pd, i)) for i in range(1, totalpage + 1)]
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait(tasks))
# with open('111.csv', 'w', newline='') as csvfile:
# writer = csv.writer(csvfile,delimiter=' ', quotechar='"', quoting=csv.QUOTE_MINIMAL)
# for rows in allrecords:
# for row in rows:
# writer.writerow(row)
# end = time.time()
# print(end - start)
# for i in range(1,toltalpage+1):
# print(i)
# url = "https://print-private.mabangerp.com/index.php?mod=order.orderSearch"
# payload = "PrintCenterOrderIdssql="+parse.quote(r.findall(content)[0])+"&fbaFlag=&Order.orderStatus=2&OrderCurrency.beforeStatus=&printCount=&TextVal=weight&TextZx=&TextZd=&TextFee=OrderFee&minOrderFee=&maxOrderFee=&queryTime=&itemCount=&queryTime2=&OrderSearch.fuzzySearchKey=&OrderSearch.fuzzySearchKey1=&grid=&providerName=&OrderItem.developerId=&smtSearchVal=&orderhighfastsearch=&parentCategoryId=&categoryId=&OrderItem.stockStatus=&orderSearchHistory=&page="+str(i)+"&ordersearchtype=exportsearch&exportsearchnum=5000&p_tableBase=&employeeId=308469"
# headers = {
# 'Accept': 'application/json, text/javascript, */*; q=0.01',
# 'Referer': 'https://private-amz.mabangerp.com/index.php?mod=order.exportOrderByTemplate&isCloud=2&tableBase=&os=99&orderItemOrderBy=platformSku+asc%2CisCombo+asc%2CstockId+asc%2Cid+asc&cMKey=0bac0855cad15bb5eb5fbf3117311c5e&platformOrderIdsMkey=EXPORT_ORDER_BY_TEMPLATE_INTERFACE_308469&platformOrderIdssqlMkey=EXPORT_ORDER_BY_TEMPLATE_SQL_INTERFACE_308469&lang=cn',
# 'Origin': 'https://private-amz.mabangerp.com',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
# 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
# }
# response = requests.request("POST", url, headers=headers, data = payload)
# allrecords.append(response.json()['hasPlatformOrderId'])
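# Open the export-by-template page once so its cookies/keys are attached to the session,
# then run step1 to gather all order IDs.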
headers = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Referer': 'https://private-amz.mabangerp.com/index.php?mod=order.exportOrderByTemplate&isCloud=2&tableBase=&os=99&orderItemOrderBy=platformSku+asc%2CisCombo+asc%2CstockId+asc%2Cid+asc&cMKey=0bac0855cad15bb5eb5fbf3117311c5e&platformOrderIdsMkey=EXPORT_ORDER_BY_TEMPLATE_INTERFACE_308469&platformOrderIdssqlMkey=EXPORT_ORDER_BY_TEMPLATE_SQL_INTERFACE_308469&lang=cn',
'Origin': 'https://private-amz.mabangerp.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
}
url='https://private-amz.mabangerp.com/index.php?mod=order.exportOrderByTemplate&isCloud=2&tableBase=&os=&orderItemOrderBy=stockId+asc%2Cid+asc&cMKey=0bac0855cad15bb5eb5fbf3117311c5e&platformOrderIdsMkey=EXPORT_ORDER_BY_TEMPLATE_INTERFACE_308469&platformOrderIdssqlMkey=EXPORT_ORDER_BY_TEMPLATE_SQL_INTERFACE_308469&lang=cn'
requests.request("GET", url, headers=headers)
print(requests.cookies.get_dict())
# print(len(allrecords[0]))
step1(cookies, pd, totalpage)
print(len(allrecords))
dlist=[]
cookies=requests.cookies.get_dict()
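# step2: for each batch of order IDs in allrecords, POST an export-by-template request
# (templateId=42834); the 'gourl' field of the JSON reply is the download URL of the
# generated spreadsheet and is collected into dlist.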
def step2(cookies):
start=time.time()
async def post(url,headers,data,cookies):
conn = aiohttp.TCPConnector(limit_per_host=1)
session=aiohttp.ClientSession(headers=headers,cookies=cookies,connector=conn)
        try:
            response = await session.post(url, data=data)
            # 'gourl' in the JSON reply is the download URL of the generated export file
            gourl = json.loads(await response.text())['gourl']
            print(gourl)
            dlist.append(gourl)
        except Exception:
            # one blind retry if the request or the JSON parse fails
            response = await session.post(url, data=data)
            gourl = json.loads(await response.text())['gourl']
            print(gourl)
            dlist.append(gourl)
        finally:
            await session.close()
# return response
async def request(li):
data = 'backUrl=&orderIds='+'%0D%0A'.join(li)+'&fieldlabel=uq101&fieldlabel=uq102&fieldlabel=uq113&fieldlabel=uq115&fieldlabel=uq128&fieldlabel=uq130&fieldlabel=uq135&fieldlabel=uq136&fieldlabel=uq141&fieldlabel=uq195&fieldlabel=uq149&fieldlabel=uq160&fieldlabel=uq216&fieldlabel=uq226&fieldlabel=uq104&fieldlabel=uq105&fieldlabel=uq108&fieldlabel=uq109&fieldlabel=uq110&fieldlabel=uq111&fieldlabel=uq145&fieldlabel=uq152&fieldlabel=uq201&fieldlabel=uq146&fieldlabel=uq250&fieldlabel=uq156&fieldlabel=uq255&fieldlabel=uq119&fieldlabel=uq120&fieldlabel=uq196&fieldlabel=uq260&map-name%5B%5D=%E4%BB%98%E6%AC%BE%E6%97%B6%E9%97%B4&map-uq%5B%5D=uq115&map-text%5B%5D=&map-name%5B%5D=%E8%AE%A2%E5%8D%95%E7%BC%96%E5%8F%B7&map-uq%5B%5D=uq101&map-text%5B%5D=&map-name%5B%5D=%E5%BA%97%E9%93%BA%E5%90%8D&map-uq%5B%5D=uq135&map-text%5B%5D=&map-name%5B%5D=%E4%BA%A4%E6%98%93%E7%BC%96%E5%8F%B7&map-uq%5B%5D=uq102&map-text%5B%5D=&map-name%5B%5D=%E7%8A%B6%E6%80%81&map-uq%5B%5D=uq136&map-text%5B%5D=&map-name%5B%5D=%E8%AE%A2%E5%8D%95%E5%8E%9F%E5%A7%8B%E6%80%BB%E9%87%91%E9%A2%9D%EF%BC%88%E9%A6%96%E8%AE%B0%E5%BD%95%EF%BC%89&map-uq%5B%5D=uq250&map-text%5B%5D=&map-name%5B%5D=%E5%8E%9F%E5%A7%8B%E5%95%86%E5%93%81%E6%80%BB%E9%87%91%E9%A2%9D&map-uq%5B%5D=uq146&map-text%5B%5D=&map-name%5B%5D=%E5%B8%81%E7%A7%8D&map-uq%5B%5D=uq156&map-text%5B%5D=&map-name%5B%5D=%E5%AE%A2%E6%88%B7%E5%A7%93%E5%90%8D&map-uq%5B%5D=uq104&map-text%5B%5D=&map-name%5B%5D=%E7%94%B5%E8%AF%9D1&map-uq%5B%5D=uq105&map-text%5B%5D=&map-name%5B%5D=%E8%81%94%E7%B3%BB%E9%82%AE%E7%AE%B1&map-uq%5B%5D=uq145&map-text%5B%5D=&map-name%5B%5D=%E5%9B%BD%E5%AE%B6(%E4%B8%AD)&map-uq%5B%5D=uq152&map-text%5B%5D=&map-name%5B%5D=%E6%89%80%E5%B1%9E%E5%9C%B0%E5%8C%BA&map-uq%5B%5D=uq108&map-text%5B%5D=&map-name%5B%5D=%E6%89%80%E5%B1%9E%E5%9F%8E%E5%B8%82&map-uq%5B%5D=uq109&map-text%5B%5D=&map-name%5B%5D=%E9%82%AE%E6%94%BF%E7%BC%96%E7%A0%81&map-uq%5B%5D=uq110&map-text%5B%5D=&map-name%5B%5D=%E9%97%A8%E7%89%8C%E5%8F%B7&map-uq%5B%5D=uq201&map-text%5B%5D=&map-name%5B%5D=%E9%82%AE%E5%AF%84%E5%9C%B0%E5%9D%801(%E5%AE%8C%E6%95%B4%E5%AF%BC%E5%87%BA)&map-uq%5B%5D=uq111&map-text%5B%5D=&map-name%5B%5D=Paypal+ID&map-uq%5B%5D=uq195&map-text%5B%5D=&map-name%5B%5D=%E7%89%A9%E6%B5%81%E6%B8%A0%E9%81%93&map-uq%5B%5D=uq128&map-text%5B%5D=&map-name%5B%5D=%E8%B4%A7%E8%BF%90%E5%8D%95%E5%8F%B7&map-uq%5B%5D=uq130&map-text%5B%5D=&map-name%5B%5D=%E5%86%85%E9%83%A8%E5%8D%95%E5%8F%B7&map-uq%5B%5D=uq160&map-text%5B%5D=&map-name%5B%5D=%E5%95%86%E5%93%81%E6%80%BB%E9%87%8D%E9%87%8F&map-uq%5B%5D=uq255&map-text%5B%5D=&map-name%5B%5D=%E5%8F%91%E8%B4%A7%E6%97%B6%E9%97%B4&map-uq%5B%5D=uq149&map-text%5B%5D=&map-name%5B%5D=%E8%AE%A2%E5%8D%95%E5%95%86%E5%93%81%E5%90%8D%E7%A7%B0&map-uq%5B%5D=uq120&map-text%5B%5D=&map-name%5B%5D=%E5%B9%B3%E5%8F%B0SKU&map-uq%5B%5D=uq196&map-text%5B%5D=&map-name%5B%5D=%E5%A4%87%E6%B3%A8&map-uq%5B%5D=uq113&map-text%5B%5D=&map-name%5B%5D=%E5%95%86%E5%93%81%E6%98%AF%E5%90%A6%E7%BC%BA%E8%B4%A7&map-uq%5B%5D=uq260&map-text%5B%5D=&map-name%5B%5D=%E8%87%AA%E5%AE%9A%E4%B9%89%E5%88%86%E7%B1%BB&map-uq%5B%5D=uq226&map-text%5B%5D=&map-name%5B%5D=%E4%BA%A4%E8%BF%90%E5%BC%82%E5%B8%B8%E5%8E%9F%E5%9B%A0&map-uq%5B%5D=uq216&map-text%5B%5D=&map-name%5B%5D=PayPal%E9%82%AE%E7%AE%B1&map-uq%5B%5D=uq141&map-text%5B%5D=&map-name%5B%5D=SKU&map-uq%5B%5D=uq119&map-text%5B%5D=&templateName=&templateId=42834&standardVersion=1&orderItemOrderBy=stockId+asc%2Cid+asc&pageSave=1&tableBase=&hbddgyxx=1'
headers = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Referer': 'https://private-amz.mabangerp.com/index.php?mod=order.exportOrderByTemplate&isCloud=2&tableBase=&os=&orderItemOrderBy=stockId+asc%2Cid+asc&cMKey=0bac0855cad15bb5eb5fbf3117311c5e&platformOrderIdsMkey=EXPORT_ORDER_BY_TEMPLATE_INTERFACE_308469&platformOrderIdssqlMkey=EXPORT_ORDER_BY_TEMPLATE_SQL_INTERFACE_308469&lang=cn',
'Origin': 'https://private-amz.mabangerp.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
}
url = "https://private-amz.mabangerp.com/index.php?mod=order.doExportByTemplateData"
await post(url,headers,data,cookies)
tasks = [asyncio.ensure_future(request(li)) for li in allrecords]
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait(tasks))
end = time.time()
print(end - start)
# dlist.append(test())
step2(cookies)
print(dlist)
# start=time.time()
# for li in allrecords:
# url = "https://private-amz.mabangerp.com/index.php?mod=order.doExportByTemplateData"
# payload = 'backUrl=&orderIds='+'%0D%0A'.join(li)+'&fieldlabel=uq101&fieldlabel=uq102&fieldlabel=uq113&fieldlabel=uq115&fieldlabel=uq128&fieldlabel=uq130&fieldlabel=uq135&fieldlabel=uq136&fieldlabel=uq141&fieldlabel=uq195&fieldlabel=uq149&fieldlabel=uq160&fieldlabel=uq216&fieldlabel=uq226&fieldlabel=uq104&fieldlabel=uq105&fieldlabel=uq107&fieldlabel=uq108&fieldlabel=uq109&fieldlabel=uq110&fieldlabel=uq111&fieldlabel=uq145&fieldlabel=uq201&fieldlabel=uq146&fieldlabel=uq250&fieldlabel=uq156&fieldlabel=uq255&fieldlabel=uq120&fieldlabel=uq196&fieldlabel=uq260&map-name%5B%5D=%E4%BB%98%E6%AC%BE%E6%97%B6%E9%97%B4&map-uq%5B%5D=uq115&map-text%5B%5D=&map-name%5B%5D=%E8%AE%A2%E5%8D%95%E7%BC%96%E5%8F%B7&map-uq%5B%5D=uq101&map-text%5B%5D=&map-name%5B%5D=%E5%BA%97%E9%93%BA%E5%90%8D&map-uq%5B%5D=uq135&map-text%5B%5D=&map-name%5B%5D=%E4%BA%A4%E6%98%93%E7%BC%96%E5%8F%B7&map-uq%5B%5D=uq102&map-text%5B%5D=&map-name%5B%5D=%E7%8A%B6%E6%80%81&map-uq%5B%5D=uq136&map-text%5B%5D=&map-name%5B%5D=%E8%AE%A2%E5%8D%95%E5%8E%9F%E5%A7%8B%E6%80%BB%E9%87%91%E9%A2%9D%EF%BC%88%E9%A6%96%E8%AE%B0%E5%BD%95%EF%BC%89&map-uq%5B%5D=uq250&map-text%5B%5D=&map-name%5B%5D=%E5%8E%9F%E5%A7%8B%E5%95%86%E5%93%81%E6%80%BB%E9%87%91%E9%A2%9D&map-uq%5B%5D=uq146&map-text%5B%5D=&map-name%5B%5D=%E5%B8%81%E7%A7%8D&map-uq%5B%5D=uq156&map-text%5B%5D=&map-name%5B%5D=%E5%AE%A2%E6%88%B7%E5%A7%93%E5%90%8D&map-uq%5B%5D=uq104&map-text%5B%5D=&map-name%5B%5D=%E7%94%B5%E8%AF%9D1&map-uq%5B%5D=uq105&map-text%5B%5D=&map-name%5B%5D=%E8%81%94%E7%B3%BB%E9%82%AE%E7%AE%B1&map-uq%5B%5D=uq145&map-text%5B%5D=&map-name%5B%5D=%E5%9B%BD%E5%AE%B6&map-uq%5B%5D=uq107&map-text%5B%5D=&map-name%5B%5D=%E6%89%80%E5%B1%9E%E5%9C%B0%E5%8C%BA&map-uq%5B%5D=uq108&map-text%5B%5D=&map-name%5B%5D=%E6%89%80%E5%B1%9E%E5%9F%8E%E5%B8%82&map-uq%5B%5D=uq109&map-text%5B%5D=&map-name%5B%5D=%E9%82%AE%E6%94%BF%E7%BC%96%E7%A0%81&map-uq%5B%5D=uq110&map-text%5B%5D=&map-name%5B%5D=%E9%97%A8%E7%89%8C%E5%8F%B7&map-uq%5B%5D=uq201&map-text%5B%5D=&map-name%5B%5D=%E9%82%AE%E5%AF%84%E5%9C%B0%E5%9D%801(%E5%AE%8C%E6%95%B4%E5%AF%BC%E5%87%BA)&map-uq%5B%5D=uq111&map-text%5B%5D=&map-name%5B%5D=Paypal+ID&map-uq%5B%5D=uq195&map-text%5B%5D=&map-name%5B%5D=%E7%89%A9%E6%B5%81%E6%B8%A0%E9%81%93&map-uq%5B%5D=uq128&map-text%5B%5D=&map-name%5B%5D=%E8%B4%A7%E8%BF%90%E5%8D%95%E5%8F%B7&map-uq%5B%5D=uq130&map-text%5B%5D=&map-name%5B%5D=%E5%86%85%E9%83%A8%E5%8D%95%E5%8F%B7&map-uq%5B%5D=uq160&map-text%5B%5D=&map-name%5B%5D=%E5%95%86%E5%93%81%E6%80%BB%E9%87%8D%E9%87%8F&map-uq%5B%5D=uq255&map-text%5B%5D=&map-name%5B%5D=%E5%8F%91%E8%B4%A7%E6%97%B6%E9%97%B4&map-uq%5B%5D=uq149&map-text%5B%5D=&map-name%5B%5D=%E8%AE%A2%E5%8D%95%E5%95%86%E5%93%81%E5%90%8D%E7%A7%B0&map-uq%5B%5D=uq120&map-text%5B%5D=&map-name%5B%5D=%E5%B9%B3%E5%8F%B0SKU&map-uq%5B%5D=uq196&map-text%5B%5D=&map-name%5B%5D=%E5%A4%87%E6%B3%A8&map-uq%5B%5D=uq113&map-text%5B%5D=&map-name%5B%5D=%E5%95%86%E5%93%81%E6%98%AF%E5%90%A6%E7%BC%BA%E8%B4%A7&map-uq%5B%5D=uq260&map-text%5B%5D=&map-name%5B%5D=%E8%87%AA%E5%AE%9A%E4%B9%89%E5%88%86%E7%B1%BB&map-uq%5B%5D=uq226&map-text%5B%5D=&map-name%5B%5D=%E4%BA%A4%E8%BF%90%E5%BC%82%E5%B8%B8%E5%8E%9F%E5%9B%A0&map-uq%5B%5D=uq216&map-text%5B%5D=&map-name%5B%5D=PayPal%E9%82%AE%E7%AE%B1&map-uq%5B%5D=uq141&map-text%5B%5D=&templateName=&templateId=42834&standardVersion=1&orderItemOrderBy=stockId+asc%2Cid+asc&pageSave=1&tableBase=&hbddgyxx=1'
# headers = {
# 'Accept': 'application/json, text/javascript, */*; q=0.01',
# 'Referer': 'https://private-amz.mabangerp.com/index.php?mod=order.exportOrderByTemplate&isCloud=2&tableBase=&os=&orderItemOrderBy=stockId+asc%2Cid+asc&cMKey=0bac0855cad15bb5eb5fbf3117311c5e&platformOrderIdsMkey=EXPORT_ORDER_BY_TEMPLATE_INTERFACE_308469&platformOrderIdssqlMkey=EXPORT_ORDER_BY_TEMPLATE_SQL_INTERFACE_308469&lang=cn',
# 'Origin': 'https://private-amz.mabangerp.com',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
# 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
# }
# response = requests.request("POST", url, headers=headers, data = payload)
# print(response.json()['gourl'])
# dlist.append(response.json()['gourl'])
# end = time.time()
# print(end - start)
rl={
}
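# step3: download every generated file in dlist concurrently, keeping the raw bytes in rl
# keyed by URL; the files are written to disk right after.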
def step3():
async def get(url,headers):
conn = aiohttp.TCPConnector(limit_per_host=1)
session=aiohttp.ClientSession(headers=headers,connector=conn)
async with session.get(url) as resp:
con=await resp.read()
rl[url] = con
await session.close()
async def request(url):
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'
}
await get(url,headers)
tasks = [asyncio.ensure_future(request(url)) for url in dlist]
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait(tasks))
step3()
for k, v in rl.items():
    # name each downloaded file after the tail of its URL and write the raw bytes
    with open('C:/Users/lx/Desktop/w/' + k[55:], "wb") as f:
        f.write(v)
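# A sketch of a safer way to derive the file name (not used above): take the basename of
# the URL path instead of slicing at a fixed offset, e.g.
#   import os
#   fname = os.path.basename(parse.urlparse(k).path)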
# print(response.text)
# url = "https://upload-private.mabangerp.com/stock/orderexport/159506163484242700297.xls"
# headers = {
# 'Connection': 'keep-alive',
# 'Upgrade-Insecure-Requests': '1',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
# 'Referer': 'https://private-amz.mabangerp.com/index.php?mod=order.exportOrderByTemplate&isCloud=2&tableBase=&os=&orderItemOrderBy=stockId+asc%2Cid+asc&cMKey=0bac0855cad15bb5eb5fbf3117311c5e&platformOrderIdsMkey=EXPORT_ORDER_BY_TEMPLATE_INTERFACE_308469&platformOrderIdssqlMkey=EXPORT_ORDER_BY_TEMPLATE_SQL_INTERFACE_308469&lang=cn',
# 'Accept-Encoding': 'gzip, deflate, br',
# 'Accept-Language': 'zh-CN,zh;q=0.9',
# 'Cookie': 'CRAWL_KANDENG_KEY=yluGNNKDf8xXsv3YNEkT5FIOgl%2FrB42PstVnYIBIHUxJLFguCl98%2FE6Yop54DHesiyRMTt%2BTSOS3G9xxMyNhdg%3D%3D; MULTI_LANGUAGE_TYPE=%2BYjZ6oacL7xJ%2FKOcmBg9Z7cTOqi7UgOUgujRs4KQ4Ms%3D; CRAWL_KANDENG_KEY=yluGNNKDf8xXsv3YNEkT5FIOgl%2FrB42PstVnYIBIHUxJLFguCl98%2FE6Yop54DHesiyRMTt%2BTSOS3G9xxMyNhdg%3D%3D; MULTI_LANGUAGE_TYPE=%2BYjZ6oacL7xJ%2FKOcmBg9Z7cTOqi7UgOUgujRs4KQ4Ms%3D'
# }
# response = requests.request("GET", url, headers=headers, data = {})
# response.encoding='UTF-8'
# with open ('t.xls',"wb") as f:
# try:
# f.write(response.content)
# except FileExistsError:
# pass
# print(response.text)
import pandas as pd
import datetime
import re
import glob
import os
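# Post-processing helpers. Note that "pd" now refers to pandas and rebinds the encoded
# search token used above (the export steps have already finished by this point).
# toint/todatetime coerce values defensively; tr2df loads every csv/xls/xlsx file in a
# folder whose name matches the regex r into one DataFrame, optionally dropping duplicates.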
def toint(a):
try:
b=int(a)
except ValueError as e:
b=None
except TypeError as e:
b=None
return b
def todatetime(a):
# a1=str(a)
def ti(c):
try:
b=datetime.datetime.strptime(c,a)
except TypeError as e:
b=c
except ValueError as e:
b=None
return b
return ti
def tr2df(folder_path,usecols,r='',sheetname=None,distinct=True,subset=None,keep='last',converters=None):
p = re.compile(r)
# all_d=[]
alpath = [i for i in glob.glob(os.path.join(folder_path,'*.*')) if len(p.findall(os.path.basename(i))) > 0]
# alpath=sorted(alpath, key=lambda x:int(x[x.find('.')-8:x.find('.')]))
# for fname in alpath:
# all_d.append(fname)
# all_f=all_d
# if len(all_d)==0:
# raise ValueError('所在文件夹找不到关键字标识的文件')
# all_df=dict(zip(all_d,all_f))
col_name=usecols
adf=[]
    for k in alpath:
        ext = os.path.splitext(k)[1].lower().lstrip('.')  # extension check is safer than slicing at the first '.'
        if ext in ('csv', 'xlsx', 'xls'):
            # print(1)
            try:
                if ext == 'csv':
                    try:
                        df0 = pd.read_csv(k, encoding='utf-8', usecols=usecols, low_memory=False)
                    except Exception:
                        # fall back to GBK for files saved with a Chinese locale
                        df0 = pd.read_csv(k, encoding='gbk', usecols=usecols, low_memory=False)
                    if col_name is None:
                        col_name = df0.columns.tolist()
                    # df0=df0.reindex(columns=list(['表日期']+col_name), fill_value=k[k.index('.')-8:k.index('.')])
                elif ext in ('xlsx', 'xls'):
                    df0 = pd.read_excel(k, sheet_name=sheetname, index_col=None, usecols=usecols)
                    if sheetname is None:
                        # sheet_name=None returns a dict of DataFrames; stack all sheets into one frame
                        adf0 = []
                        for i in df0:
                            adf0.append(df0[i])
                        df0 = pd.concat(adf0, axis=0, ignore_index=True)
                        adf0 = []
                    if col_name is None:
                        col_name = df0.columns.tolist()
                    # df0=df0.reindex(columns=list(['表日期']+col_name), fill_value=str(os.path.basename(k))[0:-13])
            except Exception:
                # skip files that cannot be parsed
                # print(2)
                continue
            adf.append(df0)
df=pd.concat(adf, axis=0, ignore_index=True)
if distinct :
df.drop_duplicates(subset=subset,keep=keep,inplace=True)
return df
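# Concatenate every exported spreadsheet in the 马帮 folder into one CSV (no deduplication).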
df=tr2df(r'C:\Users\lx\Desktop\马帮',usecols=None,r='',sheetname=None,distinct=0,subset=None,keep='last',converters=None)
# print(df['Paypal ID'][df['订单编号']=='2295283777676'])
# df=df.replace('\n', '', regex=True)
# df['Paypal ID']=df['Paypal ID'].map(lambda x:"'"+str(x))
# df['订单编号']=df['订单编号'].map(lambda x:"'"+str(x))
# df.to_csv('mabang1.csv', index=False,encoding="utf_8_sig")
# df=df[~(df['0']=="日期")]
df.to_csv('mb1.csv', index=False,encoding="utf_8_sig")
# import pandas as pd
# from pandas.io.common import EmptyDataError
# import chardet
# # with open('C:/Users/lx/Desktop/case/杰一客诉.CSV','rb') as f:
# # ed=chardet.detect(f.read())
# # df=pd.read_csv(r'C:/Users/lx/Desktop/case/杰一客诉.CSV', index_col=None,usecols=None,encoding='gbk',skiprows=10)
# # df.to_csv('测试.csv', index=False)
# # print(df.shape[0])
# df=pd.read_csv(r'C:\Users\lx\Downloads\mabang1.csv')
# print(df['Paypal ID'][df['订单编号']=='2295283777676'])
#-----------------------------------------------------------------------
# import asyncio
# import aiohttp
# CONCURRENCY = 5
# URL = 'https://www.baidu.com'
# semaphore = asyncio.Semaphore(CONCURRENCY)
# session = None
# async def scrape_api():
# async with semaphore:
# print('scraping', URL)
# async with session.get(URL) as response:
# await asyncio.sleep(1)
# return await response.text()
# async def main():
# global session
# session = aiohttp.ClientSession()
# scrape_index_tasks = [asyncio.ensure_future(scrape_api()) for _ in range(10000)]
# await asyncio.gather(*scrape_index_tasks)
# if __name__ == '__main__':
# asyncio.get_event_loop().run_until_complete(main())
#.........................................................................................
import asyncio
import aiohttp
import time
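# Standalone benchmark: time how long `number` concurrent GET requests to baidu.com take,
# opening one aiohttp session per request.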
def test(number):
start=time.time()
async def get(url):
session=aiohttp.ClientSession()
response=await session.get(url)
await response.text()
await session.close()
return response
async def request():
url = 'https://www.baidu.com/'
await get(url)
    tasks = [asyncio.ensure_future(request()) for _ in range(number)]
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait(tasks))
end = time.time()
print('Number:', number, 'Cost time:', end - start)
for number in [1,3,5,10,15,30,50,75,100,200,500]:
test(number)
import pandas as pd
import chardet
import re
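# Filter the consolidated PayPal statement for buyer-protection payouts, fee reversals and
# chargeback reversals (both simplified and traditional Chinese descriptions) and save the
# matching rows to js.csv.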
# def re_strip(s):
# s_re = re.compile(r'^c|\.\d')
# a=s_re.sub('',s)
# return a
# def jd(s):
# if s
# return 'A'
# elif s
# return 'B'
# elif s
# return 'C'
# elif s
# return 'D'
# elif s
# return 'E'
# elif s
# return 'F'
# else:
# return 'G'
def re_strip(s):
s_re = re.compile(r'^c|\.\d')
a=s_re.sub('',s)
return a
def re_strip1(s):
s_re = re.compile(r'_.{1,5}$|Y.{1,3}$')
a=s_re.sub('',s)
return a
def re_strip2(s):
r=re.compile(r'_\d$|Y\d$')
return r.search(s)
l=[]
df=pd.read_csv(r'C:\Users\lx\Desktop\对账单总\dzd1101.csv', low_memory=False)
df['说明']=df['说明'].map(lambda x: str(x).strip())
df=df[(df['说明']=='PayPal 保障紅利、PayPal 買家購物安全保障的款項支出、PayPal 買家信貸完整保障的款項支出')|(df['说明']=='PayPal保障奖金,PayPal买家保障赔偿,PayPal买家信用全额保障赔偿')|(df['说明']=='費用撤銷')|(df['说明']=='费用撤销')|(df['说明']=='撤銷付款撤銷')|(df['说明']=='退单撤销')]
df.to_csv('js.csv', index=False)
# print(df['说明'].value_counts())
# df=df[df['订单编号'].str.contains(r'即时付款审查')]
# df[['付款时间','订单编号','订单原始总金额(首记录)']]=df[['付款时间','订单编号','订单原始总金额(首记录)']].fillna(method='ffill',axis=0)
# dic=dict(df['订单编号'].value_counts())
# df['是否多品']=df['订单编号'].map(lambda x: '是' if dic[x]>1 else x)
# df=df[~df['订单编号'].str.contains(r'_.{1,5}$|Y.{1,3}$', regex=True)]
# df.to_csv('mb7--9.csv', index=False)
# df['count'] = df['elder_no'].apply(lambda x: dict(df['elder_no'].value_counts())[x])
# df1['参考交易号']=df1['参考交易号'].map(lambda x: "'"+str(x).strip())
# df1.fillna({'账单号': '---'} ,inplace=True)
# df2=pd.read_csv(r'C:\Users\lx\Desktop\my3.csv', low_memory=False)
# df2=df2.loc[:,['Paypal-交易号','Paypal-账单号','订单编号','付款时间']]
# a = ['参考交易号','账单号']
# b = ['Paypal-交易号','Paypal-账单号']
# c = zip(a,b)
# for i in c:
# df=pd.merge(df1, df2, how='left', left_on=i[0], right_on=i[1])
# # df=df[dl]
# df1=df[df['订单编号'].isnull()].drop(['Paypal-交易号','Paypal-账单号','订单编号','付款时间'],axis=1)
# df.dropna(subset=['订单编号'],inplace=True)
# l.append(df)
# df2=[]
# l.append(df1)
# df=pd.concat(l, axis=0, ignore_index=True)
# df['订单编号']=df['订单编号'].map(lambda x: re_strip1(str(x)) if x==x else None)
# print(df.shape)
# df.drop_duplicates(subset=None,inplace=True)
# print(df.shape)
# df.to_csv('付款撤销1101-.csv', index=False)
# df['日期']=pd.to_datetime(df['日期'],errors='coerce')
# for i in range(1,12):
# df1=df[(df['日期'].dt.month==i)]
# df1.to_csv('C:/Users/lx/Desktop/按月份/'+str(i)+'月.csv', index=False)
# print(df['日期'].dt.month.unique())
# l.append(df1)
# df['账单号a']=df['账单号'].map(lambda x: re_strip(str(x)) if x==x else None)
# df2=pd.read_csv(r'C:\Users\lx\Desktop\对账单总\dzd1018.csv', low_memory=False)
# l.append(df2)
# df['日期']=pd.to_datetime(df['日期'],errors='coerce')
# df.loc[(df['主体']=='尚为leo')&(df['日期'].dt.month==7),'主体'] = '速亚'
# df['主体']=df.apply(lambda x: '速亚' if ((x['主体']=='尚为leo')&(x['日期'].month==7)) else x,axis =1)
# df=df[~((df['日期'].dt.month==7)&(df['主体']=='尚为leo'))]
# l.append(df)
# df=pd.concat(l, axis=0, ignore_index=True)
# df.drop_duplicates(subset=None,inplace=True)
# df=df.rename(columns={'0':'日期','1':'时间','2':'时区','3':'说明','4':'币种','5':'总额','6':'费用','7':'净额','8':'余额','9':'交易号','10':'发件人邮箱地址','11':'名称','12':'银行名称','13':'银行账户','14':'运费和手续费金额','15':'营业税','16':'账单号','17':'参考交易号','18':'账单号a'})
# df['账单号a']=df['账单号'].map(lambda x: re_strip(str(x)) if x==x else None)
# df=df[~(df['日期']=='日期')]
# df['主体']=df['主体'].replace(r'西海677','西海')
# df.to_csv('dzd1101.csv', index=False)
# df['YearMonth'] = df['ArrivalDate'].map(lambda x: 1000*x.year + x.month)
# df1['付款时间']=pd.to_datetime(df1['付款时间'],format='%Y/%m/%d %H:%M:%S',errors='coerce')
# df1['订单标记'] = df1['订单标记'].astype(str)
# df1=df1[~df1['订单标记'].str.contains('马帮')]
# df1=df1.rename(columns={'国家/地区': '国家','订单': '订单编号'})
# df1=df1.loc[:,['订单编号','付款时间','商品名','总金额','国家']]
# df2=pd.read_csv(r'C:\Users\lx\Downloads\mb1011.csv', low_memory=False,usecols=['订单编号','付款时间','订单商品名称','订单原始总金额(首记录)','国家(中)'])
# df2=df2.rename(columns={'订单商品名称': '商品名','订单原始总金额(首记录)': '总金额','国家(中)': '国家'})
# df2.fillna(method='ffill',axis=0,inplace=True)
# l.append(df1)
# l.append(df2)
# df1 = df1.set_index('付款时间')
# df1=df1['2020-03-11':'2020-06-08']
# df1.drop_duplicates(subset=['订单','商品名'],inplace=True)
# print(df1.head())
# df=pd.concat(l, axis=0, ignore_index=True)
# df['订单编号']=df['订单编号'].map(lambda x: "'"+str(x).strip())
# df['商品名']=df['商品名'].map(lambda x: str(x).strip())
# df['付款时间']=pd.to_datetime(df['付款时间'],format='%Y/%m/%d %H:%M:%S',errors='coerce')
# df['时间段']=df['付款时间'].map(lambda x:jd(x))
# df['时间段']=pd.cut(df['付款时间'], right=False,labels=['A','B','C','D','E','F','G'],bins=[pd.to_datetime('1970-01-01'),pd.to_datetime('2020-03-14'), pd.to_datetime('2020-03-20'), pd.to_datetime('2020-04-01'), pd.to_datetime('2020-06-01'), pd.to_datetime('2020-07-06'), pd.to_datetime('2020-08-07'),pd.to_datetime('2020-12-07')])
# df=df[(df['付款时间']>=pd.to_datetime('2020-01-01'))]
# df.to_csv('1011.csv', index=False)
# df1['主体']=df1['主体'].replace(r'石头\d','石头',regex=True)
# df1['主体']=df1['主体'].replace(r'尚为leo\d','尚为leo',regex=True)
# df1['主体']=df1['主体'].replace(r'速亚\d','速亚',regex=True)
# df1['主体']=df1['主体'].replace(r'顺发003','顺发002',regex=True)
# df1['订单编号']=df1['订单编号'].map(lambda x: str(x).strip())
# df1.drop_duplicates(subset=['订单编号'],inplace=True)
# print(df1.shape)
# df1.drop_duplicates(subset=['3','9'],inplace=True)
# print(df1.shape)
# df1.drop_duplicates(subset=['订单编号'],inplace=True)
# print(df1.shape)
# df1['订单编号']=df1['订单编号'].map(lambda x:"'"+str(x))
# df1=df1.drop(['订单编号1'],axis=1)
# df1=df1.loc[:,['主体','订单编号1','9','16','收件人','电话','联系邮箱','国家','所属地区','所属城市','邮编','所属城市','门牌号','邮寄地址']]
# df1=df1.rename(columns={'9': 'Paypal-交易号','16': 'Paypal-账单号'})
# l.append(df1)
# df2=pd.read_csv(r'C:\Users\lx\Desktop\my.csv', low_memory=False)
# df=pd.merge(df1, df2, how='left', left_on='订单编号', right_on='订单编号1')
# df2['订单编号1']=df2['订单编号'].map(lambda x: str(x).strip("'"))
# df2=df2.loc[:,['主体','订单编号1','9','16','收件人','电话','联系邮箱','国家','所属地区','所属城市','邮编','所属城市','门牌号','邮寄地址']]
# df2=df2.rename(columns={'9': 'Paypal-交易号','16': 'Paypal-账单号'})
# l.append(df2)
# df=pd.concat(l, axis=0, ignore_index=True)
# print(df.shape)
# # usecols=['付款时间','客户姓名','电话1','联系邮箱','国家(中)','所属地区','所属城市','邮政编码','门牌号','邮寄地址1(完整导出)','物流渠道','货运单号','发货时间','订单编号','Paypal ID','交易编号']
# usecols=['付款时间','订单','收件人','电话','邮箱','国家/地区','省/州','城市','邮编','门牌号','邮寄地址','支付交易号','账单号','物流渠道','运单号','发货时间']
# df0=pd.read_csv(r'C:\Users\lx\Desktop\物流信息3-8.csv', low_memory=False)
# df0=df0.rename(columns={'客户姓名': '收件人','电话1': '电话','国家(中)': '国家','邮寄地址1(完整导出)': '邮寄地址','邮政编码': '邮编'})
# df0=df0.rename(columns={'订单': '订单编号','邮箱': '联系邮箱','国家/地区': '国家','省/州': '所属地区','城市': '所属城市','运单号': '货运单号'})
# df0['erp']='云栈'
# print(df0.shape)
# df0.drop_duplicates(subset=None,inplace=True)
# print(df0.shape)
# df0.to_csv('yz1.csv', index=False)
# df0=pd.read_csv(r'C:\Users\lx\Downloads\yz.csv', low_memory=False,usecols=usecols)
# # print(df0.shape)
# # df0=df0[(df0['3']=='快速结账付款')|(df0['3']=='快速結帳付款')]
# # print(df0.shape)
# df1['18']=df1['16'].map(lambda x: re_strip(str(x)) if x==x else None)
# # # df0.fillna({'交易账单号': '---'} ,inplace=True)
# df0=df0.loc[:,['主体','9','16','167']]
# # # print(df0[df0['交易账单号'].isnull()])
# print(df0.columns.tolist())
# df1=pd.read_excel(r"mb600.xlsx",encoding='gbk')
# # df1.dropna(subset=['付款时间'],inplace=True)
# # # print(df.shape[0])
# # df1=df1.loc[:,['Paypal ID','交易编号','订单编号']]
# df1['Paypal ID']=df1['Paypal ID'].map(lambda x: str(x).strip())
# df1['交易编号']=df1['交易编号'].map(lambda x: str(x).strip())
# df1['订单编号']=df1['订单编号'].map(lambda x: str(x).strip())
# # # df2=pd.read_csv(r"yz.csv")
# # dl=df1.columns.tolist()+['主体','9','16']
# print(df1.columns.tolist())
# # # print(df.shape[0])
# dfa=df1
# a = ['交易编号','Paypal ID','订单编号','交易编号']
# b = ['9','9','167','167']
# c = zip(a,b)
# for i in c:
# df=pd.merge(dfa, df0, how='left', left_on=i[0], right_on=i[1])
# # df=df[dl]
# dfa=df[df['主体'].isnull()].drop(['主体','9','167','16'],axis=1)
# df.dropna(subset=['主体'],inplace=True)
# l.append(df)
# df1=[]
# # df2=pd.read_csv(r"yz.csv", low_memory=False)
# # df2=df2.loc[:,['支付交易号','账单号','订单']]
# df2=df2[~df2['订单'].str.contains(r'_\d$|Y\d$', regex=True)]
# # df2.drop_duplicates(subset=['订单'],inplace=True)
# # df2['支付交易号']=df2['支付交易号'].map(lambda x: str(x).strip())
# # df2['账单号']=df2['账单号'].map(lambda x: str(x).strip())
# # # df2.fillna('--', inplace=True)
# # dl=df0.columns.tolist()+['订单']
# # dfa=df0
# # df0=[]
# # a = ['原始交易号','争议交易号','交易账单号']
# # b = ['支付交易号','支付交易号','账单号']
# # c = zip(a,b)
# # for i in c:
# # df=pd.merge(dfa, df2, how='left', left_on=i[0], right_on=i[1])
# # df=df[dl]
# # dfa=df[df['订单'].isnull()].drop('订单',axis=1)
# # df.dropna(subset=['订单'],inplace=True)
# # l.append(df)
# # df2=[]
# df=pd.concat(l, axis=0, ignore_index=True)
# # df['订单']=df['订单'].map(lambda x: "'"+str(x).strip())
# # df['订单编号']=df['订单编号'].map(lambda x: "'"+str(x).strip())
# df['Paypal ID']=df['Paypal ID'].map(lambda x:"'"+str(x))
# df['交易编号']=df['交易编号'].map(lambda x:"'"+str(x))
# df['订单编号']=df['订单编号'].map(lambda x:"'"+str(x))
# df['9']=df['9'].map(lambda x:"'"+str(x))
# df1=df1.rename(columns={'0':'日期','1':'时间','2':'时区','3':'说明','4':'币种','5':'总额','6':'费用','7':'净额','8':'余额','9':'交易号','10':'发件人邮箱地址','11':'名称','12':'银行名称','13':'银行账户','14':'运费和手续费金额','15':'营业税','16':'账单号','17':'参考交易号','18':'账单号a'})
# df1['日期']=pd.to_datetime(df1['日期'],format='%m/%d/%Y',errors='coerce')
# df1.to_csv('dzd1-8.csv', index=False)
#-----------------------------------------------------------------------------------------------wl.py
# import pandas as pd
# import chardet
# import re
# def re_strip(s):
# s_re = re.compile(r'^c|\.\d')
# a=s_re.sub('',s)
# return a
# def re_strip1(s):
# s_re = re.compile(r'_\d$|Y\d$')
# a=s_re.sub('',s)
# return a
# def re_strip2(s):
# r=re.compile(r'_\d$|Y\d$')
# return r.search(s)
#-------------------------------------------1----------
# l=[]
# df0=pd.read_csv(r'C:\Users\lx\Desktop\物流信息3-8.csv', low_memory=False)
# df0=df0[['订单编号','付款时间', '物流公司(新)', '物流渠道(新)', '货运单号']]
# df1=pd.read_csv(r'C:\Users\lx\Desktop\mb2.csv', low_memory=False)
# df1=df1[['订单编号','国家', '发货时间']]
# df1['订单编号']=df1['订单编号'].map(lambda x: x[1:])
# l.append(df1)
# df2=pd.read_csv(r'C:\Users\lx\Desktop\yz2.csv', low_memory=False)
# df2=df2[['订单编号','国家', '发货时间']]
# df2['订单编号']=df2['订单编号'].map(lambda x: x[1:])
# l.append(df2)
# df3=pd.concat(l, axis=0, ignore_index=True)
# df4=pd.merge(df0, df3, how='left', on='订单编号')
# df4['货运单号']=df4['货运单号'].map(lambda x:"'"+str(x))
# pd.to_datetime(df4['发货时间'],format='%m/%d/%Y %H:%M:%S',errors='coerce')
# print(df4.dtypes)
# df4 = df4[(df4['发货时间'] < '2020-08-01')]
# print(df4.shape)
# df4=df4.rename(columns={'物流公司(新)': '物流公司','物流渠道(新)': '物流渠道'})
# l=[]
# with pd.io.excel.ExcelFile(r'C:\Users\lx\Desktop\8月物流分析.xlsx') as io:
# df5=pd.read_excel(io,sheet_name='Sheet1',usecols=['订单编号','付款时间', '物流公司', '物流渠道', '货运单号','国家(中)','发货时间'])
# df5=df5.rename(columns={'国家(中)': '国家'})
# df5['货运单号']=df5['货运单号'].map(lambda x:"'"+str(x))
# l.append(df4)
# l.append(df5)
# df6=pd.concat(l, axis=0, ignore_index=True) #wuliu 马帮 云栈
# df6['订单编号']=df6['订单编号'].map(lambda x: re_strip1(str(x)) if x==x else None)
# df6.drop_duplicates(subset='订单编号',inplace=True)
# print(df6.shape)
# df7=pd.read_csv(r'C:\Users\lx\Desktop\123.csv', low_memory=False,encoding='gbk')
# df7['订单编号']=df7['订单编号'].map(lambda x: re_strip1(str(x)))
# df7.drop_duplicates(subset='订单编号',inplace=True)
# df8=pd.merge(df6, df7, how='left', on='订单编号')
# print(df8.shape)
# df8.to_csv('6665656.csv', index=False)
#-------------------------------------------2----------
# l=[]
# df0=pd.read_csv(r'C:\Users\lx\Desktop\q1.csv', low_memory=False,encoding='gbk')
# l.append(df0)
# df1=pd.read_csv(r'C:\Users\lx\Desktop\q2.csv', low_memory=False,encoding='gbk')
# l.append(df1)
# df2=pd.read_csv(r'C:\Users\lx\Desktop\q3.csv', low_memory=False,encoding='gbk')
# l.append(df2)
# df3=pd.concat(l, axis=0, ignore_index=True)
# df3=df3[['查询号码','最新状态','上网时间','妥投日期']]
# df3['查询号码']=df3['查询号码'].map(lambda x: str(x).strip().strip("'"))
# df3.drop_duplicates(subset='查询号码',inplace=True)
# # df3['查询号码']=df3['查询号码'].map(lambda x: x[1:])
# df4=pd.read_csv(r'C:\Users\lx\Desktop\6665656.csv', low_memory=False)
# df4['货运单号']=df4['货运单号'].map(lambda x: str(x).strip("'"))
# df5=pd.merge(df4, df3, how='left', left_on='货运单号', right_on='查询号码')
# print(df5.dtypes)
# print(df5.shape)
# df5.to_csv('6665656-1.csv', index=False)
#-------------------------------------------3----------
# df0=pd.read_csv(r'C:\Users\lx\Desktop\6665656-1.csv', low_memory=False)
# print(df0.shape)
# df0.drop_duplicates(subset='货运单号',inplace=True)
# df0['付款时间']=pd.to_datetime(df0['付款时间'],format='%Y/%m/%d %H:%M:%S',errors='coerce')
# df0['发货时间']=pd.to_datetime(df0['发货时间'],format='%Y/%m/%d %H:%M:%S',errors='coerce')
# df0['上网时间']=pd.to_datetime(df0['上网时间'],format='%Y/%m/%d %H:%M:%S',errors='coerce')
# df0['妥投日期']=pd.to_datetime(df0['妥投日期'],format='%Y/%m/%d %H:%M:%S',errors='coerce')
# df0['事件提交时间']=pd.to_datetime(df0['事件提交时间'],format='%Y/%m/%d %H:%M:%S',errors='coerce')
# df0['事件原因']=df0['事件原因'].map(lambda x: None if pd.isnull(x) else '未收到物品')
# df0['最新状态']=df0['最新状态'].map(lambda x: '51无法匹配' if pd.isnull(x) else str(x).strip())
# print(df0.shape)
# df0=df0[((df0['上网时间'] >= "2020-03-01")&(df0['妥投日期'] >= "2020-03-01"))|(df0['妥投日期'].isnull())|(df0['上网时间'].isnull())]
# print(df0.shape)
# df0.loc[df0['订单编号'].str.contains(r'_\d$|Y\d$', regex=True)|(df0['付款时间'] <= "2020-01-01"),'付款时间'] = df0['发货时间']- pd.Timedelta(days=1)
# df0=df0[~((df0['最新状态']=='成功签收')&(df0['上网时间'].isnull()))]
# df0.to_csv('6665656-2.csv', index=False)
#-------------------------------------------4----------
# df=pd.read_csv(r'C:\Users\lx\Desktop\6665656-2.csv', low_memory=False)
# df['是否产生物流case']=df['事件原因'].map(lambda x: 0 if pd.isnull(x) else 1)
# df['妥投数']=df['最新状态'].map(lambda x: 1 if x=='成功签收' else 0)
# df['付款时间']=pd.to_datetime(df['付款时间'],format='%Y/%m/%d %H:%M:%S',errors='coerce')
# df['发货时间']=pd.to_datetime(df['发货时间'],format='%Y/%m/%d %H:%M:%S',errors='coerce')
# df['上网时间']=pd.to_datetime(df['上网时间'],format='%Y/%m/%d %H:%M:%S',errors='coerce')
# df['妥投日期']=pd.to_datetime(df['妥投日期'],format='%Y/%m/%d %H:%M:%S',errors='coerce')
# df['事件提交时间']=pd.to_datetime(df['事件提交时间'],format='%Y/%m/%d %H:%M:%S',errors='coerce')
# df['发货时效']=round((df['发货时间']-df['付款时间'])/pd.Timedelta(1,unit='D'),2)
# df['停留时间']=round((df['上网时间']-df['发货时间'])/pd.Timedelta(1,unit='D'),2)
# df['运输时效']=round((df['妥投日期']-df['上网时间'])/pd.Timedelta(1,unit='D'),2)
# df['签收时效']=round((df['妥投日期']-df['付款时间'])/pd.Timedelta(1,unit='D'),2)
# df['发货数']=1
# df['妥投后的物流case数']=df.apply(lambda x: 1 if ((x['是否产生物流case']==1)&(x['事件提交时间']>x['妥投日期'])&(x['妥投数']==1)) else 0,axis =1)
# df['15天妥投数']=df['运输时效'].map(lambda x: 1 if x <=15 else 0)*df['妥投数']
# df['20天妥投数']=df['运输时效'].map(lambda x: 1 if x <=20 else 0)*df['妥投数']
# df['30天妥投数']=df['运输时效'].map(lambda x: 1 if x <=30 else 0)*df['妥投数']
# print(df.columns.tolist())
# print(df.head())
# print(df['物流公司'].value_counts())
# print(df['物流渠道'].value_counts())
# df.to_csv('6665656-3.csv', index=False)
#------------------------------------------------------------------------------3.py
# import pandas as pd
# import chardet
# import re
# def re_strip(s):
# s_re = re.compile(r'^c|\.\d')
# a=s_re.sub('',s)
# return a
# def re_strip1(s):
# s_re = re.compile(r'_.{1,5}$|Y.{1,3}$')
# a=s_re.sub('',s)
# return a
# l=[]
# # usecols=['付款时间','客户姓名','电话1','联系邮箱','国家(中)','所属地区','所属城市','邮政编码','门牌号','邮寄地址1(完整导出)','物流渠道','货运单号','发货时间','订单编号','Paypal ID','交易编号']
# usecols=['付款时间','订单','收件人','电话','邮箱','国家/地区','省/州','城市','邮编','门牌号','邮寄地址','支付交易号','账单号','物流渠道','运单号','发货时间']
# df0=pd.read_csv(r'C:\Users\lx\Downloads\yz.csv', low_memory=False,usecols=usecols)
# # df0=df0.rename(columns={'客户姓名': '收件人','电话1': '电话','国家(中)': '国家','邮寄地址1(完整导出)': '邮寄地址','邮政编码': '邮编'})
# df0=df0.rename(columns={'订单': '订单编号','邮箱': '联系邮箱','国家/地区': '国家','省/州': '所属地区','城市': '所属城市','运单号': '货运单号'})
# df0['erp']='云栈'
# print(df0.shape)
# df0.drop_duplicates(subset=None,inplace=True)
# print(df0.shape)
# df0.to_csv('yz1.csv', index=False)
# df=pd.read_csv(r'C:\Users\lx\Downloads\mb1011.csv',usecols=['订单编号','交易编号','店铺名','Paypal ID','发货时间','货运单号','订单原始总金额(首记录)','国家(中)','物流渠道','订单商品名称','SKU'])
# df.dropna(subset=['订单编号'],inplace=True)
# df1=df1[(df1['说明']=='快速结账付款')|(df1['说明']=='快速結帳付款')]
# df.columns=['主体','日期','时间','时区','说明','币种','总额','费用','净额','余额','交易号', '发件人邮箱地址', '名称','银行名称', '银行账户', '运费和手续费金额', '营业税', '账单号', '参考交易号', '账单号a']
# df['订单编号-']=df['订单编号'].map(lambda x: re_strip1(str(x)))
# df=pd.read_csv(r'C:\Users\lx\Desktop\对账单总\dzd1018.csv', low_memory=False)
# df=df[(df['说明']=='快速结账付款')|(df['说明']=='快速結帳付款')]
# df2=pd.read_csv(r'C:\Users\lx\Desktop\对账单总\dzd-9+1018-.csv', low_memory=False)
# df3=pd.read_csv(r'C:\Users\lx\Desktop\对账单总\dzd1-80.csv', low_memory=False)
# l.append(df1)
# l.append(df2)
# l.append(df3)
# df=pd.concat(l, axis=0, ignore_index=True)
# df.drop_duplicates(subset=['说明','交易号'],inplace=True)
# df=df.drop(['账单号a'],axis=1)
# df['主体']=df['主体'].replace(r'石头\d','石头',regex=True)
# df['主体']=df['主体'].replace(r'尚为leo\d','尚为leo',regex=True)
# df['主体']=df['主体'].replace(r'速亚\d','速亚',regex=True)
# df['主体']=df['主体'].replace(r'顺发003','顺发002',regex=True)
# df['主体']=df['主体'].replace(r'多麦\d','多麦',regex=True)
# df['主体']=df['主体'].replace(r'若水nme','若水',regex=True)
# df['主体']=df['主体'].replace(r'拾星','拾星s8g')
# df['主体']=df['主体'].replace(r'拾星02','拾星002')
# df['主体']=df['主体'].replace(r'拾星03','拾星003')
# df['主体']=df['主体'].replace(r'拾星04','拾星004')
# df['主体']=df['主体'].replace(r'仕未\d','仕未',regex=True)
# df['主体']=df['主体'].replace(r'仕末','仕未',regex=True)
# df['主体']=df['主体'].replace(r'西海677','西海',regex=True)
# df['主体']=df['主体'].replace(r'益广豪业01','益广one',regex=True)
# df['主体']=df['主体'].replace(r'益广豪业03','益广three',regex=True)
# df['主体']=df['主体'].replace(r'益广豪业','益广zmd',regex=True)
# df['日期']=pd.to_datetime(df['日期'],errors='coerce')
# df['账单号a']=df['账单号'].map(lambda x: re_strip(str(x)) if x==x else None)
# print(df.shape)
# df.drop_duplicates(subset=['说明','交易号'],inplace=True)
# print(df.shape)
# df=df.iloc[0:5,]
# l.append(df0)
# df1=pd.read_csv(r'C:\Users\lx\Desktop\对账单总\dzd-.csv', low_memory=False)
# df1['日期']=pd.to_datetime(df1['日期'],format='%m/%d/%Y',errors='coerce')
# df1=df1[(df1['日期'] >= "2020-08-01")&(df1['日期'] < "2020-09-01")]
# l.append(df1)
# df2=pd.read_csv(r'C:\Users\lx\Desktop\对账单总\dzd-9+1018-.csv', low_memory=False)
# df2['日期']=pd.to_datetime(df2['日期'],format='%m/%d/%Y',errors='coerce')
# df2=df2[(df2['日期'] >= "2020-08-01")&(df2['日期'] < "2020-09-01")]
# l.append(df2)
# df=pd.concat(l, axis=0, ignore_index=True)
# df.drop_duplicates(subset=['说明','交易号'],inplace=True)
# df1=pd.read_csv(r'C:\Users\lx\Desktop\yzq.csv', low_memory=False)
# df1['订单-']=df1['订单'].map(lambda x: re_strip1(str(x)))
# df0.fillna(value='---' ,inplace=True)
# # df1.dropna(subset=['付款时间'],inplace=True)
# # print(df.shape[0])
# # df1=df1.loc[:,['Paypal ID','交易编号','订单编号']]
# # df1.dropna(subset=['付款时间'],inplace=True)
# df0['账单号a']=df0['账单号a'].map(lambda x: str(x).strip())
# df0=df0.rename(columns={'账单号': '账单号0'})
# df1['订单-']=df1['订单-'].map(lambda x: str(x).strip() if x==x else None)
# # df1['交易编号']=df1['交易编号'].map(lambda x: str(x).strip() if x==x else None)
# # df1['Paypal ID']=df1['Paypal ID'].map(lambda x: str(x).strip() if x==x else None)
# # df1['货运单号']=df1['货运单号'].map(lambda x: str(x).strip() if x==x else None)
# a = ['支付交易号','账单号','支付交易号','订单-']
# b = ['交易号','账单号a','参考交易号','账单号a']
# c = zip(a,b)
# for i in c:
# df=pd.merge(df1, df0, how='left', left_on=i[0], right_on=i[1])
# # df=df[dl]
# df1=df[df['主体'].isnull()].drop(['主体','交易号','账单号0','账单号a','参考交易号'],axis=1)
# df.dropna(subset=['主体'],inplace=True)
# l.append(df)
# df0=[]
# l.append(df1)
# df=pd.concat(l, axis=0, ignore_index=True)
# df2=pd.read_csv(r"yz.csv")
# dl=df1.columns.tolist()+['主体','9','16']
# print(df1.columns.tolist())
# print(df.shape[0])
# dfa=df1
# df1=df1.loc[:,['主体','9','16','167']]
# df2=df2[(df2['说明']=='快速结账付款')|(df2['说明']=='快速結帳付款')]
# df3=pd.read_csv(r'C:\Users\lx\Desktop\dzd-.csv')
# df3=df3[(df3['说明']=='快速结账付款')|(df3['说明']=='快速結帳付款')]
# df=pd.merge(df1, df0, how='left', on='订单编号-')
# print(df.shape)
# print(df.shape)
# df2=pd.read_excel(r'C:\Users\lx\Desktop\工作簿12.xlsx')
# l.append(df1)
# l.append(df2)
# l.append(df3)
# df=pd.concat(l, axis=0, ignore_index=True)
# df.drop_duplicates(subset=['交易号'],inplace=True)
# print(df.shape)
# df1=pd.read_excel(r'C:\Users\lx\Desktop\1234.xlsx')
# df1['订单']=df1['订单'].map(lambda x: str(x).strip())
# print(df1.shape)
# df=pd.merge(df1, df0, how='left', left_on='email', right_on='10')
# print(df.shape)
# print(df0.shape)
# df0=df0[(df0['3']=='快速结账付款')|(df0['3']=='快速結帳付款')]
# print(df0.shape)
# df0['167']=df0['16'].map(lambda x: re_strip(str(x)) if x==x else None)
# # df0.fillna({'交易账单号': '---'} ,inplace=True)
# df0=df0.loc[:,['主体','9','16','167']]
# # # print(df0[df0['交易账单号'].isnull()])
# print(df0.columns.tolist())
# df1=pd.read_csv(r"yz1.csv")
# # df1.dropna(subset=['付款时间'],inplace=True)
# # # print(df.shape[0])
# # df1=df1.loc[:,['Paypal ID','交易编号','订单编号']]
# # df1.dropna(subset=['付款时间'],inplace=True)
# df1['支付交易号']=df1['支付交易号'].map(lambda x: str(x).strip())
# df1['账单号']=df1['账单号'].map(lambda x: str(x).strip())
# df1['订单编号']=df1['订单编号'].map(lambda x: str(x).strip())
# df1['货运单号']=df1['货运单号'].map(lambda x: str(x).strip())
# # # df2=pd.read_csv(r"yz.csv")
# # dl=df1.columns.tolist()+['主体','9','16']
# print(df1.columns.tolist())
# # # print(df.shape[0])
# # dfa=df1
# a = ['账单号','支付交易号']
# b = ['167','9']
# c = zip(a,b)
# for i in c:
# df=pd.merge(df1, df0, how='left', left_on=i[0], right_on=i[1])
# # df=df[dl]
# df1=df[df['主体'].isnull()].drop(['主体','9','167','16'],axis=1)
# df.dropna(subset=['主体'],inplace=True)
# l.append(df)
# df0=[]
# l.append(df1)
# # df2=pd.read_csv(r"yz.csv", low_memory=False)
# # df2=df2.loc[:,['支付交易号','账单号','订单']]
# # df2=df2[~df2['订单'].str.contains(r'_\d$|Y\d$', regex=True)]
# # df2.drop_duplicates(subset=['订单'],inplace=True)
# # df2['支付交易号']=df2['支付交易号'].map(lambda x: str(x).strip())
# # df2['账单号']=df2['账单号'].map(lambda x: str(x).strip())
# # # df2.fillna('--', inplace=True)
# # dl=df0.columns.tolist()+['订单']
# # dfa=df0
# # df0=[]
# # a = ['原始交易号','争议交易号','交易账单号']
# # b = ['支付交易号','支付交易号','账单号']
# # c = zip(a,b)
# # for i in c:
# # df=pd.merge(dfa, df2, how='left', left_on=i[0], right_on=i[1])
# # df=df[dl]
# # dfa=df[df['订单'].isnull()].drop('订单',axis=1)
# # df.dropna(subset=['订单'],inplace=True)
# # l.append(df)
# # df2=[]
# df=pd.concat(l, axis=0, ignore_index=True)
# # df['订单']=df['订单'].map(lambda x: "'"+str(x).strip())
# df['订单编号']=df['订单编号'].map(lambda x: "'"+str(x).strip())
# df['支付交易号']=df['支付交易号'].map(lambda x:"'"+str(x))
# df['账单号']=df['账单号'].map(lambda x:"'"+str(x))
# df['订单']=df['订单'].map(lambda x:"'"+str(x))
# df['货运单号']=df['货运单号'].map(lambda x:"'"+str(x))
# df['物流状态']=df['物流状态'].map(lambda x:str(x))
# # df['167']=df['167'].map(lambda x:"'"+str(x))
# df=df[(df['物流状态']=='成功签收')|(df['物流状态']=='已签收')]
# df.to_csv('dzdk1018.csv', index=False)
#-----------------------------------------------------------------------------123.py
# import pandas as pd
# import chardet
# import re
# def re_strip(s):
# s_re = re.compile(r'^c|\.\d')
# a=s_re.sub('',s)
# return a
# def re_strip1(s):
# s_re = re.compile(r'_.{1,5}$|Y.{1,3}$')
# a=s_re.sub('',s)
# return a
# l=[]
# # usecols=['付款时间','客户姓名','电话1','联系邮箱','国家(中)','所属地区','所属城市','邮政编码','门牌号','邮寄地址1(完整导出)','物流渠道','货运单号','发货时间','订单编号','Paypal ID','交易编号']
# usecols=['付款时间','订单','收件人','电话','邮箱','国家/地区','省/州','城市','邮编','门牌号','邮寄地址','支付交易号','账单号','物流渠道','运单号','发货时间']
# df0=pd.read_csv(r'C:\Users\lx\Downloads\yz.csv', low_memory=False,usecols=usecols)
# # df0=df0.rename(columns={'客户姓名': '收件人','电话1': '电话','国家(中)': '国家','邮寄地址1(完整导出)': '邮寄地址','邮政编码': '邮编'})
# df0=df0.rename(columns={'订单': '订单编号','邮箱': '联系邮箱','国家/地区': '国家','省/州': '所属地区','城市': '所属城市','运单号': '货运单号'})
# df0['erp']='云栈'
# print(df0.shape)
# df0.drop_duplicates(subset=None,inplace=True)
# print(df0.shape)
# df0.to_csv('yz1.csv', index=False)
# df=pd.read_csv(r'C:\Users\lx\Downloads\mb1011.csv',usecols=['订单编号','交易编号','店铺名','Paypal ID','发货时间','货运单号','订单原始总金额(首记录)','国家(中)','物流渠道','订单商品名称','SKU'])
# df.dropna(subset=['订单编号'],inplace=True)
# df1=df1[(df1['说明']=='快速结账付款')|(df1['说明']=='快速結帳付款')]
# df.columns=['主体','日期','时间','时区','说明','币种','总额','费用','净额','余额','交易号', '发件人邮箱地址', '名称','银行名称', '银行账户', '运费和手续费金额', '营业税', '账单号', '参考交易号', '账单号a']
# df['订单编号-']=df['订单编号'].map(lambda x: re_strip1(str(x)))
# df0=pd.read_csv(r'C:\Users\lx\Desktop\mb-dzd.csv', low_memory=False,usecols=['订单编号-','主体','账单号','交易号','PayPal邮箱','货运单号'])
# df0=df0.rename(columns={'PayPal邮箱': '收款账号','货运单号': '运单号'})
# df1=pd.read_csv(r'C:\Users\lx\Desktop\yz-dzd.csv', low_memory=False,usecols=['订单-','主体','账单号0','交易号','收款账号','运单号'])
# df1=df1.rename(columns={'订单-': '订单编号-','账单号0': '账单号'})
# l.append(df0)
# l.append(df1)
# dfa=pd.concat(l, axis=0, ignore_index=True)
# dfa.drop_duplicates(subset=['订单编号-','运单号'],inplace=True)
# dfa['订单编号-']=dfa['订单编号-'].map(lambda x: "'"+str(x).strip())
# dfa['运单号']=dfa['运单号'].map(lambda x: "'"+str(x).strip())
# dfa=dfa.rename(columns={'交易号': 'paypal-交易号','账单号': 'paypal-账单号'})
# dfa.fillna(value='---' ,inplace=True)
# print(dfa.columns.tolist())
# print(dfa.shape)
# df2=pd.read_csv(r'C:\Users\lx\Desktop\qqu1.csv', low_memory=False,usecols=['订单编号','物流公司(新)','物流渠道(新)','货运单号','物流状态'])
# df2['订单号']=df2['订单编号'].map(lambda x: re_strip1(str(x).strip()) if x==x else None)
# df2['物流状态']=df2['物流状态'].map(lambda x: str(x).strip())
# df3=pd.merge(df2, df, how='left', on=['订单编号-'])
# print(df2.columns.tolist())
# a = ['订单号','货运单号']
# b = ['订单编号-','运单号']
# c = zip(a,b)
# for i in c:
# df=pd.merge(df2, dfa, how='left', left_on=i[0], right_on=i[1])
# df2=df[df['主体'].isnull()].drop(['运单号', '收款账号', '订单编号-', '主体', 'paypal-交易号', 'paypal-账单号'],axis=1)
# df.dropna(subset=['主体'],inplace=True)
# l.append(df)
# l.append(df2)
# df=pd.concat(l, axis=0, ignore_index=True)
# print(df.shape)
# df.drop_duplicates(subset=['订单号','货运单号','主体'],inplace=True)
# print(df.shape)
# df0.fillna(value='---' ,inplace=True)
# df1.dropna(subset=['付款时间'],inplace=True)
# print(df.shape[0])
# df1=df1.loc[:,['Paypal ID','交易编号','订单编号']]
# df1.dropna(subset=['付款时间'],inplace=True)
# df0['账单号a']=df0['账单号a'].map(lambda x: str(x).strip())
# df1['订单编号-']=df1['订单编号-'].map(lambda x: str(x).strip() if x==x else None)
# df1['交易编号']=df1['交易编号'].map(lambda x: str(x).strip() if x==x else None)
# df1['Paypal ID']=df1['Paypal ID'].map(lambda x: str(x).strip() if x==x else None)
# df1['货运单号']=df1['货运单号'].map(lambda x: str(x).strip() if x==x else None)
# a = ['交易编号','订单编号-','交易编号','Paypal ID','Paypal ID']
# b = ['交易号','账单号a','参考交易号','交易号','参考交易号']
# c = zip(a,b)
# for i in c:
# df=pd.merge(df1, df0, how='left', left_on=i[0], right_on=i[1])
# df=df[dl]
# df1=df[df['主体'].isnull()].drop(['主体','交易号','账单号','账单号a','参考交易号'],axis=1)
# df.dropna(subset=['主体'],inplace=True)
# l.append(df)
# df0=[]
# l.append(df1)
# df=pd.concat(l, axis=0, ignore_index=True)
# df2=pd.read_csv(r"yz.csv")
# dl=df1.columns.tolist()+['主体','9','16']
# print(df1.columns.tolist())
# print(df.shape[0])
# dfa=df1
# df1=df1.loc[:,['主体','9','16','167']]
# df2=df2[(df2['说明']=='快速结账付款')|(df2['说明']=='快速結帳付款')]
# df3=pd.read_csv(r'C:\Users\lx\Desktop\dzd-.csv')
# df3=df3[(df3['说明']=='快速结账付款')|(df3['说明']=='快速結帳付款')]
# df=pd.merge(df1, df0, how='left', on='订单编号-')
# print(df.shape)
# print(df.shape)
# df2=pd.read_excel(r'C:\Users\lx\Desktop\工作簿12.xlsx')
# l.append(df1)
# l.append(df2)
# l.append(df3)
# df=pd.concat(l, axis=0, ignore_index=True)
# df.drop_duplicates(subset=['交易号'],inplace=True)
# print(df.shape)
# df1=pd.read_excel(r'C:\Users\lx\Desktop\1234.xlsx')
# df1['订单']=df1['订单'].map(lambda x: str(x).strip())
# print(df1.shape)
# df=pd.merge(df1, df0, how='left', left_on='email', right_on='10')
# print(df.shape)
# print(df0.shape)
# df0=df0[(df0['3']=='快速结账付款')|(df0['3']=='快速結帳付款')]
# print(df0.shape)
# df0['167']=df0['16'].map(lambda x: re_strip(str(x)) if x==x else None)
# # df0.fillna({'交易账单号': '---'} ,inplace=True)
# df0=df0.loc[:,['主体','9','16','167']]
# # # print(df0[df0['交易账单号'].isnull()])
# print(df0.columns.tolist())
# df1=pd.read_csv(r"yz1.csv")
# # df1.dropna(subset=['付款时间'],inplace=True)
# # # print(df.shape[0])
# # df1=df1.loc[:,['Paypal ID','交易编号','订单编号']]
# # df1.dropna(subset=['付款时间'],inplace=True)
# df1['支付交易号']=df1['支付交易号'].map(lambda x: str(x).strip())
# df1['账单号']=df1['账单号'].map(lambda x: str(x).strip())
# df1['订单编号']=df1['订单编号'].map(lambda x: str(x).strip())
# df1['货运单号']=df1['货运单号'].map(lambda x: str(x).strip())
# # # df2=pd.read_csv(r"yz.csv")
# # dl=df1.columns.tolist()+['主体','9','16']
# print(df1.columns.tolist())
# # # print(df.shape[0])
# # dfa=df1
# a = ['账单号','支付交易号']
# b = ['167','9']
# c = zip(a,b)
# for i in c:
# df=pd.merge(df1, df0, how='left', left_on=i[0], right_on=i[1])
# # df=df[dl]
# df1=df[df['主体'].isnull()].drop(['主体','9','167','16'],axis=1)
# df.dropna(subset=['主体'],inplace=True)
# l.append(df)
# df0=[]
# l.append(df1)
# # df2=pd.read_csv(r"yz.csv", low_memory=False)
# # df2=df2.loc[:,['支付交易号','账单号','订单']]
# # df2=df2[~df2['订单'].str.contains(r'_\d$|Y\d$', regex=True)]
# # df2.drop_duplicates(subset=['订单'],inplace=True)
# # df2['支付交易号']=df2['支付交易号'].map(lambda x: str(x).strip())
# # df2['账单号']=df2['账单号'].map(lambda x: str(x).strip())
# # # df2.fillna('--', inplace=True)
# # dl=df0.columns.tolist()+['订单']
# # dfa=df0
# # df0=[]
# # a = ['原始交易号','争议交易号','交易账单号']
# # b = ['支付交易号','支付交易号','账单号']
# # c = zip(a,b)
# # for i in c:
# # df=pd.merge(dfa, df2, how='left', left_on=i[0], right_on=i[1])
# # df=df[dl]
# # dfa=df[df['订单'].isnull()].drop('订单',axis=1)
# # df.dropna(subset=['订单'],inplace=True)
# # l.append(df)
# # df2=[]
# df=pd.concat(l, axis=0, ignore_index=True)
# # df['订单']=df['订单'].map(lambda x: "'"+str(x).strip())
# df['订单编号']=df['订单编号'].map(lambda x: "'"+str(x).strip())
# df['支付交易号']=df['支付交易号'].map(lambda x:"'"+str(x))
# df['账单号']=df['账单号'].map(lambda x:"'"+str(x))
# df['订单']=df['订单'].map(lambda x:"'"+str(x))
# df['货运单号']=df['货运单号'].map(lambda x:"'"+str(x))
# df['物流状态']=df['物流状态'].map(lambda x:str(x))
# # df['167']=df['167'].map(lambda x:"'"+str(x))
# df=df[(df['物流状态']=='成功签收')|(df['物流状态']=='已签收')]
# df.to_csv('jg.csv', index=False)
#-----------------------------------------------------------12345.py
# import pandas as pd
# import chardet
# import re
# def re_strip(s):
# s_re = re.compile(r'^c|\.\d')
# a=s_re.sub('',s)
# return a
# def re_strip1(s):
# s_re = re.compile(r'_.{1,5}$|Y.{1,3}$')
# a=s_re.sub('',s)
# return a
# l=[]
# # usecols=['付款时间','客户姓名','电话1','联系邮箱','国家(中)','所属地区','所属城市','邮政编码','门牌号','邮寄地址1(完整导出)','物流渠道','货运单号','发货时间','订单编号','Paypal ID','交易编号']
# usecols=['付款时间','订单','收件人','电话','邮箱','国家/地区','省/州','城市','邮编','门牌号','邮寄地址','支付交易号','账单号','物流渠道','运单号','发货时间']
# df0=pd.read_csv(r'C:\Users\lx\Downloads\yz.csv', low_memory=False,usecols=usecols)
# # df0=df0.rename(columns={'客户姓名': '收件人','电话1': '电话','国家(中)': '国家','邮寄地址1(完整导出)': '邮寄地址','邮政编码': '邮编'})
# df0=df0.rename(columns={'订单': '订单编号','邮箱': '联系邮箱','国家/地区': '国家','省/州': '所属地区','城市': '所属城市','运单号': '货运单号'})
# df0['erp']='云栈'
# print(df0.shape)
# df0.drop_duplicates(subset=None,inplace=True)
# print(df0.shape)
# df0.to_csv('yz1.csv', index=False)
# df=pd.read_csv(r'C:\Users\lx\Downloads\mb1011.csv',usecols=['订单编号','交易编号','店铺名','Paypal ID','发货时间','货运单号','订单原始总金额(首记录)','国家(中)','物流渠道','订单商品名称','SKU'])
# df.dropna(subset=['订单编号'],inplace=True)
# df1=df1[(df1['说明']=='快速结账付款')|(df1['说明']=='快速結帳付款')]
# df.columns=['主体','日期','时间','时区','说明','币种','总额','费用','净额','余额','交易号', '发件人邮箱地址', '名称','银行名称', '银行账户', '运费和手续费金额', '营业税', '账单号', '参考交易号', '账单号a']
# df['订单编号-']=df['订单编号'].map(lambda x: re_strip1(str(x)))
# df=pd.read_csv(r'C:\Users\lx\Desktop\jg.csv', low_memory=False)
# df.drop_duplicates(subset=['订单编号'],inplace=True)
# print(df.shape)
# df['收款账号']=df['收款账号'].replace(r'---','//',regex=True)
# df['收款账号']=df['收款账号'].map(lambda x: str(x).strip() if x==x else None)
# df1=pd.read_csv(r'C:\Users\lx\Desktop\匹配.csv',encoding='gbk')
# df1['邮箱']=df1['邮箱'].map(lambda x: str(x).strip() if x==x else 'cnm')
# print(df1.shape)
# df2=pd.merge(df, df1, how='left', left_on='收款账号', right_on='邮箱')
# df2['主体']=df2['主体'].map(lambda x: x if x!="---" else df2['收款账号'])
# df2['主体']=df2['主体'].where(df2['主体']!='---',df2['结果'])
# df2['主体']=df2['主体'].replace(r'石头\d','石头',regex=True)
# df2['主体']=df2['主体'].replace(r'任品闲001','任品闲',regex=True)
# df2['主体']=df2['主体'].replace(r'多麦\d','多麦',regex=True)
# df2['主体']=df2['主体'].replace(r'尚为leo\d','尚为leo',regex=True)
# df2['主体']=df2['主体'].replace(r'速亚\d','速亚',regex=True)
# df2['主体']=df2['主体'].replace(r'顺发003','顺发002',regex=True)
# print(df2.shape)
# df2=df2.loc[:,['订单编号','物流公司(新)','物流渠道(新)','货运单号','主体','paypal-交易号','paypal-账单号','物流状态']]
# df2=df2.rename(columns={'物流公司(新)': '物流公司','物流渠道(新)': '物流渠道'})
# df2=df2[(df2['物流状态']=='成功签收')|(df2['物流状态']=='已签收')]
# df2['物流状态']=df2['物流状态'].replace(r'已签收','成功签收',regex=True)
# print(df2.shape)
# df2['主体']=df2['主体'].map(lambda x: str(x).strip() if x==x else '无法匹配')
# df3=df2[df2['主体'].str.contains(r'石头|尚为', regex=True)]
# df4=df2[~df2['主体'].str.contains(r'石头|尚为', regex=True)]
# df3.to_csv('jgb1.csv', index=False)
# df4.to_csv('jgb2.csv', index=False)
# df1=pd.read_csv(r'C:\Users\lx\Desktop\yz-dzd.csv', low_memory=False,usecols=['订单-','主体','账单号0','交易号','收款账号','运单号'])
# df1=df1.rename(columns={'订单-': '订单编号-','账单号0': '账单号'})
# l.append(df0)
# l.append(df1)
# dfa=pd.concat(l, axis=0, ignore_index=True)
# dfa.drop_duplicates(subset=['订单编号-','运单号'],inplace=True)
# dfa['订单编号-']=dfa['订单编号-'].map(lambda x: "'"+str(x).strip())
# dfa['运单号']=dfa['运单号'].map(lambda x: "'"+str(x).strip())
# dfa=dfa.rename(columns={'交易号': 'paypal-交易号','账单号': 'paypal-账单号'})
# dfa.fillna(value='---' ,inplace=True)
# print(dfa.columns.tolist())
# print(dfa.shape)
# df2=pd.read_csv(r'C:\Users\lx\Desktop\qqu1.csv', low_memory=False,usecols=['订单编号','物流公司(新)','物流渠道(新)','货运单号','物流状态'])
# df2['订单号']=df2['订单编号'].map(lambda x: re_strip1(str(x).strip()) if x==x else None)
# df2['物流状态']=df2['物流状态'].map(lambda x: str(x).strip())
# df3=pd.merge(df2, df, how='left', on=['订单编号-'])
# print(df2.columns.tolist())
# a = ['订单号','货运单号']
# b = ['订单编号-','运单号']
# c = zip(a,b)
# for i in c:
# df=pd.merge(df2, dfa, how='left', left_on=i[0], right_on=i[1])
# df2=df[df['主体'].isnull()].drop(['运单号', '收款账号', '订单编号-', '主体', 'paypal-交易号', 'paypal-账单号'],axis=1)
# df.dropna(subset=['主体'],inplace=True)
# l.append(df)
# l.append(df2)
# df=pd.concat(l, axis=0, ignore_index=True)
# print(df.shape)
# df.drop_duplicates(subset=['订单号','货运单号','主体'],inplace=True)
# print(df.shape)
# df0.fillna(value='---' ,inplace=True)
# df1.dropna(subset=['付款时间'],inplace=True)
# print(df.shape[0])
# df1=df1.loc[:,['Paypal ID','交易编号','订单编号']]
# df1.dropna(subset=['付款时间'],inplace=True)
# df0['账单号a']=df0['账单号a'].map(lambda x: str(x).strip())
# df1['订单编号-']=df1['订单编号-'].map(lambda x: str(x).strip() if x==x else None)
# df1['交易编号']=df1['交易编号'].map(lambda x: str(x).strip() if x==x else None)
# df1['Paypal ID']=df1['Paypal ID'].map(lambda x: str(x).strip() if x==x else None)
# df1['货运单号']=df1['货运单号'].map(lambda x: str(x).strip() if x==x else None)
# a = ['交易编号','订单编号-','交易编号','Paypal ID','Paypal ID']
# b = ['交易号','账单号a','参考交易号','交易号','参考交易号']
# c = zip(a,b)
# for i in c:
# df=pd.merge(df1, df0, how='left', left_on=i[0], right_on=i[1])
# df=df[dl]
# df1=df[df['主体'].isnull()].drop(['主体','交易号','账单号','账单号a','参考交易号'],axis=1)
# df.dropna(subset=['主体'],inplace=True)
# l.append(df)
# df0=[]
# l.append(df1)
# df=pd.concat(l, axis=0, ignore_index=True)
# df2=pd.read_csv(r"yz.csv")
# dl=df1.columns.tolist()+['主体','9','16']
# print(df1.columns.tolist())
# print(df.shape[0])
# dfa=df1
# df1=df1.loc[:,['主体','9','16','167']]
# df2=df2[(df2['说明']=='快速结账付款')|(df2['说明']=='快速結帳付款')]
# df3=pd.read_csv(r'C:\Users\lx\Desktop\dzd-.csv')
# df3=df3[(df3['说明']=='快速结账付款')|(df3['说明']=='快速結帳付款')]
# df=pd.merge(df1, df0, how='left', on='订单编号-')
# print(df.shape)
# print(df.shape)
# df2=pd.read_excel(r'C:\Users\lx\Desktop\工作簿12.xlsx')
# l.append(df1)
# l.append(df2)
# l.append(df3)
# df=pd.concat(l, axis=0, ignore_index=True)
# df.drop_duplicates(subset=['交易号'],inplace=True)
# print(df.shape)
# df1=pd.read_excel(r'C:\Users\lx\Desktop\1234.xlsx')
# df1['订单']=df1['订单'].map(lambda x: str(x).strip())
# print(df1.shape)
# df=pd.merge(df1, df0, how='left', left_on='email', right_on='10')
# print(df.shape)
# print(df0.shape)
# df0=df0[(df0['3']=='快速结账付款')|(df0['3']=='快速結帳付款')]
# print(df0.shape)
# df0['167']=df0['16'].map(lambda x: re_strip(str(x)) if x==x else None)
# # df0.fillna({'交易账单号': '---'} ,inplace=True)
# df0=df0.loc[:,['主体','9','16','167']]
# # # print(df0[df0['交易账单号'].isnull()])
# print(df0.columns.tolist())
# df1=pd.read_csv(r"yz1.csv")
# # df1.dropna(subset=['付款时间'],inplace=True)
# # # print(df.shape[0])
# # df1=df1.loc[:,['Paypal ID','交易编号','订单编号']]
# # df1.dropna(subset=['付款时间'],inplace=True)
# df1['支付交易号']=df1['支付交易号'].map(lambda x: str(x).strip())
# df1['账单号']=df1['账单号'].map(lambda x: str(x).strip())
# df1['订单编号']=df1['订单编号'].map(lambda x: str(x).strip())
# df1['货运单号']=df1['货运单号'].map(lambda x: str(x).strip())
# # # df2=pd.read_csv(r"yz.csv")
# # dl=df1.columns.tolist()+['主体','9','16']
# print(df1.columns.tolist())
# # # print(df.shape[0])
# # dfa=df1
# a = ['账单号','支付交易号']
# b = ['167','9']
# c = zip(a,b)
# for i in c:
# df=pd.merge(df1, df0, how='left', left_on=i[0], right_on=i[1])
# # df=df[dl]
# df1=df[df['主体'].isnull()].drop(['主体','9','167','16'],axis=1)
# df.dropna(subset=['主体'],inplace=True)
# l.append(df)
# df0=[]
# l.append(df1)
# # df2=pd.read_csv(r"yz.csv", low_memory=False)
# # df2=df2.loc[:,['支付交易号','账单号','订单']]
# # df2=df2[~df2['订单'].str.contains(r'_\d$|Y\d$', regex=True)]
# # df2.drop_duplicates(subset=['订单'],inplace=True)
# # df2['支付交易号']=df2['支付交易号'].map(lambda x: str(x).strip())
# # df2['账单号']=df2['账单号'].map(lambda x: str(x).strip())
# # # df2.fillna('--', inplace=True)
# # dl=df0.columns.tolist()+['订单']
# # dfa=df0
# # df0=[]
# # a = ['原始交易号','争议交易号','交易账单号']
# # b = ['支付交易号','支付交易号','账单号']
# # c = zip(a,b)
# # for i in c:
# # df=pd.merge(dfa, df2, how='left', left_on=i[0], right_on=i[1])
# # df=df[dl]
# # dfa=df[df['订单'].isnull()].drop('订单',axis=1)
# # df.dropna(subset=['订单'],inplace=True)
# # l.append(df)
# # df2=[]
# df=pd.concat(l, axis=0, ignore_index=True)
# # df['订单']=df['订单'].map(lambda x: "'"+str(x).strip())
# df['订单编号']=df['订单编号'].map(lambda x: "'"+str(x).strip())
# df['支付交易号']=df['支付交易号'].map(lambda x:"'"+str(x))
# df['账单号']=df['账单号'].map(lambda x:"'"+str(x))
# df['订单']=df['订单'].map(lambda x:"'"+str(x))
# df['货运单号']=df['货运单号'].map(lambda x:"'"+str(x))
# df['物流状态']=df['物流状态'].map(lambda x:str(x))
# # df['167']=df['167'].map(lambda x:"'"+str(x))
# df=df[(df['物流状态']=='成功签收')|(df['物流状态']=='已签收')]
#-------------------------------------------------------------
# import pandas as pd
# import chardet
# import re
# def re_strip(s):
# s_re = re.compile(r'^c|\.\d')
# a=s_re.sub('',s)
# return a
# def re_strip1(s):
# s_re = re.compile(r'_.{1,5}$|Y.{1,3}$')
# a=s_re.sub('',s)
# return a
# l=[]
# # usecols=['付款时间','客户姓名','电话1','联系邮箱','国家(中)','所属地区','所属城市','邮政编码','门牌号','邮寄地址1(完整导出)','物流渠道','货运单号','发货时间','订单编号','Paypal ID','交易编号']
# usecols=['付款时间','订单','收件人','电话','邮箱','国家/地区','省/州','城市','邮编','门牌号','邮寄地址','支付交易号','账单号','物流渠道','运单号','发货时间']
# df0=pd.read_csv(r'C:\Users\lx\Downloads\yz.csv', low_memory=False,usecols=usecols)
# # df0=df0.rename(columns={'客户姓名': '收件人','电话1': '电话','国家(中)': '国家','邮寄地址1(完整导出)': '邮寄地址','邮政编码': '邮编'})
# df0=df0.rename(columns={'订单': '订单编号','邮箱': '联系邮箱','国家/地区': '国家','省/州': '所属地区','城市': '所属城市','运单号': '货运单号'})
# df0['erp']='云栈'
# print(df0.shape)
# df0.drop_duplicates(subset=None,inplace=True)
# print(df0.shape)
# df0.to_csv('yz1.csv', index=False)
# df=pd.read_csv(r'C:\Users\lx\Downloads\mb1011.csv',usecols=['订单编号','交易编号','店铺名','Paypal ID','发货时间','货运单号','订单原始总金额(首记录)','国家(中)','物流渠道','订单商品名称','SKU'])
# df.dropna(subset=['订单编号'],inplace=True)
# df1=df1[(df1['说明']=='快速结账付款')|(df1['说明']=='快速結帳付款')]
# df.columns=['主体','日期','时间','时区','说明','币种','总额','费用','净额','余额','交易号', '发件人邮箱地址', '名称','银行名称', '银行账户', '运费和手续费金额', '营业税', '账单号', '参考交易号', '账单号a']
# df['订单编号-']=df['订单编号'].map(lambda x: re_strip1(str(x)))
# df0=pd.read_csv(r'C:\Users\lx\Desktop\快速结账付款(全)1018-.csv', low_memory=False)
# df1=pd.read_csv(r'C:\Users\lx\Desktop\mbq.csv', low_memory=False)
# df1['订单编号-']=df1['订单编号'].map(lambda x: re_strip1(str(x)))
# df0.fillna(value='---' ,inplace=True)
# df1.dropna(subset=['付款时间'],inplace=True)
# print(df.shape[0])
# df1=df1.loc[:,['Paypal ID','交易编号','订单编号']]
# df1.dropna(subset=['付款时间'],inplace=True)
# df0['账单号a']=df0['账单号a'].map(lambda x: str(x).strip())
# df1['订单编号-']=df1['订单编号-'].map(lambda x: str(x).strip() if x==x else None)
# df1['交易编号']=df1['交易编号'].map(lambda x: str(x).strip() if x==x else None)
# df1['Paypal ID']=df1['Paypal ID'].map(lambda x: str(x).strip() if x==x else None)
# df1['货运单号']=df1['货运单号'].map(lambda x: str(x).strip() if x==x else None)
# a = ['交易编号','订单编号-','交易编号','Paypal ID','Paypal ID']
# b = ['交易号','账单号a','参考交易号','交易号','参考交易号']
# c = zip(a,b)
# for i in c:
# df=pd.merge(df1, df0, how='left', left_on=i[0], right_on=i[1])
# df=df[dl]
# df1=df[df['主体'].isnull()].drop(['主体','交易号','账单号','账单号a','参考交易号'],axis=1)
# df.dropna(subset=['主体'],inplace=True)
# l.append(df)
# df0=[]
# l.append(df1)
# df=pd.concat(l, axis=0, ignore_index=True)
# df2=pd.read_csv(r"yz.csv")
# dl=df1.columns.tolist()+['主体','9','16']
# print(df1.columns.tolist())
# print(df.shape[0])
# dfa=df1
# df1=df1.loc[:,['主体','9','16','167']]
# df2=df2[(df2['说明']=='快速结账付款')|(df2['说明']=='快速結帳付款')]
# df3=pd.read_csv(r'C:\Users\lx\Desktop\dzd-.csv')
# df3=df3[(df3['说明']=='快速结账付款')|(df3['说明']=='快速結帳付款')]
# df=pd.merge(df1, df0, how='left', on='订单编号-')
# print(df.shape)
# print(df.shape)
# df2=pd.read_excel(r'C:\Users\lx\Desktop\工作簿12.xlsx')
# l.append(df1)
# l.append(df2)
# l.append(df3)
# df=pd.concat(l, axis=0, ignore_index=True)
# df.drop_duplicates(subset=['交易号'],inplace=True)
# print(df.shape)
# df1=pd.read_excel(r'C:\Users\lx\Desktop\1234.xlsx')
# df1['订单']=df1['订单'].map(lambda x: str(x).strip())
# print(df1.shape)
# df=pd.merge(df1, df0, how='left', left_on='email', right_on='10')
# print(df.shape)
# print(df0.shape)
# df0=df0[(df0['3']=='快速结账付款')|(df0['3']=='快速結帳付款')]
# print(df0.shape)
# df0['167']=df0['16'].map(lambda x: re_strip(str(x)) if x==x else None)
# # df0.fillna({'交易账单号': '---'} ,inplace=True)
# df0=df0.loc[:,['主体','9','16','167']]
# # # print(df0[df0['交易账单号'].isnull()])
# print(df0.columns.tolist())
# df1=pd.read_csv(r"yz1.csv")
# # df1.dropna(subset=['付款时间'],inplace=True)
# # # print(df.shape[0])
# # df1=df1.loc[:,['Paypal ID','交易编号','订单编号']]
# # df1.dropna(subset=['付款时间'],inplace=True)
# df1['支付交易号']=df1['支付交易号'].map(lambda x: str(x).strip())
# df1['账单号']=df1['账单号'].map(lambda x: str(x).strip())
# df1['订单编号']=df1['订单编号'].map(lambda x: str(x).strip())
# df1['货运单号']=df1['货运单号'].map(lambda x: str(x).strip())
# # # df2=pd.read_csv(r"yz.csv")
# # dl=df1.columns.tolist()+['主体','9','16']
# print(df1.columns.tolist())
# # # print(df.shape[0])
# # dfa=df1
# a = ['账单号','支付交易号']
# b = ['167','9']
# c = zip(a,b)
# for i in c:
# df=pd.merge(df1, df0, how='left', left_on=i[0], right_on=i[1])
# # df=df[dl]
# df1=df[df['主体'].isnull()].drop(['主体','9','167','16'],axis=1)
# df.dropna(subset=['主体'],inplace=True)
# l.append(df)
# df0=[]
# l.append(df1)
# # df2=pd.read_csv(r"yz.csv", low_memory=False)
# # df2=df2.loc[:,['支付交易号','账单号','订单']]
# # df2=df2[~df2['订单'].str.contains(r'_\d$|Y\d$', regex=True)]
# # df2.drop_duplicates(subset=['订单'],inplace=True)
# # df2['支付交易号']=df2['支付交易号'].map(lambda x: str(x).strip())
# # df2['账单号']=df2['账单号'].map(lambda x: str(x).strip())
# # # df2.fillna('--', inplace=True)
# # dl=df0.columns.tolist()+['订单']
# # dfa=df0
# # df0=[]
# # a = ['原始交易号','争议交易号','交易账单号']
# # b = ['支付交易号','支付交易号','账单号']
# # c = zip(a,b)
# # for i in c:
# # df=pd.merge(dfa, df2, how='left', left_on=i[0], right_on=i[1])
# # df=df[dl]
# # dfa=df[df['订单'].isnull()].drop('订单',axis=1)
# # df.dropna(subset=['订单'],inplace=True)
# # l.append(df)
# # df2=[]
# df=pd.concat(l, axis=0, ignore_index=True)
# # df['订单']=df['订单'].map(lambda x: "'"+str(x).strip())
# df['订单编号']=df['订单编号'].map(lambda x: "'"+str(x).strip())
# df['支付交易号']=df['支付交易号'].map(lambda x:"'"+str(x))
# df['账单号']=df['账单号'].map(lambda x:"'"+str(x))
# df['订单']=df['订单'].map(lambda x:"'"+str(x))
# df['货运单号']=df['货运单号'].map(lambda x:"'"+str(x))
# df['物流状态']=df['物流状态'].map(lambda x:str(x))
# # df['167']=df['167'].map(lambda x:"'"+str(x))
# df=df[(df['物流状态']=='成功签收')|(df['物流状态']=='已签收')]
# df.to_csv('mb-dzd.csv', index=False)
#---------------------------------------------------------------------
from docx import Document
import xlrd
from docx.shared import Cm
d=Document('1.docx')
# tb=d.add_table(rows=3,cols=3)
# tb.add_row()
# for i in range(3):
# tb.cell(0,i).text=str(i)  # fill in the header row cells
# for row in range(1,3+1):
# for col in range(3):
# tb.cell(row,col).width=1
# tb.cell(row,col).text='666'
# tb.cell(row,col).width=Cm(6)
# tb.style='Medium Grid 1 Accent 1'
# tb.autofit=True
# d.save('tb.docx')
print(d.paragraphs[0].text)
print('---------------')
print(d.paragraphs[1].text)
print('---------------')
print(d.paragraphs[2].text)
for r in d.paragraphs[2].runs:
print(r.font.size,r.text)
r.text='清水' if r.text=='黄贝' else r.text
# print(d.paragraphs[2].runs[0].font.size)
# d.paragraphs[2].runs[0].font.size=100
# print(d.paragraphs[2].runs[0].font.size)
print('---------------')
print(d.paragraphs[3].text)
print('---------------')
print(d.paragraphs[4].text)
print('---------------')
print(d.paragraphs[5].text)
print('---------------')
print(d.paragraphs[10].text)
print(len(d.tables))
# for i in d.tables:
# print(i)
# for i in d.tables[32].rows[11].cells:
# i.paragraphs[0].runs[0].add_picture('1.png')
# print(i.add_picture('1.png'))
from docx.shared import Inches
d.tables[32].rows[11].cells[0].paragraphs[0].runs[0].clear()
d.tables[32].rows[11].cells[0].paragraphs[0].runs[0].add_picture('1.jpg',width=Inches(3))
# d.tables[1].rows[2].cells[3].paragraphs[0].runs[0].text='100.95m'
# d.tables[1].rows[2].cells[3].paragraphs[0].runs[0].font.size
# d.paragraphs[2].text='(黄贝街道-宁水垃圾转运站)'
# d.paragraphs[2].
d.save('2.docx')
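# A minimal helper sketch (not part of the original workflow) for locating the picture
# cell by its placeholder text instead of hardcoding d.tables[32].rows[11]; the marker
# argument is an assumption -- pass whatever placeholder text 1.docx actually contains.
def find_cell(doc, marker):
    # walk every table/row/cell and return the first index triple whose text contains the marker
    for ti, table in enumerate(doc.tables):
        for ri, row in enumerate(table.rows):
            for ci, cell in enumerate(row.cells):
                if marker in cell.text:
                    return ti, ri, ci
    return None
# print(find_cell(d, '图片'))  # '图片' is a hypothetical placeholder string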
# from translate import Translator
import csv
# # The snippet below translates a simple sentence from English to Chinese
# translator= Translator(to_lang="chinese")
# translation = translator.translate("Good night!")
# print(translation)
# Works between any two languages; here Chinese is translated to English
# l=[]
# translator= Translator(from_lang="chinese",to_lang="english")
# with open("留言.csv","r",encoding='gbk',errors="replace") as csvfile:
# reader = csv.reader(csvfile)
# # readlines is not needed here
# for line in reader:
# translation =translator.translate(line[0])
# l.append(translation)
# translation = translator.translate("任意")
# print(translation)
# with open("留言翻译结果.csv","w") as csvfile:
# writer = csv.writer(csvfile)
# # write the column names (header row) first
# writer.writerow(l)
# use writerows to write multiple rows at once
# writer.writerows([[0,1,3],[1,2,3],[2,3,4]])
#----------------------------------------------------------
import requests
import re
import json
import time
import csv
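# The class below is a small, unofficial client for translate.google.cn: it scrapes the
# TKK seed from the homepage, reproduces the JavaScript tk hash locally in
# __getGoogleToken, then POSTs the text to /translate_a/single and joins the translated
# segments from the JSON response.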
class GoogleTranslator:
_host = 'translate.google.cn'
_headers = {
'Host': _host,
'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Mobile Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept-Encoding': 'gzip, deflate, br',
'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
'Referer': 'https://' + _host,
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0'
}
_language = {
'afrikaans': 'af',
'arabic': 'ar',
'belarusian': 'be',
'bulgarian': 'bg',
'catalan': 'ca',
'czech': 'cs',
'welsh': 'cy',
'danish': 'da',
'german': 'de',
'greek': 'el',
'english': 'en',
'esperanto': 'eo',
'spanish': 'es',
'estonian': 'et',
'persian': 'fa',
'finnish': 'fi',
'french': 'fr',
'irish': 'ga',
'galician': 'gl',
'hindi': 'hi',
'croatian': 'hr',
'hungarian': 'hu',
'indonesian': 'id',
'icelandic': 'is',
'italian': 'it',
'hebrew': 'iw',
'japanese': 'ja',
'korean': 'ko',
'latin': 'la',
'lithuanian': 'lt',
'latvian': 'lv',
'macedonian': 'mk',
'malay': 'ms',
'maltese': 'mt',
'dutch': 'nl',
'norwegian': 'no',
'polish': 'pl',
'portuguese': 'pt',
'romanian': 'ro',
'russian': 'ru',
'slovak': 'sk',
'slovenian': 'sl',
'albanian': 'sq',
'serbian': 'sr',
'swedish': 'sv',
'swahili': 'sw',
'thai': 'th',
'filipino': 'tl',
'turkish': 'tr',
'ukrainian': 'uk',
'vietnamese': 'vi',
'yiddish': 'yi',
'chinese_simplified': 'zh-CN',
'chinese_traditional': 'zh-TW',
'auto': 'auto'
}
_url = 'https://' + _host + '/translate_a/single'
_params = {
'client': 'webapp',
'sl': 'en',
'tl': 'zh-CN',
'hl': 'zh-CN',
'dt': ['at', 'bd', 'ex', 'ld', 'md', 'qca', 'rw', 'rm', 'ss', 't'],  # a dict literal cannot hold duplicate 'dt' keys (only the last survives); requests encodes this list as repeated dt= parameters
'otf': '1',
'ssel': '0',
'tsel': '0',
'kc': '1'
}
__cookies = None
__googleTokenKey = '376032.257956'
__googleTokenKeyUpdataTime = 600.0
__googleTokenKeyRetireTime = time.time() + 600.0
def __init__(self, src = 'en', dest = 'zh-CN', tkkUpdataTime = 600.0):
if src not in self._language and src not in self._language.values():
src = 'auto'
if dest not in self._language and dest not in self._language.values():
dest = 'auto'
self._params['sl'] = src
self._params['tl'] = dest
self.__googleTokenKeyUpdataTime = tkkUpdataTime  # double leading underscore, so the constructor argument is not silently ignored
self.__updateGoogleTokenKey()
def __updateGoogleTokenKey(self):
self.__googleTokenKey = self.__getGoogleTokenKey()
self.__googleTokenKeyRetireTime = time.time() + self.__googleTokenKeyUpdataTime
def __getGoogleTokenKey(self):
"""Get the Google TKK from https://translate.google.cn"""
# TKK example: '435075.3634891900'
result = ''
try:
res = requests.get('https://' + self._host, timeout = 3)
res.raise_for_status()
self.__cookies = res.cookies
result = re.search(r'tkk\:\'(\d+\.\d+)?\'', res.text).group(1)
except Exception as ex:
print('ERROR: ' + str(ex))
time.sleep(1)
return result
def __getGoogleToken(self, a, TKK):
"""Calculate Google tk from TKK """
# https://www.cnblogs.com/chicsky/p/7443830.html
# if text = 'Tablet Developer' and TKK = '435102.3120524463', then tk = '315066.159012'
def RL(a, b):
for d in range(0, len(b)-2, 3):
c = b[d + 2]
c = ord(c[0]) - 87 if 'a' <= c else int(c)
c = a >> c if '+' == b[d + 1] else a << c
a = a + c & 4294967295 if '+' == b[d] else a ^ c
return a
g = []
f = 0
while f < len(a):
c = ord(a[f])
if 128 > c:
g.append(c)
else:
if 2048 > c:
g.append((c >> 6) | 192)
else:
if (55296 == (c & 64512)) and (f + 1 < len(a)) and (56320 == (ord(a[f+1]) & 64512)):
f += 1
c = 65536 + ((c & 1023) << 10) + (ord(a[f]) & 1023)
g.append((c >> 18) | 240)
g.append((c >> 12) & 63 | 128)
else:
g.append((c >> 12) | 224)
g.append((c >> 6) & 63 | 128)
g.append((c & 63) | 128)
f += 1
e = TKK.split('.')
h = int(e[0]) or 0
t = h
for item in g:
t += item
t = RL(t, '+-a^+6')
t = RL(t, '+-3^+b+-f')
t ^= int(e[1]) or 0
if 0 > t:
t = (t & 2147483647) + 2147483648
result = t % 1000000
return str(result) + '.' + str(result ^ h)
def translate(self, text):
if time.time() > self.__googleTokenKeyRetireTime:
self.__updateGoogleTokenKey()
data = {'q': text}
self._params['tk'] = self.__getGoogleToken(text, self.__googleTokenKey)
result = ''
try:
res = requests.post(self._url,
headers = self._headers,
cookies = self.__cookies,
data = data,
params = self._params,
timeout = 6)
res.raise_for_status()
jsonText = res.text
if len(jsonText)>0:
jsonResult = json.loads(jsonText)
if len(jsonResult[0])>0:
for item in jsonResult[0]:
result += item[0]
return result
except Exception as ex:
print('ERROR: ' + str(ex))
return ''
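# Usage sketch for the class above, kept commented out so the batch run below is not
# affected; it assumes the translate.google.cn endpoint is still reachable.
# gt = GoogleTranslator(src='en', dest='zh-CN')
# print(gt.translate('Tablet Developer'))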
def readFile(fileName):
with open(fileName, 'r',errors="replace") as f:
paragraph = ''
for line in f:
if line[0]!='\n':
paragraph += line.strip('\n')
else:
if len(paragraph)>0:
yield paragraph
paragraph = ''
if len(paragraph)>0:
yield paragraph
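# readFile yields blank-line-separated paragraphs: a file containing "a\nb\n\nc\n"
# yields 'ab' first and then 'c'.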
def main():
translator = GoogleTranslator()
count = 0
with open(r'C:\Users\lx\Desktop\留言翻译结果.txt', 'w', encoding='utf-8') as df:
for line in readFile(r'C:\Users\lx\Desktop\留言.txt'):
if len(line) > 1:
count += 1
print('\r' + str(count), end = '', flush = True)
df.write(line.strip() + "\n")
result = translator.translate(line)
df.write(result.strip() + "\n\n")
if __name__ == "__main__":
startTime = time.time()
main()
print()
print('%.2f seconds' % (time.time() - startTime))
#----------------------------------------------------------
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import requests
from PIL import Image
import re
import time
import csv
mx=[]
with open('1.csv') as f:
data= csv.reader(f)
ll=list(data)[1:]
shuliang=len(ll)
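# fk_cookies: log in to sf.gmcc.net:7001 with a manually typed captcha (the image is
# saved to 验证码.png and shown via PIL), and return the session cookies as a list of
# {'name': ..., 'value': ...} dicts ready for driver.add_cookie().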
def fk_cookies(i):
c=1
req=requests.session()
while c:
headers = {
'Host': 'sf.gmcc.net:7001',
'User-Agent': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/7.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET4.0C; .NET4.0E)',
'Connection':'Keep-Alive'
}
r = req.get("http://sf.gmcc.net:7001/mobilesg/checkUsergd.action?",headers=headers)
r = req.get("http://sf.gmcc.net:7001/mobilesg/gxcaptcha.png",headers=headers)
with open('验证码.png', 'wb') as f:
f.write(r.content)
img=Image.open('验证码.png')
img.show()
yzm=input('验证码:')
headers = {
'Referer':'http://sf.gmcc.net:7001/mobilesg/checkUsergd.action?returnUrl=toOSSOAction.action&referrer=',
'User-Agent':'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/7.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET4.0C; .NET4.0E)',
'Content-Type':'application/x-www-form-urlencoded',
'Host':'sf.gmcc.net:7001'
}
data="username="+i['username']+"&password="+i['password']+"&validationCode="+yzm+"&loginType=1"
r = req.post("http://sf.gmcc.net:7001//mobilesg/checkUsergd.action?returnUrl=toOSSOAction.action&referrer=",headers=headers,data=data)
jy='验证码错误!'
if len(re.findall(jy,r.text,re.S))>0:
print('验证码错误,请重新看图片输入新的验证码')
req.cookies.clear()
continue
c=0
return [{'name': k, 'value': v} for k, v in req.cookies.get_dict().items()]
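# Main flow: open the work-order frameset in IE, swap in the cookies from fk_cookies,
# then loop over the rows from 1.csv -- query each order number, open it, and
# re-dispatch it to 中移在线-广州, collecting anything that cannot be processed in mx.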
driver = webdriver.Ie()
wait = WebDriverWait(driver, 7)
# ~ # ie_para=q.get()
# ~ # driver.set_window_size(ie_para[2], ie_para[3])
# ~ # driver.set_window_position(ie_para[0], ie_para[1])
# ~ # wait = WebDriverWait(driver, 5)
# ~ driver.get('http://sf.gmcc.net:7001/mobilesg/showCont4WaitWork.do?srcPage=jiake')
driver.get('http://sf.gmcc.net:7001/mobilesg/toShowMainFrame.action?leftMenu=myWorks&title=我的工作&srcPage=showNewToDoWorkList')
driver.delete_all_cookies()
sacookies=fk_cookies({'username': '', 'password': ''})
# ~ print(sacookies)
for cookie in sacookies:
driver.add_cookie(cookie)
driver.refresh()
t0=time.time()
while len(ll)>0:
l=ll.pop()
driver.switch_to.frame('contFrame')
driver.switch_to.frame('topFrame')
js='$("input[readonly]").attr("readonly",null);'
driver.execute_script(js)
b = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id="processDate_startid"]')))
driver.execute_script('arguments[0].value="";', b)
b = wait.until(EC.element_to_be_clickable((By.NAME, 'queryParam.orderSerialNo')))
g=str(l[0][1:])
driver.execute_script("arguments[0].value=\'"+g+"\';", b)
b = wait.until(EC.element_to_be_clickable((By.NAME, 'queryParam.sourceFlag')))
driver.execute_script('arguments[0].value="已签收 ";', b)
b = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="btn"]')))
driver.execute_script("arguments[0].click()", b)
time.sleep(1)
driver.switch_to.parent_frame()
driver.switch_to.frame("mainFrame")
try:
b = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="listsListContainer"]/table/tbody/tr[2]/td[4]')))
# print(b)
# ~ /html/body/table[2]/tbody/tr/td/table[2]/tbody/tr/td/div/table/tbody/tr[2]/td[10]/font
driver.execute_script("arguments[0].click()", b)
except:
m=','.join([str(g),'无法点击派出',str(time.strftime("%Y-%m-%d %X", time.localtime()))])
print(m)
mx.append(m)
driver.switch_to.parent_frame()
driver.switch_to.parent_frame()
continue
time.sleep(1)
# ~ driver.get('http://sf.gmcc.net:7001/list4WaitWork.do?productId=&actionId=&isFirstQuery=NO&srcPage=showNewToDoWorkList')
driver.switch_to.parent_frame()
driver.switch_to.frame("infoFrame")
time.sleep(1)
driver.execute_script("window.confirm = function(str){return 0;};")
driver.execute_script("newchangeDispatchTicketgd()")
# try:
# print(driver.switch_to.alert.text)
# driver.switch_to.alert.dismiss()
# driver.execute_script("window.confirm = function(str){return 1;};")
# except:
# pass
driver.switch_to.parent_frame()
time.sleep(2)
try:
driver.switch_to.frame('contFrame')
except:
pass
# b = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="ext-gen45"]')))
# print(b.text)
driver.execute_script("queryDatas();")
b = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id="ext-comp-1018"]')))
g=l[2]
driver.execute_script("arguments[0].value=\'"+g+"\';", b)
b = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="ext-gen307"]')))
driver.execute_script("arguments[0].click()", b)
b = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="ext-gen156"]')))
driver.execute_script("arguments[0].click()", b)
time.sleep(2)
b = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="ext-gen241"]//div[@class="x-grid3-cell-inner x-grid3-col-3" and contains(text(),"中移在线-广州")]')))
# print(b.text)
b.click()
b = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="ext-gen97"]')))
driver.execute_script("arguments[0].click()", b)
b = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="_dispatchReason"]')))
driver.execute_script('arguments[0].value="1";', b)
b = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="ext-gen13"]')))
driver.execute_script("arguments[0].click()", b)
# ~ b = wait.until(EC.alert_is_present())
# ~ b.accept()
time.sleep(1)
print(str(l[0][1:])+',已派出')
# ~ js='window.alert = function(str){return;};'
# ~ driver.execute_script(js)
# ~ b = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="ext-gen328"]')))
# ~ b.send_keys(Keys.ENTER)
driver.get('http://sf.gmcc.net:7001/mobilesg/toShowMainFrame.action?leftMenu=myWorks&title=我的工作&srcPage=showNewToDoWorkList')
with open('未派出.csv', 'w', encoding='utf-8',newline='') as f:
f.write('\n\''.join(mx))
print('\r本次工单派发完毕,共操作'+str(shuliang)+'条工单,无法处理'+str(len(mx))+'条工单,用时'+str(int(time.time()-t0))+'秒')
print('无法处理明细请看脚本目录下 未派出.csv')
# ~ driver.refresh()
# ~ driver.switch_to.frame('contFrame')
# ~ driver.switch_to.frame('topFrame')
# ~ b = wait.until(EC.element_to_be_clickable((By.NAME, 'queryParam.orderSerialNo')))
# ~ g='2020'
# ~ driver.execute_script("arguments[0].value=\'"+g+"\';", b)
#http://sf.gmcc.net:7001/mobilesg/showCont4WaitWork.do
# ~ b = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="changeDispatchName"]')))
# ~ driver.execute_script("arguments[0].click()", b)
#----------------------------------------------------------
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import requests
from PIL import Image
import re
import time
import csv
from multiprocessing import Manager,Pool,freeze_support
import pyautogui
import pyperclip
# ~ import win32com.client
def fk_cookies(i):
c=1
req=requests.session()
while c:
headers = {
'Host': 'sf.gmcc.net:7001',
'User-Agent': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/7.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET4.0C; .NET4.0E)',
'Connection':'Keep-Alive'
}
r = req.get("http://sf.gmcc.net:7001/mobilesg/checkUsergd.action?",headers=headers)
r = req.get("http://sf.gmcc.net:7001/mobilesg/gxcaptcha.png",headers=headers)
with open('验证码.png', 'wb') as f:
f.write(r.content)
img=Image.open('验证码.png')
img.show()
yzm=input('验证码:')
headers = {
'Referer':'http://sf.gmcc.net:7001/mobilesg/checkUsergd.action?returnUrl=toOSSOAction.action&referrer=',
'User-Agent':'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/7.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET4.0C; .NET4.0E)',
'Content-Type':'application/x-www-form-urlencoded',
'Host':'sf.gmcc.net:7001'
}
data="username="+i['username']+"&password="+i['password']+"&validationCode="+yzm+"&loginType=1"
r = req.post("http://sf.gmcc.net:7001//mobilesg/checkUsergd.action?returnUrl=toOSSOAction.action&referrer=",headers=headers,data=data)
jy='验证码错误!'
if len(re.findall(jy,r.text,re.S))>0:
print('验证码错误,请重新看图片输入新的验证码')
req.cookies.clear()
continue
c=0
return [{'name': k, 'value': v} for k, v in req.cookies.get_dict().items()]
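# td: the browser worker. For each row popped from dl it writes the order details to
# 1.txt, looks the work order up in the frameset, opens the attachment dialog, and
# coordinates with the wj process through the manager queues (q/qq for the file picker,
# qqq/qqqq for the return-reason dialog, q0 for skip/finish signalling).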
def td(ck,dl,q,qq,qqq,qqqq,mx,q0):
g0 = str(dl[0][0])
driver = webdriver.Ie()
wait = WebDriverWait(driver, 7)
driver.get('http://sf.gmcc.net:7001/mobilesg/toShowMainFrame.action?leftMenu=myWorks&title=我的工作&srcPage=showNewToDoWorkList')
driver.delete_all_cookies()
for cookie in ck:
driver.add_cookie(cookie)
driver.refresh()
while len(dl)>0:
gdxx=dl.pop()
with open('1.txt','w') as f:
f.write('账号:'+str(gdxx[1])+'\n'+ '通话流水号:'+str(gdxx[2])+'\n'+'退单原因:'+str(gdxx[3]))
driver.switch_to.frame('contFrame')
driver.switch_to.frame('topFrame')
js='$("input[readonly]").attr("readonly",null);'
driver.execute_script(js)
b = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id="processDate_startid"]')))
driver.execute_script('arguments[0].value="";', b)
b = wait.until(EC.element_to_be_clickable((By.NAME, 'queryParam.orderSerialNo')))
g=str(gdxx[0])
driver.execute_script("arguments[0].value=\'"+g+"\';", b)
b = wait.until(EC.element_to_be_clickable((By.NAME, 'queryParam.sourceFlag')))
driver.execute_script('arguments[0].value="已签收 ";', b)
b = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="btn"]')))
driver.execute_script("arguments[0].click()", b)
time.sleep(1)
driver.switch_to.parent_frame()
driver.switch_to.frame("mainFrame")
time.sleep(1)
try:
b = wait.until(EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/table/tbody/tr[19]/td[2]')))
c=driver.execute_script("return arguments[0].innerText;", b)
# print(c)
b = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="listsListContainer"]/table/tbody/tr[2]/td[4]')))
driver.execute_script("arguments[0].click()", b)
except:
m=','.join([str(g),'代办无工单',str(time.strftime("%Y-%m-%d %X", time.localtime()))])
print(m)
mx.append(m)
driver.switch_to.parent_frame()
driver.switch_to.parent_frame()
q0.put(g)
continue
q0.put(0)
time.sleep(1)
driver.switch_to.parent_frame()
driver.switch_to.frame("infoFrame")
time.sleep(1)
b = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="opening_3"]')))
driver.execute_script("arguments[0].scrollIntoView();", b)
driver.execute_script("arguments[0].click()", b)
b = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id="attach"]')))
driver.switch_to.frame(b)
q.put(1)
# ~ b = wait.until(EC.presence_of_element_located((By.XPATH, '//input[@id="fileName"]')))
# ~ driver.execute_script("arguments[0].click()", b)
pyautogui.moveTo(pyautogui.locateCenterOnScreen('3.png'))
pyautogui.click()
qq.get()
# print(222)
b = wait.until(EC.presence_of_element_located((By.XPATH, '//input[@name="submitBtn"]')))
driver.execute_script("arguments[0].click()", b)
time.sleep(1)
driver.switch_to.parent_frame()
js="saveWorkTicketValue.getSgAttachmentByTachNameAndID("+str(c)+",'集中预约',newdoRefreshSgAttachment);"
driver.execute_script(js)
qqq.put(gdxx[3])
qqqq.get()
# ~ driver.switch_to.default_content()
driver.switch_to.parent_frame()
q0.put(g0)
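# wj: the desktop worker. It waits on q0/q, drives the native upload dialog with
# pyautogui (1.png .. 6.png are screenshots of the controls it clicks), types 1.txt as
# the attachment name, pastes the return reason received on qqq via pyperclip, then
# answers td on qq/qqqq; it exits when q0 delivers the sentinel order id posted by td.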
def wj(q,qq,qqq,qqqq,dl,q0):
g = str(dl[0][0])
while 1:
c=q0.get()
if c==g:
break
elif c!=0:
continue
a=q.get()
time.sleep(1)
pyautogui.moveTo(pyautogui.locateCenterOnScreen('1.png'))
pyautogui.click()
pyautogui.typewrite('1.txt')
pyautogui.press('enter')
qq.put(1)
cp=qqq.get()
time.sleep(1)
pyautogui.moveTo(pyautogui.locateCenterOnScreen('2.png'))
pyautogui.click()
pyautogui.press('down')
pyautogui.press('down')
pyautogui.press('down')
pyautogui.press('down')
pyautogui.press('enter')
pyautogui.press('tab')
pyautogui.press('down')
pyautogui.press('enter')
pyperclip.copy(cp)
pyautogui.moveTo(pyautogui.locateCenterOnScreen('4.png'))
pyautogui.click()
pyautogui.hotkey('ctrl', 'v')
pyautogui.moveTo(pyautogui.locateCenterOnScreen('5.png'))
pyautogui.click()
pyautogui.hotkey('ctrl', 'v')
pyautogui.moveTo(pyautogui.locateCenterOnScreen('6.png'))
pyautogui.click()
qqqq.put(1)
if a == g:
print(1)
break
if __name__ == '__main__':
freeze_support()
manager=Manager()
mx=manager.list(['工单号,处理情况,处理时间'])
r=requests.get('http://106.52.169.246/ajax')
if r.text !='10000':
print('无法使用,请联系开发人员')
time.sleep(1000000)
with open('1.csv', 'r') as f:
reader = csv.reader(f)
dl=manager.list(list(reversed(list(reader)[1:])))
# ~ print(len(dl))
if len(dl) ==0 :
print('1.csv 无记录')
time.sleep(1000000)
ck=fk_cookies({'username': '', 'password': ''})
q = manager.Queue()
qq = manager.Queue()
qqq = manager.Queue()
qqqq = manager.Queue()
q0 = manager.Queue()
pool = Pool()
pool.apply_async(td,(ck,dl,q,qq,qqq,qqqq,mx,q0)) #
pool.apply_async(wj,(q,qq,qqq,qqqq,dl,q0))
pool.close()
pool.join()
with open('代办无单.csv', 'w', encoding='utf-8',newline='') as f:
f.write('\n\''.join(mx))