Python crawler: nationwide train information query

Train information

Code:

```python
import urllib
from urllib.request import urlopen
from bs4 import BeautifulSoup
import pymysql
import requests
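# FromAToB is the author's own helper module (not shown here);
# FromAToB.selAB(A, B) is used below and appears to report whether a direct train exists.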
import FromAToB




def getHtml(start,arrive,time):
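    # Build the query URL for start -> arrive on the given date,
    # fetch the result page and return it as a BeautifulSoup object.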
    start=urlChange(start)
    arrive=urlChange(arrive)
    url="https://huoche.cncn.com/train-'%s'-'%s'?date='%s'" %(start,arrive,time)
    headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36'}
    req = urllib.request.Request(url=url,headers=headers)
    response = urllib.request.urlopen(req)
    data=response.read()
    soup=BeautifulSoup(data,"html.parser")
    return soup

def urlChange(a):
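    # Convert a city name into the GB2312 percent-encoding the site uses in its URLs,
    # e.g. 马鞍山 -> %C2%ED%B0%B0%C9%BD (see the encoding analysis further below).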
    a=a.encode('gb2312')
    a=str(a)
    a=a[2:-1]
    a=a.replace('\\x','%')
    a=a.upper()
    return a

def getData(soup):
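    # Parse every train row on the result page and return a list of
    # [train number, departure station, arrival station, departure time, arrival time,
    #  travel time, distance, first class, second class, hard seat, sleeper].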
    #print(soup)
    trainNumInfo=[]
    startStation=[]
    startTime=[]
    runTime=[]
    arriveStation=[]
    arriveTime=[]
    distance=[]
    seat=[]
    trainInfos=soup.find_all('table',attrs={'id':False})
    for trainInfo in trainInfos:
        td=trainInfo.find('tr').find_all('td')
        # get the train number
        trainNumInfo.append(td[0].find('b').find('a').getText())
        # get the departure station, departure time, destination station and arrival time
        start=td[1].getText().rstrip().split(' ')
        startStation.append(start[0][2:len(start[0])])
        arriveStation.append(start[1][1:len(start[1])])
        startTime.append(td[2].find('b').getText())
        arriveTime.append(td[2].find('span').getText())
        # get the travel time and travel distance
        runTime.append(td[3].find('b').getText())
        distance.append(td[3].find('span').getText())
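        # ticket info per seat class ('无' = none available):
        # 一等座 first class, 二等座 second class, 硬座 hard seat, 卧铺 sleeper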
        sit={'一等座':'无','二等座':'无','硬座':'无','卧铺':'无'}
        flag=td[4].getText().lstrip()
        flag=flag.rstrip()
        if flag != '无':
            ems=td[4].find_all('em')
            spans=td[4].find_all('span')
            for em,i in zip(ems,range(len(ems))):
                sit[em.getText()]=spans[i].getText()
        else:
            sit['无座']='0.0'
        seat.append(sit)

    trains=[]
    for i in range(len(trainNumInfo)):
        train=[]
        train.append(trainNumInfo[i])
        train.append(startStation[i])
        train.append(arriveStation[i])
        train.append(startTime[i])
        train.append(arriveTime[i])
        train.append(runTime[i])
        train.append(distance[i])
        train.append(seat[i]['一等座'])
        train.append(seat[i]['二等座'])
        train.append(seat[i]['硬座'])
        train.append(seat[i]['卧铺'])
        trains.append(train)
    return trains
        
def getStation(soup):
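    # When there is no direct train, the result page suggests an itinerary with a transfer;
    # extract the name of that transfer station.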
    station=soup.find('div',attrs={'class':'train_details train_list'}).find('table').find('td',attrs={'class':'first','width':False})
    sta=station.getText()
    station=sta.split(' ')
    return station[2]

def leastTime(x):
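    # For each train type (first letter of the train number, or '无' for purely numeric
    # numbers), keep the train with the shortest travel time.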
    a={}
    trains=[]
    train={}
    for i in x:
        if i[0][0:1].isdigit():
            a['无']=10*24*60
        else:
            a[i[0][0:1]]=10*24*60
    for i in x:
        time=i[5].split('小时')
        if len(time)>1:
            if time[1] != '':
                time=int(time[0])*60+int(time[1][0:-2])
            else:
                time=int(time[0])*60
            
        else:
            time=int(time[0][0:-2])
        if i[0][0:1] in a:
            if a[i[0][0:1]]>time:
                a[i[0][0:1]]=time
                train[i[0][0:1]]=i
        else:
            if a['无']>time:
                a['无']=time
                train['无']=i
    for i in train.values():
        trains.append(i)
    return trains
def mostComf(x,p):
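    # Keep one train per type, ordered from most to least comfortable (G, D, C, Z, T, K, 无).
    # If p is set (price also matters) return the mid-range type as a compromise,
    # otherwise return the most comfortable type available.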
    kind=['G','D','C','Z','T','K','无']
    tp=[]
    t=10*24*60
    a={}
    for i in kind:
        a[i]=''
        
    for i in x:
        if i[0][0:1] in a:
            a[i[0][0:1]]=i
        else:
            a['无']=i
            
    if p:
        j=0
        for i in kind:
            if len(a[i])>0:
                tp.append(a[i])
                j+=1
        return [tp[int(j/2)]]
    else:
        for i in kind:
            if len(a[i])>0:
                return [a[i]]
def leastMoney(x):
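    # Keep one train per type and return the last type available in the comfort
    # ordering (assumed to be the cheapest).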
    kind=['G','D','C','Z','T','K','无']
    tp=[]
    t=10*24*60
    a={}
    for i in kind:
        a[i]=''
        
    for i in x:
        if i[0][0:1] in a:
            a[i[0][0:1]]=i
        else:
            a['无']=i
            
    j=0
    for i in kind:
        if len(a[i])>0:
            tp.append(a[i])
            j+=1
    return [tp[j-1]]
        
def AToB(A,B,time,p,t,c):
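    # Query trains from A to B on the given date; p / t / c select the cheapest /
    # fastest / most comfortable options. If no direct train exists (FromAToB.selAB),
    # query via the transfer station suggested on the page instead.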
    soup=getHtml(A,B,time)
    if FromAToB.selAB(A,B):
        x=getData(soup)
        if t:
            x=leastTime(x)
        if c:
            x=mostComf(x,p)
        if p:
            x=leastMoney(x)
        return x
    else:
        mid=getStation(soup)
        print(mid)
        x=getData(getHtml(A,mid,time))
        if t:
            x=leastTime(x)
        if c:
            x=mostComf(x,p)
        if p:
            x=leastMoney(x)
        y=getData(getHtml(mid,B,time))
        if t:
            y=leastTime(y)
        if c:
            y=mostComf(y,p)
        if p:
            y=leastMoney(y)
        return x+y
#a=AToB('马鞍山','南京','2018-10-18',True,True,True)
#print(a)
```

Program approach

Analyzing the page

[Page 1: the query home page]
[Page 2: 南京 → 马鞍山, 2018/11/28]
Enter the departure city, arrival city and departure date, click 立即搜索 (search), and the site jumps to page 2. Its URL turns out to follow a fixed format (a short assembly sketch follows the list below).

  • train-%C4%CF%BE%A9-%C2%ED%B0%B0%C9%BD?date=2018-11-28
    • %C4%CF%BE%A9: the departure city
    • %C2%ED%B0%B0%C9%BD: the arrival city
    • date=2018-11-28: the departure date
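
For instance, the URL for this example can be reassembled from its parts. The following is a minimal sketch reusing the urlChange() helper from the code listing above; the host and path pattern are simply what was observed in the browser:

```python
# Minimal sketch: rebuild the observed URL from its parts.
# urlChange() is the GB2312 conversion function defined in the code above.
start = urlChange('南京')     # -> '%C4%CF%BE%A9'
arrive = urlChange('马鞍山')  # -> '%C2%ED%B0%B0%C9%BD'
url = "https://huoche.cncn.com/train-%s-%s?date=%s" % (start, arrive, '2018-11-28')
print(url)  # https://huoche.cncn.com/train-%C4%CF%BE%A9-%C2%ED%B0%B0%C9%BD?date=2018-11-28
```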

Next, work out the encoding. Different character sets encode the same text differently, so finding the right one is the crucial step. The encoding used here is a bit of an oddball: the site does not simply rely on an off-the-shelf urlencoder/urldecoder.

Note: try encoding the city name with a few candidate character sets.

URL encoding of 马鞍山 as it appears on the page: %C2%ED%B0%B0%C9%BD

utf-8:  b'\xe9\xa9\xac\xe9\x9e\x8d\xe5\xb1\xb1'
gbk:    b'\xc2\xed\xb0\xb0\xc9\xbd'
gb2312: b'\xc2\xed\xb0\xb0\xc9\xbd'

The gbk and gb2312 results match the page's encoding of 马鞍山 once each \x is replaced with %, the leading b' and the trailing quote are stripped, and the hex digits are uppercased.
A small conversion function is therefore all that is needed:

```python
def urlChange(a):
    a=a.encode('gb2312')
    a=str(a)
    a=a[2:-1]
    a=a.replace('\\x','%')
    a=a.upper()
    return a
```
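
As a side note (not something the original code does), the standard library can produce the same percent-encoding once it is told to use GB2312, which makes for an easy cross-check of the hand-rolled conversion:

```python
from urllib.parse import quote, unquote

# Percent-encode with GB2312 instead of the default UTF-8.
print(quote('马鞍山', encoding='gb2312'))   # %C2%ED%B0%B0%C9%BD
print(quote('南京', encoding='gb2312'))     # %C4%CF%BE%A9

# Decode back as a sanity check.
print(unquote('%C2%ED%B0%B0%C9%BD', encoding='gb2312'))  # 马鞍山
```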
