# Python web scraper — Lianjia Suzhou sold-home prices, part 2 (链家苏州成交房价)

# -*- coding: utf-8 -*-
import bs4 
import requests
import time#引入time,计算下载时间


def open_url(url):
    """Fetch *url* with a desktop-browser User-Agent and return the Response.

    A browser User-Agent is sent because lianjia.com rejects the default
    python-requests one.

    Raises:
        requests.exceptions.RequestException: on connection failure/timeout.
        requests.exceptions.HTTPError: on a non-2xx status, so callers never
            silently parse an error page as listing data.
    """
    hd = {
        'User-Agent': ('Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 '
                       '(KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36'),
    }
    r = requests.get(url, headers=hd, timeout=10)
    r.raise_for_status()  # fail fast instead of scraping a 4xx/5xx body
    return r

# Base URL of the sold-listings index; the page number is appended ("pg1", ...).
host = 'https://su.lianjia.com/chengjiao/pg'

# Module-level accumulators shared by the two scraping phases below.
whvj = []  # detail-page URLs collected from the index pages
aa = []    # sold-price text ("record_price" span) per detail page
bb = []    # tokenised text of the first "content" div (several entries per page)
cc = []    # text of the last "name" div per detail page
dd = []    # page <title> text per detail page
count = 1  # next index-page number to fetch (1-based)

start = time.time()  # wall-clock start, for the total-time report at the end
size = 0  # number of detail pages processed so far (progress counter)
q = 2  # exclusive upper bound: index pages 1 .. q-1 are fetched

# Phase 1: walk the index pages and collect every listing's detail-page URL.
while count < q:
    url = host + str(count)  # e.g. https://su.lianjia.com/chengjiao/pg1
    r = open_url(url)
    soup = bs4.BeautifulSoup(r.text,'html.parser')
    count = count + 1
    
    
    # Each listing's thumbnail anchor (<a class="img">) links to its detail page.
    targets = soup.find_all('a',class_="img")
    
    for each in targets:
        whvj.append(each['href'])
    # Text progress bar ("已经下载" = "downloaded"); the string is runtime output
    # and is left untranslated.  NOTE(review): count was incremented above, so
    # with q == 2 the single page already prints as 100%.
    print('\r'+"已经下载:"+int(count/q*100)*"█"+"【"+str(round(float(count/q)*100,2))+"%"+"】",end="")
# Phase 2: visit every collected detail-page URL and scrape its fields.
#
# Fixes vs. the original: a stray `requests.get(url, stream=True)` issued one
# extra, never-consumed request (and never closed the connection) — removed;
# the unused `count1` counter and a redundant int(len(...)) are also gone.
content_size = len(whvj)  # total detail pages, for the progress percentage
chunk_size = 1024  # only feeds the "MB" progress figure below
for i in whvj:
    soup1 = bs4.BeautifulSoup(open_url(i).text,'html.parser')
    # Sold price text, e.g. "123万" — first "record_price" span on the page.
    djjx = soup1.find_all("span",class_="record_price")
    aa.append(djjx[0].text)
    # Tokenised text of the first "content" div.  Several children are
    # appended per page, so bb grows faster than the other lists.
    xbxi = soup1.find_all("div",class_="content")
    for each in xbxi[0]:
        bb.append(each.text.split())
    # Last "name" div on the page.
    uijm = soup1.find_all("div",class_="name")
    cc.append(uijm[-1].text)
    # Page <title> (find_all returns at most one for <title>).
    title = soup1.find_all("title")
    for each in title:
        dd.append(each.text)
    size = size + 1
    # NOTE(review): the "MB" figure divides a page COUNT by 1024*1024, so it
    # does not measure bytes; kept byte-identical to preserve output format.
    print('\r'+"已经下载:"+int(size/content_size*100)*"█"+" 【"+str(round(size/chunk_size/1024,2))+"MB】"+"【"+str(round(float(size/content_size)*100,2))+"%"+"】",end="")

##
# Phase 3: join the per-page fields into one output line per listing.
#
# Bug fix: the original iterated range(len(bb)), but bb receives SEVERAL
# entries per page (see the inner loop in phase 2) while aa/cc/dd/whvj get
# one each — so dd[i]/aa[i]/whvj[i]/cc[i] raised IndexError as soon as any
# page contributed more than one bb token.  Iterate only over indices that
# exist in every list.
result = []
length = min(len(aa), len(bb), len(cc), len(dd), len(whvj))
for i in range(length):
    result.append(str(dd[i]) + '  ' + str(aa[i]) + '  ' + str(whvj[i])
                  + '  ' + str(bb[i]) + '^' + str(cc[i]) + '\n')

end = time.time()
# "总耗时" = "total elapsed time"; runtime output left untranslated.
print("总耗时:"+str(end-start)+"秒")

#data_count = 0

# Persist the assembled lines to ty.txt (UTF-8; overwrites any previous run).
with open('ty.txt','w',encoding='utf-8') as out_file:
    out_file.writelines(result)

# (blog-scrape footer artifact, not code: 你可能感兴趣的:(笔记) — "you may also like: notes")