Python Practice Plan Study Notes: Week 1

Since 58.com's homepage has been redesigned, the listings there are now almost all Zhuanzhuan items, so this exercise scrapes Zhuanzhuan product information instead.

The code is as follows:

#!/usr/bin/env python
# coding:utf-8
__author__ = 'lucky'

from bs4 import BeautifulSoup
import requests
import time

url = "http://bj.58.com/pbdn/0/"

header1 ={"User-Agent":'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
          "Cookie":'ipcity=sy%7C%u6C88%u9633; myfeet_tooltip=end; f=n; ipcity=sy%7C%u6C88%u9633; myfeet_tooltip=end; bj58_id58s="eWs0d25wVFQ4dFEtNjYzOQ=="; id58=c5/njVdz7Zg4WxzCBY1FAg==; als=0; city=nj; myfeet_tooltip=end; __utma=253535702.531780430.1467215895.1467215895.1467215895.1; __utmz=253535702.1467215895.1.1.utmcsr=bj.58.com|utmccn=(referral)|utmcmd=referral|utmcct=/pbdn/; 58home=nj; sessionid=2541e821-f53f-4629-8fb7-3e8911ce33d0; zz_download=2; bj58_new_session=0; bj58_init_refer=""; bj58_new_uv=2; 58tj_uuid=2ea1d0c1-893a-40eb-a802-ce41dcfa6601; new_session=0; new_uv=2; utm_source=; spm=; init_refer=; bdshare_firstime=1467252573738; final_history=25145823854502; ipcity=sy%7C%u6C88%u9633'
}
header2 = {
    "User-Agent":'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
    "Cookie":'myfeet_tooltip=end; bj58_id58s="eWs0d25wVFQ4dFEtNjYzOQ=="; id58=c5/njVdz7Zg4WxzCBY1FAg==; als=0; city=nj; myfeet_tooltip=end; __utma=253535702.531780430.1467215895.1467215895.1467215895.1; __utmz=253535702.1467215895.1.1.utmcsr=bj.58.com|utmccn=(referral)|utmcmd=referral|utmcct=/pbdn/; 58home=nj; sessionid=2541e821-f53f-4629-8fb7-3e8911ce33d0; zz_download=2; bj58_new_session=0; bj58_init_refer=""; bj58_new_uv=2; 58tj_uuid=2ea1d0c1-893a-40eb-a802-ce41dcfa6601; new_session=0; new_uv=2; utm_source=; spm=; init_refer='
}

info_list = []    # detail-page links collected from the listing page

def get_page(url, data=None):
    # Scrape title, view count, price, and address from a Zhuanzhuan detail page.
    wb_data = requests.get(url, headers=header2)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    titles = soup.select("div.box_left_top > h1")
    views = soup.select("p > span.look_time")
    prices = soup.select("span.price_now > i")
    addresses = soup.select('div.palce_li > span > i')
    if data is None:
        for title, view, price, address in zip(titles, views, prices, addresses):
            data = {
                "title": title.get_text(),
                "view": view.get_text(),
                "price": price.get_text(),
                "address": address.get_text()
            }
            print(data)

def get_links(url, data=None):
    # Collect the detail-page links from the listing page.
    wb_data = requests.get(url, headers=header1)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    links = soup.select('#infolist > div.infocon > table > tbody > tr > td.img > a')
    if data is None:
        for link in links:
            single_link = link.get('href')
            info_list.append(single_link)


get_links(url)
for link in info_list:
    get_page(link)
    time.sleep(1)    # pause between requests to avoid hammering the server

The scraped results look like this:


(Figure: result.png, the scraped output)

This part was fairly straightforward.
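The script above only crawls the first page of listings. Below is a minimal sketch of paging through several listing pages; the /pn{n}/ URL pattern is an assumption about how 58.com paginates and should be verified in the browser before relying on it:

# Hypothetical pagination sketch: the /pn{n}/ pattern is an assumption,
# not taken from this page; check the real listing URLs before using it.
base_url = "http://bj.58.com/pbdn/0/pn{}/"

for page in range(1, 4):            # first three listing pages
    get_links(base_url.format(page))

for link in info_list:
    get_page(link)
    time.sleep(1)                   # throttle requests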

To practice checking the view count of a regular personal listing, I found a normal (non-Zhuanzhuan) page to test against; a snapshot is shown below:

(Figure: 网页快照.png, a snapshot of the test page)

The code is as follows:

#!/usr/bin/env python
# coding:utf-8

__author__ = 'lucky'

from bs4 import BeautifulSoup
import requests
import time

url = "http://bj.58.com/pingbandiannao/26125016613569x.shtml"

def get_view(url):
    # Extract the numeric listing id from the URL; split on the literal
    # 'x.shtml' suffix (str.strip would remove characters, not the suffix).
    info_id = url.split('/')[-1].split('x.shtml')[0]
    api = 'http://jst1.58.com/counter?infoid={}'.format(info_id)
    # At first the view count scraped from 58 was always 0; it only worked after
    # adding the full set of headers, so the site has evidently upgraded its
    # anti-scraping. The headers below were copied from Chrome's Sources and
    # Network panels.
    headers = {
    'Accept':'*/*',
    'Accept-Encoding':'gzip, deflate, sdch',
    'Accept-Language':'zh-CN,zh;q=0.8,zh-TW;q=0.6,en;q=0.4',
    'Cache-Control':'max-age=0',
    'Connection':'keep-alive',
    'Cookie':r'bj58_id58s="R3kzdjNnK0cyY1hVOTM1MQ=="; id58=c5/njVd1S1Vxil1/BAOQAg==; 58home=sy; ipcity=sy%7C%u6C88%u9633; als=0; myfeet_tooltip=end; 58tj_uuid=ad884df7-493a-4788-86d8-1aa0d8110340; new_session=0; new_uv=3; utm_source=; spm=; init_refer=; final_history={}; bj58_new_session=0; bj58_init_refer=""; bj58_new_uv=3'.format(info_id),
    'Host':'jst1.58.com',
    'Referer':r'http://bj.58.com/pingbandiannao/26125016613569x.shtml',
    'User-Agent':r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'}
    js = requests.get(api, headers=headers)
    # The response body ends in '=<count>'; take the value after the last '='.
    view = js.text.split('=')[-1]
    return view

def get_page(url, data=None):
    # Scrape title, price, posting date, area, and view count from a listing page.
    wb_data = requests.get(url)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    title = soup.select('div.col_sub.mainTitle > h1')
    price = soup.select('span.price.c_f50')
    area = soup.select('span.c_25d')
    date = soup.select('li.time')
    data = {
        'title': title[0].text,
        'price': price[0].text,
        'date': date[0].text,
        'area': list(area[0].stripped_strings),
        'view': get_view(url)
    }
    print(data)

get_page(url)

The tricky part was dealing with 58.com's anti-scraping measures. Using Chrome's Sources and Network panels, I found the counter request the page makes and replayed it with the relevant headers, which finally made scraping the view count work:

(Figure: view.png, the scraped view count)
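For reference, here is a minimal, hedged sketch of the parsing step. The sample response body is hypothetical, inferred from the js.text.split('=')[-1] call in get_view, which implies the endpoint returns something of the form name=count:

# Hypothetical defensive variant of the parsing in get_view. The sample body
# below is illustrative only; the real response format was not captured here.
def parse_view_count(body):
    value = body.rsplit('=', 1)[-1].strip()
    return int(value) if value.isdigit() else 0

print(parse_view_count("Counter58.demo=2970"))    # hypothetical body, prints 2970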

Summary:

  • 1. These exercises gave me a better grasp of locating and extracting elements from a page.
  • 2. I learned in more depth how to use Chrome's developer tools to inspect page information.
  • 3. Common anti-scraping techniques can be worked around, so the page information can still be scraped.
  • 4. When stuck on a problem, think it through first, then search Google for a solution.
