5. Lianjia scraper (with JSON, CSV, and database storage)

import json
from urllib import request
from bs4 import BeautifulSoup
import csv
import pymysql  # install first: pip install pymysql

# 1. Request the page
# Build the request object
def create_request(url, page, city):
    page_url = url % (city, page)
    # print(page_url)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
    }
    return request.Request(url=page_url, headers=headers)
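
# Usage sketch (values are illustrative; the URL pattern is the one
# defined in main below):
#   create_request('https://%s.lianjia.com/ershoufang/pg%d/', 1, 'sz')
#   -> requests https://sz.lianjia.com/ershoufang/pg1/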

# Send the request
def request_data(req):
    res = request.urlopen(req)
    return res.read().decode('utf-8')
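
# A defensive variant (a sketch, not part of the original): urlopen
# raises urllib.error.URLError on network failures, so retry a few
# times and fall back to an empty string.
import urllib.error

def request_data_safe(req, retries=3):
    for _ in range(retries):
        try:
            return request.urlopen(req).read().decode('utf-8')
        except urllib.error.URLError:
            continue
    return ''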

# 2. Parse the page
def analyze_html(html):
    # Load the HTML string into a soup object
    soup = BeautifulSoup(html, 'lxml')
    # Select every house listing
    house_list = soup.select('li.clear')
    # print(len(house_list))
    for house in house_list:
        # A dict holding one house's information
        item = {}
        # Title
        item['title'] = house.select('.title a')[0].get_text()

        # House info
        item['house'] = house.select('.houseInfo')[0].get_text()

        # Location info
        item['positions'] = house.select('.positionInfo')[0].get_text()

        # Total price
        item['totalPrice'] = house.select('.totalPrice')[0].get_text()

        # Unit price
        item['unitPrice'] = house.select('.unitPrice')[0].get_text()

        # Image (lazy-loaded, so the real URL lives in data-original)
        item['img'] = house.select('.lj-lazy')[0].get('data-original')

        yield item
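
# Note: the select(...)[0] pattern above raises IndexError when a card
# lacks a field (e.g. an ad card without an image). A minimal defensive
# helper, assuming an empty string is an acceptable default (a sketch,
# not part of the original):
def select_text(tag, selector, default=''):
    found = tag.select(selector)
    return found[0].get_text() if found else default
# Usage sketch: item['title'] = select_text(house, '.title a')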

# 3. Store the data
# 1> Save as JSON
def write_to_json(data):
    # Flatten the page generators into one JSON-serializable list
    json_list = []
    for houses in data:
        for house in houses:
            json_list.append(house)
    with open('lianjia.json', 'w', encoding='utf-8') as fp:
        # ensure_ascii=False keeps the Chinese text readable in the file
        fp.write(json.dumps(json_list, ensure_ascii=False))
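
# Read-back sketch (assumes the lianjia.json file written above):
#   with open('lianjia.json', encoding='utf-8') as fp:
#       houses = json.load(fp)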

# 2> Save as CSV
def write_to_csv(data):
    # csv.writer wants rows, so flatten the data into a 2D list first:
    # one big list holding one row (a list of values) per house
    csv_items = []
    for houses in data:
        for house in houses:
            # house is a dict of field name -> value; collect the values
            # (dicts keep insertion order, so columns match the header)
            item = []
            for k, v in house.items():
                item.append(v)
            csv_items.append(item)
    # Write the CSV (newline='' avoids blank lines on Windows)
    with open('lianjia.csv', 'w', newline='', encoding='utf-8') as fp:
        # Create a csv writer on top of fp
        w = csv.writer(fp)
        # Header row
        w.writerow(['title', 'house', 'positions', 'totalPrice', 'unitPrice', 'img'])
        # Data rows: writerows (plural) -- writerow would dump the whole
        # 2D list into a single row
        w.writerows(csv_items)


# 3> Save to a MySQL database
def write_to_mysql(data):
    # Open a connection to MySQL
    conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',
                           password='root', db='lianjiadb', charset='utf8')
    # Create a cursor to execute SQL statements
    cursor = conn.cursor()
    # Parameterized statement: pymysql does the quoting, so quotes in
    # the scraped text cannot break the SQL (building the statement with
    # string formatting would also invite SQL injection)
    sql = 'insert into ershoufang values(NULL, %s, %s, %s, %s, %s, %s)'
    for houses in data:
        for house in houses:
            cursor.execute(sql, (house['title'], house['house'],
                                 house['positions'], house['totalPrice'],
                                 house['unitPrice'], house['img']))
    # One commit for the whole batch is enough
    conn.commit()
    # Close the cursor and the connection
    cursor.close()
    conn.close()
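
# The INSERT above assumes a table with an auto-increment id followed by
# six string columns. The schema is not shown in the original; a
# plausible sketch (names and sizes are guesses matching the item keys):
#   CREATE TABLE ershoufang (
#       id INT PRIMARY KEY AUTO_INCREMENT,
#       title VARCHAR(255),
#       house VARCHAR(255),
#       positions VARCHAR(255),
#       totalPrice VARCHAR(64),
#       unitPrice VARCHAR(64),
#       img VARCHAR(512)
#   ) DEFAULT CHARSET = utf8;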


def main():
    # e.g. https://sz.lianjia.com/ershoufang/pg1/
    url = 'https://%s.lianjia.com/ershoufang/pg%d/'
    city = input('Enter city code (e.g. sz): ')
    start = input('Enter start page: ')
    end = input('Enter end page: ')
    # Collect one parsed-page generator per page
    house_list = []
    for page in range(int(start), int(end) + 1):
        # Build the request object
        req = create_request(url=url, page=page, city=city)

        # Send the request
        html = request_data(req)

        # Parse the response (analyze_html returns a generator)
        res = analyze_html(html)

        house_list.append(res)

    # Store the data. house_list holds generators, which can only be
    # consumed once, so enable exactly one of the writers below.
    # Save as JSON
    # write_to_json(house_list)

    # Save as CSV
    # write_to_csv(house_list)

    # Save to MySQL
    write_to_mysql(house_list)



if __name__ == '__main__':
    main()

