用 Python 抓取百度企业信用数据

1.抓取百度企业信用的企业信息(基本信息、股东信息、主要人员等)
(原文此处配有企业信息页面的截图)
2.难点分析:主要在于数据解析,以及如何通过一个爬虫完成全部字段的解析并入库。我自己用的是 Scrapy,代码如下:

    def parse(self, response):
        """Parse a search-result page: take the first result's link and
        request the company detail page.

        Yields a ``scrapy.Request`` for the detail page, or nothing when
        the page has no result link (layout change / empty search).
        """
        hrefs = response.xpath("//h3/a/@href").extract()
        if not hrefs:
            # Guard: the original indexed extract()[0] directly, which
            # raises IndexError on pages with no results.
            logger.error("no detail link found on %s", response.url)
            return

        # The page carries a relative path; build the absolute detail URL.
        detail_url = "https://xin.baidu.com" + hrefs[0]

        # NOTE: constructing/yielding a Request raises almost never, and a
        # try/except here cannot catch download failures anyway — those
        # should be handled via the request's `errback`. The original
        # try/except was therefore dropped as misleading dead weight.
        yield scrapy.Request(
            detail_url,
            callback=self.parse_details,
            dont_filter=True
        )

            # 详情页解析

    def parse_details(self, response):
        """Parse a company detail page into a QccItem (business-licence fields).

        Every field is extracted defensively: a missing node yields the
        field's documented default (" " for free-text fields, None for
        dates/numbers) instead of aborting the whole item.
        """
        # Basic-info table shared by most fields below.
        table = "//table[@class='zx-detail-basic-table']"

        def first_text(xpath, default=None):
            # First matched text node, stripped; `default` when nothing matches.
            # Replaces the original's repeated bare `except:` blocks, which
            # swallowed *every* exception (even KeyboardInterrupt).
            texts = response.xpath(xpath).extract()
            return texts[0].strip() if texts else default

        item = QccItem()

        # 法定代表人 (legal representative)
        item["legal_person"] = first_text(table + "//tr[2]/td[2]//text()", " ")

        # 注册资本 (registered capital), e.g. "100万元" -> 100.
        # The original compared the already-int value against "-" — dead
        # code, since "-" raises ValueError inside int() and lands in the
        # except branch. A non-numeric or missing cell maps to None.
        raw_money = first_text(table + "//tr[1]/td[2]//text()")
        try:
            item["money"] = int(raw_money.split("万")[0])
        except (AttributeError, ValueError):
            item["money"] = None

        # 成立日期 (establishment date); placeholder strings mean "unknown".
        establish = first_text(table + "//tr[6]/td[4]//text()")
        if establish in (None, "-", "正常", "暂无"):
            establish = None
        item["establish_date"] = establish

        # 登记状态 (registration status)
        item["register_status"] = first_text(table + "//tr[2]/td[4]//text()", " ")

        # 统一社会信用代码 (unified social credit code)
        item["code_or_registration_no"] = first_text(
            table + "//tr[4]/td[2]//text()", " ")

        # 企业类型 (company type)
        item["type"] = first_text(table + "//tr[7]/td[2]//text()", " ")

        # 核准日期 (approval date); "-" means unknown.
        approval = first_text(table + "//tr[8]/td[4]//text()")
        item["approval_date"] = None if approval == "-" else approval

        # 登记机关 (registering authority)
        item["register_organization"] = first_text(
            table + "//tr[6]/td[2]//text()", " ")

        # 所属区域 (area)
        item["area_id"] = first_text(table + "//tr[8]/td[2]//text()", " ")

        # 营业期限 (operating period), one cell formatted "start 至 end".
        # NOTE(review): the original read the start from
        # //section[@id='Cominfo']//table and the end from the basic table —
        # presumably the same cell reached by two paths; confirm against the
        # live page before unifying the XPaths. Both are kept as-is here.
        period_start_raw = first_text(
            "//section[@id='Cominfo']//table//tr[7]/td[4]//text()")
        start = None
        if period_start_raw is not None:
            start = period_start_raw.split("至")[0].replace(" ", "")
            if start == "***":  # masked value on the page
                start = None
        item["operating_period_start"] = start

        period_end_raw = first_text(table + "//tr[7]/td[4]//text()")
        end = None
        if period_end_raw is not None:
            parts = period_end_raw.split("至")
            if len(parts) > 1:
                end = parts[1].replace(" ", "")
                if end == "无固定期限":  # "no fixed term"
                    end = None
        item["operating_period_end"] = end

        # 住所 (registered address); trim the trailing "查看地图" link text.
        address = first_text(table + "//tr[9]/td[2]//text()", " ")
        item["address"] = address.split("查")[0] if address != " " else " "

        # 经营范围 (business scope)
        item["business_scope"] = first_text(table + "//tr[10]/td[2]//text()", " ")

        # 企业名称 — the original indexed extract()[0] unguarded, so a
        # layout change killed the whole item with IndexError.
        item["name"] = first_text("//span[@class='entName']//text()", " ")

        # Fields not present on this source; populated elsewhere or unused.
        item["money_currency"] = None   # 注册资本币种
        item["business_ower"] = None    # 经营者 (key spelling kept for schema compat)
        item["establish_form"] = None   # 组成形式
        item["empty_ratio"] = None      # 空置率
        item["logout_date"] = None      # 注销日期
        item["revoke_date"] = None      # 吊销日期
        item["register_date"] = None    # 注册日期
        item["data_status"] = 1         # 数据当前状态

        # Audit timestamps (crawl date, YYYY-MM-DD) — computed once.
        today = time.strftime("%Y-%m-%d", time.localtime())
        item["create_date"] = today
        item["update_date"] = today

        yield item


(以上即爬虫的核心解析代码)

3.和企查查的类似,通过企业获取到企业id,然后去爬取。
4.部署完爬虫项目,每天的数据量,大概20万左右。
5.希望大家一起学习进步。qq:763073105

你可能感兴趣的:(数据库,爬虫)