python爬取京东文胸数据(三)

上篇我们只爬了一个牌子的文胸,这次我们来多爬几个牌子的

## 1. 爬取不同牌子的 url
python爬取京东文胸数据(三)_第1张图片
其实可以直接爬那个href,但我发现有的带了https有的没带就索性直接取id拼接了

import requests
import json
import threading
import time
import re
from lxml import etree

class cup:
    """Scrape JD.com product-comment data for the bra search listing.

    Collects product ids from the JD search page, builds the per-product
    comment JSONP URLs, and prints the color/size of each comment.
    """

    def __init__(self):
        # Desktop Chrome UA so JD serves the normal (non-mobile) HTML page.
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'}
        self.url = 'https://item.jd.com/'

    def vari_cpu(self):
        """Return a list of comment-JSONP URLs, one per product on the search page."""
        url_list = []
        url = 'https://search.jd.com/Search?keyword=%E6%96%87%E8%83%B8&enc=utf-8&spm=2.1.1'
        html = requests.get(url, headers=self.headers).text
        html = etree.HTML(html)
        # Each product card carries an id whose tail (after a 6-char prefix)
        # is the numeric product id used by the comment endpoint.
        cpu_link = html.xpath('//div[@class="p-icons"]/@id')
        for i in cpu_link:
            i = i[6:]  # keep only the numeric product id
            Fin_url = f'https://sclub.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98vv12370&productId={i}'
            url_list.append(Fin_url)
        return url_list

    @staticmethod
    def _extract_json(jsonp):
        """Strip the JSONP wrapper ``fetchJSON_commentXXX({...});`` and parse it.

        The callback name varies between requests, so the payload is pulled
        out with a regex. The original ``str.lstrip``/``str.rstrip`` approach
        strips *character sets*, not a prefix/suffix, and can silently corrupt
        the payload. Returns the parsed dict, or None if the response does not
        look like the expected JSONP.
        """
        match = re.search(r'fetchJSON_comment\w*\((.*)\)\s*;?\s*$', jsonp, re.S)
        if match is None:
            return None
        return json.loads(match.group(1))

    def get_json(self, url):
        """Fetch one comment page and print color/size for every comment."""
        res = requests.get(url, headers=self.headers).text
        jd = self._extract_json(res)
        if jd is None:  # blocked or unexpected payload: skip instead of crashing
            return
        for comment in jd.get('comments', []):
            print(comment['productColor'], comment['productSize'])


if __name__ == '__main__':
    # Fetch the first 3 comment pages of every product, one thread per page.
    scraper = cup()
    page_suffix = '&score=0&sortType=5&page=%d&pageSize=10&isShadowSku=0&rid=0&fold=1'
    for base_url in scraper.vari_cpu():
        for page in range(3):
            full_url = base_url + page_suffix % page
            time.sleep(1)  # throttle: pause before launching each request thread
            threading.Thread(target=scraper.get_json, args=(full_url,)).start()

我直接把json的url里面的id换成各自的id了,发现竟然可行,那就可行吧,省得麻烦.

这里还是有不完善的地方,就是那个线程,等会儿去补补线程进程内容,再改进一波,这个不敢爬多,先微微爬点看看,不过我看那一堆玩意儿是真的多,就不发了

另外我还发现它的首页竟然还藏着一个ajax,当你不下拉时就30个牌子,一下拉变60个了…


改进在于实现了多个牌子和线程池的改进

import requests
import json
from concurrent.futures import ThreadPoolExecutor
import time
import re
from lxml import etree

class cpu:
    """Scrape JD.com product-comment data for the bra search listing.

    Same pipeline as the first version, but exposes ``get_list`` so the
    caller can feed all page URLs into a thread pool.
    """

    def __init__(self):
        # Desktop Chrome UA so JD serves the normal (non-mobile) HTML page.
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'}
        self.url = 'https://item.jd.com/'

    def vari_cpu(self):
        """Return a list of comment-JSONP URLs, one per product on the search page."""
        url_list = []
        url = 'https://search.jd.com/Search?keyword=%E6%96%87%E8%83%B8&enc=utf-8&spm=2.1.1'
        html = requests.get(url, headers=self.headers).text
        html = etree.HTML(html)
        # Each product card carries an id whose tail (after a 6-char prefix)
        # is the numeric product id used by the comment endpoint.
        cpu_link = html.xpath('//div[@class="p-icons"]/@id')
        for i in cpu_link:
            i = i[6:]  # keep only the numeric product id
            Fin_url = f'https://sclub.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98vv12370&productId={i}'
            url_list.append(Fin_url)
        return url_list

    @staticmethod
    def _extract_json(jsonp):
        """Strip the JSONP wrapper ``fetchJSON_commentXXX({...});`` and parse it.

        The callback name varies between requests, so the payload is pulled
        out with a regex. The original ``str.lstrip``/``str.rstrip`` approach
        strips *character sets*, not a prefix/suffix, and can silently corrupt
        the payload. Returns the parsed dict, or None if the response does not
        look like the expected JSONP.
        """
        match = re.search(r'fetchJSON_comment\w*\((.*)\)\s*;?\s*$', jsonp, re.S)
        if match is None:
            return None
        return json.loads(match.group(1))

    def get_json(self, url):
        """Fetch one comment page and print the size of every comment."""
        res = requests.get(url, headers=self.headers).text
        jd = self._extract_json(res)
        if jd is None:  # blocked or unexpected payload: skip instead of crashing
            return
        for comment in jd.get('comments', []):
            print(comment['productSize'])

    def get_list(self):
        """Build the full list of paged comment URLs (5 pages per product)."""
        Fin_url = []
        url_list = self.vari_cpu()
        for j in url_list:
            for i in range(5):
                # Paged comment endpoint: only the page number varies.
                js_url = j + '&score=0&sortType=5&page=%d&pageSize=10&isShadowSku=0&rid=0&fold=1' % i
                Fin_url.append(js_url)
        return Fin_url

if __name__ == '__main__':
    pa = cpu()
    Fin_url = pa.get_list()
    with ThreadPoolExecutor(max_workers=8) as pool:
        # Drain the lazy map iterator *inside* the pool so worker exceptions
        # surface here. get_json prints its own output and returns None, so
        # the original "for i in results: print(i)" only printed None lines.
        for _ in pool.map(pa.get_json, Fin_url):
            pass

你可能感兴趣的:(python爬取京东文胸数据(三))