基于scrapy抓取wap版微博博文

由于项目要求及数据库设计原因,所以抓取wap版博文
1、研究微博网站的结构
找到登录页的地址
对用户名进行加密,或者使用抓包工具查看抓到的登录用户名(在pc版中查看)

def get_su(user_name):
            username_ = urllib.quote(user_name)     # html字符转义
            username = base64.encodestring(username_)[:-1]
            return username

wap版中对密码不需要加密,直接发送即可

def start_requests(self):
    """Kick off the login flow: GET the SSO prelogin endpoint with the base64 username."""
    prelogin_url = (
        'https://login.sina.com.cn/sso/prelogin.php?checkpin=1&entry=mweibo&su=%s&callback=jsonpcallback1449104018150'
        % (self.base64name,)
    )
    yield Request(
        prelogin_url,
        method='get',
        meta={'cookiejar': 1},  # start a cookie jar that the login POST will reuse
        callback=self.post_message,
    )
def post_message(self, response):
    """POST the wap login form to the SSO endpoint, reusing the prelogin cookie jar."""
    raw_fields = {
        'username': self.user_name,
        'password': '******',  # wap login sends the plain password — no RSA needed
        'url': 'http%3A%2F%2Fm.weibo.cn%2F'
    }
    # Coerce every value to unicode (Python 2 idiom) before building the request.
    form_fields = {field: str(value).decode() for field, value in raw_fields.items()}
    return [FormRequest(
        url='https://passport.weibo.cn/sso/login',
        meta={'cookiejar': response.meta['cookiejar']},
        formdata=form_fields,
        callback=self.parse_item,
    )]

pc版中的密码使用了rsa的加密方法

jsonpcallback1449710780974({"retcode":0,"servertime":1449710857,"pcid":"gz-49f0277a655f3fd80d684d703b7d56ab6023","nonce":"HWYSXO","pubkey":"-----BEGIN PUBLIC KEY-----\nMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDrKjhWhmGIf6GAvdtcq9XyHHv9\nWcCQyy0kWoesJTBiiCcpKT5VBjUFCOf5qju3f0MzIxSQ+RX21jxV\/i8IpJs1P0RK\n05k8rMAtt4Sru45CqbG7\/\/s4vhjXjoeg5Bubj3OpKO4MzuH2c5iEuXd+T+noihu+\nSVknrEp5mzGB1kQkQwIDAQAB\n-----END PUBLIC KEY-----","rsakv":"1330428213","is_openlock":0,"showpin":0,"exectime":13})

def get_sp_rsa(passwd, pubkey, servertime, nonce):
    """Encrypt *passwd* with Weibo's RSA scheme (PC-site login only).

    Fix: the snippet's body had lost all indentation, making it a SyntaxError;
    indentation restored.

    :param passwd:     plain-text password
    :param pubkey:     hex-encoded RSA modulus from the prelogin response
    :param servertime: server timestamp from the prelogin response
    :param nonce:      one-shot nonce from the prelogin response
    :return:           hex string of the RSA-encrypted message
    """
    # The modulus comes back from prelogin; the site treats it as a fixed value.
    weibo_rsa_n = pubkey
    weibo_rsa_e = 65537  # 0x10001, the standard RSA public exponent
    # The site concatenates servertime, nonce and the password before encrypting.
    message = str(servertime) + '\t' + str(nonce) + '\n' + passwd
    key = rsa.PublicKey(int(weibo_rsa_n, 16), weibo_rsa_e)
    encrypted_pwd = rsa.encrypt(message, key)  # encrypt with the public key
    return binascii.b2a_hex(encrypted_pwd)

2、获取内容

def parse_item(self, response):
    """Parse the SSO login result and follow the redirect URL it contains.

    Fix: the original did a pointless ``json.dumps``/``json.loads`` round-trip
    and then ``eval()``-ed server-controlled text — a code-injection hazard.
    A single ``json.loads`` on the response body is equivalent and safe.
    """
    jsontxt = json.loads(response.body)
    # loginresulturl arrives scheme-less with escaped slashes, e.g. \/\/host\/path
    url = 'http:' + jsontxt['data']['loginresulturl'].replace('\\', '')
    yield Request(url, callback=self.parse_detail)
    def parse_detail(self, response):
        """Fan out requests for the first ten pages of the wap home feed."""
        for page_no in range(1, 11):
            feed_url = "http://m.weibo.cn/index/feed?format=cards&page=%s" % page_no
            yield Request(feed_url, callback=self.parse_list)
    def parse_list(self, response):
        """Yield one ListItem per weibo card found in a feed page.

        Fix: the original created a single ``ListItem`` before the loop and
        mutated/yielded it repeatedly, so every yielded item aliased the same
        object (downstream buffering would see only the last card's values).
        A fresh item is now built per card.
        """
        # Grab the JSON between the outermost square brackets of the body.
        matches = re.findall(r'(?<=\[).*[^\]]+(?=\])', response.body)
        feed = json.loads(matches[0])
        for card in feed['card_group']:
            item = ListItem()  # new item per card — see docstring
            mblog = card['mblog']
            item['url'] = 'http://m.weibo.cn/' + str(mblog['user']['id']) + '/' + str(mblog['bid'])
            item['title'] = mblog['text'][0:255]  # truncated to fit the DB column
            item['unique_id'] = mblog['id']
            item['channel_id'] = 22  # fixed channel id per project DB design
            yield item

3、所有代码都在这里了

class WeiboSpider(CrawlSpider):
    """Crawl wap weibo posts (m.weibo.cn) after logging in through the SSO endpoint.

    Flow: prelogin (GET, jsonp) -> sso/login (POST) -> follow loginresulturl ->
    request the first ten feed pages -> one ListItem per post.

    NOTE(review): written for Python 2 (``urllib.quote``, ``str.decode``);
    kept that way, but the ``eval()`` calls on server responses and the
    shared-item bug in ``parse_list`` are fixed below.
    """
    name = 'mweibo.cn'
    allowed_domains = ['weibo.cn', 'passport.weibo.cn']
    start_urls = ['http://m.weibo.cn/']

    # Login credentials; 'su' is the url-quoted username, base64-encoded.
    user_name = '[email protected]'

    def get_su(user_name):
        # Plain function at class-definition time (no self); called once
        # below to precompute base64name.
        username_ = urllib.quote(user_name)  # url/html escaping
        username = base64.encodestring(username_)[:-1]  # strip trailing '\n'
        return username
    base64name = get_su(user_name)

    def start_requests(self):
        """Kick off login: GET the SSO prelogin endpoint with the base64 username."""
        url = 'https://login.sina.com.cn/sso/prelogin.php?checkpin=1&entry=mweibo&su=%s&callback=jsonpcallback1449104018150' % (self.base64name,)
        yield Request(url, method='get', meta={'cookiejar': 1}, callback=self.post_message)

    def post_message(self, response):
        """Parse the prelogin jsonp reply, then POST the wap login form."""
        # Pull the JSON payload out of the jsonp wrapper: callbackname({...})
        serverdata = re.findall(r'(?<=jsonpcallback1449104018150[\(()]).*[^\))]+(?=[\))])', response.body)
        # Fix: single json.loads replaces the original dumps/loads/eval chain —
        # never eval() server-controlled text.
        jsontxt = json.loads(serverdata[0])
        # These four fields are only needed by the PC (RSA) login variant;
        # extracted here for parity with the original code.
        servertime = jsontxt['servertime']
        nonce = jsontxt['nonce']
        rsakv = jsontxt['rsakv']
        pubkey = jsontxt['pubkey']
        formdata = {}
        formdatas = {
            'username': self.user_name,
            'password': '********',  # wap login sends the plain password — no RSA
            'url': 'http%3A%2F%2Fm.weibo.cn%2F'
        }
        for key in formdatas:
            formdata[key] = str(formdatas[key]).decode()  # py2: coerce to unicode
        return [FormRequest(url='https://passport.weibo.cn/sso/login',
                            meta={'cookiejar': response.meta['cookiejar']},
                            formdata=formdata,
                            callback=self.parse_item)]

    def parse_item(self, response):
        """Follow the loginresulturl returned by the SSO login call."""
        # Fix: json.loads replaces the original json round-trip + eval().
        jsontxt = json.loads(response.body)
        # loginresulturl arrives with escaped slashes; strip the backslashes.
        url = 'http:' + jsontxt['data']['loginresulturl'].replace('\\', '')
        yield Request(url, callback=self.parse_detail)

    def parse_detail(self, response):
        """Request the first ten pages of the wap home feed."""
        for page in range(10):
            url = "http://m.weibo.cn/index/feed?format=cards&page=%s" % (int(page) + 1)
            yield Request(url, callback=self.parse_list)

    def parse_list(self, response):
        """Yield one ListItem per card in a feed page."""
        # Grab the JSON between the outermost square brackets of the body.
        data = re.findall(r'(?<=\[).*[^\]]+(?=\])', response.body)
        jsontxt = json.loads(data[0])
        card_group = jsontxt['card_group']
        for jsonitem in card_group:
            # Fix: build a fresh item per card; the original mutated and
            # re-yielded a single ListItem, so all yields aliased one object.
            item = ListItem()
            item['url'] = 'http://m.weibo.cn/' + str(jsonitem['mblog']['user']['id']) + '/' + str(jsonitem['mblog']['bid'])
            item['title'] = jsonitem['mblog']['text'][0:255]  # DB column limit
            item['unique_id'] = jsonitem['mblog']['id']
            item['channel_id'] = 22  # fixed channel id per project DB design
            yield item

你可能感兴趣的:(基于scrapy抓取wap版微博博文)