Querying Alibaba Keyword Rankings

Notes on the main code I wrote to query Alibaba keyword rankings, along with basic usage of the pyquery and urllib libraries.

  1. Basic use of the urllib2 module: the class below fetches the HTML document for a URL, and its proxy-selection logic can be overridden internally.

    import urllib2
    import cookielib

    class ProxyScrapy(object):
        def __init__(self):
            self.proxy_robot = ProxyRobot()
            self.current_proxy = None
            self.cookie = cookielib.CookieJar()

        def __builder_proxy_cookie_opener(self):
            # Keep cookies across requests so the session is preserved.
            cookie_handler = urllib2.HTTPCookieProcessor(self.cookie)
            handlers = [cookie_handler]

            if PROXY_ENABLE:
                # get_random_proxy() returns a string like 'http://ip:port';
                # strip the 'http://' prefix (7 characters) for ProxyHandler.
                self.current_proxy = ip_port = self.proxy_robot.get_random_proxy()
                proxy_handler = urllib2.ProxyHandler({'http': ip_port[7:]})
                handlers.append(proxy_handler)

            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)
            return opener

        def get_html_body(self, url):
            opener = self.__builder_proxy_cookie_opener()

            request = urllib2.Request(url)
            #request.add_header("Accept-Encoding", "gzip,deflate,sdch")
            #request.add_header("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8")
            #request.add_header("Cache-Control", "no-cache")
            #request.add_header("Connection", "keep-alive")

            try:
                response = opener.open(request, timeout=2)

                http_code = response.getcode()
                if http_code == 200:
                    if PROXY_ENABLE:
                        self.proxy_robot.handle_success_proxy(self.current_proxy)
                    html = response.read()
                    return html
                else:
                    # Non-200 response: mark the proxy as bad and retry with a new one.
                    if PROXY_ENABLE:
                        self.proxy_robot.handle_double_proxy(self.current_proxy)
                    return self.get_html_body(url)
            except Exception as inst:
                print inst, self.current_proxy
                self.proxy_robot.handle_double_proxy(self.current_proxy)
                return self.get_html_body(url)
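
A minimal usage sketch for reference; PROXY_ENABLE and ProxyRobot come from elsewhere in the project and are not shown above, and the target URL is illustrative only:

    # Hypothetical usage; PROXY_ENABLE/ProxyRobot are defined elsewhere in the project.
    PROXY_ENABLE = False  # assumption: skip proxies for a quick local test

    curl = ProxyScrapy()
    html = curl.get_html_body('http://www.alibaba.com')
    if html:
        print 'fetched %s bytes' % len(html)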
    
  2. [Code] Given a company name and a list of keywords, return the rank of each keyword.

    import re
    from time import clock
    from collections import OrderedDict
    from pyquery import PyQuery as pq

    # 'curl' is a ProxyScrapy instance and 'url' is the Alibaba search URL
    # template (defined elsewhere) with keyword and page-index placeholders.

    def search_keywords_rank(keyword_company_name, keywords):
        def get_context(url):
            # Time each fetch for rough profiling.
            start = clock()
            html = curl.get_html_body(url)
            finish = clock()
            print url, (finish - start)

            d = pq(html)
            items = d("#J-items-content .ls-item")
            items_c = len(items)
            print items_c
            # A full result page carries 38 items; fewer usually means a bad
            # or blocked response, so fetch the same page again.
            if items_c < 38:
                return get_context(url)
            return items, items_c

        result = OrderedDict()
        for keyword in keywords:
            for page_index in range(1, 9):
                u = url % (re.sub('\s+', '_', keyword.strip()), page_index)
                items, items_c = get_context(u)
                b = False
                for item_index in range(0, items_c):
                    e = items.eq(item_index).find('.title a')
                    p_title = e.text()
                    p_url = e.attr('href')

                    e = items.eq(item_index).find('.cright h3 .dot-product')
                    company_name = e.text()
                    company_url = e.attr('href')

                    if keyword_company_name in company_url:
                        # Overall rank: 38 items per page, plus 5 extra promoted
                        # items on every page after the first.
                        total_index = (page_index - 1) * 38 + item_index + 1 + (0 if page_index == 1 else 5)
                        print 'page %s, index %s, total index %s' % (page_index, item_index + 1, total_index)
                        b = True
                        if keyword not in result:
                            result[keyword] = (p_title, p_url, page_index, item_index + 1, total_index, u)
                        break
                if b:
                    break

        return result
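
A sketch of how this might be called; the module-level url template and the sample arguments below are assumptions for illustration, not values from the original project:

    # Assumed search-URL template with keyword and page-index placeholders.
    url = 'http://www.alibaba.com/products/%s/%s.html'
    curl = ProxyScrapy()

    ranks = search_keywords_rank('example-company', ['led light', 'solar panel'])
    for kw, (title, p_url, page, index, total, search_url) in ranks.items():
        print '%s: page %s, position %s (overall %s)' % (kw, page, index, total)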

The finished tool is hosted at http://www.freeb2bmarket.com/keywordtool/

Since I wanted to host this on a web server, the web page itself is built with Tornado. Because PHP and Tornado had to share a single port, nginx sits in front as a reverse proxy; configuring nginx was another learning point of this project ;)
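
For reference, a minimal sketch of an nginx setup like the one described above; the server name, backend ports, and paths are assumptions for illustration, not the actual production config:

    # Illustrative only: one public port, two backends.
    server {
        listen 80;
        server_name www.freeb2bmarket.com;  # assumed

        # Keyword tool -> Tornado app (assumed to listen on 127.0.0.1:8000).
        location /keywordtool/ {
            proxy_pass http://127.0.0.1:8000;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
        }

        # Everything else -> PHP backend (assumed on 127.0.0.1:8080).
        location / {
            proxy_pass http://127.0.0.1:8080;
            proxy_set_header Host $host;
        }
    }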
