http://blog.csdn.net/infoworld/article/details/19547723
以下代码是基于infoworld的csdn备份python代码修改的cnblogs博客备份,但是和infoworld的界面不匹配,只能够用在python里面。python确实有意思,开发很快,怪不得这么流行。
#! encoding=utf-8
# cnblogs blog backup. Usage: edit the url and output values at the bottom of the file, then run this script.
import urllib2
import re
import os
import sys
# from HTMLParser import HTMLParser
import html5lib
# from xml.etree.ElementTree import ElementTree
from urlparse import urlparse
import xml
import codecs
import traceback
import time
# class MyHTMLParser(HTMLParser):
# def handle_starttag(self, tag, attrs):
# # if tag.lower() == "img":
# print "Encountered the beginning of a %s tag,attrs size %d" % (tag ,len(attrs))
# for x in attrs:
# print "name %s,value %s" % (x[0],x[1])
# def handle_endtag(self, tag):
# print "Encountered the end of a %s tag" % tag
# def handle_startendtag(self, tag, attrs):
# print "Encountered the beginning of a %s tag,attrs size %d" % (tag ,len(attrs))
# for x in attrs:
# print "name %s,value %s" % (x[0],x[1])
# Number of download attempts allowed per resource
gTestTime = 5
def DownloadFile(url,output):
    """Download one static resource (image, css, js) into the backup tree.

    url:    absolute URL of the resource; query string and fragment are
            discarded before downloading.
    output: backup root directory as a utf-8 encoded byte string.

    The file is saved under <output>/<host>/<path>, creating directories
    as needed.  Returns a tuple (responseText, url, output) so the caller
    can feed stylesheet text into ReadCss; responseText is None when the
    download failed or the file already existed on disk.
    """
    responseText = None
    dirssPath = None
    try:
        # Rebuild the URL from scheme+host+path only (drops ?query and #fragment).
        res = urlparse(url)
        url = res.scheme+"://"+res.netloc+res.path
        path = res.path
        index = path.rfind('/')
        dirss = "/"
        if index != -1:
            # Directory part of the local mirror path (utf-8 byte string).
            dirss = output + "/" + res.netloc.encode("utf-8") + path[0:index].encode("utf-8")
        # Full local file path where the resource will be written.
        dirssPath = output + "/" + res.netloc.encode("utf-8") + path.encode("utf-8")
        dirss_ansi = dirss.decode('utf-8')
        if not os.path.exists(dirss_ansi):
            os.makedirs(dirss_ansi)
        global gTestTime
        count = gTestTime  # retry budget for this resource
        while True:
            if count < 0:
                break
            count = count - 1
            header={"User-Agent": "Mozilla-Firefox5.0"}
            # Only plain http URLs are mirrored; anything else is skipped.
            if not url.startswith("http://"):
                break
            try:
                # print "url: %s:%d" % (url,count)
                time.sleep(0.5)  # small delay between requests/retries
                request = urllib2.Request(url,None,header)
                response = urllib2.urlopen(request)
                dirssPath_ansi = dirssPath.decode("utf-8")
                # Skip files already mirrored by a previous run.
                if not os.path.exists(dirssPath_ansi):
                    resourceFile = open(dirssPath_ansi,"wb")
                    responseText = response.read()
                    if url.endswith(".js"):
                        # Strip schemes inside .js files so they match the
                        # saved pages, which also have schemes stripped
                        # (see Download), and resolve against the mirror.
                        responseText = responseText.replace("http://","")
                        responseText = responseText.replace("https://","")
                    resourceFile.write(responseText)
                    resourceFile.close()
                break
            except Exception,e:
                # Failed attempt: log and retry until the budget runs out.
                print "DownloadFile: %s:%s:%d" % (e,url,count)
                # pass
                # exstr = traceback.format_exc()
                # print exstr
    except Exception,e:
        # Malformed URL or filesystem error: give up on this resource and
        # return responseText=None so the caller can carry on.
        pass
        # exstr = traceback.format_exc()
        # print exstr
    return (responseText,url,output)
def ReadCss(css):
    """Download every resource referenced via url(...) inside a stylesheet.

    css: the (responseText, url, output) tuple returned by DownloadFile
         for the stylesheet itself; responseText may be None on failure.
    """
    # print "ReadCss"
    # Match url(...) with optional single or double quotes, excluding the
    # quotes and surrounding whitespace from the capture.  The previous
    # pattern 'url\("?([^)]+)"?\)' was greedy, so url("x") captured x"
    # (trailing quote leaked into the URL and broke the download).
    pattern = re.compile(r'url\(\s*["\']?([^)"\']+?)["\']?\s*\)')
    try:
        text = css[0]
        if text is None:
            return
        for one in pattern.findall(text):
            # Resolve the reference against the stylesheet's own URL,
            # then mirror it into the same output tree.
            newurl = GetConcatUrl(css[1], one)
            DownloadFile(newurl, css[2])
    except Exception:
        # Best-effort: a broken stylesheet must not abort the page backup.
        pass
def Download(url,output):
    """Back up one blog post.

    Fetches the page, mirrors every cnblogs-hosted image, every linked
    stylesheet (plus the resources those stylesheets reference) and every
    external script, then saves the page itself with protocol prefixes
    stripped so the saved copy resolves resources against the local mirror.

    url:    address of the blog post.
    output: backup root directory (utf-8 byte string).
    """
    # try:
    header={"User-Agent": "Mozilla-Firefox5.0"}
    namespace = "{http://www.w3.org/1999/xhtml}"
    request = urllib2.Request(url,None,header)
    response = urllib2.urlopen(request)
    data = response.read()
    # html5lib returns an xml.etree tree whose tags carry the XHTML namespace.
    document = html5lib.parse(data)
    imgElements = document.findall('.//{0}img'.format(namespace))
    # print "imgElements %d" % len(imgElements)
    for img in imgElements:
        src = img.attrib["src"]
        # print "src %s" % src
        try:
            res = urlparse(src)
            # Images not hosted on cnblogs are not downloaded.
            if not res.netloc.endswith(".cnblogs.com"):
                print "image not download: %s:%s" % (src,res.netloc)
                continue
        except Exception,e:
            pass
        DownloadFile(src,output)
    linkElements = document.findall('.//{0}link'.format(namespace))
    # print "linkElements %d" % len(linkElements)
    for link in linkElements:
        href = link.attrib["href"]
        # print "href %s" % href
        text = DownloadFile(href,output)
        # Stylesheets can pull in further resources via url(...); fetch those too.
        if link.attrib.has_key("rel") and link.attrib["rel"].lower() == "stylesheet":
            ReadCss(text)
    scriptElements = document.findall('.//{0}script'.format(namespace))
    # print "scriptElements %d" % len(scriptElements)
    for script in scriptElements:
        if script.attrib.has_key("src"):
            src = script.attrib["src"]
            # print "src %s" % src
            DownloadFile(src,output)
    htmlNameIndex = url.rfind("/");
    urlLen = len(url)
    htmlName = GetHtmlName(url)
    output = output.decode("utf-8") + "/"+htmlName+".htm"
    # Strip schemes so resource links resolve against the local mirror, but
    # restore the XHTML namespace URI, which must stay a full URL.
    data = data.replace("http://","")
    data = data.replace("https://","")
    data = data.replace("www.w3.org/1999/xhtml","http://www.w3.org/1999/xhtml")
    resourceFile = open(output,"wb")
    resourceFile.write(data)
    resourceFile.close()
def GetConcatUrl(url,png):
    """Resolve a resource reference found inside a css file.

    url: absolute URL of the css file, e.g.
         http://static.csdn.net/public/common/toolbar/css/index.css
    png: the reference captured from url(...), e.g. "../images/f_icon.png"

    Returns the absolute URL of the referenced resource.  References with
    leading ".." components climb that many directories above the css
    file's directory (original behavior, unchanged).  Already-absolute
    http(s) references are returned unchanged, and references without any
    ".." are joined to the css file's directory with a "/" separator
    (previously these two cases produced broken, separator-less URLs).
    """
    # Absolute references need no resolution.
    if png.startswith("http://") or png.startswith("https://"):
        return png
    # Count the leading ".." components and locate the remainder.
    count = 0
    index = png.find("..")
    startindex = None
    while index != -1:
        count = count + 1
        startindex = index + 2
        index = png.find("..", startindex)
    if startindex is None:
        # Plain relative reference: append it to the css file's directory.
        slash = url.rfind("/")
        base = url[0:slash] if slash != -1 else url
        return base + "/" + png.lstrip("/")
    second = png[startindex:]
    # Walk back one "/" for the css file name plus one per "..".
    index = url.rfind("/")
    endindex = 0
    while count >= 0 and index != -1:
        endindex = index
        index = url.rfind("/", 0, endindex)
        count = count - 1
    first = url[0:endindex]
    return first + second
def getAllListUrl(url):
header={"User-Agent": "Mozilla-Firefox5.0"}
request = urllib2.Request(url,None,header)
response = urllib2.urlopen(request)
data = response.read()
# By default, the document will be an xml.etree element instance.Whenever possible, html5lib chooses the accelerated ElementTreeimplementation (i.e. xml.etree.cElementTree on Python 2.x).
document = html5lib.parse(data)
namespace = "{http://www.w3.org/1999/xhtml}"
# get