Getting Started with HTMLParser_01_A Web Crawler Prototype_Parsing an Article and Handling Its Images

The practice files used in this article, such as localHTML.html,

can be downloaded here: http://download.csdn.net/detail/jadyer/5127317

package com.jadyer.httpclient;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;

import org.apache.commons.io.FileUtils;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.util.EntityUtils;
import org.htmlparser.Node;
import org.htmlparser.NodeFilter;
import org.htmlparser.Parser;
import org.htmlparser.filters.NodeClassFilter;
import org.htmlparser.nodes.TagNode;
import org.htmlparser.nodes.TextNode;
import org.htmlparser.tags.ImageTag;
import org.htmlparser.util.NodeList;

/**
 * Getting Started with HTMLParser_01_A Web Crawler Prototype_Parsing an Article and Handling Its Images
 * @see ---------------------------------------------------------------------------------------------------------
 * @see All required jars are listed below
 * @see commons-io-2.3.jar
 * @see commons-lang-2.3.jar
 * @see commons-codec-1.6.jar (this and the next 6 jars come from httpcomponents-client-4.2.1-bin.zip, downloaded from the HttpClient site)
 * @see commons-logging-1.1.1.jar
 * @see fluent-hc-4.2.1.jar
 * @see httpclient-4.2.1.jar
 * @see httpclient-cache-4.2.1.jar
 * @see httpcore-4.2.1.jar
 * @see httpmime-4.2.1.jar
 * @see filterbuilder.jar (this and the next 4 jars come from HTMLParser-2.0-SNAPSHOT-bin.zip, downloaded from the HTMLParser site)
 * @see htmllexer.jar
 * @see htmlparser.jar
 * @see sitecapturer.jar
 * @see thumbelina.jar
 * @see ---------------------------------------------------------------------------------------------------------
 * @see A web crawler prototype
 * @see The three methods in this article are enough to build a rudimentary web crawler
 * @see For example, imagine a back-office admin system where you can add technical articles to be crawled: the input is the article URL, the output is the article's actual content
 * @see It also downloads the images referenced in the article body and rewrites the image URLs in the content to local absolute paths, so the images display correctly
 * @see All that remains is displaying the article on the front end, and how you do that is entirely up to you
 * @see For extracting the article title, author, keywords, overall description and so on, see my article below
 * @see http://blog.csdn.net/jadyer/article/details/8656477
 * @see ---------------------------------------------------------------------------------------------------------
 * @create Mar 10, 2013 4:05:55 PM
 * @author 玄玉<http://blog.csdn.net/jadyer>
 */
public class SpiderDemo {
	private static final String articleURI = "http://www.ibm.com/developerworks/cn/java/j-javaroundtable/index.html";
	private static final String localHTML = "D:/Download/localHTML.html";
	
	/**
	 * Download the article
	 */
	private static void downloadArticle() throws Exception {
		HttpClient httpClient = new DefaultHttpClient();
		HttpGet httpGet = new HttpGet(articleURI);
		try {
			HttpResponse response = httpClient.execute(httpGet);
			HttpEntity entity = response.getEntity();
			if(null != entity){
				String responseContent = EntityUtils.toString(entity, "UTF-8");
				EntityUtils.consume(entity);
				//Write the article content to a local file
				//(note: IOUtils.write does not close the stream itself, so close it explicitly)
				FileOutputStream fos = new FileOutputStream(localHTML);
				try {
					IOUtils.write(responseContent, fos, "UTF-8");
				}finally{
					IOUtils.closeQuietly(fos);
				}
			}
		}finally{
			httpClient.getConnectionManager().shutdown();
		}
	}
	
	
	/**
	 * Download the images in the article body to the local disk
	 */
	private static void downloadArticleImage() throws Exception {
		//Load the locally saved article HTML
		String html = IOUtils.toString(new FileInputStream(localHTML), "UTF-8");
		//Extract the article body; here we rely on marker comments inside the page's <body> to locate the content
		String article = StringUtils.substringBetween(html, "<!-- MAIN_COLUMN_CONTENT_BEGIN -->", "<!-- CMA");
		System.out.println("Article content: " + article);
		Parser parser = new Parser();
		parser.setInputHTML(article);
		//Extract all the <img> tags
		//This uses the built-in NodeClassFilter, which filters nodes by class; here it keeps only ImageTag nodes
		NodeList imageTags = parser.parse(new NodeClassFilter(ImageTag.class));
		for(int i=0; i<imageTags.size(); i++){
			ImageTag it = (ImageTag)imageTags.elementAt(i);
			//it.getImageURL() returns the value of the src attribute of the <img> tag
			System.out.println("Image link: " + it.getImageURL());
			//Build the image's absolute URL: http://www.ibm.com/developerworks/cn/java/j-javaroundtable/ + it.getImageURL()
			String absoluteURL = articleURI.substring(0, articleURI.lastIndexOf("/")+1) + it.getImageURL();
			System.out.println("Image URL: " + absoluteURL);
			//Download the image referenced in the article
			HttpClient httpClient = new DefaultHttpClient();
			HttpGet httpGet = new HttpGet(absoluteURL);
			HttpResponse response = httpClient.execute(httpGet);
			HttpEntity entity = response.getEntity();
			if(null != entity){
				byte[] images = EntityUtils.toByteArray(entity);
				IOUtils.write(images, new FileOutputStream("D:/Download/" + FilenameUtils.getName(absoluteURL)));
				System.out.println("图片[" + absoluteURL + "]下载完毕");
			}
		}
	}
	
	
	/**
	 * Rewrite the image links in the article body to local links
	 */
	@SuppressWarnings("serial")
	private static void modifyImageURL() throws Exception{
		StringBuilder sb = new StringBuilder();
		String html = IOUtils.toString(new FileInputStream(localHTML), "UTF-8");
		String article = StringUtils.substringBetween(html, "<!-- MAIN_COLUMN_CONTENT_BEGIN -->", "<!-- CMA");
		Parser parser = new Parser();
		parser.setInputHTML(article);
		//nodeList will contain every node of the article content
		NodeList nodeList = parser.parse(
			new NodeFilter(){
				@Override
				public boolean accept(Node node) {
					//Accept every node
					return true;
				}
			}
		);
		//Iterate over every node in the article content
		for(int i=0; i<nodeList.size(); i++){
			Node node = nodeList.elementAt(i);
			if(node instanceof ImageTag){
				ImageTag it = (ImageTag)node;
				//Rewrite the <img> src in the article body to the absolute path on the local disk
				it.setImageURL("D:/Download/" + it.getImageURL());
				//Append the whole <img> tag to the StringBuilder
				sb.append(it.toHtml());
			}else if(node instanceof TextNode){
				TextNode tn = (TextNode)node;
				//For a text node, append its text directly to the StringBuilder
				sb.append(tn.getText());
			}else{
				TagNode tn = (TagNode)node;
				//Append the tag to the StringBuilder unchanged
				sb.append("<").append(tn.getText()).append(">");
			}
		}
		System.out.println("图片链接修改后的文章内容为:" + sb);
		//FileUtils的好处是,当文件所在目录不存在时,它会自动创建,省去我们判断的步骤了
		FileUtils.writeStringToFile(new File("D:/Download/newHTML.html"), sb.toString());
	}
}
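
The original listing stops at the class body and does not include an entry point. A minimal sketch of a driver is shown below; the main method itself is an assumption, not part of the original code, but the call order follows from the fact that downloadArticleImage() and modifyImageURL() both read localHTML from disk, which downloadArticle() writes.

	//Hypothetical driver (not in the original listing): add to SpiderDemo and run the three steps in order
	public static void main(String[] args) throws Exception {
		downloadArticle();        //1. fetch the article HTML and save it to localHTML
		downloadArticleImage();   //2. download every image referenced in the article body
		modifyImageURL();         //3. rewrite the <img> links to local paths and write newHTML.html
	}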
