So far, the following features have been completed:
1. Given a URL, download that page to a local file with HttpClient.
2. Parse the page downloaded in step 1 with HtmlParser and extract the links it contains.
3. Download the pages pointed to by all the links from step 2 to local files.
Features to be completed next:
1. Create the configuration file that holds the seed URLs, plus its data structure.
2. Create the data structure that holds the Todo list (URLs not yet downloaded).
3. Create the data structure that holds the Visited list (URLs already downloaded); a sketch of steps 1-3 follows this list.
4. Update the Todo and Visited lists as each page is downloaded.
5. Extract links from the pages downloaded in step 3 above and keep downloading until the Todo list is empty.
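As a minimal sketch of steps 1-3 (the class and method names below are hypothetical, not part of the archived Jediael_v0.01 code), the seeds can live in a plain-text file with one URL per line, the Todo list can be a FIFO queue, and the Visited list a HashSet; wrapping the two in one class keeps the step 4 updates in a single place:

package org.ljh.search;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;
import java.util.Scanner;
import java.util.Set;

// Hypothetical sketch: holds the Todo queue (URLs waiting to be downloaded)
// and the Visited set (URLs already downloaded) of the crawler.
public class UrlTable {

    // FIFO queue of URLs not yet downloaded.
    private final Queue<String> todo = new LinkedList<String>();

    // URLs that have already been downloaded.
    private final Set<String> visited = new HashSet<String>();

    // Reads the seed URLs from a plain-text config file, one URL per line.
    public static List<String> loadSeeds(String configPath) throws IOException {
        List<String> seeds = new ArrayList<String>();
        Scanner sc = new Scanner(new File(configPath));
        while (sc.hasNextLine()) {
            String line = sc.nextLine().trim();
            if (!line.isEmpty()) {
                seeds.add(line);
            }
        }
        sc.close();
        return seeds;
    }

    // Queues a URL unless it has been downloaded or queued already.
    public void addTodo(String url) {
        if (!visited.contains(url) && !todo.contains(url)) {
            todo.offer(url);
        }
    }

    // Takes the next URL to download and marks it as visited.
    public String nextTodo() {
        String url = todo.poll();
        if (url != null) {
            visited.add(url);
        }
        return url;
    }

    public boolean isTodoEmpty() {
        return todo.isEmpty();
    }
}

The crawl loop of step 5 would then repeatedly call nextTodo(), download the page, extract its links, and addTodo() each accepted link, stopping once isTodoEmpty() returns true.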
The main classes are:
1. The main class MyCrawler
2. The page-download class PageDownloader
3. The HTML parsing class HtmlParserTool
4. The link-filter interface LinkFilter
The complete code is available in the archived release Jediael_v0.01,
or at
https://code.csdn.net/jediael_lu/daopattern/tree/d196da609baa59ef08176322ca61928fbfbdf813
or at
http://download.csdn.net/download/jediael_lu/7382011
1. The main class MyCrawler
package org.ljh.search;

import java.util.Iterator;
import java.util.Set;

import org.ljh.search.downloadpage.PageDownloader;
import org.ljh.search.html.HtmlParserTool;
import org.ljh.search.html.LinkFilter;

public class MyCrawler {

    public static void main(String[] args) {
        String url = "http://www.baidu.com";

        // Only follow links whose URL contains "baidu".
        LinkFilter linkFilter = new LinkFilter() {
            @Override
            public boolean accept(String url) {
                return url.contains("baidu");
            }
        };

        try {
            // Download the seed page, extract its links, then download
            // every page those links point to.
            PageDownloader.downloadPageByGetMethod(url);
            Set<String> urlSet = HtmlParserTool.extractLinks(url, linkFilter);
            Iterator<String> iterator = urlSet.iterator();
            while (iterator.hasNext()) {
                PageDownloader.downloadPageByGetMethod(iterator.next());
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
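The anonymous LinkFilter above hard-codes the crawl scope to URLs containing "baidu"; supplying a different accept() implementation (for example, a host whitelist or a regular-expression match) changes the scope without touching the crawl logic.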
2. The page-download class PageDownloader

package org.ljh.search.downloadpage;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintWriter;
import java.io.Writer;
import java.util.Scanner;

import org.apache.http.HttpEntity;
import org.apache.http.HttpStatus;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;

// Downloads the page at the given URL to a local file.
public class PageDownloader {

    public static void downloadPageByGetMethod(String url) throws IOException {
        // 1. Execute an HTTP GET and obtain the response.
        CloseableHttpClient httpClient = HttpClients.createDefault();
        // Note: the URL must carry the "http://" prefix, otherwise a
        // "Target host is null" exception is thrown.
        HttpGet httpGet = new HttpGet(url);
        CloseableHttpResponse response = httpClient.execute(httpGet);
        InputStream is = null;
        if (response.getStatusLine().getStatusCode() == HttpStatus.SC_OK) {
            try {
                // 2. Obtain the response entity.
                HttpEntity entity = response.getEntity();
                // 3. Read the entity's InputStream and save it to a file.
                is = entity.getContent();
                String fileName = getFileName(url);
                saveToFile("D:\\tmp\\", fileName, is);
            } catch (ClientProtocolException e) {
                e.printStackTrace();
            } finally {
                if (is != null) {
                    is.close();
                }
                if (response != null) {
                    response.close();
                }
            }
        }
    }

    // Writes the input stream to the file named fileName under path.
    private static void saveToFile(String path, String fileName, InputStream is) {
        Scanner sc = new Scanner(is);
        Writer os = null;
        try {
            os = new PrintWriter(path + fileName);
            while (sc.hasNextLine()) {
                os.write(sc.nextLine() + "\n");
            }
        } catch (FileNotFoundException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            if (sc != null) {
                sc.close();
            }
            if (os != null) {
                try {
                    os.flush();
                    os.close();
                } catch (IOException e) {
                    e.printStackTrace();
                    System.out.println("Failed to close the output stream!");
                }
            }
        }
    }

    // Strips the "http://" prefix and replaces characters that are illegal
    // in file names with underscores.
    private static String getFileName(String url) {
        url = url.substring(7);
        String fileName = url.replaceAll("[\\?:*|<>\"/]", "_") + ".html";
        return fileName;
    }
}
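For example, getFileName maps http://www.baidu.com/s?wd=test to www.baidu.com_s_wd=test.html: substring(7) drops the leading http://, and every character that is illegal in a Windows file name (? : * | < > " /) becomes an underscore.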
3. The HTML parsing class HtmlParserTool

package org.ljh.search.html;

import java.util.HashSet;
import java.util.Set;

import org.htmlparser.Node;
import org.htmlparser.NodeFilter;
import org.htmlparser.Parser;
import org.htmlparser.filters.NodeClassFilter;
import org.htmlparser.filters.OrFilter;
import org.htmlparser.tags.LinkTag;
import org.htmlparser.util.NodeList;
import org.htmlparser.util.ParserException;

// Utility class for parsing HTML documents.
public class HtmlParserTool {

    // Extracts the links embedded in the HTML document at the given URL.
    public static Set<String> extractLinks(String url, LinkFilter filter) {
        Set<String> links = new HashSet<String>();
        try {
            // 1. Construct a Parser and set its properties.
            Parser parser = new Parser(url);
            parser.setEncoding("gb2312");

            // 2.1 A custom filter that matches <frame> tags, so that the
            //     src attribute can be read from them later.
            NodeFilter frameNodeFilter = new NodeFilter() {
                @Override
                public boolean accept(Node node) {
                    return node.getText().startsWith("frame src=");
                }
            };

            // 2.2 A second filter that matches <a> tags.
            NodeFilter aNodeFilter = new NodeClassFilter(LinkTag.class);

            // 2.3 Combine the two filters above into one OR filter.
            OrFilter linkFilter = new OrFilter(frameNodeFilter, aNodeFilter);

            // 3. Let the parser collect all nodes that match the filter.
            NodeList nodeList = parser.extractAllNodesThatMatch(linkFilter);

            // 4. Process the matched nodes.
            for (int i = 0; i < nodeList.size(); i++) {
                Node node = nodeList.elementAt(i);
                String linkURL = "";
                if (node instanceof LinkTag) {
                    // The node is an <a> tag.
                    LinkTag link = (LinkTag) node;
                    linkURL = link.getLink();
                } else {
                    // The node is a <frame> tag: extract the src attribute.
                    String nodeText = node.getText();
                    int beginPosition = nodeText.indexOf("src=");
                    nodeText = nodeText.substring(beginPosition);
                    int endPosition = nodeText.indexOf(" ");
                    if (endPosition == -1) {
                        endPosition = nodeText.indexOf(">");
                    }
                    linkURL = nodeText.substring(5, endPosition - 1);
                }
                // Keep only URLs that fall within the scope of this crawl.
                if (filter.accept(linkURL)) {
                    links.add(linkURL);
                }
            }
        } catch (ParserException e) {
            e.printStackTrace();
        }
        return links;
    }
}
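Note that the substring(5, endPosition - 1) extraction assumes the frame's src value is wrapped in double quotes: the 5 skips past src=" and the - 1 drops the closing quote, so an unquoted src attribute would come out truncated.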
4. The link-filter interface LinkFilter

package org.ljh.search.html;

// The filter defined by this interface decides whether a URL falls within
// the scope of the current crawl.
public interface LinkFilter {
    public boolean accept(String url);
}