This is a basic Web-search program. Given its search criteria on the command line (a start URL, the maximum number of URLs to process, and the string to search for), it crawls URLs on the Internet one by one in real time, finding and printing the pages that match. The program is adapted from The Art of Java; to make it easier to analyze, the GUI portion has been removed and the code slightly modified to suit JDK 1.5. Using this program as a base, you can write "crawlers" that roam the Internet for things such as images or e-mail addresses, or that download pages.
First, a sample run:
D:\java>javac SearchCrawler.java (compile)
D:\java>java SearchCrawler http://127.0.0.1:8080/zz3zcwbwebhome/index.jsp 20 java
Start searching...
result:
searchString=java
http://127.0.0.1:8080/zz3zcwbwebhome/index.jsp
http://127.0.0.1:8080/zz3zcwbwebhome/reply.jsp
http://127.0.0.1:8080/zz3zcwbwebhome/learn.jsp
http://127.0.0.1:8080/zz3zcwbwebhome/download.jsp
http://127.0.0.1:8080/zz3zcwbwebhome/article.jsp
http://127.0.0.1:8080/zz3zcwbwebhome/myexample/jlGUIOverview.htm
http://127.0.0.1:8080/zz3zcwbwebhome/myexample/Proxooldoc/index.html
http://127.0.0.1:8080/zz3zcwbwebhome/view.jsp?id=301
http://127.0.0.1:8080/zz3zcwbwebhome/view.jsp?id=297
http://127.0.0.1:8080/zz3zcwbwebhome/view.jsp?id=291
http://127.0.0.1:8080/zz3zcwbwebhome/view.jsp?id=286
http://127.0.0.1:8080/zz3zcwbwebhome/view.jsp?id=285
http://127.0.0.1:8080/zz3zcwbwebhome/view.jsp?id=284
http://127.0.0.1:8080/zz3zcwbwebhome/view.jsp?id=276
http://127.0.0.1:8080/zz3zcwbwebhome/view.jsp?id=272
Another example:
D:\java>java SearchCrawler http://www.sina.com 20 java
Start searching...
result:
searchString=java
http://sina.com
http://redirect.sina.com/WWW/sinaCN/www.sina.com.cn class=a2
http://redirect.sina.com/WWW/sinaCN/www.sina.com.cn class=a8
http://redirect.sina.com/WWW/sinaHK/www.sina.com.hk class=a2
http://redirect.sina.com/WWW/sinaTW/www.sina.com.tw class=a8
http://redirect.sina.com/WWW/sinaUS/home.sina.com class=a8
http://redirect.sina.com/WWW/smsCN/sms.sina.com.cn/ class=a2
http://redirect.sina.com/WWW/smsCN/sms.sina.com.cn/ class=a3
http://redirect.sina.com/WWW/sinaNet/www.sina.net/ class=a3
D:\java>
Here is the program's source code:
import java.util.*;
import java.net.*;
import java.io.*;
import java.util.regex.*;

public class SearchCrawler implements Runnable {
  // Cache of robots.txt Disallow lists, keyed by host
  private HashMap<String, ArrayList<String>> disallowListCache = new HashMap<String, ArrayList<String>>();
  ArrayList<String> errorList = new ArrayList<String>(); // error messages
  ArrayList<String> result = new ArrayList<String>();    // URLs whose pages matched the search string
  String startUrl;     // starting point of the crawl
  int maxUrl;          // maximum number of URLs to process
  String searchString; // the string to search for
  boolean caseSensitive = false; // whether matching is case-sensitive
  boolean limitHost = false;     // whether to stay on the start URL's host

  public SearchCrawler(String startUrl, int maxUrl, String searchString) {
    this.startUrl = startUrl;
    this.maxUrl = maxUrl;
    this.searchString = searchString;
  }

  public ArrayList<String> getResult() {
    return result;
  }

  public void run() {
    crawl(startUrl, maxUrl, searchString, limitHost, caseSensitive);
  }

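  // Verify a URL's format; only well-formed HTTP URLs are accepted.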
  private URL verifyUrl(String url) {
    // Only process HTTP URLs
    if (!url.toLowerCase().startsWith("http://"))
      return null;

    URL verifiedUrl = null;
    try {
      verifiedUrl = new URL(url);
    } catch (Exception e) {
      return null;
    }

    return verifiedUrl;
  }

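  // Check whether robots.txt on the URL's host permits crawling it.
  // Each host's Disallow list is cached so robots.txt is fetched only once.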
  private boolean isRobotAllowed(URL urlToCheck) {
    String host = urlToCheck.getHost().toLowerCase();

    // Look for this host's Disallow list in the cache
    ArrayList<String> disallowList = disallowListCache.get(host);

    // If it is not cached, download and parse robots.txt
    if (disallowList == null) {
      disallowList = new ArrayList<String>();
      try {
        URL robotsFileUrl = new URL("http://" + host + "/robots.txt");
        BufferedReader reader = new BufferedReader(new InputStreamReader(robotsFileUrl.openStream()));

        // Collect the Disallow paths
        String line;
        while ((line = reader.readLine()) != null) {
          if (line.indexOf("Disallow:") == 0) {
            String disallowPath = line.substring("Disallow:".length());

            // Strip any trailing comment
            int commentIndex = disallowPath.indexOf("#");
            if (commentIndex != -1) {
              disallowPath = disallowPath.substring(0, commentIndex);
            }

            disallowPath = disallowPath.trim();
            disallowList.add(disallowPath);
          }
        }

        // Cache the list for this host
        disallowListCache.put(host, disallowList);
      } catch (Exception e) {
        // Assume crawling is allowed if robots.txt cannot be read
        return true;
      }
    }

    // The URL is disallowed if its path starts with any Disallow entry
    String file = urlToCheck.getFile();
    for (int i = 0; i < disallowList.size(); i++) {
      String disallow = disallowList.get(i);
      if (file.startsWith(disallow)) {
        return false;
      }
    }

    return true;
  }

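  // Download the page at the given URL and return its contents as one string.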
  private String downloadPage(URL pageUrl) {
    try {
      // Open a reader on the URL's input stream
      BufferedReader reader =
        new BufferedReader(new InputStreamReader(pageUrl.openStream()));

      // Read the page line by line into a buffer
      String line;
      StringBuffer pageBuffer = new StringBuffer();
      while ((line = reader.readLine()) != null) {
        pageBuffer.append(line);
      }

      return pageBuffer.toString();
    } catch (Exception e) {
    }

    return null;
  }

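  // Strip a leading "www." so that http://www.host and http://host
  // are treated as the same address.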
  private String removeWwwFromUrl(String url) {
    int index = url.indexOf("://www.");
    if (index != -1) {
      return url.substring(0, index + 3) + url.substring(index + 7);
    }

    return (url);
  }

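  // Extract the links from a downloaded page: find <a href="..."> tags,
  // turn relative links into absolute ones, and filter out anchors,
  // mailto: and javascript: links, duplicates, and (optionally) other hosts.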
  private ArrayList<String> retrieveLinks(URL pageUrl, String pageContents, HashSet<String> crawledList,
      boolean limitHost) {
    // Match <a href="..."> tags, ignoring case
    Pattern p = Pattern.compile("<a\\s+href\\s*=\\s*\"?(.*?)[\"|>]", Pattern.CASE_INSENSITIVE);
    Matcher m = p.matcher(pageContents);

    // Omit the port part when the URL carries no explicit port (getPort() returns -1)
    String port = (pageUrl.getPort() == -1) ? "" : ":" + pageUrl.getPort();

    ArrayList<String> linkList = new ArrayList<String>();
    while (m.find()) {
      String link = m.group(1).trim();

      if (link.length() < 1) {
        continue;
      }

      // Skip in-page anchors
      if (link.charAt(0) == '#') {
        continue;
      }

      // Skip mailto: links
      if (link.indexOf("mailto:") != -1) {
        continue;
      }

      // Skip JavaScript links
      if (link.toLowerCase().indexOf("javascript") != -1) {
        continue;
      }

      // Convert relative links into absolute ones
      if (link.indexOf("://") == -1) {
        if (link.charAt(0) == '/') { // relative to the host root
          link = "http://" + pageUrl.getHost() + port + link;
        } else { // relative to the current page
          String file = pageUrl.getFile();
          if (file.indexOf('/') == -1) {
            link = "http://" + pageUrl.getHost() + port + "/" + link;
          } else {
            String path = file.substring(0, file.lastIndexOf('/') + 1);
            link = "http://" + pageUrl.getHost() + port + path + link;
          }
        }
      }

      // Strip any anchor from the link
      int index = link.indexOf('#');
      if (index != -1) {
        link = link.substring(0, index);
      }

      link = removeWwwFromUrl(link);

      // Skip links that are not valid URLs
      URL verifiedLink = verifyUrl(link);
      if (verifiedLink == null) {
        continue;
      }

      // If limited to one host, skip links that point elsewhere
      if (limitHost &&
          !pageUrl.getHost().toLowerCase().equals(
              verifiedLink.getHost().toLowerCase())) {
        continue;
      }

      // Skip links that have already been crawled
      if (crawledList.contains(link)) {
        continue;
      }

      linkList.add(link);
    }

    return (linkList);
  }

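  // Return true only if the page contains every whitespace-separated
  // term of the search string.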
  private boolean searchStringMatches(String pageContents, String searchString, boolean caseSensitive) {
    String searchContents = pageContents;
    // For a case-insensitive search, compare in lower case
    if (!caseSensitive) {
      searchContents = pageContents.toLowerCase();
    }

    // Split the search string into individual terms on whitespace
    Pattern p = Pattern.compile("[\\s]+");
    String[] terms = p.split(searchString);
    // Every term must appear somewhere in the page
    for (int i = 0; i < terms.length; i++) {
      if (caseSensitive) {
        if (searchContents.indexOf(terms[i]) == -1) {
          return false;
        }
      } else {
        if (searchContents.indexOf(terms[i].toLowerCase()) == -1) {
          return false;
        }
      }
    }

    return true;
  }

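  // The main crawl loop: take URLs off the to-crawl list, download each
  // page, queue its links, and record pages that match the search string.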
  public ArrayList<String> crawl(String startUrl, int maxUrls, String searchString,
      boolean limithost, boolean caseSensitive) {
    System.out.println("searchString=" + searchString);
    HashSet<String> crawledList = new HashSet<String>();
    LinkedHashSet<String> toCrawlList = new LinkedHashSet<String>();

    if (maxUrls < 1) {
      errorList.add("Invalid Max URLs value.");
      System.out.println("Invalid Max URLs value.");
    }

    if (searchString.length() < 1) {
      errorList.add("Missing Search String.");
      System.out.println("Missing Search String.");
    }

    if (errorList.size() > 0) {
      System.out.println("err!!!");
      return errorList;
    }

    // Remove "www" from the start URL, then seed the to-crawl list with it
    startUrl = removeWwwFromUrl(startUrl);
    toCrawlList.add(startUrl);

    while (toCrawlList.size() > 0) {
      // Stop once the maximum number of URLs has been crawled
      if (maxUrls != -1) {
        if (crawledList.size() == maxUrls) {
          break;
        }
      }

      // Take the next URL off the to-crawl list
      String url = toCrawlList.iterator().next();
      toCrawlList.remove(url);

      // Skip URLs that are malformed or that robots.txt disallows
      // (the null check guards against a NullPointerException)
      URL verifiedUrl = verifyUrl(url);
      if (verifiedUrl == null || !isRobotAllowed(verifiedUrl)) {
        continue;
      }

      // Mark the URL as crawled, then download its page
      crawledList.add(url);
      String pageContents = downloadPage(verifiedUrl);

      if (pageContents != null && pageContents.length() > 0) {
        // Add the page's links to the to-crawl list
        ArrayList<String> links = retrieveLinks(verifiedUrl, pageContents, crawledList, limitHost);
        toCrawlList.addAll(links);

        // Record and print the URL if the page matches the search string
        if (searchStringMatches(pageContents, searchString, caseSensitive)) {
          result.add(url);
          System.out.println(url);
        }
      }
    }
    return result;
  }

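  // Parse the command-line arguments and start the crawl on a new thread.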
  public static void main(String[] args) {
    if (args.length != 3) {
      System.out.println("Usage: java SearchCrawler startUrl maxUrl searchString");
      return;
    }
    int max = Integer.parseInt(args[1]);
    SearchCrawler crawler = new SearchCrawler(args[0], max, args[2]);
    Thread search = new Thread(crawler);
    System.out.println("Start searching...");
    System.out.println("result:");
    search.start();
  }
}
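
Since main() only starts the crawler thread and never waits for it, a program that wants the matched URLs in code rather than on the console has to join the thread before calling getResult(). Here is a minimal sketch of such a driver, using only the public API shown above; the class name CrawlerDemo and the example arguments are placeholders:

import java.util.ArrayList;

// Hypothetical driver showing programmatic use of SearchCrawler;
// not part of the original listing.
public class CrawlerDemo {
  public static void main(String[] args) throws InterruptedException {
    SearchCrawler crawler =
        new SearchCrawler("http://127.0.0.1:8080/zz3zcwbwebhome/index.jsp", 20, "java");
    Thread search = new Thread(crawler);
    search.start();
    search.join(); // wait for the crawl to finish before reading the results

    ArrayList<String> matches = crawler.getResult();
    System.out.println(matches.size() + " matching page(s) found");
    for (String url : matches) {
      System.out.println(url);
    }
  }
}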