Crawling cnblogs.com Blog Data with a Java Web Crawler

I. What Is a Web Crawler

Here is how Baidu Baike (the Baidu encyclopedia) defines a web crawler:

A web crawler (also known as a web spider or web robot, and in the FOAF community more often called a web wanderer) is a program or script that automatically fetches information from the World Wide Web according to a set of rules. Less common names include ant, automatic indexer, emulator, and worm.
In plain terms, a crawler is a program or script that can automatically visit the internet and download website content: something like a robot that copies information from other people's sites onto your own machine, where you can then filter, sift, summarize, organize, and sort it.
The English term, Web Spider, is an apt image: if the internet is a spider web, the Spider is the creature crawling across it. A web spider finds pages through their links. Starting from some page on a site (usually the home page), it reads the page's content, extracts the other link addresses it contains, and follows them to the next pages, looping until it has fetched every page on the site. If you treat the whole internet as one big site, a web spider can in principle use this process to fetch every page on the internet.
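To make that loop concrete, here is a minimal sketch in Java using jsoup (the same library the code later in this post uses). The start URL, the ten-page cap, and the same-site filter are illustrative assumptions, not the code used later:

import java.util.ArrayDeque;
import java.util.HashSet;
import java.util.Queue;
import java.util.Set;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

public class SpiderSketch {
	public static void main(String[] args) throws Exception {
		Queue<String> frontier = new ArrayDeque<String>(); // URLs waiting to be visited
		Set<String> visited = new HashSet<String>();       // URLs already fetched
		frontier.add("https://www.cnblogs.com/");

		while (!frontier.isEmpty() && visited.size() < 10) { // small cap for the demo
			String url = frontier.poll();
			if (!visited.add(url)) {
				continue; // already seen this page
			}
			Document doc = Jsoup.connect(url).timeout(8000).get();
			System.out.println(url + " -> " + doc.title());

			// collect the links on this page and queue them for later visits
			for (Element a : doc.select("a[href]")) {
				String next = a.absUrl("href");
				if (next.startsWith("https://www.cnblogs.com/")) {
					frontier.add(next);
				}
			}
		}
	}
}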

II. What Can a Crawler Do

The era of big data has arrived, and web crawling has become an indispensable part of it. Companies need data to analyze user behavior, find the weak spots in their own products, and study their competitors, and the first prerequisite for all of that is data collection. Job boards show plenty of companies offering high salaries for crawler engineers, yet crawling is a specialized skill that cannot be picked up overnight. As the internet grows and sites multiply, many traditional businesses fall behind their competitors; they urgently want industry data but, lacking the technical skills, have no idea where to start.

Next, let's try crawling a blog site ourselves. We'll walk through the code, starting with the pom dependencies:


<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
	<modelVersion>4.0.0</modelVersion>

	<groupId>com.javaxl</groupId>
	<artifactId>T226_jsoup</artifactId>
	<version>0.0.1-SNAPSHOT</version>
	<packaging>jar</packaging>

	<name>T226_jsoup</name>
	<url>http://maven.apache.org</url>

	<properties>
		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
	</properties>

	<dependencies>
		<!-- MySQL JDBC driver -->
		<dependency>
			<groupId>mysql</groupId>
			<artifactId>mysql-connector-java</artifactId>
			<version>5.1.44</version>
		</dependency>

		<!-- Apache HttpClient, for fetching pages -->
		<dependency>
			<groupId>org.apache.httpcomponents</groupId>
			<artifactId>httpclient</artifactId>
			<version>4.5.2</version>
		</dependency>

		<!-- jsoup, for parsing HTML -->
		<dependency>
			<groupId>org.jsoup</groupId>
			<artifactId>jsoup</artifactId>
			<version>1.10.1</version>
		</dependency>

		<!-- log4j, for logging -->
		<dependency>
			<groupId>log4j</groupId>
			<artifactId>log4j</artifactId>
			<version>1.2.16</version>
		</dependency>

		<!-- ehcache, for de-duplicating crawled URLs -->
		<dependency>
			<groupId>net.sf.ehcache</groupId>
			<artifactId>ehcache</artifactId>
			<version>2.10.3</version>
		</dependency>

		<!-- commons-io, for saving downloaded images -->
		<dependency>
			<groupId>commons-io</groupId>
			<artifactId>commons-io</artifactId>
			<version>2.5</version>
		</dependency>

		<dependency>
			<groupId>com.alibaba</groupId>
			<artifactId>fastjson</artifactId>
			<version>1.2.47</version>
		</dependency>
	</dependencies>
</project>
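One note on the dependencies: log4j 1.x needs a log4j.properties on the classpath, which this post does not show. A minimal console-only configuration, offered here as an assumption, would be:

# send everything at INFO and above to the console
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c - %m%n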

BlogCrawlerStarter (crawls blog data from cnblogs, https://www.cnblogs.com/)
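Before the code: BlogCrawlerStarter reads two settings, ehcacheXmlPath and blogImages, through a PropertiesUtil helper, and expects an ehcache cache named cnblog. Neither file appears in this post, so the following crawler.properties and ehcache.xml are plausible sketches; the paths are assumptions:

# crawler.properties
# path to the ehcache configuration file (assumed location)
ehcacheXmlPath=C:/T226_jsoup/ehcache.xml
# directory where downloaded blog images are stored (assumed location)
blogImages=C:/T226_jsoup/blogImages/

<?xml version="1.0" encoding="UTF-8"?>
<ehcache>
	<!-- where ehcache keeps its disk store between runs -->
	<diskStore path="C:/T226_jsoup/ehcache"/>
	<!-- one entry per crawled URL; persisted to disk so restarts skip old posts -->
	<cache name="cnblog"
	       maxElementsInMemory="10000"
	       eternal="true"
	       overflowToDisk="true"
	       diskPersistent="true"/>
</ehcache>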

package com.javaxl.crawler;
import java.io.File;
import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;

import org.apache.commons.io.FileUtils;
import org.apache.http.HttpEntity;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.apache.log4j.Logger;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import com.javaxl.util.DateUtil;
import com.javaxl.util.DbUtil;
import com.javaxl.util.PropertiesUtil;

import net.sf.ehcache.Cache;
import net.sf.ehcache.CacheManager;
import net.sf.ehcache.Status;

/**
 * @author Administrator
 *
 */
public class BlogCrawlerStarter {

	private static Logger logger = Logger.getLogger(BlogCrawlerStarter.class);
	private static String HOMEURL = "https://www.cnblogs.com/";
	private static CloseableHttpClient httpClient;
	private static Connection con;
	private static CacheManager cacheManager;
	private static Cache cache;

	/**
	 * Fetch the home page with HttpClient and hand its HTML off for parsing.
	 */
	public static void parseHomePage() {
		logger.info("开始爬取首页:" + HOMEURL);
		
		cacheManager = CacheManager.create(PropertiesUtil.getValue("ehcacheXmlPath"));
		cache = cacheManager.getCache("cnblog");
		
		httpClient = HttpClients.createDefault();
		HttpGet httpGet = new HttpGet(HOMEURL);
		RequestConfig config = RequestConfig.custom().setConnectTimeout(5000).setSocketTimeout(8000).build();
		httpGet.setConfig(config);
		CloseableHttpResponse response = null;
		try {
			response = httpClient.execute(httpGet);
			if (response == null) {
				logger.info(HOMEURL + ": no response from server");
				return;
			}

			if (response.getStatusLine().getStatusCode() == 200) {
				HttpEntity entity = response.getEntity();
				String homePageContent = EntityUtils.toString(entity, "utf-8");
				// System.out.println(homePageContent);
				parseHomePageContent(homePageContent);
			}

		} catch (ClientProtocolException e) {
			logger.error(HOMEURL + "-ClientProtocolException", e);
		} catch (IOException e) {
			logger.error(HOMEURL + "-IOException", e);
		} finally {
			try {
				if (response != null) {
					response.close();
				}

				if (httpClient != null) {
					httpClient.close();
				}
			} catch (IOException e) {
				logger.error(HOMEURL + "-IOException", e);
			}
		}

		if(cache.getStatus() ==  Status.STATUS_ALIVE) {
			cache.flush();
		}
		cacheManager.shutdown();
		logger.info("结束爬取首页:" + HOMEURL);

	}

	/**
	 * Parse the page HTML with jsoup and pull out the data we want
	 * (the links to the individual blog posts).
	 * 
	 * @param homePageContent
	 */
	private static void parseHomePageContent(String homePageContent) {
		Document doc = Jsoup.parse(homePageContent);
		// old selector kept for reference (CSDN layout): #feedlist_id .list_con .title h2 a
		Elements aEles = doc.select("#post_list .post_item .post_item_body h3 a");
		for (Element aEle : aEles) {
//			each <a> element is one post link in the home-page list
			String blogUrl = aEle.attr("href");
			if (null == blogUrl || "".equals(blogUrl)) {
				logger.info("Post link is empty; skipping it.");
				continue;
			}
			if(cache.get(blogUrl) != null) {
				logger.info("该数据已经被爬取到数据库中,数据库不再收录!");
				continue;
			}
//			System.out.println("************************"+blogUrl+"****************************");
			
			parseBlogUrl(blogUrl);
		}
	}

	/**
	 * Fetch a single blog post by its URL, then extract its title and content.
	 * 
	 * @param blogUrl
	 */
	private static void parseBlogUrl(String blogUrl) {

		logger.info("开始爬取博客网页:" + blogUrl);
		httpClient = HttpClients.createDefault();
		HttpGet httpGet = new HttpGet(blogUrl);
		RequestConfig config = RequestConfig.custom().setConnectTimeout(5000).setSocketTimeout(8000).build();
		httpGet.setConfig(config);
		CloseableHttpResponse response = null;
		try {
			response = httpClient.execute(httpGet);
			if (response == null) {
				logger.info(blogUrl + ": no response from server");
				return;
			}

			if (response.getStatusLine().getStatusCode() == 200) {
				HttpEntity entity = response.getEntity();
				String blogContent = EntityUtils.toString(entity, "utf-8");
				parseBlogContent(blogContent, blogUrl);
			}

		} catch (ClientProtocolException e) {
			logger.error(blogUrl + "-ClientProtocolException", e);
		} catch (IOException e) {
			logger.error(blogUrl + "-IOException", e);
		} finally {
			try {
				if (response != null) {
					response.close();
				}
			} catch (IOException e) {
				logger.error(blogUrl + "-IOException", e);
			}
		}

		logger.info("结束爬取博客网页:" + HOMEURL);

	}

	/**
	 * Parse a blog post's HTML and extract its title and full content.
	 * 
	 * @param blogContent
	 * @param link
	 */
	private static void parseBlogContent(String blogContent, String link) {
		Document doc = Jsoup.parse(blogContent);
//		leftover debug output: dumped every page whose URL lacked "ansion2014"
//		if (!link.contains("ansion2014")) {
//			System.out.println(blogContent);
//		}
		Elements titleEles = doc
				// old selector kept for reference (CSDN layout): #mainBox main .blog-content-box .article-header-box .article-header .article-title-box h1
				.select("#topics .post h1 a");
//		System.out.println(titleEles.toString());
	
		if (titleEles.size() == 0) {
			logger.info("博客标题为空,不插入数据库!");
			return;
		}
		String title = titleEles.get(0).html();

		Elements blogContentEles = doc.select("#cnblogs_post_body");
		if (blogContentEles.size() == 0) {
			logger.info("Blog content is empty; not inserting into the database.");
			return;
		}
		String blogContentBody = blogContentEles.get(0).html();
		
//		Elements imgEles = doc.select("img");
//		List<String> imgUrlList = new LinkedList<String>();
//		if (imgEles.size() > 0) {
//			for (Element imgEle : imgEles) {
//				imgUrlList.add(imgEle.attr("src"));
//			}
//		}
//
//		if (imgUrlList.size() > 0) {
//			Map<String, String> replaceUrlMap = downloadImgList(imgUrlList);
//			blogContent = replaceContent(blogContent, replaceUrlMap);
//		}

		String sql = "insert into `t_jsoup_article` values(null,?,?,null,now(),0,0,null,?,0,null)";
		try {
			PreparedStatement pst = con.prepareStatement(sql);
			pst.setObject(1, title);
			pst.setObject(2, blogContentBody);
			pst.setObject(3, link);
			if(pst.executeUpdate() == 0) {
				logger.info("爬取博客信息插入数据库失败");
			}else {
				cache.put(new net.sf.ehcache.Element(link, link));
				logger.info("爬取博客信息插入数据库成功");
			}
		} catch (SQLException e) {
			logger.error("数据异常-SQLException:",e);
		}
	}

	/**
	 * Post-process someone else's blog content: swap the original image URLs
	 * for the local copies we downloaded.
	 * @param blogContent
	 * @param replaceUrlMap original image URL -> local file path
	 * @return
	 */
	private static String replaceContent(String blogContent, Map<String, String> replaceUrlMap) {
		for (Map.Entry<String, String> entry : replaceUrlMap.entrySet()) {
			blogContent = blogContent.replace(entry.getKey(), entry.getValue());
		}
		return blogContent;
	}

	/**
	 * Localize images hosted on someone else's server.
	 * @param imgUrlList
	 * @return map of original image URL -> local file path
	 */
	private static Map<String, String> downloadImgList(List<String> imgUrlList) {
		Map<String, String> replaceMap = new HashMap<String, String>();
		for (String imgUrl : imgUrlList) {
			CloseableHttpClient httpClient = HttpClients.createDefault();
			HttpGet httpGet = new HttpGet(imgUrl);
			RequestConfig config = RequestConfig.custom().setConnectTimeout(5000).setSocketTimeout(8000).build();
			httpGet.setConfig(config);
			CloseableHttpResponse response = null;
			try {
				response = httpClient.execute(httpGet);
				if (response == null) {
					logger.info(imgUrl + ": no response from server");
				}else {
					if (response.getStatusLine().getStatusCode() == 200) {
						HttpEntity entity = response.getEntity();
						String blogImagesPath = PropertiesUtil.getValue("blogImages");
						String dateDir = DateUtil.getCurrentDatePath();
						String uuid = UUID.randomUUID().toString();
						String suffix = entity.getContentType().getValue().split("/")[1];
						String fileName = blogImagesPath + dateDir + "/" + uuid + "." + suffix;
						
						FileUtils.copyInputStreamToFile(entity.getContent(), new File(fileName));
						replaceMap.put(imgUrl, fileName);
					}
				}
			} catch (ClientProtocolException e) {
				logger.error(imgUrl + "-ClientProtocolException", e);
			} catch (IOException e) {
				logger.error(imgUrl + "-IOException", e);
			} catch (Exception e) {
				logger.error(imgUrl + "-Exception", e);
			} finally {
				try {
					if (response != null) {
						response.close();
					}
				} catch (IOException e) {
					logger.error(imgUrl + "-IOException", e);
				}
			}
		
		}
		return replaceMap;
	}

	public static void start() {
		while(true) {
			DbUtil dbUtil = new DbUtil();
			try {
				con = dbUtil.getCon();
				parseHomePage();
			} catch (Exception e) {
				logger.error("数据库连接势失败!");
			} finally {
				try {
					if (con != null) {
						con.close();
					}
				} catch (SQLException e) {
					logger.error("数据关闭异常-SQLException:",e);
				}
			}
			try {
				Thread.sleep(1000*60);
			} catch (InterruptedException e) {
				logger.error("主线程休眠异常-InterruptedException:",e);
			}
		}
	}

	public static void main(String[] args) {
		start();
	}
}
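The listing above imports three helper classes (PropertiesUtil, DbUtil, DateUtil) that the post never shows. Here is a minimal sketch of what they might look like, as three separate files; the JDBC URL, the credentials, and the crawler.properties file name are all assumptions:

// ---- PropertiesUtil.java : reads crawler.properties from the classpath ----
package com.javaxl.util;

import java.io.InputStream;
import java.util.Properties;

public class PropertiesUtil {
	private static final Properties PROPS = new Properties();
	static {
		try (InputStream in = PropertiesUtil.class.getResourceAsStream("/crawler.properties")) {
			PROPS.load(in);
		} catch (Exception e) {
			throw new ExceptionInInitializerError(e);
		}
	}
	public static String getValue(String key) {
		return PROPS.getProperty(key);
	}
}

// ---- DbUtil.java : hands out JDBC connections (settings are assumptions) ----
package com.javaxl.util;

import java.sql.Connection;
import java.sql.DriverManager;

public class DbUtil {
	public Connection getCon() throws Exception {
		Class.forName("com.mysql.jdbc.Driver");
		return DriverManager.getConnection(
				"jdbc:mysql://127.0.0.1:3306/crawler?useUnicode=true&characterEncoding=utf8",
				"root", "123456");
	}
}

// ---- DateUtil.java : yields a per-day subdirectory such as 2018/10/09 ----
package com.javaxl.util;

import java.text.SimpleDateFormat;
import java.util.Date;

public class DateUtil {
	public static String getCurrentDatePath() {
		return new SimpleDateFormat("yyyy/MM/dd").format(new Date());
	}
}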



Here is the console output after a run:

(screenshots: console output)
And now let's check the database:
(screenshot: database contents)
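The insert statement in parseBlogContent supplies 11 values, so t_jsoup_article must have 11 columns. The post never shows the DDL; a schema along the following lines would satisfy the statement (every column name other than title, content, and link is a guess):

-- a guessed 11-column layout matching
-- insert into `t_jsoup_article` values(null,?,?,null,now(),0,0,null,?,0,null)
CREATE TABLE `t_jsoup_article` (
  `id`            BIGINT PRIMARY KEY AUTO_INCREMENT, -- null  -> auto id
  `title`         VARCHAR(255),                      -- ?     -> post title
  `content`       LONGTEXT,                          -- ?     -> post body HTML
  `summary`       TEXT,                              -- null
  `crawler_date`  DATETIME,                          -- now()
  `view_count`    INT DEFAULT 0,                     -- 0
  `comment_count` INT DEFAULT 0,                     -- 0
  `author`        VARCHAR(64),                       -- null
  `link`          VARCHAR(512),                      -- ?     -> source URL
  `state`         INT DEFAULT 0,                     -- 0
  `remark`        VARCHAR(255)                       -- null
);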

Next, let's download an image (blogImages is the directory where images are stored):
(screenshots)

DownloadImg (saves a single image to local disk)

package com.javaxl.crawler;

import java.io.File;
import java.io.IOException;
import java.util.UUID;

import org.apache.commons.io.FileUtils;
import org.apache.http.HttpEntity;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.log4j.Logger;

import com.javaxl.util.DateUtil;
import com.javaxl.util.PropertiesUtil;

public class DownloadImg {
	private static Logger logger = Logger.getLogger(DownloadImg.class);
	private static String URL = "http://pic22.nipic.com/20120725/9676681_001949824394_2.jpg";
	public static void main(String[] args) {

		logger.info("开始爬取首页:" + URL);
		

		CloseableHttpClient httpClient = HttpClients.createDefault();
		HttpGet httpGet = new HttpGet(URL);
		RequestConfig config = RequestConfig.custom().setConnectTimeout(5000).setSocketTimeout(8000).build();
		httpGet.setConfig(config);
		CloseableHttpResponse response = null;
		try {
			response = httpClient.execute(httpGet);
			if (response == null) {
				logger.info("连接超时!!!");
			} else {
				HttpEntity entity = response.getEntity();
				String imgPath = PropertiesUtil.getValue("blogImages");
				String dateDir = DateUtil.getCurrentDatePath();
				String uuid = UUID.randomUUID().toString();
				String suffix = entity.getContentType().getValue().split("/")[1];
				String localFile = imgPath+dateDir+"/"+uuid+"."+suffix;
//				System.out.println(localFile);
				FileUtils.copyInputStreamToFile(entity.getContent(), new File(localFile));
			}
		} catch (ClientProtocolException e) {
			logger.error(URL+"-ClientProtocolException", e);
		} catch (IOException e) {
			logger.error(URL+"-IOException", e);
		} catch (Exception e) {
			logger.error(URL+"-Exception", e);
		} finally {
			try {
				if (response != null) {
					response.close();
				}
				if(httpClient != null) {
					httpClient.close();
				}
			} catch (IOException e) {
				logger.error(URL+"-IOException", e);
			}
		}
		

		logger.info("结束首页爬取:" + URL);
	
	}
}

Next, let's crawl images from Baidu:
(screenshot: code for this step)
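The code for this step appears only in the screenshot. Since Baidu's image search loads its results through JavaScript and JSON rather than plain img tags, here is a minimal sketch in the same style that works on any static page: it pulls every img src out of the HTML with jsoup and saves each file the way DownloadImg does. The page URL is an assumption, and the helper classes are carried over from above; treat this as an illustration, not the code in the screenshot:

package com.javaxl.crawler;

import java.io.File;
import java.util.UUID;

import org.apache.commons.io.FileUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

import com.javaxl.util.DateUtil;
import com.javaxl.util.PropertiesUtil;

public class DownloadPageImgs {
	public static void main(String[] args) throws Exception {
		// any page with static <img> tags will do; this URL is an assumption
		String pageUrl = "https://www.cnblogs.com/";
		Document doc = Jsoup.connect(pageUrl).timeout(8000).get();

		String imgPath = PropertiesUtil.getValue("blogImages");
		for (Element img : doc.select("img[src]")) {
			String src = img.absUrl("src"); // resolve relative URLs
			if (!src.startsWith("http")) {
				continue;
			}
			// fetch the raw bytes; ignoreContentType lets jsoup return non-HTML
			byte[] bytes = Jsoup.connect(src).ignoreContentType(true)
					.maxBodySize(0).execute().bodyAsBytes();
			// name the local file the same way DownloadImg does
			String suffix = src.substring(src.lastIndexOf('.') + 1);
			String localFile = imgPath + DateUtil.getCurrentDatePath() + "/"
					+ UUID.randomUUID() + "." + suffix;
			FileUtils.writeByteArrayToFile(new File(localFile), bytes);
		}
	}
}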
And check whether the images actually came down:
(screenshot: downloaded image files)
