这个框架有很多细节值得我们去探究,本文主要对其中几个核心组件做简要介绍。
对于小白来说可以好好学习下
package us.codecraft.webmagic.thread;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
/**
* Thread pool for workers.
* Use {@link java.util.concurrent.ExecutorService} as inner implement.
* New feature:
* 1. Block when thread pool is full to avoid poll many urls without process.
* 2. Count of thread alive for monitor.
*
* @author [email protected]
* @since 0.5.0
*/
public class CountableThreadPool {
private int threadNum;
private AtomicInteger threadAlive = new AtomicInteger();
private ReentrantLock reentrantLock = new ReentrantLock();
private Condition condition = reentrantLock.newCondition();
public CountableThreadPool(int threadNum) {
this.threadNum = threadNum;
this.executorService = Executors.newFixedThreadPool(threadNum);
}
public CountableThreadPool(int threadNum, ExecutorService executorService) {
this.threadNum = threadNum;
this.executorService = executorService;
}
public void setExecutorService(ExecutorService executorService) {
this.executorService = executorService;
}
public int getThreadAlive() {
return threadAlive.get();
}
public int getThreadNum() {
return threadNum;
}
private ExecutorService executorService;
public void execute(final Runnable runnable) {
if (threadAlive.get() >= threadNum) {
try {
reentrantLock.lock();
while (threadAlive.get() >= threadNum) {
try {
condition.await();
} catch (InterruptedException e) {
}
}
} finally {
reentrantLock.unlock();
}
}
threadAlive.incrementAndGet();
executorService.execute(new Runnable() {
@Override
public void run() {
try {
runnable.run();
} finally {
try {
reentrantLock.lock();
threadAlive.decrementAndGet();
condition.signal();
} finally {
reentrantLock.unlock();
}
}
}
});
}
public boolean isShutdown() {
return executorService.isShutdown();
}
public void shutdown() {
executorService.shutdown();
}
}
这一部分我认为还有很大的改造空间,例如代理的健康检查与失败剔除。
package us.codecraft.webmagic.proxy;
import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Task;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
/**
* A simple ProxyProvider. Provide proxy as round-robin without heartbeat and error check. It can be used when all proxies are stable.
* @author [email protected]
* Date: 17/4/16
* Time: 10:18
* @since 0.7.0
*/
/**
 * A simple ProxyProvider. Provide proxy as round-robin without heartbeat and error check. It can be used when all proxies are stable.
 * @author [email protected]
 * Date: 17/4/16
 * Time: 10:18
 * @since 0.7.0
 */
public class SimpleProxyProvider implements ProxyProvider {

    /** Immutable list of proxies handed out round-robin. */
    private final List<Proxy> proxies;

    /** Index of the last proxy handed out; starts at -1 so the first call yields index 0. */
    private final AtomicInteger pointer;

    /**
     * Creates a provider cycling over the given proxies.
     *
     * @param proxies proxies to rotate through; should not be empty
     */
    public SimpleProxyProvider(List<Proxy> proxies) {
        this(proxies, new AtomicInteger(-1));
    }

    private SimpleProxyProvider(List<Proxy> proxies, AtomicInteger pointer) {
        this.proxies = proxies;
        this.pointer = pointer;
    }

    /**
     * Convenience factory building a provider from a varargs list of proxies.
     *
     * @param proxies proxies to rotate through
     * @return a new provider over an unmodifiable copy of {@code proxies}
     */
    public static SimpleProxyProvider from(Proxy... proxies) {
        List<Proxy> proxiesTemp = new ArrayList<Proxy>(proxies.length);
        for (Proxy proxy : proxies) {
            proxiesTemp.add(proxy);
        }
        return new SimpleProxyProvider(Collections.unmodifiableList(proxiesTemp));
    }

    @Override
    public void returnProxy(Proxy proxy, Page page, Task task) {
        // Do nothing: this provider has no health tracking, so returned proxies
        // are not inspected or re-ranked.
    }

    @Override
    public Proxy getProxy(Task task) {
        return proxies.get(incrForLoop());
    }

    /**
     * Advances the pointer and wraps it back into [0, size) once it overflows,
     * using CAS so concurrent callers never leave the pointer growing unbounded.
     *
     * @return the next round-robin index into {@link #proxies}
     */
    private int incrForLoop() {
        int p = pointer.incrementAndGet();
        int size = proxies.size();
        if (p < size) {
            return p;
        }
        // Fold the pointer back into range. If another thread moved it first,
        // re-read and retry; the returned index is always taken modulo size.
        while (!pointer.compareAndSet(p, p % size)) {
            p = pointer.get();
        }
        return p % size;
    }
}
这里的 XPath 选择器对部分语法不支持,但框架中还提供了支持 XPath 2.0 的 Xpath2Selector 可供选用。
package us.codecraft.webmagic.selector;
import net.sf.saxon.lib.NamespaceConstant;
import net.sf.saxon.xpath.XPathEvaluator;
import org.apache.log4j.Logger;
import org.htmlcleaner.CleanerProperties;
import org.htmlcleaner.DomSerializer;
import org.htmlcleaner.HtmlCleaner;
import org.htmlcleaner.TagNode;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import javax.xml.namespace.NamespaceContext;
import javax.xml.transform.OutputKeys;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathExpression;
import javax.xml.xpath.XPathExpressionException;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
* 支持xpath2.0的选择器。包装了HtmlCleaner和Saxon HE。
*
* @author [email protected]
* Date: 13-4-21
* Time: 上午9:39
*/
public class Xpath2Selector implements Selector {
private String xpathStr;
private XPathExpression xPathExpression;
private Logger logger = Logger.getLogger(getClass());
public Xpath2Selector(String xpathStr) {
this.xpathStr = xpathStr;
try {
init();
} catch (XPathExpressionException e) {
throw new IllegalArgumentException("XPath error!", e);
}
}
enum XPath2NamespaceContext implements NamespaceContext {
INSTANCE;
private final Map prefix2NamespaceMap = new ConcurrentHashMap();
private final Map> namespace2PrefixMap = new ConcurrentHashMap>();
private void put(String prefix, String namespaceURI) {
prefix2NamespaceMap.put(prefix, namespaceURI);
List prefixes = namespace2PrefixMap.get(namespaceURI);
if (prefixes == null) {
prefixes = new ArrayList();
namespace2PrefixMap.put(namespaceURI, prefixes);
}
prefixes.add(prefix);
}
private XPath2NamespaceContext() {
put("fn", NamespaceConstant.FN);
put("xslt", NamespaceConstant.XSLT);
}
@Override
public String getNamespaceURI(String prefix) {
return prefix2NamespaceMap.get(prefix);
}
@Override
public String getPrefix(String namespaceURI) {
List prefixes = namespace2PrefixMap.get(namespaceURI);
if (prefixes == null || prefixes.size() < 1) {
return null;
}
return prefixes.get(0);
}
@Override
public Iterator getPrefixes(String namespaceURI) {
List prefixes = namespace2PrefixMap.get(namespaceURI);
if (prefixes == null || prefixes.size() < 1) {
return null;
}
return prefixes.iterator();
}
}
private void init() throws XPathExpressionException {
XPathEvaluator xPathEvaluator = new XPathEvaluator();
xPathEvaluator.setNamespaceContext(XPath2NamespaceContext.INSTANCE);
xPathExpression = xPathEvaluator.compile(xpathStr);
}
@Override
public String select(String text) {
try {
HtmlCleaner htmlCleaner = new HtmlCleaner();
TagNode tagNode = htmlCleaner.clean(text);
Document document = new DomSerializer(new CleanerProperties()).createDOM(tagNode);
Object result;
try {
result = xPathExpression.evaluate(document, XPathConstants.NODESET);
} catch (XPathExpressionException e) {
result = xPathExpression.evaluate(document, XPathConstants.STRING);
}
if (result instanceof NodeList) {
NodeList nodeList = (NodeList) result;
if (nodeList.getLength() == 0) {
return null;
}
Node item = nodeList.item(0);
if (item.getNodeType() == Node.ATTRIBUTE_NODE || item.getNodeType() == Node.TEXT_NODE) {
return item.getTextContent();
} else {
StreamResult xmlOutput = new StreamResult(new StringWriter());
Transformer transformer = TransformerFactory.newInstance().newTransformer();
transformer.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes");
transformer.transform(new DOMSource(item), xmlOutput);
return xmlOutput.getWriter().toString();
}
}
return result.toString();
} catch (Exception e) {
logger.error("select text error! " + xpathStr, e);
}
return null;
}
@Override
public List selectList(String text) {
List results = new ArrayList();
try {
HtmlCleaner htmlCleaner = new HtmlCleaner();
TagNode tagNode = htmlCleaner.clean(text);
Document document = new DomSerializer(new CleanerProperties()).createDOM(tagNode);
Object result;
try {
result = xPathExpression.evaluate(document, XPathConstants.NODESET);
} catch (XPathExpressionException e) {
result = xPathExpression.evaluate(document, XPathConstants.STRING);
}
if (result instanceof NodeList) {
NodeList nodeList = (NodeList) result;
Transformer transformer = TransformerFactory.newInstance().newTransformer();
StreamResult xmlOutput = new StreamResult();
transformer.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes");
for (int i = 0; i < nodeList.getLength(); i++) {
Node item = nodeList.item(i);
if (item.getNodeType() == Node.ATTRIBUTE_NODE || item.getNodeType() == Node.TEXT_NODE) {
results.add(item.getTextContent());
} else {
xmlOutput.setWriter(new StringWriter());
transformer.transform(new DOMSource(item), xmlOutput);
results.add(xmlOutput.getWriter().toString());
}
}
} else {
results.add(result.toString());
}
} catch (Exception e) {
logger.error("select text error! " + xpathStr, e);
}
return results;
}
}
这块没有成熟demo,很可惜
这个项目也没有完成,但是有别的项目代替,爬虫管理是重点。
这一系列文章,剖析了整个框架,以及爬虫如何编写。
我们可以从实现方式,制定3种方案
1 全模拟 全程使用驱动去完成爬取过程,优点:操作简单 缺点:速度慢,即便用了headless
2 半模拟 仅在登录时用驱动获取 cookie 等值,或在中途执行 js,优点:难度中等,速度较快,易于迅速解决问题 缺点:速度仍慢于纯代码实现,且需要维护部分驱动逻辑
3 无模拟 仅凭代码实现各种参数生成,优点:速度很快,缺点:真的很烦,解析Js最烦