The Tomcat NioEndpoint inner class Poller implements the Runnable interface and runs as a dedicated background thread that provides the polling service: it drains the PollerEvent queue, runs the NIO select loop and dispatches ready sockets to processors, performs timeout handling, and shuts down cleanly when asked to close.
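Before walking through the class itself, it helps to see how a Poller is wired up. The sketch below is a simplified, trimmed excerpt modeled on NioEndpoint.startInternal() from Tomcat 8.5 (the version this walkthrough appears to follow); treat it as illustrative rather than exact. It shows that the endpoint starts one daemon thread per Poller and that the PollerEvent recycling cache (eventCache, used later by add() and register()) is created at the same time:
@Override
public void startInternal() throws Exception {
    if (!running) {
        running = true;
        paused = false;
        // Cache of recyclable PollerEvent objects, shared by all pollers of this endpoint
        eventCache = new SynchronizedStack<>(SynchronizedStack.DEFAULT_SIZE,
                socketProperties.getEventCache());
        // Create the worker thread pool if none was configured externally
        if (getExecutor() == null) {
            createExecutor();
        }
        initializeConnectionLatch();
        // Start one Poller per configured poller thread, each on its own daemon thread
        pollers = new Poller[getPollerThreadCount()];
        for (int i = 0; i < pollers.length; i++) {
            pollers[i] = new Poller();
            Thread pollerThread = new Thread(pollers[i], getName() + "-ClientPoller-" + i);
            pollerThread.setDaemon(true);
            pollerThread.start();
        }
        startAcceptorThreads();
    }
}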
The implementation is as follows:
/**
* This class is a nested class of Tomcat NioEndpoint. Note that it is NOT declared static, so every
* Poller instance implicitly holds a reference to its enclosing NioEndpoint (which is why it can use
* eventCache, processSocket(), etc. directly).
* Poller class.
*/
public class Poller implements Runnable {
// The Java NIO Selector owned by this Poller
private Selector selector;
// Queue of pending PollerEvents. A synchronized queue is used because the queue is touched by
// multiple threads: the producer and the consumer of the events may be different threads. More
// concretely, Tomcat's connection-accepting acceptor thread registers every accepted connection
// socket with a Poller by offering an event to this queue, while the poller thread itself keeps
// running and consuming the queue. Two different threads operating on the same queue object is
// exactly why a synchronized queue is required.
//
// The events in this queue are consumed and processed inside the run() loop of the thread this
// Poller instance is attached to.
private final SynchronizedQueue<PollerEvent> events =
new SynchronizedQueue<>();
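// SynchronizedQueue here is Tomcat's own minimal, unbounded, array-backed queue
// (org.apache.tomcat.util.collections.SynchronizedQueue): its offer()/poll() methods are simply
// synchronized and never block, which is all the poller needs.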
// Whether this poller has been told to shut its polling thread down
private volatile boolean close = false;
private long nextExpiration = 0;//optimize expiration handling
private AtomicLong wakeupCounter = new AtomicLong(0);
private volatile int keyCount = 0;
public Poller() throws IOException {
// Each Poller maintains its own Java NIO Selector
this.selector = Selector.open();
}
public int getKeyCount() { return keyCount; }
public Selector getSelector() { return selector;}
/**
* Destroy the poller.
* Intended to be called by the NioEndpoint that owns this poller in order to shut it down.
*/
protected void destroy() {
// Wait for polltime before doing anything, so that the poller threads
// exit, otherwise parallel closure of sockets which are still
// in the poller can cause problems
// Tell the poller thread that it is time to stop
close = true;
selector.wakeup();
}
// Adds an event to the PollerEvent queue; a private helper used only by this Poller instance
private void addEvent(PollerEvent event) {
events.offer(event);
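// wakeupCounter is parked at -1 by the run() loop right before it enters a blocking select().
// incrementAndGet() returning 0 therefore means the poller thread is (or is about to be) blocked
// in select(), so wake the selector up to get the new event handled promptly; any other value
// means the poller is awake and will pick the event up on its own.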
if ( wakeupCounter.incrementAndGet() == 0 ) selector.wakeup();
}
/**
* Add specified socket and associated pool to the poller. The socket will
* be added to a temporary array, and polled first after a maximum amount
* of time equal to pollTime (in most cases, latency will be much lower,
* however).
*
* @param socket to add to the poller
* @param interestOps Operations for which to register this socket with
* the Poller
*/
public void add(final NioChannel socket, final int interestOps) {
// eventCache is the PollerEvent recycling cache of the NioEndpoint that owns this Poller.
// It exists to reuse PollerEvent objects that have already been processed, reducing GC cost.
PollerEvent r = eventCache.pop();
// If no recyclable PollerEvent is available, create a new one; otherwise reuse the instance
// obtained from the recycling cache.
if ( r==null) r = new PollerEvent(socket,null,interestOps);
else r.reset(socket,null,interestOps);
// Put the pending PollerEvent into the queue
addEvent(r);
if (close) {
NioEndpoint.NioSocketWrapper ka = (NioEndpoint.NioSocketWrapper)socket.getAttachment();
processSocket(ka, SocketEvent.STOP, false);
}
}
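// Note: add() is used for sockets this poller already knows about, e.g. to re-register OP_WRITE
// interest during a sendfile (see processSendfile() below); brand-new connections are handed over
// via register() instead.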
/**
* Processes events in the event queue of the Poller.
* @return <code>true</code> if some events were processed, <code>false</code> if the queue was empty
*/
public boolean events() {
// Tracks whether this invocation processed at least one PollerEvent
boolean result = false;
PollerEvent pe = null;
// Keep polling events off the queue and processing them until the queue is empty
while ( (pe = events.poll()) != null ) {
result = true; // as soon as any event is processed, this method returns true
try {
// Process the event just taken from the queue
pe.run(); // executes PollerEvent.run()
// After processing, reset the PollerEvent and, if the endpoint is still running, recycle it
pe.reset();
if (running && !paused) {
eventCache.push(pe);
}
} catch ( Throwable x ) {
log.error("",x);
}
}
return result;
}
/**
* Registers a newly created socket with the poller.
* Typical use: every time Tomcat's acceptor thread accepts a connection request, it calls this
* method on one of the pollers.
*
* @param socket The newly created socket
*/
public void register(final NioChannel socket) {
socket.setPoller(this);
NioSocketWrapper ka = new NioSocketWrapper(socket, NioEndpoint.this);
socket.setSocketWrapper(ka);
ka.setPoller(this);
ka.setReadTimeout(getSocketProperties().getSoTimeout());
ka.setWriteTimeout(getSocketProperties().getSoTimeout());
ka.setKeepAliveLeft(NioEndpoint.this.getMaxKeepAliveRequests());
ka.setSecure(isSSLEnabled());
ka.setReadTimeout(getConnectionTimeout());
ka.setWriteTimeout(getConnectionTimeout());
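// Note: the two calls above override the soTimeout-based read/write timeouts set a few lines
// earlier with the endpoint's connectionTimeout.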
// eventCache is the PollerEvent recycling cache of the NioEndpoint that owns this Poller.
// It exists to reuse PollerEvent objects that have already been processed, reducing GC cost.
PollerEvent r = eventCache.pop();
// The interest op recorded for the socket being registered is OP_READ (read data)
ka.interestOps(SelectionKey.OP_READ);//this is what OP_REGISTER turns into.
// The PollerEvent added below will, when executed, perform the OP_REGISTER pseudo-op on the
// target socket, i.e. register it with the corresponding Java NIO Selector
if ( r==null) r = new PollerEvent(socket,ka,OP_REGISTER);
else r.reset(socket,ka,OP_REGISTER);
// Add the PollerEvent to the queue
addEvent(r);
}
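// Typical caller (simplified, Tomcat 8.5): NioEndpoint.setSocketOptions() wraps each accepted
// SocketChannel in a NioChannel and hands it to a poller chosen round-robin:
//     getPoller0().register(channel);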
// Handles a cancelled SelectionKey: closes the corresponding socket channel (connection) and
// updates the connection count
public NioSocketWrapper cancelledKey(SelectionKey key) {
NioSocketWrapper ka = null;
try {
if ( key == null ) return null;//nothing to do
ka = (NioSocketWrapper) key.attach(null);
if (ka != null) {
// If attachment is non-null then there may be a current
// connection with an associated processor.
getHandler().release(ka);
}
if (key.isValid()) key.cancel();
// If it is available, close the NioChannel first which should
// in turn close the underlying SocketChannel. The NioChannel
// needs to be closed first, if available, to ensure that TLS
// connections are shut down cleanly.
if (ka != null) {
try {
ka.getSocket().close(true);
} catch (Exception e){
if (log.isDebugEnabled()) {
log.debug(sm.getString(
"endpoint.debug.socketCloseFail"), e);
}
}
}
// The SocketChannel is also available via the SelectionKey. If
// it hasn't been closed in the block above, close it now.
if (key.channel().isOpen()) {
try {
key.channel().close();
} catch (Exception e) {
if (log.isDebugEnabled()) {
log.debug(sm.getString(
"endpoint.debug.channelCloseFail"), e);
}
}
}
try {
if (ka != null && ka.getSendfileData() != null
&& ka.getSendfileData().fchannel != null
&& ka.getSendfileData().fchannel.isOpen()) {
ka.getSendfileData().fchannel.close();
}
} catch (Exception ignore) {
}
if (ka != null) {
countDownConnection();
}
} catch (Throwable e) {
ExceptionUtils.handleThrowable(e);
if (log.isDebugEnabled()) log.error("",e);
}
return ka;
}
/**
* The background thread that adds sockets to the Poller, checks the
* poller for triggered events and hands the associated socket off to an
* appropriate processor as events occur.
*
* The main loop of Tomcat's background poller thread. Each iteration does the following:
* 1. Processes every event currently in the PollerEvent queue.
* 2. Processes the NIO events the Selector reports as ready (the actual request handling is
*    delegated to worker threads).
* 3. Timeout handling: runs timeout processing once per iteration when certain conditions hold.
* 4. Shutdown detection: if the poller has been told to stop, runs the shutdown logic, i.e.
*    terminates the while loop inside this run() method.
*/
@Override
public void run() {
// Loop until destroy() is called
while (true) {
boolean hasEvents = false;
try {
if (!close) {
// No stop request received: process all events currently in the PollerEvent queue
hasEvents = events();
if (wakeupCounter.getAndSet(-1) > 0) {
//if we are here, means we have other stuff to do
//do a non blocking select
keyCount = selector.selectNow();
} else {
keyCount = selector.select(selectorTimeout);
}
wakeupCounter.set(0);
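// Back to 0: the poller is awake now, so addEvent() calls made from here on will see a positive
// counter and will not need to wake the selector until the next blocking select() is entered.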
}
if (close) {
// Stop requested: drain the PollerEvent queue once more before the poller thread exits
events();
// Run timeout handling one last time before the poller shuts down
timeout(0, false);
// Close the Java NIO selector, i.e. stop accepting and processing I/O events
try {
selector.close();
} catch (IOException ioe) {
log.error(sm.getString("endpoint.nio.selectorCloseFail"), ioe);
}
// Shutdown requested and cleanup finished: break out of the thread's while loop
break;
}
} catch (Throwable x) {
// Do not exit on exceptions: log them and let the poller's while loop continue
ExceptionUtils.handleThrowable(x);
log.error("",x);
continue;
}
//either we timed out or we woke up, process events first
if ( keyCount == 0 ) hasEvents = (hasEvents | events());
Iterator<SelectionKey> iterator =
keyCount > 0 ? selector.selectedKeys().iterator() : null;
// Walk through the collection of ready keys and dispatch
// any active event (i.e. process every pending NIO event).
while (iterator != null && iterator.hasNext()) {
SelectionKey sk = iterator.next();
NioSocketWrapper attachment = (NioSocketWrapper)sk.attachment();
// Attachment may be null if another thread has called
// cancelledKey()
if (attachment == null) {
iterator.remove();
} else {
iterator.remove();
// Handle the SelectionKey that has pending events; the real work is delegated to a worker thread
processKey(sk, attachment);
}
}//while
// Process timeouts during normal operation
timeout(keyCount,hasEvents);
}//while
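// Signal the enclosing NioEndpoint, which waits on this latch while stopping, that this poller
// thread has fully exited.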
getStopLatch().countDown();
}
protected void processKey(SelectionKey sk, NioSocketWrapper attachment) {
try {
if ( close ) {
// The poller is closing: cancel the given SelectionKey
cancelledKey(sk);
} else if ( sk.isValid() && attachment != null ) {
// The SelectionKey is valid and has an attachment
if (sk.isReadable() || sk.isWritable() ) {
if ( attachment.getSendfileData() != null ) {
processSendfile(sk,attachment, false);
} else {
unreg(sk, attachment, sk.readyOps());
boolean closeSocket = false;
// Read goes before write
if (sk.isReadable()) {
// Handle the socket's NIO read operation.
// processSocket() belongs to the enclosing NioEndpoint and is implemented in AbstractEndpoint:
// if an executor (thread pool) is configured it hands the work to a SocketProcessor run on the
// pool, otherwise the SocketProcessor is run on the current thread.
if (!processSocket(attachment, SocketEvent.OPEN_READ, true)) {
// Processing failed: the socket channel behind this SelectionKey must be closed
closeSocket = true;
}
}
if (!closeSocket && sk.isWritable()) {
// Handle the socket's NIO write operation.
// processSocket() belongs to the enclosing NioEndpoint: if an executor (thread pool) is
// configured it hands the work to a SocketProcessor run on the pool, otherwise the
// SocketProcessor is run on the current thread.
if (!processSocket(attachment, SocketEvent.OPEN_WRITE, true)) {
// Processing failed: the socket channel behind this SelectionKey must be closed
closeSocket = true;
}
}
if (closeSocket) {
// Processing failed, so the socket channel behind this SelectionKey must be closed:
// cancel the key now
cancelledKey(sk);
}
}
}
} else {
// Invalid key: cancel it
cancelledKey(sk);
}
} catch ( CancelledKeyException ckx ) {
// The key was cancelled concurrently: run the cancellation cleanup
cancelledKey(sk);
} catch (Throwable t) {
ExceptionUtils.handleThrowable(t);
log.error("",t);
}
}
public SendfileState processSendfile(SelectionKey sk, NioSocketWrapper socketWrapper,
boolean calledByProcessor) {
NioChannel sc = null;
try {
unreg(sk, socketWrapper, sk.readyOps());
SendfileData sd = socketWrapper.getSendfileData();
if (log.isTraceEnabled()) {
log.trace("Processing send file for: " + sd.fileName);
}
if (sd.fchannel == null) {
// Setup the file channel
File f = new File(sd.fileName);
@SuppressWarnings("resource") // Closed when channel is closed
FileInputStream fis = new FileInputStream(f);
sd.fchannel = fis.getChannel();
}
// Configure output channel
sc = socketWrapper.getSocket();
// TLS/SSL channel is slightly different
WritableByteChannel wc = ((sc instanceof SecureNioChannel)?sc:sc.getIOChannel());
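// For a SecureNioChannel the file data has to be written through the NioChannel itself so it
// gets encrypted; for a plain connection it can go straight to the underlying SocketChannel,
// which lets transferTo() below use the kernel's zero-copy sendfile path.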
// We still have data in the buffer
if (sc.getOutboundRemaining()>0) {
if (sc.flushOutbound()) {
socketWrapper.updateLastWrite();
}
} else {
long written = sd.fchannel.transferTo(sd.pos,sd.length,wc);
if (written > 0) {
sd.pos += written;
sd.length -= written;
socketWrapper.updateLastWrite();
} else {
// Unusual not to be able to transfer any bytes
// Check the length was set correctly
if (sd.fchannel.size() <= sd.pos) {
throw new IOException("Sendfile configured to " +
"send more data than was available");
}
}
}
if (sd.length <= 0 && sc.getOutboundRemaining()<=0) {
if (log.isDebugEnabled()) {
log.debug("Send file complete for: "+sd.fileName);
}
socketWrapper.setSendfileData(null);
try {
sd.fchannel.close();
} catch (Exception ignore) {
}
// For calls from outside the Poller, the caller is
// responsible for registering the socket for the
// appropriate event(s) if sendfile completes.
if (!calledByProcessor) {
switch (sd.keepAliveState) {
case NONE: {
if (log.isDebugEnabled()) {
log.debug("Send file connection is being closed");
}
close(sc, sk);
break;
}
case PIPELINED: {
if (log.isDebugEnabled()) {
log.debug("Connection is keep alive, processing pipe-lined data");
}
if (!processSocket(socketWrapper, SocketEvent.OPEN_READ, true)) {
close(sc, sk);
}
break;
}
case OPEN: {
if (log.isDebugEnabled()) {
log.debug("Connection is keep alive, registering back for OP_READ");
}
reg(sk,socketWrapper,SelectionKey.OP_READ);
break;
}
}
}
return SendfileState.DONE;
} else {
if (log.isDebugEnabled()) {
log.debug("OP_WRITE for sendfile: " + sd.fileName);
}
if (calledByProcessor) {
add(socketWrapper.getSocket(),SelectionKey.OP_WRITE);
} else {
reg(sk,socketWrapper,SelectionKey.OP_WRITE);
}
return SendfileState.PENDING;
}
} catch (IOException x) {
if (log.isDebugEnabled()) log.debug("Unable to complete sendfile request:", x);
if (!calledByProcessor && sc != null) {
close(sc, sk);
}
return SendfileState.ERROR;
} catch (Throwable t) {
log.error("", t);
if (!calledByProcessor && sc != null) {
close(sc, sk);
}
return SendfileState.ERROR;
}
}
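// Summary of the return values: DONE means the whole file (and any buffered output) has been
// written, PENDING means the socket has been re-registered for OP_WRITE and will be polled again,
// ERROR means the transfer failed and the connection was closed (unless the caller was a
// processor, which then handles the failure itself).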
protected void unreg(SelectionKey sk, NioSocketWrapper attachment, int readyOps) {
//this is a must, so that we don't have multiple threads messing with the socket
reg(sk,attachment,sk.interestOps()& (~readyOps));
}
protected void reg(SelectionKey sk, NioSocketWrapper attachment, int intops) {
sk.interestOps(intops);
attachment.interestOps(intops);
}
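// Example: if interestOps() is OP_READ|OP_WRITE (1 | 4 = 5) and readyOps is OP_READ (1),
// unreg() leaves only OP_WRITE (4) registered. Clearing the ready ops before dispatching ensures
// the selector will not fire the same event again while a worker thread is still processing it.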
// Timeout handling
protected void timeout(int keyCount, boolean hasEvents) {
long now = System.currentTimeMillis();
// This method is called on every loop of the Poller. Don't process
// timeouts on every loop of the Poller since that would create too
// much load and timeouts can afford to wait a few seconds.
// However, do process timeouts if any of the following are true:
// - the selector simply timed out (suggests there isn't much load)
// - the nextExpiration time has passed
// - the server socket is being closed
if (nextExpiration > 0 && (keyCount > 0 || hasEvents) && (now < nextExpiration) && !close) {
// None of the conditions above require timeout processing right now, so return
return;
}
// Timeout processing starts here
int keycount = 0;
try {
for (SelectionKey key : selector.keys()) {
keycount++;
try {
NioSocketWrapper ka = (NioSocketWrapper) key.attachment();
if ( ka == null ) {
// Cancel keys that have no attachment and close their sockets
cancelledKey(key); //we don't support any keys without attachments
} else if (close) {
// The endpoint is shutting down
key.interestOps(0);
ka.interestOps(0); //avoid duplicate stop calls
// Process the SelectionKey so its connection gets cleaned up
processKey(key,ka);
} else if ((ka.interestOps()&SelectionKey.OP_READ) == SelectionKey.OP_READ ||
(ka.interestOps()&SelectionKey.OP_WRITE) == SelectionKey.OP_WRITE) {
// Normal operation and the key is interested in read and/or write: check for timeouts
boolean isTimedOut = false;
// Check for read timeout
if ((ka.interestOps() & SelectionKey.OP_READ) == SelectionKey.OP_READ) {
// Did a read timeout occur?
long delta = now - ka.getLastRead();
long timeout = ka.getReadTimeout();
isTimedOut = timeout > 0 && delta > timeout;
}
// Check for write timeout
if (!isTimedOut && (ka.interestOps() & SelectionKey.OP_WRITE) == SelectionKey.OP_WRITE) {
// Did a write timeout occur?
long delta = now - ka.getLastWrite();
long timeout = ka.getWriteTimeout();
isTimedOut = timeout > 0 && delta > timeout;
}
if (isTimedOut) {
// A read or write timeout occurred: record a SocketTimeoutException and let the enclosing
// NioEndpoint's processSocket() handle the ERROR event
key.interestOps(0);
ka.interestOps(0); //avoid duplicate timeout calls
ka.setError(new SocketTimeoutException());
if (!processSocket(ka, SocketEvent.ERROR, true)) {
// Processing failed: cancel the SelectionKey and close its connection
cancelledKey(key);
}
}
}
}catch ( CancelledKeyException ckx ) {
// On CancelledKeyException, cancel the SelectionKey and close its connection
cancelledKey(key);
}
}//for
} catch (ConcurrentModificationException cme) {
// See https://bz.apache.org/bugzilla/show_bug.cgi?id=57943
log.warn(sm.getString("endpoint.nio.timeoutCme"), cme);
}
long prevExp = nextExpiration; //for logging purposes only
nextExpiration = System.currentTimeMillis() +
socketProperties.getTimeoutInterval();
if (log.isTraceEnabled()) {
log.trace("timeout completed: keys processed=" + keycount +
"; now=" + now + "; nextExpiration=" + prevExp +
"; keyCount=" + keyCount + "; hasEvents=" + hasEvents +
"; eval=" + ((now < prevExp) && (keyCount>0 || hasEvents) && (!close) ));
}
}
}
The PollerEvent class used by the Poller above is implemented as follows:
/**
* Represents a pending event inserted into a Poller's event queue; instances can be cached and
* recycled by the Poller to avoid GC cost.
* PollerEvent, cacheable object for poller events to avoid GC
*/
public static class PollerEvent implements Runnable {
// The NioChannel this event operates on
private NioChannel socket;
// The interest ops requested for that NioChannel
private int interestOps;
private NioSocketWrapper socketWrapper;
public PollerEvent(NioChannel ch, NioSocketWrapper w, int intOps) {
reset(ch, w, intOps);
}
public void reset(NioChannel ch, NioSocketWrapper w, int intOps) {
socket = ch;
interestOps = intOps;
socketWrapper = w;
}
public void reset() {
reset(null, null, 0);
}
// Execution of the PollerEvent
@Override
public void run() {
if (interestOps == OP_REGISTER) {
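// (OP_REGISTER is a Tomcat-private pseudo-op constant defined in NioEndpoint; it is chosen so
// that it cannot collide with the standard SelectionKey.OP_* bits.)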
// If the requested op is OP_REGISTER, register the socket with its Poller's Java NIO selector,
// with interest in the OP_READ (read data) event
try {
socket.getIOChannel().register(
socket.getPoller().getSelector(), SelectionKey.OP_READ, socketWrapper);
} catch (Exception x) {
log.error(sm.getString("endpoint.nio.registerFail"), x);
}
} else {
final SelectionKey key = socket.getIOChannel().keyFor(socket.getPoller().getSelector());
try {
if (key == null) {
// The key was cancelled (e.g. due to socket closure)
// and removed from the selector while it was being
// processed. Count down the connections at this point
// since it won't have been counted down when the socket
// closed.
socket.socketWrapper.getEndpoint().countDownConnection();
} else {
final NioSocketWrapper socketWrapper = (NioSocketWrapper) key.attachment();
if (socketWrapper != null) {
//we are registering the key to start with, reset the fairness counter.
int ops = key.interestOps() | interestOps;
socketWrapper.interestOps(ops);
key.interestOps(ops);
} else {
socket.getPoller().cancelledKey(key);
}
}
} catch (CancelledKeyException ckx) {
try {
socket.getPoller().cancelledKey(key);
} catch (Exception ignore) {}
}
}
}
@Override
public String toString() {
return "Poller event: socket [" + socket + "], socketWrapper [" + socketWrapper +
"], interestOps [" + interestOps + "]";
}
}
Tomcat NioEndpoint inner class Acceptor
Tomcat NIO basic architecture