Tomcat's underlying NIO networking is based on the master-sub (multi-)Reactor multi-threading model.
It uses three groups of threads, each responsible for a different part of the work: Acceptor threads that accept connections, poller threads that drive the Selector, and worker threads that process requests.
Note: between the Acceptor threads and the poller threads sits a SocketChannel queue. The Acceptor thread pushes each accepted SocketChannel onto the queue; a poller thread takes it off the queue and immediately registers it with its Selector for read events.
The defining feature of the master-sub Reactor thread model is that the server no longer accepts client connections on a single NIO thread, but on an independent NIO thread pool. Once the Acceptor has finished handling a client's TCP connection request (which may include access authentication), it registers the newly created SocketChannel with one of the I/O threads in the sub-reactor pool, which then takes over all reading, writing, and encoding/decoding for that channel. The acceptor side is used only for client login, handshake, and security authentication; as soon as the link is established, it is registered with an I/O thread of the backend sub-reactor pool, which handles all subsequent I/O.
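To make the pipeline concrete, here is a minimal, self-contained sketch of the same acceptor -> poller -> worker hand-off in plain Java NIO. It illustrates the pattern only; it is not Tomcat code, and all names (MiniReactor, handoff, echo) are invented for the example.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.util.Iterator;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class MiniReactor {
    public static void main(String[] args) throws IOException {
        ServerSocketChannel server = ServerSocketChannel.open();
        server.bind(new InetSocketAddress(8080), 100);
        server.configureBlocking(true);          // acceptor blocks, like NioEndpoint

        Selector selector = Selector.open();
        Queue<SocketChannel> handoff = new ConcurrentLinkedQueue<>();
        ExecutorService workers = Executors.newFixedThreadPool(4);

        // Poller thread: drain the hand-off queue, register OP_READ on its own
        // selector, then dispatch ready channels to the worker pool.
        Thread poller = new Thread(() -> {
            try {
                while (true) {
                    SocketChannel ch;
                    while ((ch = handoff.poll()) != null) {
                        ch.configureBlocking(false);
                        ch.register(selector, SelectionKey.OP_READ);
                    }
                    if (selector.select(1000) == 0) {
                        continue;
                    }
                    Iterator<SelectionKey> it = selector.selectedKeys().iterator();
                    while (it.hasNext()) {
                        SelectionKey key = it.next();
                        it.remove();
                        key.interestOps(0);      // stop re-firing while a worker runs
                        SocketChannel c = (SocketChannel) key.channel();
                        workers.execute(() -> echo(c, key));
                    }
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }, "poller");
        poller.setDaemon(true);
        poller.start();

        // Acceptor loop: blocking accept, then hand the channel to the poller.
        while (true) {
            SocketChannel ch = server.accept();
            handoff.offer(ch);
            selector.wakeup();                   // nudge the poller to register it
        }
    }

    // Worker: read, echo back, then re-arm read interest.
    static void echo(SocketChannel ch, SelectionKey key) {
        ByteBuffer buf = ByteBuffer.allocate(1024);
        try {
            if (ch.read(buf) < 0) {
                ch.close();
                return;
            }
            buf.flip();
            ch.write(buf);
            key.interestOps(SelectionKey.OP_READ);
            key.selector().wakeup();
        } catch (IOException e) {
            try { ch.close(); } catch (IOException ignored) {}
        }
    }
}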
Its workflow is summarized as follows:
Acceptors accept new connections and pass them along to the worker threads:
/**
* Threads used to accept new connections and pass them to worker threads.
*/
protected List<Acceptor<U>> acceptors;
Starting the Acceptor threads
startAcceptorThreads() launches acceptorThreadCount threads to listen for connections:
protected void startAcceptorThreads() {
int count = getAcceptorThreadCount();
acceptors = new ArrayList<>(count);
for (int i = 0; i < count; i++) {
Acceptor acceptor = new Acceptor<>(this);
String threadName = getName() + "-Acceptor-" + i;
acceptor.setThreadName(threadName);
acceptors.add(acceptor);
Thread t = new Thread(acceptor, threadName);
t.setPriority(getAcceptorThreadPriority());
t.setDaemon(getDaemon());
t.start();
}
}
The Acceptor thread listens for connections
Once started, the Acceptor thread runs Acceptor#run(), which has two core pieces of logic: blocking until a client connection arrives, and handing the accepted socket off to a poller (both are walked through below).
The Acceptor thread: the Acceptor class
private final AbstractEndpoint<?,U> endpoint;
@Override
public void run() {
int errorDelay = 0;
// Loop until we receive a shutdown command
while (endpoint.isRunning()) {
// Loop if endpoint is paused
while (endpoint.isPaused() && endpoint.isRunning()) {
state = AcceptorState.PAUSED;
try {
Thread.sleep(50);
} catch (InterruptedException e) {
// Ignore
}
}
if (!endpoint.isRunning()) {
break;
}
state = AcceptorState.RUNNING;
try {
//if we have reached max connections, wait
endpoint.countUpOrAwaitConnection();
// Endpoint might have been paused while waiting for latch
// If that is the case, don't accept new connections
if (endpoint.isPaused()) {
continue;
}
U socket = null;
try {
// Accept the next incoming connection from the server
// socket
// Listen for a connection and accept it
socket = endpoint.serverSocketAccept();
} catch (Exception ioe) {
// We didn't get a socket
endpoint.countDownConnection();
if (endpoint.isRunning()) {
// Introduce delay if necessary
errorDelay = handleExceptionWithDelay(errorDelay);
// re-throw
throw ioe;
} else {
break;
}
}
// Successful accept, reset the error delay
errorDelay = 0;
// Configure the socket
if (endpoint.isRunning() && !endpoint.isPaused()) {
// setSocketOptions() will hand the socket off to
// an appropriate processor if successful
// Hand the socket off to a poller thread
if (!endpoint.setSocketOptions(socket)) {
endpoint.closeSocket(socket);
}
} else {
endpoint.destroySocket(socket);
}
} catch (Throwable t) {
// ... exception handling omitted ...
}
}
state = AcceptorState.ENDED;
}
/**
* External Executor based thread pool.
*/
private Executor executor = null;
public void setExecutor(Executor executor) {
this.executor = executor;
this.internalExecutor = (executor == null);
}
public Executor getExecutor() { return executor; }
public void createExecutor() {
internalExecutor = true;
TaskQueue taskqueue = new TaskQueue();
TaskThreadFactory tf = new TaskThreadFactory(getName() + "-exec-", daemon, getThreadPriority());
executor = new ThreadPoolExecutor(getMinSpareThreads(), getMaxThreads(), 60, TimeUnit.SECONDS,taskqueue, tf);
taskqueue.setParent( (ThreadPoolExecutor) executor);
}
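Note that the ThreadPoolExecutor constructed here is Tomcat's own (org.apache.tomcat.util.threads.ThreadPoolExecutor), not java.util.concurrent's: paired with TaskQueue, it grows the pool up to maxThreads before it starts queueing, the opposite of the stock j.u.c. behavior. Below is a simplified sketch of that idea against the standard j.u.c.ThreadPoolExecutor; EagerQueue and EagerPoolDemo are invented names, not Tomcat classes.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Report "full" while the pool can still grow, so the executor creates
// threads up to maxThreads before it queues tasks.
class EagerQueue extends LinkedBlockingQueue<Runnable> {
    volatile ThreadPoolExecutor parent;

    @Override
    public boolean offer(Runnable r) {
        if (parent != null && parent.getPoolSize() < parent.getMaximumPoolSize()) {
            return false; // force the executor to spawn another worker
        }
        return super.offer(r);
    }

    boolean force(Runnable r) {
        return super.offer(r); // bypass the trick once the pool is saturated
    }
}

public class EagerPoolDemo {
    public static void main(String[] args) {
        EagerQueue queue = new EagerQueue();
        ThreadPoolExecutor pool = new ThreadPoolExecutor(10, 200, 60, TimeUnit.SECONDS,
                queue,
                (r, ex) -> { // saturated: queue the task instead of failing outright
                    if (!queue.force(r)) throw new RejectedExecutionException("queue full");
                });
        queue.parent = pool;
        pool.execute(() -> System.out.println("ran on " + Thread.currentThread().getName()));
        pool.shutdown();
    }
}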
/**
* Maximum amount of worker threads.
*/
private int maxThreads = 200;
public void setMaxThreads(int maxThreads) {
this.maxThreads = maxThreads;
Executor executor = this.executor;
if (internalExecutor && executor instanceof java.util.concurrent.ThreadPoolExecutor) {
// The internal executor should always be an instance of
// j.u.c.ThreadPoolExecutor but it may be null if the endpoint is
// not running.
// This check also avoids various threading issues.
((java.util.concurrent.ThreadPoolExecutor) executor).setMaximumPoolSize(maxThreads);
}
}
public final void start() throws Exception {
if (bindState == BindState.UNBOUND) {
bindWithCleanup();
bindState = BindState.BOUND_ON_START;
}
//Initialize the ServerSocket and start the worker, poller and acceptor threads
startInternal();
}
private void bindWithCleanup() throws Exception {
try {
bind();
} catch (Throwable t) {
// Ensure open sockets etc. are cleaned up if something goes
// wrong during bind
ExceptionUtils.handleThrowable(t);
unbind();
throw t;
}
}
/**
* Start the NIO endpoint, creating acceptor, poller threads.
*/
@Override
public void startInternal() throws Exception {
if (!running) {
running = true;
paused = false;
processorCache = new SynchronizedStack<>(SynchronizedStack.DEFAULT_SIZE,
socketProperties.getProcessorCache());
eventCache = new SynchronizedStack<>(SynchronizedStack.DEFAULT_SIZE,
socketProperties.getEventCache());
nioChannels = new SynchronizedStack<>(SynchronizedStack.DEFAULT_SIZE,
socketProperties.getBufferPool());
// Create worker collection
//Create the worker thread pool if none was provided
if ( getExecutor() == null ) {
createExecutor();
}
initializeConnectionLatch();
// Start poller threads
pollers = new Poller[getPollerThreadCount()];
for (int i = 0; i < pollers.length; i++) {
    pollers[i] = new Poller();
    Thread pollerThread = new Thread(pollers[i], getName() + "-ClientPoller-" + i);
    pollerThread.setPriority(threadPriority);
    pollerThread.setDaemon(true);
    pollerThread.start();
}

// Start the acceptor threads
startAcceptorThreads();
}
}
Initializing the ServerSocketChannel and binding the server address
When NioEndpoint initializes the ServerSocket it calls configureBlocking(true), so accept() blocks until a connection arrives.
private volatile ServerSocketChannel serverSock = null;
// Separated out to make it easier for folks that extend NioEndpoint to
// implement custom [server]sockets
protected void initServerSocket() throws Exception {
if (!getUseInheritedChannel()) {
serverSock = ServerSocketChannel.open();
socketProperties.setProperties(serverSock.socket());
InetSocketAddress addr = new InetSocketAddress(getAddress(), getPortWithOffset());
serverSock.socket().bind(addr,getAcceptCount());
} else {
// Retrieve the channel provided by the OS
Channel ic = System.inheritedChannel();
if (ic instanceof ServerSocketChannel) {
serverSock = (ServerSocketChannel) ic;
}
if (serverSock == null) {
throw new IllegalArgumentException(sm.getString("endpoint.init.bind.inherited"));
}
}
serverSock.configureBlocking(true); //mimic APR behavior
}
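Since the channel is left in blocking mode, accept() behaves like classic blocking I/O. A tiny standalone demo of the same setup (the port 8080 and backlog value of 100 are arbitrary choices for the example):

import java.net.InetSocketAddress;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;

public class BlockingAcceptDemo {
    public static void main(String[] args) throws Exception {
        ServerSocketChannel server = ServerSocketChannel.open();
        server.bind(new InetSocketAddress(8080), 100); // backlog, like acceptCount
        server.configureBlocking(true);                // accept() will block
        SocketChannel client = server.accept();        // blocks until a client connects
        System.out.println("Accepted " + client.getRemoteAddress());
        client.close();
        server.close();
    }
}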
@Override
public void bind() throws Exception {
initServerSocket();
// Initialize thread count defaults for acceptor, poller
if (acceptorThreadCount == 0) {
// FIXME: Doesn't seem to work that well with multiple accept threads
//Worth noting: configuring multiple acceptor threads does not work particularly well
acceptorThreadCount = 1;
}
if (pollerThreadCount <= 0) {
//minimum one poller thread
pollerThreadCount = 1;
}
setStopLatch(new CountDownLatch(pollerThreadCount));
// Initialize SSL if needed
initialiseSsl();
selectorPool.open();
}
1. The Acceptor thread blocks waiting for client connections
As described above, Acceptor#run() calls serverSocketAccept(), which blocks until a client connects.
@Override
protected SocketChannel serverSocketAccept() throws Exception {
return serverSock.accept();
}
2. The Acceptor thread hands the socket off to a poller thread
As described above, Acceptor#run() calls NioEndpoint#setSocketOptions() to delegate the socket to a poller thread.
/**
 * Process the specified connection.
 * @param socket The socket channel
 * @return <code>true</code> if the socket was correctly configured
 *  and processing may continue, <code>false</code> if the socket needs to be
 *  closed immediately
 */
@Override
protected boolean setSocketOptions(SocketChannel socket) {
// Process the connection
try {
// Disable blocking, polling will be used
socket.configureBlocking(false);
Socket sock = socket.socket();
socketProperties.setProperties(sock);
//Reuse a NioChannel; create a new one only when nioChannels.pop() returns null
NioChannel channel = nioChannels.pop();
if (channel == null) {
SocketBufferHandler bufhandler = new SocketBufferHandler(
socketProperties.getAppReadBufSize(),
socketProperties.getAppWriteBufSize(),
socketProperties.getDirectBuffer());
if (isSSLEnabled()) {
channel = new SecureNioChannel(socket, bufhandler, selectorPool, this);
} else {
channel = new NioChannel(socket, bufhandler);
}
} else {
channel.setIOChannel(socket);
channel.reset();
}
//Delegate the socket to a poller thread
getPoller0().register(channel);
} catch (Throwable t) {
ExceptionUtils.handleThrowable(t);
try {
log.error(sm.getString("endpoint.socketOptionsError"), t);
} catch (Throwable tt) {
ExceptionUtils.handleThrowable(tt);
}
// Tell to close the socket
return false;
}
return true;
}
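The pop/push recycling seen here with nioChannels (and later with eventCache and processorCache) is a simple object-pool pattern that cuts allocation churn on the connection hot path. A minimal generic sketch of the idea; SimplePool is a hypothetical stand-in for Tomcat's SynchronizedStack:

import java.util.ArrayDeque;
import java.util.Deque;

// Recycle expensive wrapper objects instead of allocating one per connection.
class SimplePool<T> {
    private final Deque<T> stack = new ArrayDeque<>();

    synchronized T pop() {
        return stack.pollFirst();   // null when empty: the caller creates a new one
    }

    synchronized void push(T item) {
        stack.offerFirst(item);     // return a reset object for later reuse
    }
}

Callers mirror the Tomcat pattern: pop(), create a fresh object on null, and push() the reset object back when finished with it.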
The NioEndpoint#getPoller0() method
Fetches the next available poller from the poller group:
/**
* Return an available poller in true round robin fashion.
*
* @return The next poller in sequence
*/
public Poller getPoller0() {
int idx = Math.abs(pollerRotater.incrementAndGet()) % pollers.length;
return pollers[idx];
}
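One caveat with this idiom: Math.abs(Integer.MIN_VALUE) is still negative, so when the AtomicInteger wraps around, a single call could in theory produce a negative index. A wrap-proof variant (illustrative, not Tomcat code) masks the sign bit instead:

import java.util.concurrent.atomic.AtomicInteger;

class RoundRobin {
    private final AtomicInteger counter = new AtomicInteger();

    // Clearing the sign bit keeps the index non-negative even after the
    // counter overflows past Integer.MAX_VALUE.
    int next(int size) {
        return (counter.incrementAndGet() & Integer.MAX_VALUE) % size;
    }
}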
A poller wraps a Selector, which is created when the Poller is constructed.
public class Poller implements Runnable {
    private Selector selector;
    // Queue of pending PollerEvents
    private final SynchronizedQueue<PollerEvent> events =
            new SynchronizedQueue<>();
    // The Selector is opened when the Poller is constructed
    public Poller() throws IOException {
        this.selector = Selector.open();
    }
    // ... remainder omitted ...
}
1. The poller thread accepts socket hand-offs from the acceptor thread
The socket is wrapped in a PollerEvent and added to the PollerEvent queue.
Note that register() here is only a pseudo-registration: it merely caches the socket in the queue; it does not actually register the socket with the Selector yet.
/**
* Registers a newly created socket with the poller.
*
* @param socket The newly created socket
*/
public void register(final NioChannel socket) {
socket.setPoller(this);
//Wrap the NioChannel in a NioSocketWrapper
NioSocketWrapper socketWrapper = new NioSocketWrapper(socket, NioEndpoint.this);
socket.setSocketWrapper(socketWrapper);
socketWrapper.setPoller(this);
socketWrapper.setReadTimeout(getConnectionTimeout());
socketWrapper.setWriteTimeout(getConnectionTimeout());
socketWrapper.setKeepAliveLeft(NioEndpoint.this.getMaxKeepAliveRequests());
socketWrapper.setSecure(isSSLEnabled());
//Reuse a PollerEvent; create a new one only when eventCache.pop() returns null
PollerEvent r = eventCache.pop();
socketWrapper.interestOps(SelectionKey.OP_READ);//this is what OP_REGISTER turns into.
if (r == null) {
//Wrap the NioChannel and NioSocketWrapper in a PollerEvent with event type OP_REGISTER
r = new PollerEvent(socket, socketWrapper, OP_REGISTER);
} else {
r.reset(socket, socketWrapper, OP_REGISTER);
}
//Add it to the PollerEvent queue
addEvent(r);
}
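addEvent() is also where the poller gets woken up. Abridged from the same class: the wakeupCounter is set to -1 just before the poller blocks in select(), so incrementing it to 0 here means the poller is likely asleep and needs an explicit wakeup. This is the counterpart of the wakeupCounter.getAndSet(-1) check in the run() loop below.

private void addEvent(PollerEvent event) {
    events.offer(event);
    // wakeupCounter is -1 while the poller blocks in select(); reaching 0
    // here means we must wake the selector so the event is seen promptly
    if (wakeupCounter.incrementAndGet() == 0) {
        selector.wakeup();
    }
}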
2. The run() method
The main loop of Tomcat's background poller thread repeatedly does the following:
1. Drain the PollerEvent queue, registering each event's SocketChannel with the Selector for read events;
2. Process the SelectionKeys the Selector reports ready, delegating each of them to a worker thread;
3. Timeout handling: run timeout processing once per iteration when the relevant conditions are met;
4. Shutdown check: when told to stop, run the shutdown logic and exit the run() method's while loop.
/**
* The background thread that adds sockets to the Poller, checks the
* poller for triggered events and hands the associated socket off to an
* appropriate processor as events occur.
*/
@Override
public void run() {
// Loop until destroy() is called
while (true) {
boolean hasEvents = false;
try {
if (!close) {
hasEvents = events();
if (wakeupCounter.getAndSet(-1) > 0) {
// If we are here, means we have other stuff to do
// Do a non blocking select
keyCount = selector.selectNow();
} else {
//Block in select() for up to selectorTimeout waiting for ready SelectionKeys
keyCount = selector.select(selectorTimeout);
}
wakeupCounter.set(0);
}
if (close) {
events();
timeout(0, false);
try {
selector.close();
} catch (IOException ioe) {
log.error(sm.getString("endpoint.nio.selectorCloseFail"), ioe);
}
break;
}
} catch (Throwable x) {
ExceptionUtils.handleThrowable(x);
log.error(sm.getString("endpoint.nio.selectorLoopError"), x);
continue;
}
// Either we timed out or we woke up, process events first
//If the selector returned with no ready keys, drain the PollerEvent queue first
if ( keyCount == 0 ) hasEvents = (hasEvents | events());
//Fetch the set of ready SelectionKeys from the Selector
Iterator<SelectionKey> iterator =
    keyCount > 0 ? selector.selectedKeys().iterator() : null;
// Walk through the collection of ready keys and dispatch
// any active event.
while (iterator != null && iterator.hasNext()) {
SelectionKey sk = iterator.next();
NioSocketWrapper attachment = (NioSocketWrapper)sk.attachment();
// Attachment may be null if another thread has called
// cancelledKey()
if (attachment == null) {
iterator.remove();
} else {
iterator.remove();
//Process the ready SelectionKey
processKey(sk, attachment);
}
}
// Process timeouts
timeout(keyCount,hasEvents);
}
getStopLatch().countDown();
}
3. The Poller processes the events in the PollerEvent queue
The events() method
Iterates over all PollerEvents in the queue and invokes their run() methods.
private final SynchronizedQueue<PollerEvent> events =
        new SynchronizedQueue<>();
/**
 * Processes events in the event queue of the Poller.
 *
 * @return <code>true</code> if some events were processed,
 *   <code>false</code> if queue was empty
 */
public boolean events() {
boolean result = false;
PollerEvent pe = null;
for (int i = 0, size = events.size(); i < size && (pe = events.poll()) != null; i++ ) {
result = true;
try {
pe.run();
pe.reset();
if (running && !paused) {
eventCache.push(pe);
}
} catch ( Throwable x ) {
log.error(sm.getString("endpoint.nio.pollerEventError"), x);
}
}
return result;
}
PollerEvent#run() processes the event:
1. If interestOps is OP_REGISTER, the channel is registered with the Selector for read events.
2. For any other interestOps, Channel#keyFor() is called to look up the SelectionKey already registered with the Selector.
public static class PollerEvent implements Runnable {
//NioChannel#getIOChannel() yields the channel;
//NioChannel#getPoller().getSelector() yields the selector
private NioChannel socket;
//Ops the channel wants to register; always initialized to OP_REGISTER when queued
private int interestOps;
//Used as the SelectionKey's attachment
private NioSocketWrapper socketWrapper;
// ... constructor and reset() omitted ...
@Override
public void run() {
if (interestOps == OP_REGISTER) {
try {
//OP_REGISTER: register the channel with the selector for read events
socket.getIOChannel().register(
socket.getPoller().getSelector(), SelectionKey.OP_READ, socketWrapper);
} catch (Exception x) {
log.error(sm.getString("endpoint.nio.registerFail"), x);
}
} else {
final SelectionKey key = socket.getIOChannel().keyFor(socket.getPoller().getSelector());
try {
if (key == null) {
// The key was cancelled (e.g. due to socket closure)
// and removed from the selector while it was being
// processed. Count down the connections at this point
// since it won't have been counted down when the socket
// closed.
socket.socketWrapper.getEndpoint().countDownConnection();
((NioSocketWrapper) socket.socketWrapper).closed = true;
} else {
final NioSocketWrapper socketWrapper = (NioSocketWrapper) key.attachment();
if (socketWrapper != null) {
// We are registering the key to start with, reset the fairness counter.
int ops = key.interestOps() | interestOps;
socketWrapper.interestOps(ops);
//Update the event types the SelectionKey is interested in
key.interestOps(ops);
} else {
socket.getPoller().cancelledKey(key);
}
}
} catch (CancelledKeyException ckx) {
try {
socket.getPoller().cancelledKey(key);
} catch (Exception ignore) {}
}
}
}
}
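For readers less familiar with the NIO API: register() and keyFor() are two views of the same registration, which is exactly what the two branches above rely on. A standalone illustration:

import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.SocketChannel;

public class KeyForDemo {
    public static void main(String[] args) throws Exception {
        Selector sel = Selector.open();
        SocketChannel ch = SocketChannel.open();
        ch.configureBlocking(false);
        // register() creates the SelectionKey; keyFor() retrieves the same
        // key later, or returns null once the key has been deregistered.
        SelectionKey key = ch.register(sel, SelectionKey.OP_READ, "attachment");
        System.out.println(key == ch.keyFor(sel)); // true
        key.cancel();
        sel.selectNow();                           // flush the cancelled-key set
        System.out.println(ch.keyFor(sel));        // null
    }
}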
The processKey() method
It handles the SelectionKey's read-ready event first, then its write-ready event. Both paths call processSocket(), a method of the owning NioEndpoint instance whose implementation lives in AbstractEndpoint: if a thread pool is configured, the actual work is handed to a SocketProcessor running on the pool; otherwise the SocketProcessor runs on the current thread.
protected void processKey(SelectionKey sk, NioSocketWrapper socketWrapper) {
try {
if (close) {
//We have been told to close; cancel the SelectionKey passed in
cancelledKey(sk);
} else if (sk.isValid() && socketWrapper != null) {
if (sk.isReadable() || sk.isWritable()) {
if ( socketWrapper.getSendfileData() != null ) {
processSendfile(sk, socketWrapper, false);
} else {
unreg(sk, socketWrapper, sk.readyOps());
boolean closeSocket = false;
// Read goes before write
//Handle the read-ready event first
if (sk.isReadable()) {
if (socketWrapper.readOperation != null) {
getExecutor().execute(socketWrapper.readOperation);
} else if (!processSocket(socketWrapper, SocketEvent.OPEN_READ, true)) {
closeSocket = true;
}
}
//Then handle the write-ready event
if (!closeSocket && sk.isWritable()) {
if (socketWrapper.writeOperation != null) {
getExecutor().execute(socketWrapper.writeOperation);
} else if (!processSocket(socketWrapper, SocketEvent.OPEN_WRITE, true)) {
closeSocket = true;
}
}
if (closeSocket) {
cancelledKey(sk);
}
}
}
} else {
// Invalid key
cancelledKey(sk);
}
} catch (CancelledKeyException ckx) {
cancelledKey(sk);
} catch (Throwable t) {
ExceptionUtils.handleThrowable(t);
log.error(sm.getString("endpoint.nio.keyProcessingError"), t);
}
}
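The unreg() call above deserves a note: before dispatching to a worker, the poller strips the just-fired ops from the key's interest set. Otherwise the level-triggered Selector would report the same readiness again on the next loop iteration, and a second thread could be dispatched for the same socket. Its essence (an abridged sketch):

// Stop selecting on the ops that just fired until the worker re-registers
// interest (e.g. via registerReadInterest()); this keeps two threads from
// processing the same socket at once.
protected void unreg(SelectionKey sk, NioSocketWrapper socketWrapper, int readyOps) {
    int ops = sk.interestOps() & ~readyOps;
    sk.interestOps(ops);
    socketWrapper.interestOps(ops);
}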
1. The AbstractEndpoint#processSocket() method
Creates (or reuses) a SocketProcessor and submits it to the worker thread pool:
/**
* Process the given SocketWrapper with the given status. Used to trigger
* processing as if the Poller (for those endpoints that have one)
* selected the socket.
*
* @param socketWrapper The socket wrapper to process
* @param event The socket event to be processed
* @param dispatch Should the processing be performed on a new
* container thread
*
* @return if processing was triggered successfully
*/
public boolean processSocket(SocketWrapperBase<S> socketWrapper,
SocketEvent event, boolean dispatch) {
try {
if (socketWrapper == null) {
return false;
}
//Reuse a SocketProcessor; create a new one only when processorCache.pop() returns null
SocketProcessorBase<S> sc = processorCache.pop();
if (sc == null) {
sc = createSocketProcessor(socketWrapper, event);
} else {
sc.reset(socketWrapper, event);
}
//Fetch the worker thread pool
Executor executor = getExecutor();
if (dispatch && executor != null) {
//Submit the SocketProcessor to the worker thread pool for execution
executor.execute(sc);
} else {
sc.run();
}
} catch (RejectedExecutionException ree) {
getLog().warn(sm.getString("endpoint.executor.fail", socketWrapper) , ree);
return false;
} catch (Throwable t) {
ExceptionUtils.handleThrowable(t);
// This means we got an OOM or similar creating a thread, or that
// the pool and its queue are full
getLog().error(sm.getString("endpoint.process.fail"), t);
return false;
}
return true;
}
2. The SocketProcessor run() logic
The SocketChannel is injected into the SocketProcessor, and a worker thread executes the SocketProcessor's run() method.
@Override
public final void run() {
synchronized (socketWrapper) {
// It is possible that processing may be triggered for read and
// write at the same time. The sync above makes sure that processing
// does not occur in parallel. The test below ensures that if the
// first event to be processed results in the socket being closed,
// the subsequent events are not processed.
if (socketWrapper.isClosed()) {
return;
}
doRun();
}
}
@Override
protected void doRun() {
NioChannel socket = socketWrapper.getSocket();
SelectionKey key = socket.getIOChannel().keyFor(socket.getPoller().getSelector());
try {
int handshake = -1;
try {
if (key != null) {
if (socket.isHandshakeComplete()) {
// No TLS handshaking required. Let the handler
// process this socket / event combination.
handshake = 0;
} else if (event == SocketEvent.STOP || event == SocketEvent.DISCONNECT ||
event == SocketEvent.ERROR) {
// Unable to complete the TLS handshake. Treat it as
// if the handshake failed.
handshake = -1;
} else {
handshake = socket.handshake(key.isReadable(), key.isWritable());
// The handshake process reads/writes from/to the
// socket. status may therefore be OPEN_WRITE once
// the handshake completes. However, the handshake
// happens when the socket is opened so the status
// must always be OPEN_READ after it completes. It
// is OK to always set this as it is only used if
// the handshake completes.
event = SocketEvent.OPEN_READ;
}
}
} catch (IOException x) {
handshake = -1;
if (log.isDebugEnabled()) log.debug("Error during SSL handshake",x);
} catch (CancelledKeyException ckx) {
handshake = -1;
}
if (handshake == 0) {
SocketState state = SocketState.OPEN;
// Process the request from this socket
if (event == null) {
//AbstractProtocol.ConnectionHandler#process() handles the NioSocketWrapper
state = getHandler().process(socketWrapper, SocketEvent.OPEN_READ);
} else {
state = getHandler().process(socketWrapper, event);
}
if (state == SocketState.CLOSED) {
close(socket, key);
}
} else if (handshake == -1 ) {
close(socket, key);
} else if (handshake == SelectionKey.OP_READ){
socketWrapper.registerReadInterest();
} else if (handshake == SelectionKey.OP_WRITE){
socketWrapper.registerWriteInterest();
}
} catch (CancelledKeyException cx) {
socket.getPoller().cancelledKey(key);
} catch (VirtualMachineError vme) {
ExceptionUtils.handleThrowable(vme);
} catch (Throwable t) {
log.error(sm.getString("endpoint.processing.fail"), t);
socket.getPoller().cancelledKey(key);
} finally {
socketWrapper = null;
event = null;
//return to cache
if (running && !paused) {
processorCache.push(this);
}
}
}