// io.netty.channel.nio.NioEventLoop
// The event loop keeps scanning for I/O events
@Override
protected void run() {
for (;;) {
try {
switch (selectStrategy.calculateStrategy(selectNowSupplier, hasTasks())) {
case SelectStrategy.CONTINUE:
continue;
case SelectStrategy.SELECT:
select(wakenUp.getAndSet(false));
// 'wakenUp.compareAndSet(false, true)' is always evaluated
// before calling 'selector.wakeup()' to reduce the wake-up
// overhead. (Selector.wakeup() is an expensive operation.)
//
// However, there is a race condition in this approach.
// The race condition is triggered when 'wakenUp' is set to
// true too early.
//
// 'wakenUp' is set to true too early if:
// 1) Selector is waken up between 'wakenUp.set(false)' and
//    'selector.select(...)'. (BAD)
// 2) Selector is waken up between 'selector.select(...)' and
//    'if (wakenUp.get()) { ... }'. (OK)
//
// In the first case, 'wakenUp' is set to true and the
// following 'selector.select(...)' will wake up immediately.
// Until 'wakenUp' is set to false again in the next round,
// 'wakenUp.compareAndSet(false, true)' will fail, and therefore
// any attempt to wake up the Selector will fail, too, causing
// the following 'selector.select(...)' call to block
// unnecessarily.
//
// To fix this problem, we wake up the selector again if wakenUp
// is true immediately after selector.select(...).
// It is inefficient in that it wakes up the selector for both
// the first case (BAD - wake-up required) and the second case
// (OK - no wake-up required).
if (wakenUp.get()) {
selector.wakeup();
}
default:
// fallthrough
}
cancelledKeys = 0;
needsToSelectAgain = false;
final int ioRatio = this.ioRatio;
if (ioRatio == 100) {
try {
processSelectedKeys();
} finally {
// Ensure we always run tasks.
runAllTasks();
}
} else {
final long ioStartTime = System.nanoTime();
try {
// Process the selected I/O events
processSelectedKeys();
} finally {
// Ensure we always run tasks.
final long ioTime = System.nanoTime() - ioStartTime;
runAllTasks(ioTime * (100 - ioRatio) / ioRatio);
}
}
} catch (Throwable t) {
handleLoopException(t);
}
// Always handle shutdown even if the loop processing threw an exception.
try {
if (isShuttingDown()) {
closeAll();
if (confirmShutdown()) {
return;
}
}
} catch (Throwable t) {
handleLoopException(t);
}
}
}
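As a quick sanity check of the ioRatio budget used above: runAllTasks is given ioTime * (100 - ioRatio) / ioRatio nanoseconds, so with ioRatio = 50 the task queue gets as much time as the I/O phase took, and with ioRatio = 80 it gets a quarter of that. A tiny stand-alone sketch of the arithmetic (the numbers are made up for illustration):

// Illustrates the task-time budget formula from run():
// taskBudget = ioTime * (100 - ioRatio) / ioRatio
public class IoRatioBudgetDemo {
    public static void main(String[] args) {
        long ioTimeNanos = 2_000_000L; // pretend processSelectedKeys() took 2 ms
        for (int ioRatio : new int[]{50, 80}) {
            long budget = ioTimeNanos * (100 - ioRatio) / ioRatio;
            System.out.println("ioRatio=" + ioRatio + " -> task budget " + budget + " ns");
        }
    }
}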
// Dispatch the selected events
private void processSelectedKeys() {
if (selectedKeys != null) {
// Use the optimized selectedKeys set for processing
processSelectedKeysOptimized();
} else {
processSelectedKeysPlain(selector.selectedKeys());
}
}
private void processSelectedKeysOptimized() {
for (int i = 0; i < selectedKeys.size; ++i) {
final SelectionKey k = selectedKeys.keys[i];
// null out entry in the array to allow to have it GC'ed once the Channel close
// See https://github.com/netty/netty/issues/2363
selectedKeys.keys[i] = null;
final Object a = k.attachment();
if (a instanceof AbstractNioChannel) {
processSelectedKey(k, (AbstractNioChannel) a);
} else {
@SuppressWarnings("unchecked")
NioTask<SelectableChannel> task = (NioTask<SelectableChannel>) a;
processSelectedKey(k, task);
}
if (needsToSelectAgain) {
// null out entries in the array to allow to have it GC'ed once the Channel close
// See https://github.com/netty/netty/issues/2363
selectedKeys.reset(i + 1);
selectAgain();
i = -1;
}
}
}
private void processSelectedKey(SelectionKey k, AbstractNioChannel ch) {
final AbstractNioChannel.NioUnsafe unsafe = ch.unsafe();
if (!k.isValid()) {
final EventLoop eventLoop;
try {
eventLoop = ch.eventLoop();
} catch (Throwable ignored) {
// If the channel implementation throws an exception because there is no event loop, we ignore this
// because we are only trying to determine if ch is registered to this event loop and thus has authority
// to close ch.
return;
}
// Only close ch if ch is still registered to this EventLoop. ch could have deregistered from the event loop
// and thus the SelectionKey could be cancelled as part of the deregistration process, but the channel is
// still healthy and should not be closed.
// See https://github.com/netty/netty/issues/5125
if (eventLoop != this || eventLoop == null) {
return;
}
// close the channel if the key is not valid anymore
unsafe.close(unsafe.voidPromise());
return;
}
try {
int readyOps = k.readyOps();
// We first need to call finishConnect() before try to trigger a read(...) or write(...) as otherwise
// the NIO JDK channel implementation may throw a NotYetConnectedException.
if ((readyOps & SelectionKey.OP_CONNECT) != 0) {
// remove OP_CONNECT as otherwise Selector.select(..) will always return without blocking
// See https://github.com/netty/netty/issues/924
int ops = k.interestOps();
ops &= ~SelectionKey.OP_CONNECT;
k.interestOps(ops);
unsafe.finishConnect();
}
// Process OP_WRITE first as we may be able to write some queued buffers and so free memory.
if ((readyOps & SelectionKey.OP_WRITE) != 0) {
// Call forceFlush which will also take care of clear the OP_WRITE once there is nothing left to write
ch.unsafe().forceFlush();
}
// Also check for readOps of 0 to workaround possible JDK bug which may otherwise lead
// to a spin loop
// Read the data; the unsafe implementation performs the read loop
if ((readyOps & (SelectionKey.OP_READ | SelectionKey.OP_ACCEPT)) != 0 || readyOps == 0) {
unsafe.read();
}
} catch (CancelledKeyException ignored) {
unsafe.close(unsafe.voidPromise());
}
}
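A small, self-contained illustration of the readyOps bit tests used above: each operation is a single bit in the interest/ready sets, so a bitwise AND checks readiness and an AND with the complement clears an interest, as done for OP_CONNECT after finishConnect():

import java.nio.channels.SelectionKey;

public class ReadyOpsDemo {
    public static void main(String[] args) {
        int readyOps = SelectionKey.OP_READ | SelectionKey.OP_ACCEPT;
        System.out.println((readyOps & SelectionKey.OP_CONNECT) != 0);                          // false
        System.out.println((readyOps & (SelectionKey.OP_READ | SelectionKey.OP_ACCEPT)) != 0); // true

        // Clearing a bit, analogous to ops &= ~SelectionKey.OP_CONNECT above:
        int ops = readyOps & ~SelectionKey.OP_ACCEPT;
        System.out.println((ops & SelectionKey.OP_ACCEPT) != 0); // false
    }
}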
// io.netty.channel.nio.AbstractNioMessageChannel
// Performs the actual read of inbound data
private final class NioMessageUnsafe extends AbstractNioUnsafe {
private final List

Starting a new thread to run the event loop

// io.netty.util.concurrent.SingleThreadEventExecutor
// The work is handed off to another executor, which runs the loop on a newly started thread
private void doStartThread() {
assert thread == null;
executor.execute(new Runnable() {
@Override
public void run() {
thread = Thread.currentThread();
if (interrupted) {
thread.interrupt();
}
boolean success = false;
updateLastExecutionTime();
try {
SingleThreadEventExecutor.this.run();
success = true;
} catch (Throwable t) {
logger.warn("Unexpected exception from an event executor: ", t);
} finally {
for (;;) {
int oldState = STATE_UPDATER.get(SingleThreadEventExecutor.this);
if (oldState >= ST_SHUTTING_DOWN || STATE_UPDATER.compareAndSet(
SingleThreadEventExecutor.this, oldState, ST_SHUTTING_DOWN)) {
break;
}
}
// Check if confirmShutdown() was called at the end of the loop.
if (success && gracefulShutdownStartTime == 0) {
logger.error("Buggy " + EventExecutor.class.getSimpleName() + " implementation; " +
SingleThreadEventExecutor.class.getSimpleName() + ".confirmShutdown() must be called " +
"before run() implementation terminates.");
}
try {
// Run all remaining tasks and shutdown hooks.
for (;;) {
if (confirmShutdown()) {
break;
}
}
} finally {
try {
cleanup();
} finally {
STATE_UPDATER.set(SingleThreadEventExecutor.this, ST_TERMINATED);
threadLock.release();
if (!taskQueue.isEmpty()) {
logger.warn(
"An event executor terminated with " +
"non-empty task queue (" + taskQueue.size() + ')');
}
terminationFuture.setSuccess(null);
}
}
}
}
});
}
// io.netty.util.concurrent.ThreadPerTaskExecutor
public final class ThreadPerTaskExecutor implements Executor {
private final ThreadFactory threadFactory;
public ThreadPerTaskExecutor(ThreadFactory threadFactory) {
if (threadFactory == null) {
throw new NullPointerException("threadFactory");
}
this.threadFactory = threadFactory;
}
@Override
public void execute(Runnable command) {
threadFactory.newThread(command).start();
}
}
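A minimal usage sketch of ThreadPerTaskExecutor: every execute() call spawns a brand-new thread from the factory; there is no pooling or queueing at this layer (the event loop guarantees that doStartThread() only ever hands over one long-running Runnable). The thread name below is just an example:

import io.netty.util.concurrent.ThreadPerTaskExecutor;
import java.util.concurrent.Executor;
import java.util.concurrent.ThreadFactory;

public class ThreadPerTaskExecutorDemo {
    public static void main(String[] args) {
        // Each submitted Runnable gets its own freshly created thread.
        ThreadFactory factory = r -> new Thread(r, "demo-event-loop-thread");
        Executor executor = new ThreadPerTaskExecutor(factory);
        executor.execute(() -> System.out.println("running on " + Thread.currentThread().getName()));
    }
}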
// io.netty.channel.nio.NioEventLoop
@Override
protected void wakeup(boolean inEventLoop) {
if (!inEventLoop && wakenUp.compareAndSet(false, true)) {
selector.wakeup();
}
}
The actual parsing of the data is triggered when fireChannelRead is called.
// io.netty.channel.DefaultChannelPipeline
@Override
public final ChannelPipeline fireChannelRead(Object msg) {
AbstractChannelHandlerContext.invokeChannelRead(head, msg);
return this;
}
// io.netty.channel.AbstractChannelHandlerContext
// Invokes channelRead() on the handlers of the inbound chain in turn
static void invokeChannelRead(final AbstractChannelHandlerContext next, Object msg) {
final Object m = next.pipeline.touch(ObjectUtil.checkNotNull(msg, "msg"), next);
EventExecutor executor = next.executor();
if (executor.inEventLoop()) {
next.invokeChannelRead(m);
} else {
executor.execute(new Runnable() {
@Override
public void run() {
next.invokeChannelRead(m);
}
});
}
}
private void invokeChannelRead(Object msg) {
if (invokeHandler()) {
try {
((ChannelInboundHandler) handler()).channelRead(this, msg);
} catch (Throwable t) {
notifyHandlerException(t);
}
} else {
fireChannelRead(msg);
}
}
// HeadContext
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
ctx.fireChannelRead(msg);
}
// AbstractChannelHandlerContext
@Override
public ChannelHandlerContext fireChannelRead(final Object msg) {
invokeChannelRead(findContextInbound(), msg);
return this;
}
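To make this hand-off concrete, here is a small, self-contained sketch (using Netty's EmbeddedChannel purely as a test harness) showing that a message only reaches the next inbound handler when the previous one calls ctx.fireChannelRead():

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.embedded.EmbeddedChannel;

public class InboundPropagationDemo {
    public static void main(String[] args) {
        EmbeddedChannel ch = new EmbeddedChannel(
                new ChannelInboundHandlerAdapter() {
                    @Override
                    public void channelRead(ChannelHandlerContext ctx, Object msg) {
                        System.out.println("handler A saw: " + msg);
                        ctx.fireChannelRead(msg); // forward to the next inbound handler
                    }
                },
                new ChannelInboundHandlerAdapter() {
                    @Override
                    public void channelRead(ChannelHandlerContext ctx, Object msg) {
                        System.out.println("handler B saw: " + msg); // chain stops here
                    }
                });
        ch.writeInbound("hello");
        ch.finish();
    }
}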
// io.netty.handler.codec.ByteToMessageDecoder
// This is the class where the inbound bytes actually get decoded
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
if (msg instanceof ByteBuf) {
CodecOutputList out = CodecOutputList.newInstance();
try {
ByteBuf data = (ByteBuf) msg;
first = cumulation == null;
// Re-accumulate when a packet arrives split across multiple reads
if (first) {
cumulation = data;
} else {
cumulation = cumulator.cumulate(ctx.alloc(), cumulation, data);
}
// Invoke the decoding logic
callDecode(ctx, cumulation, out);
} catch (DecoderException e) {
throw e;
} catch (Throwable t) {
throw new DecoderException(t);
} finally {
if (cumulation != null && !cumulation.isReadable()) {
numReads = 0;
cumulation.release();
cumulation = null;
} else if (++ numReads >= discardAfterReads) {
// We did enough reads already try to discard some bytes so we not risk to see a OOME.
// See https://github.com/netty/netty/issues/4275
numReads = 0;
discardSomeReadBytes();
}
int size = out.size();
decodeWasNull = !out.insertSinceRecycled();
// If anything was decoded, pass it on to the next inbound handler
fireChannelRead(ctx, out, size);
out.recycle();
}
} else {
ctx.fireChannelRead(msg);
}
}
/**
* Called once data should be decoded from the given {@link ByteBuf}. This method will call
* {@link #decode(ChannelHandlerContext, ByteBuf, List)} as long as decoding should take place.
*
* @param ctx the {@link ChannelHandlerContext} which this {@link ByteToMessageDecoder} belongs to
* @param in the {@link ByteBuf} from which to read data
* @param out the {@link List} to which decoded messages should be added
*/
protected void callDecode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) {
try {
// Keep calling decode in a loop as long as readable bytes remain
while (in.isReadable()) {
int outSize = out.size();
if (outSize > 0) {
fireChannelRead(ctx, out, outSize);
out.clear();
// Check if this handler was removed before continuing with decoding.
// If it was removed, it is not safe to continue to operate on the buffer.
//
// See:
// - https://github.com/netty/netty/issues/4635
if (ctx.isRemoved()) {
break;
}
outSize = 0;
}
int oldInputLength = in.readableBytes();
// Call the user-implemented decode method, which reassembles the data;
// business processing is then layered on by adding further handlers to the pipeline
decode(ctx, in, out);
// Check if this handler was removed before continuing the loop.
// If it was removed, it is not safe to continue to operate on the buffer.
//
// See https://github.com/netty/netty/issues/1664
if (ctx.isRemoved()) {
break;
}
if (outSize == out.size()) {
if (oldInputLength == in.readableBytes()) {
break;
} else {
continue;
}
}
if (oldInputLength == in.readableBytes()) {
throw new DecoderException(
StringUtil.simpleClassName(getClass()) +
".decode() did not read anything but decoded a message.");
}
if (isSingleDecode()) {
break;
}
}
} catch (DecoderException e) {
throw e;
} catch (Throwable cause) {
thrownew DecoderException(cause);
}
}
/**
* Get {@code numElements} out of the {@link CodecOutputList} and forward these through the pipeline.
*/
static void fireChannelRead(ChannelHandlerContext ctx, CodecOutputList msgs, int numElements) {
// fireChannelRead is invoked once for every decoded element
for (int i = 0; i < numElements; i ++) {
ctx.fireChannelRead(msgs.getUnsafe(i));
}
}
If you wrote this packet-assembly logic yourself, it might look like the following (simply wait until all the data has arrived and only then pass it to the next handler):
@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
if (in.readableBytes() < 4) {
return;
}
in.markReaderIndex();
int dataLength = in.readInt();
// If the packet is not complete yet, wait for the next invocation
if (in.readableBytes() < dataLength) {
in.resetReaderIndex();
return;
}
byte[] data = new byte[dataLength];
in.readBytes(data);
Object obj = JSON.parseObject(data, target);
out.add(obj);
}
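For completeness, the matching outbound side of that hypothetical protocol could be a simple length-prefix encoder, sketched below. This is not taken from the Netty or Dubbo sources quoted here; fastjson is assumed only because the decode() example above uses JSON.parseObject:

import com.alibaba.fastjson.JSON;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.MessageToByteEncoder;

public class LengthPrefixedJsonEncoder extends MessageToByteEncoder<Object> {
    @Override
    protected void encode(ChannelHandlerContext ctx, Object msg, ByteBuf out) {
        byte[] data = JSON.toJSONBytes(msg);
        out.writeInt(data.length); // 4-byte length prefix, read back by in.readInt() in decode()
        out.writeBytes(data);      // JSON payload, read back by in.readBytes(data)
    }
}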
When the inbound path is invoked multiple times for one logical packet, the fragments are stitched together by the cumulate method:

// io.netty.handler.codec.ByteToMessageDecoder
// Fragments arriving across multiple channelRead calls are merged via cumulate
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
if (msg instanceof ByteBuf) {
CodecOutputList out = CodecOutputList.newInstance();
try {
ByteBuf data = (ByteBuf) msg;
first = cumulation == null;
if (first) {
cumulation = data;
} else {
// Merge the new fragment into the accumulated buffer
cumulation = cumulator.cumulate(ctx.alloc(), cumulation, data);
}
callDecode(ctx, cumulation, out);
} catch (DecoderException e) {
throw e;
} catch (Throwable t) {
throw new DecoderException(t);
} finally {
if (cumulation != null && !cumulation.isReadable()) {
numReads = 0;
cumulation.release();
cumulation = null;
} else if (++ numReads >= discardAfterReads) {
// We did enough reads already try to discard some bytes so we not risk to see a OOME.
// See https://github.com/netty/netty/issues/4275
numReads = 0;
discardSomeReadBytes();
}
int size = out.size();
decodeWasNull = !out.insertSinceRecycled();
fireChannelRead(ctx, out, size);
out.recycle();
}
} else {
ctx.fireChannelRead(msg);
}
}
/**
* Cumulate {@link ByteBuf}s by merge them into one {@link ByteBuf}'s, using memory copies.
*/
public static final Cumulator MERGE_CUMULATOR = new Cumulator() {
@Override
public ByteBuf cumulate(ByteBufAllocator alloc, ByteBuf cumulation, ByteBuf in) {
final ByteBuf buffer;
if (cumulation.writerIndex() > cumulation.maxCapacity() - in.readableBytes()
|| cumulation.refCnt() > 1 || cumulation.isReadOnly()) {
// Expand cumulation (by replace it) when either there is not more room in the buffer
// or if the refCnt is greater then 1 which may happen when the user use slice().retain() or
// duplicate().retain() or if its read-only.
//
// See:
// - https://github.com/netty/netty/issues/2327
// - https://github.com/netty/netty/issues/1764
buffer = expandCumulation(alloc, cumulation, in.readableBytes());
} else {
buffer = cumulation;
}
buffer.writeBytes(in);
in.release();
return buffer;
}
};
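A small stand-alone sketch of what MERGE_CUMULATOR does for the channelRead() above: a length prefix and its payload arrive in two separate reads, and the cumulator merges them into one contiguous, readable buffer so that the next callDecode() sees the whole frame (the example bytes are made up):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.Unpooled;
import io.netty.handler.codec.ByteToMessageDecoder;

public class MergeCumulatorDemo {
    public static void main(String[] args) {
        ByteBuf first = Unpooled.wrappedBuffer(new byte[]{0, 0, 0, 4});          // only the length prefix arrived
        ByteBuf second = Unpooled.wrappedBuffer(new byte[]{'p', 'i', 'n', 'g'}); // the payload arrives in a later read

        ByteBuf cumulation = first; // first read: cumulation == data
        cumulation = ByteToMessageDecoder.MERGE_CUMULATOR.cumulate(
                ByteBufAllocator.DEFAULT, cumulation, second); // second read: merge into one buffer

        System.out.println(cumulation.readableBytes()); // 8 -> prefix and payload are now contiguous
        cumulation.release();
    }
}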
Now let's look at how Dubbo assembles its packets (an application of NEED_MORE_INPUT).

// Decoder handling logic
// org.apache.dubbo.remoting.transport.netty4.NettyCodecAdapter
private class InternalDecoder extends ByteToMessageDecoder {
@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf input, List<Object> out) throws Exception {
ChannelBuffer message = new NettyBackedChannelBuffer(input);
NettyChannel channel = NettyChannel.getOrAddChannel(ctx.channel(), url, handler);
try {
// decode object.
do {
int saveReaderIndex = message.readerIndex();
Object msg = codec.decode(channel, message);
// Whenever the NEED_MORE_INPUT marker is returned, this round of receiving is not considered complete and we wait for the next callback;
// the buffer is first handed to the chain of codecs for decoding
if (msg == Codec2.DecodeResult.NEED_MORE_INPUT) {
message.readerIndex(saveReaderIndex);
break;
} else {
//is it possible to go here ?
if (saveReaderIndex == message.readerIndex()) {
throw new IOException("Decode without read data.");
}
if (msg != null) {
out.add(msg);
}
}
} while (message.readable());
} finally {
NettyChannel.removeChannelIfDisconnected(ctx.channel());
}
}
}
// org.apache.dubbo.rpc.protocol.dubbo.DubboCountCodec
@Override
public Object decode(Channel channel, ChannelBuffer buffer) throws IOException {
int save = buffer.readerIndex();
MultiMessage result = MultiMessage.create();
do {
Object obj = codec.decode(channel, buffer);
if (Codec2.DecodeResult.NEED_MORE_INPUT == obj) {
buffer.readerIndex(save);
break;
} else {
result.addMessage(obj);
logMessageLength(obj, buffer.readerIndex() - save);
save = buffer.readerIndex();
}
} while (true);
if (result.isEmpty()) {
return Codec2.DecodeResult.NEED_MORE_INPUT;
}
if (result.size() == 1) {
return result.get(0);
}
return result;
}
// org.apache.dubbo.remoting.exchange.codec.ExchangeCodec
@Override
public Object decode(Channel channel, ChannelBuffer buffer) throws IOException {
int readable = buffer.readableBytes();
// As you can see, every packet carries a header; once it is parsed we know the packet's type and length
byte[] header = new byte[Math.min(readable, HEADER_LENGTH)];
buffer.readBytes(header);
return decode(channel, buffer, readable, header);
}
@Override
protected Object decode(Channel channel, ChannelBuffer buffer, int readable, byte[] header) throws IOException {
// check magic number.
if (readable > 0 && header[0] != MAGIC_HIGH
|| readable > 1 && header[1] != MAGIC_LOW) {
int length = header.length;
if (header.length < readable) {
header = Bytes.copyOf(header, readable);
buffer.readBytes(header, length, readable - length);
}
for (int i = 1; i < header.length - 1; i++) {
if (header[i] == MAGIC_HIGH && header[i + 1] == MAGIC_LOW) {
buffer.readerIndex(buffer.readerIndex() - header.length + i);
header = Bytes.copyOf(header, i);
break;
}
}
return super.decode(channel, buffer, readable, header);
}
// check length.
if (readable < HEADER_LENGTH) {
return DecodeResult.NEED_MORE_INPUT;
}
// get data length.
int len = Bytes.bytes2int(header, 12);
checkPayload(channel, len);
// If the buffered data has not yet reached the required length, return NEED_MORE_INPUT
int tt = len + HEADER_LENGTH;
if (readable < tt) {
return DecodeResult.NEED_MORE_INPUT;
}
// limit input stream.
ChannelBufferInputStream is = new ChannelBufferInputStream(buffer, len);
try {
return decodeBody(channel, is, header);
} finally {
if (is.available() > 0) {
try {
if (logger.isWarnEnabled()) {
logger.warn("Skip input stream " + is.available());
}
StreamUtils.skipUnusedStream(is);
} catch (IOException e) {
logger.warn(e.getMessage(), e);
}
}
}
}
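Put differently, the completeness check above boils down to: the fixed 16-byte exchange header carries the body length in its last four bytes (hence Bytes.bytes2int(header, 12)), and a frame is only decodable once readable >= HEADER_LENGTH + bodyLength; anything short of that yields NEED_MORE_INPUT. A minimal stand-alone sketch of that rule (the helper name and the use of java.nio.ByteBuffer are mine, not Dubbo's):

import java.nio.ByteBuffer;

public class DubboFrameCheckSketch {
    static final int HEADER_LENGTH = 16; // same fixed header size as ExchangeCodec

    // Returns -1 when more input is needed (the NEED_MORE_INPUT case),
    // otherwise the total frame length in bytes.
    static int frameLength(ByteBuffer buf) {
        if (buf.remaining() < HEADER_LENGTH) {
            return -1; // the header itself is incomplete
        }
        int bodyLength = buf.getInt(buf.position() + 12); // body length lives at header offset 12
        int total = HEADER_LENGTH + bodyLength;
        return buf.remaining() < total ? -1 : total;
    }

    public static void main(String[] args) {
        ByteBuffer onlyHeader = ByteBuffer.allocate(HEADER_LENGTH);
        onlyHeader.putInt(12, 5);                    // header says the body is 5 bytes long
        System.out.println(frameLength(onlyHeader)); // -1: the body has not arrived yet

        ByteBuffer full = ByteBuffer.allocate(HEADER_LENGTH + 5);
        full.putInt(12, 5);
        System.out.println(frameLength(full));       // 21: header + body are both readable
    }
}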
// org.apache.dubbo.remoting.telnet.codec.TelnetCodec
@SuppressWarnings("unchecked")
protected Object decode(Channel channel, ChannelBuffer buffer, int readable, byte[] message) throws IOException {
if (isClientSide(channel)) {
return toString(message, getCharset(channel));
}
checkPayload(channel, readable);
if (message == null || message.length == 0) {
return DecodeResult.NEED_MORE_INPUT;
}
if (message[message.length - 1] == '\b') { // Windows backspace echo
try {
boolean doublechar = message.length >= 3 && message[message.length - 3] < 0; // double byte char
channel.send(new String(doublechar ? new byte[]{32, 32, 8, 8} : new byte[]{32, 8}, getCharset(channel).name()));
} catch (RemotingException e) {
throw new IOException(StringUtils.toString(e));
}
return DecodeResult.NEED_MORE_INPUT;
}
for (Object command : EXIT) {
if (isEquals(message, (byte[]) command)) {
if (logger.isInfoEnabled()) {
logger.info(new Exception("Close channel " + channel + " on exit command: " + Arrays.toString((byte[]) command)));
}
channel.close();
return null;
}
}
boolean up = endsWith(message, UP);
boolean down = endsWith(message, DOWN);
if (up || down) {
LinkedList history = (LinkedList) channel.getAttribute(HISTORY_LIST_KEY);
if (CollectionUtils.isEmpty(history)) {
return DecodeResult.NEED_MORE_INPUT;
}
Integer index = (Integer) channel.getAttribute(HISTORY_INDEX_KEY);
Integer old = index;
if (index == null) {
index = history.size() - 1;
} else {
if (up) {
index = index - 1;
if (index < 0) {
index = history.size() - 1;
}
} else {
index = index + 1;
if (index > history.size() - 1) {
index = 0;
}
}
}
if (old == null || !old.equals(index)) {
channel.setAttribute(HISTORY_INDEX_KEY, index);
String value = history.get(index);
if (old != null && old >= 0 && old < history.size()) {
String ov = history.get(old);
StringBuilder buf = new StringBuilder();
for (int i = 0; i < ov.length(); i++) {
buf.append("\b");
}
for (int i = 0; i < ov.length(); i++) {
buf.append(" ");
}
for (int i = 0; i < ov.length(); i++) {
buf.append("\b");
}
value = buf.toString() + value;
}
try {
channel.send(value);
} catch (RemotingException e) {
throw new IOException(StringUtils.toString(e));
}
}
return DecodeResult.NEED_MORE_INPUT;
}
for (Object command : EXIT) {
if (isEquals(message, (byte[]) command)) {
if (logger.isInfoEnabled()) {
logger.info(new Exception("Close channel " + channel + " on exit command " + command));
}
channel.close();
return null;
}
}
byte[] enter = null;
for (Object command : ENTER) {
if (endsWith(message, (byte[]) command)) {
enter = (byte[]) command;
break;
}
}
if (enter == null) {
return DecodeResult.NEED_MORE_INPUT;
}
LinkedList history = (LinkedList) channel.getAttribute(HISTORY_LIST_KEY);
Integer index = (Integer) channel.getAttribute(HISTORY_INDEX_KEY);
channel.removeAttribute(HISTORY_INDEX_KEY);
if (CollectionUtils.isNotEmpty(history) && index != null && index >= 0 && index < history.size()) {
String value = history.get(index);
if (value != null) {
byte[] b1 = value.getBytes();
byte[] b2 = new byte[b1.length + message.length];
System.arraycopy(b1, 0, b2, 0, b1.length);
System.arraycopy(message, 0, b2, b1.length, message.length);
message = b2;
}
}
String result = toString(message, getCharset(channel));
if (result.trim().length() > 0) {
if (history == null) {
history = new LinkedList();
channel.setAttribute(HISTORY_LIST_KEY, history);
}
if (history.isEmpty()) {
history.addLast(result);
} else if (!result.equals(history.getLast())) {
history.remove(result);
history.addLast(result);
if (history.size() > 10) {
history.removeFirst();
}
}
}
return result;
}