案例重现
首先我们通过具体的case重现一下TCP粘包的过程
我们模拟下故障场景,客户端循环一百次调用服务端传输报文,服务端接收报文并打印接收报文和计数,同时根据报文回应客户端
服务端代码
/**
 * Server-side handler for the sticky-packet demo: prints every inbound
 * message with a running counter and answers "QUERY TIME ORDER" with the
 * current time, anything else with "BAD ORDER".
 */
public class TimeServerHandler extends ChannelHandlerAdapter {
    // Number of channelRead invocations; with coalesced (sticky) packets this
    // ends up far below the 100 writes the client performed.
    private int count;

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
        ByteBuf byteBuf = (ByteBuf) msg;
        try {
            byte[] req = new byte[byteBuf.readableBytes()];
            byteBuf.readBytes(req);
            System.out.println("received msg length:" + req.length);
            // Strip the trailing line separator the client appends.
            // NOTE(review): assumes the frame ends with exactly one separator;
            // when packets stick together this is part of the demonstrated fault.
            String body = new String(req, "UTF-8")
                    .substring(0, req.length - System.getProperty("line.separator").length());
            System.out.println("the time server receive order:" + body + ";the counter is:" + ++count);
            String currentTime = "QUERY TIME ORDER".equalsIgnoreCase(body)
                    ? new java.util.Date(System.currentTimeMillis()).toString()
                    : "BAD ORDER";
            currentTime = currentTime + System.getProperty("line.separator");
            // writeAndFlush takes ownership of resp and releases it after the write.
            ByteBuf resp = Unpooled.copiedBuffer(currentTime.getBytes());
            ctx.writeAndFlush(resp);
        } finally {
            // Fix: this handler is the tail consumer of the inbound message, so it
            // must release the buffer or the reference count leaks.
            byteBuf.release();
        }
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
        // Fix: log the failure instead of silently dropping it before closing.
        cause.printStackTrace();
        ctx.close();
    }
}
public class TimeServer {
    /**
     * Binds the time server to {@code port} and blocks until the server
     * channel closes; always shuts the event-loop groups down gracefully.
     *
     * @param port TCP port to listen on
     * @throws Exception if binding or synchronization fails
     */
    public void bind(int port) throws Exception {
        // boss group accepts incoming connections ...
        EventLoopGroup bossGroup = new NioEventLoopGroup();
        // ... worker group performs the network reads/writes
        EventLoopGroup workerGroup = new NioEventLoopGroup();
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.group(bossGroup, workerGroup)
                    .channel(NioServerSocketChannel.class)
                    .option(ChannelOption.SO_BACKLOG, 1024)
                    .childHandler(new ChildChannelHander());
            ChannelFuture future = b.bind(port).sync();
            future.channel().closeFuture().sync();
        } finally {
            bossGroup.shutdownGracefully();
            workerGroup.shutdownGracefully();
        }
    }

    // Fix: parameterize ChannelInitializer with SocketChannel. With the raw
    // type, initChannel(SocketChannel) does not override the erased
    // initChannel(Channel), so the @Override annotation is a compile error.
    private class ChildChannelHander extends ChannelInitializer<SocketChannel> {
        @Override
        protected void initChannel(SocketChannel ch) throws Exception {
            // Frame/string decoders deliberately omitted so the sticky-packet
            // fault can be reproduced.
            ch.pipeline().addLast(new TimeServerHandler());
        }
    }

    public static void main(String[] args) {
        int port = 8080;
        if (args != null && args.length > 0) {
            try {
                port = Integer.parseInt(args[0]); // parseInt: no needless boxing
            } catch (NumberFormatException ignored) {
                // best-effort: fall back to the default port on malformed input
            }
        }
        try {
            new TimeServer().bind(port);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
客户端代码
/**
 * Client-side handler for the sticky-packet demo: on connect it fires the
 * same "QUERY TIME ORDER" request 100 times and counts the responses it
 * actually receives (far fewer than 100 when TCP coalesces the writes).
 */
public class TimeClientHandler extends ChannelHandlerAdapter {
    // Number of server responses observed on this channel.
    private int count;
    // Request payload, terminated with the platform line separator so a
    // line-based decoder could frame it.
    private final byte[] req;

    public TimeClientHandler() {
        req = ("QUERY TIME ORDER" + System.getProperty("line.separator")).getBytes();
    }

    @Override
    public void channelActive(ChannelHandlerContext ctx) throws Exception {
        // Burst 100 small writes; without a frame decoder the peer sees them
        // merged into a handful of large reads.
        for (int i = 0; i < 100; i++) {
            ByteBuf message = Unpooled.buffer(req.length);
            message.writeBytes(req);
            ctx.writeAndFlush(message); // writeAndFlush releases the buffer
        }
    }

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
        ByteBuf byteBuf = (ByteBuf) msg;
        try {
            byte[] resp = new byte[byteBuf.readableBytes()];
            byteBuf.readBytes(resp);
            String body = new String(resp, "UTF-8");
            System.out.println("Now is : " + body + "the counter is :" + ++count);
        } finally {
            // Fix: release the inbound buffer — this handler is the last
            // consumer, so skipping this leaks the reference count.
            byteBuf.release();
        }
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
        System.out.println("Unexpected exception from downstream :" + cause.getMessage());
        ctx.close();
    }
}
public class TimeClient {
    /**
     * Connects to the time server at {@code host}:{@code port} and blocks
     * until the channel closes; always shuts the event-loop group down.
     *
     * @param port server port
     * @param host server host
     * @throws Exception if the connect or synchronization fails
     */
    public void connect(int port, String host) throws Exception {
        // single event-loop group handles both connect and read/write I/O
        EventLoopGroup group = new NioEventLoopGroup();
        try {
            Bootstrap b = new Bootstrap();
            b.group(group)
                    .channel(NioSocketChannel.class)
                    .option(ChannelOption.TCP_NODELAY, true)
                    // Fix: raw ChannelInitializer -> ChannelInitializer<SocketChannel>.
                    // With the raw type, initChannel(SocketChannel) does not override
                    // the erased initChannel(Channel) and @Override fails to compile.
                    .handler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel socketChannel) throws Exception {
                            // Decoders deliberately omitted to reproduce the
                            // sticky-packet fault.
                            socketChannel.pipeline().addLast(new TimeClientHandler());
                        }
                    });
            ChannelFuture f = b.connect(host, port).sync();
            f.channel().closeFuture().sync();
        } finally {
            group.shutdownGracefully();
        }
    }

    public static void main(String[] args) {
        int port = 8080;
        if (args != null && args.length > 0) {
            try {
                port = Integer.parseInt(args[0]); // parseInt: no needless boxing
            } catch (NumberFormatException ignored) {
                // best-effort: fall back to the default port on malformed input
            }
        }
        try {
            new TimeClient().connect(port, "127.0.0.1");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
程序运行结果
服务端
客户端
按照设计初衷,客户端应该发送了100次数据给服务端,服务端每接收一次数据就回应一次客户端,那么客户端应该收到一百次消息,但是实际上客户端只收到2次消息,服务端也只收到两次消息。说明服务端和客户端都发生了粘包现象。
产生粘包拆包的原因
TCP协议是基于流的协议,是没有边界的一串数据,它会根据TCP缓冲区的实际情况进行包的拆分,上述例子中默认TCP缓冲区的大小是1024个字节,服务端第一次收到的数据大小正好是1024个字节,也就是说多个小的报文可能被封装成一个大的数据包进行传送,而一个大的报文也可能被拆分成多个小包进行传送。
解决办法
由于TCP是底层通讯协议,它不关心上层业务,无法保证数据包不会拆包或者粘包,那么这个问题只能通过上层协议来解决,通常的解决办法有以下几点:
1、消息定长,例如每个报文都是500个字节,如果报文不够500个字节,那么就填充
2、报文尾部增加特殊分隔符
3、消息分为消息头和消息体,消息头定义消息的长度,消息体还是真实传送的报文(类似UDP协议)
Netty解决粘包拆包问题
Netty提供了多种编码解码器处理上述问题,其中可以使用LineBasedFrameDecoder解决粘包问题
服务端代码只要上述TimeServer加一个LineBasedFrameDecoder的ChannelHandler
// Fix: parameterize ChannelInitializer with SocketChannel; with the raw type
// the @Override of initChannel(SocketChannel) does not compile.
private class ChildChannelHander extends ChannelInitializer<SocketChannel> {
    @Override
    protected void initChannel(SocketChannel ch) throws Exception {
        // LineBasedFrameDecoder splits the inbound stream on \n / \r\n
        // (frames capped at 1024 bytes), eliminating the sticky packets.
        ch.pipeline().addLast(new LineBasedFrameDecoder(1024));
        ch.pipeline().addLast(new TimeServerHandler());
    }
}
客户端代码只要上述TimeClient中加一个LineBasedFrameDecoder的ChannelHandler
/**
 * Connects to the time server with a LineBasedFrameDecoder installed so each
 * read delivers exactly one newline-terminated frame (no sticky packets).
 */
public void connect(int port, String host) throws Exception {
    // single event-loop group for connect and read/write I/O
    EventLoopGroup group = new NioEventLoopGroup();
    try {
        Bootstrap b = new Bootstrap();
        b.group(group)
                .channel(NioSocketChannel.class)
                .option(ChannelOption.TCP_NODELAY, true)
                // Fix: raw ChannelInitializer -> ChannelInitializer<SocketChannel>,
                // otherwise @Override on initChannel(SocketChannel) fails to compile.
                .handler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    protected void initChannel(SocketChannel socketChannel) throws Exception {
                        // frame on \n / \r\n, max 1024 bytes per line
                        socketChannel.pipeline().addLast(new LineBasedFrameDecoder(1024));
                        socketChannel.pipeline().addLast(new TimeClientHandler());
                    }
                });
        ChannelFuture f = b.connect(host, port).sync();
        f.channel().closeFuture().sync();
    } finally {
        group.shutdownGracefully();
    }
}
代码运行结果:
[图片上传中...(image.png-bdb177-1553093851212-0)]
客户端运行结果
由此可见,增加LineBasedFrameDecoder之后解决了粘包问题
LineBasedFrameDecoder的工作原理是遍历缓冲区的可读字节,判断是否是“\n”或者“\r\n”,如果有,那么就以该位置作为结束位置,从缓冲区可读区域起点到结束位置之间的数据作为一个完整报文,它是一种以换行符作为分隔标识的解码器。
LineBasedFrameDecoder 源码分析:
我们先看下LineBasedFrameDecoder的类继承图,它继承了ByteToMessageDecoder类,ByteToMessageDecoder是将ByteBuf字节解码成其他消息类型的抽象类,它有一个关键的方法:
callDecode(ChannelHandlerContext ctx, ByteBuf in, List