The earlier post 《Linux下使用libevent库实现服务器端编程》 described how to build a server directly on evbuffer.
bufferevent is a higher-level interface of the libevent networking library: internally it wraps a socket with a pair of evbuffers and exposes plain data read/write calls, and it also supports concurrent programming scenarios.
As a result, code using this interface only has to deal with business logic and never touches the socket handling details.
The material below is organized from the libevent manual.
Every bufferevent has an input buffer and an output buffer, both of type "struct evbuffer".
When you have data to send through a bufferevent, you add it to the output buffer;
when a bufferevent has data ready for you, you drain it from the input buffer.
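In code, that pairing looks like the following minimal fragment (assuming bev is an already-connected bufferevent):

char buf[1024];
size_t n;

/* Queue data for sending: it is appended to the output evbuffer */
bufferevent_write(bev, "hello", 5);

/* Consume received data: it is drained from the input evbuffer */
n = bufferevent_read(bev, buf, sizeof(buf));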
Each bufferevent has two data-related callbacks: a read callback and a write callback, both of type bufferevent_data_cb:
/**
A read or write callback for a bufferevent.
The read callback is triggered when new data arrives in the input
buffer and the amount of readable data exceed the low watermark
which is 0 by default.
The write callback is triggered if the write buffer has been
exhausted or fell below its low watermark.
@param bev the bufferevent that triggered the callback
@param ctx the user-specified context for this bufferevent
*/
typedef void (*bufferevent_data_cb)(struct bufferevent *bev, void *ctx);
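Both callbacks share this signature and are installed with bufferevent_setcb(); a small fragment (echo_read_cb, echo_write_cb and my_ctx are hypothetical names, bev an existing bufferevent):

/* Install the data callbacks; the last argument is handed back as ctx.
 * A NULL entry simply means "no callback for that event". */
bufferevent_setcb(bev, echo_read_cb, echo_write_cb, NULL, my_ctx);
bufferevent_enable(bev, EV_READ | EV_WRITE);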
By default, the read callback is invoked whenever any amount of data has been read from the underlying transport, and the write callback is invoked once enough of the output buffer has been flushed to the underlying transport.
In some business scenarios, though, you may want to wait until enough data has accumulated before processing it; in that case you can tune the bufferevent's watermarks to control when these callbacks fire.
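For instance, a minimal fragment (the 128-byte threshold is an arbitrary illustration, bev an existing bufferevent):

/* Do not invoke the read callback until at least 128 bytes are waiting
 * in the input buffer; a high watermark of 0 means no upper bound. */
bufferevent_setwatermark(bev, EV_READ, 128, 0);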
A bufferevent has four watermarks:
Read low-water mark: the read callback fires only once the input buffer holds at least this much data; it defaults to 0, so every read triggers the callback.
Read high-water mark: once the input buffer reaches this level, the bufferevent stops reading until enough data is drained back below it; it defaults to unlimited.
Write low-water mark: the write callback fires whenever the output buffer drains down to or below this level; it defaults to 0, meaning the write callback runs when the output buffer has emptied.
Write high-water mark: not used directly by a bufferevent; it has a special meaning for the underlying transports of filtering bufferevents.
Besides the two data callbacks, every bufferevent also has an event/error callback:
/**
An event/error callback for a bufferevent.
The event callback is triggered if either an EOF condition or another
unrecoverable error was encountered.
@param bev the bufferevent for which the error condition was reached
@param what a conjunction of flags: BEV_EVENT_READING or BEV_EVENT_WRITING
to indicate if the error was encountered on the read or write path,
and one of the following flags: BEV_EVENT_EOF, BEV_EVENT_ERROR,
BEV_EVENT_TIMEOUT, BEV_EVENT_CONNECTED.
@param ctx the user-specified context for this bufferevent
*/
typedef void (*bufferevent_event_cb)(struct bufferevent *bev, short what, void *ctx);
#define BEV_EVENT_READING   0x01 /**< error encountered while reading: the event happened on the read path */
#define BEV_EVENT_WRITING   0x02 /**< error encountered while writing: the event happened on the write path */
#define BEV_EVENT_EOF       0x10 /**< eof file reached: end-of-file / peer closed */
#define BEV_EVENT_ERROR     0x20 /**< unrecoverable error encountered */
#define BEV_EVENT_TIMEOUT   0x40 /**< user-specified timeout reached (only if a timeout was set) */
#define BEV_EVENT_CONNECTED 0x80 /**< connect operation finished (bufferevent's connect path) */
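BEV_EVENT_TIMEOUT is only ever reported if you have set timeouts; a minimal fragment (the 5s/10s values are arbitrary illustrations, bev an existing bufferevent):

struct timeval rd_tv = { 5, 0 };   /* read timeout: 5 seconds  */
struct timeval wr_tv = { 10, 0 };  /* write timeout: 10 seconds */

/* Once a timeout fires, the event callback receives BEV_EVENT_TIMEOUT
 * combined with BEV_EVENT_READING or BEV_EVENT_WRITING. */
bufferevent_set_timeouts(bev, &rd_tv, &wr_tv);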
When creating a bev with bufferevent_socket_new, you can specify options:
/** Options that can be specified when creating a bufferevent */
enum bufferevent_options {
    /** If set, we close the underlying file
     * descriptor/bufferevent/whatever when this bufferevent is freed. */
    BEV_OPT_CLOSE_ON_FREE = (1<<0),   /* freeing the bufferevent also closes the underlying socket */

    /** If set, and threading is enabled, operations on this bufferevent
     * are protected by a lock */
    BEV_OPT_THREADSAFE = (1<<1),      /* for multithreaded use: bufferevent calls take a lock internally */

    /** If set, callbacks are run deferred in the event loop. */
    BEV_OPT_DEFER_CALLBACKS = (1<<2), /* all callbacks on this bufferevent are deferred */

    /** If set, callbacks are executed without locks being held on the
     * bufferevent. This option currently requires that
     * BEV_OPT_DEFER_CALLBACKS also be set; a future version of Libevent
     * might remove the requirement.*/
    BEV_OPT_UNLOCK_CALLBACKS = (1<<3) /* by default a threadsafe bufferevent holds its lock while running
                                       * user callbacks; this option makes libevent drop the lock first */
};
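One caveat for BEV_OPT_THREADSAFE: it only takes effect if libevent's thread support was switched on beforehand. A minimal fragment (pthreads on Linux assumed; requires linking against libevent_pthreads):

#include <event2/thread.h>

/* Must be called before event_base_new() for the locks to be used. */
evthread_use_pthreads();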
As for BEV_OPT_DEFER_CALLBACKS, my understanding is that it guards against stack overflow when callbacks nest too deeply.
By default, a bufferevent's callbacks run immediately when their conditions are met (evbuffer callbacks behave the same way, as discussed later). When dependencies are complex, this immediate invocation causes trouble. Suppose one callback moves data into evbuffer A when A becomes empty, and another callback drains data out of A when A becomes full. All of these calls happen on the stack, so with a deep enough dependency chain there is a real risk of stack overflow.
To solve this, you can ask a bufferevent (or evbuffer) to defer its callbacks. Instead of running immediately when its condition is met, a deferred callback is queued inside the event_loop() call and executed after the regular event callbacks.
(Deferred callbacks were introduced in libevent 2.0.1-alpha.)
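Opting in is just a flag at creation time; a minimal fragment, assuming base and fd already exist:

/* Callbacks are queued and run from the event loop instead of being
 * invoked on the spot, which bounds the recursion depth. */
struct bufferevent *bev = bufferevent_socket_new(base, fd,
        BEV_OPT_CLOSE_ON_FREE | BEV_OPT_DEFER_CALLBACKS);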
With the concepts in place, let's put them into practice: a simple TCP proxy built on bufferevent (adapted from sample/le-proxy.c in the libevent source).
The main flow is: create a local listener, accept new client connections, open a matching connection to the backend server, forward client data to the server, and forward server data back to the client.
First, an overall pass: main() parses the address arguments (evutil_parse_sockaddr_port understands strings like 1.2.3.4:5678), then sets up listening through evconnlistener.
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#ifdef WIN32
#include <winsock2.h>
#include <ws2tcpip.h>
#else
#include <sys/socket.h>
#include <netinet/in.h>
#endif
#include <event2/bufferevent.h>
#include <event2/buffer.h>
#include <event2/listener.h>
#include <event2/util.h>
#include <event2/event.h>
static struct event_base *base;
static struct sockaddr_storage lsn_on_addr = {0};
static struct sockaddr_storage con_to_addr = {0};
#define MAX_OUTPUT (512*1024)
static void __on_accept(struct evconnlistener *listener, evutil_socket_t fd,
                        struct sockaddr *a, int slen, void *p);
static void __on_drained(struct bufferevent *bev, void *ctx);
static void __on_recv(struct bufferevent *bev, void *ctx);
static void __on_close(struct bufferevent *bev, void *ctx);
static void __on_error(struct bufferevent *bev, short what, void *ctx);
static void syntax(void)
{
    fputs("Syntax:\n", stderr);
    fputs("   tcp_proxy <listen-on-addr> <connect-to-addr>\n", stderr);
    fputs("Example:\n", stderr);
    fputs("   tcp_proxy 127.0.0.1:15001 127.0.0.1:5001\n", stderr);
    exit(1);
}
int main(int argc, char *argv[])
{
    int ret = -1;
    int con_len = sizeof(con_to_addr);
    int lsn_len = sizeof(lsn_on_addr);
    struct evconnlistener *listener = NULL;

    if (argc < 3) {
        syntax();
    }

    /* Parse "addr:port" strings such as 127.0.0.1:15001 */
    ret = evutil_parse_sockaddr_port(argv[2], (struct sockaddr*)&con_to_addr, &con_len);
    assert(0 == ret);
    ret = evutil_parse_sockaddr_port(argv[1], (struct sockaddr*)&lsn_on_addr, &lsn_len);
    assert(0 == ret);

    base = event_base_new();
    assert(base);

    listener = evconnlistener_new_bind(base, __on_accept, NULL,
            LEV_OPT_CLOSE_ON_FREE | LEV_OPT_CLOSE_ON_EXEC | LEV_OPT_REUSEABLE,
            -1, (struct sockaddr*)&lsn_on_addr, lsn_len);
    assert(listener);

    event_base_dispatch(base);

    evconnlistener_free(listener);
    event_base_free(base);
    exit(EXIT_SUCCESS);
}
static void __on_accept(struct evconnlistener *listener, evutil_socket_t fd,
                        struct sockaddr *a, int slen, void *p)
{
    char ip_from[128] = {0};
    char ip_to[128] = {0};
    struct bufferevent *b_srv, *b_clt;

    /* Create two linked bufferevent objects: one to connect, one for the
     * new connection */
    b_clt = bufferevent_socket_new(base, fd,
            BEV_OPT_CLOSE_ON_FREE|BEV_OPT_DEFER_CALLBACKS);
    b_srv = bufferevent_socket_new(base, -1,
            BEV_OPT_CLOSE_ON_FREE|BEV_OPT_DEFER_CALLBACKS);
    assert(b_clt && b_srv);

    if (bufferevent_socket_connect(b_srv,
            (struct sockaddr*)&con_to_addr, sizeof(con_to_addr)) < 0) {
        perror("bufferevent_socket_connect");
        bufferevent_free(b_srv);
        bufferevent_free(b_clt);
        return;
    }

    /* Two separate buffers: evutil_inet_ntop writes into the caller's buffer,
     * so sharing one buffer would make both %s print the same address. */
    printf("Accept from: %s, connect to: %s\n",
           evutil_inet_ntop(AF_INET, &((struct sockaddr_in *)a)->sin_addr, ip_from, sizeof(ip_from)),
           evutil_inet_ntop(AF_INET, &((struct sockaddr_in *)&con_to_addr)->sin_addr, ip_to, sizeof(ip_to)));

    /* Each side's ctx points at its partner */
    bufferevent_setcb(b_clt, __on_recv, NULL, __on_error, b_srv);
    bufferevent_setcb(b_srv, __on_recv, NULL, __on_error, b_clt);
    bufferevent_enable(b_clt, EV_READ|EV_WRITE);
    bufferevent_enable(b_srv, EV_READ|EV_WRITE);
}
The two forwarding directions work exactly the same way, so a single __on_recv handles both:
static void __on_recv(struct bufferevent *bev, void *ctx)
{
    struct bufferevent *partner = ctx;
    struct evbuffer *src, *dst;
    size_t len;

    src = bufferevent_get_input(bev);
    len = evbuffer_get_length(src);
    if (!partner) {
        /* No peer left: just discard whatever arrived */
        evbuffer_drain(src, len);
        return;
    }

    /* Move all input straight onto the partner's output buffer */
    dst = bufferevent_get_output(partner);
    evbuffer_add_buffer(dst, src);

    if (evbuffer_get_length(dst) >= MAX_OUTPUT) {
        /* We're giving the other side data faster than it can
         * pass it on. Stop reading here until we have drained the
         * other side to MAX_OUTPUT/2 bytes. */
        printf("%d is full\n", bufferevent_getfd(bev));
        bufferevent_setcb(partner, __on_recv, __on_drained, __on_error, bev);
        bufferevent_setwatermark(partner, EV_WRITE, MAX_OUTPUT/2, MAX_OUTPUT);
        bufferevent_disable(bev, EV_READ);
    }
}
The watermark setup here deserves a closer look: each connection is capped at a 512KB pending-output limit.
This covers the fast-receiver/slow-sender case and keeps memory use bounded (the evbuffers inside a bufferevent allocate on the heap).
In other words: once 512KB of pending output piles up, this side stops reading, and an extra write callback, __on_drained, is installed on the partner; it fires only when the pending output has drained down to MAX_OUTPUT/2.
static void __on_drained(struct bufferevent *bev, void *ctx)
{
    struct bufferevent *partner = ctx;

    printf("%d is no longer full\n", bufferevent_getfd(bev));

    /* We were choking the other side until we drained our outbuf a bit.
     * Now it seems drained. */
    bufferevent_setcb(bev, __on_recv, NULL, __on_error, partner);
    bufferevent_setwatermark(bev, EV_WRITE, 0, 0);
    if (partner)
        bufferevent_enable(partner, EV_READ);
}
That is, __on_drained is entered only after the pending data has been ferried down to half the limit; it then clears the watermark set earlier and resumes reading.
What remains is cleanup when a socket disconnects: when one end fails, it must be closed, and the other end must be closed as well.
One detail deserves attention here: to avoid losing data, the connection pair must not be destroyed until all forwarding has completed.
So the code stops reading, installs __on_close to watch the draining, and closes the connection once its output length reaches 0:
static void __on_close(struct bufferevent *bev, void *ctx)
{
    struct evbuffer *b = bufferevent_get_output(bev);

    /* Only free once everything pending has actually been written out */
    if (evbuffer_get_length(b) == 0) {
        printf("Close %2d done\n", bufferevent_getfd(bev));
        bufferevent_free(bev);
    }
}
static void __on_error(struct bufferevent *bev, short what, void *ctx)
{
    struct bufferevent *partner = ctx;

    if (what & (BEV_EVENT_EOF|BEV_EVENT_ERROR)) {
        if (what & BEV_EVENT_ERROR) {
            if (errno)
                perror("connection error");
        }

        if (partner) {
            /* Flush all pending data */
            __on_recv(bev, ctx);

            if (evbuffer_get_length(bufferevent_get_output(partner))) {
                /* We still have to flush data from the other
                 * side, but when that's done, close the other
                 * side. */
                bufferevent_setcb(partner, NULL, __on_close, __on_error, NULL);
                bufferevent_disable(partner, EV_READ);
            } else {
                /* We have nothing left to say to the other
                 * side; close it. */
                printf("Close %2d & %2d\n", bufferevent_getfd(partner), bufferevent_getfd(bev));
                bufferevent_free(partner);
            }
        } else {
            printf("Close %2d\n", bufferevent_getfd(bev));
        }
        bufferevent_free(bev);
    }
}
In a test run with iperf opening 10 sending connections, you can see that under normal low load the __on_close path is never taken.
This is only a simple example, but compared with writing a TCP proxy from scratch, both the amount of code and the logic are far easier to follow. Points still worth considering:
1. Keep the paired-socket state in your own per-connection context instead of global variables;
2. The example does not show what happens after connect succeeds. In practice, only once the bufferevent's connect completes do you know the socket value and can truly start the forwarding flow; if connect fails, the freshly accepted connection must be closed in turn (see the sketch after this list);
3. In this scenario I have tested 20,000 concurrent connections without any problem; beyond that, some kernel parameter tuning is needed.
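A minimal sketch of point 2, reacting to the outcome of bufferevent_socket_connect() in the event callback (on_proxy_event and its ctx wiring are illustrative, not taken from the sample; the same includes as the proxy above are assumed):

static void on_proxy_event(struct bufferevent *bev, short what, void *ctx)
{
    struct bufferevent *partner = ctx;

    if (what & BEV_EVENT_CONNECTED) {
        /* The upstream connect finished: only now does bufferevent_getfd(bev)
         * return a usable socket, so forwarding can really begin. */
        bufferevent_enable(bev, EV_READ | EV_WRITE);
        return;
    }
    if (what & (BEV_EVENT_EOF | BEV_EVENT_ERROR)) {
        /* Connect failed (or the link broke): tear down both sides,
         * including the freshly accepted client connection. */
        if (partner)
            bufferevent_free(partner);
        bufferevent_free(bev);
    }
}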