Some notes on lighttpd's event-handling mechanism, which works much like libevent.
The central global data structure of lighttpd's event handling is:
typedef struct fdevents {
    fdevent_handler_t type;   /* event-handler type, chosen via the lighttpd config file */
    fdnode **fdarray;         /* callbacks and their context live here, indexed by fd */
    size_t maxfds;
#ifdef USE_LINUX_EPOLL
    int epoll_fd;
    struct epoll_event *epoll_events;
#endif
#ifdef USE_SELECT
    fd_set select_read;
    fd_set select_write;
    fd_set select_error;
    fd_set select_set_read;
    fd_set select_set_write;
    fd_set select_set_error;
    int select_max_fd;
#endif
    int (*reset)(struct fdevents *ev);
    void (*free)(struct fdevents *ev);
    int (*event_add)(struct fdevents *ev, int fde_ndx, int fd, int events);
    int (*event_del)(struct fdevents *ev, int fde_ndx, int fd);
    int (*event_get_revent)(struct fdevents *ev, size_t ndx);
    int (*event_get_fd)(struct fdevents *ev, size_t ndx);
    int (*event_next_fdndx)(struct fdevents *ev, int ndx);
    int (*poll)(struct fdevents *ev, int timeout_ms);
    int (*fcntl_set)(struct fdevents *ev, int fd);
} fdevents;
I am only familiar with epoll and select, so the other platforms have been stripped out here. The function pointers are effectively virtual functions -- a familiar pattern by now.
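To make the dispatch concrete, here is a minimal sketch of how a wrapper in fdevent.c might forward a call through one of these pointers (my own simplification; the NULL guard is illustrative, not necessarily what the real code does):

int fdevent_poll(fdevents *ev, int timeout_ms) {
    /* forward to whichever backend was bound at init time */
    if (ev->poll == NULL) return -1;
    return ev->poll(ev, timeout_ms);
}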
typedef struct _fdnode {
    fdevent_handler handler;
    void *ctx;
    int fd;
    struct _fdnode *prev, *next;
} fdnode;
The fdnode structure stores the callback and its context. fdarray is indexed directly by fd, so the callback registered for a given fd can be looked up in O(1).
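A rough sketch of how registration and lookup could work with this layout (my simplification, not the exact lighttpd code): the node is stored at fdarray[fd], so fdevent_get_handler is a single array access.

int fdevent_register(fdevents *ev, int fd, fdevent_handler handler, void *ctx) {
    fdnode *fdn = calloc(1, sizeof(*fdn));
    if (NULL == fdn) return -1;

    fdn->handler = handler;
    fdn->ctx = ctx;
    fdn->fd = fd;

    ev->fdarray[fd] = fdn;    /* the slot is indexed directly by fd */
    return 0;
}

fdevent_handler fdevent_get_handler(fdevents *ev, int fd) {
    return ev->fdarray[fd]->handler;    /* O(1) lookup */
}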
The functions this event handler exposes to the rest of the server are listed below (a typical call sequence is sketched right after the list):
fdevents *fdevent_init(size_t maxfds, fdevent_handler_t type);
int fdevent_reset(fdevents *ev);
void fdevent_free(fdevents *ev);
int fdevent_event_add(fdevents *ev, int *fde_ndx, int fd, int events);
int fdevent_event_del(fdevents *ev, int *fde_ndx, int fd);
int fdevent_event_get_revent(fdevents *ev, size_t ndx);
int fdevent_event_get_fd(fdevents *ev, size_t ndx);
fdevent_handler fdevent_get_handler(fdevents *ev, int fd);
void * fdevent_get_context(fdevents *ev, int fd);
int fdevent_event_next_fdndx(fdevents *ev, int ndx);
int fdevent_poll(fdevents *ev, int timeout_ms);
int fdevent_register(fdevents *ev, int fd, fdevent_handler handler, void *ctx);
int fdevent_unregister(fdevents *ev, int fd);
int fdevent_fcntl_set(fdevents *ev, int fd);
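As a sketch of how a caller typically wires these together for one descriptor -- conn_fd, my_handler and my_ctx are placeholder names of my own, the maxfds value is arbitrary, and FDEVENT_IN is lighttpd's generic read-interest flag -- the flow is roughly register, set non-blocking, then add the read interest:

/* once at startup; maxfds and handler type usually come from the config */
fdevents *ev = fdevent_init(4096, FDEVENT_HANDLER_LINUX_SYSEPOLL);

/* per connection */
int fde_ndx = -1;
fdevent_register(ev, conn_fd, my_handler, my_ctx);    /* remember callback + context for this fd */
fdevent_fcntl_set(ev, conn_fd);                       /* make the fd non-blocking */
fdevent_event_add(ev, &fde_ndx, conn_fd, FDEVENT_IN); /* ask the backend to watch it for reads */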
All of the fdevent_* functions listed above are implemented in fdevent.c. Besides them there are:
int fdevent_select_init(fdevents *ev);
int fdevent_poll_init(fdevents *ev);
int fdevent_linux_rtsig_init(fdevents *ev);
int fdevent_linux_sysepoll_init(fdevents *ev);
int fdevent_solaris_devpoll_init(fdevents *ev);
int fdevent_freebsd_kqueue_init(fdevents *ev);
These live in the concrete backend implementations: fdevent_linux_sysepoll_init() is defined in fdevent_linux_sysepoll.c, and fdevent_select_init() in fdevent_select.c.
In other words, initializing the handler is delegated to the concrete backend. The flow starts by calling fdevent_init() in fdevent.c:
fdevents *fdevent_init(size_t maxfds, fdevent_handler_t type) {
    fdevents *ev;

    ev = calloc(1, sizeof(*ev));
    ev->fdarray = calloc(maxfds, sizeof(*ev->fdarray));
    ev->maxfds = maxfds;

    switch(type) {
    case FDEVENT_HANDLER_SELECT:
        if (0 != fdevent_select_init(ev)) {
            fprintf(stderr, "%s.%d: event-handler select failed\n",
                __FILE__, __LINE__);
            return NULL;
        }
        break;
    case FDEVENT_HANDLER_LINUX_SYSEPOLL:
        if (0 != fdevent_linux_sysepoll_init(ev)) {
            fprintf(stderr, "%s.%d: event-handler linux-sysepoll failed, try to set server.event-handler = \"poll\" or \"select\"\n",
                __FILE__, __LINE__);
            return NULL;
        }
        break;
    default:
        /* other handler types omitted */
        break;
    }

    return ev;
}
fdevent_init() only sets up the global fdevents structure and then hands off to the concrete backend, e.g. fdevent_linux_sysepoll_init():
int fdevent_linux_sysepoll_init(fdevents *ev) {
    ev->type = FDEVENT_HANDLER_LINUX_SYSEPOLL;
#define SET(x) \
    ev->x = fdevent_linux_sysepoll_##x;

    SET(free);
    SET(poll);

    SET(event_del);
    SET(event_add);

    SET(event_next_fdndx);
    SET(event_get_fd);
    SET(event_get_revent);

    if (-1 == (ev->epoll_fd = epoll_create(ev->maxfds))) {
        fprintf(stderr, "%s.%d: epoll_create failed (%s), try to set server.event-handler = \"poll\" or \"select\"\n",
            __FILE__, __LINE__, strerror(errno));
        return -1;
    }

    if (-1 == fcntl(ev->epoll_fd, F_SETFD, FD_CLOEXEC)) {
        fprintf(stderr, "%s.%d: epoll_create failed (%s), try to set server.event-handler = \"poll\" or \"select\"\n",
            __FILE__, __LINE__, strerror(errno));
        close(ev->epoll_fd);
        return -1;
    }

    ev->epoll_events = malloc(ev->maxfds * sizeof(*ev->epoll_events));

    return 0;
}
This function does two things:
1. fill in the function pointers of the global fdevents structure, i.e. bind the "virtual functions" to their concrete implementations;
2. set up the data the backend itself needs, e.g. epoll's epoll_fd and the struct epoll_event array.
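As an illustration of both points together, the epoll backend's event_add essentially maps the generic interest flags onto an epoll_ctl call against the epoll_fd created above. A rough sketch, not the verbatim lighttpd source (FDEVENT_IN/FDEVENT_OUT are lighttpd's generic event flags):

static int fdevent_linux_sysepoll_event_add(fdevents *ev, int fde_ndx, int fd, int events) {
    struct epoll_event ep;
    memset(&ep, 0, sizeof(ep));

    /* translate the generic interest flags into epoll's */
    if (events & FDEVENT_IN)  ep.events |= EPOLLIN;
    if (events & FDEVENT_OUT) ep.events |= EPOLLOUT;
    ep.events |= EPOLLERR | EPOLLHUP;
    ep.data.fd = fd;

    /* first call adds the fd, later calls modify the existing registration */
    if (0 != epoll_ctl(ev->epoll_fd,
                       (fde_ndx == -1) ? EPOLL_CTL_ADD : EPOLL_CTL_MOD,
                       fd, &ep)) {
        return -1;
    }
    return fd;
}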
Finally, let's look at how the handler processes events:
if ((n = fdevent_poll(srv->ev, 1000)) > 0) {
    /* n is the number of events */
    int revents;
    int fd_ndx;
    fd_ndx = -1;
    do {
        fdevent_handler handler;
        void *context;
        handler_t r;

        fd_ndx  = fdevent_event_next_fdndx (srv->ev, fd_ndx);
        revents = fdevent_event_get_revent (srv->ev, fd_ndx);
        fd      = fdevent_event_get_fd     (srv->ev, fd_ndx);
        handler = fdevent_get_handler(srv->ev, fd);
        context = fdevent_get_context(srv->ev, fd);
        /* connection_handle_fdevent needs a joblist_append */
#if 0
        log_error_write(srv, __FILE__, __LINE__, "sdd",
            "event for", fd, revents);
#endif
        switch (r = (*handler)(srv, context, revents)) {
        /* result-handling cases omitted */
        }
    } while (--n > 0);
}
fdevent_poll is, as you would expect, the blocking call to epoll_wait or select.
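For the epoll backend this is, in essence, a one-line pass-through (sketched here from memory, not quoted verbatim):

static int fdevent_linux_sysepoll_poll(fdevents *ev, int timeout_ms) {
    /* block until something is ready (or the timeout expires) and
     * fill ev->epoll_events with the ready descriptors */
    return epoll_wait(ev->epoll_fd, ev->epoll_events, ev->maxfds, timeout_ms);
}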
When it returns a value greater than 0, the do-while loop that follows processes all of the ready events. Because epoll and select report readiness differently, the handler hides the difference behind three functions:
fd_ndx  = fdevent_event_next_fdndx (srv->ev, fd_ndx);
revents = fdevent_event_get_revent (srv->ev, fd_ndx);
fd      = fdevent_event_get_fd     (srv->ev, fd_ndx);
For epoll, the call is:
static int fdevent_linux_sysepoll_event_next_fdndx(fdevents *ev, int ndx) {
    size_t i;

    UNUSED(ev);

    i = (ndx < 0) ? 0 : ndx + 1;

    return i;
}
Here fd_ndx is simply the sequence 0..n-1, because epoll_wait has already placed all ready events into the struct epoll_event array.
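Accordingly, the epoll backend can read the fd and the ready events straight out of that array. Roughly, as a sketch (FDEVENT_ERR/FDEVENT_HUP being lighttpd's generic error/hang-up flags):

static int fdevent_linux_sysepoll_event_get_fd(fdevents *ev, size_t ndx) {
    return ev->epoll_events[ndx].data.fd;
}

static int fdevent_linux_sysepoll_event_get_revent(fdevents *ev, size_t ndx) {
    int events = 0;
    int e = ev->epoll_events[ndx].events;

    /* map epoll's result flags back to the generic FDEVENT_* flags */
    if (e & EPOLLIN)  events |= FDEVENT_IN;
    if (e & EPOLLOUT) events |= FDEVENT_OUT;
    if (e & EPOLLERR) events |= FDEVENT_ERR;
    if (e & EPOLLHUP) events |= FDEVENT_HUP;

    return events;
}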
For select:
static int fdevent_select_event_next_fdndx(fdevents *ev, int ndx) {
    int i;

    i = (ndx < 0) ? 0 : ndx + 1;
    for (; i < ev->select_max_fd + 1; i++) {
        if (FD_ISSET(i, &(ev->select_read)))  break;
        if (FD_ISSET(i, &(ev->select_write))) break;
        if (FD_ISSET(i, &(ev->select_error))) break;
    }

    return i;
}
select is more awkward: unlike epoll, it does not hand back only the ready fds, so the code has to scan from 0 with FD_ISSET to find the next ready descriptor, and the next iteration resumes from the current ndx. For select, then, the ndx is in fact the fd itself.
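And since the ndx really is the fd, the select backend's get_revent just re-tests the three result sets with FD_ISSET. A rough sketch:

static int fdevent_select_event_get_revent(fdevents *ev, size_t ndx) {
    int revents = 0;

    /* ndx is the fd itself, so test it in each result set */
    if (FD_ISSET(ndx, &ev->select_read))  revents |= FDEVENT_IN;
    if (FD_ISSET(ndx, &ev->select_write)) revents |= FDEVENT_OUT;
    if (FD_ISSET(ndx, &ev->select_error)) revents |= FDEVENT_ERR;

    return revents;
}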