typedef struct server {
server_socket_array srv_sockets;
/* the errorlog */
int errorlog_fd;
enum { ERRORLOG_FILE, ERRORLOG_FD, ERRORLOG_SYSLOG, ERRORLOG_PIPE } errorlog_mode;
buffer *errorlog_buf;
fdevents *ev, *ev_ins;
buffer_plugin plugins;
void *plugin_slots;
/* counters */
int con_opened;
int con_read;
int con_written;
int con_closed;
int ssl_is_init;
int max_fds; /* max possible fds */
int cur_fds; /* currently used fds */
int want_fds; /* waiting fds */
int sockets_disabled;
size_t max_conns;
/* buffers */
buffer *parse_full_path;
buffer *response_header;
buffer *response_range;
buffer *tmp_buf;
buffer *tmp_chunk_len;
buffer *empty_string; /* is necessary for cond_match */
buffer *cond_check_buf;
/* caches */
#ifdef HAVE_IPV6
inet_ntop_cache_type inet_ntop_cache[INET_NTOP_CACHE_MAX];
#endif
mtime_cache_type mtime_cache[FILE_CACHE_MAX];
array *split_vals;
/* Timestamps */
time_t cur_ts;
time_t last_generated_date_ts;
time_t last_generated_debug_ts;
time_t startup_ts;
char entropy[8]; /* from /dev/[u]random if possible, otherwise rand() */
char is_real_entropy; /* whether entropy is from /dev/[u]random */
buffer *ts_debug_str;
buffer *ts_date_str;
/* config-file */
array *config;
array *config_touched;
array *config_context;
specific_config **config_storage;
server_config srvconf;
short int config_deprecated;
short int config_unsupported;
connections *conns;
connections *joblist;
connections *fdwaitqueue;
stat_cache *stat_cache;
/**
* The status array can carry all the status information you want
* the key to the array is <module-prefix>.<name>
* and the values are counters
*
* example:
* fastcgi.backends = 10
* fastcgi.active-backends = 6
* fastcgi.backend.<key>.load = 24
* fastcgi.backend.<key>....
*
* fastcgi.backend.<key>.disconnects = ...
*/
array *status;
fdevent_handler_t event_handler;
int (* network_backend_write)(struct server *srv, connection *con, int fd, chunkqueue *cq);
int (* network_backend_read)(struct server *srv, connection *con, int fd, chunkqueue *cq);
#ifdef USE_OPENSSL
int (* network_ssl_backend_write)(struct server *srv, connection *con, SSL *ssl, chunkqueue *cq);
int (* network_ssl_backend_read)(struct server *srv, connection *con, SSL *ssl, chunkqueue *cq);
#endif
uid_t uid;
gid_t gid;
} server;
The struct above is lighttpd's global server state. For the network model, the fdevents *ev member is the key structure: it threads through the entire event-handling path.
typedef struct fdevents {
struct server *srv;
fdevent_handler_t type;
fdnode **fdarray;
size_t maxfds;
#ifdef USE_LINUX_EPOLL
int epoll_fd;
struct epoll_event *epoll_events;
#endif
#ifdef USE_POLL
struct pollfd *pollfds;
size_t size;
size_t used;
buffer_int unused;
#endif
#ifdef USE_SELECT
fd_set select_read;
fd_set select_write;
fd_set select_error;
fd_set select_set_read;
fd_set select_set_write;
fd_set select_set_error;
int select_max_fd;
#endif
#ifdef USE_SOLARIS_DEVPOLL
int devpoll_fd;
struct pollfd *devpollfds;
#endif
#ifdef USE_SOLARIS_PORT
port_event_t *port_events;
#endif
#ifdef USE_FREEBSD_KQUEUE
int kq_fd;
struct kevent *kq_results;
#endif
#ifdef USE_SOLARIS_PORT
int port_fd;
#endif
#ifdef USE_LIBEV
struct ev_loop *libev_loop;
#endif
int (*reset)(struct fdevents *ev);
void (*free)(struct fdevents *ev);
int (*event_set)(struct fdevents *ev, int fde_ndx, int fd, int events);
int (*event_del)(struct fdevents *ev, int fde_ndx, int fd);
int (*event_get_revent)(struct fdevents *ev, size_t ndx);
int (*event_get_fd)(struct fdevents *ev, size_t ndx);
int (*event_next_fdndx)(struct fdevents *ev, int ndx);
int (*poll)(struct fdevents *ev, int timeout_ms);
int (*fcntl_set)(struct fdevents *ev, int fd);
} fdevents;
The double pointer fdnode **fdarray is the core of fdevent: indexed by fd, it stores each fd's associated connection (con), its handler, and related bookkeeping.
fdevent_register(srv->ev, con->fd, connection_handle_fdevent, con);
This call registers an accepted fd in the fdarray described above, using the fd value itself as the index.
The array is initialized as:
ev->fdarray = calloc(maxfds, sizeof(*ev->fdarray));
ev->maxfds = maxfds;
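For illustration, here is a simplified sketch of fdnode and fdevent_register as they appear in lighttpd 1.4.x (field names abridged; the real fdnode also carries list pointers):

typedef struct {
    fdevent_handler handler; /* callback invoked when this fd has events */
    void *ctx;               /* usually the connection * belonging to the fd */
    int fd;
} fdnode;

int fdevent_register(fdevents *ev, int fd, fdevent_handler handler, void *ctx) {
    fdnode *fdn = calloc(1, sizeof(fdnode));
    fdn->handler = handler;
    fdn->fd = fd;
    fdn->ctx = ctx;
    ev->fdarray[fd] = fdn; /* the fd itself is the array index */
    return 0;
}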
The con structure bundles the per-fd state of a connection in the network model.
Taking the epoll backend as an example: fdevent_fcntl_set(srv->ev, con->fd) puts the fd into non-blocking mode, and the backend's event_set hook registers the fd with epoll via epoll_ctl.
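As a rough sketch of those two hooks, assuming the Linux epoll backend (the real code lives in fdevent.c and fdevent_linux_sysepoll.c and handles more cases):

static int fdevent_fcntl_set(fdevents *ev, int fd) {
    /* non-blocking, so the event loop never stalls on a single fd */
    return fcntl(fd, F_SETFL, O_NONBLOCK | O_RDWR);
}

static int fdevent_linux_sysepoll_event_set(fdevents *ev, int fde_ndx, int fd, int events) {
    struct epoll_event ep;
    memset(&ep, 0, sizeof(ep));
    if (events & FDEVENT_IN)  ep.events |= EPOLLIN;
    if (events & FDEVENT_OUT) ep.events |= EPOLLOUT;
    ep.events |= EPOLLERR | EPOLLHUP;
    ep.data.fd = fd;
    /* fde_ndx == -1 means the fd is not yet registered with epoll */
    epoll_ctl(ev->epoll_fd, (-1 == fde_ndx) ? EPOLL_CTL_ADD : EPOLL_CTL_MOD, fd, &ep);
    return fd; /* with epoll the fd doubles as the fde_ndx */
}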
Now look at the main event loop in server.c:
if ((n = fdevent_poll(srv->ev, 1000)) > 0) {
/* n is the number of events */
int revents;
int fd_ndx;
#if 0
if (n > 0) {
log_error_write(srv, __FILE__, __LINE__, "sd",
"polls:", n);
}
#endif
fd_ndx = -1;
do {
fdevent_handler handler;
void *context;
handler_t r;
fd_ndx = fdevent_event_next_fdndx (srv->ev, fd_ndx);
if (-1 == fd_ndx) break; /* not all fdevent handlers know how many fds got an event */
revents = fdevent_event_get_revent (srv->ev, fd_ndx);
fd = fdevent_event_get_fd (srv->ev, fd_ndx);
handler = fdevent_get_handler(srv->ev, fd);
context = fdevent_get_context(srv->ev, fd);
/* connection_handle_fdevent needs a joblist_append */
#if 0
log_error_write(srv, __FILE__, __LINE__, "sdd",
"event for", fd, revents);
#endif
switch (r = (*handler)(srv, context, revents)) {
case HANDLER_FINISHED:
case HANDLER_GO_ON:
case HANDLER_WAIT_FOR_EVENT:
case HANDLER_WAIT_FOR_FD:
break;
case HANDLER_ERROR:
/* should never happen */
SEGFAULT();
break;
default:
log_error_write(srv, __FILE__, __LINE__, "d", r);
break;
}
} while (--n > 0);
} else if (n < 0 && errno != EINTR) {
log_error_write(srv, __FILE__, __LINE__, "ss",
"fdevent_poll failed:",
strerror(errno));
}
Here fdevent_poll() waits on the epoll instance and returns the number of ready fds. For each ready fd, the loop fetches the handler and context stored in fdarray and invokes the handler (e.g. connection_handle_fdevent), which appends the connection to srv->joblist.
The joblist is then walked to carry out the actual I/O.
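That joblist pass, directly after the poll loop in server.c, looks roughly like this (simplified from lighttpd 1.4.x):

for (ndx = 0; ndx < srv->joblist->used; ndx++) {
    connection *con = srv->joblist->ptr[ndx];
    /* drive the connection's state machine: read, write, close, ... */
    connection_state_machine(srv, con);
    con->in_joblist = 0;
}
srv->joblist->used = 0;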
Now compare libevent's model. Registration looks like this:
event_set(&pClient->ev_read, client_fd, EV_READ|EV_PERSIST, on_read, pClient);
event_base_set(base, &pClient->ev_read);
event_add(&pClient->ev_read, NULL);
Here on_read is a user-defined callback and pClient is the pointer passed to on_read as its argument; the event flags and fd are likewise supplied by the caller. event_set() therefore only initializes an event structure and fills in these fields. event_base_set() attaches this per-fd event to the global base variable; internally this amounts to ev->ev_base = base;. event_add() performs two operations. The first is res = evsel->add(evbase, ev);, which registers the event with the epoll backend (the second, inserting the event into the base's queues, comes up again with timeouts below). Before walking through that registration, note that epoll_init has already set up two structures:
struct evepoll {
struct event *evread;
struct event *evwrite;
};
struct epollop {
struct evepoll *fds;
int nfds;
struct epoll_event *events;
int nevents;
int epfd;
};
and stored the epollop * pointer in base->evbase. fds and events are arrays (not linked lists) holding the per-fd event state and the results of an epoll wait; both are calloc'd, so initially empty. With that in place, look at evsel->add(evbase, ev):
struct evepoll *evep;
evep = &epollop->fds[fd];
epev.data.ptr = evep;
epev.events = events;
if (epoll_ctl(epollop->epfd, op, ev->ev_fd, &epev) == -1)
        return (-1);
/* Update events responsible */
if (ev->ev_events & EV_READ)
evep->evread = ev;
if (ev->ev_events & EV_WRITE)
evep->evwrite = ev;
This stores evep in the ptr field of the epoll event and records ev on the matching read/write slot, which at the same time fills in fds[fd]. Next, look at epoll_dispatch:
res = epoll_wait(epollop->epfd, events, epollop->nevents, timeout);
for (i = 0; i < res; i++) {
int what = events[i].events;
struct event *evread = NULL, *evwrite = NULL;
evep = (struct evepoll *)events[i].data.ptr;
if (what & (EPOLLHUP|EPOLLERR)) {
evread = evep->evread;
evwrite = evep->evwrite;
} else {
if (what & EPOLLIN) {
evread = evep->evread;
}
if (what & EPOLLOUT) {
evwrite = evep->evwrite;
}
}
if (!(evread||evwrite))
continue;
if (evread != NULL)
event_active(evread, EV_READ, 1);
if (evwrite != NULL)
event_active(evwrite, EV_WRITE, 1);
}
Here evep is recovered from the ptr field (it could equally be looked up via fds[fd], since the fd is known). Based on the events recorded on evep earlier, the matching events are activated and moved onto the active list for dispatch.
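For reference, event_active in libevent 1.x is essentially the following (slightly abridged):

void event_active(struct event *ev, int res, short ncalls) {
    if (ev->ev_flags & EVLIST_ACTIVE) {
        ev->ev_res |= res; /* already queued: just merge the result flags */
        return;
    }
    ev->ev_res = res;       /* which events fired (EV_READ / EV_WRITE / ...) */
    ev->ev_ncalls = ncalls; /* how many times to invoke the callback */
    event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
}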
A side note on signal handling: libevent integrates signals with I/O through the ev_signal_pair[2] socket pair. signal.c follows the same backend layout as epoll.c. During initialization, event_set is called on the pipe event, and this event is registered permanently, i.e. registered exactly once. The key point is that writing to the pipe turns a signal into I/O activity, which wakes epoll_dispatch, after which evsignal_process runs the signal events.
evsignal_add likewise does two things: it installs a signal() handler to catch the signal, and it performs the one-time event_add of the pipe event described above, appending the event to the base's signal list.
When a signal fires, the handler installed via signal() runs and writes to ev_signal_pair[1]; the read event registered on ev_signal_pair[0] is then triggered and completes the read.
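A minimal sketch of such a handler, following the pipe-end convention described above (handler writes ev_signal_pair[1], the loop reads ev_signal_pair[0]); evsigcaught is libevent 1.x's per-signal counter:

static void evsignal_handler(int sig) {
    evsigcaught[sig]++; /* record which signal fired and how often */
    /* one byte of I/O is enough to wake epoll_wait in the event loop */
    write(ev_signal_pair[1], "a", 1);
}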
Finally, the base walks the active list in one pass and dispatches each active event to its handler.
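Draining the active list is done by event_process_active, roughly as follows in libevent 1.x (the ncalls bookkeeping and error handling omitted):

static void event_process_active(struct event_base *base) {
    struct event *ev;
    while ((ev = TAILQ_FIRST(&base->activequeue)) != NULL) {
        event_queue_remove(base, ev, EVLIST_ACTIVE);
        /* hand the fired events to the user callback registered in event_set */
        (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
    }
}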
libevent's timeout events work like this: if fd == -1 there is clearly no I/O, so an event with only a timeout is a pure timer. Its deadline feeds the timeout argument of epoll_wait, which guarantees a wakeup once the timeout expires. I suspect some drift is possible here: if I/O concurrency is high at that moment and the walk of the timer queue is delayed, the timer can fire late.
If both an fd and a timeout are set, the event must be registered in the inserted queue and the timeout queue at the same time. If the fd becomes ready within the timeout, the timeout event does not fire; if the timeout fires without any I/O, the net result is a timeout that application logic has to handle. For example, the HTTP library built on libevent detects such a timeout and releases the connection.
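For completeness, a pure timer in libevent 1.x looks like this; on_timeout is a hypothetical callback used only for illustration:

static void on_timeout(int fd, short events, void *arg) {
    /* fd is -1 here: the event carries no I/O, only the timeout */
}

struct event timer_ev;
struct timeval tv = { 5, 0 };             /* fire after 5 seconds */
evtimer_set(&timer_ev, on_timeout, NULL); /* shorthand for event_set(ev, -1, 0, cb, arg) */
event_base_set(base, &timer_ev);
evtimer_add(&timer_ev, &tv);              /* goes into the timeout queue only */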