Nginx event核心模块之process(四)
这个是event模块核心运行部分,主要函数涉及event_process_init和event_process_and_timer两个部分。其中event_process_init部分主要包括以下内容:
1) 初始化cycle_connections信息
2) 初始化cycle_read_events以及cycle_write_events信息,这两个部分主要初始化关于cycle的信息。
3) 获取listening信息
4) 初始化connection info
5) 设置rev处理函数为event_accept,这三个部分主要初始化从listen数据信息中关于bind socket的信息,并初始化处理函数。
6) 把rev事件添加到epoll侦听队列中
其中event_process_and_timer函数,是在worker进程中的循环的主体部分,里面会调用epoll_process函数。主要有以下工作内容:
1) 获取定时器timer,用于传递给epoll_wait函数。
2) 后置处理标志
3) 多进程accept互斥
4) Epoll_process
5) 处理后置事件队列,包括accept后置事件
下面是两个函数的代码以及注释:
/*
 * Worker-process event loop body.  One iteration:
 *   1) choose a timeout for the event poll,
 *   2) optionally contend for the accept mutex (multi-worker accept balancing),
 *   3) poll for I/O events via ngx_process_events (ngx_epoll_process_events
 *      when the epoll module is in use),
 *   4) run posted accept events, release the accept mutex, expire timers,
 *   5) run the remaining posted (non-accept) events.
 */
void
ngx_process_events_and_timers(ngx_cycle_t*cycle)//called repeatedly from the worker-process loop
{
ngx_uint_t flags;//bit flags for ngx_process_events: NGX_UPDATE_TIME and/or NGX_POST_EVENTS
ngx_msec_t timer, delta;
if (ngx_timer_resolution) {//timer_resolution directive set: time is updated elsewhere, so no poll timeout is needed
timer = NGX_TIMER_INFINITE;//-1: block in the poll until an event arrives
flags = 0;
} else {//no timer_resolution: wake up for the nearest timer and update the cached time ourselves
timer = ngx_event_find_timer();//milliseconds until the nearest timer event; computed before epoll_wait
flags = NGX_UPDATE_TIME;
#if (NGX_THREADS)//with worker threads, cap the wait so the time stays reasonably fresh
if (timer == NGX_TIMER_INFINITE || timer > 500) {//infinite or longer than 500 ms -> clamp to 500 ms
timer = 500;
}
#endif
}
//accept mutex: only one worker at a time registers the listening sockets, avoiding the thundering herd
if (ngx_use_accept_mutex) {//set only when running multiple worker processes with accept_mutex on
if (ngx_accept_disabled > 0) {
ngx_accept_disabled--;//this worker is loaded; skip contending for the mutex this round
} else {
if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {//try to take the shared accept mutex
return;
}
if (ngx_accept_mutex_held) {//mutex acquired: defer event handlers so the mutex is held briefly
flags |= NGX_POST_EVENTS;//queue handlers onto the posted lists instead of running them inline
} else {//mutex not acquired: bound the poll timeout so we retry soon
if (timer == NGX_TIMER_INFINITE
|| timer >ngx_accept_mutex_delay)
{
timer =ngx_accept_mutex_delay;//retry acquiring the accept mutex after at most this many ms (default 500)
}
}
}
}
delta = ngx_current_msec;//snapshot; ngx_current_msec is refreshed when NGX_UPDATE_TIME is set
(void) ngx_process_events(cycle, timer, flags);//poll for events; ngx_epoll_process_events for the epoll module
delta = ngx_current_msec - delta;//elapsed time spent in the poll
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
"timer delta: %M",delta);
if (ngx_posted_accept_events) {//the epoll module posts accept-ready rev's onto this queue
ngx_event_process_posted(cycle, &ngx_posted_accept_events);//accept new connections first, while still holding the mutex
}
if (ngx_accept_mutex_held) {
ngx_shmtx_unlock(&ngx_accept_mutex);//release the accept mutex before running ordinary handlers
}
if (delta) {//time advanced during the poll: fire any timers that have expired
ngx_event_expire_timers();
}
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
"posted events%p", ngx_posted_events);
if (ngx_posted_events) {//drain the ordinary (non-accept) posted event queue
if (ngx_threaded) {
ngx_wakeup_worker_thread(cycle);//hand the queue to a worker thread
} else {
ngx_event_process_posted(cycle, &ngx_posted_events);//run the handlers inline in this process
}
}
}
/*
 * Event-module process-init hook, run once in each worker:
 *   - decide whether the accept mutex is used,
 *   - initialize the timer tree,
 *   - allocate cycle->connections / read_events / write_events arrays,
 *   - thread the connections into the free list,
 *   - for each listening socket: bind a connection, set rev->handler to
 *     ngx_event_accept, and (unless the accept mutex is used) register the
 *     read event with the event mechanism (epoll/rtsig/...).
 *
 * NOTE(review): this excerpt elides several spans ("…..") from the original
 * nginx source; in particular the #if that matches the #endif near the end
 * of the listening loop was removed by the elision before the handler setup.
 */
static ngx_int_t
ngx_event_process_init(ngx_cycle_t *cycle)
{
…..
if (ccf->master && ccf->worker_processes > 1 &&ecf->accept_mutex) {//multiple workers and accept_mutex enabled
ngx_use_accept_mutex = 1;//consulted later in ngx_process_events_and_timers
ngx_accept_mutex_held = 0;
ngx_accept_mutex_delay = ecf->accept_mutex_delay;//retry delay when the mutex is not acquired
} else {
ngx_use_accept_mutex = 0;//single worker: no accept contention, mutex unnecessary
}
……
if (ngx_event_timer_init(cycle->log) == NGX_ERROR) {//initialize the event timer tree
return NGX_ERROR;
}
…………
cycle->connections =
ngx_alloc(sizeof(ngx_connection_t) * cycle->connection_n,cycle->log);//allocate the connection pool (connection_n slots)
if (cycle->connections == NULL) {
return NGX_ERROR;
}
c = cycle->connections;
cycle->read_events = ngx_alloc(sizeof(ngx_event_t) *cycle->connection_n,
cycle->log);//one read event per connection slot
if (cycle->read_events == NULL) {
return NGX_ERROR;
}
rev = cycle->read_events;
for (i = 0; i < cycle->connection_n; i++) {
rev[i].closed = 1;//mark unused
rev[i].instance = 1;//instance bit: used to detect stale events on reused connections
#if (NGX_THREADS)
rev[i].lock = &c[i].lock;
rev[i].own_lock = &c[i].lock;
#endif
}
cycle->write_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n,
cycle->log);//one write event per connection slot
if (cycle->write_events == NULL) {
return NGX_ERROR;
}
wev = cycle->write_events;
for (i = 0; i < cycle->connection_n; i++) {
wev[i].closed = 1;//mark unused
#if (NGX_THREADS)
wev[i].lock = &c[i].lock;
wev[i].own_lock = &c[i].lock;
#endif
}
/* walk the connection array backwards, chaining slots through c[i].data
 * to build the singly linked free-connection list */
i = cycle->connection_n;
next = NULL;
do {
i--;
c[i].data = next;//free-list link: each slot points at the next free one
c[i].read = &cycle->read_events[i];
c[i].write = &cycle->write_events[i];
c[i].fd = (ngx_socket_t) -1;//no socket attached yet
next = &c[i];
#if (NGX_THREADS)
c[i].lock = 0;
#endif
} while (i);
cycle->free_connections = next;//head of the free list (slot 0)
cycle->free_connection_n = cycle->connection_n;
/* for each listening socket */
ls = cycle->listening.elts;
for (i = 0; i < cycle->listening.nelts; i++) {//nelts listening sockets
c = ngx_get_connection(ls[i].fd, cycle->log);//take a connection slot for this listening fd
if (c == NULL) {
return NGX_ERROR;
}
c->log = &ls[i].log;
c->listening = &ls[i];//cross-link connection and listening structure
ls[i].connection = c;
rev = c->read;//the read event attached to this listening connection
rev->log = c->log;
rev->accept = 1;//flag checked in the event loop so ready events dispatch to the accept handler
#if (NGX_HAVE_DEFERRED_ACCEPT)//deferred accept (e.g. TCP_DEFER_ACCEPT) support
rev->deferred_accept = ls[i].deferred_accept;
#endif
if (!(ngx_event_flags & NGX_USE_IOCP_EVENT)) {//IOCP (Windows) manages events differently -- skip manual cleanup there
if (ls[i].previous) {
/*
 * delete the old accept events that were bound to
 * the old cycle's read events array (after a reload)
 */
old =ls[i].previous->connection;
if (ngx_del_event(old->read,NGX_READ_EVENT, NGX_CLOSE_EVENT)
== NGX_ERROR)
{
return NGX_ERROR;
}
old->fd = (ngx_socket_t) -1;//detach the stale fd
}
}
……..
rev->handler = ngx_event_accept;//new connections on this socket are handled by ngx_event_accept
if (ngx_use_accept_mutex) {
continue;//with the accept mutex the event is added/removed dynamically per iteration, not here
}
if (ngx_event_flags & NGX_USE_RTSIG_EVENT) {//rtsig event mechanism registers the whole connection
if (ngx_add_conn(c) == NGX_ERROR) {
return NGX_ERROR;
}
} else {
if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {//register the read event (ngx_epoll_add_event for epoll)
return NGX_ERROR;
}
}
#endif
}
return NGX_OK;
}
最后说明下,在event模块中还有两个接口,供业务逻辑层添加读写事件之用。
ngx_int_tngx_handle_read_event(ngx_event_t *rev, ngx_uint_t flags);
ngx_int_tngx_handle_write_event(ngx_event_t *wev, size_t lowat);
两个函数中都封装了epoll_add_event函数,用于添加事件,其中handle_read_event中的epoll op参数是EPOLLIN,而handle_write_event为EPOLLOUT。