Analysis of the ngx_event_core_module module
The event_core module is the first module of the event type. Its main responsibilities are parsing the event-related configuration directives, selecting the event processing mechanism (select, poll, epoll or kqueue), creating the connection pool, and pre-allocating the read and write event pools.
Which event-related configuration directives does the event_core module handle? Its ngx_command_t array is shown below:
static ngx_command_t  ngx_event_core_commands[] = {

    /* sets the number of connections per worker process, i.e. the maximum
       number of TCP connections each worker can hold; the default is 512 */
    { ngx_string("worker_connections"),
      NGX_EVENT_CONF|NGX_CONF_TAKE1,
      ngx_event_connections,
      0,
      0,
      NULL },

    /* same meaning as worker_connections; this directive is obsolete */
    { ngx_string("connections"),
      NGX_EVENT_CONF|NGX_CONF_TAKE1,
      ngx_event_connections,
      0,
      0,
      NULL },

    /* selects which event module (epoll, select, kqueue, etc.) handles
       events; by default nginx chooses one at configure time based on
       the configure options and the platform */
    { ngx_string("use"),
      NGX_EVENT_CONF|NGX_CONF_TAKE1,
      ngx_event_use,
      0,
      0,
      NULL },

    /* accept as many connections as possible: when a listening socket
       reports a new connection, keep calling accept() in a loop until all
       pending connections have been accepted; off by default */
    { ngx_string("multi_accept"),
      NGX_EVENT_CONF|NGX_CONF_FLAG,
      ngx_conf_set_flag_slot,
      0,
      offsetof(ngx_event_conf_t, multi_accept),
      NULL },

    /* whether to enable the accept mutex that balances new connections
       among the worker processes; on by default */
    { ngx_string("accept_mutex"),
      NGX_EVENT_CONF|NGX_CONF_FLAG,
      ngx_conf_set_flag_slot,
      0,
      offsetof(ngx_event_conf_t, accept_mutex),
      NULL },

    /* with accept_mutex enabled, wait accept_mutex_delay before trying to
       accept new connections again; the default is 500ms */
    { ngx_string("accept_mutex_delay"),
      NGX_EVENT_CONF|NGX_CONF_TAKE1,
      ngx_conf_set_msec_slot,
      0,
      offsetof(ngx_event_conf_t, accept_mutex_delay),
      NULL },

    /* enables debug-level logging for connections from the given IP
       address; requires building with the --with-debug configure option */
    { ngx_string("debug_connection"),
      NGX_EVENT_CONF|NGX_CONF_TAKE1,
      ngx_event_debug_connection,
      0,
      0,
      NULL },

      ngx_null_command
};
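For reference, these directives are written inside the events block of nginx.conf. A minimal example follows; the values are purely illustrative, not recommendations:

events {
    worker_connections  1024;
    use                 epoll;
    multi_accept        on;
    accept_mutex        on;
    accept_mutex_delay  500ms;
}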
The structure that stores the event_core module's configuration is as follows:
typedef struct {
    ngx_uint_t    connections;          /* worker_connections or connections directive */
    ngx_uint_t    use;                  /* value of the use directive */

    ngx_flag_t    multi_accept;         /* multi_accept directive */
    ngx_flag_t    accept_mutex;         /* accept_mutex directive */

    ngx_msec_t    accept_mutex_delay;   /* accept_mutex_delay directive */

    u_char       *name;                 /* used together with use: name of the selected event module */

#if (NGX_DEBUG)
    ngx_array_t   debug_connection;     /* debug_connection directive */
#endif
} ngx_event_conf_t;
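Other code retrieves this structure at runtime through the ngx_event_get_conf macro, exactly as ngx_event_process_init does later in this article. A minimal sketch of that access pattern (the multi_accept check is only an example):

    /* sketch: reading the event_core configuration from the cycle */
    ngx_event_conf_t  *ecf;

    ecf = ngx_event_get_conf(cycle->conf_ctx, ngx_event_core_module);

    if (ecf->multi_accept) {
        /* accept connections in a loop until none are pending */
    }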
The module context structure of the event_core module:
ngx_event_module_t  ngx_event_core_module_ctx = {
    &event_core_name,
    ngx_event_core_create_conf,            /* create configuration */
    ngx_event_core_init_conf,              /* init configuration */

    { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
};
ngx_event_core_create_conf is called before the configuration is parsed to allocate the memory that holds the configuration items; ngx_event_core_init_conf is called after parsing to initialize any event directives that do not appear in the configuration file.
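As a rough sketch of this pattern (simplified from the nginx source; the debug_connection array setup is omitted here), ngx_event_core_create_conf allocates an ngx_event_conf_t from the cycle pool and marks every field as unset:

static void *
ngx_event_core_create_conf(ngx_cycle_t *cycle)
{
    ngx_event_conf_t  *ecf;

    /* allocate the ngx_event_conf_t shown above from the cycle pool */
    ecf = ngx_palloc(cycle->pool, sizeof(ngx_event_conf_t));
    if (ecf == NULL) {
        return NULL;
    }

    /* mark every field as "unset" so init_conf can tell which directives
       actually appeared in nginx.conf */
    ecf->connections = NGX_CONF_UNSET_UINT;
    ecf->use = NGX_CONF_UNSET_UINT;
    ecf->multi_accept = NGX_CONF_UNSET;
    ecf->accept_mutex = NGX_CONF_UNSET;
    ecf->accept_mutex_delay = NGX_CONF_UNSET_MSEC;
    ecf->name = (void *) NGX_CONF_UNSET;

    return ecf;
}

ngx_event_core_init_conf then fills in the fields that are still unset with the defaults mentioned in the directive table above (for example 512 connections and 500ms for accept_mutex_delay) and picks a default event module for use.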
The definition of the event_core module:
ngx_module_t  ngx_event_core_module = {
    NGX_MODULE_V1,
    &ngx_event_core_module_ctx,            /* module context */
    ngx_event_core_commands,               /* module directives */
    NGX_EVENT_MODULE,                      /* module type */
    NULL,                                  /* init master */
    ngx_event_module_init,                 /* init module */
    ngx_event_process_init,                /* init process */
    NULL,                                  /* init thread */
    NULL,                                  /* exit thread */
    NULL,                                  /* exit process */
    NULL,                                  /* exit master */
    NGX_MODULE_V1_PADDING
};
ngx_event_module_init is called before the worker processes are created, and ngx_event_process_init is called after each worker process is created. The following analysis looks at what each of them does.
ngx_event_module_init mainly performs initialization work. Its annotated source code follows:
static ngx_int_t
ngx_event_module_init(ngx_cycle_t *cycle)
{
    void              ***cf;
    u_char              *shared;
    size_t               size, cl;
    ngx_shm_t            shm;
    ngx_time_t          *tp;
    ngx_core_conf_t     *ccf;
    ngx_event_conf_t    *ecf;

    /* get the array of pointers that stores the configuration structures
       of all event modules */
    cf = ngx_get_conf(cycle->conf_ctx, ngx_events_module);

    /* get the configuration structure of the event_core module */
    ecf = (*cf)[ngx_event_core_module.ctx_index];

    /* log the name of the event module that will be used */
    if (!ngx_test_config && ngx_process <= NGX_PROCESS_MASTER) {
        ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0,
                      "using the \"%s\" event method", ecf->name);
    }

    /* get the configuration structure of the core module */
    ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module);

    ngx_timer_resolution = ccf->timer_resolution;

#if !(NGX_WIN32)
    {
    ngx_int_t      limit;
    struct rlimit  rlmt;

    /* query the maximum number of file descriptors the process may open */
    if (getrlimit(RLIMIT_NOFILE, &rlmt) == -1) {
        ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                      "getrlimit(RLIMIT_NOFILE) failed, ignored");

    } else {
        /* warn when the configured number of connections exceeds the current
           descriptor limit and also exceeds the limit set by the
           worker_rlimit_nofile directive (if any) */
        if (ecf->connections > (ngx_uint_t) rlmt.rlim_cur
            && (ccf->rlimit_nofile == NGX_CONF_UNSET
                || ecf->connections > (ngx_uint_t) ccf->rlimit_nofile))
        {
            limit = (ccf->rlimit_nofile == NGX_CONF_UNSET) ?
                         (ngx_int_t) rlmt.rlim_cur : ccf->rlimit_nofile;

            ngx_log_error(NGX_LOG_WARN, cycle->log, 0,
                          "%ui worker_connections exceed "
                          "open file resource limit: %i",
                          ecf->connections, limit);
        }
    }
    }
#endif /* !(NGX_WIN32) */

    /* master == 0 means no worker processes are forked, so the shared
       counters below are not needed */
    if (ccf->master == 0) {
        return NGX_OK;
    }

    /* a non-NULL ngx_accept_mutex_ptr means the shared counters have
       already been set up; return directly */
    if (ngx_accept_mutex_ptr) {
        return NGX_OK;
    }

    /* cl should be equal to or greater than cache line size */
    /* 128 is used as the cache line size; on my 32-bit x86 platform the
       actual cache line size is 32 */
    cl = 128;

    /* compute how much shared memory is needed: ngx_accept_mutex is the
       lock that balances new connections among the worker processes,
       ngx_connection_counter counts how many connections nginx has handled
       in total, and ngx_temp_number counts the temporary files created
       for connections */
    size = cl            /* ngx_accept_mutex */
           + cl          /* ngx_connection_counter */
           + cl;         /* ngx_temp_number */

#if (NGX_STAT_STUB)

    /* the following seven counters track connections in various states:
       ngx_stat_accepted: connections successfully accepted,
       ngx_stat_handled: connections that obtained an ngx_connection_t and
       had their read/write events initialized,
       ngx_stat_requests: connections already processed by the http modules,
       ngx_stat_active: connections currently holding an ngx_connection_t,
       ngx_stat_reading: connections currently receiving a TCP stream,
       ngx_stat_writing: connections currently sending a TCP stream,
       ngx_stat_waiting: connections waiting for an event */
    size += cl           /* ngx_stat_accepted */
           + cl          /* ngx_stat_handled */
           + cl          /* ngx_stat_requests */
           + cl          /* ngx_stat_active */
           + cl          /* ngx_stat_reading */
           + cl          /* ngx_stat_writing */
           + cl;         /* ngx_stat_waiting */

#endif

    shm.size = size;
    shm.name.len = sizeof("nginx_shared_zone");
    shm.name.data = (u_char *) "nginx_shared_zone";
    shm.log = cycle->log;

    /* create the shared memory segment */
    if (ngx_shm_alloc(&shm) != NGX_OK) {
        return NGX_ERROR;
    }

    /* address of the start of the shared memory */
    shared = shm.addr;

    ngx_accept_mutex_ptr = (ngx_atomic_t *) shared;

    /* -1 means a failed attempt to take the lock does not block */
    ngx_accept_mutex.spin = (ngx_uint_t) -1;

    if (ngx_shmtx_create(&ngx_accept_mutex, (ngx_shmtx_sh_t *) shared,
                         cycle->lock_file.data)
        != NGX_OK)
    {
        return NGX_ERROR;
    }

    /* point the global variables at their slots in the shared memory */
    ngx_connection_counter = (ngx_atomic_t *) (shared + 1 * cl);

    (void) ngx_atomic_cmp_set(ngx_connection_counter, 0, 1);

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "counter: %p, %d",
                   ngx_connection_counter, *ngx_connection_counter);

    ngx_temp_number = (ngx_atomic_t *) (shared + 2 * cl);

    tp = ngx_timeofday();

    ngx_random_number = (tp->msec << 16) + ngx_pid;

#if (NGX_STAT_STUB)

    ngx_stat_accepted = (ngx_atomic_t *) (shared + 3 * cl);
    ngx_stat_handled = (ngx_atomic_t *) (shared + 4 * cl);
    ngx_stat_requests = (ngx_atomic_t *) (shared + 5 * cl);
    ngx_stat_active = (ngx_atomic_t *) (shared + 6 * cl);
    ngx_stat_reading = (ngx_atomic_t *) (shared + 7 * cl);
    ngx_stat_writing = (ngx_atomic_t *) (shared + 8 * cl);
    ngx_stat_waiting = (ngx_atomic_t *) (shared + 9 * cl);

#endif

    return NGX_OK;
}
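The ngx_accept_mutex created here is used later by every worker in its event loop: before waiting for events, a worker tries to take the lock and only keeps the listening sockets' read events enabled while it holds it. A simplified sketch of that consumer side (the real logic lives in ngx_trylock_accept_mutex and ngx_process_events_and_timers and handles more cases, such as the ngx_accept_disabled throttling):

    /* simplified sketch of how a worker uses the shared accept mutex;
       error handling and the ngx_accept_disabled logic are omitted */
    if (ngx_use_accept_mutex) {
        if (ngx_shmtx_trylock(&ngx_accept_mutex)) {
            /* got the lock: this worker may accept new connections,
               so the listening read events are (re)enabled */
            ngx_accept_mutex_held = 1;

        } else {
            /* another worker holds the lock: wait accept_mutex_delay
               before competing for it again */
            ngx_accept_mutex_held = 0;
        }
    }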
ngx_event_process_init mainly sets up the accept mutex, allocates the connection pool, and allocates memory for the read and write events. Its annotated source code follows:
static ngx_int_t
ngx_event_process_init(ngx_cycle_t *cycle)
{
    ngx_uint_t           m, i;
    ngx_event_t         *rev, *wev;
    ngx_listening_t     *ls;
    ngx_connection_t    *c, *next, *old;
    ngx_core_conf_t     *ccf;
    ngx_event_conf_t    *ecf;
    ngx_event_module_t  *module;

    ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module);
    ecf = ngx_event_get_conf(cycle->conf_ctx, ngx_event_core_module);

    /* even with accept_mutex switched on, the load-balancing lock is only
       enabled when nginx runs in master/worker mode with more than one
       worker process */
    if (ccf->master && ccf->worker_processes > 1 && ecf->accept_mutex) {
        ngx_use_accept_mutex = 1;
        ngx_accept_mutex_held = 0;
        ngx_accept_mutex_delay = ecf->accept_mutex_delay;

    } else {
        /* otherwise the lock is disabled */
        ngx_use_accept_mutex = 0;
    }

#if (NGX_WIN32)

    /*
     * disable accept mutex on win32 as it may cause deadlock if
     * grabbed by a process which can't accept connections
     */

    ngx_use_accept_mutex = 0;

#endif

#if (NGX_THREADS)
    ngx_posted_events_mutex = ngx_mutex_init(cycle->log, 0);
    if (ngx_posted_events_mutex == NULL) {
        return NGX_ERROR;
    }
#endif

    /* initialize the timer */
    if (ngx_event_timer_init(cycle->log) == NGX_ERROR) {
        return NGX_ERROR;
    }

    /* find the event module whose ctx_index matches the use directive and
       call its init function */
    for (m = 0; ngx_modules[m]; m++) {
        if (ngx_modules[m]->type != NGX_EVENT_MODULE) {
            continue;
        }

        if (ngx_modules[m]->ctx_index != ecf->use) {
            continue;
        }

        module = ngx_modules[m]->ctx;

        if (module->actions.init(cycle, ngx_timer_resolution) != NGX_OK) {
            /* fatal */
            exit(2);
        }

        break;
    }

#if !(NGX_WIN32)

    /* NGX_USE_TIMER_EVENT is only set by the eventport and kqueue event
       modules; this branch is taken when timer_resolution is configured and
       the event module is neither eventport nor kqueue */
    if (ngx_timer_resolution && !(ngx_event_flags & NGX_USE_TIMER_EVENT)) {
        struct sigaction  sa;
        struct itimerval  itv;

        /* install the SIGALRM handler; ngx_timer_signal_handler simply sets
           ngx_event_timer_alarm, which tells nginx to update its cached
           time variables */
        ngx_memzero(&sa, sizeof(struct sigaction));
        sa.sa_handler = ngx_timer_signal_handler;
        sigemptyset(&sa.sa_mask);

        if (sigaction(SIGALRM, &sa, NULL) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "sigaction(SIGALRM) failed");
            return NGX_ERROR;
        }

        itv.it_interval.tv_sec = ngx_timer_resolution / 1000;
        itv.it_interval.tv_usec = (ngx_timer_resolution % 1000) * 1000;
        itv.it_value.tv_sec = ngx_timer_resolution / 1000;
        itv.it_value.tv_usec = (ngx_timer_resolution % 1000) * 1000;

        /* use setitimer() to deliver SIGALRM periodically */
        if (setitimer(ITIMER_REAL, &itv, NULL) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "setitimer() failed");
        }
    }

    /* this branch is not taken by the epoll module */
    if (ngx_event_flags & NGX_USE_FD_EVENT) {
        struct rlimit  rlmt;

        if (getrlimit(RLIMIT_NOFILE, &rlmt) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "getrlimit(RLIMIT_NOFILE) failed");
            return NGX_ERROR;
        }

        cycle->files_n = (ngx_uint_t) rlmt.rlim_cur;

        cycle->files = ngx_calloc(sizeof(ngx_connection_t *) * cycle->files_n,
                                  cycle->log);
        if (cycle->files == NULL) {
            return NGX_ERROR;
        }
    }

#endif

    /* allocate the connection pool */
    cycle->connections =
        ngx_alloc(sizeof(ngx_connection_t) * cycle->connection_n, cycle->log);
    if (cycle->connections == NULL) {
        return NGX_ERROR;
    }

    c = cycle->connections;

    /* allocate the read events; there are as many read events as connections */
    cycle->read_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n,
                                   cycle->log);
    if (cycle->read_events == NULL) {
        return NGX_ERROR;
    }

    rev = cycle->read_events;
    for (i = 0; i < cycle->connection_n; i++) {
        rev[i].closed = 1;
        rev[i].instance = 1;
#if (NGX_THREADS)
        rev[i].lock = &c[i].lock;
        rev[i].own_lock = &c[i].lock;
#endif
    }

    /* allocate the write events; there are as many write events as connections */
    cycle->write_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n,
                                    cycle->log);
    if (cycle->write_events == NULL) {
        return NGX_ERROR;
    }

    wev = cycle->write_events;
    for (i = 0; i < cycle->connection_n; i++) {
        wev[i].closed = 1;
#if (NGX_THREADS)
        wev[i].lock = &c[i].lock;
        wev[i].own_lock = &c[i].lock;
#endif
    }

    i = cycle->connection_n;
    next = NULL;

    /* link each connection with its read/write events and chain the
       connections into the free list */
    do {
        i--;

        c[i].data = next;
        c[i].read = &cycle->read_events[i];
        c[i].write = &cycle->write_events[i];
        c[i].fd = (ngx_socket_t) -1;

        next = &c[i];

#if (NGX_THREADS)
        c[i].lock = 0;
#endif
    } while (i);

    cycle->free_connections = next;
    cycle->free_connection_n = cycle->connection_n;

    /* for each listening socket */

    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {

        /* take a connection structure from the pool for the listening
           socket and set up its read/write events */
        c = ngx_get_connection(ls[i].fd, cycle->log);

        if (c == NULL) {
            return NGX_ERROR;
        }

        c->log = &ls[i].log;

        c->listening = &ls[i];
        ls[i].connection = c;

        rev = c->read;

        rev->log = c->log;
        rev->accept = 1;

#if (NGX_HAVE_DEFERRED_ACCEPT)
        rev->deferred_accept = ls[i].deferred_accept;
#endif

        if (!(ngx_event_flags & NGX_USE_IOCP_EVENT)) {
            if (ls[i].previous) {

                /*
                 * delete the old accept events that were bound to
                 * the old cycle read events array
                 */

                old = ls[i].previous->connection;

                if (ngx_del_event(old->read, NGX_READ_EVENT, NGX_CLOSE_EVENT)
                    == NGX_ERROR)
                {
                    return NGX_ERROR;
                }

                old->fd = (ngx_socket_t) -1;
            }
        }

#if (NGX_WIN32)

        if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
            ngx_iocp_conf_t  *iocpcf;

            rev->handler = ngx_event_acceptex;

            if (ngx_use_accept_mutex) {
                continue;
            }

            if (ngx_add_event(rev, 0, NGX_IOCP_ACCEPT) == NGX_ERROR) {
                return NGX_ERROR;
            }

            ls[i].log.handler = ngx_acceptex_log_error;

            iocpcf = ngx_event_get_conf(cycle->conf_ctx, ngx_iocp_module);
            if (ngx_event_post_acceptex(&ls[i], iocpcf->post_acceptex)
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }

        } else {
            rev->handler = ngx_event_accept;

            /* a non-zero ngx_use_accept_mutex means there are several worker
               processes and accept_mutex is enabled; in that case
               ngx_add_event is not called here to start listening */
            if (ngx_use_accept_mutex) {
                continue;
            }

            /* call ngx_add_event to start listening on the port */
            if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }
        }

#else

        /* set the read-event handler of the listening socket to
           ngx_event_accept, which is called when a new connection arrives */
        rev->handler = ngx_event_accept;

        if (ngx_use_accept_mutex) {
            continue;
        }

        if (ngx_event_flags & NGX_USE_RTSIG_EVENT) {
            if (ngx_add_conn(c) == NGX_ERROR) {
                return NGX_ERROR;
            }

        } else {
            /* add the read event to the event model */
            if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }
        }

#endif

    }

    return NGX_OK;
}
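The free list built above (cycle->free_connections, chained through c->data) is what ngx_get_connection pops from whenever a new socket needs an ngx_connection_t, and what ngx_free_connection pushes a connection back onto when it is closed. A minimal sketch of the pop operation, under a hypothetical name and ignoring the logging and the cycle->files bookkeeping of the real function:

/* simplified sketch of taking a connection from the pool built above;
   get_connection_sketch is an illustrative name, not the real nginx API */
ngx_connection_t *
get_connection_sketch(ngx_cycle_t *cycle, ngx_socket_t s)
{
    ngx_connection_t  *c;

    c = cycle->free_connections;
    if (c == NULL) {
        return NULL;                       /* worker_connections exhausted */
    }

    /* pop the head of the singly linked free list (next is kept in c->data) */
    cycle->free_connections = c->data;
    cycle->free_connection_n--;

    c->fd = s;                             /* attach the socket descriptor */

    return c;
}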