将event加入到队列后，调用：
/* Run the event loop for event_base until no more events are pending.
 * Thin wrapper: identical to calling event_base_loop() with no flags. */
int
event_base_dispatch(struct event_base *event_base)
{
	const int no_flags = 0;
	return event_base_loop(event_base, no_flags);
}
int event_base_loop(struct event_base *base, int flags) { const struct eventop *evsel = base->evsel; struct timeval tv; struct timeval *tv_p; int res, done, retval = 0; /* Grab the lock. We will release it inside evsel.dispatch, and again * as we invoke user callbacks. */ EVBASE_ACQUIRE_LOCK(base, th_base_lock); if (base->running_loop) { event_warnx("%s: reentrant invocation. Only one event_base_loop" " can run on each event_base at once.", __func__); EVBASE_RELEASE_LOCK(base, th_base_lock); return -1; } base->running_loop = 1; clear_time_cache(base); if (base->sig.ev_signal_added && base->sig.ev_n_signals_added) evsig_set_base_(base); done = 0; #ifndef EVENT__DISABLE_THREAD_SUPPORT base->th_owner_id = EVTHREAD_GET_ID(); #endif base->event_gotterm = base->event_break = 0; while (!done) { base->event_continue = 0; base->n_deferreds_queued = 0; /* Terminate the loop if we have been asked to */ if (base->event_gotterm) { break; } if (base->event_break) { break; } tv_p = &tv; if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) { timeout_next(base, &tv_p); } else { /* * if we have active events, we just poll new events * without waiting. 
*/ evutil_timerclear(&tv); } /* If we have no events, we just exit */ if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) && !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) { event_debug(("%s: no events registered.", __func__)); retval = 1; goto done; } event_queue_make_later_events_active(base); clear_time_cache(base); 调用复用IO的函数//该函数的内部会解锁,然后调用OS提供的的多路IO复用函数。 //这个函数退出后,又会立即加锁。这有点像条件变量。 res = evsel->dispatch(base, tv_p); if (res == -1) { event_debug(("%s: dispatch returned unsuccessfully.", __func__)); retval = -1; goto done; } update_time_cache(base); timeout_process(base); if (N_ACTIVE_CALLBACKS(base)) { int n = event_process_active(base); if ((flags & EVLOOP_ONCE) && N_ACTIVE_CALLBACKS(base) == 0 && n != 0) done = 1; } else if (flags & EVLOOP_NONBLOCK) done = 1; } event_debug(("%s: asked to terminate loop.", __func__)); done: clear_time_cache(base); base->running_loop = 0; EVBASE_RELEASE_LOCK(base, th_base_lock); return (retval); }
static void event_queue_make_later_events_active(struct event_base *base) { struct event_callback *evcb; EVENT_BASE_ASSERT_LOCKED(base); while ((evcb = TAILQ_FIRST(&base->active_later_queue))) { TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next); evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE; EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues); TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next); base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF); } }上面代码中有两个函数evsel->dispatch和event_process_active。前一个将调用多路IO复用函数,对event进行监听,并且把满足条件的event放到event_base的激活队列中。后一个则遍历这个激活队列的所有event,逐个调用对应的回调函数。
以poll为例:
static int poll_dispatch(struct event_base *base, struct timeval *tv) { int res, i, j, nfds; long msec = -1; struct pollop *pop = base->evbase; struct pollfd *event_set; poll_check_ok(pop); nfds = pop->nfds; #ifndef EVENT__DISABLE_THREAD_SUPPORT if (base->th_base_lock) { /* If we're using this backend in a multithreaded setting, * then we need to work on a copy of event_set, so that we can * let other threads modify the main event_set while we're * polling. If we're not multithreaded, then we'll skip the * copy step here to save memory and time. */ if (pop->realloc_copy) { struct pollfd *tmp = mm_realloc(pop->event_set_copy, pop->event_count * sizeof(struct pollfd)); if (tmp == NULL) { event_warn("realloc"); return -1; } pop->event_set_copy = tmp; pop->realloc_copy = 0; } memcpy(pop->event_set_copy, pop->event_set, sizeof(struct pollfd)*nfds); event_set = pop->event_set_copy; } else { event_set = pop->event_set; } #else event_set = pop->event_set; #endif if (tv != NULL) { msec = evutil_tv_to_msec_(tv); if (msec < 0 || msec > INT_MAX) msec = INT_MAX; } 解锁 EVBASE_RELEASE_LOCK(base, th_base_lock); res = poll(event_set, nfds, msec); 加锁 EVBASE_ACQUIRE_LOCK(base, th_base_lock); if (res == -1) { if (errno != EINTR) { event_warn("poll"); return (-1); } return (0); } event_debug(("%s: poll reports %d", __func__, res)); if (res == 0 || nfds == 0) return (0); i = evutil_weakrand_range_(&base->weakrand_seed, nfds); for (j = 0; j < nfds; j++) { int what; if (++i == nfds) i = 0; what = event_set[i].revents; if (!what) continue; res = 0; //如果fd发生错误,就把之当作读和写事件。之后调用read //或者write时,就能得知具体是什么错误了。这里的作用是 //通知到上层。 /* If the file gets closed notify */ if (what & (POLLHUP|POLLERR)) what |= POLLIN|POLLOUT; if (what & POLLIN) res |= EV_READ; if (what & POLLOUT) res |= EV_WRITE; if (res == 0) continue; 加入到激活队列 evmap_io_active_(base, event_set[i].fd, res); } return (0); }
void evmap_io_active_(struct event_base *base, evutil_socket_t fd, short events) { struct event_io_map *io = &base->io; struct evmap_io *ctx; struct event *ev; #ifndef EVMAP_USE_HT if (fd < 0 || fd >= io->nentries) return; #endif GET_IO_SLOT(ctx, io, fd, evmap_io); //由这个fd找到对应event_map_entry的TAILQ_HEAD. if (NULL == ctx) return; LIST_FOREACH(ev, &ctx->events, ev_io_next) {//遍历这个队列。将所有与fd相关联的event结构体都处理一遍 if (ev->ev_events & events) event_active_nolock_(ev, ev->ev_events & events, 1); }
void event_active_nolock_(struct event *ev, int res, short ncalls) { struct event_base *base; event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p", ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback)); base = ev->ev_base; EVENT_BASE_ASSERT_LOCKED(base); if (ev->ev_flags & EVLIST_FINALIZING) { /* XXXX debug */ return; } switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) { default: case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER: EVUTIL_ASSERT(0); break; case EVLIST_ACTIVE: /* We get different kinds of events, add them together */ ev->ev_res |= res; return; case EVLIST_ACTIVE_LATER: ev->ev_res |= res; break; case 0: ev->ev_res = res; break; } if (ev->ev_pri < base->event_running_priority) base->event_continue = 1; if (ev->ev_events & EV_SIGNAL) { #ifndef EVENT__DISABLE_THREAD_SUPPORT if (base->current_event == event_to_event_callback(ev) && !EVBASE_IN_THREAD(base)) { ++base->current_event_waiters; EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock); } #endif ev->ev_ncalls = ncalls; ev->ev_pncalls = NULL; } event_callback_activate_nolock_(base, event_to_event_callback(ev)); }
int event_callback_activate_nolock_(struct event_base *base, struct event_callback *evcb) { int r = 1; if (evcb->evcb_flags & EVLIST_FINALIZING) return 0; switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) { default: EVUTIL_ASSERT(0); case EVLIST_ACTIVE_LATER: event_queue_remove_active_later(base, evcb); r = 0; break; case EVLIST_ACTIVE: return 0; case 0: break; } event_queue_insert_active(base, evcb); 插入激活队列 if (EVBASE_NEED_NOTIFY(base)) 不是主线程 是次线程 就通知主线程 evthread_notify_base(base); return r; }
/* * Active events are stored in priority queues. Lower priorities are always * process before higher priorities. Low priority events can starve high * priority ones. */ static int event_process_active(struct event_base *base) { /* Caller must hold th_base_lock */ struct evcallback_list *activeq = NULL; int i, c = 0; const struct timeval *endtime; struct timeval tv; const int maxcb = base->max_dispatch_callbacks; const int limit_after_prio = base->limit_callbacks_after_prio; if (base->max_dispatch_time.tv_sec >= 0) { update_time_cache(base); gettime(base, &tv); evutil_timeradd(&base->max_dispatch_time, &tv, &tv); endtime = &tv; } else { endtime = NULL; } //从高优先级到低优先级遍历优先级数组 for (i = 0; i < base->nactivequeues; ++i) { if (TAILQ_FIRST(&base->activequeues[i]) != NULL) { base->event_running_priority = i; activeq = &base->activequeues[i]; if (i < limit_after_prio) c = event_process_active_single_queue(base, activeq, INT_MAX, NULL); else c = event_process_active_single_queue(base, activeq, maxcb, endtime); if (c < 0) { goto done; } else if (c > 0) break; /* Processed a real event; do not * consider lower-priority events */ /* If we get here, all of the events we processed * were internal. Continue. */ } } done: base->event_running_priority = -1; return c; }
static int event_process_active_single_queue(struct event_base *base, struct evcallback_list *activeq, int max_to_process, const struct timeval *endtime) { struct event_callback *evcb; int count = 0; EVUTIL_ASSERT(activeq != NULL); for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) { struct event *ev=NULL; if (evcb->evcb_flags & EVLIST_INIT) { ev = event_callback_to_event(evcb); if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING) event_queue_remove_active(base, evcb); else event_del_nolock_(ev, EVENT_DEL_NOBLOCK); event_debug(( "event_process_active: event: %p, %s%s%scall %p", ev, ev->ev_res & EV_READ ? "EV_READ " : " ", ev->ev_res & EV_WRITE ? "EV_WRITE " : " ", ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ", ev->ev_callback)); } else { event_queue_remove_active(base, evcb); event_debug(("event_process_active: event_callback %p, " "closure %d, call %p", evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback)); } if (!(evcb->evcb_flags & EVLIST_INTERNAL)) ++count; base->current_event = evcb; #ifndef EVENT__DISABLE_THREAD_SUPPORT base->current_event_waiters = 0; #endif switch (evcb->evcb_closure) { case EV_CLOSURE_EVENT_SIGNAL: EVUTIL_ASSERT(ev != NULL); event_signal_closure(base, ev);处理回调函数 break; case EV_CLOSURE_EVENT_PERSIST: EVUTIL_ASSERT(ev != NULL); event_persist_closure(base, ev); break; case EV_CLOSURE_EVENT: { void (*evcb_callback)(evutil_socket_t, short, void *); EVUTIL_ASSERT(ev != NULL); evcb_callback = *ev->ev_callback; EVBASE_RELEASE_LOCK(base, th_base_lock); evcb_callback(ev->ev_fd, ev->ev_res, ev->ev_arg);回调函数 } break; case EV_CLOSURE_CB_SELF: { void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb; EVBASE_RELEASE_LOCK(base, th_base_lock); evcb_selfcb(evcb, evcb->evcb_arg); } break; case EV_CLOSURE_EVENT_FINALIZE: case EV_CLOSURE_EVENT_FINALIZE_FREE: { void (*evcb_evfinalize)(struct event *, void *); int evcb_closure = evcb->evcb_closure; EVUTIL_ASSERT(ev != NULL); 
base->current_event = NULL; evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize; EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING)); EVBASE_RELEASE_LOCK(base, th_base_lock); evcb_evfinalize(ev, ev->ev_arg); event_debug_note_teardown_(ev); if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE) mm_free(ev); } break; case EV_CLOSURE_CB_FINALIZE: { void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize; base->current_event = NULL; EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING)); EVBASE_RELEASE_LOCK(base, th_base_lock); evcb_cbfinalize(evcb, evcb->evcb_arg); } break; default: EVUTIL_ASSERT(0); } EVBASE_ACQUIRE_LOCK(base, th_base_lock); base->current_event = NULL; #ifndef EVENT__DISABLE_THREAD_SUPPORT if (base->current_event_waiters) { base->current_event_waiters = 0; EVTHREAD_COND_BROADCAST(base->current_event_cond); } #endif if (base->event_break) return -1; if (count >= max_to_process) return count; if (count && endtime) { struct timeval now; update_time_cache(base); gettime(base, &now); if (evutil_timercmp(&now, endtime, >=)) return count; } if (base->event_continue) break; } return count; }
调用event_queue_remove系列函数会改变event结构体ev_flags变量的值。调用之后，ev_flags变量为EVLIST_INIT | EVLIST_INSERTED，该event又可以等待下一次事件的到来了。
};
TAILQ_ENTRY(event_callback) evcb_active_next;
//本event的优先级。调用event_priority_set设置
};
/* Convenience accessors: struct event embeds a struct event_callback as
 * ev_evcallback; these macros expose its fields under the ev_* names used
 * throughout the event code. */
#define ev_pri ev_evcallback.evcb_pri
#define ev_flags ev_evcallback.evcb_flags
#define ev_closure ev_evcallback.evcb_closure
#define ev_callback ev_evcallback.evcb_cb_union.evcb_callback
#define ev_arg ev_evcallback.evcb_arg
优先级设置
int event_base_priority_init(struct event_base *base, int npriorities) { int i, r; r = -1; EVBASE_ACQUIRE_LOCK(base, th_base_lock); //由N_ACTIVE_CALLBACKS宏可以知道,本函数应该要在event_base_dispatch //函数调用前调用。不然将无法设置。 if (N_ACTIVE_CALLBACKS(base) || npriorities < 1 || npriorities >= EVENT_MAX_PRIORITIES) goto err; if (npriorities == base->nactivequeues)/之前和现在要设置的优先级数是一样的。 goto ok; if (base->nactivequeues) { mm_free(base->activequeues); base->nactivequeues = 0; } /* Allocate our priority queues */ //分配一个优先级数组。 base->activequeues = (struct evcallback_list *) mm_calloc(npriorities, sizeof(struct evcallback_list)); if (base->activequeues == NULL) { event_warn("%s: calloc", __func__); goto err; } base->nactivequeues = npriorities; for (i = 0; i < base->nactivequeues; ++i) { TAILQ_INIT(&base->activequeues[i]); } ok: r = 0; err: EVBASE_RELEASE_LOCK(base, th_base_lock); return (r); }
int event_priority_set(struct event *ev, int pri) { event_debug_assert_is_setup_(ev); if (ev->ev_flags & EVLIST_ACTIVE) return (-1); if (pri < 0 || pri >= ev->ev_base->nactivequeues)//优先级不能越界 return (-1); #define ev_pri ev_evcallback.evcb_pri ev->ev_pri = pri; 新的版本有个宏替换 //pri值越小,其优先级就越高。 return (0); }
在 event_callback_activate_nolock_(struct event_base *base, struct event_callback *evcb) 中会调用 evthread_notify_base：
struct event_base {/** Tell the thread currently running the event_loop for base (if any) that it * needs to stop waiting in its dispatch function (if it is) and process all * active callbacks. */ static int evthread_notify_base(struct event_base *base) { EVENT_BASE_ASSERT_LOCKED(base);加锁 if (!base->th_notify_fn) return -1; if (base->is_notify_pending) //写入一个字节,就能使event_base被唤醒。 //如果处于未决状态,就没必要写多一个字节 return 0; base->is_notify_pending = 1; return base->th_notify_fn(base); }在event_base_new_with_config(event_base_new会调用该函数)里面会调用evthread_make_base_notifiable函数,其中会设置
th_notify_fn函数指针;这里以evthread_notify_base_default作为默认值。这个evthread_notify_base_default完成实际的通知操作
同时也会设置回调函数;
/* Helper callback: wake an event_base from another thread. This version * works by writing a byte to one end of a socketpair, so that the event_base * listening on the other end will wake up as the corresponding event * triggers */ static int evthread_notify_base_default(struct event_base *base) { char buf[1]; int r; buf[0] = (char) 0; #ifdef _WIN32 //通知一下,用来唤醒。写一个字节足矣 r = send(base->th_notify_fd[1], buf, 1, 0); #else r = write(base->th_notify_fd[1], buf, 1); #endif return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0; //即使errno 等于 EAGAIN也无所谓,因为这是由于通信通道已经塞满了 //这已经能唤醒主线程了。没必要一定要再写入一个字节 }
只是往管道里面写入一个字节。当然这已经能使得event_base检测到管道可读,从而实现唤醒event_base。
往管道写入一个字节,event_base就会被唤醒,然后调用这个管道对应event的回调函数。当然,在event_base醒来的时候,还能看到其他东西。这也是Libevent提供唤醒功能的原因。
回调函数static void evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg) { unsigned char buf[1024]; struct event_base *base = arg; #ifdef _WIN32 while (recv(fd, (char*)buf, sizeof(buf), 0) > 0) ; #else while (read(fd, (char*)buf, sizeof(buf)) > 0) ; #endif EVBASE_ACQUIRE_LOCK(base, th_base_lock); base->is_notify_pending = 0; //修改之,使得其不再是未决的了。当然这也能让其他线程可以再次唤醒值。参看evthread_notify_base函数 EVBASE_RELEASE_LOCK(base, th_base_lock); }