Dissecting the Linux epoll Source
Linux kernel version: 2.6.34
Background worth having before reading the epoll source:
- wait queues
- the file system (mainly the per-process open file descriptor table and struct file)
- the poll mechanism
  - registering a watch on a resource:
    poll() -> poll_wait(struct file *, wait_queue_head_t *, poll_table *pt) -> pt->qproc(struct file *, wait_queue_head_t *, poll_table *)
  - readiness notification:
    callback_function(wait_queue_t *, unsigned mode, int sync, void *key)
- epoll's main data structures (a minimal userspace sketch follows this list)
  - one epoll instance corresponds to one struct eventpoll (referenced from userspace through the epoll fd)
  - one watched event corresponds to one struct epitem (epoll_ctl() operates on epitems)
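Before the kernel side, a minimal userspace sketch of the API being traced; it watches stdin for readability (error checks trimmed for brevity):

#include <stdio.h>
#include <unistd.h>
#include <sys/epoll.h>

int main(void)
{
	int i, n, epfd;
	struct epoll_event ev, ready[8];

	epfd = epoll_create1(0);                /* one struct eventpoll */
	ev.events = EPOLLIN;                    /* watched event mask */
	ev.data.fd = STDIN_FILENO;              /* echoed back on readiness */
	/* one struct epitem, keyed by the (file, fd) pair */
	epoll_ctl(epfd, EPOLL_CTL_ADD, STDIN_FILENO, &ev);
	n = epoll_wait(epfd, ready, 8, -1);     /* block until something is ready */
	for (i = 0; i < n; ++i)
		fprintf(stderr, "fd %d ready: 0x%x\n", ready[i].data.fd, ready[i].events);
	close(epfd);
	return 0;
}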
First, a passage from《追踪Linux Tcp/Ip代码运行:基于2.6内核》(Tracing the Linux TCP/IP Code: Based on the 2.6 Kernel):
Consider: when writing a program, does a programmer define the structures first or write the functions first? There are two possible answers. One is to write the functions first, letting their flow produce the need for structures, which are then defined. The other is to define the structures first according to a specification, as with the TCP and IP header structures mandated by the protocols, and write the functions afterwards. The two answers are opposites, yet if you consider where protocols come from, they too were distilled from practice, which again leads from practice to theory.
Sometimes we need to stand in the programmer's shoes to understand what a structure is for and how it is defined, reasoning backwards about why it came to exist and how it is used. This not only improves our ability to read and understand code, it also strengthens logical reasoning, so that we can face any piece of code calmly and handle it with ease.
Structures
/*
* This structure is stored inside the "private_data" member of the file
* structure and rapresent the main data sructure for the eventpoll
* interface.
*/
struct eventpoll {
/* Protect the this structure access */
spinlock_t lock;
/*
* This mutex is used to ensure that files are not removed
* while epoll is using them. This is held during the event
* collection loop, the file cleanup path, the epoll file exit
* code and the ctl operations.
*/
struct mutex mtx;
/* Wait queue used by sys_epoll_wait() */
/* users blocked in epoll_wait() on this epoll instance are chained on this wait queue */
wait_queue_head_t wq;
/* Wait queue used by file->poll() */
/* an epoll file can itself be watched by another epoll_wait() */
wait_queue_head_t poll_wait;
/* List of ready file descriptors */
/* list of epitems that are already ready */
struct list_head rdllist;
/* RB tree root used to store monitored fd structs */
/* stores the epitems */
struct rb_root rbr;
/*
* This is a single linked list that chains all the "struct epitem" that
* happened while transfering ready events to userspace w/out
* holding ->lock.
*/
/* see the comments in ep_poll_callback() and ep_scan_ready_list() */
struct epitem *ovflist;
/* The user that created the eventpoll descriptor */
/* the user that created this epoll instance */
struct user_struct *user;
};
/*
* Each file descriptor added to the eventpoll interface will
* have an entry of this type linked to the "rbr" RB tree.
*/
struct epitem {
/* RB tree node used to link this structure to the eventpoll RB tree */
/* mount point in eventpoll's internal red-black tree */
struct rb_node rbn;
/* List header used to link this structure to the eventpoll ready list */
/* every readied epitem is chained onto its eventpoll's rdllist */
struct list_head rdllink;
/*
* Works together "struct eventpoll"->ovflist in keeping the
* single linked chain of items.
*/
/* used together with eventpoll->ovflist */
struct epitem *next;
/* The file descriptor information this item refers to */
/*
 * serves as the key of this node in eventpoll's red-black tree
 */
struct epoll_filefd ffd;
/* Number of active wait queue attached to poll operations */
/* number of wait queues this watch is hooked onto */
/* yes, one epitem can be hooked onto several wait queues at once:
 * a file's f_op->poll() may call poll_wait() more than once */
int nwait;
/* List containing poll wait queues */
/* chains the eppoll_entry structures belonging to this epitem */
struct list_head pwqlist;
/* The "container" of this item */
/* the eventpoll this epitem belongs to */
struct eventpoll *ep;
/* List header used to link this item to the "struct file" items list */
/* chains this epitem into the watched struct file's list */
struct list_head fllink;
/* The structure that describe the interested events and the source fd */
/* data passed from userspace via epoll_ctl(): the events this epitem watches */
struct epoll_event event;
};
struct epoll_filefd {
struct file *file;
int fd;
};
struct epoll_event {
__u32 events;
__u64 data;
};
/* Wrapper struct used by poll queueing */
struct ep_pqueue {
poll_table pt;
struct epitem *epi;
};
/*
* structures and helpers for f_op->poll implementations
*/
typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);
typedef struct poll_table_struct {
poll_queue_proc qproc;
unsigned long key;
} poll_table;
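For reference, qproc is installed through init_poll_funcptr(), a small helper from include/linux/poll.h in this kernel era; ep_insert() below relies on it:

static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
{
	pt->qproc = qproc;
	pt->key = ~0UL; /* all events enabled */
}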
/* Wait structure used by the poll hooks */
/* the hook structure mounted on a watched file's wait queue */
struct eppoll_entry {
/* List header used to link this structure to the "struct epitem" */
/* chains this structure to its owning epitem */
struct list_head llink;
/* The "base" pointer is set to the container "struct epitem" */
/* points to the owning epitem */
/*
 * Why keep base when llink already links back to the epitem? Because
 * the wakeup side starts from the wait member alone: container_of()
 * recovers the eppoll_entry, and base then yields the epitem; see
 * ep_item_from_wait() quoted after this struct.
 */
struct epitem *base;
/*
* Wait queue item that will be linked to the target file wait
* queue head.
*/
/* the wait queue node mounted on the watched file's wait queue */
wait_queue_t wait;
/* The wait queue head that linked the "wait" wait queue item */
/* head of the wait queue that holds the "wait" node */
wait_queue_head_t *whead;
};
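That wait-side recovery is done by a small helper in fs/eventpoll.c (quoted for reference): given only the wait_queue_t, container_of() yields the eppoll_entry, and base then yields the epitem.

/* Get the "struct epitem" from a wait queue pointer */
static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
{
	return container_of(p, struct eppoll_entry, wait)->base;
}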
/* Used by the ep_send_events() function as callback private data */
struct ep_send_events_data {
int maxevents;
struct epoll_event __user *events;
};
Call chains:
sys_epoll_create() -> sys_epoll_create1() -> ep_alloc()
-> anon_inode_getfd()
sys_epoll_ctl(EPOLL_CTL_ADD) -> ep_insert() -> f_op->poll() -> poll_wait() -> ep_ptable_queue_proc()
-> ep_rbtree_insert()
-> wake_up
sys_epoll_ctl(EPOLL_CTL_DEL) -> ep_remove() -> ep_unregister_pollwait()
-> rb_erase()
sys_epoll_ctl(EPOLL_CTL_MOD) -> ep_modify() -> f_op->poll()
-> wake_up
sys_epoll_wait() -> ep_poll() -> block
-> ep_send_events() -> ep_scan_ready_list() -> ep_send_events_proc()
-> wake_up
ep_poll_callback() -> wake_up
epoll_create()
SYSCALL_DEFINE1(epoll_create, int, size)
{
if (size <= 0)
return -EINVAL;
/* delegate the actual creation of the epoll instance to sys_epoll_create1() */
return sys_epoll_create1(0);
}
/*
* Open an eventpoll file descriptor.
*/
SYSCALL_DEFINE1(epoll_create1, int, flags)
{
int error;
struct eventpoll *ep = NULL;
/* Check the EPOLL_* constant for consistency. */
BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
/* only the EPOLL_CLOEXEC flag is honored */
if (flags & ~EPOLL_CLOEXEC)
return -EINVAL;
/*
* Create the internal data structure ("struct eventpoll").
*/
/* allocate and initialize a struct eventpoll */
error = ep_alloc(&ep);
if (error < 0)
return error;
/*
* Creates all the items needed to setup an eventpoll file. That is,
* a file structure and a free file descriptor.
*/
/*
 * Allocate an (inode, dentry, file) triple from the anon_inode_mnt
 * filesystem, then map the file to a descriptor and install it in the
 * current process's file descriptor table (fdtable).
 *
 * The anon_inode filesystem has no disk image behind it, much like a
 * socket has no real disk file; files allocated from this anonymous
 * filesystem exist mainly to map a resource to a file descriptor...
 *
 * Once the file is allocated, the eventpoll is attached to its
 * private_data member, so the eventpoll can later be recovered from
 * the file descriptor.
 *
 * The operations the file supports are given by eventpoll_fops; note
 * that it only supports release and poll, where release() destroys and
 * frees the attached eventpoll when the file itself is torn down.
 */
error = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep,
O_RDWR | (flags & O_CLOEXEC));
if (error < 0)
ep_free(ep);
return error;
}
static int ep_alloc(struct eventpoll **pep)
{
int error;
struct user_struct *user;
struct eventpoll *ep;
/* get the user info of the current user context */
user = get_current_user();
error = -ENOMEM;
/* allocate and zero an eventpoll structure (kzalloc) */
ep = kzalloc(sizeof(*ep), GFP_KERNEL);
if (unlikely(!ep))
goto free_uid;
/* initialization */
spin_lock_init(&ep->lock);
mutex_init(&ep->mtx);
init_waitqueue_head(&ep->wq);
init_waitqueue_head(&ep->poll_wait);
INIT_LIST_HEAD(&ep->rdllist);
/* an empty red-black tree */
ep->rbr = RB_ROOT;
/* note: marked "not on the ovflist" (see the EP_UNACTIVE_PTR definition quoted after this function) */
ep->ovflist = EP_UNACTIVE_PTR;
ep->user = user;
*pep = ep;
return 0;
free_uid:
free_uid(user);
return error;
}
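The EP_UNACTIVE_PTR sentinel assigned to ->ovflist above (and to epi->next in ep_insert() below) is defined near the top of fs/eventpoll.c; it marks an epitem as not currently chained on the ovflist:

#define EP_UNACTIVE_PTR ((void *) -1L)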
epoll_ctl()
/*
* The following function implements the controller interface for
* the eventpoll file that enables the insertion/removal/change of
* file descriptors inside the interest set.
*/
SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
struct epoll_event __user *, event)
{
int error;
struct file *file, *tfile;
struct eventpoll *ep;
struct epitem *epi;
struct epoll_event epds;
error = -EFAULT;
/* validate the arguments and copy the epoll_event from user space into kernel space */
if (ep_op_has_event(op) &&
copy_from_user(&epds, event, sizeof(struct epoll_event)))
goto error_return;
/* Get the "struct file *" for the eventpoll file */
error = -EBADF;
/* get the struct file behind the eventpoll descriptor */
file = fget(epfd);
if (!file)
goto error_return;
/* Get the "struct file *" for the target file */
/* get the struct file behind the target descriptor to be watched */
tfile = fget(fd);
if (!tfile)
goto error_fput;
/* The target file descriptor must support poll */
error = -EPERM;
/* the file to be watched must support poll() */
if (!tfile->f_op || !tfile->f_op->poll)
goto error_tgt_fput;
/*
* We have to check that the file structure underneath the file descriptor
* the user passed to us _is_ an eventpoll file. And also we do not permit
* adding an epoll file descriptor inside itself.
*/
error = -EINVAL;
/*
 * 1. An epoll instance must not watch itself, otherwise a ready event
 *    would trigger an endless notification loop...
 * 2. Verify that epfd really refers to an epoll file; much of the
 *    kernel's file-type checking works exactly like this, by looking
 *    at the file's operation table (see is_file_epoll() quoted below)...
 */
if (file == tfile || !is_file_epoll(file))
goto error_tgt_fput;
/*
* At this point it is safe to assume that the "private_data" contains
* our own data structure.
*/
/* fetch the eventpoll attached to the epoll file */
ep = file->private_data;
/* take the mutex: keeps an epitem from being removed asynchronously while we hold it */
mutex_lock(&ep->mtx);
/*
* Try to lookup the file inside our RB tree, Since we grabbed "mtx"
* above, we can be sure to be able to use the item looked up by
* ep_find() till we release the mutex.
*/
/*
 * eventpoll stores its watches (epitems) in a red-black tree keyed by
 * the (file, fd) pair.
 *
 * ep_find() runs the binary search for the epitem matching (file, fd).
 */
epi = ep_find(ep, tfile, fd);
error = -EINVAL;
/* perform the requested operation op */
/* note: the ep_insert(), ep_remove() and ep_modify() call chains all run under the mtx lock */
switch (op) {
case EPOLL_CTL_ADD:
if (!epi) {
/* epoll always watches POLLERR and POLLHUP */
epds.events |= POLLERR | POLLHUP;
error = ep_insert(ep, &epds, tfile, fd);
} else
error = -EEXIST;
break;
case EPOLL_CTL_DEL:
if (epi)
error = ep_remove(ep, epi);
else
error = -ENOENT;
break;
case EPOLL_CTL_MOD:
if (epi) {
/* epoll always watches POLLERR and POLLHUP */
epds.events |= POLLERR | POLLHUP;
error = ep_modify(ep, epi, &epds);
} else
error = -ENOENT;
break;
}
mutex_unlock(&ep->mtx);
error_tgt_fput:
fput(tfile);
error_fput:
fput(file);
error_return:
return error;
}
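The two small predicates used above live in fs/eventpoll.c (quoted for reference); note how is_file_epoll() identifies an epoll file purely by its operation table:

/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
static inline int ep_op_has_event(int op)
{
	return op != EPOLL_CTL_DEL;
}

static inline int is_file_epoll(struct file *f)
{
	return f->f_op == &eventpoll_fops;
}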
/*
* Search the file inside the eventpoll tree. The RB tree operations
* are protected by the "mtx" mutex, and ep_find() must be called with
* "mtx" held.
*/
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
int kcmp;
struct rb_node *rbp;
struct epitem *epi, *epir = NULL;
struct epoll_filefd ffd;
/* wrap the (file, fd) pair in an epoll_filefd structure to form the key */
ep_set_ffd(&ffd, file, fd);
/* binary search for the watched epitem */
for (rbp = ep->rbr.rb_node; rbp; ) {
epi = rb_entry(rbp, struct epitem, rbn);
kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
if (kcmp > 0)
rbp = rbp->rb_right;
else if (kcmp < 0)
rbp = rbp->rb_left;
else {
epir = epi;
break;
}
}
return epir;
}
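The key comparison orders nodes by the struct file pointer first, falling back to the fd (from the same file, quoted for reference):

static inline int ep_cmp_ffd(struct epoll_filefd *p1,
			     struct epoll_filefd *p2)
{
	return (p1->file > p2->file ? +1 :
	        (p1->file < p2->file ? -1 : p1->fd - p2->fd));
}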
/*
* Must be called with "mtx" held.
*/
static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
struct file *tfile, int fd)
{
int error, revents, pwake = 0;
unsigned long flags;
struct epitem *epi;
struct ep_pqueue epq;
/* enforce the per-user resource limit on watches */
if (unlikely(atomic_read(&ep->user->epoll_watches) >=
max_user_watches))
return -ENOSPC;
/* allocate an epitem from the slab cache */
if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
return -ENOMEM;
/* Item initialization follow here ... */
/* initialize the freshly allocated epitem */
INIT_LIST_HEAD(&epi->rdllink);
INIT_LIST_HEAD(&epi->fllink);
INIT_LIST_HEAD(&epi->pwqlist);
epi->ep = ep;
ep_set_ffd(&epi->ffd, tfile, fd);
epi->event = *event;
epi->nwait = 0;
/* note: starts out not chained on the ovflist */
epi->next = EP_UNACTIVE_PTR;
/* Initialize the poll table using the queue callback */
/*
 * Note:
 * the epitem and the poll_table are wrapped in one structure so that,
 * when the watch is registered with the resource later on, the
 * poll_table can be mapped back to its epitem.
 */
/* hook the epitem into this ep_pqueue */
epq.epi = epi;
/*
 * Initialize the poll_table inside ep_pqueue:
 * 1. set the registration function to ep_ptable_queue_proc
 * 2. set the watched event mask to all events
 *
 * Careful: many blog posts, and even the original source comments,
 * call this registration function a callback... it does not call
 * anything back, so don't be misled...
 */
init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
/*
* Attach the item to the poll hooks and get current event bits.
* We can safely use the file* here because its usage count has
* been increased by the caller of this function. Note that after
* this operation completes, the poll callback can start hitting
* the new item.
*/
/* NOTE THAT:
 * for comfortable reading, the details below are not worth memorizing...
 * it is enough to know what this call ultimately accomplishes...
 */
/*
 * Only special files such as pipes and sockets support poll(); regular
 * files on ext2/ext3/ext4 block-device filesystems do not, because
 * such files do not support blocking reads: if there is data it is
 * returned, otherwise 0 is returned to mean end-of-file...
 *
 * Take an IPv4 TCP socket as the example:
 * 1. sys_socketcall() -> sys_socket() -> sock_create() ->
 *    __sock_create() -> net_families[PF_INET]->create() ==>
 *    inet_create(): socket->ops = &inet_stream_ops
 * 2. sys_socketcall() -> sys_socket() -> sock_map_fd() ->
 *    sock_alloc_file() -> alloc_file():
 *    file->f_op = &socket_file_ops
 * When socket(PF_INET, SOCK_STREAM, 0) creates the socket, the
 * protocol type makes the socket's ops end up at inet_stream_ops,
 * whose poll is tcp_poll; when the socket is later bound to a file,
 * the file's f_op is set to socket_file_ops, whose poll is sock_poll.
 *
 * 3. [the code below] tfile->f_op->poll() ==>
 *    socket_file_ops.poll() ==> sock_poll() ->
 *    socket->ops->poll() ==> tcp_poll() ->
 *    sock_poll_wait() -> poll_wait()
 * When we poll() the file behind the socket, the socket-specific
 * poll() runs, and the chain in step 3 eventually reaches poll_wait().
 *
 * 4. [the code below] poll_wait() -> epq.pt.qproc() ==>
 *    ep_ptable_queue_proc()
 * poll_wait() invokes the proc stored in the poll_table we handed it,
 * i.e. the ep_ptable_queue_proc installed by init_poll_funcptr() above.
 *
 * After all that, only step 4 really matters...
 * The kernel is layered this way to stay extensible...
 */
/*
 * What it ultimately does:
 * wrap the eventpoll's watch (the epitem) in an eppoll_entry and mount
 * it on the resource file's wait queue. When the file becomes ready it
 * invokes the callback of every node on that queue, thereby notifying
 * the watchers...
 */
/*
 * f_op->poll() also returns the file's current readiness state
 */
revents = tfile->f_op->poll(tfile, &epq.pt);
/*
* We have to check if something went wrong during the poll wait queue
* install process. Namely an allocation for a wait queue failed due
* high memory pressure.
*/
error = -ENOMEM;
if (epi->nwait < 0)
goto error_unregister;
/* Add the current item to the list of active epoll hook for this file */
/* spinlock: protects access to the struct file */
spin_lock(&tfile->f_lock);
/*
 * Link the epitem to the file it watches:
 * struct file's f_ep_links field chains every epitem watching the file.
 */
list_add_tail(&epi->fllink, &tfile->f_ep_links);
spin_unlock(&tfile->f_lock);
/*
* Add the current item to the RB tree. All RB tree operations are
* protected by "mtx", and ep_insert() is called with "mtx" held.
*/
/* insert the epitem into eventpoll's red-black tree */
/*
 * Why ep->lock is not needed here: the tree is only modified by
 * 1. epoll_ctl() -> ep_insert()
 * 2. epoll_ctl() -> ep_remove()
 * 3. eventpoll_release_file() -> ep_remove()
 * and all three take ep->mtx before touching the tree, so taking
 * ep->lock as well would be redundant.
 */
ep_rbtree_insert(ep, epi);
/* We have to drop the new item inside our item list to keep track of it */
/* spinlock: protects access to the eventpoll */
spin_lock_irqsave(&ep->lock, flags);
/* If the file is already "ready" we drop it inside the ready list */
/*
 * If the file's current state revents already contains events we care
 * about, link this epitem onto the eventpoll's ready list.
 */
/*
 * The epitem may already have been put on the ready list by an
 * asynchronous ep_poll_callback() invocation... that is why the
 * !ep_is_linked(&epi->rdllink) check is needed.
 */
if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
list_add_tail(&epi->rdllink, &ep->rdllist);
/* Notify waiting tasks that events are available */
/* wake up users sleeping in epoll_wait() on this epoll instance */
if (waitqueue_active(&ep->wq))
wake_up_locked(&ep->wq);
/* the epoll file itself is now ready */
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
spin_unlock_irqrestore(&ep->lock, flags);
/* update the user's watch count */
atomic_inc(&ep->user->epoll_watches);
/* We have to call this outside the lock */
if (pwake)
ep_poll_safewake(&ep->poll_wait);
return 0;
error_unregister:
ep_unregister_pollwait(ep, epi);
/*
* We need to do this because an event could have been arrived on some
* allocated wait queue. Note that we don't care about the ep->ovflist
* list, since that is used/cleaned only inside a section bound by "mtx".
* And ep_insert() is called with "mtx" held.
*/
spin_lock_irqsave(&ep->lock, flags);
if (ep_is_linked(&epi->rdllink))
list_del_init(&epi->rdllink);
spin_unlock_irqrestore(&ep->lock, flags);
kmem_cache_free(epi_cache, epi);
return error;
}
static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
{
if (p && wait_address)
p->qproc(filp, wait_address, p);
}
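To make qproc concrete, here is a minimal sketch of a typical f_op->poll implementation; the mydev type, its read_wq field and data_available() are invented for illustration, but the shape matches real drivers: register on the wait queue via poll_wait(), then report the current state.

static unsigned int mydev_poll(struct file *file, poll_table *pt)
{
	struct mydev *dev = file->private_data;   /* hypothetical device */
	unsigned int mask = 0;

	/* hook the caller onto the device's wait queue; under epoll this
	 * runs ep_ptable_queue_proc(); nothing blocks here */
	poll_wait(file, &dev->read_wq, pt);

	if (data_available(dev))                  /* hypothetical helper */
		mask |= POLLIN | POLLRDNORM;
	return mask;
}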
/*
* This is the callback that is used to add our wait queue to the
* target file wakeup lists.
*/
/**
 * ep_ptable_queue_proc - mount the epitem on the watched file's wait queue
 * @file: the watched file
 * @whead: the watched file's wait queue head
 * @pt: the poll_table set up in ep_insert()
 */
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
poll_table *pt)
{
/* recover the epitem from the poll_table (via the ep_pqueue wrapper) */
struct epitem *epi = ep_item_from_epqueue(pt);
struct eppoll_entry *pwq;
/* allocate an eppoll_entry from the slab cache, then initialize it */
if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
/*
 * Initialize a wait queue node whose wake function is ep_poll_callback.
 *
 * This is the key point!!!
 * The wakeup "callback" is ep_poll_callback!!!
 */
init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
/* also record whead, the head of the watched file's wait queue */
pwq->whead = whead;
pwq->base = epi;
/* mount the eppoll_entry on the watched file's wait queue */
add_wait_queue(whead, &pwq->wait);
/*
 * Associate the eppoll_entry with its epitem...
 * A list is used because a file's f_op->poll() may call poll_wait()
 * on more than one wait queue head; for most files (e.g. sockets) it
 * is called once, so epitem and eppoll_entry are usually 1:1.
 */
list_add_tail(&pwq->llink, &epi->pwqlist);
/* bump the wait-queue count */
epi->nwait++;
} else {
/* We have to signal that an error occurred */
epi->nwait = -1;
}
}
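For reference, init_waitqueue_func_entry() (include/linux/wait.h) merely stores the function pointer; a later wake_up on that queue then invokes ep_poll_callback() instead of waking a task directly:

static inline void init_waitqueue_func_entry(wait_queue_t *q,
					     wait_queue_func_t func)
{
	q->flags = 0;
	q->private = NULL;
	q->func = func;
}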
/*
* Removes a "struct epitem" from the eventpoll RB tree and deallocates
* all the associated resources. Must be called with "mtx" held.
*/
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
unsigned long flags;
struct file *file = epi->ffd.file;
/*
* Removes poll wait queue hooks. We _have_ to do this without holding
* the "ep->lock" otherwise a deadlock might occur. This because of the
* sequence of the lock acquisition. Here we do "ep->lock" then the wait
* queue head lock when unregistering the wait queue. The wakeup callback
* will run by holding the wait queue head lock and will call our callback
* that will try to get "ep->lock".
*/
/* unhook the epitem's watch from the resource file */
ep_unregister_pollwait(ep, epi);
/* Remove the current item from the list of epoll hooks */
/* spinlock: protects access to the struct file */
spin_lock(&file->f_lock);
/* detach the epitem from the file it watched */
if (ep_is_linked(&epi->fllink))
list_del_init(&epi->fllink);
spin_unlock(&file->f_lock);
/* remove the node from eventpoll's red-black tree; ep->lock is not needed (see ep_insert()) */
rb_erase(&epi->rbn, &ep->rbr);
/* spinlock: protects access to the eventpoll */
spin_lock_irqsave(&ep->lock, flags);
/* unlink the epitem from the eventpoll's ready list */
/*
 * An epitem can only sit on ep->ovflist inside the ep->mtx critical
 * section of epoll_wait() -> ep_poll() -> ep_scan_ready_list(), so
 * there is no need to check epi->next here.
 */
if (ep_is_linked(&epi->rdllink))
list_del_init(&epi->rdllink);
spin_unlock_irqrestore(&ep->lock, flags);
/* At this point it is safe to free the eventpoll item */
/* free the node */
kmem_cache_free(epi_cache, epi);
/* update the user's watch count */
atomic_dec(&ep->user->epoll_watches);
return 0;
}
/*
* This function unregisters poll callbacks from the associated file
* descriptor. Must be called with "mtx" held (or "epmutex" if called from
* ep_free).
*/
/*
 * Unregister the watch: unhook it from the resource file's wait queue
 * and free the eppoll_entry structures associated with the epitem.
 */
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
/* epi->pwqlist links the epitem with its eppoll_entry hooks */
struct list_head *lsthead = &epi->pwqlist;
struct eppoll_entry *pwq;
while (!list_empty(lsthead)) {
/* fetch the eppoll_entry */
pwq = list_first_entry(lsthead, struct eppoll_entry, llink);
/* detach the eppoll_entry from its epitem */
list_del(&pwq->llink);
/* unhook it from the resource file's wait queue */
remove_wait_queue(pwq->whead, &pwq->wait);
/* free the node */
kmem_cache_free(pwq_cache, pwq);
}
}
/*
* Modify the interest event mask by dropping an event if the new mask
* has a match in the current file status. Must be called with "mtx" held.
*/
static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
{
int pwake = 0;
unsigned int revents;
/*
* Set the new event interest mask before calling f_op->poll();
* otherwise we might miss an event that happens between the
* f_op->poll() call and the new event set registering.
*/
/* update the epoll_event */
epi->event.events = event->events;
epi->event.data = event->data; /* protected by mtx */
/*
* Get current event bits. We can safely use the file* here because
* its usage count has been increased by the caller of this function.
*/
/*
 * The watched events changed, so re-read the resource's current state
 * and check whether it contains any of the newly watched events.
 */
revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);
/*
* If the item is "hot" and it is not registered inside the ready
* list, push it inside.
*/
/* if the resource's current state contains the newly watched events, mark ready and wake the waiters */
if (revents & event->events) {
/* spinlock: protects access to the eventpoll */
spin_lock_irq(&ep->lock);
/*
 * The epitem may already have been put on the ready list by an
 * asynchronous ep_poll_callback() invocation... that is why the
 * !ep_is_linked(&epi->rdllink) check is needed.
 */
if (!ep_is_linked(&epi->rdllink)) {
list_add_tail(&epi->rdllink, &ep->rdllist);
/* Notify waiting tasks that events are available */
if (waitqueue_active(&ep->wq))
wake_up_locked(&ep->wq);
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
spin_unlock_irq(&ep->lock);
}
/* We have to call this outside the lock */
if (pwake)
ep_poll_safewake(&ep->poll_wait);
return 0;
}
epoll_wait()
/*
* Implement the event wait interface for the eventpoll file. It is the kernel
* part of the user space epoll_wait(2).
*/
SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
int, maxevents, int, timeout)
{
int error;
struct file *file;
struct eventpoll *ep;
/* The maximum number of event must be greater than zero */
/* argument validation */
if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
return -EINVAL;
/* Verify that the area passed by the user is writeable */
/* verify that the current user may write to the events array */
if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))) {
error = -EFAULT;
goto error_return;
}
/* Get the "struct file *" for the eventpoll file */
error = -EBADF;
/* get the struct file behind the eventpoll descriptor */
file = fget(epfd);
if (!file)
goto error_return;
/*
* We have to check that the file structure underneath the fd
* the user passed to us _is_ an eventpoll file.
*/
error = -EINVAL;
/* verify that the file behind epfd really is an epoll file */
if (!is_file_epoll(file))
goto error_fput;
/*
* At this point it is safe to assume that the "private_data" contains
* our own data structure.
*/
/* fetch the eventpoll attached to the epoll file */
ep = file->private_data;
/* Time to fish for events ... */
/* call ep_poll() and wait for events to arrive */
error = ep_poll(ep, events, maxevents, timeout);
error_fput:
fput(file);
error_return:
return error;
}
/*
 * Wakeups come from:
 * 1. ep_insert()
 * 2. ep_modify()
 * 3. ep_poll_callback()
 * 4. ep_poll() -> ep_send_events() -> ep_scan_ready_list()
 */
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
int maxevents, long timeout)
{
int res, eavail;
unsigned long flags;
long jtimeout;
wait_queue_t wait;
/*
* Calculate the timeout by checking for the "infinite" value (-1)
* and the overflow condition. The passed timeout is in milliseconds,
* that why (t * HZ) / 1000.
*/
/*
 * Convert the sleep time from milliseconds to jiffies, rounding up:
 * with HZ=100, a 1 ms timeout becomes (1*100 + 999)/1000 = 1 jiffy
 * rather than 0. A negative or overflowing timeout means sleep forever.
 */
jtimeout = (timeout < 0 || timeout >= EP_MAX_MSTIMEO) ?
MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000;
retry:
/* spinlock: protects access to the eventpoll */
spin_lock_irqsave(&ep->lock, flags);
res = 0;
/* the ready list is empty: no events are ready yet */
if (list_empty(&ep->rdllist)) {
/*
* We don't have any available event to return to the caller.
* We need to sleep here, and we will be wake up by
* ep_poll_callback() when events will become available.
*/
/* initialize a wait queue node for the current task and mark it as an exclusive waiter */
init_waitqueue_entry(&wait, current);
wait.flags |= WQ_FLAG_EXCLUSIVE;
/* mount the just-initialized node on the eventpoll's wait queue */
__add_wait_queue(&ep->wq, &wait);
for (;;) {
/*
* We don't want to sleep if the ep_poll_callback() sends us
* a wakeup in between. That's why we set the task state
* to TASK_INTERRUPTIBLE before doing the checks.
*/
/* set the task state to interruptible sleep, because we want to
 * receive the wakeups from ep_insert(), ep_modify() and
 * ep_poll_callback()
 */
set_current_state(TASK_INTERRUPTIBLE);
/* events are ready or the timeout expired: leave the loop */
if (!list_empty(&ep->rdllist) || !jtimeout)
break;
/* a signal is pending: set the return value to -EINTR and leave the loop */
if (signal_pending(current)) {
res = -EINTR;
break;
}
spin_unlock_irqrestore(&ep->lock, flags);
/* sleep... until the timeout expires or a readied resource wakes us */
jtimeout = schedule_timeout(jtimeout);
spin_lock_irqsave(&ep->lock, flags);
}
/* unhook from the wait queue */
__remove_wait_queue(&ep->wq, &wait);
/* restore the task state */
set_current_state(TASK_RUNNING);
}
/* Is it worth to try to dig for events ? */
/* check whether any resource is ready */
eavail = !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;
spin_unlock_irqrestore(&ep->lock, flags);
/*
* Try to transfer events to user space. In case we get 0 events and
* there's still timeout left over, we go trying again in search of
* more luck.
*/
/*
 * If no signal interrupted us (!res) and something is ready (eavail),
 * deliver the ready events to userspace (ep_send_events()).
 * If ep_send_events() delivers zero events and there is still timeout
 * budget left (jtimeout), retry and hope not to come back empty-handed...
 */
if (!res && eavail &&
!(res = ep_send_events(ep, events, maxevents)) && jtimeout)
goto retry;
return res;
}
static int ep_send_events(struct eventpoll *ep,
struct epoll_event __user *events, int maxevents)
{
struct ep_send_events_data esed;
/* note: the events array and the maximum accepted count maxevents are wrapped together */
esed.maxevents = maxevents;
esed.events = events;
/* note: the event delivery routine is ep_send_events_proc */
return ep_scan_ready_list(ep, ep_send_events_proc, &esed);
}
/**
* ep_scan_ready_list - Scans the ready list in a way that makes possible for
* the scan code, to call f_op->poll(). Also allows for
* O(NumReady) performance.
*
* @ep: Pointer to the epoll private data structure.
* @sproc: Pointer to the scan callback.
* @priv: Private opaque data passed to the @sproc callback.
*
* Returns: The same integer error code returned by the @sproc callback.
*/
static int ep_scan_ready_list(struct eventpoll *ep,
int (*sproc)(struct eventpoll *,
struct list_head *, void *),
void *priv)
{
int error, pwake = 0;
unsigned long flags;
struct epitem *epi, *nepi;
/* initialize a local list head */
LIST_HEAD(txlist);
/*
* We need to lock this because we could be hit by
* eventpoll_release_file() and epoll_ctl().
*/
/* take the mutex */
mutex_lock(&ep->mtx);
/*
* Steal the ready list, and re-init the original one to the
* empty list. Also, set ep->ovflist to NULL so that events
* happening while looping w/out locks, are not lost. We cannot
* have the poll callback to queue directly on ep->rdllist,
* because we want the "sproc" callback to be able to do it
* in a lockless way.
*/
/* spinlock: protects access to the eventpoll */
spin_lock_irqsave(&ep->lock, flags);
/*
 * Splice all nodes of the eventpoll's ready list onto txlist, leaving
 * the ready list empty.
 */
list_splice_init(&ep->rdllist, &txlist);
/* set eventpoll.ovflist so that events readied from now on are chained
 * on eventpoll.ovflist instead of the ready list */
ep->ovflist = NULL;
spin_unlock_irqrestore(&ep->lock, flags);
/*
* Now call the callback function.
*/
/*
 * sproc ==> ep_send_events_proc
 * priv wraps the events array and the maximum event count maxevents
 *
 * Note: ep_send_events_proc() runs entirely inside the ep->mtx critical section
 */
error = (*sproc)(ep, &txlist, priv);
/* spinlock: protects access to the eventpoll */
spin_lock_irqsave(&ep->lock, flags);
/*
* During the time we spent inside the "sproc" callback, some
* other events might have been queued by the poll callback.
* We re-insert them inside the main ready-list here.
*/
/*
 * While ep_send_events_proc() was delivering the ready list to
 * userspace, newly readied events were chained on eventpoll.ovflist,
 * so walk eventpoll.ovflist and re-mount every readied epitem on the
 * ready list, to be delivered by a later epoll_wait()...
 */
for (nepi = ep->ovflist; (epi = nepi) != NULL;
nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
/*
* We need to check if the item is already in the list.
* During the "sproc" callback execution time, items are
* queued into ->ovflist but the "txlist" might already
* contain them, and the list_splice() below takes care of them.
*/
/* see the original comment above for why ep_is_linked(&epi->rdllink) is checked... */
if (!ep_is_linked(&epi->rdllink))
list_add_tail(&epi->rdllink, &ep->rdllist);
}
/*
* We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
* releasing the lock, events will be queued in the normal way inside
* ep->rdllist.
*/
/* restore eventpoll.ovflist so that events readied from now on go to
 * the ready list instead of ovflist */
ep->ovflist = EP_UNACTIVE_PTR;
/*
* Quickly re-inject items left on "txlist".
*/
/* splice whatever ep_send_events_proc() left undelivered back onto the
 * eventpoll's ready list */
list_splice(&txlist, &ep->rdllist);
/*
 * Recall that epoll_wait() queues its wait_queue_t with the exclusive
 * flag set, so each wakeup rouses only one waiter. At this point we
 * have delivered as much of the ready list as we could, but delivery
 * may have been incomplete (1. an error occurred mid-way, 2. LT mode
 * re-queued items), and new events may have arrived meanwhile. If
 * ready items remain after this epoll_wait(), wake one more waiter so
 * that another user also gets a share of the resources...
 *
 * This already shows the kernel's remedy for the epoll thundering
 * herd, namely ET mode:
 * 1. each wakeup rouses a single waiter
 * 2. after delivery the event is not re-mounted on the ready list
 */
if (!list_empty(&ep->rdllist)) {
/*
* Wake up (if active) both the eventpoll wait list and
* the ->poll() wait list (delayed after we release the lock).
*/
/* wake up users sleeping in epoll_wait() on this epoll instance */
if (waitqueue_active(&ep->wq))
wake_up_locked(&ep->wq);
/* the epoll file itself is now ready */
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
spin_unlock_irqrestore(&ep->lock, flags);
mutex_unlock(&ep->mtx);
/* We have to call this outside the lock */
if (pwake)
ep_poll_safewake(&ep->poll_wait);
return error;
}
static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
void *priv)
{
struct ep_send_events_data *esed = priv;
int eventcnt;
unsigned int revents;
struct epitem *epi;
struct epoll_event __user *uevent;
/*
* We can loop without lock because we are passed a task private list.
* Items cannot vanish during the loop because ep_scan_ready_list() is
* holding "mtx" during this call.
*/
/*
 * Walk the head ready list.
 *
 * eventcnt counts the events delivered so far.
 * uevent walks the events array wrapped in esed; ready events are
 * returned to the user through this array.
 */
for (eventcnt = 0, uevent = esed->events;
!list_empty(head) && eventcnt < esed->maxevents;) {
epi = list_first_entry(head, struct epitem, rdllink);
/* unlink the epitem from the head ready list */
list_del_init(&epi->rdllink);
/* mask the file's current state against the events this epitem watches */
revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL) &
epi->event.events;
/*
* If the event mask intersect the caller-requested one,
* deliver the event to userspace. Again, ep_scan_ready_list()
* is holding "mtx", so no operations coming from userspace
* can change the item.
*/
/* if any watched event is pending */
if (revents) {
/*
 * Copy the events to userspace.
 *
 * If the copy fails, put the epitem back at the head of the ready
 * list and return the number of events delivered so far; the caller,
 * ep_scan_ready_list(), will splice the list back onto the
 * eventpoll's ready list, to be delivered by the next
 * epoll_wait() -> ep_poll() -> ep_send_events()...
 */
if (__put_user(revents, &uevent->events) ||
__put_user(epi->event.data, &uevent->data)) {
/* the copy failed... */
list_add(&epi->rdllink, head);
return eventcnt ? eventcnt : -EFAULT;
}
/* update the count of delivered events */
eventcnt++;
/* advance to the next slot in the events array */
uevent++;
if (epi->event.events & EPOLLONESHOT)
epi->event.events &= EP_PRIVATE_BITS;
else if (!(epi->event.events & EPOLLET)) {
/*
* If this file has been added with Level
* Trigger mode, we need to insert back inside
* the ready list, so that the next call to
* epoll_wait() will check again the events
* availability. At this point, noone can insert
* into ep->rdllist besides us. The epoll_ctl()
* callers are locked out by
* ep_scan_ready_list() holding "mtx" and the
* poll callback will queue them in ep->ovflist.
*/
/*
 * LT mode: as long as the resource stays in the watched state, keep
 * delivering the event to the user.
 * ET mode: deliver only when the resource's state changes.
 *
 * Under LT, after each delivery the epitem is re-mounted on the
 * eventpoll's ready list, so the next epoll_wait() skips sleeping and
 * drops straight into ep_send_events_proc(), where the file's fresh
 * state is compared against the watched events:
 * 1. if the state still matches (the resource became ready again, or
 *    the previous readiness has not been fully consumed), the epitem
 *    is delivered and re-mounted once more;
 * 2. if it no longer matches (the previous readiness was consumed and
 *    nothing new arrived), the epitem is unlinked from the ready list
 *    and not re-mounted.
 *
 * Regarding point 2, some blogs claim this can make epoll_wait()
 * return 0 in an idle spin; testing shows it does not... let us trace
 * the kernel to see why...
 *
 * e.g.
 * Suppose our epoll instance watches a single listen socket and
 * exactly one connection arrives: epoll_wait() wakes up, delivers the
 * event, re-mounts the epitem on the ready list, and returns to
 * userspace... The second epoll_wait() enters ep_send_events_proc()
 * without sleeping and runs into case 2 above. Since the eventpoll
 * held only this one node, the walk ends with eventcnt == 0, and we
 * unwind ep_send_events_proc() -> ep_scan_ready_list() ->
 * ep_send_events() -> ep_poll().
 * Ha!! Now the comment at the end of ep_poll() should make sense:
 * with timeout budget left, it retries (and sleeps) instead of
 * returning 0.
 */
list_add_tail(&epi->rdllink, &ep->rdllist);
}
}
}
return eventcnt;
}
ep_poll_callback()
/*
* This is the callback that is passed to the wait queue wakeup
* machanism. It is called by the stored file descriptors when they
* have events to report.
*/
/**
 * ep_poll_callback - wakeup callback: links the readied epitem onto the
 *                    ready list of its eventpoll and wakes up the watchers
 * @wait: eppoll_entry.wait
 * @mode:
 * @key: carries the resource's current state
 */
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
int pwake = 0;
unsigned long flags;
/* recover the epitem via the wait member of its eppoll_entry */
struct epitem *epi = ep_item_from_wait(wait);
/* get the eventpoll this epitem belongs to */
struct eventpoll *ep = epi->ep;
/* spinlock: protects access to the eventpoll */
/*
 * Note that ep_poll_callback() takes only the spinlock: it is invoked
 * when the resource becomes ready, typically from the resource's
 * interrupt handling path, and that context must not sleep, so the
 * sleepable mutex cannot be used here.
 *
 * Since ep->mtx is not taken, a race looks possible: after
 * ep_item_from_wait() fetches the epitem, could it be freed
 * asynchronously? It should not: this callback runs with the wait
 * queue head's lock held, and ep_unregister_pollwait() removes the
 * hook under that same lock before ep_remove() frees the epitem.
 */
spin_lock_irqsave(&ep->lock, flags);
/*
* If the event mask does not contain any poll(2) event, we consider the
* descriptor to be disabled. This condition is likely the effect of the
* EPOLLONESHOT bit that disables the descriptor when an event is received,
* until the next EPOLL_CTL_MOD will be issued.
*/
/* if the watched event mask is empty, there is nothing to do when the file becomes ready */
if (!(epi->event.events & ~EP_PRIVATE_BITS))
goto out_unlock;
/*
* Check the events coming with the callback. At this stage, not
* every device reports the events in the "key" parameter of the
* callback. We need to be able to handle both cases here, hence the
* test for "key" != NULL before the event match test.
*/
/* check whether the file's current state key contains any event we watch */
if (key && !((unsigned long) key & epi->event.events))
goto out_unlock;
/*
* If we are trasfering events to userspace, we can hold no locks
* (because we're accessing user memory, and because of linux f_op->poll()
* semantics). All the events that happens during that period of time are
* chained in ep->ovflist and requeued later on.
*/
/*
 * While ep_send_events_proc() is asynchronously delivering the ready
 * list to userspace (that is, while ep->ovflist != EP_UNACTIVE_PTR),
 * newly readied events are chained on eventpoll.ovflist instead.
 */
/* FIXME: I found little documentation on ovflist's concrete purpose;
 * the stated reason (see the comment in ep_scan_ready_list()) is that
 * "sproc" must be able to manipulate the ready list without taking
 * ep->lock, so this callback must not queue onto ep->rdllist directly
 * during that window... corrections welcome... */
if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
if (epi->next == EP_UNACTIVE_PTR) {
epi->next = ep->ovflist;
ep->ovflist = epi;
}
goto out_unlock;
}
/* If this file is already in the ready list we exit soon */
/* if the epitem is not yet on its eventpoll's ready list, append it to the tail */
/*
 * Once a ready event is mounted on the eventpoll's ready list and has
 * not yet been processed and unlinked, it need not be mounted again
 * when it becomes ready once more... that is why the
 * !ep_is_linked(&epi->rdllink) check is needed.
 */
if (!ep_is_linked(&epi->rdllink))
list_add_tail(&epi->rdllink, &ep->rdllist);
/*
* Wake up ( if active ) both the eventpoll wait list and the ->poll()
* wait list.
*/
/* wake up users sleeping in epoll_wait() on this epoll instance */
if (waitqueue_active(&ep->wq))
wake_up_locked(&ep->wq);
/* the epoll file itself is now ready */
if (waitqueue_active(&ep->poll_wait))
pwake++;
out_unlock:
spin_unlock_irqrestore(&ep->lock, flags);
/* We have to call this outside the lock */
if (pwake)
ep_poll_safewake(&ep->poll_wait);
return 1;
}
Verifying that ET mode fixes the epoll thundering herd
// server.c
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <err.h>
#include <pthread.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#define NR_THREAD 5
int listenfd, epollfd;
static void *thrd_func(void *args)
{
int connfd, retval;
socklen_t addrlen;
struct sockaddr_in cliaddr;
struct epoll_event revent;
if ((retval = epoll_wait(epollfd, &revent, 1, -1)) == -1)
err(-1, "thread: %ld: epoll_wait: %d", (long)pthread_self(), __LINE__);
fprintf(stderr, "thread: %ld: epoll_wait() return %d\n", (long)pthread_self(), retval);
addrlen = sizeof(struct sockaddr_in);
while (accept(listenfd, (struct sockaddr *)&cliaddr, &addrlen) == -1) {
if (errno == EAGAIN) {
warn("thread: %ld: accept: %d", (long)pthread_self(), __LINE__);
sleep(1);
continue;
}
err(-1, "thread: %ld: epoll_wait: %d", (long)pthread_self(), __LINE__);
}
fprintf(stderr, "thread: %ld: accept a connection: %s:%d\n", (long)pthread_self(),
inet_ntoa(cliaddr.sin_addr), ntohs(cliaddr.sin_port));
pthread_exit(NULL);
}
int main(int argc, char *argv[])
{
int i;
pthread_t threads[NR_THREAD];
struct sockaddr_in servaddr;
struct epoll_event ev;
if ((listenfd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0)) == -1)
err(-1, "socket: %d", __LINE__);
memset(&servaddr, 0, sizeof(struct sockaddr_in));
servaddr.sin_family = AF_INET;
servaddr.sin_addr.s_addr = htonl(INADDR_ANY);
servaddr.sin_port = htons(10240);
if (bind(listenfd, (struct sockaddr *)&servaddr, sizeof(struct sockaddr_in)) == -1)
err(-1, "bind: %d", __LINE__);
if (listen(listenfd, 0) == -1)
err(-1, "listen: %d", __LINE__);
if ((epollfd = epoll_create1(0)) == -1)
err(-1, "epoll_create1: %d", __LINE__);
ev.events = EPOLLIN;
#ifdef ET
ev.events |= EPOLLET;
#endif
ev.data.fd = listenfd;
if (epoll_ctl(epollfd, EPOLL_CTL_ADD, listenfd, &ev) == -1)
err(-1, "epoll_ctl: %d", __LINE__);
for (i = 0; i != NR_THREAD; ++i) {
if ((errno = pthread_create(&threads[i], NULL, &thrd_func, NULL)) != 0)
err(-1, "pthread_create: %d", __LINE__);
}
for (i = 0; i != NR_THREAD; ++i) {
if ((errno = pthread_join(threads[i], NULL)) != 0)
err(-1, "pthread_join: %d", __LINE__);
}
return 0;
}
The server spawns NR_THREAD threads, each calling epoll_wait() to watch the listen socket. The program has no practical value: this concurrency model could just block in accept() directly, and adding I/O multiplexing here only wastes performance... see UNIX Network Programming, Volume 1 (§30.6, TCP preforked server, no locking around accept).
// client.c
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <err.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
int main(int argc, char *argv[])
{
int connfd;
struct sockaddr_in servaddr;
if ((connfd = socket(AF_INET, SOCK_STREAM, 0)) == -1)
err(-1, "socket: %d", __LINE__);
memset(&servaddr, 0, sizeof(struct sockaddr_in));
servaddr.sin_family = AF_INET;
servaddr.sin_addr.s_addr = inet_addr("127.0.0.1");
servaddr.sin_port = htons(10240);
if (connect(connfd, (struct sockaddr *)&servaddr, sizeof(struct sockaddr_in)) == -1)
err(-1, "connect: %d", __LINE__);
fprintf(stderr, "ok\n");
return 0;
}
The client connects to the server and immediately exits... (thrilling)
# The client side's runs are not shown; infer from the server's output when each client was started.
[Asu@Zombie epoll]$ cc server.c -o server -lpthread
[Asu@Zombie epoll]$ cc client.c -o client
[Asu@Zombie epoll]$ ./server
thread: 140478631536384: epoll_wait() return 1
thread: 140478656714496: epoll_wait() return 1
thread: 140478648321792: epoll_wait() return 1
thread: 140478631536384: accept a connection: 127.0.0.1:38976
thread: 140478656714496: accept: 32: Resource temporarily unavailable
thread: 140478648321792: accept: 32: Resource temporarily unavailable
thread: 140478639929088: epoll_wait() return 1
thread: 140478639929088: accept: 32: Resource temporarily unavailable
thread: 140478639929088: accept: 32: Resource temporarily unavailable
thread: 140478656714496: accept: 32: Resource temporarily unavailable
thread: 140478648321792: accept: 32: Resource temporarily unavailable
thread: 140478639929088: accept: 32: Resource temporarily unavailable
thread: 140478648321792: accept: 32: Resource temporarily unavailable
thread: 140478656714496: accept: 32: Resource temporarily unavailable
thread: 140478665107200: epoll_wait() return 1
thread: 140478665107200: accept a connection: 127.0.0.1:38978
thread: 140478648321792: accept: 32: Resource temporarily unavailable
thread: 140478656714496: accept: 32: Resource temporarily unavailable
thread: 140478639929088: accept: 32: Resource temporarily unavailable
thread: 140478656714496: accept: 32: Resource temporarily unavailable
thread: 140478648321792: accept: 32: Resource temporarily unavailable
thread: 140478639929088: accept: 32: Resource temporarily unavailable
thread: 140478656714496: accept a connection: 127.0.0.1:38980
thread: 140478648321792: accept: 32: Resource temporarily unavailable
thread: 140478639929088: accept: 32: Resource temporarily unavailable
thread: 140478648321792: accept: 32: Resource temporarily unavailable
thread: 140478639929088: accept: 32: Resource temporarily unavailable
thread: 140478639929088: accept a connection: 127.0.0.1:38982
thread: 140478648321792: accept: 32: Resource temporarily unavailable
thread: 140478648321792: accept: 32: Resource temporarily unavailable
thread: 140478648321792: accept a connection: 127.0.0.1:38984
[Asu@Zombie epoll]$
# Note: here server.c was compiled with -DET, i.e. the listen socket is watched in EPOLLET mode.
# The epoll thundering herd is gone, but ET mode has its own pitfalls... it is not safe...
[Asu@Zombie epoll]$ cc server.c -o server -lpthread -DET
[Asu@Zombie epoll]$ cc client.c -o client
[Asu@Zombie epoll]$ ./server
thread: 139991003125504: epoll_wait() return 1
thread: 139991003125504: accept a connection: 127.0.0.1:38990
thread: 139991011518208: epoll_wait() return 1
thread: 139991011518208: accept a connection: 127.0.0.1:38992
thread: 139991019910912: epoll_wait() return 1
thread: 139991019910912: accept a connection: 127.0.0.1:38994
thread: 139991028303616: epoll_wait() return 1
thread: 139991028303616: accept a connection: 127.0.0.1:38996
thread: 139991036696320: epoll_wait() return 1
thread: 139991036696320: accept a connection: 127.0.0.1:38998
[Asu@Zombie epoll]$
The Linux kernel's remedy for the epoll thundering herd is to wake up only one waiter; but because LT mode re-mounts the epitem on the ready list, the thundering herd remains unsolved in LT mode...
Solving the accept thundering herd
/*
 * Early Linux kernels did not fix the accept thundering herd, so users
 * had to handle it themselves: take a lock before accept() and release
 * it afterwards, guaranteeing that at any moment only one
 * thread/process blocks in accept().
 * Modern Linux kernels have fixed it, essentially by doing what we do
 * below, except the locking is built into the accept() path itself...
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <err.h>
#include <pthread.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#define NR_THREAD 5
int listenfd;
pthread_mutex_t mtx;
static void *thrd_func(void *args)
{
int connfd, retval;
socklen_t addrlen;
struct sockaddr_in cliaddr;
int error_flg = -1;
if ((errno = pthread_mutex_lock(&mtx)) != 0)
err(-1, "thread: %ld: pthread_mutex_lock: %d", (long)pthread_self(), __LINE__);
addrlen = sizeof(struct sockaddr_in);
if (accept(listenfd, (struct sockaddr *)&cliaddr, &addrlen) == -1)
error_flg = __LINE__;
if ((errno = pthread_mutex_unlock(&mtx)) != 0)
err(-1, "thread: %ld: pthread_mutex_unlock: %d", (long)pthread_self(), __LINE__);
if (error_flg != -1)
err(-1, "thread: %ld: accept: %d", (long)pthread_self(), error_flg);
fprintf(stderr, "thread: %ld: accept a connection: %s:%d\n", (long)pthread_self(),
inet_ntoa(cliaddr.sin_addr), ntohs(cliaddr.sin_port));
pthread_exit(NULL);
}
int main(int argc, char *argv[])
{
int i;
pthread_t threads[NR_THREAD];
struct sockaddr_in servaddr;
if ((listenfd = socket(AF_INET, SOCK_STREAM, 0)) == -1)
err(-1, "socket: %d", __LINE__);
memset(&servaddr, 0, sizeof(struct sockaddr_in));
servaddr.sin_family = AF_INET;
servaddr.sin_addr.s_addr = htonl(INADDR_ANY);
servaddr.sin_port = htons(10240);
if (bind(listenfd, (struct sockaddr *)&servaddr, sizeof(struct sockaddr_in)) == -1)
err(-1, "bind: %d", __LINE__);
if (listen(listenfd, 0) == -1)
err(-1, "listen: %d", __LINE__);
if ((errno = pthread_mutex_init(&mtx, NULL)) != 0)
err(-1, "pthread_mutex_init: %d", __LINE__);
for (i = 0; i != NR_THREAD; ++i) {
if ((errno = pthread_create(&threads[i], NULL, &thrd_func, NULL)) != 0)
err(-1, "pthread_create: %d", __LINE__);
}
for (i = 0; i != NR_THREAD; ++i) {
if ((errno = pthread_join(threads[i], NULL)) != 0)
err(-1, "pthread_join: %d", __LINE__);
}
return 0;
}