1 Related structures
Linux 2.6 provides the following wait-queue operations:
(1) Define a wait queue head:
wait_queue_head_t my_queue;
defined in linux/wait.h:
struct __wait_queue_head {
    spinlock_t lock;
    struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;
(2) Initialize a wait queue head:
init_waitqueue_head(&my_queue);
defined in kernel/wait.c:
void init_waitqueue_head(wait_queue_head_t *q)
{
    spin_lock_init(&q->lock);
    INIT_LIST_HEAD(&q->task_list);
}
A shortcut that defines and initializes in one step:
DECLARE_WAIT_QUEUE_HEAD(my_queue);
linux/wait.h:
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {              \
    .lock      = __SPIN_LOCK_UNLOCKED(name.lock),          \
    .task_list = { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
    wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
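As a quick sketch of how the two styles are typically used in a driver
(my_dev and my_dev_setup are hypothetical names, not from the kernel):
---------------------------------------------------------------
#include <linux/wait.h>

/* Static definition and initialization in one step: */
static DECLARE_WAIT_QUEUE_HEAD(my_queue);

/* Dynamic initialization, e.g. for a head embedded in a
 * per-device structure: */
struct my_dev {
    wait_queue_head_t inq;
    /* ... */
};

static void my_dev_setup(struct my_dev *dev)
{
    init_waitqueue_head(&dev->inq);
}
---------------------------------------------------------------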
(3) Define a wait queue entry:
DECLARE_WAITQUEUE(name, tsk);
This defines and initializes a wait queue entry (wait_queue_t) named name for task tsk.
linux/wait.h:
struct __wait_queue {
    unsigned int flags;
#define WQ_FLAG_EXCLUSIVE 0x01
    void *private;
    wait_queue_func_t func;
    struct list_head task_list;
};
typedef struct __wait_queue wait_queue_t;
#define __WAITQUEUE_INITIALIZER(name, tsk) {   \
    .private   = tsk,                          \
    .func      = default_wake_function,        \
    .task_list = { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk) \
    wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
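A minimal sketch of the classic manual sleep built on DECLARE_WAITQUEUE
(my_queue and my_condition are assumed to exist; the structure mirrors
__wait_event_interruptible() analyzed below):
---------------------------------------------------------------
static void sleep_on_condition(void)
{
    DECLARE_WAITQUEUE(wait, current);          /* entry for this process */

    add_wait_queue(&my_queue, &wait);          /* hook onto the queue */
    for (;;) {
        /* Set the state before testing, to avoid a lost wakeup. */
        set_current_state(TASK_INTERRUPTIBLE);
        if (my_condition)
            break;
        if (signal_pending(current))
            break;                             /* woken by a signal */
        schedule();                            /* give up the CPU */
    }
    set_current_state(TASK_RUNNING);
    remove_wait_queue(&my_queue, &wait);       /* unhook again */
}
---------------------------------------------------------------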
2 Specific analysis
wait_event_interruptible(): this macro puts the task into the TASK_INTERRUPTIBLE state, meaning the process will not run again until it is woken up, and adds it to the wait queue wq.
wait_event_interruptible() first checks whether condition already holds; if so it returns 0 immediately, otherwise it calls __wait_event_interruptible() and stores the result in __ret.
---------------------------------------------------------------
#define wait_event_interruptible(wq, condition)             \
({                                                          \
    int __ret = 0;                                          \
    if (!(condition))                                       \
        __wait_event_interruptible(wq, condition, __ret);   \
    __ret;                                                  \
})
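For illustration, a sketch of typical usage together with the waking side
(flag, my_queue, consumer and producer are assumptions, not kernel names):
---------------------------------------------------------------
static int flag;
static DECLARE_WAIT_QUEUE_HEAD(my_queue);

static int consumer(void)
{
    /* Returns 0 once flag became nonzero, -ERESTARTSYS on a signal. */
    if (wait_event_interruptible(my_queue, flag != 0))
        return -ERESTARTSYS;
    flag = 0;
    return 0;
}

static void producer(void)
{
    flag = 1;
    wake_up_interruptible(&my_queue);   /* wakes the sleeper in consumer() */
}
---------------------------------------------------------------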
wait_event_interruptible() --> __wait_event_interruptible()
__wait_event_interruptible() first defines and initializes a wait_queue_t variable __wait whose private data is the current process (current); prepare_to_wait() then links __wait into the wait queue wq.
In an endless loop it puts the process into the interruptible sleeping state and repeatedly checks whether condition holds; if it does, the loop exits, otherwise the process keeps sleeping. Once the condition is satisfied, the process state is set back to running and __wait is removed from the wait queue, so the process can be scheduled normally again. If the process has a pending (POSIX asynchronous) signal, -ERESTARTSYS is returned instead.
----------------------------------------------------------------
#define __wait_event_interruptible(wq, condition, ret)          \
do {                                                            \
    DEFINE_WAIT(__wait);                                        \
    for (;;) {                                                  \
        prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);      \
        if (condition)                                          \
            break;                                              \
        if (!signal_pending(current)) {                         \
            schedule();                                         \
            continue;                                           \
        }                                                       \
        ret = -ERESTARTSYS;                                     \
        break;                                                  \
    }                                                           \
    finish_wait(&wq, &__wait);                                  \
} while (0)
__wait_event_interruptible() --> DEFINE_WAIT(name)
/usr/src/linux-2.6.21.5/include/linux/wait.h
---------------------------------------------------------
#define DEFINE_WAIT(name)                                \
    wait_queue_t name = {                                \
        .private   = current,                            \
        .func      = autoremove_wake_function,           \
        .task_list = LIST_HEAD_INIT((name).task_list),   \
    }
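Note that DEFINE_WAIT installs autoremove_wake_function rather than
default_wake_function; in 2.6-era kernels (kernel/wait.c) it removes the
entry from the queue as soon as the wakeup succeeds:
---------------------------------------------------------
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
    int ret = default_wake_function(wait, mode, sync, key);

    if (ret)
        list_del_init(&wait->task_list);   /* dequeue on successful wakeup */
    return ret;
}
---------------------------------------------------------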
(wait_queue_t / struct __wait_queue is the structure already shown in section 1.)
__wait_event_interruptible() --> prepare_to_wait()
void fastcall
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
    unsigned long flags;

    wait->flags &= ~WQ_FLAG_EXCLUSIVE;      /* non-exclusive wait */
    spin_lock_irqsave(&q->lock, flags);
    if (list_empty(&wait->task_list))       /* not queued yet? */
        __add_wait_queue(q, wait);
    if (is_sync_wait(wait))
        set_current_state(state);           /* e.g. TASK_INTERRUPTIBLE */
    spin_unlock_irqrestore(&q->lock, flags);
}
static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
    list_add(&new->task_list, &head->task_list);
}
Here we can see clearly that this function simply links the entry wait (of
type wait_queue_t) into the list headed by q (of type wait_queue_head_t).
Description of list_add: it essentially invokes __list_add(new, head, head->next),
which inserts new between head and head->next, i.e. at the front of the list.
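For reference, the 2.6-era implementation in include/linux/list.h (the
variant without list debugging) looks like this:
---------------------------------------------------------
static inline void __list_add(struct list_head *new,
                              struct list_head *prev,
                              struct list_head *next)
{
    next->prev = new;
    new->next  = next;
    new->prev  = prev;
    prev->next = new;
}

static inline void list_add(struct list_head *new, struct list_head *head)
{
    __list_add(new, head, head->next);
}
---------------------------------------------------------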
__wait_event_interruptible() --> finish_wait()
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
    unsigned long flags;

    __set_current_state(TASK_RUNNING);
    /*
     * We can check for list emptiness outside the lock
     * IFF:
     * - we use the "careful" check that verifies both
     *   the next and prev pointers, so that there cannot
     *   be any half-pending updates in progress on other
     *   CPU's that we haven't seen yet (and that might
     *   still change the stack area.
     * and
     * - all other users take the lock (ie we can only
     *   have _one_ other CPU that looks at or modifies
     *   the list).
     */
    if (!list_empty_careful(&wait->task_list)) {
        spin_lock_irqsave(&q->lock, flags);
        list_del_init(&wait->task_list);
        spin_unlock_irqrestore(&q->lock, flags);
    }
}
EXPORT_SYMBOL(finish_wait);
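Putting the pieces together, a minimal sketch of using DEFINE_WAIT(),
prepare_to_wait() and finish_wait() by hand (my_queue and my_condition are
assumptions); this is exactly the shape that __wait_event_interruptible()
expands to:
---------------------------------------------------------
static int wait_for_condition(void)
{
    int ret = 0;
    DEFINE_WAIT(wait);

    for (;;) {
        /* Queue ourselves and go interruptible before testing. */
        prepare_to_wait(&my_queue, &wait, TASK_INTERRUPTIBLE);
        if (my_condition)
            break;
        if (signal_pending(current)) {
            ret = -ERESTARTSYS;            /* interrupted by a signal */
            break;
        }
        schedule();                        /* sleep until woken */
    }
    finish_wait(&my_queue, &wait);         /* back to running, dequeued */
    return ret;
}
---------------------------------------------------------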