workqueue - Analysis, Part 1

                            How the new work queue mechanism works

 

A workqueue is a mechanism in the Linux kernel for deferring work. Unlike bottom halves (BH) or tasklets, a workqueue hands the deferred work to a kernel thread for execution, so its advantage is that the deferred work is allowed to be rescheduled and even to sleep.
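As a reminder of how a driver typically uses this API, here is a minimal, hypothetical sketch (my_work, my_handler and the module boilerplate are invented for illustration; they are not from the source analyzed below):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>

/* The handler runs in a kworker thread's process context, so it may sleep. */
static void my_handler(struct work_struct *work)
{
    pr_info("deferred work running\n");
}

static DECLARE_WORK(my_work, my_handler);

static int __init my_init(void)
{
    schedule_work(&my_work);    /* queue it on the default workqueue */
    return 0;
}

static void __exit my_exit(void)
{
    cancel_work_sync(&my_work); /* make sure it has finished before unload */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");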

The Linux work queue implementation changed between 2.6.0 and 2.6.19, and changed again in 2.6.36. This article analyzes the newer implementation.

Although the work queue was optimized after 2.6.0, more and more kernel modules came to use create_workqueue(). Each call to create_workqueue() spawns a work thread on every CPU, and allocates a cpu_workqueue_struct per CPU plus a workqueue_struct; if nothing is ever queued with queue_work(), those threads never get a chance to do any work. This still wastes a lot of memory and adds CPU load. Moreover, the works on a single work queue execute serially: if one work's handler sleeps, the works behind it cannot run.

Since 2.6.36 the mechanism has changed substantially: all work queues are effectively merged into one, and work threads are no longer tied one-to-one to work queues. When a work runs is decided purely by its importance and urgency. In other words, the new mechanism creates work threads per CPU, not per work queue.

Let's walk through the code:

Initializing workqueues and creating work threads:

At boot the system calls init_workqueues() in kernel/workqueue.c:

static int __init init_workqueues(void)
{
    unsigned int cpu;
    int i;

    cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);

    /* initialize gcwqs */
    /* As mentioned earlier, the new mechanism puts all workqueues under
       gcwq management, one gcwq per CPU. */
    for_each_gcwq_cpu(cpu) {
        struct global_cwq *gcwq = get_gcwq(cpu);

        spin_lock_init(&gcwq->lock);
        INIT_LIST_HEAD(&gcwq->worklist);
        gcwq->cpu = cpu;
        gcwq->flags |= GCWQ_DISASSOCIATED;

        INIT_LIST_HEAD(&gcwq->idle_list);
        for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
            INIT_HLIST_HEAD(&gcwq->busy_hash[i]);

        init_timer_deferrable(&gcwq->idle_timer);
        gcwq->idle_timer.function = idle_worker_timeout;
        gcwq->idle_timer.data = (unsigned long)gcwq;

        setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout,
                    (unsigned long)gcwq);

        ida_init(&gcwq->worker_ida);

        gcwq->trustee_state = TRUSTEE_DONE;
        init_waitqueue_head(&gcwq->trustee_wait);
    }

    /* create the initial worker */
    for_each_online_gcwq_cpu(cpu) {
        struct global_cwq *gcwq = get_gcwq(cpu);
        struct worker *worker;

        if (cpu != WORK_CPU_UNBOUND)
            gcwq->flags &= ~GCWQ_DISASSOCIATED;
        /* This is where the boot-time work threads are created. */
        worker = create_worker(gcwq, true);
        BUG_ON(!worker);
        spin_lock_irq(&gcwq->lock);
        start_worker(worker);
        spin_unlock_irq(&gcwq->lock);
    }

    /* Create the default workqueues available after boot.  The
       schedule_work() we usually call actually uses system_wq; see the
       implementation of schedule_work(). */
    system_wq = alloc_workqueue("events", 0, 0);
    system_long_wq = alloc_workqueue("events_long", 0, 0);
    system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
    system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
                                        WQ_UNBOUND_MAX_ACTIVE);
    system_freezable_wq = alloc_workqueue("events_freezable",
                                          WQ_FREEZABLE, 0);
    BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
           !system_unbound_wq || !system_freezable_wq);
    return 0;
}
early_initcall(init_workqueues);

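Incidentally, this answers what schedule_work() does: in this kernel version it is just a thin wrapper (roughly paraphrasing kernel/workqueue.c) that queues the work on system_wq:

int schedule_work(struct work_struct *work)
{
    return queue_work(system_wq, work);
}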

Next, let's see how create_worker() creates a work thread:

static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
{
    bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
    struct worker *worker = NULL;
    int id = -1;

    spin_lock_irq(&gcwq->lock);
    while (ida_get_new(&gcwq->worker_ida, &id)) {
        spin_unlock_irq(&gcwq->lock);
        if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
            goto fail;
        spin_lock_irq(&gcwq->lock);
    }
    spin_unlock_irq(&gcwq->lock);

    /* Allocate and initialize the worker. */
    worker = alloc_worker();
    if (!worker)
        goto fail;

    worker->gcwq = gcwq;
    worker->id = id;

    /* Both kthread_create_on_node() and kthread_create() can create a
       work thread; the difference is CPU affinity.  The first branch,
       taken when the gcwq is bound to a CPU, creates one work thread
       per CPU; the second creates a thread that is not tied to any CPU
       and may run anywhere.  The kworker/0:0, kworker/1:0 and
       kworker/u:0 processes you see in ps output are exactly the work
       threads created here! */
    if (!on_unbound_cpu)
        worker->task = kthread_create_on_node(worker_thread, worker,
                                              cpu_to_node(gcwq->cpu),
                                              "kworker/%u:%d",
                                              gcwq->cpu, id);
    else
        worker->task = kthread_create(worker_thread, worker,
                                      "kworker/u:%d", id);
    if (IS_ERR(worker->task))
        goto fail;

    /*
     * A rogue worker will become a regular one if CPU comes
     * online later on.  Make sure every worker has
     * PF_THREAD_BOUND set.
     */
    if (bind && !on_unbound_cpu)
        kthread_bind(worker->task, gcwq->cpu);
    else {
        worker->task->flags |= PF_THREAD_BOUND;
        if (on_unbound_cpu)
            worker->flags |= WORKER_UNBOUND;
    }

    return worker;
fail:
    if (id >= 0) {
        spin_lock_irq(&gcwq->lock);
        ida_remove(&gcwq->worker_ida, id);
        spin_unlock_irq(&gcwq->lock);
    }
    kfree(worker);
    return NULL;
}

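Note that create_worker() only creates the kthread; it does not wake it. The caller, as seen in init_workqueues(), then calls start_worker(), which (roughly paraphrasing the same file) marks the worker as started, accounts for it in the gcwq, parks it on the idle list and wakes the task:

static void start_worker(struct worker *worker)
{
    worker->flags |= WORKER_STARTED;
    worker->gcwq->nr_workers++;
    worker_enter_idle(worker);
    wake_up_process(worker->task);
}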

Processing works:

Once the work thread has been created by kthread_create_on_node() or kthread_create() and woken by start_worker(), it starts executing worker_thread():

static int worker_thread(void *__worker)
{
    struct worker *worker = __worker;
    struct global_cwq *gcwq = worker->gcwq;

    /* tell the scheduler that this is a workqueue worker */
    worker->task->flags |= PF_WQ_WORKER;
    /* The "goto woke_up" at the end makes the work thread an infinite
       loop. */
woke_up:
    spin_lock_irq(&gcwq->lock);

    /* DIE can be set only while we're idle, checking here is enough */
    if (worker->flags & WORKER_DIE) {
        spin_unlock_irq(&gcwq->lock);
        worker->task->flags &= ~PF_WQ_WORKER;
        return 0;
    }

    worker_leave_idle(worker);
recheck:
    /* no more worker necessary? */
    /* If a high-priority work needs handling and no idle work thread is
       left to handle it, the next step is to create a new work thread
       for it (readers can study need_more_worker() on their own).  This
       is where the new mechanism makes sure high-priority works are
       handled first. */
    if (!need_more_worker(gcwq))
        goto sleep;

    /* do we need to manage? */
    /* Create a new work thread only when one is needed.  Unlike the old
       mechanism, which created a work thread for every work queue no
       matter what (wasting memory), the new one goes through
       manage_workers() -> maybe_create_worker() -> create_worker();
       create_worker() was analyzed above, and it is what creates the
       work thread! */
    if (unlikely(!may_start_working(gcwq)) && manage_workers(worker))
        goto recheck;

    /*
     * ->scheduled list can only be filled while a worker is
     * preparing to process a work or actually processing it.
     * Make sure nobody diddled with it while I was sleeping.
     */
    BUG_ON(!list_empty(&worker->scheduled));

    /*
     * When control reaches this point, we're guaranteed to have
     * at least one idle worker or that someone else has already
     * assumed the manager role.
     */
    worker_clr_flags(worker, WORKER_PREP);

    do {
        struct work_struct *work =
            list_first_entry(&gcwq->worklist,
                             struct work_struct, entry);

        /* After possibly creating new work threads to handle the
           high-priority works, it is finally time to process our own
           work.  The core is process_one_work(). */
        if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
            /* optimization path, not strictly necessary */
            process_one_work(worker, work);
            if (unlikely(!list_empty(&worker->scheduled)))
                process_scheduled_works(worker);
        } else {
            move_linked_works(work, &worker->scheduled, NULL);
            process_scheduled_works(worker);
        }
    } while (keep_working(gcwq));

    worker_set_flags(worker, WORKER_PREP, false);
sleep:
    /* Before going to sleep, check one more time whether new works need
       handling.  Even if this work sleeps, other works can keep
       running, so unlike the old mechanism, one sleeping work no longer
       blocks the execution of the others. */
    if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker))
        goto recheck;

    /*
     * gcwq->lock is held and there's no work to process and no
     * need to manage, sleep.  Workers are woken up only while
     * holding gcwq->lock or from local cpu, so setting the
     * current state before releasing gcwq->lock is enough to
     * prevent losing any event.
     */
    worker_enter_idle(worker);
    __set_current_state(TASK_INTERRUPTIBLE);
    spin_unlock_irq(&gcwq->lock);
    schedule();
    goto woke_up;
}

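For completeness, process_scheduled_works(), used in the loop above, simply drains the worker's scheduled list one work at a time (roughly paraphrasing the same file):

static void process_scheduled_works(struct worker *worker)
{
    while (!list_empty(&worker->scheduled)) {
        struct work_struct *work =
            list_first_entry(&worker->scheduled,
                             struct work_struct, entry);
        process_one_work(worker, work);
    }
}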

Now let's look at how the system actually processes a work:

static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
    struct cpu_workqueue_struct *cwq = get_work_cwq(work);
    struct global_cwq *gcwq = cwq->gcwq;
    struct hlist_head *bwh = busy_worker_head(gcwq, work);
    bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
    /* Fetch the function the driver set up. */
    work_func_t f = work->func;
    int work_color;
    struct worker *collision;
#ifdef CONFIG_LOCKDEP
    /*
     * It is permissible to free the struct work_struct from
     * inside the function that is called from it, this we need to
     * take into account for lockdep too.  To avoid bogus "held
     * lock freed" warnings as well as problems when looking into
     * work->lockdep_map, make a copy and use that here.
     */
    struct lockdep_map lockdep_map = work->lockdep_map;
#endif
    /*
     * A single work shouldn't be executed concurrently by
     * multiple workers on a single cpu.  Check whether anyone is
     * already processing the work.  If so, defer the work to the
     * currently executing one.
     */
    collision = __find_worker_executing_work(gcwq, bwh, work);
    if (unlikely(collision)) {
        move_linked_works(work, &collision->scheduled, NULL);
        return;
    }

    /* claim and process */
    debug_work_deactivate(work);
    hlist_add_head(&worker->hentry, bwh);
    worker->current_work = work;
    worker->current_cwq = cwq;
    work_color = get_work_color(work);

    /* record the current cpu number in the work data and dequeue */
    set_work_cpu(work, gcwq->cpu);
    list_del_init(&work->entry);

    /*
     * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
     * wake up another worker; otherwise, clear HIGHPRI_PENDING.
     */
    /* If the global gcwq has a high-priority work pending, wake a
       worker to run it! */
    if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
        struct work_struct *nwork = list_first_entry(&gcwq->worklist,
                                        struct work_struct, entry);

        if (!list_empty(&gcwq->worklist) &&
            get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
            /* Wake the work thread that will run the high-priority
               work. */
            wake_up_worker(gcwq);
        else
            gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
    }

    /*
     * CPU intensive works don't participate in concurrency
     * management.  They're the scheduler's responsibility.
     */
    /* A CPU-intensive work is taken out of concurrency management, so
       that an idle work thread can be woken to handle the remaining
       works while this one monopolizes the CPU. */
    if (unlikely(cpu_intensive))
        worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);

    spin_unlock_irq(&gcwq->lock);

    work_clear_pending(work);
    lock_map_acquire_read(&cwq->wq->lockdep_map);
    lock_map_acquire(&lockdep_map);
    trace_workqueue_execute_start(work);
    /* After all that effort, we finally call the work's function
       pointer!!! */
    f(work);
    /*
     * While we must be careful to not use "work" after this, the trace
     * point will only record its address.
     */
    /* What follows is cleanup: removing the work, releasing resources
       and resetting flags. */
    trace_workqueue_execute_end(work);
    lock_map_release(&lockdep_map);
    lock_map_release(&cwq->wq->lockdep_map);

    if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
               "%s/0x%08x/%d\n",
               current->comm, preempt_count(), task_pid_nr(current));
        printk(KERN_ERR "    last function: ");
        print_symbol("%s\n", (unsigned long)f);
        debug_show_held_locks(current);
        dump_stack();
    }

    spin_lock_irq(&gcwq->lock);

    /* clear cpu intensive status */
    if (unlikely(cpu_intensive))
        worker_clr_flags(worker, WORKER_CPU_INTENSIVE);

    /* we're done with it, release */
    hlist_del_init(&worker->hentry);
    worker->current_work = NULL;
    worker->current_cwq = NULL;
    cwq_dec_nr_in_flight(cwq, work_color, false);
}

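This also demonstrates the earlier point that a sleeping work no longer stalls the works behind it. A hypothetical sketch (slow_handler and fast_handler are invented names; under the new mechanism the per-CPU pool wakes or creates another kworker when one blocks):

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/workqueue.h>

static void slow_handler(struct work_struct *work)
{
    msleep(1000);   /* sleeps, but only occupies this one kworker */
}

static void fast_handler(struct work_struct *work)
{
    pr_info("fast work did not have to wait a full second\n");
}

static DECLARE_WORK(slow_work, slow_handler);
static DECLARE_WORK(fast_work, fast_handler);

static void demo(void)
{
    schedule_work(&slow_work);
    schedule_work(&fast_work);  /* picked up by another worker */
}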

Creating a work queue:

We have now seen how work threads process works, but it is still unclear where the system_wq mentioned in init_workqueues() comes from, and there is another open question: why do work threads no longer depend on work queues? Let's analyze alloc_workqueue():

#define alloc_workqueue(name, flags, max_active)            \
    __alloc_workqueue_key((name), (flags), (max_active), NULL, NULL)

struct workqueue_struct *__alloc_workqueue_key(const char *name,
                                               unsigned int flags,
                                               int max_active,
                                               struct lock_class_key *key,
                                               const char *lock_name)
{
    struct workqueue_struct *wq;
    unsigned int cpu;

    /*
     * Workqueues which may be used during memory reclaim should
     * have a rescuer to guarantee forward progress.
     */
    /* WQ_MEM_RECLAIM means this workqueue must keep making progress
       even when memory is tight. */
    if (flags & WQ_MEM_RECLAIM)
        flags |= WQ_RESCUER;

    /*
     * Unbound workqueues aren't concurrency managed and should be
     * dispatched to workers immediately.
     */
    /* WQ_UNBOUND means the works are not tied to any CPU and may run
       on any of them. */
    if (flags & WQ_UNBOUND)
        flags |= WQ_HIGHPRI;

    /* max_active caps how many works of this wq may run concurrently
       on any one CPU. */
    max_active = max_active ?: WQ_DFL_ACTIVE;
    max_active = wq_clamp_max_active(max_active, flags, name);

    /* Allocate the workqueue_struct and save the information that
       describes this workqueue, such as name and flags; as we have
       already seen, the work threads use this information later. */
    wq = kzalloc(sizeof(*wq), GFP_KERNEL);
    if (!wq)
        goto err;

    wq->flags = flags;
    wq->saved_max_active = max_active;
    mutex_init(&wq->flush_mutex);
    atomic_set(&wq->nr_cwqs_to_flush, 0);
    INIT_LIST_HEAD(&wq->flusher_queue);
    INIT_LIST_HEAD(&wq->flusher_overflow);
    wq->name = name;

    lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
    INIT_LIST_HEAD(&wq->list);

    if (alloc_cwqs(wq) < 0)
        goto err;

    /* Initialize the per-CPU cpu_workqueue_struct. */
    for_each_cwq_cpu(cpu, wq) {
        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
        struct global_cwq *gcwq = get_gcwq(cpu);

        BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
        cwq->gcwq = gcwq;
        cwq->wq = wq;
        cwq->flush_color = -1;
        cwq->max_active = max_active;
        INIT_LIST_HEAD(&cwq->delayed_works);
    }

    if (flags & WQ_RESCUER) {
        struct worker *rescuer;

        if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
            goto err;

        wq->rescuer = rescuer = alloc_worker();
        if (!rescuer)
            goto err;

        rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
        if (IS_ERR(rescuer->task))
            goto err;

        rescuer->task->flags |= PF_THREAD_BOUND;
        wake_up_process(rescuer->task);
    }

    /*
     * workqueue_lock protects global freeze state and workqueues
     * list.  Grab it, set max_active accordingly and add the new
     * workqueue to workqueues list.
     */
    spin_lock(&workqueue_lock);

    if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
        for_each_cwq_cpu(cpu, wq)
            get_cwq(cpu, wq)->max_active = 0;

    /* Add this wq to the global workqueues list. */
    list_add(&wq->list, &workqueues);

    spin_unlock(&workqueue_lock);
    return wq;
err:
    if (wq) {
        free_cwqs(wq);
        free_mayday_mask(wq->mayday_mask);
        kfree(wq->rescuer);
        kfree(wq);
    }
    return NULL;
}
EXPORT_SYMBOL_GPL(__alloc_workqueue_key);

 

The new mechanism still keeps the create_workqueue() and create_singlethread_workqueue() interfaces, but both are now ultimately implemented on top of alloc_workqueue(); only the flags they pass differ. As noted above, under the new mechanism only the flags affect how works are scheduled; which work queue a work belongs to no longer matters. (A usage sketch follows the macros below.)

#define create_workqueue(name)                          \
    alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
#define create_freezable_workqueue(name)                \
    alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
#define create_singlethread_workqueue(name)             \
    alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)

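Here is a hypothetical driver-side sketch of creating a private workqueue with alloc_workqueue() and queueing a work on it (my_wq, my_work, my_setup and my_teardown are invented names):

#include <linux/errno.h>
#include <linux/workqueue.h>

static void my_handler(struct work_struct *work);  /* as sketched earlier */

static struct workqueue_struct *my_wq;
static DECLARE_WORK(my_work, my_handler);

static int my_setup(void)
{
    /* One concurrent work per CPU, usable during memory reclaim --
       the same thing create_workqueue("my_wq") expands to above. */
    my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 1);
    if (!my_wq)
        return -ENOMEM;

    queue_work(my_wq, &my_work);    /* queue on our own wq, not system_wq */
    return 0;
}

static void my_teardown(void)
{
    flush_workqueue(my_wq);     /* wait for everything queued so far */
    destroy_workqueue(my_wq);
}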

Queueing a work:

Finally, let's see how a work gets queued onto a work queue:

queue_work() -> queue_work_on() -> __queue_work()
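The first hop is trivial: queue_work() just picks the current CPU and delegates (roughly paraphrasing kernel/workqueue.c of this version):

int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
    int ret;

    ret = queue_work_on(get_cpu(), wq, work);
    put_cpu();

    return ret;
}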

static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
                         struct work_struct *work)
{
    struct global_cwq *gcwq;
    struct cpu_workqueue_struct *cwq;
    struct list_head *worklist;
    unsigned int work_flags;
    unsigned long flags;

    debug_work_activate(work);

    /* if dying, only works from the same workqueue are allowed */
    if (unlikely(wq->flags & WQ_DYING) &&
        WARN_ON_ONCE(!is_chained_work(wq)))
        return;

    /* determine gcwq to use */
    /* Pick the gcwq according to the flags. */
    if (!(wq->flags & WQ_UNBOUND)) {
        struct global_cwq *last_gcwq;

        if (unlikely(cpu == WORK_CPU_UNBOUND))
            cpu = raw_smp_processor_id();

        /*
         * It's multi cpu.  If @wq is non-reentrant and @work
         * was previously on a different cpu, it might still
         * be running there, in which case the work needs to
         * be queued on that cpu to guarantee non-reentrance.
         */
        gcwq = get_gcwq(cpu);
        if (wq->flags & WQ_NON_REENTRANT &&
            (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
            struct worker *worker;

            spin_lock_irqsave(&last_gcwq->lock, flags);

            worker = find_worker_executing_work(last_gcwq, work);

            if (worker && worker->current_cwq->wq == wq)
                gcwq = last_gcwq;
            else {
                /* meh... not running there, queue here */
                spin_unlock_irqrestore(&last_gcwq->lock, flags);
                spin_lock_irqsave(&gcwq->lock, flags);
            }
        } else
            spin_lock_irqsave(&gcwq->lock, flags);
    } else {
        gcwq = get_gcwq(WORK_CPU_UNBOUND);
        spin_lock_irqsave(&gcwq->lock, flags);
    }

    /* gcwq determined, get cwq and queue */
    cwq = get_cwq(gcwq->cpu, wq);
    trace_workqueue_queue_work(cpu, cwq, work);

    BUG_ON(!list_empty(&work->entry));

    cwq->nr_in_flight[cwq->work_color]++;
    work_flags = work_color_to_flags(cwq->work_color);

    if (likely(cwq->nr_active < cwq->max_active)) {
        trace_workqueue_activate_work(work);
        cwq->nr_active++;
        worklist = gcwq_determine_ins_pos(gcwq, cwq);
    } else {
        work_flags |= WORK_STRUCT_DELAYED;
        worklist = &cwq->delayed_works;
    }

    /* Put the work on the chosen list to await execution. */
    insert_work(cwq, work, worklist, work_flags);

    spin_unlock_irqrestore(&gcwq->lock, flags);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
                        struct work_struct *work, struct list_head *head,
                        unsigned int extra_flags)
{
    struct global_cwq *gcwq = cwq->gcwq;

    /* we own @work, set data and link */
    set_work_cwq(work, cwq, extra_flags);

    /*
     * Ensure that we get the right work->data if we see the
     * result of list_add() below, see try_to_grab_pending().
     */
    smp_wmb();

    list_add_tail(&work->entry, head);

    /*
     * Ensure either worker_sched_deactivated() sees the above
     * list_add_tail() or we see zero nr_running to avoid workers
     * lying around lazily while there are works to be processed.
     */
    smp_mb();

    /* If more workers are needed -- for example a high-priority work
       is pending, or no worker is currently running -- wake up an idle
       work thread to handle it. */
    if (__need_more_worker(gcwq))
        wake_up_worker(gcwq);
}

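Besides queue_work(), the same path serves queue_work_on(), which lets the caller pin a work to a specific CPU, and queue_delayed_work(), which goes through a timer first. A hypothetical usage sketch (my_dwork and queueing_examples are invented names):

#include <linux/workqueue.h>

static void my_handler(struct work_struct *work);  /* as sketched earlier */

static DECLARE_DELAYED_WORK(my_dwork, my_handler);

static void queueing_examples(struct workqueue_struct *wq,
                              struct work_struct *work)
{
    /* Pin the work to CPU 0 (assuming it is online). */
    queue_work_on(0, wq, work);

    /* Run my_handler on wq after roughly one second (HZ jiffies). */
    queue_delayed_work(wq, &my_dwork, HZ);
}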
             

That completes the analysis of the work queue mechanism. As we can see, the new mechanism is more flexible, and it essentially no longer wastes memory or overloads the system.

Perhaps, in the near future, the create_workqueue() interface will disappear entirely...

                    

References:

http://lwn.net/Articles/403891/

http://gqf2008.iteye.com/blog/447060

Documentation/workqueue.txt (in the kernel source tree)

 

2012/08/10

 
