Timers let the kernel run a specified piece of code at a specified moment in the future. The kernel timer interfaces are declared in linux/timer.h.
This article first covers how to use timers, and then builds on that to walk through their internal implementation.
I. How a timer is represented:
struct timer_list {
    struct list_head entry;             /* links the timer into a kernel timer list */
    unsigned long expires;              /* expiry time, in jiffies */
    void (*function)(unsigned long);    /* callback invoked on expiry */
    unsigned long data;                 /* argument passed to the callback */
    struct tvec_base *base;             /* per-CPU timer base this timer belongs to */
#ifdef CONFIG_TIMER_STATS
    void *start_site;
    char start_comm[16];
    int start_pid;
#endif
#ifdef CONFIG_LOCKDEP
    struct lockdep_map lockdep_map;
#endif
};
II. The timer API:
1. Initializing a timer
#define TIMER_INITIALIZER(_function, _expires, _data) {    \
        .entry = { .prev = TIMER_ENTRY_STATIC },           \
        .function = (_function),                           \
        .expires = (_expires),                             \
        .data = (_data),                                   \
        .base = &boot_tvec_bases,                          \
        __TIMER_LOCKDEP_MAP_INITIALIZER(                   \
            __FILE__ ":" __stringify(__LINE__))            \
    }

#define DEFINE_TIMER(_name, _function, _expires, _data)    \
    struct timer_list _name =                              \
        TIMER_INITIALIZER(_function, _expires, _data)

#define setup_timer(timer, fn, data)                       \
    do {                                                   \
        static struct lock_class_key __key;                \
        setup_timer_key((timer), #timer, &__key, (fn), (data));\
    } while (0)
These macros simply initialize the timer's fields. Note the .base = &boot_tvec_bases assignment: boot_tvec_bases is the timer base created by the kernel during initialization.
Setting the expiry time expires at initialization is usually pointless; in practice the expiry is set at the moment the timer is actually registered.
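A typical init-then-arm pattern looks like this (a sketch; demo_timer and demo_timer_fn are made-up names, and mod_timer() is introduced in step 4 below):

#include <linux/module.h>
#include <linux/timer.h>

static struct timer_list demo_timer;

/* hypothetical callback; runs in softirq context when the timer expires */
static void demo_timer_fn(unsigned long data)
{
    /* ... do the timed work here, without sleeping ... */
}

static int __init demo_init(void)
{
    /* initialize now with a dummy data argument, arm with a real expiry later */
    setup_timer(&demo_timer, demo_timer_fn, 0);
    mod_timer(&demo_timer, jiffies + HZ);    /* fire in about one second */
    return 0;
}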
2. Registering a timer:
void add_timer(struct timer_list *timer);
Once a timer has been added to the kernel's timer lists, it is in the active (pending) state. Note that a registered timer fires only once: it is removed from the timer list just before its callback runs. To run it periodically, the callback itself must re-register the timer, as sketched below.
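A self-rearming callback therefore typically looks like this (sketch, reusing the hypothetical demo_timer from above):

static void demo_timer_fn(unsigned long data)
{
    /* ... periodic work ... */

    /* the timer was detached before this callback was invoked,
     * so re-arm it to run again one second from now */
    mod_timer(&demo_timer, jiffies + HZ);
}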
3. Deactivating a timer:
int del_timer(struct timer_list *timer);
int del_timer_sync(struct timer_list *timer);
The timer's callback may be running at the very moment you deactivate the timer; del_timer_sync() waits for a running callback to finish before deactivating it.
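A typical module exit path therefore looks like this (sketch):

static void __exit demo_exit(void)
{
    /*
     * Deactivate the timer and, if its callback is currently running
     * on another CPU, wait for it to finish. Must not be called while
     * holding a lock that the callback itself takes, or it can deadlock.
     */
    del_timer_sync(&demo_timer);
}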
4. Modifying a timer's expiry time
Once add_timer() has registered a timer, it is active; to change the expiry of an active timer you must go through the following interface:
int mod_timer(struct timer_list *timer, unsigned long expires);
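For example (sketch):

    /* push the expiry out to roughly five seconds from now;
     * returns 1 if the timer was pending, 0 if it was inactive */
    mod_timer(&demo_timer, jiffies + 5 * HZ);

Note that mod_timer() also works on an inactive timer, in which case it simply activates it; this is why add_timer() can be implemented on top of it, as we will see below.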
5. Checking whether a timer is pending:
static inline int timer_pending(const struct timer_list *timer)
{
    return timer->entry.next != NULL;
}
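This only tells you whether the timer is currently queued; nothing stops it from expiring right after the check. A typical use (sketch):

    /* avoid shortening the period if the timer is already queued */
    if (!timer_pending(&demo_timer))
        mod_timer(&demo_timer, jiffies + HZ);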
With the interfaces covered, here is a simple example:
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/delay.h>

#define ENTER() printk(KERN_DEBUG "%s() Enter", __func__)
#define EXIT() printk(KERN_DEBUG "%s() Exit", __func__)
#define ERR(fmt, args...) printk(KERN_ERR "%s()-%d: " fmt "\n", __func__, __LINE__, ##args)
#define DBG(fmt, args...) printk(KERN_DEBUG "%s()-%d: " fmt "\n", __func__, __LINE__, ##args)

struct test_timer {
    struct timer_list t;
    unsigned long nums;
};

static void my_timer_func(unsigned long data)
{
    struct test_timer *timer = (struct test_timer *)data;

    DBG("nums: %lu", timer->nums--);
    if (timer->nums > 0) {
        mod_timer(&timer->t, timer->t.expires + HZ);    /* re-arm the timer */
    }
}

static struct test_timer my_timer;

static int __init timer_demo_init(void)
{
    setup_timer(&my_timer.t, my_timer_func, (unsigned long)&my_timer);
    my_timer.nums = 30;
    msleep_interruptible(2000);
    DBG("before mod_timer");
    mod_timer(&my_timer.t, jiffies + 2 * HZ);
    DBG("success");
    return 0;
}

static void __exit timer_demo_exit(void)
{
    ENTER();
    while (my_timer.nums > 0) {
        DBG("waiting my_timer exit");
        msleep_interruptible(1000);
    }
    EXIT();
}

MODULE_LICENSE("GPL");
module_init(timer_demo_init);
module_exit(timer_demo_exit);
III. How timer registration works:
Next, let's analyze how the kernel manages the timers we register, starting from add_timer():
void add_timer(struct timer_list *timer)
{
    BUG_ON(timer_pending(timer));
    mod_timer(timer, timer->expires);
}
As you can see, calling add_timer() is the same as registering the timer with mod_timer(timer, timer->expires), except that add_timer() must only be used on a timer that is not already pending (note the BUG_ON).
int mod_timer(struct timer_list *timer, unsigned long expires)
{
    /*
     * This is a common optimization triggered by the
     * networking code - if the timer is re-modified
     * to be the same thing then just return:
     */
    if (timer_pending(timer) && timer->expires == expires)
        return 1;

    return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
mod_timer() first checks whether the timer is already pending with the same expiry; if so, there is nothing to do and it returns immediately. Otherwise it calls __mod_timer():
static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
            bool pending_only, int pinned)
{
    struct tvec_base *base, *new_base;
    unsigned long flags;
    int ret = 0, cpu;

    timer_stats_timer_set_start_info(timer);
    BUG_ON(!timer->function);

    base = lock_timer_base(timer, &flags);

    /* if the timer is already pending, first detach it from its list
     * with detach_timer() */
    if (timer_pending(timer)) {
        detach_timer(timer, 0);
        if (timer->expires == base->next_timer &&
            !tbase_get_deferrable(timer->base))
            base->next_timer = base->timer_jiffies;
        ret = 1;
    } else {
        if (pending_only)
            goto out_unlock;
    }

    debug_activate(timer, expires);

    new_base = __get_cpu_var(tvec_bases);

    cpu = smp_processor_id();

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
    if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) {
        int preferred_cpu = get_nohz_load_balancer();

        if (preferred_cpu >= 0)
            cpu = preferred_cpu;
    }
#endif
    new_base = per_cpu(tvec_bases, cpu);

    if (base != new_base) {
        /*
         * We are trying to schedule the timer on the local CPU.
         * However we can't change timer's base while it is running,
         * otherwise del_timer_sync() can't detect that the timer's
         * handler yet has not finished. This also guarantees that
         * the timer is serialized wrt itself.
         */
        if (likely(base->running_timer != timer)) {
            /* See the comment in lock_timer_base() */
            timer_set_base(timer, NULL);
            spin_unlock(&base->lock);
            base = new_base;
            spin_lock(&base->lock);
            timer_set_base(timer, base);
        }
    }

    timer->expires = expires;
    if (time_before(timer->expires, base->next_timer) &&
        !tbase_get_deferrable(timer->base))
        base->next_timer = timer->expires;
    internal_add_timer(base, timer);

out_unlock:
    spin_unlock_irqrestore(&base->lock, flags);

    return ret;
}
Finally, internal_add_timer() is called to complete the registration:
static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
    unsigned long expires = timer->expires;
    unsigned long idx = expires - base->timer_jiffies;
    struct list_head *vec;

    /* pick the timer list that matches the expiry time */
    if (idx < TVR_SIZE) {
        int i = expires & TVR_MASK;
        vec = base->tv1.vec + i;
    } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
        int i = (expires >> TVR_BITS) & TVN_MASK;
        vec = base->tv2.vec + i;
    } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
        int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
        vec = base->tv3.vec + i;
    } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
        int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
        vec = base->tv4.vec + i;
    } else if ((signed long) idx < 0) {
        /*
         * Can happen if you add a timer with expires == jiffies,
         * or you set a timer to go off in the past
         */
        vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
    } else {
        int i;
        /* If the timeout is larger than 0xffffffff on 64-bit
         * architectures then we use the maximum timeout:
         */
        if (idx > 0xffffffffUL) {
            idx = 0xffffffffUL;
            expires = idx + base->timer_jiffies;
        }
        i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
        vec = base->tv5.vec + i;
    }
    /*
     * Timers are FIFO:
     */
    list_add_tail(&timer->entry, vec);    /* append to the tail of the chosen list */
}
To follow this, we need a closer look at struct tvec_base; once you've seen it, the management scheme becomes fairly clear:
/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

struct tvec {
    struct list_head vec[TVN_SIZE];
};

struct tvec_root {
    struct list_head vec[TVR_SIZE];
};

struct tvec_base {
    spinlock_t lock;
    struct timer_list *running_timer;    /* the timer whose callback is currently running */
    unsigned long timer_jiffies;
    unsigned long next_timer;
    struct tvec_root tv1;
    struct tvec tv2;
    struct tvec tv3;
    struct tvec tv4;
    struct tvec tv5;
} ____cacheline_aligned;
Each CPU has its own struct tvec_base object, which holds the timers registered on that CPU. As the structure shows, it contains five arrays of lists (tv1 through tv5), each storing timers in a different range of expiry times, distributed as follows:
Writing idx = expires - base->timer_jiffies (and assuming TVR_BITS = 8, TVN_BITS = 6, i.e. not CONFIG_BASE_SMALL):

idx in [0, 1<<8) --> tv1; the slot within the tv1.vec array is expires & ((1<<8) - 1)
idx in [1<<8, 1<<(8+6)) --> tv2; the slot within tv2.vec is (expires >> 8) & ((1<<6) - 1)
idx in [1<<(8+6), 1<<(8+2*6)) --> tv3; the slot within tv3.vec is (expires >> (8+6)) & ((1<<6) - 1)
idx in [1<<(8+2*6), 1<<(8+3*6)) --> tv4; the slot within tv4.vec is (expires >> (8+2*6)) & ((1<<6) - 1)
idx >= 1<<(8+3*6) --> tv5; the slot within tv5.vec is (expires >> (8+3*6)) & ((1<<6) - 1), with idx capped at 0xffffffff

Note that the array is chosen by the distance to expiry (idx), while the slot within the array is taken from bits of the absolute expires value.
The reason for splitting timers into five arrays is efficiency. Whenever an interrupt occurs, the kernel checks whether any expired timers need to run; walking every list each time would clearly be slow, so on each tick the kernel only inspects one list of the tv1.vec array. Exactly how this is executed is analyzed below; for now, it is enough to understand that registering a timer means hanging it on one of these lists in the local CPU's timer base.
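To make the bucket selection concrete, here is a small user-space sketch of the same index math as internal_add_timer() quoted above (the constants match the kernel source; timer_jiffies and expires are plain numbers here):

#include <stdio.h>

#define TVN_BITS 6
#define TVR_BITS 8
#define TVN_MASK ((1 << TVN_BITS) - 1)
#define TVR_MASK ((1 << TVR_BITS) - 1)
#define TVR_SIZE (1 << TVR_BITS)

/* mirrors the bucket-selection logic of internal_add_timer() */
static void pick_bucket(unsigned long timer_jiffies, unsigned long expires)
{
    unsigned long idx = expires - timer_jiffies;

    if (idx < TVR_SIZE)
        printf("tv1[%lu]\n", expires & TVR_MASK);
    else if (idx < 1UL << (TVR_BITS + TVN_BITS))
        printf("tv2[%lu]\n", (expires >> TVR_BITS) & TVN_MASK);
    else if (idx < 1UL << (TVR_BITS + 2 * TVN_BITS))
        printf("tv3[%lu]\n", (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK);
    else
        printf("tv4 or tv5 ...\n");
}

int main(void)
{
    pick_bucket(1000, 1100);    /* idx = 100  -> tv1[1100 & 255] = tv1[76] */
    pick_bucket(1000, 2000);    /* idx = 1000 -> tv2[(2000 >> 8) & 63] = tv2[7] */
    return 0;
}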
IV. How timers are executed:
Timers run from a softirq, i.e. in atomic context: a timer callback must not sleep or otherwise block.
During kernel initialization, init_timers() registers the softirq:
void __init init_timers(void)
{
    int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
                               (void *)(long)smp_processor_id());

    init_timer_stats();

    BUG_ON(err == NOTIFY_BAD);
    register_cpu_notifier(&timers_nb);
    open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}
open_softirq() registers the timer softirq with run_timer_softirq as its handler. Softirqs are interrupts simulated in software; most of the time they are processed in the irq_exit phase, and any softirqs left unprocessed there are handled by the ksoftirqd kernel thread. We won't dig into the softirq machinery here; for now it is enough to know that after an interrupt occurs, the timer softirq handler run_timer_softirq gets to run.
/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
    struct tvec_base *base = __get_cpu_var(tvec_bases);

    perf_event_do_pending();

    hrtimer_run_pending();

    /* jiffies >= base->timer_jiffies means some timers may have expired
     * and need to run */
    if (time_after_eq(jiffies, base->timer_jiffies))
        __run_timers(base);
}

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
    struct timer_list *timer;

    spin_lock_irq(&base->lock);
    while (time_after_eq(jiffies, base->timer_jiffies)) {
        struct list_head work_list;
        struct list_head *head = &work_list;
        int index = base->timer_jiffies & TVR_MASK;

        /*
         * Cascade timers: when tv1 wraps, pull the now-expiring bucket
         * of the next-level array down one level
         */
        if (!index &&
            (!cascade(base, &base->tv2, INDEX(0))) &&
                (!cascade(base, &base->tv3, INDEX(1))) &&
                    !cascade(base, &base->tv4, INDEX(2)))
            cascade(base, &base->tv5, INDEX(3));
        ++base->timer_jiffies;
        list_replace_init(base->tv1.vec + index, &work_list);
        while (!list_empty(head)) {
            void (*fn)(unsigned long);
            unsigned long data;

            timer = list_first_entry(head, struct timer_list, entry);
            fn = timer->function;    /* the timer callback */
            data = timer->data;

            timer_stats_account_timer(timer);

            set_running_timer(base, timer);
            detach_timer(timer, 1);

            spin_unlock_irq(&base->lock);
            {
                int preempt_count = preempt_count();

#ifdef CONFIG_LOCKDEP
                /*
                 * It is permissible to free the timer from
                 * inside the function that is called from
                 * it, this we need to take into account for
                 * lockdep too. To avoid bogus "held lock
                 * freed" warnings as well as problems when
                 * looking into timer->lockdep_map, make a
                 * copy and use that here.
                 */
                struct lockdep_map lockdep_map =
                    timer->lockdep_map;
#endif
                /*
                 * Couple the lock chain with the lock chain at
                 * del_timer_sync() by acquiring the lock_map
                 * around the fn() call here and in
                 * del_timer_sync().
                 */
                lock_map_acquire(&lockdep_map);

                trace_timer_expire_entry(timer);
                fn(data);    /* run the timer callback */
                trace_timer_expire_exit(timer);

                lock_map_release(&lockdep_map);

                if (preempt_count != preempt_count()) {
                    printk(KERN_ERR "huh, entered %p "
                           "with preempt_count %08x, exited"
                           " with %08x?\n",
                           fn, preempt_count,
                           preempt_count());
                    BUG();
                }
            }
            spin_lock_irq(&base->lock);
        }
    }
    set_running_timer(base, NULL);
    spin_unlock_irq(&base->lock);
}
The logic here is fairly involved, and I don't claim to follow every detail yet, but the gist is clear: for each tick between base->timer_jiffies and the current jiffies, the tv1 bucket for that tick is moved onto the local work_list, and every timer on it is executed in turn.
Near the top of the loop there is a step that redistributes the timer lists:
int index = base->timer_jiffies & TVR_MASK;

/*
 * Cascade timers:
 */
if (!index &&
    (!cascade(base, &base->tv2, INDEX(0))) &&
        (!cascade(base, &base->tv3, INDEX(1))) &&
            !cascade(base, &base->tv4, INDEX(2)))
    cascade(base, &base->tv5, INDEX(3));
++base->timer_jiffies;
To follow it, look at the INDEX macro and the cascade() function first:
static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
    /* cascade all the timers from tv up one level */
    struct timer_list *timer, *tmp;
    struct list_head tv_list;

    list_replace_init(tv->vec + index, &tv_list);

    /*
     * We are removing _all_ timers from the list, so we
     * don't have to detach them individually.
     */
    list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
        BUG_ON(tbase_get_base(timer->base) != base);
        internal_add_timer(base, timer);
    }

    return index;
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
As you can see, INDEX(N) computes, from the current base->timer_jiffies, the bucket of the corresponding higher-level array whose timers are now due to move down a level, and cascade() detaches that whole bucket and feeds each of its timers back through internal_add_timer(), which re-buckets them according to their now much closer expiry.
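A quick numeric check of when the cascades trigger (a user-space sketch with the same constants as before):

#include <stdio.h>

#define TVN_BITS 6
#define TVR_BITS 8
#define TVN_MASK ((1 << TVN_BITS) - 1)
#define TVR_MASK ((1 << TVR_BITS) - 1)
#define INDEX(j, N) (((j) >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

int main(void)
{
    unsigned long timer_jiffies = 256 * 64;    /* 16384: tv1 and tv2 wrap together */

    /* index == 0: tv1 has wrapped, so one tv2 bucket is pulled down */
    printf("index    = %lu\n", timer_jiffies & TVR_MASK);     /* 0 */
    /* INDEX(0) == 0: tv2 has wrapped too, so one tv3 bucket follows */
    printf("INDEX(0) = %lu\n", INDEX(timer_jiffies, 0));      /* 0 */
    printf("INDEX(1) = %lu\n", INDEX(timer_jiffies, 1));      /* 1 */
    return 0;
}

So tv2 is cascaded once every 256 jiffies, tv3 once every 256*64 = 16384 jiffies, and so on: each level is touched exponentially less often, which is what keeps the per-tick work small.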