timer:
A mechanism for maintaining timers that can notify the kernel or a user program when a time interval has elapsed.
In the Linux code, a few macros produce the HZ value, which determines the frequency of the clock (timer) interrupt:

#define HZ 1000
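Since every tick advances the jiffies counter by one, elapsed time falls out of simple division by HZ. A minimal userspace sketch (not kernel code; the tick count below is a made-up value) of that arithmetic:

#include <stdio.h>

#define HZ 1000                         /* ticks per second, as above */

int main(void)
{
    unsigned long jiffies = 4500;       /* hypothetical tick count */

    /* whole seconds, plus the leftover ticks converted to ms */
    printf("%lu ticks = %lu s + %lu ms\n",
           jiffies, jiffies / HZ, (jiffies % HZ) * 1000 / HZ);
    return 0;
}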
xtime variable:
tv_sec: stores the number of seconds elapsed since the Epoch (UTC);
tv_nsec: stores the number of nanoseconds elapsed since the last second; its range is 0-999999999.
The xtime variable is updated once per tick, i.e. roughly HZ = 1000 times per second.
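Behind xtime sits a struct timespec, so each per-tick update has to carry nanosecond overflow into tv_sec to keep tv_nsec inside 0-999999999. A small userspace sketch of that invariant (the helper name and the values are illustrative, not kernel APIs):

#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

/* add nanoseconds and normalize so 0 <= tv_nsec < NSEC_PER_SEC */
static void timespec_add_ns_demo(struct timespec *ts, long ns)
{
    ts->tv_nsec += ns;
    while (ts->tv_nsec >= NSEC_PER_SEC) {
        ts->tv_nsec -= NSEC_PER_SEC;    /* carry into the seconds field */
        ts->tv_sec++;
    }
}

int main(void)
{
    struct timespec xtime = { .tv_sec = 100, .tv_nsec = 999999000 };

    timespec_add_ns_demo(&xtime, 2000);
    printf("%ld.%09ld\n", (long)xtime.tv_sec, xtime.tv_nsec); /* 101.000001000 */
    return 0;
}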
Timer initialization path:
start_kernel() ---> time_init() ---> initializes the clock interrupt service routine.
Linux has both dynamic timers and interval timers: dynamic timers are used inside the kernel, while interval timers are created by processes in user space (see the sketch below).
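On the user-space side, here is a minimal sketch of an interval timer created with setitimer(2), delivering SIGALRM once per second (the handler body is illustrative):

#include <signal.h>
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

static void on_alarm(int sig)
{
    (void)sig;
    write(STDOUT_FILENO, "tick\n", 5);  /* async-signal-safe output */
}

int main(void)
{
    struct itimerval it = {
        .it_interval = { .tv_sec = 1, .tv_usec = 0 },   /* period */
        .it_value    = { .tv_sec = 1, .tv_usec = 0 },   /* first expiry */
    };

    signal(SIGALRM, on_alarm);
    setitimer(ITIMER_REAL, &it, NULL);
    for (;;)
        pause();                        /* wait for signal delivery */
}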
dynamic timer:
A dynamic timer may be created and destroyed at runtime. Each dynamic timer is stored in the following timer_list structure:
struct timer_list {
    /*
     * All fields that change during normal runtime grouped to the
     * same cacheline
     */
    struct list_head entry;
    unsigned long expires;              /* expiry time in jiffies; also the key used to group timers into lists */
    struct tvec_base *base;
    void (*function)(unsigned long);    /* timer callback function */
    unsigned long data;
    int slack;
#ifdef CONFIG_TIMER_STATS
    int start_pid;
    void *start_site;
    char start_comm[16];
#endif
#ifdef CONFIG_LOCKDEP
    struct lockdep_map lockdep_map;
#endif
};
/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
    BUG_ON(timer_pending(timer));
    mod_timer(timer, timer->expires);
}
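Putting timer_list and add_timer() together, here is a minimal module-style sketch of arming a dynamic timer under the (unsigned long data) callback API shown above; the names my_timer, my_timer_fn, and stopping are illustrative:

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list my_timer;
static bool stopping;

static void my_timer_fn(unsigned long data)
{
    pr_info("timer fired, data=%lu\n", data);
    if (!stopping)                      /* honor del_timer_sync()'s rule: don't re-arm during teardown */
        mod_timer(&my_timer, jiffies + HZ);     /* fire again in ~1 s */
}

static int __init my_init(void)
{
    setup_timer(&my_timer, my_timer_fn, 0);     /* set ->function and ->data */
    my_timer.expires = jiffies + HZ;    /* first expiry ~1 s from now */
    add_timer(&my_timer);
    return 0;
}

static void __exit my_exit(void)
{
    stopping = true;
    del_timer_sync(&my_timer);          /* wait for a running handler to finish */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");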
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
    expires = apply_slack(timer, expires);

    /*
     * This is a common optimization triggered by the
     * networking code - if the timer is re-modified
     * to be the same thing then just return:
     */
    if (timer_pending(timer) && timer->expires == expires)
        return 1;

    return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}

/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 * 1) calculate the maximum (absolute) time
 * 2) calculate the highest bit where the expires and new max are different
 * 3) use this bit to make a mask
 * 4) use the bitmask to round down the maximum time, so that all last
 *    bits are zeros
 */
static inline unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
    unsigned long expires_limit, mask;
    int bit;

    if (timer->slack >= 0) {
        expires_limit = expires + timer->slack;
    } else {
        long delta = expires - jiffies;

        if (delta < 256)
            return expires;

        expires_limit = expires + delta / 256;
    }
    mask = expires ^ expires_limit;
    if (mask == 0)
        return expires;

    bit = find_last_bit(&mask, BITS_PER_LONG);

    mask = (1 << bit) - 1;

    expires_limit = expires_limit & ~(mask);

    return expires_limit;
}
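To see what apply_slack() actually does, here is a userspace sketch of the same rounding with concrete, illustrative numbers: for expires = 0x1234 and slack = 16 the limit is 0x1244, the highest bit where the two differ is bit 6, so the expiry is rounded down to 0x1240, which coalesces nearby timers onto common expiry values. It assumes GCC/Clang builtins and a 64-bit long:

#include <stdio.h>

static unsigned long round_with_slack(unsigned long expires, unsigned long slack)
{
    unsigned long limit = expires + slack;
    unsigned long mask = expires ^ limit;
    int bit;

    if (mask == 0)
        return expires;
    /* highest differing bit; a stand-in for find_last_bit() (64-bit long assumed) */
    bit = 63 - __builtin_clzl(mask);
    return limit & ~((1UL << bit) - 1); /* zero out the low bits */
}

int main(void)
{
    printf("0x%lx\n", round_with_slack(0x1234, 16));    /* prints 0x1240 */
    return 0;
}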
static inline int __mod_timer(struct timer_list *timer, unsigned long expires,
                              bool pending_only, int pinned)
{
    struct tvec_base *base, *new_base;
    unsigned long flags;
    int ret = 0, cpu;

    timer_stats_timer_set_start_info(timer);
    BUG_ON(!timer->function);

    base = lock_timer_base(timer, &flags);

    ret = detach_if_pending(timer, base, false);
    if (!ret && pending_only)
        goto out_unlock;

    debug_activate(timer, expires);

    cpu = smp_processor_id();

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
    if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
        cpu = get_nohz_timer_target();
#endif
    new_base = per_cpu(tvec_bases, cpu);

    if (base != new_base) {
        /*
         * We are trying to schedule the timer on the local CPU.
         * However we can't change timer's base while it is running,
         * otherwise del_timer_sync() can't detect that the timer's
         * handler yet has not finished. This also guarantees that
         * the timer is serialized wrt itself.
         */
        if (likely(base->running_timer != timer)) {
            /* See the comment in lock_timer_base() */
            timer_set_base(timer, NULL);
            spin_unlock(&base->lock);
            base = new_base;
            spin_lock(&base->lock);
            timer_set_base(timer, base);
        }
    }

    timer->expires = expires;
    internal_add_timer(base, timer);    /* insert into the timer wheel */

out_unlock:
    spin_unlock_irqrestore(&base->lock, flags);

    return ret;
}

deleting a timer:
#ifdef CONFIG_SMP
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * Note: You must not hold locks that are held in interrupt context
 * while calling this function. Even if the lock has nothing to do
 * with the timer in question. Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                     call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                       spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *      while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
    unsigned long flags;

    /*
     * If lockdep gives a backtrace here, please reference
     * the synchronization rules above.
     */
    local_irq_save(flags);
    lock_map_acquire(&timer->lockdep_map);
    lock_map_release(&timer->lockdep_map);
    local_irq_restore(flags);
#endif
    /*
     * don't use it in hardirq context, because it
     * could lead to deadlock.
     */
    WARN_ON(in_irq());
    for (;;) {
        int ret = try_to_del_timer_sync(timer);

        if (ret >= 0)
            return ret;
        cpu_relax();
    }
}
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: timer to del
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
    struct tvec_base *base;
    unsigned long flags;
    int ret = -1;

    debug_assert_init(timer);

    base = lock_timer_base(timer, &flags);

    if (base->running_timer != timer) {
        timer_stats_timer_clear_start_info(timer);
        ret = detach_if_pending(timer, base, true);
    }
    spin_unlock_irqrestore(&base->lock, flags);

    return ret;
}
At timer initialization:
void __init init_timers(void)
{
    int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
                               (void *)(long)smp_processor_id());

    init_timer_stats();

    BUG_ON(err != NOTIFY_OK);
    register_cpu_notifier(&timers_nb);
    open_softirq(TIMER_SOFTIRQ, run_timer_softirq);     /* expired timers run as a softirq */
}
/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
    struct tvec_base *base = __this_cpu_read(tvec_bases);

    hrtimer_run_pending();

    if (time_after_eq(jiffies, base->timer_jiffies))
        __run_timers(base);
}
/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
    struct timer_list *timer;

    spin_lock_irq(&base->lock);
    while (time_after_eq(jiffies, base->timer_jiffies)) {
        struct list_head work_list;
        struct list_head *head = &work_list;
        int index = base->timer_jiffies & TVR_MASK;

        /*
         * Cascade timers:
         */
        if (!index &&
            (!cascade(base, &base->tv2, INDEX(0))) &&
                (!cascade(base, &base->tv3, INDEX(1))) &&
                    !cascade(base, &base->tv4, INDEX(2)))
            cascade(base, &base->tv5, INDEX(3));
        ++base->timer_jiffies;
        list_replace_init(base->tv1.vec + index, &work_list);
        while (!list_empty(head)) {
            void (*fn)(unsigned long);
            unsigned long data;

            timer = list_first_entry(head, struct timer_list, entry);
            fn = timer->function;
            data = timer->data;

            timer_stats_account_timer(timer);

            base->running_timer = timer;
            detach_expired_timer(timer, base);

            spin_unlock_irq(&base->lock);
            call_timer_fn(timer, fn, data);
            spin_lock_irq(&base->lock);
        }
    }
    base->running_timer = NULL;
    spin_unlock_irq(&base->lock);
}
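The index taken from base->timer_jiffies & TVR_MASK above selects the tv1 slot for the current jiffy; tv2..tv5 hold timers further in the future and feed tv1 via cascade(). Below is a rough userspace sketch of the bucketing rule, mirroring how internal_add_timer() picks a wheel level with the kernel's default constants (TVR_BITS = 8, TVN_BITS = 6); the jiffies values are made up:

#include <stdio.h>

#define TVN_BITS 6
#define TVR_BITS 8
#define TVR_SIZE (1 << TVR_BITS)
#define TVR_MASK (TVR_SIZE - 1)
#define TVN_MASK ((1 << TVN_BITS) - 1)

int main(void)
{
    unsigned long timer_jiffies = 1000; /* the wheel's current time */
    unsigned long expires = 1100;       /* timer due 100 ticks out */
    unsigned long idx = expires - timer_jiffies;

    if (idx < TVR_SIZE)                 /* within 256 ticks: tv1, one-jiffy slots */
        printf("tv1 slot %lu\n", expires & TVR_MASK);
    else if (idx < 1UL << (TVR_BITS + TVN_BITS))        /* further out: tv2 */
        printf("tv2 slot %lu\n", (expires >> TVR_BITS) & TVN_MASK);
    return 0;
}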
static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
                          unsigned long data)
{
    int preempt_count = preempt_count();

#ifdef CONFIG_LOCKDEP
    /*
     * It is permissible to free the timer from inside the
     * function that is called from it, this we need to take into
     * account for lockdep too. To avoid bogus "held lock freed"
     * warnings as well as problems when looking into
     * timer->lockdep_map, make a copy and use that here.
     */
    struct lockdep_map lockdep_map;

    lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
    /*
     * Couple the lock chain with the lock chain at
     * del_timer_sync() by acquiring the lock_map around the fn()
     * call here and in del_timer_sync().
     */
    lock_map_acquire(&lockdep_map);

    trace_timer_expire_entry(timer);
    fn(data);
    trace_timer_expire_exit(timer);

    lock_map_release(&lockdep_map);

    if (preempt_count != preempt_count()) {
        WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
                  fn, preempt_count, preempt_count());
        /*
         * Restore the preempt count. That gives us a decent
         * chance to survive and extract information. If the
         * callback kept a lock held, bad luck, but not worse
         * than the BUG() we had.
         */
        preempt_count() = preempt_count;
    }
}