threaded interrupt handler support

The kernel now supports threaded interrupt handlers through the request_threaded_irq() interface; the original request_irq() remains available. Pick whichever interface fits the situation: where request_threaded_irq() can do the job there is no need to keep using request_irq() plus a tasklet, workqueue, or kernel thread, but if the interrupt handling is trivial there is also no need to insist on request_threaded_irq().

Below are the declarations of these two interfaces from the 2.6.35 kernel source, followed by some reposted background material. At the end, the commit that added threaded interrupt handler support is quoted in full for reference.

#ifdef CONFIG_GENERIC_HARDIRQS
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
       irq_handler_t thread_fn,
       unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
     const char *name, void *dev)
{
 return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
   unsigned long flags, const char *name, void *dev_id);

extern void exit_irq_thread(void);
#else

extern int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
     const char *name, void *dev);

/*
 * Special function to avoid ifdeffery in kernel/irq/devres.c which
 * gets magically built by GENERIC_HARDIRQS=n architectures (sparc,
 * m68k). I really love these $@%#!* obvious Makefile references:
 * ../../../kernel/irq/devres.o
 */
static inline int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
       irq_handler_t thread_fn,
       unsigned long flags, const char *name, void *dev)
{
 return request_irq(irq, handler, flags, name, dev);
}

static inline int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
   unsigned long flags, const char *name, void *dev_id)
{
 return request_irq(irq, handler, flags, name, dev_id);
}

static inline void exit_irq_thread(void) { }
#endif

http://blog.csdn.net/wangyunqian6/article/details/6585873

In Linux, interrupt handling is split into a top half and a bottom half. The top half handles the high-priority work and should keep the time spent in interrupt context as short as possible; once it is done, it activates the bottom half, which takes care of the remaining work. The bottom half can be implemented with softirqs, tasklets, or workqueues, which differ in how they are used and where they fit. Softirqs are for bottom halves with strict latency requirements or very important work; they are mainly used by a few subsystems and ordinary drivers rarely need them. Tasklets and workqueues are used far more often in ordinary drivers; the main difference is that a tasklet runs in interrupt context while a workqueue runs in process context and can therefore perform operations that may sleep.
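For reference, a minimal sketch of that traditional split using request_irq() plus a workqueue; the function and variable names here are made up for illustration and are not from the original article:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

static struct work_struct mydev_work;

static void mydev_bottom_half(struct work_struct *work)
{
	/* process context: may sleep (mutexes, I/O, msleep, ...) */
}

static irqreturn_t mydev_top_half(int irq, void *dev_id)
{
	/* interrupt context: acknowledge the device, keep it short */
	schedule_work(&mydev_work);
	return IRQ_HANDLED;
}

/* setup, e.g. in the driver's probe routine:
 *	INIT_WORK(&mydev_work, mydev_bottom_half);
 *	request_irq(irq, mydev_top_half, 0, "mydev", NULL);
 */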

In 2.6.30, the interrupt threads that had lived in Ingo Molnar's RT tree for some time were finally merged into mainline. For an interrupt requested through request_threaded_irq(), the bulk of the handling (the thread_fn part) does not run in interrupt context but in a newly created kernel thread. That handler then behaves much like a workqueue item, with all of a workqueue's properties, while skipping the tedious steps of creating, initializing, and scheduling a workqueue. Handling becomes very simple. Let's look at the interface.

int request_threaded_irq(unsigned int irq, irq_handler_t handler, irq_handler_t thread_fn,
                         unsigned long irqflags, const char *devname, void *dev_id)

This is very similar to request_irq(). irq is the interrupt number. handler is the code that runs first when the interrupt fires, much like a top half; it returns IRQ_WAKE_THREAD to wake the interrupt thread. It is commonly set to NULL so that the default primary handler provided by the kernel is used. thread_fn is the handler that runs in the thread, much like a bottom half. The remaining three parameters are essentially the same as for request_irq(). irqflags gains a new flag, IRQF_ONESHOT, which keeps the interrupt line masked until the interrupt thread has finished and only then re-enables it. This flag is very useful; without it the interrupt may keep firing in the top half and the thread never gets a chance to run. For a level-triggered GPIO interrupt, for example, if this bit is not set the line is unmasked as soon as the top half completes; since the level has not changed, the interrupt fires again immediately and the thread never gets to run.

A practical example illustrates the use. On a phone platform, headset insertion is usually detected by a mechanical change in the headset jack that toggles the level of a baseband GPIO, and the insertion is handled in that GPIO's interrupt. Insertion is normally accompanied by contact bounce, so debouncing is required. The simplest approach is to wait for a while after the interrupt (for example 200 ms) and then check whether the GPIO state is stable to decide whether the insertion is valid. With the old interrupt scheme you have to fall back on a workqueue: the top half schedules a 200 ms delayed work item, and the check is done in the work function. With the threaded approach, you simply sleep 200 ms in thread_fn and then do the check. See, it is that simple!
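A minimal sketch of that debounce using request_threaded_irq(); the GPIO number, trigger edges, and the reporting done in the thread are assumptions for illustration only:

#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/kernel.h>

#define HEADSET_GPIO	42	/* assumed board-specific detect GPIO */

static irqreturn_t headset_thread_fn(int irq, void *dev_id)
{
	msleep(200);	/* debounce: sleeping is fine, we run in a kernel thread */

	/* the level should be stable now; report the (assumed active-low) state */
	pr_info("headset %s\n",
		gpio_get_value(HEADSET_GPIO) ? "removed" : "inserted");

	return IRQ_HANDLED;
}

static int headset_setup(void)
{
	/*
	 * handler == NULL selects the kernel's default primary handler.
	 * IRQF_ONESHOT keeps the line masked until the thread finishes,
	 * exactly the point made above about level-triggered GPIO interrupts.
	 */
	return request_threaded_irq(gpio_to_irq(HEADSET_GPIO),
				    NULL, headset_thread_fn,
				    IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
				    IRQF_ONESHOT,
				    "headset-detect", NULL);
}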

request_threaded_irq() first validates its arguments and looks up the irq_desc array entry indexed by the given irq number. It then dynamically allocates an irqaction descriptor, initializes it from the arguments, and finally calls __setup_irq() to add the descriptor to that IRQ's action list, completing the dynamic request and registration of the interrupt.

A return value of 0 means the request succeeded. On failure a non-zero value is returned, normally a negative errno code; for example, -16 (-EBUSY) means the requested interrupt line is already claimed in the kernel.

Note: this function registers an interrupt handler (if it is NULL, the default one is used) together with a thread. When the interrupt occurs, the handler runs in interrupt context and checks whether the interrupt belongs to this device; if so, it disables the interrupt at the device level and returns IRQ_WAKE_THREAD to wake up the thread function. The thread function may sleep, and it returns IRQ_HANDLED when it is done.
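The same split spelled out as a sketch for a device on a shared interrupt line; struct mydev, the register offsets, and the mask bits are illustrative assumptions, not a real driver:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>

struct mydev {
	void __iomem *regs;
	int irq;
};

#define MYDEV_STATUS      0x00	/* assumed status register offset */
#define MYDEV_IRQ_PENDING 0x01	/* assumed "interrupt pending" bit */
#define MYDEV_IRQ_MASK    0x04	/* assumed interrupt mask register */

static void mydev_process_data(struct mydev *dev)
{
	/* the real work would go here; it is allowed to sleep */
}

static irqreturn_t mydev_primary(int irq, void *dev_id)
{
	struct mydev *dev = dev_id;

	/* hard interrupt context: is this interrupt really ours? */
	if (!(readl(dev->regs + MYDEV_STATUS) & MYDEV_IRQ_PENDING))
		return IRQ_NONE;

	/* mask the interrupt at the device so it cannot re-fire ... */
	writel(0, dev->regs + MYDEV_IRQ_MASK);

	/* ... and wake the thread to do the real work */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t mydev_thread(int irq, void *dev_id)
{
	struct mydev *dev = dev_id;

	mydev_process_data(dev);		/* may sleep */
	writel(1, dev->regs + MYDEV_IRQ_MASK);	/* unmask at the device again */

	return IRQ_HANDLED;
}

static int mydev_setup_irq(struct mydev *dev)
{
	int ret;

	ret = request_threaded_irq(dev->irq, mydev_primary, mydev_thread,
				   IRQF_SHARED, "mydev", dev);
	if (ret)
		pr_err("mydev: request_threaded_irq failed: %d\n", ret);
	return ret;
}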

The relevant change went into the kernel in 2009; see the commit below for the details. A few small follow-up changes came later.

commit 3aa551c9b4c40018f0e261a178e3d25478dc04a9
Author: Thomas Gleixner <[email protected]>
Date:   Mon Mar 23 18:28:15 2009 +0100

    genirq: add threaded interrupt handler support
   
    Add support for threaded interrupt handlers:
   
    A device driver can request that its main interrupt handler runs in a
    thread. To achive this the device driver requests the interrupt with
    request_threaded_irq() and provides additionally to the handler a
    thread function. The handler function is called in hard interrupt
    context and needs to check whether the interrupt originated from the
    device. If the interrupt originated from the device then the handler
    can either return IRQ_HANDLED or IRQ_WAKE_THREAD. IRQ_HANDLED is
    returned when no further action is required. IRQ_WAKE_THREAD causes
    the genirq code to invoke the threaded (main) handler. When
    IRQ_WAKE_THREAD is returned handler must have disabled the interrupt
    on the device level. This is mandatory for shared interrupt handlers,
    but we need to do it as well for obscure x86 hardware where disabling
    an interrupt on the IO_APIC level redirects the interrupt to the
    legacy PIC interrupt lines.
   
    Signed-off-by: Thomas Gleixner <[email protected]>
    Reviewed-by: Ingo Molnar <[email protected]>

diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f832883..2dfaadb 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -105,7 +105,7 @@
 # define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
 #endif
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
 extern void synchronize_irq(unsigned int irq);
 #else
 # define synchronize_irq(irq) barrier()
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 0c9cb63..6fc2b72 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -59,6 +59,16 @@
 #define IRQF_NOBALANCING 0x00000800
 #define IRQF_IRQPOLL  0x00001000
 
+/*
+ * Bits used by threaded handlers:
+ * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
+ * IRQTF_DIED      - handler thread died
+ */
+enum {
+ IRQTF_RUNTHREAD,
+ IRQTF_DIED,
+};
+
 typedef irqreturn_t (*irq_handler_t)(int, void *);
 
 /**
@@ -71,6 +81,9 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
  * @next: pointer to the next irqaction for shared interrupts
  * @irq: interrupt number
  * @dir: pointer to the proc/irq/NN/name entry
+ * @thread_fn: interupt handler function for threaded interrupts
+ * @thread: thread pointer for threaded interrupts
+ * @thread_flags: flags related to @thread
  */
 struct irqaction {
  irq_handler_t handler;
@@ -81,11 +94,31 @@ struct irqaction {
  struct irqaction *next;
  int irq;
  struct proc_dir_entry *dir;
+ irq_handler_t thread_fn;
+ struct task_struct *thread;
+ unsigned long thread_flags;
 };
 
 extern irqreturn_t no_action(int cpl, void *dev_id);
-extern int __must_check request_irq(unsigned int, irq_handler_t handler,
-         unsigned long, const char *, void *);
+
+extern int __must_check
+request_threaded_irq(unsigned int irq, irq_handler_t handler,
+       irq_handler_t thread_fn,
+       unsigned long flags, const char *name, void *dev);
+
+static inline int __must_check
+request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
+     const char *name, void *dev)
+{
+ return request_threaded_irq(irq, handler, NULL, flags, name, dev);
+}
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+extern void exit_irq_thread(void);
+#else
+static inline void exit_irq_thread(void) { }
+#endif
+
 extern void free_irq(unsigned int, void *);
 
 struct device;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 873e4ac..8b1cf06 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -20,6 +20,7 @@
 #include <linux/irqreturn.h>
 #include <linux/irqnr.h>
 #include <linux/errno.h>
+#include <linux/wait.h>
 
 #include <asm/irq.h>
 #include <asm/ptrace.h>
@@ -155,6 +156,8 @@ struct irq_2_iommu;
  * @affinity:  IRQ affinity on SMP
  * @cpu:  cpu index useful for balancing
  * @pending_mask: pending rebalanced interrupts
+ * @threads_active: number of irqaction threads currently running
+ * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
  * @dir:  /proc/irq/ procfs entry
  * @name:  flow handler name for /proc/interrupts output
  */
@@ -186,6 +189,8 @@ struct irq_desc {
  cpumask_var_t  pending_mask;
 #endif
 #endif
+ atomic_t  threads_active;
+ wait_queue_head_t       wait_for_threads;
 #ifdef CONFIG_PROC_FS
  struct proc_dir_entry *dir;
 #endif
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h
index c5584ca..819acaa 100644
--- a/include/linux/irqreturn.h
+++ b/include/linux/irqreturn.h
@@ -5,10 +5,12 @@
  * enum irqreturn
  * @IRQ_NONE  interrupt was not from this device
  * @IRQ_HANDLED  interrupt was handled by this device
+ * @IRQ_WAKE_THREAD handler requests to wake the handler thread
  */
 enum irqreturn {
  IRQ_NONE,
  IRQ_HANDLED,
+ IRQ_WAKE_THREAD,
 };
 
 typedef enum irqreturn irqreturn_t;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 46d6806..38b77b0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1292,6 +1292,11 @@ struct task_struct {
 /* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
  spinlock_t alloc_lock;
 
+#ifdef CONFIG_GENERIC_HARDIRQS
+ /* IRQ handler threads */
+ struct irqaction *irqaction;
+#endif
+
  /* Protection of the PI data structures: */
  spinlock_t pi_lock;
 
diff --git a/kernel/exit.c b/kernel/exit.c
index 167e1e3..ca0b348 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1037,6 +1037,8 @@ NORET_TYPE void do_exit(long code)
   schedule();
  }
 
+ exit_irq_thread();
+
  exit_signals(tsk);  /* sets PF_EXITING */
  /*
   * tsk->flags are checked in the futex code to protect against
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 9ebf779..fe8f453 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -357,8 +357,37 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 
  do {
   ret = action->handler(irq, action->dev_id);
-  if (ret == IRQ_HANDLED)
+
+  switch (ret) {
+  case IRQ_WAKE_THREAD:
+   /*
+    * Wake up the handler thread for this
+    * action. In case the thread crashed and was
+    * killed we just pretend that we handled the
+    * interrupt. The hardirq handler above has
+    * disabled the device interrupt, so no irq
+    * storm is lurking.
+    */
+   if (likely(!test_bit(IRQTF_DIED,
+          &action->thread_flags))) {
+    set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
+    wake_up_process(action->thread);
+   }
+
+   /*
+    * Set it to handled so the spurious check
+    * does not trigger.
+    */
+   ret = IRQ_HANDLED;
+   /* Fall through to add to randomness */
+  case IRQ_HANDLED:
    status |= action->flags;
+   break;
+
+  default:
+   break;
+  }
+
   retval |= ret;
   action = action->next;
  } while (action);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 6458e99..a4c1ab8 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -8,16 +8,15 @@
  */
 
 #include <linux/irq.h>
+#include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 
 #include "internals.h"
 
-#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
-cpumask_var_t irq_default_affinity;
-
 /**
  * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  * @irq: interrupt number to wait for
@@ -53,9 +52,18 @@ void synchronize_irq(unsigned int irq)
 
   /* Oops, that failed? */
  } while (status & IRQ_INPROGRESS);
+
+ /*
+  * We made sure that no hardirq handler is running. Now verify
+  * that no threaded handlers are active.
+  */
+ wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
 }
 EXPORT_SYMBOL(synchronize_irq);
 
+#ifdef CONFIG_SMP
+cpumask_var_t irq_default_affinity;
+
 /**
  * irq_can_set_affinity - Check if the affinity of a given irq can be set
  * @irq:  Interrupt to check
@@ -72,6 +80,18 @@ int irq_can_set_affinity(unsigned int irq)
  return 1;
 }
 
+static void
+irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
+{
+ struct irqaction *action = desc->action;
+
+ while (action) {
+  if (action->thread)
+   set_cpus_allowed_ptr(action->thread, cpumask);
+  action = action->next;
+ }
+}
+
 /**
  * irq_set_affinity - Set the irq affinity of a given irq
  * @irq:  Interrupt to set affinity
@@ -100,6 +120,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
  cpumask_copy(desc->affinity, cpumask);
  desc->chip->set_affinity(irq, cpumask);
 #endif
+ irq_set_thread_affinity(desc, cpumask);
  desc->status |= IRQ_AFFINITY_SET;
  spin_unlock_irqrestore(&desc->lock, flags);
  return 0;
@@ -150,6 +171,8 @@ int irq_select_affinity_usr(unsigned int irq)
 
  spin_lock_irqsave(&desc->lock, flags);
  ret = setup_affinity(irq, desc);
+ if (!ret)
+  irq_set_thread_affinity(desc, desc->affinity);
  spin_unlock_irqrestore(&desc->lock, flags);
 
  return ret;
@@ -384,6 +407,93 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
  return ret;
 }
 
+static inline int irq_thread_should_run(struct irqaction *action)
+{
+ return test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags);
+}
+
+static int irq_wait_for_interrupt(struct irqaction *action)
+{
+ while (!kthread_should_stop()) {
+  set_current_state(TASK_INTERRUPTIBLE);
+  if (irq_thread_should_run(action)) {
+   __set_current_state(TASK_RUNNING);
+   return 0;
+  } else
+   schedule();
+ }
+ return -1;
+}
+
+/*
+ * Interrupt handler thread
+ */
+static int irq_thread(void *data)
+{
+ struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
+ struct irqaction *action = data;
+ struct irq_desc *desc = irq_to_desc(action->irq);
+ int wake;
+
+ sched_setscheduler(current, SCHED_FIFO, &param);
+ current->irqaction = action;
+
+ while (!irq_wait_for_interrupt(action)) {
+
+  atomic_inc(&desc->threads_active);
+
+  spin_lock_irq(&desc->lock);
+  if (unlikely(desc->status & IRQ_DISABLED)) {
+   /*
+    * CHECKME: We might need a dedicated
+    * IRQ_THREAD_PENDING flag here, which
+    * retriggers the thread in check_irq_resend()
+    * but AFAICT IRQ_PENDING should be fine as it
+    * retriggers the interrupt itself --- tglx
+    */
+   desc->status |= IRQ_PENDING;
+   spin_unlock_irq(&desc->lock);
+  } else {
+   spin_unlock_irq(&desc->lock);
+
+   action->thread_fn(action->irq, action->dev_id);
+  }
+
+  wake = atomic_dec_and_test(&desc->threads_active);
+
+  if (wake && waitqueue_active(&desc->wait_for_threads))
+   wake_up(&desc->wait_for_threads);
+ }
+
+ /*
+  * Clear irqaction. Otherwise exit_irq_thread() would make
+  * fuzz about an active irq thread going into nirvana.
+  */
+ current->irqaction = NULL;
+ return 0;
+}
+
+/*
+ * Called from do_exit()
+ */
+void exit_irq_thread(void)
+{
+ struct task_struct *tsk = current;
+
+ if (!tsk->irqaction)
+  return;
+
+ printk(KERN_ERR
+        "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
+        tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
+
+ /*
+  * Set the THREAD DIED flag to prevent further wakeups of the
+  * soon to be gone threaded handler.
+  */
+ set_bit(IRQTF_DIED, &tsk->irqaction->flags);
+}
+
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
@@ -420,6 +530,26 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
  }
 
  /*
+  * Threaded handler ?
+  */
+ if (new->thread_fn) {
+  struct task_struct *t;
+
+  t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
+       new->name);
+  if (IS_ERR(t))
+   return PTR_ERR(t);
+  /*
+   * We keep the reference to the task struct even if
+   * the thread dies to avoid that the interrupt code
+   * references an already freed task_struct.
+   */
+  get_task_struct(t);
+  new->thread = t;
+  wake_up_process(t);
+ }
+
+ /*
   * The following block of code has to be executed atomically
   */
  spin_lock_irqsave(&desc->lock, flags);
@@ -456,15 +586,15 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
  if (!shared) {
   irq_chip_set_defaults(desc->chip);
 
+  init_waitqueue_head(&desc->wait_for_threads);
+
   /* Setup the type (level, edge polarity) if configured: */
   if (new->flags & IRQF_TRIGGER_MASK) {
    ret = __irq_set_trigger(desc, irq,
      new->flags & IRQF_TRIGGER_MASK);
 
-   if (ret) {
-    spin_unlock_irqrestore(&desc->lock, flags);
-    return ret;
-   }
+   if (ret)
+    goto out_thread;
   } else
    compat_irq_chip_set_default_handler(desc);
 #if defined(CONFIG_IRQ_PER_CPU)
@@ -532,8 +662,19 @@ mismatch:
   dump_stack();
  }
 #endif
+ ret = -EBUSY;
+
+out_thread:
  spin_unlock_irqrestore(&desc->lock, flags);
- return -EBUSY;
+ if (new->thread) {
+  struct task_struct *t = new->thread;
+
+  new->thread = NULL;
+  if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
+   kthread_stop(t);
+  put_task_struct(t);
+ }
+ return ret;
 }
 
 /**
@@ -559,6 +700,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 {
  struct irq_desc *desc = irq_to_desc(irq);
  struct irqaction *action, **action_ptr;
+ struct task_struct *irqthread;
  unsigned long flags;
 
  WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
@@ -605,6 +747,10 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
   else
    desc->chip->disable(irq);
  }
+
+ irqthread = action->thread;
+ action->thread = NULL;
+
  spin_unlock_irqrestore(&desc->lock, flags);
 
  unregister_handler_proc(irq, action);
@@ -612,6 +758,12 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
  /* Make sure it's not being used on another CPU: */
  synchronize_irq(irq);
 
+ if (irqthread) {
+  if (!test_bit(IRQTF_DIED, &action->thread_flags))
+   kthread_stop(irqthread);
+  put_task_struct(irqthread);
+ }
+
 #ifdef CONFIG_DEBUG_SHIRQ
  /*
   * It's a shared IRQ -- the driver ought to be prepared for an IRQ
@@ -664,9 +816,12 @@ void free_irq(unsigned int irq, void *dev_id)
 EXPORT_SYMBOL(free_irq);
 
 /**
- * request_irq - allocate an interrupt line
+ * request_threaded_irq - allocate an interrupt line
  * @irq: Interrupt line to allocate
- * @handler: Function to be called when the IRQ occurs
+ * @handler: Function to be called when the IRQ occurs.
+ *    Primary handler for threaded interrupts
+ *      @thread_fn: Function called from the irq handler thread
+ *                  If NULL, no irq thread is created
  * @irqflags: Interrupt type flags
  * @devname: An ascii name for the claiming device
  * @dev_id: A cookie passed back to the handler function
@@ -678,6 +833,15 @@ EXPORT_SYMBOL(free_irq);
  * raises, you must take care both to initialise your hardware
  * and to set up the interrupt handler in the right order.
  *
+ * If you want to set up a threaded irq handler for your device
+ * then you need to supply @handler and @thread_fn. @handler ist
+ * still called in hard interrupt context and has to check
+ * whether the interrupt originates from the device. If yes it
+ * needs to disable the interrupt on the device and return
+ * IRQ_THREAD_WAKE which will wake up the handler thread and run
+ * @thread_fn. This split handler design is necessary to support
+ * shared interrupts.
+ *
  * Dev_id must be globally unique. Normally the address of the
  * device data structure is used as the cookie. Since the handler
  * receives this value it makes sense to use it.
@@ -693,8 +857,9 @@ EXPORT_SYMBOL(free_irq);
  * IRQF_TRIGGER_*  Specify active edge(s) or level
  *
  */
-int request_irq(unsigned int irq, irq_handler_t handler,
-  unsigned long irqflags, const char *devname, void *dev_id)
+int request_threaded_irq(unsigned int irq, irq_handler_t handler,
+    irq_handler_t thread_fn, unsigned long irqflags,
+    const char *devname, void *dev_id)
 {
  struct irqaction *action;
  struct irq_desc *desc;
@@ -742,6 +907,7 @@ int request_irq(unsigned int irq, irq_handler_t handler,
   return -ENOMEM;
 
  action->handler = handler;
+ action->thread_fn = thread_fn;
  action->flags = irqflags;
  action->name = devname;
  action->dev_id = dev_id;
@@ -771,4 +937,4 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 #endif
  return retval;
 }
-EXPORT_SYMBOL(request_irq);
+EXPORT_SYMBOL(request_threaded_irq);
