快乐虾
http://blog.csdn.net/lights_joy/
This article applies to:
ADSP-BF561
uclinux-2010r1-pre
Visual DSP++ 5.0(update 5)
Reproduction is welcome, but please retain the author information.

A kernel thread can be created with the kernel_thread function:
/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn) (void *), void *arg, unsigned long flags)
{
    struct pt_regs regs;

    memset(&regs, 0, sizeof(regs));

    regs.r1 = (unsigned long)arg;
    regs.p1 = (unsigned long)fn;
    regs.pc = (unsigned long)kernel_thread_helper;
    regs.orig_p0 = -1;
    /* Set bit 2 to tell ret_from_fork we should be returning to kernel
       mode. */
    regs.ipend = 0x8002;
    __asm__ __volatile__("%0 = syscfg;" : "=d"(regs.syscfg) : );

    return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL,
                   NULL);
}
Note how the pc value is set here: it points to kernel_thread_helper, which will be the first code this kernel thread executes:
/*
 * This gets run with P1 containing the
 * function to call, and R1 containing
 * the "args". Note P0 is clobbered on the way here.
 */
void kernel_thread_helper(void);
__asm__(".section .text\n"
        ".align 4\n"
        "_kernel_thread_helper:\n\t"
        "\tsp += -12;\n\t"
        "\tr0 = r1;\n\t"
        "\tcall (p1);\n\t"
        "\tcall _do_exit;\n"
        ".previous;");
In this code, execution jumps to the user-specified function via call (p1); when that function returns, do_exit is called to do the cleanup work.
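To make this concrete, here is a minimal, hypothetical usage sketch; my_thread_fn and spawn_example are invented for illustration and are not part of the kernel source:

/* Hypothetical example: spawn a kernel thread running my_thread_fn. */
static int my_thread_fn(void *arg)
{
    /* ... the thread's actual work goes here ... */
    return 0;   /* returning lands in the "call _do_exit" above */
}

static pid_t spawn_example(void)
{
    /* kernel_thread() adds CLONE_VM | CLONE_UNTRACED itself */
    return kernel_thread(my_thread_fn, NULL, CLONE_FS | CLONE_FILES);
}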
The actual creation work is done by do_fork; here both stack_start and stack_size are passed in as 0. This function carries out the thread creation; its key code is as follows:
/*
 * Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
             unsigned long stack_start,
             struct pt_regs *regs,
             unsigned long stack_size,
             int __user *parent_tidptr,
             int __user *child_tidptr)
{
    struct task_struct *p;
    int trace = 0;
    struct pid *pid = alloc_pid();
    long nr;

    /* ... */

    p = copy_process(clone_flags, stack_start, regs, stack_size,
                     parent_tidptr, child_tidptr, pid);
    /*
     * Do this prior waking up the new thread - the thread pointer
     * might get invalid after that point, if the thread exits quickly.
     */
    if (!IS_ERR(p)) {
        struct completion vfork;

        /* ... */

        if (!(clone_flags & CLONE_STOPPED))
            wake_up_new_task(p, clone_flags);
        else
            p->state = TASK_STOPPED;

        /* ... */
    } else {
        free_pid(pid);
        nr = PTR_ERR(p);
    }
    return nr;
}
It first allocates a pid for the thread, then copies out a new task_struct, and finally wakes the new thread; of course, the thread does not actually start executing at this point.
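The IS_ERR/PTR_ERR pair used above is the kernel's error-pointer idiom. A minimal sketch of the idea, following the definitions in include/linux/err.h:

/* Pointers within the top 4095 bytes of the address space never point
 * at valid objects, so a negative errno value can be carried in a
 * pointer-typed return value. */
#define MAX_ERRNO   4095

static inline void *ERR_PTR(long error)
{
    return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
    return (long)ptr;
}

static inline long IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}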
copy_process copies a new task_struct from the current thread:
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
                                        unsigned long stack_start,
                                        struct pt_regs *regs,
                                        unsigned long stack_size,
                                        int __user *parent_tidptr,
                                        int __user *child_tidptr,
                                        struct pid *pid)
{
    int retval;
    struct task_struct *p = NULL;

    /* ... */

    retval = -ENOMEM;
    p = dup_task_struct(current);
    if (!p)
        goto fork_out;

    /* ... */

    retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
    if (retval)
        goto bad_fork_cleanup_namespaces;

    /* ... */

    return p;
}
It first calls dup_task_struct to obtain a task_struct. This also allocates a thread_info structure for the new thread, and that structure doubles as the new thread's kernel stack; the block is allocated by the buddy allocator, which guarantees 8K alignment.
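That 8K alignment is exactly what makes the stack-pointer trick behind current_thread_info work. A sketch along the lines of the Blackfin implementation (assuming THREAD_SIZE is the 8K block size):

/* Because the thread_info/stack block is aligned to THREAD_SIZE,
 * masking the stack pointer recovers the thread_info of whatever
 * thread is currently running. */
static inline struct thread_info *current_thread_info(void)
{
    struct thread_info *ti;

    __asm__("%0 = sp;" : "=da"(ti));
    return (struct thread_info *)((long)ti & ~((long)THREAD_SIZE - 1));
}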
It then calls copy_thread to copy the thread state.
int
copy_thread(int nr, unsigned long clone_flags,
            unsigned long usp, unsigned long topstk,
            struct task_struct *p, struct pt_regs *regs)
{
    struct pt_regs *childregs;

    childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
    *childregs = *regs;
    childregs->r0 = 0;

    p->thread.usp = usp;
    p->thread.ksp = (unsigned long)childregs;
    p->thread.pc = (unsigned long)ret_from_fork;

    return 0;
}
Note that a copy of pt_regs is placed at the base of the new thread's stack, i.e. at the highest address of the stack area, since the stack grows downward. The PC in that pt_regs points to kernel_thread_helper, while the new thread's thread.pc points to ret_from_fork.
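The childregs arithmetic can be checked in isolation. The following stand-alone, user-space sketch (with an abridged, hypothetical pt_regs) shows that the struct lands flush against the top of the 8K area:

#include <stdio.h>

#define THREAD_SIZE 8192                            /* the 8K stack, as above */
struct pt_regs { unsigned long r0, r1, p1, pc; };   /* abridged */

int main(void)
{
    static unsigned char stack[THREAD_SIZE];
    /* same arithmetic as copy_thread: one pt_regs below the top */
    struct pt_regs *childregs =
        (struct pt_regs *)(stack + THREAD_SIZE) - 1;

    printf("stack top %p, childregs %p (%zu bytes below)\n",
           (void *)(stack + THREAD_SIZE), (void *)childregs,
           sizeof(struct pt_regs));
    return 0;
}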
wake_up_new_task puts the thread on a CPU core's run queue:
/*
 * wake_up_new_task - wake up a newly created task for the first time.
 *
 * This function will do some initial scheduler statistics housekeeping
 * that must be done for every newly created context, then puts the task
 * on the runqueue and wakes it.
 */
void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
{
    struct rq *rq, *this_rq;
    unsigned long flags;
    int this_cpu, cpu;

    rq = task_rq_lock(p, &flags);
    BUG_ON(p->state != TASK_RUNNING);
    this_cpu = smp_processor_id();
    cpu = task_cpu(p);

    /*
     * We decrease the sleep average of forking parents
     * and children as well, to keep max-interactive tasks
     * from forking tasks that are max-interactive. The parent
     * (current) is done further down, under its lock.
     */
    p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
        CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);

    p->prio = effective_prio(p);

    if (likely(cpu == this_cpu)) {
        if (!(clone_flags & CLONE_VM)) {
            /*
             * The VM isn't cloned, so we're in a good position to
             * do child-runs-first in anticipation of an exec. This
             * usually avoids a lot of COW overhead.
             */
            if (unlikely(!current->array))
                __activate_task(p, rq);
            else {
                p->prio = current->prio;
                p->normal_prio = current->normal_prio;
                list_add_tail(&p->run_list, &current->run_list);
                p->array = current->array;
                p->array->nr_active++;
                inc_nr_running(p, rq);
            }
            set_need_resched();
        } else
            /* Run child last */
            __activate_task(p, rq);
        /*
         * We skip the following code due to cpu == this_cpu
         *
         *   task_rq_unlock(rq, &flags);
         *   this_rq = task_rq_lock(current, &flags);
         */
        this_rq = rq;
    } else {
        this_rq = (struct rq *)cpu_rq(this_cpu);

        /*
         * Not the local CPU - must adjust timestamp. This should
         * get optimised away in the !CONFIG_SMP case.
         */
        p->timestamp = (p->timestamp - this_rq->most_recent_timestamp)
                    + rq->most_recent_timestamp;
        __activate_task(p, rq);
        if (TASK_PREEMPTS_CURR(p, rq))
            resched_task(rq->curr);

        /*
         * Parent and child are on different CPUs, now get the
         * parent runqueue to update the parent's ->sleep_avg:
         */
        task_rq_unlock(rq, &flags);
        this_rq = task_rq_lock(current, &flags);
    }
    current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
        PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
    task_rq_unlock(this_rq, &flags);
}
This function is fairly long, but the actual work of putting the new thread on the queue is done by __activate_task:
/*
 * __activate_task - move a task to the runqueue.
 */
static void __activate_task(struct task_struct *p, struct rq *rq)
{
    struct prio_array *target = rq->active;

    if (batch_task(p))
        target = rq->expired;
    enqueue_task(p, target);
    inc_nr_running(p, rq);
}
Now look at enqueue_task:
static void enqueue_task(struct task_struct *p, struct prio_array *array)
{
    sched_info_queued(p);
    list_add_tail(&p->run_list, array->queue + p->prio);
    __set_bit(p->prio, array->bitmap);
    array->nr_active++;
    p->array = array;
}
At this point, the kernel thread has truly been added to the CPU's run queue.
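The bitmap that enqueue_task sets with __set_bit is what gives this O(1) scheduler its constant-time task selection. A sketch of how schedule() later uses it, simplified from the scheduler code of this kernel generation:

/* Find the highest-priority non-empty queue via the bitmap, then take
 * the first task on that list -- both steps are constant time. */
static struct task_struct *pick_next_sketch(struct prio_array *array)
{
    int idx = sched_find_first_bit(array->bitmap);  /* best prio */
    struct list_head *queue = array->queue + idx;

    return list_entry(queue->next, struct task_struct, run_list);
}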
Next, look at the implementation of kthread_create:
/**
 * kthread_create - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread. The thread will be stopped: use wake_up_process() to start
 * it. See also kthread_run(), kthread_create_on_cpu().
 *
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which noone will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called). The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create(int (*threadfn)(void *data),
                                   void *data,
                                   const char namefmt[],
                                   ...)
{
    struct kthread_create_info create;

    create.threadfn = threadfn;
    create.data = data;
    init_completion(&create.started);
    init_completion(&create.done);

    spin_lock(&kthread_create_lock);
    list_add_tail(&create.list, &kthread_create_list);
    spin_unlock(&kthread_create_lock);

    wake_up_process(kthreadd_task);
    wait_for_completion(&create.done);

    if (!IS_ERR(create.result)) {
        struct sched_param param = { .sched_priority = 0 };
        va_list args;

        va_start(args, namefmt);
        vsnprintf(create.result->comm, sizeof(create.result->comm),
                  namefmt, args);
        va_end(args);
        /*
         * root may have changed our (kthreadd's) priority or CPU mask.
         * The kernel thread should not inherit these properties.
         */
        sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
        set_user_nice(create.result, KTHREAD_NICE_LEVEL);
        set_cpus_allowed_ptr(create.result, cpu_all_mask);
    }
    return create.result;
}
This function fills in a kthread_create_info structure and puts it on the list of threads waiting to be created. Note that it does not insert the new thread into a CPU's run queue itself; that work is left to the kthreadd kernel thread, shown after the sketch below.
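First, a hypothetical usage sketch (one_shot_worker and start_worker are invented): since kthread_create returns the thread in the stopped state, the caller has to start it explicitly:

static int one_shot_worker(void *data)
{
    /* ... do the work once ... */
    do_exit(0);     /* standalone thread: no one calls kthread_stop() */
}

static struct task_struct *start_worker(void)
{
    struct task_struct *tsk = kthread_create(one_shot_worker, NULL,
                                             "one_shot_worker");

    if (!IS_ERR(tsk))
        wake_up_process(tsk);   /* the thread only runs from here on */
    return tsk;
}

The kthreadd thread itself is implemented as follows: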
int kthreadd(void *unused)
{
    struct task_struct *tsk = current;

    /* Setup a clean context for our children to inherit. */
    set_task_comm(tsk, "kthreadd");
    ignore_signals(tsk);
    set_user_nice(tsk, KTHREAD_NICE_LEVEL);
    set_cpus_allowed_ptr(tsk, cpu_all_mask);

    current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

    for (;;) {
        set_current_state(TASK_INTERRUPTIBLE);
        if (list_empty(&kthread_create_list))
            schedule();
        __set_current_state(TASK_RUNNING);

        spin_lock(&kthread_create_lock);
        while (!list_empty(&kthread_create_list)) {
            struct kthread_create_info *create;

            create = list_entry(kthread_create_list.next,
                                struct kthread_create_info, list);
            list_del_init(&create->list);
            spin_unlock(&kthread_create_lock);

            create_kthread(create);

            spin_lock(&kthread_create_lock);
        }
        spin_unlock(&kthread_create_lock);
    }

    return 0;
}
It looks quite simple: it keeps checking whether the kthread_create_list has kernel threads waiting to be created, creating each one if so; when the list is empty it calls schedule to give up the CPU.
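The set_current_state / list_empty / schedule sequence at the top of the loop is the kernel's canonical race-free sleep pattern; a generic sketch of it, where work_available() is an invented stand-in for the real condition:

/* Marking the task TASK_INTERRUPTIBLE *before* testing the condition
 * means a wake_up_process() racing with the test simply puts the task
 * back to TASK_RUNNING, so schedule() returns promptly and the wakeup
 * is never lost. */
static int waiter_sketch(void *unused)
{
    for (;;) {
        set_current_state(TASK_INTERRUPTIBLE);
        if (!work_available())      /* hypothetical condition */
            schedule();
        __set_current_state(TASK_RUNNING);
        /* ... consume the work ... */
    }
    return 0;
}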
kthreadd delegates the actual thread creation to create_kthread; let's take a look:
static void create_kthread(struct kthread_create_info *create)
{
    int pid;

    /* We want our own signal handler (we take no signals by default). */
    pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
    if (pid < 0)
        create->result = ERR_PTR(pid);
    else
        wait_for_completion(&create->started);
    complete(&create->done);
}
So in the end, the actual creation work is still handed off to the kernel_thread function!

kthread_run is a macro defined by the kernel that bundles up the remaining thread-creation chores:
/**
 * kthread_run - create and wake a thread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: Convenient wrapper for kthread_create() followed by
 * wake_up_process(). Returns the kthread or ERR_PTR(-ENOMEM).
 */
#define kthread_run(threadfn, data, namefmt, ...)                          \
({                                                                         \
    struct task_struct *__k                                                \
        = kthread_create(threadfn, data, namefmt, ## __VA_ARGS__);         \
    if (!IS_ERR(__k))                                                      \
        wake_up_process(__k);                                              \
    __k;                                                                   \
})
It wakes the thread up immediately after creating it, which is exactly why it is called kthread_run.
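A hypothetical end-to-end sketch of the kthread_run / kthread_stop pairing; my_loop, start_example and stop_example are invented for illustration:

static struct task_struct *loop_task;

static int my_loop(void *data)
{
    while (!kthread_should_stop())
        schedule_timeout_interruptible(HZ);     /* periodic work */
    return 0;   /* this value is handed back to kthread_stop() */
}

static int start_example(void)
{
    loop_task = kthread_run(my_loop, NULL, "my_loop");
    return IS_ERR(loop_task) ? PTR_ERR(loop_task) : 0;
}

static void stop_example(void)
{
    /* makes kthread_should_stop() return true and waits for my_loop
     * to finish */
    kthread_stop(loop_task);
}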