本文欢迎大家转载
原文出自: http://blog.csdn.net/dyron
4.1 并发的来源
并发, 是指可能导致对共享资源的访问出现竞争状态的若干执行路径, 不一定是指严格的时间意义上的并发执行.

#define local_irq_enable() \
do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
/* Disable interrupts on the local CPU, then record the hardirqs-off state
 * for the irq tracing machinery (mirror image of local_irq_enable above,
 * where tracing is updated before interrupts are re-enabled). */
#define local_irq_disable() \
do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
/* Acquire @lock. Thin wrapper that forwards to the raw spinlock layer via
 * the embedded rlock member. */
static inline void spin_lock(spinlock_t *lock)
{
raw_spin_lock(&lock->rlock);
}
/* Architecture-level spinlock: a single lock word (0 = free, non-zero =
 * held, as set by do_raw_spin_lock below). volatile because it is read
 * and written concurrently by multiple CPUs. */
typedef struct raw_spinlock {
volatile unsigned int raw_lock;
} raw_spinlock_t;
/* Public spinlock type: wraps the architecture raw lock in a union. */
typedef struct spinlock {
union {
/* Fixed: the tag must be "raw_spinlock" to match the typedef above;
 * "raw_spin_lock" in the original transcription names a different,
 * undeclared struct and does not compile. */
struct raw_spinlock rlock;
};
} spinlock_t;
/* Acquire a raw spinlock: first disable kernel preemption so the holder
 * cannot be scheduled away while spinning callers wait, then take the
 * arch-level lock. */
static inline void raw_spin_lock(raw_spinlock_t *lock)
{
preempt_disable();
do_raw_spin_lock(lock); /* fixed: the transcription dropped this ';' */
}
函数首先调用preempt_disable宏, 后者在定义了CONFIG_PREEMPT, 即在内核支持抢占式调度的系统时, 将关闭内核的可抢占性
/* ARM implementation: spin until the lock word goes 0 -> 1 using the
 * ldrex/strex exclusive-monitor pair, then issue a full barrier so the
 * critical section cannot be reordered before the acquire. */
static inline void do_raw_spin_lock(raw_spinlock_t *lock)
{
unsigned long tmp;
__asm__ __volatile__ (
"1: ldrex %0, [%1]\n"      /* tmp = lock->raw_lock, exclusive load */
"teq %0, #0\n"             /* is the lock free (== 0)? */
"strexeq %0, %2, [%1]\n"   /* if free, try to store 1; tmp = strex status */
"teqeq %0, #0\n"           /* did the exclusive store succeed? */
"bne 1b"                   /* lock held or store lost the race: retry */
:"=&r"(tmp)
:"r"(&lock->raw_lock),"r"(1)
:"cc");
smp_mb();                  /* acquire barrier after taking the lock */
}
static inline int spin_trylock(spinlock_t *lock)
static inline int spin_trylock_irq(spinlock_t *lock)
spin_trylock_irqsave(lock, flags);
int spin_trylock_bh(spinlock_t *lock);
/* ARM writer lock: spin until rw->lock is 0 (no readers, no writer), then
 * exclusively store the writer bit 0x80000000. Fixed: the original was
 * declared `int` but never returned a value (undefined behavior if the
 * result were used); the kernel's version is void. */
static inline void do_raw_write_lock(raw_rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"      /* tmp = rw->lock, exclusive load */
"teq %0, #0\n"             /* completely free? */
"strexeq %0, %2, [%1]\n"   /* if so, try to claim the writer bit */
"teq %0, #0\n"             /* exclusive store succeeded? */
"bne 1b"                   /* busy or lost the race: retry */
:"=&r"(tmp)
:"r"(&rw->lock),"r"(0x80000000)
:"cc");
smp_mb();                  /* acquire barrier */
}
/* ARM reader lock: atomically increment the reader count in rw->lock.
 * If a writer holds the lock (the adds result is negative because of the
 * 0x80000000 writer bit) the store is skipped and we spin. */
static inline void do_raw_read_lock(raw_rwlock_t *rw)
{
unsigned long tmp, tmp2;
__asm__ __volatile__(
"1: ldrex %0,[%2]\n"     /* tmp = rw->lock, exclusive load */
"adds %0, %0, #1\n"      /* one more reader; N flag set if writer present */
"strexpl %1, %0, [%2]\n" /* store only when the result is non-negative */
"rsbpls %0, %1, #0\n"    /* fold the strex status back into the flags */
"bmi 1b"                 /* writer held it or store failed: retry */
:"=&r"(tmp),"=&r"(tmp2)
:"r"(&rw->lock)
:"cc");
smp_mb();                /* acquire barrier */
}
rwlock同样有多个版本. 对于读取者
void read_lock(rwlock_t *lock);
void read_lock_irq(rwlock_t *lock);
void read_lock_irqsave(rwlock_t *lock, unsigned long flags);
void read_unlock(rwlock_t *lock);
void read_unlock_irq(rwlock_t *lock);
void read_unlock_irqrestore(rwlock_t *lock, unsigned long flags);
对于写入者:
void write_lock(rwlock_t *lock);
void write_lock_irq(rwlock_t *lock);
void write_lock_irqsave(rwlock_t *lock, unsigned long flags);
void write_unlock(rwlock_t *lock);
void write_unlock_irq(rwlock_t *lock);
void write_unlock_irqrestore(rwlock_t *lock, unsigned long flags);
try版本:
int read_trylock(rwlock_t *lock);
int write_trylock(rwlock_t *lock);
. 如果当前有进程正在写, 其它进程不能读, 也不能写.
/* Counting semaphore. @lock protects @count and @wait_list so updates are
 * atomic; @count is how many more acquisitions are currently allowed;
 * @wait_list queues the tasks sleeping on the semaphore. */
struct semaphore {
spinlock_t lock;
unsigned int count;
struct list_head wait_list;
};
其中, lock是自旋锁变量, 用于实现对信号量的另一个成员的原子操作, count用于表示通过该信号量允许进
static inline void sema_init(struct semaphore *sem, int val)
{
sttaic struct lock_class_key __key;
*sem=(struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0);
}
初始化主要通过__SEMAPHORE_INITIALIZER宏完成
/* Designated-initializer for a semaphore: unlocked spinlock, count n,
 * empty wait list. Fixed: a multi-line macro needs a '\' continuation on
 * every line; the original transcription dropped several of them. */
#define __SEMAPHORE_INITIALIZER(name, n) \
{ \
.lock = __SPIN_LOCK_UNLOCKED((name).lock), \
.count = n, \
.wait_list = LIST_HEAD_INIT((name).wait_list), \
}
void down(struct semaphore *sem);
int down_interruptible(struct semaphore *sem);
int down_killable(struct semaphore *sem);
int down_trylock(struct semaphore *sem);
int down_timeout(struct semaphore *sem, long jiffies);
down_interruptible函数定义如下:
/* Acquire @sem, sleeping interruptibly if it is unavailable.
 * Returns 0 on success, or the error from __down_interruptible (e.g. when
 * a signal interrupted the sleep). The irqsave spinlock makes the
 * test-and-decrement of count atomic against other CPUs and interrupts. */
int down_interruptible(struct semaphore *sem)
{
unsigned long flags;
int result = 0;
spin_lock_irqsave(&sem->lock, flags);
if (likely(sem->count > 0))
sem->count--;                       /* fast path: semaphore available */
else
result = __down_interruptible(sem); /* slow path: queue and sleep */
spin_unlock_irqrestore(&sem->lock, flags);
return result;
}
函数首先通过对spin_lock_irqsave的调用来保证对sem->count操作的原子性, 防止多进程对sem->count同时
void down(struct semaphore *sem)
与down_interruptible相比, down是不可中断的, 这意味着调用它的进程如果无法获得信号量, 将一直处于
int down_killable(struct semaphore *sem)
睡眠的进程可以收到一些致命性的信号, 被唤醒而导致信号量的操作被中断. 极少使用.
int down_trylock(struct semaphore *sem)
进程试图获得信号量, 如果无法获得则直接返回1而不进入睡眠, 返回0表明已经成功获得了信号量.
int down_timeout(struct semaphore *sem, long jiffies);
在无法获得信号量时将进入睡眠, 但处于这种睡眠状态有时间限制, 如果jiffies指定的时间到期时函数依然
/* Release @sem. If nobody is waiting, simply bump the count; otherwise
 * hand the semaphore directly to the first waiter via __up() instead of
 * raising count (so a newcomer cannot steal it from the sleeper). */
void up(struct semaphore *sem)
{
unsigned long flags;
spin_lock_irqsave(&sem->lock, flags);
if (likely(list_empty(&sem->wait_list)))
sem->count++;
else
__up(sem);   /* wake the head of wait_list */
spin_unlock_irqrestore(&sem->lock, flags);
}
如果信号量sem的wait_list队列为空, 表明没有其它进程在等待该信号量, 只要把sem的count加1即可, 如果
/* Define and statically initialize a semaphore with count 1, i.e. for
 * binary (mutex-like) use. */
#define DEFINE_SEMAPHORE(name) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
..........在3.0.8内核上已经没有书上说的DECLARE_MUTEX宏了, 替换的是这个DEFINE_SEMAPHORE
/* Reader/writer semaphore. @activity tracks the readers/writer currently
 * holding the rwsem; @wait_lock protects @wait_list, the queue of
 * sleeping tasks. Fixed: the original transcription dropped the ';' that
 * must follow a struct definition's closing brace. */
struct rw_semaphore {
__s32 activity;
spinlock_t wait_lock;
struct list_head wait_list;
};
activity的确切含义是:
/* Runtime initialization of a reader/writer semaphore: no holders yet,
 * unlocked wait_lock, empty waiter queue. */
void __init_rwsem(struct rw_semaphore *sem)
{
sem->activity = 0;
spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
读者的DOWN操作:
void __sched down_read(struct rw_semaphore *sem);
int down_read_trylock(struct rw_semaphore *sem);
读者的UP操作:
void up_read(struct rw_semaphore *sem);
写入者的DOWN操作:
void __sched down_write(struct rw_semaphore *sem);
int down_write_trylock(struct rw_semaphore *sem);
写入者的UP操作:
void up_write(struct rw_semaphore *sem);
4.5 互斥锁mutex
/* Sleeping mutual-exclusion lock. count encodes the state (see below);
 * wait_lock protects wait_list, the queue of sleeping contenders. The
 * remaining fields exist only under the debug/lockdep config options. */
struct mutex {
/* 1: unlocked, 0: locked, negative: locked, possible waiters */
atomic_t count;
spinlock_t wait_lock;
struct list_head wait_list;
#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
struct task_struct *owner;   /* task currently holding the mutex */
#endif
#ifdef CONFIG_DEBUG_MUTEXES
const char *name;            /* debug name */
void *magic;                 /* debug corruption check */
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;  /* lockdep bookkeeping */
#endif
};
定义一个静态的struct mutex 变量同时初始化的方法是利用内核的DEFINE_MUTEX;
/* Static initializer: count 1 (unlocked), unlocked wait_lock, empty
 * waiter list. Fixed: the '{' line of this multi-line macro was missing
 * its '\' continuation in the original transcription. */
#define __MUTEX_INITIALIZER(lockname) \
{ \
.count = ATOMIC_INIT(1),\
.wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock),\
.wait_list=LIST_HEAD_INIT(lockname.wait_list)\
}
/* Define and statically initialize a mutex in the unlocked state. */
#define DEFINE_MUTEX(mutexname) \
struct mutex mutexname = __MUTEX_INITIALIZER(mutexname);
如果在程序的执行期初始化一个mutex变量, 则可以使用mutex_init宏.
/* Runtime initialization of a mutex: count = 1 (unlocked), unlocked
 * wait_lock, empty waiter queue. */
void mutex_init(struct mutex *lock)
{
atomic_set(&lock->count, 1);
spin_lock_init(&lock->wait_lock);
INIT_LIST_HEAD(&lock->wait_list);
}
/* Acquire @lock, sleeping if it is contended. might_sleep() catches
 * callers in atomic context; the fastpath decrements count and falls back
 * to __mutex_lock_slowpath (passed as the fail function) on contention. */
void __sched mutex_lock(struct mutex *lock)
{
might_sleep();
__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
mutex_set_owner(lock);   /* record the new holder */
}
函数的设计思想体现在__mutex_fastpath_lock和__mutex_lock_slowpath上, __mutex_fastpath_lock用来快
/* Fastpath: atomically decrement count with an ldrex/strex pair. If the
 * decremented value is non-zero (the mutex was not unlocked) or the
 * exclusive store failed, call fail_fn — the slowpath that queues and
 * sleeps. */
static inline void __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
int __ex_flag, __res;
__asm__ (
"ldrex %0, [%2] \n\t"    /* __res = count->counter, exclusive load */
"sub %0, %0, #1 \n\t"    /* __res -= 1 */
"strex %1, %0, [%2] "    /* try store; __ex_flag = 0 on success */
: "=&r" (__res), "=&r" (__ex_flag)
: "r" (&(count)->counter)
: "cc","memory" );
__res |= __ex_flag;      /* non-zero if contended OR store failed */
if (unlikely(__res != 0))
fail_fn(count);          /* slowpath */
}
函数通过ldrex完成__res=count->counter, 第二行汇编完成__res=__res-1, 第三行试图用
/* Release @lock. The owner field is cleared here only when mutex
 * debugging is disabled; with debugging on, the slowpath clears it after
 * verifying the caller really was the owner. */
void __sched mutex_unlock(struct mutex *lock)
{
/*
 * The unlocking fastpath is the 0->1 transition from 'locked'
 * into 'unlocked' state:
 */
#ifndef CONFIG_DEBUG_MUTEXES
/*
 * When debugging is enabled we must not clear the owner before time,
 * the slow path will always be taken, and that clears the owner field
 * after verifying that it was indeed current.
 */
mutex_clear_owner(lock);
#endif
__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}
和mutex_lock函数一样,mutex_unlock也有两条主线: __mutex_fastpath_unlock和__mutex_unlock_slowpath,
/* Unlock fastpath: atomically increment count with ldrex/strex. If the
 * pre-increment value was non-zero (waiters may exist) or the exclusive
 * store failed, call fail_fn — the slowpath that wakes a waiter. */
static inline void __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
int __ex_flag, __res, __orig;
__asm__ (
"ldrex %0, [%3] \n\t"    /* __orig = count->counter, exclusive load */
"add %1, %0, #1 \n\t"    /* __res = __orig + 1 */
"strex %2, %1, [%3] "    /* try store; __ex_flag = 0 on success */
: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
: "r" (&(count)->counter)
: "cc","memory" );
__orig |= __ex_flag;     /* non-zero if waiters possible OR store failed */
if (unlikely(__orig != 0))
fail_fn(count);          /* slowpath: wake a waiter */
}
这里除了将count->counter的值加1以外, 代码和__mutex_fastpath_lock中几乎完成一样. 在最后的if语句中
/* Sequence lock. sequence coordinates readers with the writer (it is odd
 * while a write is in progress — see write_seqlock/write_sequnlock);
 * lock serializes concurrent writers. */
typedef struct {
unsigned sequence;
spinlock_t lock;
} seqlock_t;
sequence用来协调读者与写者的操作, spinlock变量lock在多个写者之间做互斥作用.
/* Static definition/initializer: sequence 0 (no write in progress) and an
 * unlocked writer spinlock. */
#define DEFINE_SEQLOCK(x)\
seqlock_t x = __SEQLOCK_UNLOCKED(x)
#define __SEQLOCK_UNLOCKED(lockname) \
{0, __SPIN_LOCK_UNLOCKED(lockname)}
如果要动态初始化一个seqlock变量, 可以使用seqlock_init:
/* Runtime initialization of a seqlock. Fixed: every interior line of a
 * multi-line macro needs a '\' continuation; the original transcription
 * dropped them on the do/body/while lines. */
#define seqlock_init(x) \
do { \
(x)->sequence = 0; \
spin_lock_init(&(x)->lock); \
} while (0)
写者在seqlock上的上锁操作
/* Writer entry: take the writer spinlock (writers exclude each other),
 * then make sequence odd so readers can detect an in-flight write; the
 * write barrier orders the sequence bump before the protected stores. */
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
++sl->sequence;
smp_wmb();
}
写者对写之前需要先获得seqlock上的锁lock, 这说明写者之间必须保证互斥操作, 如果某一写者成功获得lo
/* Writer exit: barrier first so the protected stores are visible before
 * sequence becomes even again, then increment and release the writer
 * lock. Lock+unlock together bump sequence twice, leaving it even when
 * no write is in progress. */
static inline void write_sequnlock(seqlock_t *sl)
{
smp_wmb();
sl->sequence++;
spin_unlock(&sl->lock);
}
????????为什么write_seqlock中是++sl->sequence, write_sequnlock中是sl->sequence++; 加两次是为了表明是
/* Reader entry: snapshot sequence, spinning while it is odd (a write is
 * in progress). The returned value is later passed to read_seqretry to
 * detect a write that raced with the read.
 * Fixed two transcription typos: the attribute is __always_inline (not
 * __always__inline) and the barrier is smp_rmb() (not smb_rmb()). */
static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
{
unsigned ret;
repeat:
ret = sl->sequence;
smp_rmb();               /* order the snapshot before the protected loads */
if(unlikely(ret & 1)) {  /* odd sequence: writer active */
cpu_relax();
goto repeat;
}
return ret;
}
如果当前正好有写者在进行写操作, 那么该函数将循环直到写结束, 这就是sequence最低位的用途, 这里正好
write_seqlock_irq(lock)
write_seqlock_irqsave(lock, flags)
write_seqlock_bh(lock)
write_sequnlock_irq(lock)
write_sequnlock_irqrestore(lock, flags)
write_sequnlock_bh(lock)
read_seqbegin_irqsave(lock, flags)
read_seqretry_irqrestore(lock, iv, flags)
rwlock与seqlock非常相似, 不同在于seqlock在写的时候只与其它写者互斥, 而rwlock在写的时候与读者和写
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
RCU的写者负责在替换掉老指针后调用call_rcu向内核注册一回调函数, 回调负责释放老指针指向的内存空间
/* Atomic integer. Wrapping the int in a struct keeps callers from
 * touching the value with plain arithmetic; it is accessed through the
 * atomic_* function family instead. */
typedef struct {
int counter;
} atomic_t;
Linux系统中定义了一大堆以"atomic_"打头的原子操作函数, 这些函数的实现都信赖于特定的硬件平台;
/* Head of a wait queue. */
struct __wait_queue_head {
spinlock_t lock; /* wait queue spinlock, serializes concurrent access */
struct list_head task_list; /* doubly-linked list of waiter nodes */
};
typedef struct __wait_queue_head wait_queue_head_t;
/* Static initializer: unlocked spinlock and a list head whose next/prev
 * point at itself, i.e. an empty queue. */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
.task_list = { &(name).task_list, &(name).task_list } }
#define DECLARE_WAIT_QUEUE_HEAD(name) \
wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
/* Runtime initialization. The static lock_class_key gives each call site
 * its own lockdep class for the queue's spinlock. */
#define init_waitqueue_head(q) \
do { \
static struct lock_class_key __key; \
\
__init_waitqueue_head((q), &__key); \
} while (0)
/* Initialize a wait queue head: spinlock, its lockdep class, and an empty
 * waiter list. */
void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *key)
{
spin_lock_init(&q->lock);
lockdep_set_class(&q->lock, key);
INIT_LIST_HEAD(&q->task_list);
}
4.9.2 等待队列的节点
typedef struct __wait_queue wait_queue_t;
/* Callback type invoked when the node is woken up. */
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
/* One waiter node on a wait queue. Fixed: the original transcription
 * dropped the ';' after the struct's closing brace. */
struct __wait_queue {
unsigned int flags;          /* wakeup behavior, e.g. WQ_FLAG_EXCLUSIVE */
void *private;               /* normally the waiting task_struct (see init) */
wait_queue_func_t func;      /* wakeup callback */
struct list_head task_list;  /* links into the head's task_list */
};
flags, 唤醒等待队列上的进程时, 该标志会影响唤醒操作的行为模式, WQ_FLAG_EXCLUSIVE, 该标志表明睡眠
/* Static node initializer: the waiter is @tsk, woken through
 * default_wake_function; flags is implicitly zero-initialized
 * (non-exclusive wakeup), and the list links start out NULL. */
#define __WAITQUEUE_INITIALIZER(name, tsk) { \
.private = tsk, \
.func = default_wake_function, \
.task_list = { NULL, NULL } }
#define DECLARE_WAITQUEUE(name, tsk) \
wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
如果要在程序运行期初始化一个等待队列节点对象, 可以用init_waitqueue_entry函数:
/* Runtime initialization of a wait queue node for task @p: non-exclusive
 * (flags = 0), woken via default_wake_function. */
static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
q->flags = 0;
q->private = p;
q->func = default_wake_function;
}
4.9.3 等待队列的应用
/* Completion synchronization object. */
struct completion {
unsigned int done; /* completion state; incremented by complete() */
wait_queue_head_t wait; /* queue of all tasks waiting on this completion */
};
如果要静态定义一个struct completion变量并初始化, 可以使用DECLARE_COMPLETION宏
/* Runtime initialization: nothing completed yet, empty wait queue. */
static inline void init_completion(struct completion *x)
{
x->done = 0;
init_waitqueue_head(&x->wait);
}
完成接口对执行路径间的同步可以通过等待者与完成者模型来表述, 内核定义wait_for_completion;
/* Block (uninterruptibly, with no timeout) until @x is completed. */
void __sched wait_for_completion(struct completion *x)
{
wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
wait_for_completion内调用wait_for_common来使当前进程以TASK_UNINTERRUPTIBLE睡眠在completion x上的
int wait_for_completion_interruptible(struct completion *x);
可中断的等待状态.
int wait_for_completion_killable(struct completion *x);
可杀死的等待状态. 等待的进程可以被kill signal唤醒并中止等待状态
unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout)
不可中断的等待状态, 但在timeout指定的时间到期后, 进程将中止等待状态
unsigned long wait_for_completion_interruptible_timeout(struct completion *x,unsigned long
timeout);
可中断的等待状态, 但在timeout指定的时间到期后, 进程将中止等待状态.
unsigned long wait_for_completion_killable_timeout(struct completion *x, unsigned long
timeout);
可杀死的等待状态, 但在timeout指定的时间到期之后, 进程将中止等待状态.
对于完成者的行为, 内核函数为complete和complete_all, 前者只唤醒一个等待者, 后者将唤醒所有等待者.
/* Signal one completion: under the queue's irqsave lock, bump the done
 * count and wake a single waiter (nr_exclusive = 1 in the 3rd argument
 * of __wake_up_common). complete_all wakes every waiter instead. */
void complete(struct completion *x)
{
unsigned long flags;
spin_lock_irqsave(&x->wait.lock, flags);
x->done++;
__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
spin_unlock_irqrestore(&x->wait.lock, flags);
}
函数先将完成者数量加1, 然后调用__wake_up_common执行唤醒等待者的操作. 3,4参数表示排他性唤醒的个数
本文欢迎大家转载
原文出自: http://blog.csdn.net/dyron