Linux Kernel Study (15): Kernel Programming Fundamentals - Kernel Synchronization and the Spinlock (spinlock_t)

Part I. Kernel synchronization and spinlocks:

1. Why is kernel synchronization needed?

Synchronization ensures that shared data structures (and the critical sections, i.e. the code blocks that access them) are read and written in an orderly way. The problem comes from switches between kernel control paths, which can happen at essentially arbitrary points; what we must guarantee is that at any moment only one kernel control path is inside a given critical section. On a single-CPU system it is enough to disable kernel preemption, which prevents the running kernel control path from being switched away. On a multi-CPU (SMP) system, spinlocks are needed to coordinate the CPUs: before entering a critical section, a CPU must acquire the lock that protects it. If the lock is already held, another CPU is inside the critical section, and the requester can only "busy-wait" until that CPU leaves the critical section and releases the lock; only then can another CPU acquire the lock and enter.
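To make this concrete, here is a minimal usage sketch (my own illustration, not from the kernel source; the variable and function names are made up): a shared counter protected by a spinlock so that only one CPU updates it at a time.

#include <linux/spinlock.h>

static spinlock_t counter_lock;          /* protects shared_counter */
static unsigned long shared_counter;

void counter_setup(void)
{
	spin_lock_init(&counter_lock);   /* start in the unlocked state */
}

void counter_bump(void)
{
	spin_lock(&counter_lock);        /* busy-wait until the lock is free */
	shared_counter++;                /* critical section: one CPU at a time */
	spin_unlock(&counter_lock);      /* let the next waiter in */
}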

There are many kernel synchronization techniques; collectively they are called synchronization primitives.

 

| Technique | Description | Scope |
|---|---|---|
| Per-CPU variables | Duplicate a data structure among the CPUs | All CPUs |
| Atomic operations | Atomic read-modify-write instructions on a counter | All CPUs |
| Memory barriers | Avoid instruction reordering | Local CPU or all CPUs |
| Spin locks | Lock with busy waiting | All CPUs |
| Semaphores | Lock with blocking wait (sleep) | All CPUs |
| Seqlocks | Lock based on an access counter | All CPUs |
| Local interrupt disabling | Forbid interrupt handling on a single CPU | Local CPU |
| Local softirq disabling | Forbid handling of deferrable functions on a single CPU | Local CPU |
| Read-copy-update (RCU) | Lock-free access to shared data structures through pointers | All CPUs |

 

2. How are spinlocks implemented?

 

2-1. Data structures:

2-1-1. spinlock_t (linux/spinlock_types.h)

typedef struct spinlock {
	union {		// union of the raw lock and the lockdep padding
		struct raw_spinlock rlock;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
		struct {
			u8 __padding[LOCK_PADSIZE];
			struct lockdep_map dep_map;
		};
#endif
	};
} spinlock_t;

 

2-1-2. raw_spinlock_t (linux/spinlock_types.h)

typedef struct raw_spinlock {
	arch_spinlock_t raw_lock;	// architecture-dependent lock word
#ifdef CONFIG_GENERIC_LOCKBREAK
	unsigned int break_lock;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned int magic, owner_cpu;
	void *owner;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} raw_spinlock_t;

 

2-1-3. arch_spinlock_t (linux/spinlock_types.h; the actual definition is architecture and configuration dependent)

typedef struct {
	volatile unsigned int slock;
} arch_spinlock_t;
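Putting the three definitions together (with the debug members compiled out), the nesting is simply:

spinlock_t          { struct raw_spinlock rlock; }
struct raw_spinlock { arch_spinlock_t raw_lock; }
arch_spinlock_t     { volatile unsigned int slock; }

so lock->rlock.raw_lock.slock is the single integer that the architecture code actually spins on.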

 

2-2. Operations:

 

2-2-1. spin_lock_init: initialization (linux/spinlock.h)

#define spin_lock_init(_lock)				\
do {							\
	spinlock_check(_lock);				\
	raw_spin_lock_init(&(_lock)->rlock);		\
} while (0)

 

static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

 

# define raw_spin_lock_init(lock)			\
do {							\
	static struct lock_class_key __key;		\
							\
	__raw_spin_lock_init((lock), #lock, &__key);	\
} while (0)

 

void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
			  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	lock->magic = SPINLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

 

#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }

 

Put simply, ignoring the debug fields, initialization just sets the lock word to the unlocked value:

#define spin_lock_init(_lock)						\
do {									\
	(_lock)->rlock.raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;	\
} while (0)
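As a usage note (my own sketch; the struct and function names are made up): a lock embedded in a dynamically allocated object is initialized at runtime with spin_lock_init(), while a file-scope lock can be initialized statically with the DEFINE_SPINLOCK() macro instead.

#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(global_lock);    /* statically initialized, no spin_lock_init() needed */

struct my_dev {                         /* hypothetical device structure */
	spinlock_t lock;
	int state;
};

struct my_dev *my_dev_alloc(void)
{
	struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;
	spin_lock_init(&dev->lock);     /* runtime initialization to the unlocked state */
	return dev;
}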

 

2-2-2. spin_lock: acquiring the lock (linux/spinlock.h)

static inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

 

#define raw_spin_lock(lock) _raw_spin_lock(lock)

 

Here we follow the multi-CPU (SMP) path.

#ifdef CONFIG_INLINE_SPIN_LOCK
#define _raw_spin_lock(lock) __raw_spin_lock(lock)
#endif

 

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	preempt_disable();	// disable kernel preemption
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);	// no-op unless lockdep is enabled
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

 

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

 

Here, lock(_lock) expands to do_raw_spin_lock(_lock):

 

static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
}
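Before descending into the architecture-specific part: with CONFIG_DEBUG_LOCK_ALLOC and the other debug/lockdep options disabled, the whole spin_lock() chain above effectively reduces to the following (a simplified sketch of my own, not literal kernel code; spin_lock_effective is a made-up name):

static inline void spin_lock_effective(spinlock_t *lock)
{
	preempt_disable();                       /* no preemption while the lock is held */
	arch_spin_lock(&lock->rlock.raw_lock);   /* architecture-specific busy-wait */
}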

 

The rest is architecture-specific; on x86 it lives in arch/x86/include/asm/spinlock.h:

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

 

#if (NR_CPUS < 256)
#define TICKET_SHIFT 8
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	short inc = 0x0100;

	asm volatile (
		LOCK_PREFIX "xaddw %w0, %1\n"
		// LOCK_PREFIX is empty on single-CPU (UP) builds and the "lock"
		// prefix on SMP builds, which makes the xaddw atomic.
		// xaddw: exchange then add, on a 16-bit word.
		// Before: %0 = 0x0100, %1 = slock.
		// After:  %0 = old slock value, %1 = slock + 0x0100.
		// The low byte of slock is the ticket now being served, the high
		// byte is the next ticket to hand out; the xaddw draws us a
		// ticket (the old high byte, now in %h0) and advances "next".
		"1:\t"
		"cmpb %h0, %b0\n\t"
		// Compare our ticket (%h0) with the ticket now being served (%b0).
		"je 2f\n\t"
		// If they match it is our turn: jump to 2: with the lock held.
		"rep ; nop\n\t"
		// pause: relax the CPU while spinning.
		"movb %1, %b0\n\t"
		// The serving byte may have changed in the meantime, so reload
		// the low byte of slock into %b0.
		// Note that "+Q" (inc) and "+m" (lock->slock) are read-write
		// operands, so updates to %0 and %1 are reflected in the variables.
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"
		"2:"
		// Nothing needs to be stored at 2:: the xaddw above already
		// claimed our ticket, so the lock is held; spin_unlock() will
		// increment the serving byte to pass the lock on.
		: "+Q" (inc), "+m" (lock->slock)
		:
		: "memory", "cc");
}
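The same algorithm written as a small user-space model (my own sketch, using GCC's __atomic builtins; ticket_lock_t and ticket_lock are made-up names) may make the assembly easier to follow:

#include <stdint.h>

typedef struct { volatile uint16_t slock; } ticket_lock_t;   /* stand-in for arch_spinlock_t */

static void ticket_lock(ticket_lock_t *lock)
{
	/* The xaddw: atomically fetch the old slock and add 0x0100, i.e.
	 * draw a ticket and advance the "next ticket" (high) byte. */
	uint16_t old = __atomic_fetch_add(&lock->slock, 0x0100, __ATOMIC_ACQUIRE);
	uint8_t my_ticket = (uint8_t)(old >> 8);   /* our ticket: old high byte */

	/* The cmpb/je/movb loop: spin until the "now serving" (low) byte
	 * of slock reaches our ticket. */
	while ((uint8_t)(lock->slock & 0xff) != my_ticket)
		;   /* the kernel inserts "rep ; nop" (pause) here */
}

Note that the lock function never stores anything once its turn comes: drawing the ticket with the xadd is the whole claim, and the unlock path below simply increments the serving byte.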

 

2-2-3. spin_unlock: releasing the lock (linux/spinlock.h)

static inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

 

#define raw_spin_unlock(lock) _raw_spin_unlock(lock)

 

#ifdef CONFIG_INLINE_SPIN_UNLOCK
#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
#endif

 

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	preempt_enable();	// re-enable kernel preemption
}

 

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}

 

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

 

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
		     // Increment the "now serving" byte (the low byte of slock),
		     // handing the lock to the holder of the next ticket.
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
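One practical footnote (a sketch of my own; the names are made up): if the same lock can also be taken in interrupt context, plain spin_lock()/spin_unlock() is not enough, because an interrupt arriving on the CPU that holds the lock could try to take it again and deadlock. The kernel provides spin_lock_irqsave()/spin_unlock_irqrestore(), which additionally disable local interrupts while the lock is held:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(dev_lock);   /* hypothetical lock also taken in an IRQ handler */
static int dev_pending;

void dev_submit(int value)
{
	unsigned long flags;

	/* Disable local interrupts too, so an interrupt handler on this CPU
	 * that tries to take dev_lock cannot deadlock against us. */
	spin_lock_irqsave(&dev_lock, flags);
	dev_pending = value;
	spin_unlock_irqrestore(&dev_lock, flags);
}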
