=================================
本文系本站原创,欢迎转载!
转载请注明出处:http://blog.csdn.net/gdt_A20
=================================
简单浏览一下spinlock,以arm为例,
一、spinlock结构:
include/linux/spinlock_types.h
typedef struct spinlock {
	union {
		struct raw_spinlock rlock;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
		struct {
			u8 __padding[LOCK_PADSIZE];
			struct lockdep_map dep_map;
		};
#endif
	};
} spinlock_t;
忽略debug部分,继续看,
typedef struct raw_spinlock {
	arch_spinlock_t raw_lock;
} raw_spinlock_t;
arch/arm/include/asm/spinlock_types.h
typedef struct {
	volatile unsigned int lock;
} arch_spinlock_t;
二、spinlock操作
1.加锁 include/linux/spinlock.h
/* Top-level spinlock API: forwards to the raw_spinlock embedded in
 * spinlock_t (the union's rlock member shown above). */
static inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}
#define raw_spin_lock(lock) _raw_spin_lock(lock)
kernel/spinlock.c
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
	__raw_spin_lock(lock);
}
include/linux/spinlock_api_smp.h
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	preempt_disable(); //抢占点,增加抢占计数
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
include/linux/preempt.h
/* Disable kernel preemption: bump the per-thread preempt counter, then a
 * compiler barrier so no access is reordered across the increment. */
#define preempt_disable() \
do { \
	inc_preempt_count(); \
	barrier(); \
} while (0)
/* Add one to the preemption count. */
#define inc_preempt_count() add_preempt_count(1)
/* Non-debug variant: directly add val to the counter. */
# define add_preempt_count(val) do { preempt_count() += (val); } while (0)
#define preempt_count() (current_thread_info()->preempt_count)
#compiler-gcc.h
/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
/* 如果汇编指令修改了内存,但是GCC 本身却察觉不到,因为在输出部分没有描述,
   此时就需要在修改描述部分增加"memory",告诉GCC 内存已经被修改,GCC 得知这个信息后,就会在这段指令之前,
   插入必要的指令将前面因为优化Cache 到寄存器中的变量值先写回内存,如果以后又要使用这些变量再重新读取。 */
#about LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
include/linux/lockdep.h
/* With lock contention tracking (lockdep/lockstat) disabled, the trylock
 * argument is ignored and the real lock function is called directly. */
#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
}
#include/linux/compiler.h
# define __acquire(x) __context__(x,1)
//it's designed to test that the entry and exit contexts match,
//and that no path through a function is ever entered with conflicting contexts.
#about arch_spin_lock in arch/arm/include/asm
/* ARM architecture-level lock: loop with exclusive load/store (ldrex/strex)
 * until the lock word is atomically changed from 0 (free) to 1 (held). */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	__asm__ __volatile__(
	/* spin-test loop (原注释: 循环测试) */
	"1: ldrex %0, [%1]\n"      /* exclusive-load the lock word into tmp */
	" teq %0, #0\n"            /* 0 means the lock is free */
	WFE("ne")                  /* held: wait-for-event instead of busy spin —
	                              presumably woken by SEV on unlock; confirm
	                              against the matching arch_spin_unlock */
	" strexeq %0, %2, [%1]\n"  /* free: try to exclusively store 1 */
	" teqeq %0, #0\n"          /* strex writes 0 to tmp on success */
	" bne 1b"                  /* held, or lost the exclusive reservation:
	                              retry from label 1 */
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");
	/* memory barrier: keep the critical section's accesses after the
	 * lock acquisition */
	smp_mb();
}
2.顺便看下#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
最终会有如下操作
/* Like __raw_spin_lock(), but first masks local interrupts, so the lock is
 * safe against IRQ handlers running on this CPU. */
static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
	local_irq_disable();  /* disable interrupts on this CPU first */
	preempt_disable();    /* then disable preemption */
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);  /* lockdep annotation */
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
Thanks