Linux spinlock implementation
A spinlock can be held by only one kernel code path at a time. If another kernel code path tries to acquire a spinlock that is already held, it must spin, busy-waiting, until the holder releases the lock. If the lock is not currently held (not contended), it can be acquired immediately.
A spinlock has the following characteristics (a minimal usage sketch follows the list):
- It is a busy-wait lock. Lock mechanisms in operating systems fall into two classes: busy-waiting and sleep-waiting. A spinlock belongs to the former: when the lock cannot be acquired, the CPU keeps retrying until it succeeds.
- Only one kernel code path can hold the lock at any given time.
- The holder is expected to finish the critical section quickly. If the critical section runs too long, the CPUs spinning outside the lock waste cycles; in particular, sleeping inside a spinlock critical section is not allowed.
- A spinlock can be used in interrupt context.
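As a quick illustration, here is a minimal sketch of typical spinlock usage in kernel code (my_lock, my_counter and my_increment are hypothetical names, not from the source above):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);    /* lock protecting my_counter */
static unsigned long my_counter;

static void my_increment(void)
{
    spin_lock(&my_lock);        /* busy-wait until the lock is acquired */
    my_counter++;               /* critical section: keep it short, never sleep */
    spin_unlock(&my_lock);      /* release so waiters can proceed */
}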
1. spinlock implementation
1.1 Data structures
include/linux/spinlock_types.h
typedef struct spinlock {
    union {
        struct raw_spinlock rlock;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
        struct {
            u8 __padding[LOCK_PADSIZE];
            struct lockdep_map dep_map;
        };
#endif
    };
} spinlock_t;
typedef struct raw_spinlock {
    arch_spinlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
    unsigned int break_lock;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
    unsigned int magic, owner_cpu;
    void *owner;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
    struct lockdep_map dep_map;
#endif
} raw_spinlock_t;
#define TICKET_SHIFT 16

typedef struct {
    union {
        u32 slock;
        struct __raw_tickets {
#ifdef __ARMEB__
            u16 next;
            u16 owner;
#else
            /* this branch applies on little-endian ARM:
               slock += (1 << TICKET_SHIFT) increments next */
            u16 owner;
            u16 next;
#endif
        } tickets;
    };
} arch_spinlock_t;
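The ticket pair works like a bakery queue: each locker atomically takes the current next value as its ticket and waits until owner catches up to it. The following user-space C11 sketch illustrates the same idea (illustrative only, not kernel code; ticket_lock and its functions are hypothetical names):

#include <stdatomic.h>

struct ticket_lock {
    atomic_ushort next;   /* next ticket to hand out */
    atomic_ushort owner;  /* ticket currently being served */
};

static void ticket_lock_acquire(struct ticket_lock *l)
{
    /* take a ticket: the same effect as slock += 1 << TICKET_SHIFT */
    unsigned short ticket = atomic_fetch_add(&l->next, 1);
    while (atomic_load(&l->owner) != ticket)
        ;                 /* spin until our number is called */
}

static void ticket_lock_release(struct ticket_lock *l)
{
    /* serve the next waiter, matching lock->tickets.owner++ */
    atomic_fetch_add(&l->owner, 1);
}

Because tickets are handed out in order, waiters acquire the lock in FIFO order, which is the fairness property that motivated the ticket design.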
1.2 spinlock initialization
Initialization sets slock = 0, i.e. both next and owner start at 0:
static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
    return &lock->rlock;
}

#define spin_lock_init(_lock)                \
do {                                         \
    spinlock_check(_lock);                   \
    raw_spin_lock_init(&(_lock)->rlock);     \
} while (0)

# define raw_spin_lock_init(lock)            \
    do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)

#define __RAW_SPIN_LOCK_UNLOCKED(lockname)   \
    (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)

#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
    {                                         \
    .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,    \
    SPIN_DEBUG_INIT(lockname)                 \
    SPIN_DEP_MAP_INIT(lockname) }

#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } }
Declaring and initializing a spinlock_t variable in one step:
#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)

#define __SPIN_LOCK_UNLOCKED(lockname) \
    (spinlock_t) __SPIN_LOCK_INITIALIZER(lockname)

#define __SPIN_LOCK_INITIALIZER(lockname) \
    { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
1.3 Acquiring the lock: spin_lock
include/linux/spinlock.h
/* spin_lock calls raw_spin_lock */
static inline void spin_lock(spinlock_t *lock)
{
    raw_spin_lock(&lock->rlock);
}

#define raw_spin_lock(lock) _raw_spin_lock(lock)
kernel/locking/spinlock.c
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
    __raw_spin_lock(lock);
}

#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif
include/linux/spinlock_api_smp.h
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
    preempt_disable(); /* disable kernel preemption */
    spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); /* no-op unless CONFIG_LOCKDEP is set */
    LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); /* without CONFIG_LOCK_STAT this is simply do_raw_spin_lock(lock) */
}
include/linux/spinlock.h
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
    __acquire(lock);
    arch_spin_lock(&lock->raw_lock);
}
arch/arm/include/asm/spinlock.h
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
    unsigned long tmp;
    u32 newval;
    arch_spinlock_t lockval;

    prefetchw(&lock->slock);
    __asm__ __volatile__(
"1: ldrex   %0, [%3]\n"     /* lockval = lock->slock */
"   add     %1, %0, %4\n"   /* newval = lockval + (1 << 16), i.e. tickets.next + 1 */
"   strex   %2, %1, [%3]\n" /* lock->slock = newval */
"   teq     %2, #0\n"       /* did the exclusive store succeed? */
"   bne     1b"             /* if not, retry */
    : "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
    : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
    : "cc");

    while (lockval.tickets.next != lockval.tickets.owner) {
        /*
         * next != owner: it is not our turn yet, so call wfe to put the
         * CPU into a low-power wait state. When another CPU wakes us,
         * the owner field has changed, i.e. someone released the lock.
         */
        wfe();
        lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
    }
    /*
     * next == owner: owner has reached the ticket this CPU holds, so the
     * lock is ours and arch_spin_lock returns.
     */
    smp_mb(); /* dmb(ish) memory barrier */
}
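The ldrex/strex pair implements a load-linked/store-conditional retry loop: strex fails (and teq/bne retries) whenever another CPU wrote slock between the load and the store. The same effect can be sketched in portable user-space C11 with compare-and-swap (illustrative only; take_ticket is a hypothetical name):

#include <stdatomic.h>
#include <stdint.h>

/* emulate the ldrex/strex loop: atomically add 1 << 16 to slock and
 * return the value observed before the add (the lockval snapshot) */
static uint32_t take_ticket(_Atomic uint32_t *slock)
{
    uint32_t old = atomic_load(slock);   /* ldrex: snapshot slock */
    /* strex fails if someone wrote in between; CAS models that retry */
    while (!atomic_compare_exchange_weak(slock, &old, old + (1u << 16)))
        ;                                /* bne 1b: try again */
    return old;
}

The returned snapshot contains the caller's ticket in the next field, which is then compared against owner exactly as the while loop above does.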
Note that this spinlock implementation suffers from cache line bouncing when contention is heavy.
Whenever a CPU takes a ticket, it modifies lock->tickets.next, so to keep the caches coherent the cache line holding lock must be invalidated on every other CPU waiting for the lock. If that cache line also holds other variables, they are invalidated along with it even though they are unrelated.
When the holder releases the lock, it modifies lock->tickets.owner, and the line is invalidated on the waiting CPUs again. The waiters therefore keep caching and re-invalidating the lock value, generating heavy cache traffic on the bus and potentially increasing bus latency.
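A common way to limit the collateral damage to neighbouring variables is to keep the lock from sharing a cache line with unrelated hot data, for example with the kernel's ____cacheline_aligned_in_smp attribute from <linux/cache.h>. A sketch (my_stats and its fields are hypothetical):

#include <linux/cache.h>
#include <linux/spinlock.h>

struct my_stats {
    unsigned long hits;     /* read-mostly fields grouped together */
    unsigned long misses;
    spinlock_t lock ____cacheline_aligned_in_smp; /* lock starts on its own cache line */
};

This does not remove the bouncing on the lock's own line, which is inherent to the ticket design, but it keeps the invalidations from evicting unrelated data.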
1.4 Releasing the lock: spin_unlock
static inline void spin_unlock(spinlock_t *lock)
{
    raw_spin_unlock(&lock->rlock);
}

#define raw_spin_unlock(lock)  _raw_spin_unlock(lock)
#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
    spin_release(&lock->dep_map, 1, _RET_IP_); /* no-op unless CONFIG_LOCKDEP is set */
    do_raw_spin_unlock(lock);
    preempt_enable(); /* re-enable kernel preemption */
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
    arch_spin_unlock(&lock->raw_lock);
    __release(lock);
}
arch/arm/include/asm/spinlock.h
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
    /* dmb: make sure all memory accesses before this point have completed */
    smp_mb();
    /* increment owner; only the holder writes it, so no atomic op is needed */
    lock->tickets.owner++;
    /* dsb(ishst) makes the new owner value visible, then sev wakes the other CPUs */
    dsb_sev();
}
arch/arm/include/asm/spinlock.h
static inline void dsb_sev(void)
{
    dsb(ishst);    /* ensure the owner++ above is visible before waking waiters */
    __asm__(SEV);  /* issue sev on SMP builds (nop otherwise) */
}
2. spinlock variants
Inside a spinlock-protected critical section, kernel preemption is disabled, so no other task on the local CPU can contend with the holder for the lock. But suppose an interrupt arrives, preempts the lock holder, and the interrupt handler unluckily tries to take the very same lock: the CPU deadlocks. Because this scenario must never happen, spinlock grew a set of extended variants (a usage sketch follows the list):
spin_lock_irq = spin_lock + local_irq_disable
spin_unlock_irq = spin_unlock + local_irq_enable
spin_lock_irqsave = spin_lock + local_irq_save
spin_unlock_irqrestore = spin_unlock + local_irq_restore
spin_lock_bh = spin_lock + local_bh_disable
spin_unlock_bh = spin_unlock + local_bh_enable
These interfaces combine spinlocks with hard-IRQ and bottom-half control, precisely to avoid lock contention between the lock holder and a hard interrupt or bottom half on the same CPU.
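A typical pattern when a lock is taken both in process context and in an interrupt handler is spin_lock_irqsave, which saves the current interrupt state and disables local IRQs before locking. A sketch (my_lock, my_list and the functions are hypothetical):

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(my_lock);
static LIST_HEAD(my_list);

/* process context: safe even if interrupts were already disabled */
static void my_add(struct list_head *item)
{
    unsigned long flags;

    spin_lock_irqsave(&my_lock, flags);      /* disable local IRQs, then lock */
    list_add_tail(item, &my_list);
    spin_unlock_irqrestore(&my_lock, flags); /* unlock, then restore IRQ state */
}

/* the interrupt handler can no longer deadlock against my_add on this CPU */
static irqreturn_t my_irq_handler(int irq, void *dev)
{
    spin_lock(&my_lock);    /* IRQs are already off in hard-IRQ context */
    /* ... consume my_list ... */
    spin_unlock(&my_lock);
    return IRQ_HANDLED;
}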