A deeper look at mutexes and semaphores, advice appreciated!!!

tgvlcw 2011-03-04 02:53:05
I've been studying mutexes and semaphores recently. Semaphores are easy enough to understand, but the mutex logic has me a bit confused, and I'd appreciate help sorting it out. It's a mess!!!
The most important code paths for acquiring and releasing a mutex are shown below:

// This is the interface exposed to drivers
void __sched mutex_lock(struct mutex *lock)
{
    might_sleep();
    /*
     * The locking fastpath is the 1->0 transition from
     * 'unlocked' into 'locked' state.
     */
    __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
    mutex_set_owner(lock);
}
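
For context, here is a rough sketch of how a driver would typically call this API. The my_dev structure and function below are made up purely for illustration, they are not taken from any real driver:

#include <linux/mutex.h>
#include <linux/io.h>

/* Hypothetical example only: a device whose register accesses are serialized
 * by a mutex. mutex_init(&dev->io_lock) would be called at probe time. */
struct my_dev {
    struct mutex io_lock;   /* protects regs */
    void __iomem *regs;
};

static void my_dev_write_reg(struct my_dev *dev, u32 off, u32 val)
{
    mutex_lock(&dev->io_lock);      /* may sleep; process context only */
    writel(val, dev->regs + off);
    mutex_unlock(&dev->io_lock);
}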

// Which implementation of this function gets used depends on the ARM instruction-set
// version the CPU supports. The generic version below is used for ARMv5 and earlier;
// the version for later ARM cores lives under arch/arm.
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
    if (unlikely(atomic_xchg(count, 0) != 1))
        fail_fn(count);
}
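
The count encoding matters here: 1 means unlocked, 0 means locked with no waiters, and -1 means locked with waiters possibly queued. The small user-space sketch below (C11 atomics, purely illustrative, not kernel code) shows why whoever happens to run the exchange first simply takes the lock:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* 1 = unlocked, 0 = locked (no waiters), -1 = locked (waiters may exist) */
static atomic_int count = 1;

/* Lock fastpath: swap in 0; we own the lock only if the old value was 1. */
static bool fastpath_lock(void)
{
    return atomic_exchange(&count, 0) == 1;
}

/* Unlock fastpath: swap in 1; if the old value was not 0, waiters may
 * exist and a slowpath would have to run to wake one of them up. */
static bool unlock_needs_slowpath(void)
{
    return atomic_exchange(&count, 1) != 0;
}

int main(void)
{
    printf("lock acquired on fastpath: %d\n", fastpath_lock());        /* 1: old count was 1 */
    printf("unlock needs slowpath:     %d\n", unlock_needs_slowpath()); /* 0: no waiters */
    return 0;
}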

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
    struct mutex *lock = container_of(lock_count, struct mutex, count);

    __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
}

// The core function that handles the case where the lock could not be acquired.
// It looks like a lot, but it is not hard to follow.
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    unsigned long ip)
{
    struct task_struct *task = current;
    struct mutex_waiter waiter;
    unsigned long flags;

    preempt_disable();
    mutex_acquire(&lock->dep_map, subclass, 0, ip);
    spin_lock_mutex(&lock->wait_lock, flags);

    debug_mutex_lock_common(lock, &waiter);
    debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

    /* add waiting tasks to the end of the waitqueue (FIFO): */
    list_add_tail(&waiter.list, &lock->wait_list);
    waiter.task = task;

    if (atomic_xchg(&lock->count, -1) == 1)
        goto done;

    lock_contended(&lock->dep_map, ip);

    for (;;) {

        if (atomic_xchg(&lock->count, -1) == 1)
            break;

        if (unlikely(signal_pending_state(state, task))) {
            mutex_remove_waiter(lock, &waiter,
                                task_thread_info(task));
            mutex_release(&lock->dep_map, 1, ip);
            spin_unlock_mutex(&lock->wait_lock, flags);

            debug_mutex_free_waiter(&waiter);
            preempt_enable();
            return -EINTR;
        }
        __set_task_state(task, state);

        //printk(KERN_DEBUG "%s: before, lock_count: 0x%x, pid: %d\n", __func__, \
        //(int)lock->count.counter, task_tgid_vnr(current));
        /* didn't get the lock, go to sleep: */
        spin_unlock_mutex(&lock->wait_lock, flags);
        preempt_enable_no_resched();
        schedule();
        preempt_disable();
        printk(KERN_DEBUG "%s: after, lock_count: 0x%x, pid: %d\n", __func__,
               (int)lock->count.counter, task_tgid_vnr(current));
        spin_lock_mutex(&lock->wait_lock, flags);
    }

done:
    //printk(KERN_DEBUG "%s: lock_count: 0x%x, pid: %d\n", __func__, \
    //(int)lock->count.counter, task_tgid_vnr(current));
    lock_acquired(&lock->dep_map, ip);
    /* got the lock - rejoice! */
    mutex_remove_waiter(lock, &waiter, current_thread_info());
    mutex_set_owner(lock);

    /* set it to 0 if there are no waiters left: */
    if (likely(list_empty(&lock->wait_list)))
        atomic_set(&lock->count, 0);

    spin_unlock_mutex(&lock->wait_lock, flags);

    debug_mutex_free_waiter(&waiter);
    preempt_enable();

    return 0;
}

// The following code releases the lock
void __sched mutex_unlock(struct mutex *lock)
{
    __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
    if (unlikely(atomic_xchg(count, 1) != 0))
        fail_fn(count);
}

static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
    __mutex_unlock_common_slowpath(lock_count, 1);
}

static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
    struct mutex *lock = container_of(lock_count, struct mutex, count);
    unsigned long flags;

    spin_lock_mutex(&lock->wait_lock, flags);
    mutex_release(&lock->dep_map, nested, _RET_IP_);
    debug_mutex_unlock(lock);

    if (__mutex_slowpath_needs_to_unlock())
        atomic_set(&lock->count, 1);

    if (!list_empty(&lock->wait_list)) {
        /* get the first entry from the wait-list: */
        struct mutex_waiter *waiter =
                list_entry(lock->wait_list.next,
                           struct mutex_waiter, list);

        debug_mutex_wake_waiter(lock, waiter);

        wake_up_process(waiter->task);
        printk(KERN_DEBUG "%s: lock_count: 0x%x, pid: %d\n", __func__,
               (int)lock->count.counter, task_tgid_vnr(current));
    }
    spin_unlock_mutex(&lock->wait_lock, flags);
}


A mutex is implemented on the same principle as a semaphore; the most fundamental difference, as far as I can tell, is that the mutex's count variable is volatile while the semaphore's is not. Both have a wait queue.
Suppose there are two processes, a and b. Process a acquires the lock first, then b also tries to acquire it; since a already holds the lock, b goes to sleep. When a releases the lock, it wakes up the process on the wait queue, so by normal logic b should now be woken and get the lock. But what actually happens is that after releasing the lock, a acquires it again: a decrements the count before b gets scheduled, and only then does b run, fail its check on the count, and go back to sleep. Why? Why does a get to decrement the count before b is scheduled?
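
Laying the code above against the log below, one interleaving that would produce exactly this behavior looks roughly like the following (a sketch only, assuming a reaches the lock fastpath before b is scheduled):

/*
 * a (holder, pid 206)                        b (waiter, pid 1041)
 * -----------------------------------        ----------------------------------
 * mutex_unlock():
 *   atomic_xchg(count, 1) returns -1,
 *   so the unlock slowpath runs:
 *     atomic_set(&count, 1);       <- lock is now "free"
 *     wake_up_process(b);          <- b only becomes runnable; the lock
 *                                     is NOT handed over to it
 * mutex_lock() again:
 *   atomic_xchg(count, 0) returns 1,
 *   so the fastpath succeeds and
 *   a owns the lock again                    (b still has not run)
 *                                            b finally runs its loop:
 *                                              atomic_xchg(count, -1) returns 0,
 *                                              not 1, so b goes back to sleep
 */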

A semaphore, however, does not behave like this. With a semaphore, after a releases it and wakes the process on the wait queue, b is scheduled right away and gets the semaphore; if a then tries to re-acquire the semaphore, it goes to sleep because the count is less than or equal to zero.
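
For comparison, the semaphore's release path does the hand-off the other way round. Again just a sketch of the sequence, matching the semaphore log further below:

/*
 * a (holder)                                 b (waiter)
 * -----------------------------------        ----------------------------------
 * up():
 *   the wait list is not empty, so the
 *   count is NOT incremented; instead
 *   the first waiter b is marked as
 *   having been handed the semaphore
 *   and is woken up
 * down() again:
 *   count is still 0, so a is queued
 *   behind b and goes to sleep
 *                                            b runs, sees it has been handed
 *                                            the semaphore, and enters the
 *                                            critical section
 */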

Can someone explain this to me?

Below are the logs I printed:

// This is the mutex log
<7>[ 83.890838] i2c_transfer: get i2c lock, slave->addr: 0x34, cur_pid: 206, flags: 0x0, len: 1
<7>[ 83.890930] i2c_transfer: get i2c lock, slave->addr: 0x67, cur_pid: 1041, flags: 0x0, len: 1
<7>[ 83.890960] __mutex_lock_common: before, lock_count: 0xffffffff, pid: 1041
<7>[ 83.891143] __mutex_unlock_common_slowpath: lock_count: 0x1, pid: 206
<7>[ 83.891174] i2c_transfer: release i2c lock, slave->addr: 0x34, cur_pid: 206, flags: 0x0, len: 1
<7>[ 83.891174]
<7>[ 83.891204] i2c_transfer: get i2c lock, slave->addr: 0x34, cur_pid: 206, flags: 0x0, len: 1
<7>[ 83.891235] __mutex_lock_common: after, lock_count: 0x0, pid: 1041
<7>[ 83.891235] __mutex_lock_common: before, lock_count: 0xffffffff, pid: 1041
<7>[ 83.891510] __mutex_unlock_common_slowpath: lock_count: 0x1, pid: 206
<7>[ 83.891510] i2c_transfer: release i2c lock, slave->addr: 0x34, cur_pid: 206, flags: 0x0, len: 1
<7>[ 83.891876]
<7>[ 83.891876] i2c_transfer: get i2c lock, slave->addr: 0x34, cur_pid: 206, flags: 0x0, len: 1
<7>[ 83.891906] __mutex_lock_common: after, lock_count: 0x0, pid: 1041
<7>[ 83.891937] __mutex_lock_common: before, lock_count: 0xffffffff, pid: 1041
<7>[ 83.895751] __mutex_unlock_common_slowpath: lock_count: 0x1, pid: 206
<7>[ 83.895751] i2c_transfer: release i2c lock, slave->addr: 0x34, cur_pid: 206, flags: 0x0, len: 1
.........
<7>[ 83.904693] i2c_transfer: get i2c lock, slave->addr: 0x34, cur_pid: 206, flags: 0x0, len: 2
<7>[ 83.904693] __mutex_lock_common: after, lock_count: 0x0, pid: 1041
<7>[ 83.904724] __mutex_lock_common: before, lock_count: 0xffffffff, pid: 1041
<7>[ 83.904876] __mutex_unlock_common_slowpath: lock_count: 0x1, pid: 206
<7>[ 83.904876] i2c_transfer: release i2c lock, slave->addr: 0x34, cur_pid: 206, flags: 0x0, len: 2
<7>[ 83.904907]
<7>[ 83.904937] __mutex_lock_common: after, lock_count: 0x1, pid: 1041
<7>[ 83.904937] __mutex_lock_common: lock_count: 0xffffffff, pid: 1041
<7>[ 83.905090] i2c_transfer: release i2c lock, slave->addr: 0x67, cur_pid: 1041, flags: 0x0, len: 1

// The following is the semaphore log
<7>[ 53.400207] i2c_transfer: get i2c lock, slave->addr: 0x67, cur_pid: 1041, flags: 0x0, len: 1
<7>[ 53.400329] i2c_transfer: get i2c lock, slave->addr: 0x44, cur_pid: 5, flags: 0x0, len: 1
<7>[ 53.400360] __down_common: before count: 0x0, pid: 5, up: 0
<7>[ 53.400390] __up: count: 0x0, pid: 1041, up: 1
<7>[ 53.400390] i2c_transfer: release i2c lock, slave->addr: 0x67, cur_pid: 1041, flags: 0x0, len: 1
<7>[ 53.400421]
<7>[ 53.400421] i2c_transfer: get i2c lock, slave->addr: 0x67, cur_pid: 1041, flags: 0x1, len: 32
<7>[ 53.400451] __down_common: before count: 0x0, pid: 1041, up: 0
<7>[ 53.400451] __down_common: after count: 0x0, pid: 5, up: 1
<7>[ 53.400695] __up: count: 0x0, pid: 5, up: 1
<7>[ 53.400726] i2c_transfer: release i2c lock, slave->addr: 0x44, cur_pid: 5, flags: 0x0, len: 1
<7>[ 53.400726]
<7>[ 53.400726] i2c_transfer: get i2c lock, slave->addr: 0x44, cur_pid: 5, flags: 0x0, len: 1
<7>[ 53.400756] __down_common: before count: 0x0, pid: 5, up: 0
<7>[ 53.400756] __down_common: after count: 0x0, pid: 1041, up: 1
<7>[ 53.402404] __up: count: 0x0, pid: 1041, up: 1
<7>[ 53.402435] i2c_transfer: release i2c lock, slave->addr: 0x67, cur_pid: 1041, flags: 0x1, len: 32
<7>[ 53.402435]
<7>[ 53.402465] i2c_transfer: get i2c lock, slave->addr: 0x67, cur_pid: 1041, flags: 0x0, len: 2
<7>[ 53.402496] __down_common: before count: 0x0, pid: 1041, up: 0
<7>[ 53.402557] __down_common: after count: 0x0, pid: 5, up: 1
<7>[ 53.402832] __up: count: 0x0, pid: 5, up: 1
<7>[ 53.402862] i2c_transfer: release i2c lock, slave->addr: 0x44, cur_pid: 5, flags: 0x0, len: 1
<7>[ 53.402862]
<7>[ 53.402862] i2c_transfer: get i2c lock, slave->addr: 0x44, cur_pid: 5, flags: 0x0, len: 1
<7>[ 53.402893] __down_common: before count: 0x0, pid: 5, up: 0
<7>[ 53.402923] __down_common: after count: 0x0, pid: 1041, up: 1
<7>[ 53.403106] __up: count: 0x0, pid: 1041, up: 1
<7>[ 53.403106] i2c_transfer: release i2c lock, slave->addr: 0x67, cur_pid: 1041, flags: 0x0, len: 2
<7>[ 53.403137]
<7>[ 53.404083] __down_common: after count: 0x0, pid: 5, up: 1
<7>[ 53.404479] i2c_transfer: release i2c lock, slave->addr: 0x44, cur_pid: 5, flags: 0x0, len: 1


hellochick 2011-10-20
Learned something. Analyzing a real problem like this works better than just reading books.
SonicLing 2011-03-25
A mutex emphasizes exclusive access; a semaphore emphasizes concurrency. That is why the implementations differ.

A mutex ignores the other waiting processes, which is why it is efficient.

If you want both mutual exclusion and concurrency, you can call schedule() after unlock. Then again, you might as well use a semaphore directly.

To see the concurrency power of a semaphore, use one with a count > 1. A count-of-1 semaphore is rather underwhelming.
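
A minimal sketch of that unlock-then-yield idea, reusing the hypothetical io_lock from the earlier example; whether the just-woken waiter actually runs first is still up to the scheduler:

mutex_unlock(&dev->io_lock);
schedule();     /* voluntarily yield so the just-woken waiter has a chance to run */
mutex_lock(&dev->io_lock);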
六六木木 2011-03-17
void __sched mutex_lock(struct mutex *lock)
{
    might_sleep();
    /*
     * The locking fastpath is the 1->0 transition from
     * 'unlocked' into 'locked' state.
     */
    __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
    mutex_set_owner(lock);
}

diguaseed 2011-03-15
Understood.
ominging 2011-03-14
Haha, operating systems stuff.
ominging 2011-03-14
It's just operating-systems knowledge.
luocheng891019 2011-03-14
Pretty much agreeing with reply #93.
sd22778 2011-03-13
Nice, bookmarked.
Thanks for sharing, OP.
snknba 2011-03-13
What's this about "Chenxi"?
yj411511168 2011-03-11
TOP top
luocheng891019 2011-03-11
The Operating Systems textbook seems to cover this.
noayu 2011-03-10
I mostly get it now; I feel I still need to study it more carefully myself.
wenroudeye 2011-03-10
I'm a newbie, just here to take a look.
寂寞花 2011-03-10
I find it hard too. Could it be explained a bit more simply?
stevenzhang1986 2011-03-10
Supporting from the front row.
lieye_leaves 2011-03-09
Learned something.
tgvlcw 2011-03-09
Now I can basically draw a conclusion.
When no process is waiting, a mutex and a semaphore behave the same; the difference lies in how waiting processes are handled.
Mutex:
Best suited when a small number of processes share one lock for their own work, consecutive entries into the critical section are few (e.g. n back-to-back lock/unlock pairs), each stay in the critical section is short, and the other processes' entry into the critical section is not very time-critical. In that case a mutex is recommended.

Semaphore:
Best suited when many processes share one semaphore and several of them are parked on the wait queue, some processes enter the critical section very many times (e.g. n back-to-back down/up pairs), and the other processes' entry into the critical section is time-critical. In that case a semaphore is recommended.

A mutex has much lower system overhead than a semaphore. Suppose process a needs to enter the critical section 100 times in a row, and processes b, c, and d also want to enter it as a leaves the critical section for the 2nd, 4th, and 6th time:
With a mutex, b, c, and d simply go to sleep on the wait queue; only after a has left the critical section for the 100th time and finished all of its work does b get to run, then c, then d. If one schedule() counts as one unit of system overhead, those 100 critical-section passes cost 3 schedules in total.

With a semaphore (initial count 1), b, c, and d are parked on the wait queue before a's 2nd, 4th, and 6th entries into the critical section, and they acquire the semaphore when a leaves it for the 3rd, 5th, and 7th time; the system overhead is 6 schedules.
joyself 2011-03-09
Right.

If no process is waiting, the behavior should be the same.
When processes are waiting, they differ. mutex: a process that wants the mutex can "jump the queue";
semaphore: a process that wants the semaphore must line up at the back and cannot "jump the queue".
zonewone 2011-03-09
Indeed. Take a look at the implementation of up(): if a task is waiting, the semaphore is handed straight to that waiting task.
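
For reference, this is roughly what up() looks like in kernel/semaphore.c of that kernel generation (quoted from memory, so details may differ slightly). The key point: when the wait list is non-empty, the count is not incremented at all; the semaphore is handed straight to the first waiter via waiter->up, which is why a task that comes back and calls down() again still sees count == 0 and has to sleep.

void up(struct semaphore *sem)
{
    unsigned long flags;

    spin_lock_irqsave(&sem->lock, flags);
    if (likely(list_empty(&sem->wait_list)))
        sem->count++;           /* nobody waiting: just give the count back */
    else
        __up(sem);              /* hand the semaphore to the first waiter */
    spin_unlock_irqrestore(&sem->lock, flags);
}

static noinline void __sched __up(struct semaphore *sem)
{
    struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
                                        struct semaphore_waiter, list);
    list_del(&waiter->list);
    waiter->up = 1;             /* mark the hand-off; the waiter's loop checks this */
    wake_up_process(waiter->task);
}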
