
Category: Android

2015-12-09 19:52:56

I recently found an opportunity to study futex, only to discover that rtmutex is the foundation futex is built on, so I studied rtmutex carefully first.
The background of rtmutex is already covered in detail in the kernel document rt-mutex-design.txt (https://www.kernel.org/doc/Documentation/locking/rt-mutex-design.txt), so I won't repeat it here.
Below I explain, one by one, the three structures rtmutex uses (based on the 3.10 kernel). First, struct rt_mutex, which describes the lock itself.


/**
 * The rt_mutex structure
 *
 * @wait_lock:	spinlock to protect the structure
 * @waiters:	rbtree root to enqueue waiters in priority order
 * @waiters_leftmost: top waiter
 * @owner:	the mutex owner
 */
struct rt_mutex {
        raw_spinlock_t wait_lock;       /* spinlock protecting this structure */
        struct rb_root waiters;         /* root of the RB tree holding all rt_mutex_waiters of this rt_mutex */
        struct rb_node *waiters_leftmost; /* the top waiter on this rt_mutex, i.e. the highest-priority waiter */
        struct task_struct *owner;      /* the task currently holding the lock; since task_struct pointers
                                         * are word-aligned, bit 0 is free and is used to flag whether
                                         * there are waiters */
#ifdef CONFIG_DEBUG_RT_MUTEXES
        int save_state;
        const char *name, *file;
        int line;
        void *magic;
#endif
};
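Because a task_struct pointer is always at least word-aligned, the kernel can fold a flag into bit 0 of ->owner. A minimal sketch of the encoding, modeled on rtmutex_common.h in 3.10 (simplified):

/* Modeled on kernel/rtmutex_common.h in 3.10 (simplified). Bit 0 of
 * lock->owner doubles as the "has waiters" flag, so it must be masked
 * off before the pointer is dereferenced. */
#define RT_MUTEX_HAS_WAITERS	1UL

static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
{
        return (struct task_struct *)
                ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
{
        return !RB_EMPTY_ROOT(&lock->waiters);
}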

Next is struct rt_mutex_waiter, which represents one waiter (a task) on an rtmutex.


/*
 * This is the control structure for tasks blocked on a rt_mutex,
 * which is allocated on the kernel stack of the blocked task.
 *
 * @tree_entry:		pi node to enqueue into the mutex waiters tree
 * @pi_tree_entry:	pi node to enqueue into the mutex owner waiters tree
 * @task:		task reference to the blocked task
 */
struct rt_mutex_waiter {
        struct rb_node tree_entry;      /* links into the RB tree rooted at rt_mutex->waiters */
        struct rb_node pi_tree_entry;   /* links into the RB tree rooted at task_struct->pi_waiters;
                                         * only the top waiter of an rtmutex ever gets linked there */
        struct task_struct *task;       /* the blocked task that owns this rt_mutex_waiter */
        struct rt_mutex *lock;          /* the rt_mutex this waiter is waiting on */
#ifdef CONFIG_DEBUG_RT_MUTEXES
        unsigned long ip;
        struct pid *deadlock_task_pid;
        struct rt_mutex *deadlock_lock;
#endif
        int prio;                       /* the waiter's priority */
};
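The waiters tree is kept sorted by priority, with the leftmost (highest-priority) node cached so the top waiter can be found in O(1). A sketch of the enqueue path, modeled on rt_mutex_enqueue() in the rbtree-based sources; rt_mutex_waiter_less() is assumed to compare two waiters by ->prio:

/* Sketch modeled on rt_mutex_enqueue() (rbtree-based waiters, as in the
 * kernel quoted above). Standard rbtree insertion ordered by priority,
 * tracking whether the new waiter became the leftmost (top) waiter. */
static void rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
        struct rb_node **link = &lock->waiters.rb_node;
        struct rb_node *parent = NULL;
        struct rt_mutex_waiter *entry;
        int leftmost = 1;

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
                if (rt_mutex_waiter_less(waiter, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;   /* a higher-priority waiter exists */
                }
        }

        if (leftmost)
                lock->waiters_leftmost = &waiter->tree_entry;

        rb_link_node(&waiter->tree_entry, parent, link);
        rb_insert_color(&waiter->tree_entry, &lock->waiters);
}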

A given rt_mutex may be acquired/released by different tasks, so rt_mutex_waiter is what ties an rt_mutex to those tasks. task_struct also carries rtmutex-related members:

struct task_struct {
        ...
#ifdef CONFIG_RT_MUTEXES
        /* PI waiters blocked on a rt_mutex held by this task */
        struct rb_root pi_waiters;      /* RB tree root linking rt_mutex_waiters: the top waiter of
                                         * each rtmutex this task holds. Not every rt_mutex_waiter
                                         * gets linked into some task's task_struct; only each
                                         * lock's top waiter does. */
        struct rb_node *pi_waiters_leftmost;    /* leftmost node of that tree, i.e. the
                                         * highest-priority waiter; cached for fast lookup */
        /* Deadlock detection and priority inheritance handling */
        struct rt_mutex_waiter *pi_blocked_on;  /* the waiter this task is currently blocked on */
#endif
        ...
};
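pi_waiters is what makes priority inheritance cheap to evaluate: a task's effective priority is simply the minimum of its own normal_prio and the prio of its top PI waiter (in the kernel, a numerically smaller ->prio means a higher priority). A sketch following rt_mutex_getprio() in 3.10:

/* Sketch following rt_mutex_getprio() in 3.10. task_has_pi_waiters()
 * tests pi_waiters for emptiness; task_top_pi_waiter() returns the
 * waiter cached in pi_waiters_leftmost. Smaller value = higher
 * priority, hence min(). */
int rt_mutex_getprio(struct task_struct *task)
{
        if (likely(!task_has_pi_waiters(task)))
                return task->normal_prio;

        return min(task_top_pi_waiter(task)->prio,
                   task->normal_prio);
}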

The three structures tie together roughly as follows: each rt_mutex_waiter sits in its lock's waiters tree via tree_entry and points back both to the blocked task (->task) and to the lock (->lock); the top waiter of each lock is additionally linked into the lock owner's pi_waiters tree via pi_tree_entry; and a blocked task's pi_blocked_on points at its own waiter.

For a mutex, we all know the three basic operations: init, lock and unlock. There is not much to say about init.
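Before digging into the internals, here is a minimal usage sketch of the in-kernel API (names follow include/linux/rtmutex.h in 3.10; my_lock and example() are hypothetical):

/* Minimal usage sketch of the in-kernel rt_mutex API (3.10-era).
 * DEFINE_RT_MUTEX()/rt_mutex_init() initialize the lock; lock/unlock
 * are the entry points analyzed below. */
#include <linux/rtmutex.h>

static DEFINE_RT_MUTEX(my_lock);        /* hypothetical example lock */

static void example(void)
{
        rt_mutex_lock(&my_lock);        /* may sleep; uninterruptible */
        /* ... critical section, runs with priority inheritance ... */
        rt_mutex_unlock(&my_lock);

        if (rt_mutex_trylock(&my_lock)) {       /* non-blocking attempt, 1 on success */
                /* ... */
                rt_mutex_unlock(&my_lock);
        }
}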

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
        might_sleep();

        rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}

/*
 * debug aware fast / slowpath lock,trylock,unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
                  int detect_deadlock,
                  int (*slowfn)(struct rt_mutex *lock, int state,
                                struct hrtimer_sleeper *timeout,
                                int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, NULL, detect_deadlock);
}

The fast path first tries an instruction-based compare-and-exchange, which atomically installs current as the owner if the lock is free. If the architecture does not support the operation, or the operation fails, rt_mutex_slowlock is called.
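For reference, rt_mutex_cmpxchg itself boils down to one atomic compare-and-swap on the owner field; roughly following the 3.10 definition (simplified: the real guard also forces the slow path when CONFIG_DEBUG_RT_MUTEXES is enabled):

/* Roughly following kernel/rtmutex.c in 3.10: on cmpxchg-capable
 * architectures the fast path is one atomic CAS on lock->owner
 * (NULL -> current). Without arch support the macro evaluates to 0,
 * forcing the slow path. */
#ifdef __HAVE_ARCH_CMPXCHG
# define rt_mutex_cmpxchg(l, c, n)      (cmpxchg(&l->owner, c, n) == c)
#else
# define rt_mutex_cmpxchg(l, c, n)      (0)
#endif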
rt_mutex_slowlock itself is quite large. Roughly, it first calls try_to_take_rt_mutex(), then task_blocks_on_rt_mutex() (which may walk the PI chain via rt_mutex_adjust_prio_chain()), and finally __rt_mutex_slowlock() to sleep and retry.

Now let's analyze the rt_mutex_slowlock code in detail.

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
                  struct hrtimer_sleeper *timeout,
                  int detect_deadlock)
{
        struct rt_mutex_waiter waiter; /* note: the waiter is a local variable; it only
                                        * matters while this task has not yet acquired
                                        * the lock */
        int ret = 0;

        debug_rt_mutex_init_waiter(&waiter);
        RB_CLEAR_NODE(&waiter.pi_tree_entry);   /* initialize the RB tree nodes */
        RB_CLEAR_NODE(&waiter.tree_entry);

        raw_spin_lock(&lock->wait_lock);

        /* Try to acquire the lock again: */
        if (try_to_take_rt_mutex(lock, current, NULL)) { /* try to take the rtmutex right
                                        * away; the same function is called again later
                                        * from __rt_mutex_slowlock */
                raw_spin_unlock(&lock->wait_lock);
                return 0;
        }

        set_current_state(state);

        /* Setup the timer, when timeout != NULL */
        if (unlikely(timeout)) { /* if a timeout was given, arm a high-resolution timer
                                  * to wake this task when it expires */
                hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
                if (!hrtimer_active(&timeout->timer))
                        timeout->task = NULL;
        }

        ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);
        /* set up the waiter and adjust the PI chain; along the way it checks
         * several times whether the lock can be taken */

        if (likely(!ret)) /* at this point the waiter is queued (in the lock's tree and,
                           * if it is the top waiter, in the owner's pi_waiters tree),
                           * the PI chain has been re-prioritized, and deadlock detection
                           * is done; now put the current task to sleep */
                ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

Next is try_to_take_rt_mutex(). It is also called from rt_mutex_slowtrylock(); we all know the difference between lock and trylock, and as its name says, this function merely attempts to take the lock.
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
                struct rt_mutex_waiter *waiter)
{
        mark_rt_mutex_waiters(lock);

        if (rt_mutex_owner(lock)) /* the lock already has an owner: failure */
                return 0;

        if (rt_mutex_has_waiters(lock)) {
                if (task->prio >= rt_mutex_top_waiter(lock)->prio) {
                        /* the lock has no owner, but this task's priority is not
                         * higher than the top waiter's: fail, unless our own
                         * waiter *is* the top waiter */
                        if (!waiter || waiter != rt_mutex_top_waiter(lock))
                                return 0;
                }
        }

        if (waiter || rt_mutex_has_waiters(lock)) { /* note: when called from
                         * rt_mutex_slowlock and rt_mutex_slowtrylock, the waiter
                         * argument is NULL (it is non-NULL when called from
                         * __rt_mutex_slowlock) */
                unsigned long flags;
                struct rt_mutex_waiter *top;

                raw_spin_lock_irqsave(&task->pi_lock, flags);

                /* remove the queued waiter. */
                if (waiter) {
                        rt_mutex_dequeue(lock, waiter);
                        task->pi_blocked_on = NULL;
                }
                if (rt_mutex_has_waiters(lock)) {
                        top = rt_mutex_top_waiter(lock); /* link the lock's top waiter
                                 * into this task's pi_waiters. Why? Because this task
                                 * is about to become the owner, and the waiter's
                                 * priority must now influence this task's priority. */
                        rt_mutex_enqueue_pi(task, top);
                }
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
        }

        /* We got the lock. */
        debug_rt_mutex_lock(lock);

        rt_mutex_set_owner(lock, task);

        rt_mutex_deadlock_account_lock(lock, task);

        return 1;
}
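mark_rt_mutex_waiters(), called at the top, is the counterpart of the owner bit-0 trick shown earlier: it atomically sets the "has waiters" bit so that a concurrent fast-path unlock (a cmpxchg expecting a clean owner pointer) fails and is forced into the slow path. A sketch following the 3.10 source:

/* Following mark_rt_mutex_waiters() in 3.10: atomically OR the
 * HAS_WAITERS bit into lock->owner, so the unlock fast path (which
 * cmpxchg-es the bare owner pointer) cannot succeed behind our back. */
static void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        unsigned long owner, *p = (unsigned long *) &lock->owner;

        do {
                owner = *p;
        } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}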
Next comes task_blocks_on_rt_mutex(). The function itself is not long, but some of its callees are, and it embodies the core idea of the rtmutex: priority inheritance.
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                   struct rt_mutex_waiter *waiter,
                                   struct task_struct *task,
                                   int detect_deadlock)
{
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *top_waiter = waiter;
        struct rt_mutex *next_lock;
        int chain_walk = 0, res;
        unsigned long flags;

        if (owner == task) /* the current task is already the owner: deadlock.
                            * E.g. some careless coder locks the same lock twice
                            * in a row: rt_mutex_lock(&lock); rt_mutex_lock(&lock); */
                return -EDEADLK;

        raw_spin_lock_irqsave(&task->pi_lock, flags);
        __rt_mutex_adjust_prio(task);
        waiter->task = task;
        waiter->lock = lock;
        waiter->prio = task->prio;

        /* Get the top priority waiter on the lock */
        if (rt_mutex_has_waiters(lock))
                top_waiter = rt_mutex_top_waiter(lock);
        rt_mutex_enqueue(lock, waiter); /* insert the waiter into the lock's RB tree */

        task->pi_blocked_on = waiter; /* mark the current task as blocked on this
                                       * waiter; if the lock is acquired later,
                                       * pi_blocked_on is reset to NULL */

        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        if (!owner)
                return 0;

        raw_spin_lock_irqsave(&owner->pi_lock, flags);
        if (waiter == rt_mutex_top_waiter(lock)) { /* if this waiter became the
                         * lock's top waiter, update the owner's pi_waiters, since
                         * a task's pi_waiters only holds each lock's top waiter */
                rt_mutex_dequeue_pi(owner, top_waiter);
                rt_mutex_enqueue_pi(owner, waiter);

                __rt_mutex_adjust_prio(owner); /* a higher-priority waiter has
                                 * appeared, so adjust the owner's priority */
                if (owner->pi_blocked_on) /* if the owner is itself blocked on yet
                                 * another lock, the whole chain must be walked to
                                 * propagate the priority */
                        chain_walk = 1;
        } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
                chain_walk = 1;
        }

        /* Store the lock on which owner is blocked or NULL */
        next_lock = task_blocked_on_lock(owner); /* get the lock blocking the owner;
                         * if next_lock exists, the PI chain must be adjusted step
                         * by step further down */

        raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
        /*
         * Even if full deadlock detection is on, if the owner is not
         * blocked itself, we can avoid finding this out in the chain
         * walk.
         */
        if (!chain_walk || !next_lock)
                return 0;

        /*
         * The owner can't disappear while holding a lock,
         * so the owner struct is protected by wait_lock.
         * Gets dropped in rt_mutex_adjust_prio_chain()!
         */
        get_task_struct(owner);

        raw_spin_unlock(&lock->wait_lock);

        res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
                                         next_lock, waiter, task); /* walk the PI
                         * chain and adjust priorities; for the PI chain itself,
                         * skim rt-mutex-design.txt first */

        raw_spin_lock(&lock->wait_lock);

        return res;
}
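Two short helpers used above deserve a look. __rt_mutex_adjust_prio() recomputes the effective priority via rt_mutex_getprio() (shown earlier) and hands the result to the scheduler; task_blocked_on_lock() simply follows pi_blocked_on. Sketches following the 3.10-era source:

/* Sketches following kernel/rtmutex.c in 3.10. The caller must hold
 * task->pi_lock. */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
        int prio = rt_mutex_getprio(task); /* min(normal prio, top PI waiter prio) */

        if (task->prio != prio)
                rt_mutex_setprio(task, prio); /* apply the boost/deboost in the scheduler */
}

/* The lock the task is blocked on, or NULL if it is not blocked. */
static struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
        return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
}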

Now for the main event. I personally find this the hardest code in rtmutex: rt_mutex_adjust_prio_chain. Be clear about the relationships among its input parameters, since they include two rt_mutexes and two task_structs.

static int rt_mutex_adjust_prio_chain(struct task_struct *task, /* the owner of orig_lock */
                                      int deadlock_detect, /* whether to check for deadlock */
                                      struct rt_mutex *orig_lock, /* the lock the current task is trying to take */
                                      struct rt_mutex *next_lock, /* the lock the owner is trying to take */
                                      struct rt_mutex_waiter *orig_waiter, /* the current task's waiter */
                                      struct task_struct *top_task) /* the current task */
{
        struct rt_mutex *lock;
        struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
        int detect_deadlock, ret = 0, depth = 0;
        unsigned long flags;

        detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
                                                         deadlock_detect);

        /*
         * The (de)boosting is a step by step approach with a lot of
         * pitfalls. We want this to be preemptible and we want hold a
         * maximum of two locks per step. So we have to check
         * carefully whether things change under us.
         */
 again:
        if (++depth > max_lock_depth) {
                /* the walk along the PI chain is not unbounded: for complexity
                 * and performance reasons it is capped at max_lock_depth
                 * (1024 by default) */
                static int prev_max;

                /*
                 * Print this only once. If the admin changes the limit,
                 * print a new message when reaching the limit again.
                 */
                if (prev_max != max_lock_depth) {
                        prev_max = max_lock_depth;
                        printk(KERN_WARNING "Maximum lock depth %d reached "
                               "task: %s (%d)\n", max_lock_depth,
                               top_task->comm, task_pid_nr(top_task));
                }
                put_task_struct(task);

                return -EDEADLK;
        }

 retry:
        /*
         * Task can not go away as we did a get_task() before !
         */
        raw_spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on; /* fetch the task's waiter; note that the
                         * task parameter here is the owner as seen from
                         * orig_waiter. This function walks the PI chain,
                         * adjusting priorities until it reaches an owner that
                         * is not itself blocked. */
        /*
         * Check whether the end of the boosting chain has been
         * reached or the state of the chain has changed while we
         * dropped the locks.
         */
        if (!waiter)
                goto out_unlock_pi;

        /*
         * Check the orig_waiter state. After we dropped the locks,
         * the previous owner of the lock might have released the lock.
         */
        if (orig_waiter && !rt_mutex_owner(orig_lock)) /* re-check orig_lock: it
                         * may have been released while we were adjusting */
                goto out_unlock_pi;

        /*
         * We dropped all locks after taking a refcount on @task, so
         * the task might have moved on in the lock chain or even left
         * the chain completely and blocks now on an unrelated lock or
         * on @orig_lock.
         *
         * We stored the lock on which @task was blocked in @next_lock,
         * so we can detect the chain change.
         */
        if (next_lock != waiter->lock) /* normally next_lock equals waiter->lock;
                         * but if the owner is now blocked on a different lock,
                         * there is no point walking any further */
                goto out_unlock_pi;

        /*
         * Drop out, when the task has no waiters. Note,
         * top_waiter can be NULL, when we are in the deboosting
         * mode!
         */
        if (top_waiter) {
                if (!task_has_pi_waiters(task)) /* if, during the walk, this task
                                 * no longer has any PI waiters, there is nothing
                                 * left to adjust; stop */
                        goto out_unlock_pi;
                /*
                 * If deadlock detection is off, we stop here if we
                 * are not the top pi waiter of the task.
                 */
                if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
                        goto out_unlock_pi;
        }


        /*
         * When deadlock detection is off then we check, if further
         * priority adjustment is necessary.
         */
        if (!detect_deadlock && waiter->prio == task->prio) /* if the task's
                         * priority did not change and deadlock detection is
                         * off, there is no need to walk the PI chain further */
                goto out_unlock_pi;

        lock = waiter->lock;
        if (!raw_spin_trylock(&lock->wait_lock)) {
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                cpu_relax();
                goto retry;
        }

        /*
         * Deadlock detection. If the lock is the same as the original
         * lock which caused us to walk the lock chain or if the
         * current lock is owned by the task which initiated the chain
         * walk, we detected a deadlock.
         */
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
                raw_spin_unlock(&lock->wait_lock);
                ret = -EDEADLK;
                goto out_unlock_pi;
        }


This is the key to the deadlock detection.
(1) lock == orig_lock. Remember that orig_lock is the lock blocking the task that originally called rt_mutex_lock, while lock is an arbitrary lock along the PI chain. If lock == orig_lock, the PI chain, in whole or in part, has formed a cycle. For example:

rt_mutex A, B, C;

func_A {
        rt_mutex_lock(&A);
        rt_mutex_lock(&B);
};

func_B {
        rt_mutex_lock(&B);
        rt_mutex_lock(&C);
};

func_C {
        rt_mutex_lock(&C);
        rt_mutex_lock(&A);
};

With the three functions above running in three different tasks, the PI chain looks roughly like this:

A -> func_A -> B -> func_B -> C -> func_C -> A, i.e. a cycle.

(2) The second case, rt_mutex_owner(lock) == top_task, catches the same kind of cycle as above; only the point at which the comparison happens differs slightly. That cycle looks roughly like func_A -> B -> func_B -> C -> func_C -> func_A. Now back to the code.


        top_waiter = rt_mutex_top_waiter(lock);

        /* Requeue the waiter: its priority has changed. A waiter's priority is
         * always >= its task's priority; the "equal" case was handled above,
         * so here we handle the "greater" case. */
        rt_mutex_dequeue(lock, waiter);
        waiter->prio = task->prio;
        rt_mutex_enqueue(lock, waiter);

        /* Release the task */
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
        if (!rt_mutex_owner(lock)) {
                /*
                 * If the requeue above changed the top waiter, then we need
                 * to wake the new top waiter up to try to get the lock.
                 */
                if (top_waiter != rt_mutex_top_waiter(lock)) /* the lock was
                                 * released while we were adjusting; wake its
                                 * (new) top waiter */
                        wake_up_process(rt_mutex_top_waiter(lock)->task);
                raw_spin_unlock(&lock->wait_lock);
                goto out_put_task;
        }
        put_task_struct(task);

        /* Grab the next task */
        task = rt_mutex_owner(lock); /* move on to the next owner */
        get_task_struct(task);
        raw_spin_lock_irqsave(&task->pi_lock, flags);

        if (waiter == rt_mutex_top_waiter(lock)) { /* the lock's top waiter has
                         * changed, so update the owner's pi_waiters (the waiter
                         * itself was already requeued above) */
                /* Boost the owner */
                rt_mutex_dequeue_pi(task, top_waiter);
                rt_mutex_enqueue_pi(task, waiter);
                __rt_mutex_adjust_prio(task);

        } else if (top_waiter == waiter) { /* this waiter used to be the top
                         * waiter but no longer is; the owner's priority must
                         * be adjusted (lowered) accordingly */
                /* Deboost the owner */
                rt_mutex_dequeue_pi(task, waiter);
                waiter = rt_mutex_top_waiter(lock);
                rt_mutex_enqueue_pi(task, waiter);
                __rt_mutex_adjust_prio(task);
        }

        next_lock = task_blocked_on_lock(task); /* the next lock blocking this task */

        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        top_waiter = rt_mutex_top_waiter(lock);
        raw_spin_unlock(&lock->wait_lock);

        /*
         * We reached the end of the lock chain. Stop right here. No
         * point to go back just to figure that out.
         */
        if (!next_lock)
                goto out_put_task;

        if (!detect_deadlock && waiter != top_waiter)
                goto out_put_task;

        goto again;
Returning to rt_mutex_slowlock: after task_blocks_on_rt_mutex completes, if the lock still has not been acquired, __rt_mutex_slowlock runs.
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
                    struct hrtimer_sleeper *timeout,
                    struct rt_mutex_waiter *waiter)
{
        int ret = 0;

        for (;;) {
                /* Try to acquire the lock: */
                if (try_to_take_rt_mutex(lock, current, waiter)) /* discussed
                                 * above, but note that waiter is no longer NULL
                                 * here, so it behaves slightly differently */
                        break;

                /*
                 * TASK_INTERRUPTIBLE checks for signals and
                 * timeout. Ignored otherwise.
                 */
                if (unlikely(state == TASK_INTERRUPTIBLE)) {
                        /* Signal pending? */
                        if (signal_pending(current)) /* TASK_INTERRUPTIBLE means
                                         * the sleep can be interrupted by
                                         * signals; check for pending ones */
                                ret = -EINTR;
                        if (timeout && !timeout->task)
                                ret = -ETIMEDOUT;
                        if (ret)
                                break;
                }

                raw_spin_unlock(&lock->wait_lock);

                debug_rt_mutex_print_deadlock(waiter);

                schedule_rt_mutex(lock); /* call schedule() and sleep */

                raw_spin_lock(&lock->wait_lock);
                set_current_state(state);
        }

        return ret;
}
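Note that schedule_rt_mutex() is, in the normal case, nothing more than schedule(); following kernel/rtmutex_common.h in 3.10, the indirection exists only for the rtmutex test harness:

/* Following kernel/rtmutex_common.h in 3.10: unless the rtmutex tester
 * (CONFIG_RT_MUTEX_TESTER) is built in, sleeping on an rtmutex is a
 * plain call into the scheduler. */
#ifdef CONFIG_RT_MUTEX_TESTER
extern void schedule_rt_mutex_test(struct rt_mutex *lock);
# define schedule_rt_mutex(_lock)                               \
  do {                                                          \
        if (!(current->flags & PF_MUTEX_TESTER))                \
                schedule();                                     \
        else                                                    \
                schedule_rt_mutex_test(_lock);                  \
  } while (0)
#else
# define schedule_rt_mutex(_lock)                       schedule()
#endif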
That is the bulk of the rt_mutex_lock path. What remains is the tail of rt_mutex_slowlock, after __rt_mutex_slowlock returns:
 
        if (unlikely(ret)) { /* a non-zero return means we were woken by a signal
                         * or by the timeout, not by an unlock, so the waiter must
                         * be removed here in rt_mutex_slowlock. When woken by an
                         * unlock, the waiter cleanup is done by the unlock path. */
                remove_waiter(lock, &waiter);
                rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
        }

        /*
         * try_to_take_rt_mutex() sets the waiter bit
         * unconditionally. We might have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        raw_spin_unlock(&lock->wait_lock);

        /* Remove pending timer: */
        if (unlikely(timeout))
                hrtimer_cancel(&timeout->timer);

        debug_rt_mutex_free_waiter(&waiter);
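fixup_rt_mutex_waiters() closes the loop on the owner bit-0 trick: try_to_take_rt_mutex() sets the "has waiters" bit unconditionally, so if the waiter tree turned out to be empty the bit is cleared again here. A sketch following the 3.10 source:

/* Following kernel/rtmutex.c in 3.10: if no waiters remain, clear the
 * HAS_WAITERS bit from lock->owner so the cmpxchg fast paths can
 * succeed again. */
static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                clear_rt_mutex_waiters(lock); /* lock->owner &= ~RT_MUTEX_HAS_WAITERS */
}

With that, the lock side of the rtmutex story is complete; the unlock side undoes these steps and wakes the lock's top waiter.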

