xref: /openbmc/linux/kernel/locking/rtmutex.c (revision a8a28aff)
1 /*
2  * RT-Mutexes: simple blocking mutual exclusion locks with PI support
3  *
4  * started by Ingo Molnar and Thomas Gleixner.
5  *
6  *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
7  *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
8  *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
9  *  Copyright (C) 2006 Esben Nielsen
10  *
11  *  See Documentation/rt-mutex-design.txt for details.
12  */
13 #include <linux/spinlock.h>
14 #include <linux/export.h>
15 #include <linux/sched.h>
16 #include <linux/sched/rt.h>
17 #include <linux/sched/deadline.h>
18 #include <linux/timer.h>
19 
20 #include "rtmutex_common.h"
21 
22 /*
23  * lock->owner state tracking:
24  *
25  * lock->owner holds the task_struct pointer of the owner. Bit 0
26  * is used to keep track of the "lock has waiters" state.
27  *
28  * owner	bit0
29  * NULL		0	lock is free (fast acquire possible)
30  * NULL		1	lock is free and has waiters and the top waiter
31  *				is going to take the lock*
32  * taskpointer	0	lock is held (fast release possible)
33  * taskpointer	1	lock is held and has waiters**
34  *
35  * The fast atomic compare exchange based acquire and release is only
36  * possible when bit 0 of lock->owner is 0.
37  *
38  * (*) It also can be a transitional state when grabbing the lock
39  * with ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
40  * we need to set the bit0 before looking at the lock, and the owner may be
41  * NULL in this small time, hence this can be a transitional state.
42  *
43  * (**) There is a small time when bit 0 is set but there are no
44  * waiters. This can happen when grabbing the lock in the slow path.
45  * To prevent a cmpxchg of the owner releasing the lock, we need to
46  * set this bit before looking at the lock.
47  */
48 
49 static void
50 rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
51 {
52 	unsigned long val = (unsigned long)owner;
53 
54 	if (rt_mutex_has_waiters(lock))
55 		val |= RT_MUTEX_HAS_WAITERS;
56 
57 	lock->owner = (struct task_struct *)val;
58 }
59 
60 static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
61 {
62 	lock->owner = (struct task_struct *)
63 			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
64 }
65 
66 static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
67 {
68 	if (!rt_mutex_has_waiters(lock))
69 		clear_rt_mutex_waiters(lock);
70 }
71 
72 /*
73  * We can speed up the acquire/release, if the architecture
74  * supports cmpxchg and if there's no debugging state to be set up
75  */
76 #if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
77 # define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
78 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
79 {
80 	unsigned long owner, *p = (unsigned long *) &lock->owner;
81 
82 	do {
83 		owner = *p;
84 	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
85 }
86 
87 /*
88  * Safe fastpath aware unlock:
89  * 1) Clear the waiters bit
90  * 2) Drop lock->wait_lock
91  * 3) Try to unlock the lock with cmpxchg
92  */
93 static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
94 	__releases(lock->wait_lock)
95 {
96 	struct task_struct *owner = rt_mutex_owner(lock);
97 
98 	clear_rt_mutex_waiters(lock);
99 	raw_spin_unlock(&lock->wait_lock);
100 	/*
101 	 * If a new waiter comes in between the unlock and the cmpxchg
102 	 * we have two situations:
103 	 *
104 	 * unlock(wait_lock);
105 	 *					lock(wait_lock);
106 	 * cmpxchg(p, owner, 0) == owner
107 	 *					mark_rt_mutex_waiters(lock);
108 	 *					acquire(lock);
109 	 * or:
110 	 *
111 	 * unlock(wait_lock);
112 	 *					lock(wait_lock);
113 	 *					mark_rt_mutex_waiters(lock);
114 	 *
115 	 * cmpxchg(p, owner, 0) != owner
116 	 *					enqueue_waiter();
117 	 *					unlock(wait_lock);
118 	 * lock(wait_lock);
119 	 * wake waiter();
120 	 * unlock(wait_lock);
121 	 *					lock(wait_lock);
122 	 *					acquire(lock);
123 	 */
124 	return rt_mutex_cmpxchg(lock, owner, NULL);
125 }
126 
127 #else
128 # define rt_mutex_cmpxchg(l,c,n)	(0)
129 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
130 {
131 	lock->owner = (struct task_struct *)
132 			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
133 }
134 
135 /*
136  * Simple slow path only version: lock->owner is protected by lock->wait_lock.
137  */
138 static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
139 	__releases(lock->wait_lock)
140 {
141 	lock->owner = NULL;
142 	raw_spin_unlock(&lock->wait_lock);
143 	return true;
144 }
145 #endif
146 
147 static inline int
148 rt_mutex_waiter_less(struct rt_mutex_waiter *left,
149 		     struct rt_mutex_waiter *right)
150 {
151 	if (left->prio < right->prio)
152 		return 1;
153 
154 	/*
155 	 * If both waiters have dl_prio(), we check the deadlines of the
156 	 * associated tasks.
157 	 * If left waiter has a dl_prio(), and we didn't return 1 above,
158 	 * then right waiter has a dl_prio() too.
159 	 */
160 	if (dl_prio(left->prio))
161 		return (left->task->dl.deadline < right->task->dl.deadline);
162 
163 	return 0;
164 }
165 
166 static void
167 rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
168 {
169 	struct rb_node **link = &lock->waiters.rb_node;
170 	struct rb_node *parent = NULL;
171 	struct rt_mutex_waiter *entry;
172 	int leftmost = 1;
173 
174 	while (*link) {
175 		parent = *link;
176 		entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
177 		if (rt_mutex_waiter_less(waiter, entry)) {
178 			link = &parent->rb_left;
179 		} else {
180 			link = &parent->rb_right;
181 			leftmost = 0;
182 		}
183 	}
184 
185 	if (leftmost)
186 		lock->waiters_leftmost = &waiter->tree_entry;
187 
188 	rb_link_node(&waiter->tree_entry, parent, link);
189 	rb_insert_color(&waiter->tree_entry, &lock->waiters);
190 }
191 
192 static void
193 rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
194 {
195 	if (RB_EMPTY_NODE(&waiter->tree_entry))
196 		return;
197 
198 	if (lock->waiters_leftmost == &waiter->tree_entry)
199 		lock->waiters_leftmost = rb_next(&waiter->tree_entry);
200 
201 	rb_erase(&waiter->tree_entry, &lock->waiters);
202 	RB_CLEAR_NODE(&waiter->tree_entry);
203 }
204 
205 static void
206 rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
207 {
208 	struct rb_node **link = &task->pi_waiters.rb_node;
209 	struct rb_node *parent = NULL;
210 	struct rt_mutex_waiter *entry;
211 	int leftmost = 1;
212 
213 	while (*link) {
214 		parent = *link;
215 		entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
216 		if (rt_mutex_waiter_less(waiter, entry)) {
217 			link = &parent->rb_left;
218 		} else {
219 			link = &parent->rb_right;
220 			leftmost = 0;
221 		}
222 	}
223 
224 	if (leftmost)
225 		task->pi_waiters_leftmost = &waiter->pi_tree_entry;
226 
227 	rb_link_node(&waiter->pi_tree_entry, parent, link);
228 	rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
229 }
230 
231 static void
232 rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
233 {
234 	if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
235 		return;
236 
237 	if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
238 		task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);
239 
240 	rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
241 	RB_CLEAR_NODE(&waiter->pi_tree_entry);
242 }
243 
244 /*
245  * Calculate task priority from the waiter tree priority
246  *
247  * Return task->normal_prio when the waiter tree is empty or when
248  * the waiter is not allowed to do priority boosting
249  */
250 int rt_mutex_getprio(struct task_struct *task)
251 {
252 	if (likely(!task_has_pi_waiters(task)))
253 		return task->normal_prio;
254 
255 	return min(task_top_pi_waiter(task)->prio,
256 		   task->normal_prio);
257 }
258 
259 struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
260 {
261 	if (likely(!task_has_pi_waiters(task)))
262 		return NULL;
263 
264 	return task_top_pi_waiter(task)->task;
265 }
266 
267 /*
268  * Called by sched_setscheduler() to check whether the priority change
269  * is overruled by a possible priority boosting.
270  */
271 int rt_mutex_check_prio(struct task_struct *task, int newprio)
272 {
273 	if (!task_has_pi_waiters(task))
274 		return 0;
275 
276 	return task_top_pi_waiter(task)->task->prio <= newprio;
277 }
278 
279 /*
280  * Adjust the priority of a task, after its pi_waiters got modified.
281  *
282  * This can be both boosting and unboosting. task->pi_lock must be held.
283  */
284 static void __rt_mutex_adjust_prio(struct task_struct *task)
285 {
286 	int prio = rt_mutex_getprio(task);
287 
288 	if (task->prio != prio || dl_prio(prio))
289 		rt_mutex_setprio(task, prio);
290 }
291 
292 /*
293  * Adjust task priority (undo boosting). Called from the exit path of
294  * rt_mutex_slowunlock() and rt_mutex_slowlock().
295  *
296  * (Note: We do this outside of the protection of lock->wait_lock to
297  * allow the lock to be taken while or before we readjust the priority
298  * of task. We do not use the spin_xx_mutex() variants here as we are
299  * outside of the debug path.)
300  */
301 static void rt_mutex_adjust_prio(struct task_struct *task)
302 {
303 	unsigned long flags;
304 
305 	raw_spin_lock_irqsave(&task->pi_lock, flags);
306 	__rt_mutex_adjust_prio(task);
307 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
308 }
309 
310 /*
311  * Max number of times we'll walk the boosting chain:
312  */
313 int max_lock_depth = 1024;
314 
315 static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
316 {
317 	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
318 }
319 
320 /*
321  * Adjust the priority chain. Also used for deadlock detection.
322  * Decreases task's usage by one - may thus free the task.
323  *
324  * @task:	the task owning the mutex (owner) for which a chain walk is
325  *		probably needed
326  * @deadlock_detect: do we have to carry out deadlock detection?
327  * @orig_lock:	the mutex (can be NULL if we are walking the chain to recheck
328  *		things for a task that has just got its priority adjusted, and
329  *		is waiting on a mutex)
330  * @next_lock:	the mutex on which the owner of @orig_lock was blocked before
331  *		we dropped its pi_lock. Is never dereferenced, only used for
332  *		comparison to detect lock chain changes.
333  * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
334  *		its priority to the mutex owner (can be NULL in the case
335  *		depicted above or if the top waiter is gone away and we are
336  *		actually deboosting the owner)
337  * @top_task:	the current top waiter
338  *
339  * Returns 0 or -EDEADLK.
340  */
341 static int rt_mutex_adjust_prio_chain(struct task_struct *task,
342 				      int deadlock_detect,
343 				      struct rt_mutex *orig_lock,
344 				      struct rt_mutex *next_lock,
345 				      struct rt_mutex_waiter *orig_waiter,
346 				      struct task_struct *top_task)
347 {
348 	struct rt_mutex *lock;
349 	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
350 	int detect_deadlock, ret = 0, depth = 0;
351 	unsigned long flags;
352 
353 	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
354 							 deadlock_detect);
355 
356 	/*
357 	 * The (de)boosting is a step by step approach with a lot of
358 	 * pitfalls. We want this to be preemptible and we want hold a
359 	 * maximum of two locks per step. So we have to check
360 	 * carefully whether things change under us.
361 	 */
362  again:
363 	if (++depth > max_lock_depth) {
364 		static int prev_max;
365 
366 		/*
367 		 * Print this only once. If the admin changes the limit,
368 		 * print a new message when reaching the limit again.
369 		 */
370 		if (prev_max != max_lock_depth) {
371 			prev_max = max_lock_depth;
372 			printk(KERN_WARNING "Maximum lock depth %d reached "
373 			       "task: %s (%d)\n", max_lock_depth,
374 			       top_task->comm, task_pid_nr(top_task));
375 		}
376 		put_task_struct(task);
377 
378 		return -EDEADLK;
379 	}
380  retry:
381 	/*
382 	 * Task can not go away as we did a get_task() before !
383 	 */
384 	raw_spin_lock_irqsave(&task->pi_lock, flags);
385 
386 	waiter = task->pi_blocked_on;
387 	/*
388 	 * Check whether the end of the boosting chain has been
389 	 * reached or the state of the chain has changed while we
390 	 * dropped the locks.
391 	 */
392 	if (!waiter)
393 		goto out_unlock_pi;
394 
395 	/*
396 	 * Check the orig_waiter state. After we dropped the locks,
397 	 * the previous owner of the lock might have released the lock.
398 	 */
399 	if (orig_waiter && !rt_mutex_owner(orig_lock))
400 		goto out_unlock_pi;
401 
402 	/*
403 	 * We dropped all locks after taking a refcount on @task, so
404 	 * the task might have moved on in the lock chain or even left
405 	 * the chain completely and blocks now on an unrelated lock or
406 	 * on @orig_lock.
407 	 *
408 	 * We stored the lock on which @task was blocked in @next_lock,
409 	 * so we can detect the chain change.
410 	 */
411 	if (next_lock != waiter->lock)
412 		goto out_unlock_pi;
413 
414 	/*
415 	 * Drop out, when the task has no waiters. Note,
416 	 * top_waiter can be NULL, when we are in the deboosting
417 	 * mode!
418 	 */
419 	if (top_waiter) {
420 		if (!task_has_pi_waiters(task))
421 			goto out_unlock_pi;
422 		/*
423 		 * If deadlock detection is off, we stop here if we
424 		 * are not the top pi waiter of the task.
425 		 */
426 		if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
427 			goto out_unlock_pi;
428 	}
429 
430 	/*
431 	 * When deadlock detection is off then we check, if further
432 	 * priority adjustment is necessary.
433 	 */
434 	if (!detect_deadlock && waiter->prio == task->prio)
435 		goto out_unlock_pi;
436 
437 	lock = waiter->lock;
438 	if (!raw_spin_trylock(&lock->wait_lock)) {
439 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
440 		cpu_relax();
441 		goto retry;
442 	}
443 
444 	/*
445 	 * Deadlock detection. If the lock is the same as the original
446 	 * lock which caused us to walk the lock chain or if the
447 	 * current lock is owned by the task which initiated the chain
448 	 * walk, we detected a deadlock.
449 	 */
450 	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
451 		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
452 		raw_spin_unlock(&lock->wait_lock);
453 		ret = -EDEADLK;
454 		goto out_unlock_pi;
455 	}
456 
457 	top_waiter = rt_mutex_top_waiter(lock);
458 
459 	/* Requeue the waiter */
460 	rt_mutex_dequeue(lock, waiter);
461 	waiter->prio = task->prio;
462 	rt_mutex_enqueue(lock, waiter);
463 
464 	/* Release the task */
465 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
466 	if (!rt_mutex_owner(lock)) {
467 		/*
468 		 * If the requeue above changed the top waiter, then we need
469 		 * to wake the new top waiter up to try to get the lock.
470 		 */
471 
472 		if (top_waiter != rt_mutex_top_waiter(lock))
473 			wake_up_process(rt_mutex_top_waiter(lock)->task);
474 		raw_spin_unlock(&lock->wait_lock);
475 		goto out_put_task;
476 	}
477 	put_task_struct(task);
478 
479 	/* Grab the next task */
480 	task = rt_mutex_owner(lock);
481 	get_task_struct(task);
482 	raw_spin_lock_irqsave(&task->pi_lock, flags);
483 
484 	if (waiter == rt_mutex_top_waiter(lock)) {
485 		/* Boost the owner */
486 		rt_mutex_dequeue_pi(task, top_waiter);
487 		rt_mutex_enqueue_pi(task, waiter);
488 		__rt_mutex_adjust_prio(task);
489 
490 	} else if (top_waiter == waiter) {
491 		/* Deboost the owner */
492 		rt_mutex_dequeue_pi(task, waiter);
493 		waiter = rt_mutex_top_waiter(lock);
494 		rt_mutex_enqueue_pi(task, waiter);
495 		__rt_mutex_adjust_prio(task);
496 	}
497 
498 	/*
499 	 * Check whether the task which owns the current lock is pi
500 	 * blocked itself. If yes we store a pointer to the lock for
501 	 * the lock chain change detection above. After we dropped
502 	 * task->pi_lock next_lock cannot be dereferenced anymore.
503 	 */
504 	next_lock = task_blocked_on_lock(task);
505 
506 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
507 
508 	top_waiter = rt_mutex_top_waiter(lock);
509 	raw_spin_unlock(&lock->wait_lock);
510 
511 	/*
512 	 * We reached the end of the lock chain. Stop right here. No
513 	 * point to go back just to figure that out.
514 	 */
515 	if (!next_lock)
516 		goto out_put_task;
517 
518 	if (!detect_deadlock && waiter != top_waiter)
519 		goto out_put_task;
520 
521 	goto again;
522 
523  out_unlock_pi:
524 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
525  out_put_task:
526 	put_task_struct(task);
527 
528 	return ret;
529 }
530 
531 /*
532  * Try to take an rt-mutex
533  *
534  * Must be called with lock->wait_lock held.
535  *
536  * @lock:   the lock to be acquired.
537  * @task:   the task which wants to acquire the lock
538  * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
539  */
540 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
541 		struct rt_mutex_waiter *waiter)
542 {
543 	/*
544 	 * We have to be careful here if the atomic speedups are
545 	 * enabled, such that, when
546 	 *  - no other waiter is on the lock
547 	 *  - the lock has been released since we did the cmpxchg
548 	 * the lock can be released or taken while we are doing the
549 	 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
550 	 *
551 	 * The atomic acquire/release aware variant of
552 	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
553 	 * the WAITERS bit, the atomic release / acquire can not
554 	 * happen anymore and lock->wait_lock protects us from the
555 	 * non-atomic case.
556 	 *
557 	 * Note, that this might set lock->owner =
558 	 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
559 	 * any more. This is fixed up when we take the ownership.
560 	 * This is the transitional state explained at the top of this file.
561 	 */
562 	mark_rt_mutex_waiters(lock);
563 
564 	if (rt_mutex_owner(lock))
565 		return 0;
566 
567 	/*
568 	 * It will get the lock because of one of these conditions:
569 	 * 1) there is no waiter
570 	 * 2) higher priority than waiters
571 	 * 3) it is top waiter
572 	 */
573 	if (rt_mutex_has_waiters(lock)) {
574 		if (task->prio >= rt_mutex_top_waiter(lock)->prio) {
575 			if (!waiter || waiter != rt_mutex_top_waiter(lock))
576 				return 0;
577 		}
578 	}
579 
580 	if (waiter || rt_mutex_has_waiters(lock)) {
581 		unsigned long flags;
582 		struct rt_mutex_waiter *top;
583 
584 		raw_spin_lock_irqsave(&task->pi_lock, flags);
585 
586 		/* remove the queued waiter. */
587 		if (waiter) {
588 			rt_mutex_dequeue(lock, waiter);
589 			task->pi_blocked_on = NULL;
590 		}
591 
592 		/*
593 		 * We have to enqueue the top waiter(if it exists) into
594 		 * task->pi_waiters list.
595 		 */
596 		if (rt_mutex_has_waiters(lock)) {
597 			top = rt_mutex_top_waiter(lock);
598 			rt_mutex_enqueue_pi(task, top);
599 		}
600 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
601 	}
602 
603 	/* We got the lock. */
604 	debug_rt_mutex_lock(lock);
605 
606 	rt_mutex_set_owner(lock, task);
607 
608 	rt_mutex_deadlock_account_lock(lock, task);
609 
610 	return 1;
611 }
612 
613 /*
614  * Task blocks on lock.
615  *
616  * Prepare waiter and propagate pi chain
617  *
618  * This must be called with lock->wait_lock held.
619  */
620 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
621 				   struct rt_mutex_waiter *waiter,
622 				   struct task_struct *task,
623 				   int detect_deadlock)
624 {
625 	struct task_struct *owner = rt_mutex_owner(lock);
626 	struct rt_mutex_waiter *top_waiter = waiter;
627 	struct rt_mutex *next_lock;
628 	int chain_walk = 0, res;
629 	unsigned long flags;
630 
631 	/*
632 	 * Early deadlock detection. We really don't want the task to
633 	 * enqueue on itself just to untangle the mess later. It's not
634 	 * only an optimization. We drop the locks, so another waiter
635 	 * can come in before the chain walk detects the deadlock. So
636 	 * the other will detect the deadlock and return -EDEADLOCK,
637 	 * which is wrong, as the other waiter is not in a deadlock
638 	 * situation.
639 	 */
640 	if (owner == task)
641 		return -EDEADLK;
642 
643 	raw_spin_lock_irqsave(&task->pi_lock, flags);
644 	__rt_mutex_adjust_prio(task);
645 	waiter->task = task;
646 	waiter->lock = lock;
647 	waiter->prio = task->prio;
648 
649 	/* Get the top priority waiter on the lock */
650 	if (rt_mutex_has_waiters(lock))
651 		top_waiter = rt_mutex_top_waiter(lock);
652 	rt_mutex_enqueue(lock, waiter);
653 
654 	task->pi_blocked_on = waiter;
655 
656 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
657 
658 	if (!owner)
659 		return 0;
660 
661 	raw_spin_lock_irqsave(&owner->pi_lock, flags);
662 	if (waiter == rt_mutex_top_waiter(lock)) {
663 		rt_mutex_dequeue_pi(owner, top_waiter);
664 		rt_mutex_enqueue_pi(owner, waiter);
665 
666 		__rt_mutex_adjust_prio(owner);
667 		if (owner->pi_blocked_on)
668 			chain_walk = 1;
669 	} else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
670 		chain_walk = 1;
671 	}
672 
673 	/* Store the lock on which owner is blocked or NULL */
674 	next_lock = task_blocked_on_lock(owner);
675 
676 	raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
677 	/*
678 	 * Even if full deadlock detection is on, if the owner is not
679 	 * blocked itself, we can avoid finding this out in the chain
680 	 * walk.
681 	 */
682 	if (!chain_walk || !next_lock)
683 		return 0;
684 
685 	/*
686 	 * The owner can't disappear while holding a lock,
687 	 * so the owner struct is protected by wait_lock.
688 	 * Gets dropped in rt_mutex_adjust_prio_chain()!
689 	 */
690 	get_task_struct(owner);
691 
692 	raw_spin_unlock(&lock->wait_lock);
693 
694 	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
695 					 next_lock, waiter, task);
696 
697 	raw_spin_lock(&lock->wait_lock);
698 
699 	return res;
700 }
701 
702 /*
703  * Wake up the next waiter on the lock.
704  *
705  * Remove the top waiter from the current tasks pi waiter list and
706  * wake it up.
707  *
708  * Called with lock->wait_lock held.
709  */
710 static void wakeup_next_waiter(struct rt_mutex *lock)
711 {
712 	struct rt_mutex_waiter *waiter;
713 	unsigned long flags;
714 
715 	raw_spin_lock_irqsave(&current->pi_lock, flags);
716 
717 	waiter = rt_mutex_top_waiter(lock);
718 
719 	/*
720 	 * Remove it from current->pi_waiters. We do not adjust a
721 	 * possible priority boost right now. We execute wakeup in the
722 	 * boosted mode and go back to normal after releasing
723 	 * lock->wait_lock.
724 	 */
725 	rt_mutex_dequeue_pi(current, waiter);
726 
727 	/*
728 	 * As we are waking up the top waiter, and the waiter stays
729 	 * queued on the lock until it gets the lock, this lock
730 	 * obviously has waiters. Just set the bit here and this has
731 	 * the added benefit of forcing all new tasks into the
732 	 * slow path making sure no task of lower priority than
733 	 * the top waiter can steal this lock.
734 	 */
735 	lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
736 
737 	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
738 
739 	/*
740 	 * It's safe to dereference waiter as it cannot go away as
741 	 * long as we hold lock->wait_lock. The waiter task needs to
742 	 * acquire it in order to dequeue the waiter.
743 	 */
744 	wake_up_process(waiter->task);
745 }
746 
747 /*
748  * Remove a waiter from a lock and give up
749  *
750  * Must be called with lock->wait_lock held and
751  * have just failed to try_to_take_rt_mutex().
752  */
753 static void remove_waiter(struct rt_mutex *lock,
754 			  struct rt_mutex_waiter *waiter)
755 {
756 	int first = (waiter == rt_mutex_top_waiter(lock));
757 	struct task_struct *owner = rt_mutex_owner(lock);
758 	struct rt_mutex *next_lock = NULL;
759 	unsigned long flags;
760 
761 	raw_spin_lock_irqsave(&current->pi_lock, flags);
762 	rt_mutex_dequeue(lock, waiter);
763 	current->pi_blocked_on = NULL;
764 	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
765 
766 	if (!owner)
767 		return;
768 
769 	if (first) {
770 
771 		raw_spin_lock_irqsave(&owner->pi_lock, flags);
772 
773 		rt_mutex_dequeue_pi(owner, waiter);
774 
775 		if (rt_mutex_has_waiters(lock)) {
776 			struct rt_mutex_waiter *next;
777 
778 			next = rt_mutex_top_waiter(lock);
779 			rt_mutex_enqueue_pi(owner, next);
780 		}
781 		__rt_mutex_adjust_prio(owner);
782 
783 		/* Store the lock on which owner is blocked or NULL */
784 		next_lock = task_blocked_on_lock(owner);
785 
786 		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
787 	}
788 
789 	if (!next_lock)
790 		return;
791 
792 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
793 	get_task_struct(owner);
794 
795 	raw_spin_unlock(&lock->wait_lock);
796 
797 	rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);
798 
799 	raw_spin_lock(&lock->wait_lock);
800 }
801 
802 /*
803  * Recheck the pi chain, in case we got a priority setting
804  *
805  * Called from sched_setscheduler
806  */
807 void rt_mutex_adjust_pi(struct task_struct *task)
808 {
809 	struct rt_mutex_waiter *waiter;
810 	struct rt_mutex *next_lock;
811 	unsigned long flags;
812 
813 	raw_spin_lock_irqsave(&task->pi_lock, flags);
814 
815 	waiter = task->pi_blocked_on;
816 	if (!waiter || (waiter->prio == task->prio &&
817 			!dl_prio(task->prio))) {
818 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
819 		return;
820 	}
821 	next_lock = waiter->lock;
822 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
823 
824 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
825 	get_task_struct(task);
826 
827 	rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
828 }
829 
830 /**
831  * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
832  * @lock:		 the rt_mutex to take
833  * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
834  * 			 or TASK_UNINTERRUPTIBLE)
835  * @timeout:		 the pre-initialized and started timer, or NULL for none
836  * @waiter:		 the pre-initialized rt_mutex_waiter
837  *
838  * lock->wait_lock must be held by the caller.
839  */
840 static int __sched
841 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
842 		    struct hrtimer_sleeper *timeout,
843 		    struct rt_mutex_waiter *waiter)
844 {
845 	int ret = 0;
846 
847 	for (;;) {
848 		/* Try to acquire the lock: */
849 		if (try_to_take_rt_mutex(lock, current, waiter))
850 			break;
851 
852 		/*
853 		 * TASK_INTERRUPTIBLE checks for signals and
854 		 * timeout. Ignored otherwise.
855 		 */
856 		if (unlikely(state == TASK_INTERRUPTIBLE)) {
857 			/* Signal pending? */
858 			if (signal_pending(current))
859 				ret = -EINTR;
860 			if (timeout && !timeout->task)
861 				ret = -ETIMEDOUT;
862 			if (ret)
863 				break;
864 		}
865 
866 		raw_spin_unlock(&lock->wait_lock);
867 
868 		debug_rt_mutex_print_deadlock(waiter);
869 
870 		schedule_rt_mutex(lock);
871 
872 		raw_spin_lock(&lock->wait_lock);
873 		set_current_state(state);
874 	}
875 
876 	return ret;
877 }
878 
879 static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
880 				     struct rt_mutex_waiter *w)
881 {
882 	/*
883 	 * If the result is not -EDEADLOCK or the caller requested
884 	 * deadlock detection, nothing to do here.
885 	 */
886 	if (res != -EDEADLOCK || detect_deadlock)
887 		return;
888 
889 	/*
890 	 * Yell lowdly and stop the task right here.
891 	 */
892 	rt_mutex_print_deadlock(w);
893 	while (1) {
894 		set_current_state(TASK_INTERRUPTIBLE);
895 		schedule();
896 	}
897 }
898 
899 /*
900  * Slow path lock function:
901  */
902 static int __sched
903 rt_mutex_slowlock(struct rt_mutex *lock, int state,
904 		  struct hrtimer_sleeper *timeout,
905 		  int detect_deadlock)
906 {
907 	struct rt_mutex_waiter waiter;
908 	int ret = 0;
909 
910 	debug_rt_mutex_init_waiter(&waiter);
911 	RB_CLEAR_NODE(&waiter.pi_tree_entry);
912 	RB_CLEAR_NODE(&waiter.tree_entry);
913 
914 	raw_spin_lock(&lock->wait_lock);
915 
916 	/* Try to acquire the lock again: */
917 	if (try_to_take_rt_mutex(lock, current, NULL)) {
918 		raw_spin_unlock(&lock->wait_lock);
919 		return 0;
920 	}
921 
922 	set_current_state(state);
923 
924 	/* Setup the timer, when timeout != NULL */
925 	if (unlikely(timeout)) {
926 		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
927 		if (!hrtimer_active(&timeout->timer))
928 			timeout->task = NULL;
929 	}
930 
931 	ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);
932 
933 	if (likely(!ret))
934 		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
935 
936 	set_current_state(TASK_RUNNING);
937 
938 	if (unlikely(ret)) {
939 		remove_waiter(lock, &waiter);
940 		rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
941 	}
942 
943 	/*
944 	 * try_to_take_rt_mutex() sets the waiter bit
945 	 * unconditionally. We might have to fix that up.
946 	 */
947 	fixup_rt_mutex_waiters(lock);
948 
949 	raw_spin_unlock(&lock->wait_lock);
950 
951 	/* Remove pending timer: */
952 	if (unlikely(timeout))
953 		hrtimer_cancel(&timeout->timer);
954 
955 	debug_rt_mutex_free_waiter(&waiter);
956 
957 	return ret;
958 }
959 
960 /*
961  * Slow path try-lock function:
962  */
963 static inline int
964 rt_mutex_slowtrylock(struct rt_mutex *lock)
965 {
966 	int ret = 0;
967 
968 	raw_spin_lock(&lock->wait_lock);
969 
970 	if (likely(rt_mutex_owner(lock) != current)) {
971 
972 		ret = try_to_take_rt_mutex(lock, current, NULL);
973 		/*
974 		 * try_to_take_rt_mutex() sets the lock waiters
975 		 * bit unconditionally. Clean this up.
976 		 */
977 		fixup_rt_mutex_waiters(lock);
978 	}
979 
980 	raw_spin_unlock(&lock->wait_lock);
981 
982 	return ret;
983 }
984 
985 /*
986  * Slow path to release a rt-mutex:
987  */
988 static void __sched
989 rt_mutex_slowunlock(struct rt_mutex *lock)
990 {
991 	raw_spin_lock(&lock->wait_lock);
992 
993 	debug_rt_mutex_unlock(lock);
994 
995 	rt_mutex_deadlock_account_unlock(current);
996 
997 	/*
998 	 * We must be careful here if the fast path is enabled. If we
999 	 * have no waiters queued we cannot set owner to NULL here
1000 	 * because of:
1001 	 *
1002 	 * foo->lock->owner = NULL;
1003 	 *			rtmutex_lock(foo->lock);   <- fast path
1004 	 *			free = atomic_dec_and_test(foo->refcnt);
1005 	 *			rtmutex_unlock(foo->lock); <- fast path
1006 	 *			if (free)
1007 	 *				kfree(foo);
1008 	 * raw_spin_unlock(foo->lock->wait_lock);
1009 	 *
1010 	 * So for the fastpath enabled kernel:
1011 	 *
1012 	 * Nothing can set the waiters bit as long as we hold
1013 	 * lock->wait_lock. So we do the following sequence:
1014 	 *
1015 	 *	owner = rt_mutex_owner(lock);
1016 	 *	clear_rt_mutex_waiters(lock);
1017 	 *	raw_spin_unlock(&lock->wait_lock);
1018 	 *	if (cmpxchg(&lock->owner, owner, 0) == owner)
1019 	 *		return;
1020 	 *	goto retry;
1021 	 *
1022 	 * The fastpath disabled variant is simple as all access to
1023 	 * lock->owner is serialized by lock->wait_lock:
1024 	 *
1025 	 *	lock->owner = NULL;
1026 	 *	raw_spin_unlock(&lock->wait_lock);
1027 	 */
1028 	while (!rt_mutex_has_waiters(lock)) {
1029 		/* Drops lock->wait_lock ! */
1030 		if (unlock_rt_mutex_safe(lock) == true)
1031 			return;
1032 		/* Relock the rtmutex and try again */
1033 		raw_spin_lock(&lock->wait_lock);
1034 	}
1035 
1036 	/*
1037 	 * The wakeup next waiter path does not suffer from the above
1038 	 * race. See the comments there.
1039 	 */
1040 	wakeup_next_waiter(lock);
1041 
1042 	raw_spin_unlock(&lock->wait_lock);
1043 
1044 	/* Undo pi boosting if necessary: */
1045 	rt_mutex_adjust_prio(current);
1046 }
1047 
1048 /*
1049  * debug aware fast / slowpath lock,trylock,unlock
1050  *
1051  * The atomic acquire/release ops are compiled away, when either the
1052  * architecture does not support cmpxchg or when debugging is enabled.
1053  */
1054 static inline int
1055 rt_mutex_fastlock(struct rt_mutex *lock, int state,
1056 		  int detect_deadlock,
1057 		  int (*slowfn)(struct rt_mutex *lock, int state,
1058 				struct hrtimer_sleeper *timeout,
1059 				int detect_deadlock))
1060 {
1061 	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
1062 		rt_mutex_deadlock_account_lock(lock, current);
1063 		return 0;
1064 	} else
1065 		return slowfn(lock, state, NULL, detect_deadlock);
1066 }
1067 
1068 static inline int
1069 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
1070 			struct hrtimer_sleeper *timeout, int detect_deadlock,
1071 			int (*slowfn)(struct rt_mutex *lock, int state,
1072 				      struct hrtimer_sleeper *timeout,
1073 				      int detect_deadlock))
1074 {
1075 	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
1076 		rt_mutex_deadlock_account_lock(lock, current);
1077 		return 0;
1078 	} else
1079 		return slowfn(lock, state, timeout, detect_deadlock);
1080 }
1081 
1082 static inline int
1083 rt_mutex_fasttrylock(struct rt_mutex *lock,
1084 		     int (*slowfn)(struct rt_mutex *lock))
1085 {
1086 	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
1087 		rt_mutex_deadlock_account_lock(lock, current);
1088 		return 1;
1089 	}
1090 	return slowfn(lock);
1091 }
1092 
1093 static inline void
1094 rt_mutex_fastunlock(struct rt_mutex *lock,
1095 		    void (*slowfn)(struct rt_mutex *lock))
1096 {
1097 	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
1098 		rt_mutex_deadlock_account_unlock(current);
1099 	else
1100 		slowfn(lock);
1101 }
1102 
1103 /**
1104  * rt_mutex_lock - lock a rt_mutex
1105  *
1106  * @lock: the rt_mutex to be locked
1107  */
1108 void __sched rt_mutex_lock(struct rt_mutex *lock)
1109 {
1110 	might_sleep();
1111 
1112 	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
1113 }
1114 EXPORT_SYMBOL_GPL(rt_mutex_lock);
1115 
1116 /**
1117  * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
1118  *
1119  * @lock: 		the rt_mutex to be locked
1120  * @detect_deadlock:	deadlock detection on/off
1121  *
1122  * Returns:
1123  *  0 		on success
1124  * -EINTR 	when interrupted by a signal
1125  * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
1126  */
1127 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
1128 						 int detect_deadlock)
1129 {
1130 	might_sleep();
1131 
1132 	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
1133 				 detect_deadlock, rt_mutex_slowlock);
1134 }
1135 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
1136 
1137 /**
1138  * rt_mutex_timed_lock - lock a rt_mutex interruptible
1139  *			the timeout structure is provided
1140  *			by the caller
1141  *
1142  * @lock: 		the rt_mutex to be locked
1143  * @timeout:		timeout structure or NULL (no timeout)
1144  * @detect_deadlock:	deadlock detection on/off
1145  *
1146  * Returns:
1147  *  0 		on success
1148  * -EINTR 	when interrupted by a signal
1149  * -ETIMEDOUT	when the timeout expired
1150  * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
1151  */
1152 int
1153 rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
1154 		    int detect_deadlock)
1155 {
1156 	might_sleep();
1157 
1158 	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
1159 				       detect_deadlock, rt_mutex_slowlock);
1160 }
1161 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
1162 
1163 /**
1164  * rt_mutex_trylock - try to lock a rt_mutex
1165  *
1166  * @lock:	the rt_mutex to be locked
1167  *
1168  * Returns 1 on success and 0 on contention
1169  */
1170 int __sched rt_mutex_trylock(struct rt_mutex *lock)
1171 {
1172 	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
1173 }
1174 EXPORT_SYMBOL_GPL(rt_mutex_trylock);
1175 
1176 /**
1177  * rt_mutex_unlock - unlock a rt_mutex
1178  *
1179  * @lock: the rt_mutex to be unlocked
1180  */
1181 void __sched rt_mutex_unlock(struct rt_mutex *lock)
1182 {
1183 	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
1184 }
1185 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
1186 
1187 /**
1188  * rt_mutex_destroy - mark a mutex unusable
1189  * @lock: the mutex to be destroyed
1190  *
1191  * This function marks the mutex uninitialized, and any subsequent
1192  * use of the mutex is forbidden. The mutex must not be locked when
1193  * this function is called.
1194  */
1195 void rt_mutex_destroy(struct rt_mutex *lock)
1196 {
1197 	WARN_ON(rt_mutex_is_locked(lock));
1198 #ifdef CONFIG_DEBUG_RT_MUTEXES
1199 	lock->magic = NULL;
1200 #endif
1201 }
1202 
1203 EXPORT_SYMBOL_GPL(rt_mutex_destroy);
1204 
1205 /**
1206  * __rt_mutex_init - initialize the rt lock
1207  *
1208  * @lock: the rt lock to be initialized
1209  *
1210  * Initialize the rt lock to unlocked state.
1211  *
1212  * Initializing of a locked rt lock is not allowed
1213  */
1214 void __rt_mutex_init(struct rt_mutex *lock, const char *name)
1215 {
1216 	lock->owner = NULL;
1217 	raw_spin_lock_init(&lock->wait_lock);
1218 	lock->waiters = RB_ROOT;
1219 	lock->waiters_leftmost = NULL;
1220 
1221 	debug_rt_mutex_init(lock, name);
1222 }
1223 EXPORT_SYMBOL_GPL(__rt_mutex_init);
1224 
1225 /**
1226  * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
1227  *				proxy owner
1228  *
1229  * @lock: 	the rt_mutex to be locked
1230  * @proxy_owner:the task to set as owner
1231  *
1232  * No locking. Caller has to do serializing itself
1233  * Special API call for PI-futex support
1234  */
1235 void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
1236 				struct task_struct *proxy_owner)
1237 {
1238 	__rt_mutex_init(lock, NULL);
1239 	debug_rt_mutex_proxy_lock(lock, proxy_owner);
1240 	rt_mutex_set_owner(lock, proxy_owner);
1241 	rt_mutex_deadlock_account_lock(lock, proxy_owner);
1242 }
1243 
1244 /**
1245  * rt_mutex_proxy_unlock - release a lock on behalf of owner
1246  *
1247  * @lock: 	the rt_mutex to be locked
1248  *
1249  * No locking. Caller has to do serializing itself
1250  * Special API call for PI-futex support
1251  */
1252 void rt_mutex_proxy_unlock(struct rt_mutex *lock,
1253 			   struct task_struct *proxy_owner)
1254 {
1255 	debug_rt_mutex_proxy_unlock(lock);
1256 	rt_mutex_set_owner(lock, NULL);
1257 	rt_mutex_deadlock_account_unlock(proxy_owner);
1258 }
1259 
1260 /**
1261  * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
1262  * @lock:		the rt_mutex to take
1263  * @waiter:		the pre-initialized rt_mutex_waiter
1264  * @task:		the task to prepare
1265  * @detect_deadlock:	perform deadlock detection (1) or not (0)
1266  *
1267  * Returns:
1268  *  0 - task blocked on lock
1269  *  1 - acquired the lock for task, caller should wake it up
1270  * <0 - error
1271  *
1272  * Special API call for FUTEX_REQUEUE_PI support.
1273  */
1274 int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
1275 			      struct rt_mutex_waiter *waiter,
1276 			      struct task_struct *task, int detect_deadlock)
1277 {
1278 	int ret;
1279 
1280 	raw_spin_lock(&lock->wait_lock);
1281 
1282 	if (try_to_take_rt_mutex(lock, task, NULL)) {
1283 		raw_spin_unlock(&lock->wait_lock);
1284 		return 1;
1285 	}
1286 
1287 	/* We enforce deadlock detection for futexes */
1288 	ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);
1289 
1290 	if (ret && !rt_mutex_owner(lock)) {
1291 		/*
1292 		 * Reset the return value. We might have
1293 		 * returned with -EDEADLK and the owner
1294 		 * released the lock while we were walking the
1295 		 * pi chain.  Let the waiter sort it out.
1296 		 */
1297 		ret = 0;
1298 	}
1299 
1300 	if (unlikely(ret))
1301 		remove_waiter(lock, waiter);
1302 
1303 	raw_spin_unlock(&lock->wait_lock);
1304 
1305 	debug_rt_mutex_print_deadlock(waiter);
1306 
1307 	return ret;
1308 }
1309 
1310 /**
1311  * rt_mutex_next_owner - return the next owner of the lock
1312  *
1313  * @lock: the rt lock query
1314  *
1315  * Returns the next owner of the lock or NULL
1316  *
1317  * Caller has to serialize against other accessors to the lock
1318  * itself.
1319  *
1320  * Special API call for PI-futex support
1321  */
1322 struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
1323 {
1324 	if (!rt_mutex_has_waiters(lock))
1325 		return NULL;
1326 
1327 	return rt_mutex_top_waiter(lock)->task;
1328 }
1329 
1330 /**
1331  * rt_mutex_finish_proxy_lock() - Complete lock acquisition
1332  * @lock:		the rt_mutex we were woken on
1333  * @to:			the timeout, null if none. hrtimer should already have
1334  * 			been started.
1335  * @waiter:		the pre-initialized rt_mutex_waiter
1336  * @detect_deadlock:	perform deadlock detection (1) or not (0)
1337  *
1338  * Complete the lock acquisition started our behalf by another thread.
1339  *
1340  * Returns:
1341  *  0 - success
1342  * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
1343  *
1344  * Special API call for PI-futex requeue support
1345  */
1346 int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
1347 			       struct hrtimer_sleeper *to,
1348 			       struct rt_mutex_waiter *waiter,
1349 			       int detect_deadlock)
1350 {
1351 	int ret;
1352 
1353 	raw_spin_lock(&lock->wait_lock);
1354 
1355 	set_current_state(TASK_INTERRUPTIBLE);
1356 
1357 	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
1358 
1359 	set_current_state(TASK_RUNNING);
1360 
1361 	if (unlikely(ret))
1362 		remove_waiter(lock, waiter);
1363 
1364 	/*
1365 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
1366 	 * have to fix that up.
1367 	 */
1368 	fixup_rt_mutex_waiters(lock);
1369 
1370 	raw_spin_unlock(&lock->wait_lock);
1371 
1372 	return ret;
1373 }
1374