xref: /openbmc/linux/kernel/locking/rtmutex.c (revision 5f32c314)
1 /*
2  * RT-Mutexes: simple blocking mutual exclusion locks with PI support
3  *
4  * started by Ingo Molnar and Thomas Gleixner.
5  *
6  *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
7  *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
8  *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
9  *  Copyright (C) 2006 Esben Nielsen
10  *
11  *  See Documentation/rt-mutex-design.txt for details.
12  */
13 #include <linux/spinlock.h>
14 #include <linux/export.h>
15 #include <linux/sched.h>
16 #include <linux/sched/rt.h>
17 #include <linux/sched/deadline.h>
18 #include <linux/timer.h>
19 
20 #include "rtmutex_common.h"
21 
22 /*
23  * lock->owner state tracking:
24  *
25  * lock->owner holds the task_struct pointer of the owner. Bit 0
26  * is used to keep track of the "lock has waiters" state.
27  *
28  * owner	bit0
29  * NULL		0	lock is free (fast acquire possible)
30  * NULL		1	lock is free and has waiters and the top waiter
31  *				is going to take the lock*
32  * taskpointer	0	lock is held (fast release possible)
33  * taskpointer	1	lock is held and has waiters**
34  *
35  * The fast atomic compare exchange based acquire and release is only
36  * possible when bit 0 of lock->owner is 0.
37  *
38  * (*) It also can be a transitional state when grabbing the lock
39  * with ->wait_lock held. To prevent any fast path cmpxchg to the lock,
40  * we need to set bit0 before looking at the lock, and the owner may be
41  * NULL during this small window, hence this can be a transitional state.
42  *
43  * (**) There is a small time when bit 0 is set but there are no
44  * waiters. This can happen when grabbing the lock in the slow path.
45  * To prevent a cmpxchg of the owner releasing the lock, we need to
46  * set this bit before looking at the lock.
47  */
48 
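/*
 * Set lock->owner to @owner, keeping the RT_MUTEX_HAS_WAITERS bit set
 * when the lock still has queued waiters.
 */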
49 static void
50 rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
51 {
52 	unsigned long val = (unsigned long)owner;
53 
54 	if (rt_mutex_has_waiters(lock))
55 		val |= RT_MUTEX_HAS_WAITERS;
56 
57 	lock->owner = (struct task_struct *)val;
58 }
59 
60 static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
61 {
62 	lock->owner = (struct task_struct *)
63 			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
64 }
65 
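/*
 * Clear the "has waiters" bit again if it was set speculatively (e.g. by a
 * failed slow path acquisition) but no waiter is actually queued anymore.
 */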
66 static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
67 {
68 	if (!rt_mutex_has_waiters(lock))
69 		clear_rt_mutex_waiters(lock);
70 }
71 
72 /*
73  * We can speed up the acquire/release if the architecture
74  * supports cmpxchg and there is no debugging state to be set up.
75  */
76 #if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
77 # define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
78 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
79 {
80 	unsigned long owner, *p = (unsigned long *) &lock->owner;
81 
82 	do {
83 		owner = *p;
84 	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
85 }
86 #else
87 # define rt_mutex_cmpxchg(l,c,n)	(0)
88 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
89 {
90 	lock->owner = (struct task_struct *)
91 			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
92 }
93 #endif
94 
95 static inline int
96 rt_mutex_waiter_less(struct rt_mutex_waiter *left,
97 		     struct rt_mutex_waiter *right)
98 {
99 	if (left->prio < right->prio)
100 		return 1;
101 
102 	/*
103 	 * If both waiters have dl_prio(), we check the deadlines of the
104 	 * associated tasks.
105 	 * If left waiter has a dl_prio(), and we didn't return 1 above,
106 	 * then right waiter has a dl_prio() too.
107 	 */
108 	if (dl_prio(left->prio))
109 		return (left->task->dl.deadline < right->task->dl.deadline);
110 
111 	return 0;
112 }
113 
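/*
 * Insert @waiter into @lock's priority-ordered rbtree of waiters and keep
 * the cached leftmost (highest priority) node up to date.
 */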
114 static void
115 rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
116 {
117 	struct rb_node **link = &lock->waiters.rb_node;
118 	struct rb_node *parent = NULL;
119 	struct rt_mutex_waiter *entry;
120 	int leftmost = 1;
121 
122 	while (*link) {
123 		parent = *link;
124 		entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
125 		if (rt_mutex_waiter_less(waiter, entry)) {
126 			link = &parent->rb_left;
127 		} else {
128 			link = &parent->rb_right;
129 			leftmost = 0;
130 		}
131 	}
132 
133 	if (leftmost)
134 		lock->waiters_leftmost = &waiter->tree_entry;
135 
136 	rb_link_node(&waiter->tree_entry, parent, link);
137 	rb_insert_color(&waiter->tree_entry, &lock->waiters);
138 }
139 
140 static void
141 rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
142 {
143 	if (RB_EMPTY_NODE(&waiter->tree_entry))
144 		return;
145 
146 	if (lock->waiters_leftmost == &waiter->tree_entry)
147 		lock->waiters_leftmost = rb_next(&waiter->tree_entry);
148 
149 	rb_erase(&waiter->tree_entry, &lock->waiters);
150 	RB_CLEAR_NODE(&waiter->tree_entry);
151 }
152 
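/*
 * Insert @waiter into @task's rbtree of pi waiters, i.e. the top waiters of
 * all rt_mutexes owned by @task, again maintaining a leftmost cache.
 */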
153 static void
154 rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
155 {
156 	struct rb_node **link = &task->pi_waiters.rb_node;
157 	struct rb_node *parent = NULL;
158 	struct rt_mutex_waiter *entry;
159 	int leftmost = 1;
160 
161 	while (*link) {
162 		parent = *link;
163 		entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
164 		if (rt_mutex_waiter_less(waiter, entry)) {
165 			link = &parent->rb_left;
166 		} else {
167 			link = &parent->rb_right;
168 			leftmost = 0;
169 		}
170 	}
171 
172 	if (leftmost)
173 		task->pi_waiters_leftmost = &waiter->pi_tree_entry;
174 
175 	rb_link_node(&waiter->pi_tree_entry, parent, link);
176 	rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
177 }
178 
179 static void
180 rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
181 {
182 	if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
183 		return;
184 
185 	if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
186 		task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);
187 
188 	rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
189 	RB_CLEAR_NODE(&waiter->pi_tree_entry);
190 }
191 
192 /*
193  * Calculate task priority from the waiter tree priority
194  *
195  * Return task->normal_prio when the waiter tree is empty or when
196  * the waiter is not allowed to do priority boosting
197  */
198 int rt_mutex_getprio(struct task_struct *task)
199 {
200 	if (likely(!task_has_pi_waiters(task)))
201 		return task->normal_prio;
202 
203 	return min(task_top_pi_waiter(task)->prio,
204 		   task->normal_prio);
205 }
206 
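/*
 * Return the task of the highest priority pi waiter, or NULL if the task
 * owns no contended rt_mutex.
 */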
207 struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
208 {
209 	if (likely(!task_has_pi_waiters(task)))
210 		return NULL;
211 
212 	return task_top_pi_waiter(task)->task;
213 }
214 
215 /*
216  * Adjust the priority of a task after its pi_waiters tree has been modified.
217  *
218  * This can be both boosting and unboosting. task->pi_lock must be held.
219  */
220 static void __rt_mutex_adjust_prio(struct task_struct *task)
221 {
222 	int prio = rt_mutex_getprio(task);
223 
224 	if (task->prio != prio || dl_prio(prio))
225 		rt_mutex_setprio(task, prio);
226 }
227 
228 /*
229  * Adjust task priority (undo boosting). Called from the exit path of
230  * rt_mutex_slowunlock().
231  *
232  * (Note: We do this outside of the protection of lock->wait_lock to
233  * allow the lock to be taken while or before we readjust the priority
234  * of task. We do not use the spin_xx_mutex() variants here as we are
235  * outside of the debug path.)
236  */
237 static void rt_mutex_adjust_prio(struct task_struct *task)
238 {
239 	unsigned long flags;
240 
241 	raw_spin_lock_irqsave(&task->pi_lock, flags);
242 	__rt_mutex_adjust_prio(task);
243 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
244 }
245 
246 /*
247  * Max number of times we'll walk the boosting chain:
248  */
249 int max_lock_depth = 1024;
250 
251 /*
252  * Adjust the priority chain. Also used for deadlock detection.
253  * Decreases task's usage by one - may thus free the task.
254  *
255  * @task: the task owning the mutex (owner) for which a chain walk is probably
256  *	  needed
257  * @deadlock_detect: do we have to carry out deadlock detection?
258  * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
259  * 	       things for a task that has just got its priority adjusted, and
260  *	       is waiting on a mutex)
261  * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
262  *		 its priority to the mutex owner (can be NULL in the case
263  *		 depicted above or if the top waiter has gone away and we are
264  *		 actually deboosting the owner)
265  * @top_task: the current top waiter
266  *
267  * Returns 0 or -EDEADLK.
268  */
269 static int rt_mutex_adjust_prio_chain(struct task_struct *task,
270 				      int deadlock_detect,
271 				      struct rt_mutex *orig_lock,
272 				      struct rt_mutex_waiter *orig_waiter,
273 				      struct task_struct *top_task)
274 {
275 	struct rt_mutex *lock;
276 	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
277 	int detect_deadlock, ret = 0, depth = 0;
278 	unsigned long flags;
279 
280 	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
281 							 deadlock_detect);
282 
283 	/*
284 	 * The (de)boosting is a step-by-step approach with a lot of
285 	 * pitfalls. We want this to be preemptible and we want to hold a
286 	 * maximum of two locks per step. So we have to check
287 	 * carefully whether things change under us.
288 	 */
289  again:
290 	if (++depth > max_lock_depth) {
291 		static int prev_max;
292 
293 		/*
294 		 * Print this only once. If the admin changes the limit,
295 		 * print a new message when reaching the limit again.
296 		 */
297 		if (prev_max != max_lock_depth) {
298 			prev_max = max_lock_depth;
299 			printk(KERN_WARNING "Maximum lock depth %d reached "
300 			       "task: %s (%d)\n", max_lock_depth,
301 			       top_task->comm, task_pid_nr(top_task));
302 		}
303 		put_task_struct(task);
304 
305 		return deadlock_detect ? -EDEADLK : 0;
306 	}
307  retry:
308 	/*
309 	 * The task cannot go away as we did a get_task_struct() before!
310 	 */
311 	raw_spin_lock_irqsave(&task->pi_lock, flags);
312 
313 	waiter = task->pi_blocked_on;
314 	/*
315 	 * Check whether the end of the boosting chain has been
316 	 * reached or the state of the chain has changed while we
317 	 * dropped the locks.
318 	 */
319 	if (!waiter)
320 		goto out_unlock_pi;
321 
322 	/*
323 	 * Check the orig_waiter state. After we dropped the locks,
324 	 * the previous owner of the lock might have released the lock.
325 	 */
326 	if (orig_waiter && !rt_mutex_owner(orig_lock))
327 		goto out_unlock_pi;
328 
329 	/*
330 	 * Drop out when the task has no waiters. Note that
331 	 * top_waiter can be NULL when we are in deboosting
332 	 * mode!
333 	 */
334 	if (top_waiter && (!task_has_pi_waiters(task) ||
335 			   top_waiter != task_top_pi_waiter(task)))
336 		goto out_unlock_pi;
337 
338 	/*
339 	 * When deadlock detection is off, we check whether further
340 	 * priority adjustment is necessary.
341 	 */
342 	if (!detect_deadlock && waiter->prio == task->prio)
343 		goto out_unlock_pi;
344 
345 	lock = waiter->lock;
346 	if (!raw_spin_trylock(&lock->wait_lock)) {
347 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
348 		cpu_relax();
349 		goto retry;
350 	}
351 
352 	/* Deadlock detection */
353 	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
354 		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
355 		raw_spin_unlock(&lock->wait_lock);
356 		ret = deadlock_detect ? -EDEADLK : 0;
357 		goto out_unlock_pi;
358 	}
359 
360 	top_waiter = rt_mutex_top_waiter(lock);
361 
362 	/* Requeue the waiter */
363 	rt_mutex_dequeue(lock, waiter);
364 	waiter->prio = task->prio;
365 	rt_mutex_enqueue(lock, waiter);
366 
367 	/* Release the task */
368 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
369 	if (!rt_mutex_owner(lock)) {
370 		/*
371 		 * If the requeue above changed the top waiter, then we need
372 		 * to wake the new top waiter up to try to get the lock.
373 		 */
374 
375 		if (top_waiter != rt_mutex_top_waiter(lock))
376 			wake_up_process(rt_mutex_top_waiter(lock)->task);
377 		raw_spin_unlock(&lock->wait_lock);
378 		goto out_put_task;
379 	}
380 	put_task_struct(task);
381 
382 	/* Grab the next task */
383 	task = rt_mutex_owner(lock);
384 	get_task_struct(task);
385 	raw_spin_lock_irqsave(&task->pi_lock, flags);
386 
387 	if (waiter == rt_mutex_top_waiter(lock)) {
388 		/* Boost the owner */
389 		rt_mutex_dequeue_pi(task, top_waiter);
390 		rt_mutex_enqueue_pi(task, waiter);
391 		__rt_mutex_adjust_prio(task);
392 
393 	} else if (top_waiter == waiter) {
394 		/* Deboost the owner */
395 		rt_mutex_dequeue_pi(task, waiter);
396 		waiter = rt_mutex_top_waiter(lock);
397 		rt_mutex_enqueue_pi(task, waiter);
398 		__rt_mutex_adjust_prio(task);
399 	}
400 
401 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
402 
403 	top_waiter = rt_mutex_top_waiter(lock);
404 	raw_spin_unlock(&lock->wait_lock);
405 
406 	if (!detect_deadlock && waiter != top_waiter)
407 		goto out_put_task;
408 
409 	goto again;
410 
411  out_unlock_pi:
412 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
413  out_put_task:
414 	put_task_struct(task);
415 
416 	return ret;
417 }
418 
419 /*
420  * Try to take an rt-mutex
421  *
422  * Must be called with lock->wait_lock held.
423  *
424  * @lock:   the lock to be acquired.
425  * @task:   the task which wants to acquire the lock
426  * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
427  */
428 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
429 		struct rt_mutex_waiter *waiter)
430 {
431 	/*
432 	 * We have to be careful here if the atomic speedups are
433 	 * enabled: when
434 	 *  - no other waiter is on the lock, or
435 	 *  - the lock has been released since we did the cmpxchg,
436 	 * the lock can be released or taken while we are doing the
437 	 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
438 	 *
439 	 * The atomic acquire/release aware variant of
440 	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
441 	 * the WAITERS bit, the atomic release / acquire can not
442 	 * happen anymore and lock->wait_lock protects us from the
443 	 * non-atomic case.
444 	 *
445 	 * Note that this might set lock->owner =
446 	 * RT_MUTEX_HAS_WAITERS if the lock is not contended
447 	 * any more. This is fixed up when we take the ownership.
448 	 * This is the transitional state explained at the top of this file.
449 	 */
450 	mark_rt_mutex_waiters(lock);
451 
452 	if (rt_mutex_owner(lock))
453 		return 0;
454 
455 	/*
456 	 * The task will get the lock when one of these conditions holds:
457 	 * 1) there is no other waiter
458 	 * 2) it has a higher priority than all waiters
459 	 * 3) it is the top waiter
460 	 */
461 	if (rt_mutex_has_waiters(lock)) {
462 		if (task->prio >= rt_mutex_top_waiter(lock)->prio) {
463 			if (!waiter || waiter != rt_mutex_top_waiter(lock))
464 				return 0;
465 		}
466 	}
467 
468 	if (waiter || rt_mutex_has_waiters(lock)) {
469 		unsigned long flags;
470 		struct rt_mutex_waiter *top;
471 
472 		raw_spin_lock_irqsave(&task->pi_lock, flags);
473 
474 		/* remove the queued waiter. */
475 		if (waiter) {
476 			rt_mutex_dequeue(lock, waiter);
477 			task->pi_blocked_on = NULL;
478 		}
479 
480 		/*
481 		 * We have to enqueue the top waiter (if it exists) into
482 		 * the task->pi_waiters tree.
483 		 */
484 		if (rt_mutex_has_waiters(lock)) {
485 			top = rt_mutex_top_waiter(lock);
486 			rt_mutex_enqueue_pi(task, top);
487 		}
488 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
489 	}
490 
491 	/* We got the lock. */
492 	debug_rt_mutex_lock(lock);
493 
494 	rt_mutex_set_owner(lock, task);
495 
496 	rt_mutex_deadlock_account_lock(lock, task);
497 
498 	return 1;
499 }
500 
501 /*
502  * Task blocks on lock.
503  *
504  * Prepare waiter and propagate pi chain
505  *
506  * This must be called with lock->wait_lock held.
507  */
508 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
509 				   struct rt_mutex_waiter *waiter,
510 				   struct task_struct *task,
511 				   int detect_deadlock)
512 {
513 	struct task_struct *owner = rt_mutex_owner(lock);
514 	struct rt_mutex_waiter *top_waiter = waiter;
515 	unsigned long flags;
516 	int chain_walk = 0, res;
517 
518 	raw_spin_lock_irqsave(&task->pi_lock, flags);
519 	__rt_mutex_adjust_prio(task);
520 	waiter->task = task;
521 	waiter->lock = lock;
522 	waiter->prio = task->prio;
523 
524 	/* Get the top priority waiter on the lock */
525 	if (rt_mutex_has_waiters(lock))
526 		top_waiter = rt_mutex_top_waiter(lock);
527 	rt_mutex_enqueue(lock, waiter);
528 
529 	task->pi_blocked_on = waiter;
530 
531 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
532 
533 	if (!owner)
534 		return 0;
535 
536 	if (waiter == rt_mutex_top_waiter(lock)) {
537 		raw_spin_lock_irqsave(&owner->pi_lock, flags);
538 		rt_mutex_dequeue_pi(owner, top_waiter);
539 		rt_mutex_enqueue_pi(owner, waiter);
540 
541 		__rt_mutex_adjust_prio(owner);
542 		if (owner->pi_blocked_on)
543 			chain_walk = 1;
544 		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
545 	}
546 	else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
547 		chain_walk = 1;
548 
549 	if (!chain_walk)
550 		return 0;
551 
552 	/*
553 	 * The owner can't disappear while holding a lock,
554 	 * so the owner struct is protected by wait_lock.
555 	 * The reference gets dropped in rt_mutex_adjust_prio_chain()!
556 	 */
557 	get_task_struct(owner);
558 
559 	raw_spin_unlock(&lock->wait_lock);
560 
561 	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
562 					 task);
563 
564 	raw_spin_lock(&lock->wait_lock);
565 
566 	return res;
567 }
568 
569 /*
570  * Wake up the next waiter on the lock.
571  *
572  * Remove the top waiter from the current task's pi waiter tree and wake it up.
573  *
574  * Called with lock->wait_lock held.
575  */
576 static void wakeup_next_waiter(struct rt_mutex *lock)
577 {
578 	struct rt_mutex_waiter *waiter;
579 	unsigned long flags;
580 
581 	raw_spin_lock_irqsave(&current->pi_lock, flags);
582 
583 	waiter = rt_mutex_top_waiter(lock);
584 
585 	/*
586 	 * Remove it from current->pi_waiters. We do not adjust a
587 	 * possible priority boost right now. We execute wakeup in the
588 	 * boosted mode and go back to normal after releasing
589 	 * lock->wait_lock.
590 	 */
591 	rt_mutex_dequeue_pi(current, waiter);
592 
593 	rt_mutex_set_owner(lock, NULL);
594 
595 	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
596 
597 	wake_up_process(waiter->task);
598 }
599 
600 /*
601  * Remove a waiter from a lock and give up
602  *
603  * Must be called with lock->wait_lock held, and only after
604  * try_to_take_rt_mutex() has just failed.
605  */
606 static void remove_waiter(struct rt_mutex *lock,
607 			  struct rt_mutex_waiter *waiter)
608 {
609 	int first = (waiter == rt_mutex_top_waiter(lock));
610 	struct task_struct *owner = rt_mutex_owner(lock);
611 	unsigned long flags;
612 	int chain_walk = 0;
613 
614 	raw_spin_lock_irqsave(&current->pi_lock, flags);
615 	rt_mutex_dequeue(lock, waiter);
616 	current->pi_blocked_on = NULL;
617 	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
618 
619 	if (!owner)
620 		return;
621 
622 	if (first) {
623 
624 		raw_spin_lock_irqsave(&owner->pi_lock, flags);
625 
626 		rt_mutex_dequeue_pi(owner, waiter);
627 
628 		if (rt_mutex_has_waiters(lock)) {
629 			struct rt_mutex_waiter *next;
630 
631 			next = rt_mutex_top_waiter(lock);
632 			rt_mutex_enqueue_pi(owner, next);
633 		}
634 		__rt_mutex_adjust_prio(owner);
635 
636 		if (owner->pi_blocked_on)
637 			chain_walk = 1;
638 
639 		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
640 	}
641 
642 	if (!chain_walk)
643 		return;
644 
645 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
646 	get_task_struct(owner);
647 
648 	raw_spin_unlock(&lock->wait_lock);
649 
650 	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
651 
652 	raw_spin_lock(&lock->wait_lock);
653 }
654 
655 /*
656  * Recheck the pi chain, in case the task's priority has been changed
657  *
658  * Called from sched_setscheduler
659  */
660 void rt_mutex_adjust_pi(struct task_struct *task)
661 {
662 	struct rt_mutex_waiter *waiter;
663 	unsigned long flags;
664 
665 	raw_spin_lock_irqsave(&task->pi_lock, flags);
666 
667 	waiter = task->pi_blocked_on;
668 	if (!waiter || (waiter->prio == task->prio &&
669 			!dl_prio(task->prio))) {
670 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
671 		return;
672 	}
673 
674 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
675 
676 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
677 	get_task_struct(task);
678 	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
679 }
680 
681 /**
682  * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
683  * @lock:		 the rt_mutex to take
684  * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
685  * 			 or TASK_UNINTERRUPTIBLE)
686  * @timeout:		 the pre-initialized and started timer, or NULL for none
687  * @waiter:		 the pre-initialized rt_mutex_waiter
688  *
689  * lock->wait_lock must be held by the caller.
690  */
691 static int __sched
692 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
693 		    struct hrtimer_sleeper *timeout,
694 		    struct rt_mutex_waiter *waiter)
695 {
696 	int ret = 0;
697 
698 	for (;;) {
699 		/* Try to acquire the lock: */
700 		if (try_to_take_rt_mutex(lock, current, waiter))
701 			break;
702 
703 		/*
704 		 * In TASK_INTERRUPTIBLE state we check for signals and
705 		 * timeout; otherwise both are ignored.
706 		 */
707 		if (unlikely(state == TASK_INTERRUPTIBLE)) {
708 			/* Signal pending? */
709 			if (signal_pending(current))
710 				ret = -EINTR;
711 			if (timeout && !timeout->task)
712 				ret = -ETIMEDOUT;
713 			if (ret)
714 				break;
715 		}
716 
717 		raw_spin_unlock(&lock->wait_lock);
718 
719 		debug_rt_mutex_print_deadlock(waiter);
720 
721 		schedule_rt_mutex(lock);
722 
723 		raw_spin_lock(&lock->wait_lock);
724 		set_current_state(state);
725 	}
726 
727 	return ret;
728 }
729 
730 /*
731  * Slow path lock function:
732  */
733 static int __sched
734 rt_mutex_slowlock(struct rt_mutex *lock, int state,
735 		  struct hrtimer_sleeper *timeout,
736 		  int detect_deadlock)
737 {
738 	struct rt_mutex_waiter waiter;
739 	int ret = 0;
740 
741 	debug_rt_mutex_init_waiter(&waiter);
742 	RB_CLEAR_NODE(&waiter.pi_tree_entry);
743 	RB_CLEAR_NODE(&waiter.tree_entry);
744 
745 	raw_spin_lock(&lock->wait_lock);
746 
747 	/* Try to acquire the lock again: */
748 	if (try_to_take_rt_mutex(lock, current, NULL)) {
749 		raw_spin_unlock(&lock->wait_lock);
750 		return 0;
751 	}
752 
753 	set_current_state(state);
754 
755 	/* Set up the timer when timeout != NULL */
756 	if (unlikely(timeout)) {
757 		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
758 		if (!hrtimer_active(&timeout->timer))
759 			timeout->task = NULL;
760 	}
761 
762 	ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);
763 
764 	if (likely(!ret))
765 		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
766 
767 	set_current_state(TASK_RUNNING);
768 
769 	if (unlikely(ret))
770 		remove_waiter(lock, &waiter);
771 
772 	/*
773 	 * try_to_take_rt_mutex() sets the waiter bit
774 	 * unconditionally. We might have to fix that up.
775 	 */
776 	fixup_rt_mutex_waiters(lock);
777 
778 	raw_spin_unlock(&lock->wait_lock);
779 
780 	/* Remove pending timer: */
781 	if (unlikely(timeout))
782 		hrtimer_cancel(&timeout->timer);
783 
784 	debug_rt_mutex_free_waiter(&waiter);
785 
786 	return ret;
787 }
788 
789 /*
790  * Slow path try-lock function:
791  */
792 static inline int
793 rt_mutex_slowtrylock(struct rt_mutex *lock)
794 {
795 	int ret = 0;
796 
797 	raw_spin_lock(&lock->wait_lock);
798 
799 	if (likely(rt_mutex_owner(lock) != current)) {
800 
801 		ret = try_to_take_rt_mutex(lock, current, NULL);
802 		/*
803 		 * try_to_take_rt_mutex() sets the lock waiters
804 		 * bit unconditionally. Clean this up.
805 		 */
806 		fixup_rt_mutex_waiters(lock);
807 	}
808 
809 	raw_spin_unlock(&lock->wait_lock);
810 
811 	return ret;
812 }
813 
814 /*
815  * Slow path to release a rt-mutex:
816  */
817 static void __sched
818 rt_mutex_slowunlock(struct rt_mutex *lock)
819 {
820 	raw_spin_lock(&lock->wait_lock);
821 
822 	debug_rt_mutex_unlock(lock);
823 
824 	rt_mutex_deadlock_account_unlock(current);
825 
826 	if (!rt_mutex_has_waiters(lock)) {
827 		lock->owner = NULL;
828 		raw_spin_unlock(&lock->wait_lock);
829 		return;
830 	}
831 
832 	wakeup_next_waiter(lock);
833 
834 	raw_spin_unlock(&lock->wait_lock);
835 
836 	/* Undo pi boosting if necessary: */
837 	rt_mutex_adjust_prio(current);
838 }
839 
840 /*
841  * Debug aware fast / slowpath lock, trylock and unlock functions.
842  *
843  * The atomic acquire/release ops are compiled away when the
844  * architecture does not support cmpxchg or when debugging is enabled.
845  */
846 static inline int
847 rt_mutex_fastlock(struct rt_mutex *lock, int state,
848 		  int detect_deadlock,
849 		  int (*slowfn)(struct rt_mutex *lock, int state,
850 				struct hrtimer_sleeper *timeout,
851 				int detect_deadlock))
852 {
853 	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
854 		rt_mutex_deadlock_account_lock(lock, current);
855 		return 0;
856 	} else
857 		return slowfn(lock, state, NULL, detect_deadlock);
858 }
859 
860 static inline int
861 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
862 			struct hrtimer_sleeper *timeout, int detect_deadlock,
863 			int (*slowfn)(struct rt_mutex *lock, int state,
864 				      struct hrtimer_sleeper *timeout,
865 				      int detect_deadlock))
866 {
867 	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
868 		rt_mutex_deadlock_account_lock(lock, current);
869 		return 0;
870 	} else
871 		return slowfn(lock, state, timeout, detect_deadlock);
872 }
873 
874 static inline int
875 rt_mutex_fasttrylock(struct rt_mutex *lock,
876 		     int (*slowfn)(struct rt_mutex *lock))
877 {
878 	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
879 		rt_mutex_deadlock_account_lock(lock, current);
880 		return 1;
881 	}
882 	return slowfn(lock);
883 }
884 
885 static inline void
886 rt_mutex_fastunlock(struct rt_mutex *lock,
887 		    void (*slowfn)(struct rt_mutex *lock))
888 {
889 	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
890 		rt_mutex_deadlock_account_unlock(current);
891 	else
892 		slowfn(lock);
893 }
894 
895 /**
896  * rt_mutex_lock - lock a rt_mutex
897  *
898  * @lock: the rt_mutex to be locked
899  */
900 void __sched rt_mutex_lock(struct rt_mutex *lock)
901 {
902 	might_sleep();
903 
904 	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
905 }
906 EXPORT_SYMBOL_GPL(rt_mutex_lock);
907 
908 /**
909  * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly
910  *
911  * @lock: 		the rt_mutex to be locked
912  * @detect_deadlock:	deadlock detection on/off
913  *
914  * Returns:
915  *  0 		on success
916  * -EINTR 	when interrupted by a signal
917  * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
918  */
919 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
920 						 int detect_deadlock)
921 {
922 	might_sleep();
923 
924 	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
925 				 detect_deadlock, rt_mutex_slowlock);
926 }
927 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
928 
929 /**
930  * rt_mutex_timed_lock - lock a rt_mutex interruptibly;
931  *			the timeout structure is provided
932  *			by the caller
933  *
934  * @lock: 		the rt_mutex to be locked
935  * @timeout:		timeout structure or NULL (no timeout)
936  * @detect_deadlock:	deadlock detection on/off
937  *
938  * Returns:
939  *  0 		on success
940  * -EINTR 	when interrupted by a signal
941  * -ETIMEDOUT	when the timeout expired
942  * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
943  */
944 int
945 rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
946 		    int detect_deadlock)
947 {
948 	might_sleep();
949 
950 	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
951 				       detect_deadlock, rt_mutex_slowlock);
952 }
953 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
954 
955 /**
956  * rt_mutex_trylock - try to lock a rt_mutex
957  *
958  * @lock:	the rt_mutex to be locked
959  *
960  * Returns 1 on success and 0 on contention
961  */
962 int __sched rt_mutex_trylock(struct rt_mutex *lock)
963 {
964 	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
965 }
966 EXPORT_SYMBOL_GPL(rt_mutex_trylock);
967 
968 /**
969  * rt_mutex_unlock - unlock a rt_mutex
970  *
971  * @lock: the rt_mutex to be unlocked
972  */
973 void __sched rt_mutex_unlock(struct rt_mutex *lock)
974 {
975 	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
976 }
977 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
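/*
 * Illustrative sketch (not part of the original file): typical blocking use
 * of the rt_mutex API from process context. DEFINE_RT_MUTEX() comes from
 * <linux/rtmutex.h>; "example_lock" is a hypothetical name.
 *
 *	static DEFINE_RT_MUTEX(example_lock);
 *
 *	rt_mutex_lock(&example_lock);
 *	... critical section, may sleep while holding the lock ...
 *	rt_mutex_unlock(&example_lock);
 *
 * rt_mutex_trylock() can be used instead of rt_mutex_lock() where blocking
 * is not allowed; it returns 1 on success and 0 on contention.
 */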
978 
979 /**
980  * rt_mutex_destroy - mark a mutex unusable
981  * @lock: the mutex to be destroyed
982  *
983  * This function marks the mutex uninitialized, and any subsequent
984  * use of the mutex is forbidden. The mutex must not be locked when
985  * this function is called.
986  */
987 void rt_mutex_destroy(struct rt_mutex *lock)
988 {
989 	WARN_ON(rt_mutex_is_locked(lock));
990 #ifdef CONFIG_DEBUG_RT_MUTEXES
991 	lock->magic = NULL;
992 #endif
993 }
994 
995 EXPORT_SYMBOL_GPL(rt_mutex_destroy);
996 
997 /**
998  * __rt_mutex_init - initialize the rt lock
999  *
1000  * @lock: the rt lock to be initialized
1001  *
1002  * Initialize the rt lock to unlocked state.
1003  * Initialize the rt lock to the unlocked state.
1004  *
1005  * Initializing a locked rt lock is not allowed.
1006 void __rt_mutex_init(struct rt_mutex *lock, const char *name)
1007 {
1008 	lock->owner = NULL;
1009 	raw_spin_lock_init(&lock->wait_lock);
1010 	lock->waiters = RB_ROOT;
1011 	lock->waiters_leftmost = NULL;
1012 
1013 	debug_rt_mutex_init(lock, name);
1014 }
1015 EXPORT_SYMBOL_GPL(__rt_mutex_init);
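/*
 * Illustrative sketch (not part of the original file): dynamic initialization
 * usually goes through the rt_mutex_init() wrapper from <linux/rtmutex.h>,
 * which supplies the lock name for the debug code. "my_data" and "my_lock"
 * are hypothetical names.
 *
 *	struct my_data {
 *		struct rt_mutex my_lock;
 *	};
 *
 *	static struct my_data d;
 *	...
 *	rt_mutex_init(&d.my_lock);
 */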
1016 
1017 /**
1018  * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
1019  *				proxy owner
1020  *
1021  * @lock: 	the rt_mutex to be locked
1022  * @proxy_owner:	the task to set as owner
1023  *
1024  * No locking. The caller has to do the serializing itself.
1025  * Special API call for PI-futex support
1026  */
1027 void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
1028 				struct task_struct *proxy_owner)
1029 {
1030 	__rt_mutex_init(lock, NULL);
1031 	debug_rt_mutex_proxy_lock(lock, proxy_owner);
1032 	rt_mutex_set_owner(lock, proxy_owner);
1033 	rt_mutex_deadlock_account_lock(lock, proxy_owner);
1034 }
1035 
1036 /**
1037  * rt_mutex_proxy_unlock - release a lock on behalf of owner
1038  *
1039  * @lock:	the rt_mutex to be unlocked
1040  *
1041  * No locking. The caller has to do the serializing itself.
1042  * Special API call for PI-futex support
1043  */
1044 void rt_mutex_proxy_unlock(struct rt_mutex *lock,
1045 			   struct task_struct *proxy_owner)
1046 {
1047 	debug_rt_mutex_proxy_unlock(lock);
1048 	rt_mutex_set_owner(lock, NULL);
1049 	rt_mutex_deadlock_account_unlock(proxy_owner);
1050 }
1051 
1052 /**
1053  * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
1054  * @lock:		the rt_mutex to take
1055  * @waiter:		the pre-initialized rt_mutex_waiter
1056  * @task:		the task to prepare
1057  * @detect_deadlock:	perform deadlock detection (1) or not (0)
1058  *
1059  * Returns:
1060  *  0 - task blocked on lock
1061  *  1 - acquired the lock for task, caller should wake it up
1062  * <0 - error
1063  *
1064  * Special API call for FUTEX_REQUEUE_PI support.
1065  */
1066 int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
1067 			      struct rt_mutex_waiter *waiter,
1068 			      struct task_struct *task, int detect_deadlock)
1069 {
1070 	int ret;
1071 
1072 	raw_spin_lock(&lock->wait_lock);
1073 
1074 	if (try_to_take_rt_mutex(lock, task, NULL)) {
1075 		raw_spin_unlock(&lock->wait_lock);
1076 		return 1;
1077 	}
1078 
1079 	ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
1080 
1081 	if (ret && !rt_mutex_owner(lock)) {
1082 		/*
1083 		 * Reset the return value. We might have
1084 		 * returned with -EDEADLK and the owner
1085 		 * released the lock while we were walking the
1086 		 * pi chain.  Let the waiter sort it out.
1087 		 */
1088 		ret = 0;
1089 	}
1090 
1091 	if (unlikely(ret))
1092 		remove_waiter(lock, waiter);
1093 
1094 	raw_spin_unlock(&lock->wait_lock);
1095 
1096 	debug_rt_mutex_print_deadlock(waiter);
1097 
1098 	return ret;
1099 }
1100 
1101 /**
1102  * rt_mutex_next_owner - return the next owner of the lock
1103  *
1104  * @lock: the rt lock to query
1105  *
1106  * Returns the next owner of the lock or NULL
1107  *
1108  * Caller has to serialize against other accessors to the lock
1109  * itself.
1110  *
1111  * Special API call for PI-futex support
1112  */
1113 struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
1114 {
1115 	if (!rt_mutex_has_waiters(lock))
1116 		return NULL;
1117 
1118 	return rt_mutex_top_waiter(lock)->task;
1119 }
1120 
1121 /**
1122  * rt_mutex_finish_proxy_lock() - Complete lock acquisition
1123  * @lock:		the rt_mutex we were woken on
1124  * @to:			the timeout, NULL if none. The hrtimer should already have
1125  * 			been started.
1126  * @waiter:		the pre-initialized rt_mutex_waiter
1127  * @detect_deadlock:	perform deadlock detection (1) or not (0)
1128  *
1129  * Complete the lock acquisition started on our behalf by another thread.
1130  *
1131  * Returns:
1132  *  0 - success
1133  * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
1134  *
1135  * Special API call for PI-futex requeue support
1136  */
1137 int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
1138 			       struct hrtimer_sleeper *to,
1139 			       struct rt_mutex_waiter *waiter,
1140 			       int detect_deadlock)
1141 {
1142 	int ret;
1143 
1144 	raw_spin_lock(&lock->wait_lock);
1145 
1146 	set_current_state(TASK_INTERRUPTIBLE);
1147 
1148 	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
1149 
1150 	set_current_state(TASK_RUNNING);
1151 
1152 	if (unlikely(ret))
1153 		remove_waiter(lock, waiter);
1154 
1155 	/*
1156 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
1157 	 * have to fix that up.
1158 	 */
1159 	fixup_rt_mutex_waiters(lock);
1160 
1161 	raw_spin_unlock(&lock->wait_lock);
1162 
1163 	return ret;
1164 }
1165