xref: /openbmc/linux/kernel/locking/rtmutex.c (revision 84d517f3)
1 /*
2  * RT-Mutexes: simple blocking mutual exclusion locks with PI support
3  *
4  * started by Ingo Molnar and Thomas Gleixner.
5  *
6  *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
7  *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
8  *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
9  *  Copyright (C) 2006 Esben Nielsen
10  *
11  *  See Documentation/rt-mutex-design.txt for details.
12  */
13 #include <linux/spinlock.h>
14 #include <linux/export.h>
15 #include <linux/sched.h>
16 #include <linux/sched/rt.h>
17 #include <linux/sched/deadline.h>
18 #include <linux/timer.h>
19 
20 #include "rtmutex_common.h"
21 
22 /*
23  * lock->owner state tracking:
24  *
25  * lock->owner holds the task_struct pointer of the owner. Bit 0
26  * is used to keep track of the "lock has waiters" state.
27  *
28  * owner	bit0
29  * NULL		0	lock is free (fast acquire possible)
30  * NULL		1	lock is free and has waiters and the top waiter
31  *				is going to take the lock*
32  * taskpointer	0	lock is held (fast release possible)
33  * taskpointer	1	lock is held and has waiters**
34  *
35  * The fast atomic compare exchange based acquire and release is only
36  * possible when bit 0 of lock->owner is 0.
37  *
38  * (*) It can also be a transitional state when grabbing the lock
39  * with ->wait_lock held. To prevent any fast path cmpxchg to the lock,
40  * we need to set bit0 before looking at the lock, and the owner may be
41  * NULL during this small window, hence this can be a transitional state.
42  *
43  * (**) There is a small time when bit 0 is set but there are no
44  * waiters. This can happen when grabbing the lock in the slow path.
45  * To prevent a cmpxchg of the owner releasing the lock, we need to
46  * set this bit before looking at the lock.
47  */
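/*
 * A short illustration of the encoding above (the pointer value is made
 * up purely for the example): if the owning task_struct lives at
 * 0xffff880012345670 and waiters are queued, lock->owner reads
 * 0xffff880012345671. Since task_struct pointers are always at least
 * word aligned, bit 0 is free for this, and the helpers recover the
 * plain pointer by masking the low bit off again, roughly:
 *
 *	owner = (struct task_struct *)
 *			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
 */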
48 
49 static void
50 rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
51 {
52 	unsigned long val = (unsigned long)owner;
53 
54 	if (rt_mutex_has_waiters(lock))
55 		val |= RT_MUTEX_HAS_WAITERS;
56 
57 	lock->owner = (struct task_struct *)val;
58 }
59 
60 static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
61 {
62 	lock->owner = (struct task_struct *)
63 			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
64 }
65 
66 static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
67 {
68 	if (!rt_mutex_has_waiters(lock))
69 		clear_rt_mutex_waiters(lock);
70 }
71 
72 /*
73  * We can speed up the acquire/release if the architecture
74  * supports cmpxchg and if there's no debugging state to be set up.
75  */
76 #if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
77 # define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
78 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
79 {
80 	unsigned long owner, *p = (unsigned long *) &lock->owner;
81 
82 	do {
83 		owner = *p;
84 	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
85 }
86 #else
87 # define rt_mutex_cmpxchg(l,c,n)	(0)
88 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
89 {
90 	lock->owner = (struct task_struct *)
91 			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
92 }
93 #endif
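/*
 * A minimal sketch of how the two variants differ: with the cmpxchg
 * based helpers the uncontended fast path is a single atomic compare
 * and exchange of lock->owner,
 *
 *	if (rt_mutex_cmpxchg(lock, NULL, current))
 *		return;
 *
 * i.e. the lock is taken without touching lock->wait_lock at all. The
 * fallback variants make rt_mutex_cmpxchg() always fail, so every
 * acquisition and release goes through the slow path and lock->owner is
 * only modified under lock->wait_lock, which is why the plain store in
 * the fallback mark_rt_mutex_waiters() is sufficient.
 */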
94 
95 static inline int
96 rt_mutex_waiter_less(struct rt_mutex_waiter *left,
97 		     struct rt_mutex_waiter *right)
98 {
99 	if (left->prio < right->prio)
100 		return 1;
101 
102 	/*
103 	 * If both waiters have dl_prio(), we check the deadlines of the
104 	 * associated tasks.
105 	 * If left waiter has a dl_prio(), and we didn't return 1 above,
106 	 * then right waiter has a dl_prio() too.
107 	 */
108 	if (dl_prio(left->prio))
109 		return (left->task->dl.deadline < right->task->dl.deadline);
110 
111 	return 0;
112 }
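/*
 * An illustrative ordering for the comparison above: a SCHED_DEADLINE
 * waiter has dl_prio() (a negative ->prio) and therefore sorts before
 * any SCHED_FIFO/SCHED_RR or normal waiter; between two deadline
 * waiters the one with the earlier absolute deadline wins; between two
 * RT waiters the one with the numerically smaller ->prio (i.e. the
 * higher RT priority) wins.
 */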
113 
114 static void
115 rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
116 {
117 	struct rb_node **link = &lock->waiters.rb_node;
118 	struct rb_node *parent = NULL;
119 	struct rt_mutex_waiter *entry;
120 	int leftmost = 1;
121 
122 	while (*link) {
123 		parent = *link;
124 		entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
125 		if (rt_mutex_waiter_less(waiter, entry)) {
126 			link = &parent->rb_left;
127 		} else {
128 			link = &parent->rb_right;
129 			leftmost = 0;
130 		}
131 	}
132 
133 	if (leftmost)
134 		lock->waiters_leftmost = &waiter->tree_entry;
135 
136 	rb_link_node(&waiter->tree_entry, parent, link);
137 	rb_insert_color(&waiter->tree_entry, &lock->waiters);
138 }
139 
140 static void
141 rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
142 {
143 	if (RB_EMPTY_NODE(&waiter->tree_entry))
144 		return;
145 
146 	if (lock->waiters_leftmost == &waiter->tree_entry)
147 		lock->waiters_leftmost = rb_next(&waiter->tree_entry);
148 
149 	rb_erase(&waiter->tree_entry, &lock->waiters);
150 	RB_CLEAR_NODE(&waiter->tree_entry);
151 }
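/*
 * Both the per-lock waiter tree above and the per-task pi_waiters tree
 * below cache a pointer to their leftmost node (waiters_leftmost and
 * pi_waiters_leftmost), so the highest priority waiter can be found in
 * O(1) by rt_mutex_top_waiter() / task_top_pi_waiter() without walking
 * the rbtree. The enqueue/dequeue helpers are responsible for keeping
 * those cached pointers consistent.
 */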
152 
153 static void
154 rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
155 {
156 	struct rb_node **link = &task->pi_waiters.rb_node;
157 	struct rb_node *parent = NULL;
158 	struct rt_mutex_waiter *entry;
159 	int leftmost = 1;
160 
161 	while (*link) {
162 		parent = *link;
163 		entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
164 		if (rt_mutex_waiter_less(waiter, entry)) {
165 			link = &parent->rb_left;
166 		} else {
167 			link = &parent->rb_right;
168 			leftmost = 0;
169 		}
170 	}
171 
172 	if (leftmost)
173 		task->pi_waiters_leftmost = &waiter->pi_tree_entry;
174 
175 	rb_link_node(&waiter->pi_tree_entry, parent, link);
176 	rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
177 }
178 
179 static void
180 rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
181 {
182 	if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
183 		return;
184 
185 	if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
186 		task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);
187 
188 	rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
189 	RB_CLEAR_NODE(&waiter->pi_tree_entry);
190 }
191 
192 /*
193  * Calculate task priority from the waiter tree priority
194  *
195  * Return task->normal_prio when the waiter tree is empty or when
196  * the waiter is not allowed to do priority boosting
197  */
198 int rt_mutex_getprio(struct task_struct *task)
199 {
200 	if (likely(!task_has_pi_waiters(task)))
201 		return task->normal_prio;
202 
203 	return min(task_top_pi_waiter(task)->prio,
204 		   task->normal_prio);
205 }
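/*
 * Example with made-up numbers: a SCHED_NORMAL owner whose ->normal_prio
 * is 120 and whose top pi waiter has ->prio 10 gets 10 back from
 * rt_mutex_getprio() (it is boosted). Once task->pi_waiters is empty
 * again, the function simply returns the task's own ->normal_prio of 120.
 */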
206 
207 struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
208 {
209 	if (likely(!task_has_pi_waiters(task)))
210 		return NULL;
211 
212 	return task_top_pi_waiter(task)->task;
213 }
214 
215 /*
216  * Called by sched_setscheduler() to check whether the priority change
217  * is overruled by a possible priority boosting.
218  */
219 int rt_mutex_check_prio(struct task_struct *task, int newprio)
220 {
221 	if (!task_has_pi_waiters(task))
222 		return 0;
223 
224 	return task_top_pi_waiter(task)->task->prio <= newprio;
225 }
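/*
 * In other words: a non-zero return means the top pi waiter's priority
 * is at least as high as the requested @newprio (lower numerical value
 * means higher priority), so the requested change would be overruled by
 * the boost, as described above.
 */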
226 
227 /*
228  * Adjust the priority of a task, after its pi_waiters got modified.
229  *
230  * This can be both boosting and unboosting. task->pi_lock must be held.
231  */
232 static void __rt_mutex_adjust_prio(struct task_struct *task)
233 {
234 	int prio = rt_mutex_getprio(task);
235 
236 	if (task->prio != prio || dl_prio(prio))
237 		rt_mutex_setprio(task, prio);
238 }
239 
240 /*
241  * Adjust task priority (undo boosting). Called from the exit path of
242  * rt_mutex_slowunlock() and rt_mutex_slowlock().
243  *
244  * (Note: We do this outside of the protection of lock->wait_lock to
245  * allow the lock to be taken while or before we readjust the priority
246  * of task. We do not use the spin_xx_mutex() variants here as we are
247  * outside of the debug path.)
248  */
249 static void rt_mutex_adjust_prio(struct task_struct *task)
250 {
251 	unsigned long flags;
252 
253 	raw_spin_lock_irqsave(&task->pi_lock, flags);
254 	__rt_mutex_adjust_prio(task);
255 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
256 }
257 
258 /*
259  * Max number of times we'll walk the boosting chain:
260  */
261 int max_lock_depth = 1024;
262 
263 /*
264  * Adjust the priority chain. Also used for deadlock detection.
265  * Decreases task's usage by one - may thus free the task.
266  *
267  * @task: the task owning the mutex (owner) for which a chain walk is probably
268  *	  needed
269  * @deadlock_detect: do we have to carry out deadlock detection?
270  * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
271  * 	       things for a task that has just got its priority adjusted, and
272  *	       is waiting on a mutex)
273  * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
274  *		 its priority to the mutex owner (can be NULL in the case
275  *		 depicted above or if the top waiter has gone away and we are
276  *		 actually deboosting the owner)
277  * @top_task: the current top waiter
278  *
279  * Returns 0 or -EDEADLK.
280  */
281 static int rt_mutex_adjust_prio_chain(struct task_struct *task,
282 				      int deadlock_detect,
283 				      struct rt_mutex *orig_lock,
284 				      struct rt_mutex_waiter *orig_waiter,
285 				      struct task_struct *top_task)
286 {
287 	struct rt_mutex *lock;
288 	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
289 	int detect_deadlock, ret = 0, depth = 0;
290 	unsigned long flags;
291 
292 	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
293 							 deadlock_detect);
294 
295 	/*
296 	 * The (de)boosting is a step by step approach with a lot of
297 	 * pitfalls. We want this to be preemptible and we want to hold a
298 	 * maximum of two locks per step. So we have to check
299 	 * carefully whether things change under us.
300 	 */
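	/*
	 * Roughly, each step below works on one (task, lock) pair of the
	 * chain and nests at most two raw spinlocks at any time: first
	 * task->pi_lock together with lock->wait_lock to requeue the
	 * waiter, then lock->wait_lock together with the lock owner's
	 * pi_lock to adjust the owner's boost, before everything is
	 * dropped again and the walk advances to that owner.
	 */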
301  again:
302 	if (++depth > max_lock_depth) {
303 		static int prev_max;
304 
305 		/*
306 		 * Print this only once. If the admin changes the limit,
307 		 * print a new message when reaching the limit again.
308 		 */
309 		if (prev_max != max_lock_depth) {
310 			prev_max = max_lock_depth;
311 			printk(KERN_WARNING "Maximum lock depth %d reached "
312 			       "task: %s (%d)\n", max_lock_depth,
313 			       top_task->comm, task_pid_nr(top_task));
314 		}
315 		put_task_struct(task);
316 
317 		return deadlock_detect ? -EDEADLK : 0;
318 	}
319  retry:
320 	/*
321 	 * The task cannot go away: we did a get_task_struct() on it before!
322 	 */
323 	raw_spin_lock_irqsave(&task->pi_lock, flags);
324 
325 	waiter = task->pi_blocked_on;
326 	/*
327 	 * Check whether the end of the boosting chain has been
328 	 * reached or the state of the chain has changed while we
329 	 * dropped the locks.
330 	 */
331 	if (!waiter)
332 		goto out_unlock_pi;
333 
334 	/*
335 	 * Check the orig_waiter state. After we dropped the locks,
336 	 * the previous owner of the lock might have released the lock.
337 	 */
338 	if (orig_waiter && !rt_mutex_owner(orig_lock))
339 		goto out_unlock_pi;
340 
341 	/*
342 	 * Drop out when the task has no waiters. Note that
343 	 * top_waiter can be NULL when we are in deboosting
344 	 * mode!
345 	 */
346 	if (top_waiter) {
347 		if (!task_has_pi_waiters(task))
348 			goto out_unlock_pi;
349 		/*
350 		 * If deadlock detection is off, we stop here if we
351 		 * are not the top pi waiter of the task.
352 		 */
353 		if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
354 			goto out_unlock_pi;
355 	}
356 
357 	/*
358 	 * When deadlock detection is off, we check whether further
359 	 * priority adjustment is necessary.
360 	 */
361 	if (!detect_deadlock && waiter->prio == task->prio)
362 		goto out_unlock_pi;
363 
364 	lock = waiter->lock;
365 	if (!raw_spin_trylock(&lock->wait_lock)) {
366 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
367 		cpu_relax();
368 		goto retry;
369 	}
370 
371 	/*
372 	 * Deadlock detection. If the lock is the same as the original
373 	 * lock which caused us to walk the lock chain or if the
374 	 * current lock is owned by the task which initiated the chain
375 	 * walk, we detected a deadlock.
376 	 */
377 	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
378 		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
379 		raw_spin_unlock(&lock->wait_lock);
380 		ret = deadlock_detect ? -EDEADLK : 0;
381 		goto out_unlock_pi;
382 	}
383 
384 	top_waiter = rt_mutex_top_waiter(lock);
385 
386 	/* Requeue the waiter */
387 	rt_mutex_dequeue(lock, waiter);
388 	waiter->prio = task->prio;
389 	rt_mutex_enqueue(lock, waiter);
390 
391 	/* Release the task */
392 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
393 	if (!rt_mutex_owner(lock)) {
394 		/*
395 		 * If the requeue above changed the top waiter, then we need
396 		 * to wake the new top waiter up to try to get the lock.
397 		 */
398 
399 		if (top_waiter != rt_mutex_top_waiter(lock))
400 			wake_up_process(rt_mutex_top_waiter(lock)->task);
401 		raw_spin_unlock(&lock->wait_lock);
402 		goto out_put_task;
403 	}
404 	put_task_struct(task);
405 
406 	/* Grab the next task */
407 	task = rt_mutex_owner(lock);
408 	get_task_struct(task);
409 	raw_spin_lock_irqsave(&task->pi_lock, flags);
410 
411 	if (waiter == rt_mutex_top_waiter(lock)) {
412 		/* Boost the owner */
413 		rt_mutex_dequeue_pi(task, top_waiter);
414 		rt_mutex_enqueue_pi(task, waiter);
415 		__rt_mutex_adjust_prio(task);
416 
417 	} else if (top_waiter == waiter) {
418 		/* Deboost the owner */
419 		rt_mutex_dequeue_pi(task, waiter);
420 		waiter = rt_mutex_top_waiter(lock);
421 		rt_mutex_enqueue_pi(task, waiter);
422 		__rt_mutex_adjust_prio(task);
423 	}
424 
425 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
426 
427 	top_waiter = rt_mutex_top_waiter(lock);
428 	raw_spin_unlock(&lock->wait_lock);
429 
430 	if (!detect_deadlock && waiter != top_waiter)
431 		goto out_put_task;
432 
433 	goto again;
434 
435  out_unlock_pi:
436 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
437  out_put_task:
438 	put_task_struct(task);
439 
440 	return ret;
441 }
442 
443 /*
444  * Try to take an rt-mutex
445  *
446  * Must be called with lock->wait_lock held.
447  *
448  * @lock:   the lock to be acquired.
449  * @task:   the task which wants to acquire the lock
450  * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
451  */
452 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
453 		struct rt_mutex_waiter *waiter)
454 {
455 	/*
456 	 * We have to be careful here if the atomic speedups are
457 	 * enabled, such that, when
458 	 *  - no other waiter is on the lock
459 	 *  - the lock has been released since we did the cmpxchg
460 	 * the lock can be released or taken while we are doing the
461 	 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
462 	 *
463 	 * The atomic acquire/release aware variant of
464 	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
465 	 * the WAITERS bit, the atomic release / acquire can not
466 	 * happen anymore and lock->wait_lock protects us from the
467 	 * non-atomic case.
468 	 *
469 	 * Note that this might set lock->owner =
470 	 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
471 	 * any more. This is fixed up when we take the ownership.
472 	 * This is the transitional state explained at the top of this file.
473 	 */
474 	mark_rt_mutex_waiters(lock);
475 
476 	if (rt_mutex_owner(lock))
477 		return 0;
478 
479 	/*
480 	 * @task will get the lock if one of these conditions holds:
481 	 * 1) there is no waiter
482 	 * 2) @task has a higher priority than the top waiter
483 	 * 3) @task's waiter is the top waiter
484 	 */
485 	if (rt_mutex_has_waiters(lock)) {
486 		if (task->prio >= rt_mutex_top_waiter(lock)->prio) {
487 			if (!waiter || waiter != rt_mutex_top_waiter(lock))
488 				return 0;
489 		}
490 	}
491 
492 	if (waiter || rt_mutex_has_waiters(lock)) {
493 		unsigned long flags;
494 		struct rt_mutex_waiter *top;
495 
496 		raw_spin_lock_irqsave(&task->pi_lock, flags);
497 
498 		/* remove the queued waiter. */
499 		if (waiter) {
500 			rt_mutex_dequeue(lock, waiter);
501 			task->pi_blocked_on = NULL;
502 		}
503 
504 		/*
505 		 * We have to enqueue the top waiter (if it exists) into
506 		 * the task->pi_waiters tree.
507 		 */
508 		if (rt_mutex_has_waiters(lock)) {
509 			top = rt_mutex_top_waiter(lock);
510 			rt_mutex_enqueue_pi(task, top);
511 		}
512 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
513 	}
514 
515 	/* We got the lock. */
516 	debug_rt_mutex_lock(lock);
517 
518 	rt_mutex_set_owner(lock, task);
519 
520 	rt_mutex_deadlock_account_lock(lock, task);
521 
522 	return 1;
523 }
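/*
 * Note for callers of try_to_take_rt_mutex(): it sets the
 * RT_MUTEX_HAS_WAITERS bit unconditionally via mark_rt_mutex_waiters(),
 * so a failed attempt can leave the bit set although no waiter is
 * queued. That is why the slow paths below call fixup_rt_mutex_waiters()
 * before dropping lock->wait_lock.
 */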
524 
525 /*
526  * Task blocks on lock.
527  *
528  * Prepare waiter and propagate pi chain
529  *
530  * This must be called with lock->wait_lock held.
531  */
532 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
533 				   struct rt_mutex_waiter *waiter,
534 				   struct task_struct *task,
535 				   int detect_deadlock)
536 {
537 	struct task_struct *owner = rt_mutex_owner(lock);
538 	struct rt_mutex_waiter *top_waiter = waiter;
539 	unsigned long flags;
540 	int chain_walk = 0, res;
541 
542 	/*
543 	 * Early deadlock detection. We really don't want the task to
544 	 * enqueue on itself just to untangle the mess later. It's not
545 	 * only an optimization. We drop the locks, so another waiter
546 	 * can come in before the chain walk detects the deadlock. So
547 	 * the other waiter will detect the deadlock and return -EDEADLK,
548 	 * which is wrong, as the other waiter is not in a deadlock
549 	 * situation.
550 	 */
551 	if (detect_deadlock && owner == task)
552 		return -EDEADLK;
553 
554 	raw_spin_lock_irqsave(&task->pi_lock, flags);
555 	__rt_mutex_adjust_prio(task);
556 	waiter->task = task;
557 	waiter->lock = lock;
558 	waiter->prio = task->prio;
559 
560 	/* Get the top priority waiter on the lock */
561 	if (rt_mutex_has_waiters(lock))
562 		top_waiter = rt_mutex_top_waiter(lock);
563 	rt_mutex_enqueue(lock, waiter);
564 
565 	task->pi_blocked_on = waiter;
566 
567 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
568 
569 	if (!owner)
570 		return 0;
571 
572 	if (waiter == rt_mutex_top_waiter(lock)) {
573 		raw_spin_lock_irqsave(&owner->pi_lock, flags);
574 		rt_mutex_dequeue_pi(owner, top_waiter);
575 		rt_mutex_enqueue_pi(owner, waiter);
576 
577 		__rt_mutex_adjust_prio(owner);
578 		if (owner->pi_blocked_on)
579 			chain_walk = 1;
580 		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
581 	}
582 	else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
583 		chain_walk = 1;
584 
585 	if (!chain_walk)
586 		return 0;
587 
588 	/*
589 	 * The owner can't disappear while holding a lock,
590 	 * so the owner struct is protected by wait_lock.
591 	 * Gets dropped in rt_mutex_adjust_prio_chain()!
592 	 */
593 	get_task_struct(owner);
594 
595 	raw_spin_unlock(&lock->wait_lock);
596 
597 	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
598 					 task);
599 
600 	raw_spin_lock(&lock->wait_lock);
601 
602 	return res;
603 }
604 
605 /*
606  * Wake up the next waiter on the lock.
607  *
608  * Remove the top waiter from the current task's waiter list and wake it up.
609  *
610  * Called with lock->wait_lock held.
611  */
612 static void wakeup_next_waiter(struct rt_mutex *lock)
613 {
614 	struct rt_mutex_waiter *waiter;
615 	unsigned long flags;
616 
617 	raw_spin_lock_irqsave(&current->pi_lock, flags);
618 
619 	waiter = rt_mutex_top_waiter(lock);
620 
621 	/*
622 	 * Remove it from current->pi_waiters. We do not adjust a
623 	 * possible priority boost right now. We execute wakeup in the
624 	 * boosted mode and go back to normal after releasing
625 	 * lock->wait_lock.
626 	 */
627 	rt_mutex_dequeue_pi(current, waiter);
628 
629 	rt_mutex_set_owner(lock, NULL);
630 
631 	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
632 
633 	wake_up_process(waiter->task);
634 }
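/*
 * Note that rt_mutex_set_owner(lock, NULL) above, with the woken waiter
 * still enqueued on the lock, leaves lock->owner in the "NULL + bit 0
 * set" transitional state described at the top of this file: the lock
 * is formally free, but the fast path cmpxchg keeps failing until the
 * top waiter actually takes the lock in try_to_take_rt_mutex().
 */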
635 
636 /*
637  * Remove a waiter from a lock and give up
638  *
639  * Must be called with lock->wait_lock held, and only after
640  * try_to_take_rt_mutex() has just failed.
641  */
642 static void remove_waiter(struct rt_mutex *lock,
643 			  struct rt_mutex_waiter *waiter)
644 {
645 	int first = (waiter == rt_mutex_top_waiter(lock));
646 	struct task_struct *owner = rt_mutex_owner(lock);
647 	unsigned long flags;
648 	int chain_walk = 0;
649 
650 	raw_spin_lock_irqsave(&current->pi_lock, flags);
651 	rt_mutex_dequeue(lock, waiter);
652 	current->pi_blocked_on = NULL;
653 	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
654 
655 	if (!owner)
656 		return;
657 
658 	if (first) {
659 
660 		raw_spin_lock_irqsave(&owner->pi_lock, flags);
661 
662 		rt_mutex_dequeue_pi(owner, waiter);
663 
664 		if (rt_mutex_has_waiters(lock)) {
665 			struct rt_mutex_waiter *next;
666 
667 			next = rt_mutex_top_waiter(lock);
668 			rt_mutex_enqueue_pi(owner, next);
669 		}
670 		__rt_mutex_adjust_prio(owner);
671 
672 		if (owner->pi_blocked_on)
673 			chain_walk = 1;
674 
675 		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
676 	}
677 
678 	if (!chain_walk)
679 		return;
680 
681 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
682 	get_task_struct(owner);
683 
684 	raw_spin_unlock(&lock->wait_lock);
685 
686 	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
687 
688 	raw_spin_lock(&lock->wait_lock);
689 }
690 
691 /*
692  * Recheck the pi chain, in case the task's priority was just changed
693  *
694  * Called from sched_setscheduler
695  */
696 void rt_mutex_adjust_pi(struct task_struct *task)
697 {
698 	struct rt_mutex_waiter *waiter;
699 	unsigned long flags;
700 
701 	raw_spin_lock_irqsave(&task->pi_lock, flags);
702 
703 	waiter = task->pi_blocked_on;
704 	if (!waiter || (waiter->prio == task->prio &&
705 			!dl_prio(task->prio))) {
706 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
707 		return;
708 	}
709 
710 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
711 
712 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
713 	get_task_struct(task);
714 	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
715 }
716 
717 /**
718  * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
719  * @lock:		 the rt_mutex to take
720  * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
721  * 			 or TASK_UNINTERRUPTIBLE)
722  * @timeout:		 the pre-initialized and started timer, or NULL for none
723  * @waiter:		 the pre-initialized rt_mutex_waiter
724  *
725  * lock->wait_lock must be held by the caller.
726  */
727 static int __sched
728 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
729 		    struct hrtimer_sleeper *timeout,
730 		    struct rt_mutex_waiter *waiter)
731 {
732 	int ret = 0;
733 
734 	for (;;) {
735 		/* Try to acquire the lock: */
736 		if (try_to_take_rt_mutex(lock, current, waiter))
737 			break;
738 
739 		/*
740 		 * TASK_INTERRUPTIBLE checks for signals and
741 		 * timeout. Ignored otherwise.
742 		 */
743 		if (unlikely(state == TASK_INTERRUPTIBLE)) {
744 			/* Signal pending? */
745 			if (signal_pending(current))
746 				ret = -EINTR;
747 			if (timeout && !timeout->task)
748 				ret = -ETIMEDOUT;
749 			if (ret)
750 				break;
751 		}
752 
753 		raw_spin_unlock(&lock->wait_lock);
754 
755 		debug_rt_mutex_print_deadlock(waiter);
756 
757 		schedule_rt_mutex(lock);
758 
759 		raw_spin_lock(&lock->wait_lock);
760 		set_current_state(state);
761 	}
762 
763 	return ret;
764 }
765 
766 /*
767  * Slow path lock function:
768  */
769 static int __sched
770 rt_mutex_slowlock(struct rt_mutex *lock, int state,
771 		  struct hrtimer_sleeper *timeout,
772 		  int detect_deadlock)
773 {
774 	struct rt_mutex_waiter waiter;
775 	int ret = 0;
776 
777 	debug_rt_mutex_init_waiter(&waiter);
778 	RB_CLEAR_NODE(&waiter.pi_tree_entry);
779 	RB_CLEAR_NODE(&waiter.tree_entry);
780 
781 	raw_spin_lock(&lock->wait_lock);
782 
783 	/* Try to acquire the lock again: */
784 	if (try_to_take_rt_mutex(lock, current, NULL)) {
785 		raw_spin_unlock(&lock->wait_lock);
786 		return 0;
787 	}
788 
789 	set_current_state(state);
790 
791 	/* Set up the timer when timeout != NULL */
792 	if (unlikely(timeout)) {
793 		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
794 		if (!hrtimer_active(&timeout->timer))
795 			timeout->task = NULL;
796 	}
797 
798 	ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);
799 
800 	if (likely(!ret))
801 		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
802 
803 	set_current_state(TASK_RUNNING);
804 
805 	if (unlikely(ret))
806 		remove_waiter(lock, &waiter);
807 
808 	/*
809 	 * try_to_take_rt_mutex() sets the waiter bit
810 	 * unconditionally. We might have to fix that up.
811 	 */
812 	fixup_rt_mutex_waiters(lock);
813 
814 	raw_spin_unlock(&lock->wait_lock);
815 
816 	/* Remove pending timer: */
817 	if (unlikely(timeout))
818 		hrtimer_cancel(&timeout->timer);
819 
820 	debug_rt_mutex_free_waiter(&waiter);
821 
822 	return ret;
823 }
824 
825 /*
826  * Slow path try-lock function:
827  */
828 static inline int
829 rt_mutex_slowtrylock(struct rt_mutex *lock)
830 {
831 	int ret = 0;
832 
833 	raw_spin_lock(&lock->wait_lock);
834 
835 	if (likely(rt_mutex_owner(lock) != current)) {
836 
837 		ret = try_to_take_rt_mutex(lock, current, NULL);
838 		/*
839 		 * try_to_take_rt_mutex() sets the lock waiters
840 		 * bit unconditionally. Clean this up.
841 		 */
842 		fixup_rt_mutex_waiters(lock);
843 	}
844 
845 	raw_spin_unlock(&lock->wait_lock);
846 
847 	return ret;
848 }
849 
850 /*
851  * Slow path to release a rt-mutex:
852  */
853 static void __sched
854 rt_mutex_slowunlock(struct rt_mutex *lock)
855 {
856 	raw_spin_lock(&lock->wait_lock);
857 
858 	debug_rt_mutex_unlock(lock);
859 
860 	rt_mutex_deadlock_account_unlock(current);
861 
862 	if (!rt_mutex_has_waiters(lock)) {
863 		lock->owner = NULL;
864 		raw_spin_unlock(&lock->wait_lock);
865 		return;
866 	}
867 
868 	wakeup_next_waiter(lock);
869 
870 	raw_spin_unlock(&lock->wait_lock);
871 
872 	/* Undo pi boosting if necessary: */
873 	rt_mutex_adjust_prio(current);
874 }
875 
876 /*
877  * debug aware fast / slowpath lock, trylock and unlock functions
878  *
879  * The atomic acquire/release ops are compiled away when either the
880  * architecture does not support cmpxchg or when debugging is enabled.
881  */
882 static inline int
883 rt_mutex_fastlock(struct rt_mutex *lock, int state,
884 		  int detect_deadlock,
885 		  int (*slowfn)(struct rt_mutex *lock, int state,
886 				struct hrtimer_sleeper *timeout,
887 				int detect_deadlock))
888 {
889 	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
890 		rt_mutex_deadlock_account_lock(lock, current);
891 		return 0;
892 	} else
893 		return slowfn(lock, state, NULL, detect_deadlock);
894 }
895 
896 static inline int
897 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
898 			struct hrtimer_sleeper *timeout, int detect_deadlock,
899 			int (*slowfn)(struct rt_mutex *lock, int state,
900 				      struct hrtimer_sleeper *timeout,
901 				      int detect_deadlock))
902 {
903 	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
904 		rt_mutex_deadlock_account_lock(lock, current);
905 		return 0;
906 	} else
907 		return slowfn(lock, state, timeout, detect_deadlock);
908 }
909 
910 static inline int
911 rt_mutex_fasttrylock(struct rt_mutex *lock,
912 		     int (*slowfn)(struct rt_mutex *lock))
913 {
914 	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
915 		rt_mutex_deadlock_account_lock(lock, current);
916 		return 1;
917 	}
918 	return slowfn(lock);
919 }
920 
921 static inline void
922 rt_mutex_fastunlock(struct rt_mutex *lock,
923 		    void (*slowfn)(struct rt_mutex *lock))
924 {
925 	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
926 		rt_mutex_deadlock_account_unlock(current);
927 	else
928 		slowfn(lock);
929 }
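/*
 * A small note on the helpers above: the cmpxchg fast path is only
 * attempted when no deadlock detection was requested, since detection
 * requires the slow path to walk the lock chain. With detect_deadlock
 * set, even an uncontended acquisition goes through slowfn().
 */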
930 
931 /**
932  * rt_mutex_lock - lock a rt_mutex
933  *
934  * @lock: the rt_mutex to be locked
935  */
936 void __sched rt_mutex_lock(struct rt_mutex *lock)
937 {
938 	might_sleep();
939 
940 	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
941 }
942 EXPORT_SYMBOL_GPL(rt_mutex_lock);
943 
944 /**
945  * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly
946  *
947  * @lock: 		the rt_mutex to be locked
948  * @detect_deadlock:	deadlock detection on/off
949  *
950  * Returns:
951  *  0 		on success
952  * -EINTR 	when interrupted by a signal
953  * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
954  */
955 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
956 						 int detect_deadlock)
957 {
958 	might_sleep();
959 
960 	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
961 				 detect_deadlock, rt_mutex_slowlock);
962 }
963 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
964 
965 /**
966  * rt_mutex_timed_lock - lock a rt_mutex interruptibly; the timeout
967  *			structure is provided
968  *			by the caller
969  *
970  * @lock: 		the rt_mutex to be locked
971  * @timeout:		timeout structure or NULL (no timeout)
972  * @detect_deadlock:	deadlock detection on/off
973  *
974  * Returns:
975  *  0 		on success
976  * -EINTR 	when interrupted by a signal
977  * -ETIMEDOUT	when the timeout expired
978  * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
979  */
980 int
981 rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
982 		    int detect_deadlock)
983 {
984 	might_sleep();
985 
986 	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
987 				       detect_deadlock, rt_mutex_slowlock);
988 }
989 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
990 
991 /**
992  * rt_mutex_trylock - try to lock a rt_mutex
993  *
994  * @lock:	the rt_mutex to be locked
995  *
996  * Returns 1 on success and 0 on contention
997  */
998 int __sched rt_mutex_trylock(struct rt_mutex *lock)
999 {
1000 	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
1001 }
1002 EXPORT_SYMBOL_GPL(rt_mutex_trylock);
1003 
1004 /**
1005  * rt_mutex_unlock - unlock a rt_mutex
1006  *
1007  * @lock: the rt_mutex to be unlocked
1008  */
1009 void __sched rt_mutex_unlock(struct rt_mutex *lock)
1010 {
1011 	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
1012 }
1013 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
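/*
 * A minimal usage sketch of the public API above (my_rt_lock and
 * my_shared_count are made-up example names, not part of this file):
 *
 *	static DEFINE_RT_MUTEX(my_rt_lock);
 *	static int my_shared_count;
 *
 *	static void my_update(void)
 *	{
 *		rt_mutex_lock(&my_rt_lock);
 *		my_shared_count++;
 *		rt_mutex_unlock(&my_rt_lock);
 *	}
 *
 * rt_mutex_lock() may sleep, so this may only be used in process
 * context. Unlike a plain mutex, a blocked higher priority task boosts
 * the current owner's priority, as implemented above.
 */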
1014 
1015 /**
1016  * rt_mutex_destroy - mark a mutex unusable
1017  * @lock: the mutex to be destroyed
1018  *
1019  * This function marks the mutex uninitialized, and any subsequent
1020  * use of the mutex is forbidden. The mutex must not be locked when
1021  * this function is called.
1022  */
1023 void rt_mutex_destroy(struct rt_mutex *lock)
1024 {
1025 	WARN_ON(rt_mutex_is_locked(lock));
1026 #ifdef CONFIG_DEBUG_RT_MUTEXES
1027 	lock->magic = NULL;
1028 #endif
1029 }
1030 
1031 EXPORT_SYMBOL_GPL(rt_mutex_destroy);
1032 
1033 /**
1034  * __rt_mutex_init - initialize the rt lock
1035  *
1036  * @lock: the rt lock to be initialized
1037  *
1038  * Initialize the rt lock to unlocked state.
1039  *
1040  * Initializing of a locked rt lock is not allowed
1041  */
1042 void __rt_mutex_init(struct rt_mutex *lock, const char *name)
1043 {
1044 	lock->owner = NULL;
1045 	raw_spin_lock_init(&lock->wait_lock);
1046 	lock->waiters = RB_ROOT;
1047 	lock->waiters_leftmost = NULL;
1048 
1049 	debug_rt_mutex_init(lock, name);
1050 }
1051 EXPORT_SYMBOL_GPL(__rt_mutex_init);
1052 
1053 /**
1054  * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
1055  *				proxy owner
1056  *
1057  * @lock: 	the rt_mutex to be locked
1058  * @proxy_owner:	the task to set as owner
1059  *
1060  * No locking. The caller has to do the serializing itself.
1061  * Special API call for PI-futex support
1062  */
1063 void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
1064 				struct task_struct *proxy_owner)
1065 {
1066 	__rt_mutex_init(lock, NULL);
1067 	debug_rt_mutex_proxy_lock(lock, proxy_owner);
1068 	rt_mutex_set_owner(lock, proxy_owner);
1069 	rt_mutex_deadlock_account_lock(lock, proxy_owner);
1070 }
1071 
1072 /**
1073  * rt_mutex_proxy_unlock - release a lock on behalf of owner
1074  *
1075  * @lock: 	the rt_mutex to be unlocked
1076  *
1077  * No locking. The caller has to do the serializing itself.
1078  * Special API call for PI-futex support
1079  */
1080 void rt_mutex_proxy_unlock(struct rt_mutex *lock,
1081 			   struct task_struct *proxy_owner)
1082 {
1083 	debug_rt_mutex_proxy_unlock(lock);
1084 	rt_mutex_set_owner(lock, NULL);
1085 	rt_mutex_deadlock_account_unlock(proxy_owner);
1086 }
1087 
1088 /**
1089  * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
1090  * @lock:		the rt_mutex to take
1091  * @waiter:		the pre-initialized rt_mutex_waiter
1092  * @task:		the task to prepare
1093  * @detect_deadlock:	perform deadlock detection (1) or not (0)
1094  *
1095  * Returns:
1096  *  0 - task blocked on lock
1097  *  1 - acquired the lock for task, caller should wake it up
1098  * <0 - error
1099  *
1100  * Special API call for FUTEX_REQUEUE_PI support.
1101  */
1102 int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
1103 			      struct rt_mutex_waiter *waiter,
1104 			      struct task_struct *task, int detect_deadlock)
1105 {
1106 	int ret;
1107 
1108 	raw_spin_lock(&lock->wait_lock);
1109 
1110 	if (try_to_take_rt_mutex(lock, task, NULL)) {
1111 		raw_spin_unlock(&lock->wait_lock);
1112 		return 1;
1113 	}
1114 
1115 	ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
1116 
1117 	if (ret && !rt_mutex_owner(lock)) {
1118 		/*
1119 		 * Reset the return value. We might have
1120 		 * returned with -EDEADLK and the owner
1121 		 * released the lock while we were walking the
1122 		 * pi chain.  Let the waiter sort it out.
1123 		 */
1124 		ret = 0;
1125 	}
1126 
1127 	if (unlikely(ret))
1128 		remove_waiter(lock, waiter);
1129 
1130 	raw_spin_unlock(&lock->wait_lock);
1131 
1132 	debug_rt_mutex_print_deadlock(waiter);
1133 
1134 	return ret;
1135 }
1136 
1137 /**
1138  * rt_mutex_next_owner - return the next owner of the lock
1139  *
1140  * @lock: the rt lock to query
1141  *
1142  * Returns the next owner of the lock or NULL
1143  *
1144  * Caller has to serialize against other accessors to the lock
1145  * itself.
1146  *
1147  * Special API call for PI-futex support
1148  */
1149 struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
1150 {
1151 	if (!rt_mutex_has_waiters(lock))
1152 		return NULL;
1153 
1154 	return rt_mutex_top_waiter(lock)->task;
1155 }
1156 
1157 /**
1158  * rt_mutex_finish_proxy_lock() - Complete lock acquisition
1159  * @lock:		the rt_mutex we were woken on
1160  * @to:			the timeout, NULL if none. The hrtimer should already
1161  * 			have been started.
1162  * @waiter:		the pre-initialized rt_mutex_waiter
1163  * @detect_deadlock:	perform deadlock detection (1) or not (0)
1164  *
1165  * Complete the lock acquisition started on our behalf by another thread.
1166  *
1167  * Returns:
1168  *  0 - success
1169  * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
1170  *
1171  * Special API call for PI-futex requeue support
1172  */
1173 int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
1174 			       struct hrtimer_sleeper *to,
1175 			       struct rt_mutex_waiter *waiter,
1176 			       int detect_deadlock)
1177 {
1178 	int ret;
1179 
1180 	raw_spin_lock(&lock->wait_lock);
1181 
1182 	set_current_state(TASK_INTERRUPTIBLE);
1183 
1184 	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
1185 
1186 	set_current_state(TASK_RUNNING);
1187 
1188 	if (unlikely(ret))
1189 		remove_waiter(lock, waiter);
1190 
1191 	/*
1192 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
1193 	 * have to fix that up.
1194 	 */
1195 	fixup_rt_mutex_waiters(lock);
1196 
1197 	raw_spin_unlock(&lock->wait_lock);
1198 
1199 	return ret;
1200 }
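/*
 * Taken together, the two proxy helpers implement the requeue side of
 * PI futexes: rt_mutex_start_proxy_lock() enqueues @task as a waiter
 * (or acquires the lock for it) on behalf of the task doing the
 * requeue, and rt_mutex_finish_proxy_lock() is later run by the woken
 * waiter itself to complete or abort the acquisition.
 */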
1201