/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
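
/*
 * A minimal usage sketch (hypothetical caller code, not part of this file).
 * Mutexes are defined statically with DEFINE_MUTEX() or initialized at
 * runtime with the mutex_init() wrapper; both routes end up in
 * __mutex_init() above rather than in hand-rolled field assignments.
 *
 *	static DEFINE_MUTEX(foo_lock);
 *
 *	struct foo {
 *		struct mutex lock;
 *	};
 *
 *	static void foo_setup(struct foo *f)
 *	{
 *		mutex_init(&f->lock);
 *	}
 */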

/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned to at
 * least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07
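
/*
 * Illustration of the owner-word encoding described above (a sketch with
 * illustrative values, not additional API): the task_struct pointer and
 * the three flag bits share one atomic_long_t, so owner and flags can be
 * updated together with a single cmpxchg.
 *
 *	owner = (unsigned long)task | MUTEX_FLAG_WAITERS;
 *	task  = (struct task_struct *)(owner & ~MUTEX_FLAGS);
 *	flags = owner & MUTEX_FLAGS;
 */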

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (likely(task != curr))
				break;

			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
				break;

			flags &= ~MUTEX_FLAG_PICKUP;
		} else {
#ifdef CONFIG_DEBUG_MUTEXES
			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
#endif
		}

		/*
		 * If we set the HANDOFF bit, we must make sure it doesn't
		 * live past the point where we acquire the lock. This would
		 * be possible if we (accidentally) set the bit on an
		 * unlocked mutex.
		 */
		flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return NULL;

		owner = old;
	}

	return __owner_task(owner);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_or_owner(lock);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (!atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is
 * equivalent to a regular unlock. Sets PICKUP on a handoff, clears
 * HANDOFF, preserves WAITERS. Provides RELEASE semantics like a regular
 * unlock; __mutex_trylock() provides matching ACQUIRE semantics for the
 * handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
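
/*
 * A minimal usage sketch (hypothetical caller code, not part of this file):
 *
 *	mutex_lock(&foo->lock);
 *	...				(critical section; may sleep)
 *	mutex_unlock(&foo->lock);
 *
 * Because mutex_lock() may sleep, it must not be called from interrupt
 * context or with preemption disabled.
 */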
#endif

static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen; always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done()?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
	return a->stamp - b->stamp <= LONG_MAX &&
	       (a->stamp != b->stamp || a > b);
}
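
/*
 * Worked example for the wraparound-safe comparison above (numbers are
 * illustrative only): with a->stamp == 5 and b->stamp == 3 the unsigned
 * difference is 2 <= LONG_MAX, so @a is "after" (younger than) @b. With
 * a->stamp == 1 and b->stamp == ULONG_MAX (the stamp counter wrapped),
 * the difference is again 2, so @a is still correctly seen as younger.
 * Equal stamps are broken by comparing the context addresses.
 */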

/*
 * Wake up any waiters that may have to back off when the lock is held by the
 * given context.
 *
 * Due to the invariants on the wait list, this can only affect the first
 * waiter with a context.
 *
 * The current task must not be on the wait list.
 */
static void __sched
__ww_mutex_wakeup_for_backoff(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;

	lockdep_assert_held(&lock->wait_lock);

	list_for_each_entry(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (cur->ww_ctx->acquired > 0 &&
		    __ww_ctx_stamp_after(cur->ww_ctx, ww_ctx)) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}

		break;
	}
}

/*
 * After acquiring the lock with the fastpath, or when we lost out in the
 * contested slowpath, set ctx and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire the wait_lock, add
	 * themselves to the waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if the lock is contended; if not, there is nobody to wake up.
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in the fastpath. Wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock(&lock->base.wait_lock);
	__ww_mutex_wakeup_for_backoff(&lock->base, ctx);
	spin_unlock(&lock->base.wait_lock);
}

/*
 * After acquiring the lock in the slowpath, set ctx.
 *
 * Unlike for the fast path, the caller ensures that waiters are woken up where
 * necessary.
 *
 * Callers must hold the mutex wait_lock.
 */
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);
	lock->ctx = ctx;
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set, its contents are undefined; only by
	 * acquiring the wait_lock is there a guarantee that they
	 * are valid when read.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that lock->owner still matches owner. If that
		 * fails, owner might point to freed memory. If it still
		 * matches, the rcu_read_lock() ensures the memory stays
		 * valid.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted to detect lock holder preemption.
		 */
		if (!owner->on_cpu || need_resched() ||
				vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);

	/*
	 * Due to lock holder preemption, we skip spinning if the task is
	 * not on a CPU or if its CPU is preempted.
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using an MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take the OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take an
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
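
/*
 * A minimal wound/wait usage sketch (hypothetical caller code, not part of
 * this file; my_ww_class and the locks are made up, and the -EDEADLK retry
 * loop is elided; see Documentation/locking/ww-mutex-design.txt for the
 * full protocol):
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &my_ww_class);
 *	ww_mutex_lock(&a->lock, &ctx);
 *	ww_mutex_lock(&b->lock, &ctx);
 *	...				(both locks held)
 *	ww_mutex_unlock(&b->lock);
 *	ww_mutex_unlock(&a->lock);
 *	ww_acquire_fini(&ctx);
 */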

static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct mutex_waiter *waiter,
			    struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct mutex_waiter *cur;

	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
		goto deadlock;

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must back off.
	 */
	cur = waiter;
	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
		if (cur->ww_ctx)
			goto deadlock;
	}

	return 0;

deadlock:
#ifdef CONFIG_DEBUG_MUTEXES
	DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
	ctx->contending_lock = ww;
#endif
	return -EDEADLK;
}

static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
		      struct mutex *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;
	struct list_head *pos;

	if (!ww_ctx) {
		list_add_tail(&waiter->list, &lock->wait_list);
		return 0;
	}

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving
	 * them.
	 */
	pos = &lock->wait_list;
	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
			/* Back off immediately if necessary. */
			if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
				struct ww_mutex *ww;

				ww = container_of(lock, struct ww_mutex, base);
				DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
				ww_ctx->contending_lock = ww;
#endif
				return -EDEADLK;
			}

			break;
		}

		pos = &cur->list;

		/*
		 * Wake up the waiter so that it gets a chance to back
		 * off.
		 */
		if (cur->ww_ctx->acquired > 0) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}
	}

	list_add_tail(&waiter->list, pos);
	return 0;
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	bool first = false;
	struct ww_mutex *ww;
	int ret;

	might_sleep();

	ww = container_of(lock, struct ww_mutex, base);
	if (use_ww_ctx && ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (use_ww_ctx && ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (use_ww_ctx && ww_ctx)
			__ww_mutex_wakeup_for_backoff(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, current);

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		list_add_tail(&waiter.list, &lock->wait_list);

#ifdef CONFIG_DEBUG_MUTEXES
		waiter.ww_ctx = MUTEX_POISON_WW_CTX;
#endif
	} else {
		/* Add in stamp order, waking up waiters that must back off. */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_backoff;

		waiter.ww_ctx = ww_ctx;
	}

	waiter.task = current;

	if (__mutex_waiter_is_first(lock, &waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);

	set_current_state(state);
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, so do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and wound conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (unlikely(signal_pending_state(state, current))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
			ret = __ww_mutex_lock_check_stamp(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		/*
		 * ww_mutex needs to always recheck its position since its
		 * waiter list is not FIFO ordered.
		 */
		if ((use_ww_ctx && ww_ctx) || !first) {
			first = __mutex_waiter_is_first(lock, &waiter);
			if (first)
				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock(lock) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
			break;

		spin_lock(&lock->wait_lock);
	}
	spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);

	mutex_remove_waiter(lock, &waiter, current);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (use_ww_ctx && ww_ctx)
		ww_mutex_set_context_slowpath(ww, ww_ctx);

	spin_unlock(&lock->wait_lock);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	mutex_remove_waiter(lock, &waiter, current);
err_early_backoff:
	spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
		struct lockdep_map *nest_lock, unsigned long ip,
		struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}
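
/*
 * Note on the interval update above: tmp*2 + tmp + tmp/2 grows the
 * injection interval by roughly 3.5x per trigger (e.g. 1, 3, 10, 35, ...),
 * so the artificial -EDEADLK backoffs become exponentially rarer as a
 * workload keeps running.
 */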

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;

	mutex_release(&lock->dep_map, 1, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF is set; in that case we must not clear the
	 * owner field, but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));
		if (old == owner) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}

		owner = old;
	}

	spin_lock(&lock->wait_lock);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked = __mutex_trylock(lock);

	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
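
/*
 * A minimal usage sketch (hypothetical caller code, not part of this file),
 * illustrating the spin_trylock()-style convention noted above: 1 means
 * acquired, 0 means contended.
 *
 *	if (mutex_trylock(&foo->lock)) {
 *		do_work(foo);
 *		mutex_unlock(&foo->lock);
 *	} else {
 *		...			(back off or retry later)
 *	}
 */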

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic to decrement
 * @lock: the mutex to return holding if we dec to 0
 *
 * Returns 1 and holds @lock if we decremented @cnt to 0; returns 0 otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
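
/*
 * A minimal usage sketch (hypothetical refcounted object, not part of this
 * file): atomic_dec_and_mutex_lock() implements the classic "drop the last
 * reference under a lock" pattern.
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcount, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		mutex_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */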