xref: /openbmc/linux/kernel/locking/mutex.c (revision 174cd4b1)
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
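
/*
 * Usage sketch (illustrative, not part of the original file): mutexes are
 * normally initialized with the mutex_init() wrapper or defined statically
 * with DEFINE_MUTEX(); both end up in __mutex_init(). The names below are
 * hypothetical.
 *
 *	static DEFINE_MUTEX(example_static_lock);
 *
 *	struct example_dev {
 *		struct mutex lock;
 *	};
 *
 *	static void example_dev_init(struct example_dev *dev)
 *	{
 *		mutex_init(&dev->lock);
 *	}
 */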

/*
 * @owner: contains: 'struct task_struct *' to the current lock owner.
 * NULL means not owned. Since task_struct pointers are aligned to at
 * least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07
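
/*
 * Worked example (illustrative): with a task_struct at 0xffff888012345600
 * holding the lock and a non-empty wait list, the owner word is:
 *
 *	owner = 0xffff888012345600 | MUTEX_FLAG_WAITERS;	// 0xffff888012345601
 *	__owner_task(owner)	// 0xffff888012345600
 *	__owner_flags(owner)	// MUTEX_FLAG_WAITERS
 */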

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (likely(task != curr))
				break;

			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
				break;

			flags &= ~MUTEX_FLAG_PICKUP;
		} else {
#ifdef CONFIG_DEBUG_MUTEXES
			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
#endif
		}

		/*
		 * If we set the HANDOFF bit, we must make sure it doesn't
		 * live past the point where we acquire the lock. This would
		 * be possible if we (accidentally) left the bit set on an
		 * unlocked mutex.
		 */
		flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return NULL;

		owner = old;
	}

	return __owner_task(owner);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_or_owner(lock);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are confined to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (!atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock;
 * __mutex_trylock() provides the matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
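
/*
 * Usage sketch (illustrative, not part of the original file): the canonical
 * lock/unlock pairing around a critical section. Hypothetical names.
 *
 *	static DEFINE_MUTEX(example_lock);
 *	static int example_count;
 *
 *	static void example_inc(void)
 *	{
 *		mutex_lock(&example_lock);
 *		example_count++;	// protected by example_lock
 *		mutex_unlock(&example_lock);
 *	}
 */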

static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released it with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done()?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
	return a->stamp - b->stamp <= LONG_MAX &&
	       (a->stamp != b->stamp || a > b);
}
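
/*
 * Worked example (illustrative): the unsigned subtraction makes the
 * comparison safe across stamp wrap-around. With b->stamp == ULONG_MAX and
 * a->stamp == 2, a->stamp - b->stamp == 3 <= LONG_MAX, so @a is correctly
 * ordered after @b even though it is numerically smaller. Equal stamps fall
 * back to comparing the context addresses, keeping the order total.
 */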

/*
 * Wake up any waiters that may have to back off when the lock is held by the
 * given context.
 *
 * Due to the invariants on the wait list, this can only affect the first
 * waiter with a context.
 *
 * The current task must not be on the wait list.
 */
static void __sched
__ww_mutex_wakeup_for_backoff(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;

	lockdep_assert_held(&lock->wait_lock);

	list_for_each_entry(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (cur->ww_ctx->acquired > 0 &&
		    __ww_ctx_stamp_after(cur->ww_ctx, ww_ctx)) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}

		break;
	}
}

/*
 * After acquiring the lock with the fastpath, or when we lost out in the
 * contested slowpath, set ctx and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. A contended waiter will either see ww_ctx == NULL
	 * and keep spinning, or it will acquire wait_lock, add itself
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if the lock is contended; if not, there is nobody to wake up.
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in the fastpath; wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock(&lock->base.wait_lock);
	__ww_mutex_wakeup_for_backoff(&lock->base, ctx);
	spin_unlock(&lock->base.wait_lock);
}

/*
 * After acquiring the lock in the slowpath, set ctx.
 *
 * Unlike for the fast path, the caller ensures that waiters are woken up where
 * necessary.
 *
 * Callers must hold the mutex wait_lock.
 */
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);
	lock->ctx = ctx;
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set, its contents are undefined; only
	 * acquiring wait_lock guarantees that they are valid when
	 * read.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted() to detect lock holder preemption.
		 */
		if (!owner->on_cpu || need_resched() ||
				vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);

	/*
	 * To guard against lock holder preemption, skip spinning if the
	 * owner task is not running on a CPU or if its CPU is preempted.
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using an MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * @waiter is non-NULL if the spinner is already a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take the OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take an
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so; otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
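
/*
 * Usage sketch (illustrative, not part of the original file): the classic
 * acquire/backoff pattern for taking two ww_mutexes of the same class in an
 * arbitrary order. On -EDEADLK the younger context unlocks what it holds,
 * waits for the contended lock with ww_mutex_lock_slow() and retries. All
 * names are hypothetical; interruptible variants and other error handling
 * are elided.
 *
 *	static DEFINE_WW_CLASS(example_ww_class);
 *
 *	static void example_lock_both(struct ww_mutex *first,
 *				      struct ww_mutex *second)
 *	{
 *		struct ww_acquire_ctx ctx;
 *
 *		ww_acquire_init(&ctx, &example_ww_class);
 *
 *		ww_mutex_lock(first, &ctx);
 *		while (ww_mutex_lock(second, &ctx) == -EDEADLK) {
 *			ww_mutex_unlock(first);
 *			ww_mutex_lock_slow(second, &ctx);
 *			swap(first, second);
 *		}
 *		ww_acquire_done(&ctx);
 *
 *		// ... both locks held; do the work, then:
 *
 *		ww_mutex_unlock(first);
 *		ww_mutex_unlock(second);
 *		ww_acquire_fini(&ctx);
 *	}
 */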

static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct mutex_waiter *waiter,
			    struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct mutex_waiter *cur;

	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
		goto deadlock;

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must back off.
	 */
	cur = waiter;
	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
		if (cur->ww_ctx)
			goto deadlock;
	}

	return 0;

deadlock:
#ifdef CONFIG_DEBUG_MUTEXES
	DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
	ctx->contending_lock = ww;
#endif
	return -EDEADLK;
}

static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
		      struct mutex *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;
	struct list_head *pos;

	if (!ww_ctx) {
		list_add_tail(&waiter->list, &lock->wait_list);
		return 0;
	}

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving
	 * them.
	 */
	pos = &lock->wait_list;
	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
			/* Back off immediately if necessary. */
			if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
				struct ww_mutex *ww;

				ww = container_of(lock, struct ww_mutex, base);
				DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
				ww_ctx->contending_lock = ww;
#endif
				return -EDEADLK;
			}

			break;
		}

		pos = &cur->list;

		/*
		 * Wake up the waiter so that it gets a chance to back
		 * off.
		 */
		if (cur->ww_ctx->acquired > 0) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}
	}

	list_add_tail(&waiter->list, pos);
	return 0;
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	bool first = false;
	struct ww_mutex *ww;
	int ret;

	might_sleep();

	ww = container_of(lock, struct ww_mutex, base);
	if (use_ww_ctx && ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (use_ww_ctx && ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (use_ww_ctx && ww_ctx)
			__ww_mutex_wakeup_for_backoff(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, current);

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		list_add_tail(&waiter.list, &lock->wait_list);

#ifdef CONFIG_DEBUG_MUTEXES
		waiter.ww_ctx = MUTEX_POISON_WW_CTX;
#endif
	} else {
		/* Add in stamp order, waking up waiters that must back off. */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_backoff;

		waiter.ww_ctx = ww_ctx;
	}

	waiter.task = current;

	if (__mutex_waiter_is_first(lock, &waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);

	set_current_state(state);
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, so do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and wound conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (unlikely(signal_pending_state(state, current))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
			ret = __ww_mutex_lock_check_stamp(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		/*
		 * ww_mutex needs to always recheck its position since its waiter
		 * list is not FIFO ordered.
		 */
		if ((use_ww_ctx && ww_ctx) || !first) {
			first = __mutex_waiter_is_first(lock, &waiter);
			if (first)
				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock(lock) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
			break;

		spin_lock(&lock->wait_lock);
	}
	spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);

	mutex_remove_waiter(lock, &waiter, current);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (use_ww_ctx && ww_ctx)
		ww_mutex_set_context_slowpath(ww, ww_ctx);

	spin_unlock(&lock->wait_lock);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	mutex_remove_waiter(lock, &waiter, current);
err_early_backoff:
	spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
		struct lockdep_map *nest_lock, unsigned long ip,
		struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);
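
/*
 * Usage sketch (illustrative, not part of the original file): a subclass
 * annotation tells lockdep that taking two locks of the same class is
 * intentional, e.g. when a parent and a child object are locked in a fixed
 * order. struct example_obj is hypothetical; SINGLE_DEPTH_NESTING is the
 * conventional subclass for a single nesting level.
 *
 *	static void example_lock_pair(struct example_obj *parent,
 *				      struct example_obj *child)
 *	{
 *		mutex_lock(&parent->lock);
 *		mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	}
 */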

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;

	mutex_release(&lock->dep_map, 1, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF is set: in that case we must not clear the
	 * owner field, but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));
		if (old == owner) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}

		owner = old;
	}

	spin_lock(&lock->wait_lock);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(). Return 0 if the mutex has been
 * acquired, sleeping until it becomes available if necessary. If a
 * signal arrives while waiting for the lock, this function returns
 * -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_interruptible);
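
/*
 * Usage sketch (illustrative, not part of the original file): callers must
 * check the return value and back out on -EINTR, typically translating it
 * to -ERESTARTSYS for userspace. Hypothetical names.
 *
 *	static int example_ioctl_op(struct example_dev *dev)
 *	{
 *		if (mutex_lock_interruptible(&dev->lock))
 *			return -ERESTARTSYS;
 *		// ... critical section ...
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 */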

int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked = __mutex_trylock(lock);

	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
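
/*
 * Usage sketch (illustrative, not part of the original file): because
 * mutex_trylock() returns 1 on success, the common pattern is an
 * opportunistic fast path that backs off when the lock is contended.
 * Hypothetical names.
 *
 *	static void example_flush(struct example_dev *dev)
 *	{
 *		if (!mutex_trylock(&dev->lock))
 *			return;		// busy; try again next time
 *		// ... do the flush ...
 *		mutex_unlock(&dev->lock);
 *	}
 */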

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic counter which we are to decrement
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return 1 and hold the lock if the decrement reached 0, otherwise return 0.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
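
/*
 * Usage sketch (illustrative, not part of the original file): the typical
 * refcounted-teardown use, where dropping the last reference must remove the
 * object from a lookup list under the list's mutex. Hypothetical names.
 *
 *	static void example_put(struct example_obj *obj)
 *	{
 *		if (!atomic_dec_and_mutex_lock(&obj->refcnt, &example_list_lock))
 *			return;		// not the last reference
 *		list_del(&obj->node);
 *		mutex_unlock(&example_list_lock);
 *		kfree(obj);
 *	}
 */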
1243