xref: /openbmc/linux/kernel/locking/mutex.c (revision c8f14e2b)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifndef CONFIG_PREEMPT_RT
#include "mutex.h"

#ifdef CONFIG_DEBUG_MUTEXES
# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
#else
# define MUTEX_WARN_ON(cond)
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	raw_spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
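
/*
 * Illustrative usage sketch (not part of this file): callers normally use
 * the mutex_init() wrapper or DEFINE_MUTEX() rather than calling
 * __mutex_init() directly. The my_dev structure below is hypothetical.
 *
 *	static DEFINE_MUTEX(global_lock);	// statically initialized
 *
 *	struct my_dev {
 *		struct mutex io_lock;		// protects device state
 *	};
 *
 *	static void my_dev_setup(struct my_dev *dev)
 *	{
 *		mutex_init(&dev->io_lock);	// dynamic init, sets up a lockdep key
 *	}
 */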

/*
 * @owner: contains a 'struct task_struct *' pointing to the current lock
 * owner; NULL means not owned. Since task_struct pointers are aligned to
 * at least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07
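
/*
 * Worked example (illustrative): with a waiter queued and a handoff
 * requested, the owner word reads
 *
 *	owner == (unsigned long)task | MUTEX_FLAG_WAITERS | MUTEX_FLAG_HANDOFF
 *
 * so (owner & ~MUTEX_FLAGS) recovers the task pointer and
 * (owner & MUTEX_FLAGS) recovers the flag bits, as the helpers below do.
 */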

/*
 * Internal helper function; C doesn't allow us to hide it :/
 *
 * DO NOT USE (outside of mutex code).
 */
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
}

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/*
 * Returns: __mutex_owner(lock) on failure or NULL on success.
 */
static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (flags & MUTEX_FLAG_PICKUP) {
				if (task != curr)
					break;
				flags &= ~MUTEX_FLAG_PICKUP;
			} else if (handoff) {
				if (flags & MUTEX_FLAG_HANDOFF)
					break;
				flags |= MUTEX_FLAG_HANDOFF;
			} else {
				break;
			}
		} else {
			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
			task = curr;
		}

		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
			if (task == curr)
				return NULL;
			break;
		}
	}

	return __owner_task(owner);
}

/*
 * Trylock or set HANDOFF
 */
static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
{
	return !__mutex_trylock_common(lock, handoff);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_common(lock, false);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * MUTEX_FLAG_WAITERS flag if it's the first waiter.
 */
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	list_del(&waiter->list);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_remove_waiter(lock, waiter, current);
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is
 * equivalent to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF
 * and preserves WAITERS. Provides RELEASE semantics like a regular unlock;
 * __mutex_trylock() provides the matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long new;

		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
			break;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
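
/*
 * Illustrative critical-section sketch (not part of this file); my_lock
 * and my_count are hypothetical:
 *
 *	static DEFINE_MUTEX(my_lock);
 *	static int my_count;
 *
 *	void my_inc(void)
 *	{
 *		mutex_lock(&my_lock);	// may sleep; process context only
 *		my_count++;		// exclusive access in here
 *		mutex_unlock(&my_lock);	// must be the same task that locked
 *	}
 */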

#include "ww_mutex.h"

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	return __mutex_trylock_common(lock, false);
}

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set, its contents are undefined; only by
	 * acquiring wait_lock is there a guarantee that they are
	 * valid when read.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	lockdep_assert_preemption_disabled();

	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that lock->owner still matches owner. We have
		 * already disabled preemption, which is equivalent to an
		 * RCU read-side critical section in the optimistic spinning
		 * code, so the task_struct won't go away during the
		 * spinning period.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted to detect lock holder preemption issue.
		 */
		if (!owner_on_cpu(owner) || need_resched()) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	lockdep_assert_preemption_disabled();

	if (need_resched())
		return 0;

	/*
	 * We have already disabled preemption, which is equivalent to an RCU
	 * read-side critical section in the optimistic spinning code, so the
	 * task_struct won't go away during the spinning period.
	 */
	owner = __mutex_owner(lock);
	if (owner)
		retval = owner_on_cpu(owner);

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	__ww_mutex_unlock(lock);
	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
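
/*
 * Illustrative wait/wound sketch (not part of this file); my_ww_class and
 * lock_both() are hypothetical, and the retry-on--EDEADLK pattern follows
 * Documentation/locking/ww-mutex-design.rst:
 *
 *	static DEFINE_WW_CLASS(my_ww_class);
 *
 *	int lock_both(struct ww_mutex *a, struct ww_mutex *b)
 *	{
 *		struct ww_acquire_ctx ctx;
 *		int ret;
 *
 *		ww_acquire_init(&ctx, &my_ww_class);
 *	retry:
 *		ret = ww_mutex_lock(a, &ctx);
 *		if (ret)
 *			goto err;
 *		ret = ww_mutex_lock(b, &ctx);
 *		if (ret == -EDEADLK) {
 *			ww_mutex_unlock(a);	// back off: drop what we hold,
 *			swap(a, b);		// then retry taking the
 *			goto retry;		// contended lock first
 *		}
 *		if (ret)
 *			goto err_unlock;
 *		ww_acquire_done(&ctx);		// all locks acquired
 *		// ... critical section covering both objects ...
 *		ww_mutex_unlock(b);
 *	err_unlock:
 *		ww_mutex_unlock(a);
 *	err:
 *		ww_acquire_fini(&ctx);
 *		return ret;
 *	}
 */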

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	struct ww_mutex *ww;
	int ret;

	if (!use_ww_ctx)
		ww_ctx = NULL;

	might_sleep();

	MUTEX_WARN_ON(lock->magic != lock);

	ww = container_of(lock, struct ww_mutex, base);
	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	raw_spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	waiter.task = current;
	if (use_ww_ctx)
		waiter.ww_ctx = ww_ctx;

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_kill;
	}

	set_current_state(state);
	for (;;) {
		bool first;

		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us; do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		raw_spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		first = __mutex_waiter_is_first(lock, &waiter);

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock_or_handoff(lock, first) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
			break;

		raw_spin_lock(&lock->wait_lock);
	}
	raw_spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);

	if (ww_ctx) {
		/*
		 * Wound-Wait: we may have stolen the lock (i.e. we are not
		 * the first waiter); check the waiters, as anyone might want
		 * to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx);
	}

	__mutex_remove_waiter(lock, &waiter);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	raw_spin_unlock(&lock->wait_lock);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	__mutex_remove_waiter(lock, &waiter);
err_early_kill:
	raw_spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
}

/**
 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
 * @ww: mutex to lock
 * @ww_ctx: optional w/w acquire context
 *
 * Trylocks a mutex with the optional acquire context; no deadlock detection is
 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
 * Unlike ww_mutex_lock, no deadlock handling is performed. However, if
 * @ww_ctx is specified, -EALREADY handling may happen in calls to
 * ww_mutex_trylock.
 *
 * A mutex acquired with this function must be released with ww_mutex_unlock.
 */
int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx)
		return mutex_trylock(&ww->base);

	MUTEX_WARN_ON(ww->base.magic != &ww->base);

	/*
	 * Reset the wounded flag after a kill. No other process can
	 * race and wound us here, since they can't have a valid owner
	 * pointer if we don't have any locks held.
	 */
	if (ww_ctx->acquired == 0)
		ww_ctx->wounded = 0;

	if (__mutex_trylock(&ww->base)) {
		ww_mutex_set_context_fastpath(ww, ww_ctx);
		mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, _RET_IP_, ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, _RET_IP_, ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;

	mutex_release(&lock->dep_map, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF is set: in that case we must not clear the
	 * owner field, but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}
	}

	raw_spin_lock(&lock->wait_lock);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	raw_spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock().  If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_interruptible);
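
/*
 * Illustrative sketch (not part of this file); my_lock is hypothetical.
 * A syscall path commonly maps the -EINTR return to -ERESTARTSYS:
 *
 *	if (mutex_lock_interruptible(&my_lock))
 *		return -ERESTARTSYS;	// a signal arrived; back out
 *	// ... critical section ...
 *	mutex_unlock(&my_lock);
 */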

/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock().  If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock().  While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);
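
/*
 * Illustrative sketch (not part of this file): useful when the lock is
 * held across I/O, so time blocked on it is charged as iowait; my_dev
 * and its io_lock are hypothetical:
 *
 *	mutex_lock_io(&my_dev->io_lock);	// accounted as I/O wait
 *	// ... submit a request and wait for the device ...
 *	mutex_unlock(&my_dev->io_lock);
 */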

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked;

	MUTEX_WARN_ON(lock->magic != lock);

	locked = __mutex_trylock(lock);
	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
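
/*
 * Illustrative sketch (not part of this file); note the return convention
 * is inverted relative to down_trylock(); my_lock is hypothetical:
 *
 *	if (!mutex_trylock(&my_lock))
 *		return -EBUSY;	// contended: caller backs off instead of sleeping
 *	// ... critical section ...
 *	mutex_unlock(&my_lock);
 */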

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#endif /* !CONFIG_PREEMPT_RT */

/**
 * atomic_dec_and_mutex_lock - return holding the mutex if we decrement to 0
 * @cnt: the atomic counter to decrement
 * @lock: the mutex to return holding if we decremented to 0
 *
 * Return: 1, holding the lock, if the count reached 0; 0 otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
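
/*
 * Illustrative sketch (not part of this file): the classic use is a
 * refcounted object whose teardown must run under a list mutex; my_obj,
 * obj_list_lock and my_obj_destroy() are hypothetical:
 *
 *	void my_obj_put(struct my_obj *obj)
 *	{
 *		if (atomic_dec_and_mutex_lock(&obj->refs, &obj_list_lock)) {
 *			list_del(&obj->node);	// last ref: unlink under the lock
 *			mutex_unlock(&obj_list_lock);
 *			my_obj_destroy(obj);
 *		}
 *	}
 */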