/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

/*
 * @owner: contains a 'struct task_struct *' pointer to the current lock
 * owner; NULL means not owned. Since task_struct pointers are aligned
 * at least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}
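
/*
 * Editorial illustration (not part of the upstream file): decoding a
 * packed owner word with the helpers above; the pointer value is
 * hypothetical.
 *
 *	unsigned long owner = atomic_long_read(&lock->owner);
 *	struct task_struct *task = __owner_task(owner);	// e.g. 0xffff880012345600
 *	unsigned long flags = __owner_flags(owner);	// e.g. MUTEX_FLAG_WAITERS
 *
 *	if (task && (flags & MUTEX_FLAG_WAITERS))
 *		;	// locked with sleepers queued; unlock must wake one
 */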

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (likely(task != curr))
				break;

			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
				break;

			flags &= ~MUTEX_FLAG_PICKUP;
		} else {
#ifdef CONFIG_DEBUG_MUTEXES
			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
#endif
		}

		/*
		 * When we set the HANDOFF bit, we must make sure it doesn't
		 * live past the point where we acquire it. This would be
		 * possible if we (accidentally) set the bit on an unlocked
		 * mutex.
		 */
		flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return NULL;

		owner = old;
	}

	return __owner_task(owner);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_or_owner(lock);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (!atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif
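
/*
 * Editorial note (not in the upstream file): the cmpxchg_acquire in the
 * lock fastpath pairs with the cmpxchg_release in the unlock fastpath,
 * so everything the previous owner wrote inside the critical section is
 * visible to the next owner without further barriers.
 */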

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is
 * equivalent to a regular unlock. Sets PICKUP on a handoff, clears
 * HANDOFF, preserves WAITERS. Provides RELEASE semantics like a regular
 * unlock; __mutex_trylock() provides matching ACQUIRE semantics for the
 * handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}
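
/*
 * Editorial illustration (hypothetical addresses, not in the upstream
 * file): a handoff expressed as owner-word transitions.
 *
 *	0xffff880000001000 | WAITERS | HANDOFF	// current owner; the top
 *						// waiter requested handoff
 *		-> __mutex_handoff(lock, top_waiter)
 *	0xffff880000002000 | WAITERS | PICKUP	// top waiter now owns the
 *						// lock, pending pickup in
 *						// __mutex_trylock()
 */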

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
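
/*
 * Editorial usage sketch (hypothetical caller, not part of this file):
 *
 *	static DEFINE_MUTEX(my_lock);		// statically defined
 *
 *	static void my_update(struct my_data *d)
 *	{
 *		mutex_lock(&my_lock);		// may sleep; never from IRQ
 *		d->value++;			// critical section
 *		mutex_unlock(&my_lock);		// same task must unlock
 *	}
 */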
#endif

static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen; always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done()?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

/*
 * After acquiring the lock in the fastpath, or after losing out in the
 * contested slowpath, set ctx and wake up any waiters so they can
 * recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			       struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or acquire wait_lock, add themselves to the
	 * waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended; if not, there is nobody to wake up.
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in the fastpath; wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}

/*
 * After acquiring the lock in the slowpath, set ctx and wake up any
 * waiters so they can recheck.
 *
 * Callers must hold the mutex wait_lock.
 */
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);
	lock->ctx = ctx;

	/*
	 * Give any possible sleeping processes the chance to wake up,
	 * so they can recheck if they have to back off.
	 */
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted() to detect lock holder preemption.
		 */
		if (!owner->on_cpu || need_resched() ||
				vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);

	/*
	 * As a lock holder preemption guard, skip spinning if the owner
	 * task is not running on a CPU or its CPU has been preempted.
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using an MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx,
				  const bool use_ww_ctx, const bool waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take the OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take
		 * an MCS (queued) lock first before spinning on the owner
		 * field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set the contents are undefined; only
			 * by acquiring wait_lock is there a guarantee that
			 * they are valid when read.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (READ_ONCE(ww->ctx))
				goto fail_unlock;
		}

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner; wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so; otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx,
				  const bool use_ww_ctx, const bool waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the transition from the 'locked'
	 * into the 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
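
/*
 * Editorial sketch (not part of the upstream file) of the expected
 * wait/wound usage pattern, with hypothetical locks 'a' and 'b'; see
 * Documentation/locking/ww-mutex-design.txt for the full treatment:
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &my_ww_class);
 *	ww_mutex_lock(&a->lock, &ctx);
 *	if (ww_mutex_lock(&b->lock, &ctx) == -EDEADLK) {
 *		ww_mutex_unlock(&a->lock);		// back off: drop all held locks,
 *		ww_mutex_lock_slow(&b->lock, &ctx);	// sleep on the contended one,
 *		ww_mutex_lock(&a->lock, &ctx);		// then reacquire the rest
 *	}
 *	ww_acquire_done(&ctx);
 *	...
 *	ww_mutex_unlock(&b->lock);
 *	ww_mutex_unlock(&a->lock);
 *	ww_acquire_fini(&ctx);	// only after all locks are released
 *
 * (A robust caller runs this in a loop, since the reacquisition can
 * itself return -EDEADLK.)
 */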

static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}
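
/*
 * Editorial note (not in the upstream file): the unsigned subtraction
 * above is a wrap-safe "is ctx the younger context?" test. E.g. with
 * hold_ctx->stamp == ULONG_MAX and ctx->stamp == 1 after the counter
 * wrapped, 1 - ULONG_MAX == 2 <= LONG_MAX, so ctx is still correctly
 * treated as younger and backs off with -EDEADLK.
 */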

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	unsigned long flags;
	bool first = false;
	struct ww_mutex *ww;
	int ret;

	if (use_ww_ctx) {
		ww = container_of(lock, struct ww_mutex, base);
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (use_ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	spin_lock_mutex(&lock->wait_lock, flags);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, current);

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = current;

	if (__mutex_waiter_is_first(lock, &waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);

	lock_contended(&lock->dep_map, ip);

	set_current_state(state);
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us; do a trylock
		 * before testing the error conditions to make sure we pick
		 * up the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and wound conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (unlikely(signal_pending_state(state, current))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();

		if (!first && __mutex_waiter_is_first(lock, &waiter)) {
			first = true;
			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock(lock) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, true)))
			break;

		spin_lock_mutex(&lock->wait_lock, flags);
	}
	spin_lock_mutex(&lock->wait_lock, flags);
acquired:
	__set_current_state(TASK_RUNNING);

	mutex_remove_waiter(lock, &waiter, current);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (use_ww_ctx)
		ww_mutex_set_context_slowpath(ww, ww_ctx);

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	mutex_remove_waiter(lock, &waiter, current);
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2; /* grow the interval ~3.5x */

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);

	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	unsigned long owner, flags;
	DEFINE_WAKE_Q(wake_q);

	mutex_release(&lock->dep_map, 1, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF is set; in that case we must not clear the
	 * owner field, but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));
		if (old == owner) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}

		owner = old;
	}

	spin_lock_mutex(&lock->wait_lock, flags);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	spin_unlock_mutex(&lock->wait_lock, flags);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
					    struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked = __mutex_trylock(lock);

	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
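
/*
 * Editorial usage sketch (hypothetical caller): note the spin_trylock()
 * convention, inverted from down_trylock().
 *
 *	if (mutex_trylock(&my_lock)) {	// 1 == acquired, lock is held
 *		do_quick_work();
 *		mutex_unlock(&my_lock);
 *	} else {
 *		// 0 == contended; the lock is NOT held here
 *	}
 */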

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Returns 1 (holding @lock) if the decrement reached 0, 0 otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
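
/*
 * Editorial usage sketch (hypothetical refcounted object, not part of
 * this file):
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcnt, &obj_list_lock)) {
 *		// the count hit 0 and obj_list_lock is held
 *		list_del(&obj->node);
 *		mutex_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */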