--- mutex.c (642fa448ae6b3a4e5e8737054a094173405b7643)
+++ mutex.c (e274795ea7b7caa0fd74ef651594382a69e2a951)
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>

--- 36 unchanged lines hidden (view full) ---


	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned at
- * ARCH_MIN_TASKALIGN (which is at least sizeof(void *)), we have low
- * bits to store extra state.
+ * at least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter
+ * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
+#define MUTEX_FLAG_PICKUP	0x04

-#define MUTEX_FLAGS		0x03
+#define MUTEX_FLAGS		0x07

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

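The two helpers above split a single word into an owner pointer and its low flag bits. As a rough standalone illustration of that packing (a minimal userspace sketch with made-up names; it assumes only that a sufficiently aligned pointer leaves its three low bits clear, and does not model the kernel's atomics or task_struct):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for the owner object; _Alignas(8) guarantees the
 * three low bits of any pointer to it are zero, so they can carry flag
 * state much like MUTEX_FLAG_WAITERS/HANDOFF/PICKUP do above.
 */
struct fake_task {
	_Alignas(8) int dummy;
};

#define FLAG_WAITERS	0x01UL
#define FLAG_HANDOFF	0x02UL
#define FLAG_PICKUP	0x04UL
#define FLAG_MASK	0x07UL

static struct fake_task *owner_task(uintptr_t owner)
{
	return (struct fake_task *)(owner & ~FLAG_MASK);
}

static uintptr_t owner_flags(uintptr_t owner)
{
	return owner & FLAG_MASK;
}

int main(void)
{
	struct fake_task t;
	uintptr_t owner = (uintptr_t)&t | FLAG_WAITERS | FLAG_HANDOFF;

	assert(owner_task(owner) == &t);
	assert(owner_flags(owner) == (FLAG_WAITERS | FLAG_HANDOFF));
	printf("task=%p flags=%#lx\n", (void *)owner_task(owner),
	       (unsigned long)owner_flags(owner));
	return 0;
}
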
/*
- * Actual trylock that will work on any unlocked state.
- *
- * When setting the owner field, we must preserve the low flag bits.
- *
- * Be careful with @handoff, only set that in a wait-loop (where you set
- * HANDOFF) to avoid recursive lock attempts.
+ * Trylock variant that returns the owning task on failure.
 */
-static inline bool __mutex_trylock(struct mutex *lock, const bool handoff)
+static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);
+		unsigned long task = owner & ~MUTEX_FLAGS;

-		if (__owner_task(owner)) {
-			if (handoff && unlikely(__owner_task(owner) == current)) {
-				/*
-				 * Provide ACQUIRE semantics for the lock-handoff.
-				 *
-				 * We cannot easily use load-acquire here, since
-				 * the actual load is a failed cmpxchg, which
-				 * doesn't imply any barriers.
-				 *
-				 * Also, this is a fairly unlikely scenario, and
-				 * this contains the cost.
-				 */
-				smp_mb(); /* ACQUIRE */
-				return true;
-			}
+		if (task) {
+			if (likely(task != curr))
+				break;

-			return false;
+			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
+				break;
+
+			flags &= ~MUTEX_FLAG_PICKUP;
+		} else {
+#ifdef CONFIG_DEBUG_MUTEXES
+			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
+#endif
		}

		/*
		 * We set the HANDOFF bit, we must make sure it doesn't live
		 * past the point where we acquire it. This would be possible
		 * if we (accidentally) set the bit on an unlocked mutex.
		 */
-		if (handoff)
-			flags &= ~MUTEX_FLAG_HANDOFF;
+		flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
-			return true;
+			return NULL;

		owner = old;
	}
+
+	return __owner_task(owner);
}

+/*
+ * Actual trylock that will work on any unlocked state.
+ */
+static inline bool __mutex_trylock(struct mutex *lock)
+{
+	return !__mutex_trylock_or_owner(lock);
+}
+
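The rework above separates the policy-free primitive (__mutex_trylock_or_owner(): either take the lock or report who holds it) from the plain boolean trylock. A toy userspace model of that calling convention, using C11 atomics and invented names rather than the kernel's helpers, might look like this:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Toy model of the "trylock or report the owner" contract: the lock word
 * is 0 when free, or an opaque non-zero owner token when held.  A zero
 * return means the caller now owns the lock; a non-zero return is the
 * owner it lost the race to.  This sketches the calling convention only,
 * with no flag bits and no handoff -- it is not the kernel code.
 */
static _Atomic uintptr_t lock_word;

static uintptr_t trylock_or_owner(uintptr_t self)
{
	uintptr_t expected = 0;

	if (atomic_compare_exchange_strong_explicit(&lock_word, &expected,
						    self,
						    memory_order_acquire,
						    memory_order_relaxed))
		return 0;		/* acquired */

	return expected;		/* observed owner */
}

static void unlock_word(void)
{
	atomic_store_explicit(&lock_word, 0, memory_order_release);
}

int main(void)
{
	uintptr_t me = 0x1000;		/* any non-zero token for "this thread" */
	uintptr_t owner = trylock_or_owner(me);

	if (!owner) {
		puts("acquired");
		unlock_word();
	} else {
		printf("contended, owner token %#lx\n", (unsigned long)owner);
	}
	return 0;
}

The point of the contract is that a failed attempt hands the caller exactly the information the optimistic-spin path needs next: the owner to spin on.
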
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*

--- 33 unchanged lines hidden (view full) ---


static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Give up ownership to a specific task, when @task = NULL, this is equivalent
- * to a regular unlock. Clears HANDOFF, preserves WAITERS. Provides RELEASE
- * semantics like a regular unlock, the __mutex_trylock() provides matching
- * ACQUIRE semantics for the handoff.
+ * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
+ * WAITERS. Provides RELEASE semantics like a regular unlock, the
+ * __mutex_trylock() provides the matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
+		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
+		if (task)
+			new |= MUTEX_FLAG_PICKUP;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}
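The comment above pairs the RELEASE in __mutex_handoff() with the ACQUIRE taken on the pickup side. A minimal single-threaded sketch of that shape in C11 atomics (toy names and a fake payload; it is meant only to show the release store of "next owner | PICKUP" and the matching acquire load, not the kernel's locking):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_PICKUP 0x04UL

/*
 * Toy model of a lock handoff: the previous owner publishes its payload,
 * then release-stores "next-owner | PICKUP"; the designated next owner
 * acquire-loads the word, sees itself plus PICKUP, and may then read the
 * payload.  Names and layout are illustrative only.
 */
static _Atomic uintptr_t owner_word;
static int payload;

static void handoff_to(uintptr_t next)
{
	payload = 42;	/* work protected by the lock */
	atomic_store_explicit(&owner_word, next | FLAG_PICKUP,
			      memory_order_release);
}

static int try_pickup(uintptr_t self)
{
	uintptr_t w = atomic_load_explicit(&owner_word, memory_order_acquire);

	if ((w & ~FLAG_PICKUP) == self && (w & FLAG_PICKUP)) {
		/* clear PICKUP: we now own the lock outright */
		atomic_store_explicit(&owner_word, self, memory_order_relaxed);
		return 1;
	}
	return 0;
}

int main(void)
{
	uintptr_t waiter = 0x1000;	/* token for the top waiter */

	handoff_to(waiter);
	if (try_pickup(waiter))
		printf("picked up, payload=%d\n", payload);
	return 0;
}
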

--- 231 unchanged lines hidden (view full) ---

 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx,
				  const bool use_ww_ctx, const bool waiter)
{
-	struct task_struct *task = current;
-
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */

--- 23 unchanged lines hidden (view full) ---

		 *
		 * As such, when deadlock detection needs to be
		 * performed the optimistic spinning cannot be done.
		 */
		if (READ_ONCE(ww->ctx))
			goto fail_unlock;
	}

+		/* Try to acquire the mutex... */
+		owner = __mutex_trylock_or_owner(lock);
+		if (!owner)
+			break;
+
		/*
-		 * If there's an owner, wait for it to either
+		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
-		owner = __mutex_owner(lock);
-		if (owner) {
-			if (waiter && owner == task) {
-				smp_mb(); /* ACQUIRE */
-				break;
-			}
-
-			if (!mutex_spin_on_owner(lock, owner))
-				goto fail_unlock;
-		}
-
-		/* Try to acquire the mutex if it is unlocked. */
-		if (__mutex_trylock(lock, waiter))
-			break;
+		if (!mutex_spin_on_owner(lock, owner))
+			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}
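The loop above now tries the acquisition first and, failing that, spins on the owner it was handed back. A small pthread-based demo of that overall shape (assumed helper names, no OSQ, no need_resched() handling; purely an illustration of the control flow, not the kernel path):

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Toy lock word: 0 when free, an opaque non-zero owner token when held. */
static _Atomic uintptr_t lock_word;

/* Same toy contract as sketched earlier: 0 means acquired, else the owner. */
static uintptr_t trylock_or_owner(uintptr_t self)
{
	uintptr_t expected = 0;

	if (atomic_compare_exchange_strong_explicit(&lock_word, &expected,
						    self,
						    memory_order_acquire,
						    memory_order_relaxed))
		return 0;
	return expected;
}

static void *spinner(void *arg)
{
	uintptr_t self = (uintptr_t)arg;

	for (;;) {
		if (!trylock_or_owner(self)) {
			puts("spinner: acquired after spinning");
			atomic_store_explicit(&lock_word, 0,
					      memory_order_release);
			return NULL;
		}
		/* stand-in for mutex_spin_on_owner()/cpu_relax() */
		sched_yield();
	}
}

int main(void)
{
	pthread_t t;

	/* pretend some other owner currently holds the lock */
	atomic_store_explicit(&lock_word, 0xbeef, memory_order_relaxed);
	pthread_create(&t, NULL, spinner, (void *)(uintptr_t)0x1000);

	usleep(10 * 1000);	/* hold it briefly, then release */
	atomic_store_explicit(&lock_word, 0, memory_order_release);

	pthread_join(t, NULL);
	return 0;
}

Build with something like cc -std=c11 -pthread spin_demo.c; the demo only shows why a spinner keeps re-trying the acquisition while the observed owner still holds the lock.
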

--- 126 unchanged lines hidden (view full) ---

		ww = container_of(lock, struct ww_mutex, base);
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

-	if (__mutex_trylock(lock, false) ||
+	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (use_ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	spin_lock_mutex(&lock->wait_lock, flags);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
-	if (__mutex_trylock(lock, false))
+	if (__mutex_trylock(lock))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, current);

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = current;

--- 6 unchanged lines hidden (view full) ---

	set_current_state(state);
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
-		if (__mutex_trylock(lock, first))
+		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and wound conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (unlikely(signal_pending_state(state, current))) {

--- 16 unchanged lines hidden (view full) ---

		}

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
-		if ((first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, true)) ||
-		    __mutex_trylock(lock, first))
+		if (__mutex_trylock(lock) ||
+		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, true)))
			break;

		spin_lock_mutex(&lock->wait_lock, flags);
	}
	spin_lock_mutex(&lock->wait_lock, flags);
acquired:
	__set_current_state(TASK_RUNNING);


--- 140 unchanged lines hidden (view full) ---

	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
+		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));
		if (old == owner) {

--- 122 unchanged lines hidden (view full) ---

 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
-	bool locked = __mutex_trylock(lock, false);
+	bool locked = __mutex_trylock(lock);

	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);

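The return-value caveat in the comment above is easy to trip over when moving between APIs: the kernel's mutex_trylock() returns 1 on success and 0 on contention, while the userspace pthread_mutex_trylock() returns 0 on success and EBUSY on contention. A small, purely illustrative userspace reminder:

#include <pthread.h>
#include <stdio.h>

/*
 * Userspace counterpart to the caveat above: note the inverted convention
 * relative to the kernel's mutex_trylock().
 */
static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	if (pthread_mutex_trylock(&m) == 0) {	/* 0 means "got it" here */
		puts("acquired");
		pthread_mutex_unlock(&m);
	} else {
		puts("busy");
	}
	return 0;
}
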

--- 54 unchanged lines hidden ---