/* SPDX-License-Identifier: GPL-2.0 */
/*
 * MCS lock defines
 *
 * This file contains the main data structure and API definitions of the MCS lock.
 *
 * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
 * with the desirable properties of being fair and of having each CPU spin
 * on a variable local to itself while waiting to acquire the lock.
 * It avoids the expensive cache-line bouncing that common test-and-set
 * spin-lock implementations incur.
 */
#ifndef __LINUX_MCS_SPINLOCK_H
#define __LINUX_MCS_SPINLOCK_H

#include <asm/mcs_spinlock.h>

struct mcs_spinlock {
	struct mcs_spinlock *next;
	int locked; /* 1 if lock acquired */
	int count;  /* nesting count, see qspinlock.c */
};
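
/*
 * Editor's illustrative sketch, not part of this header: each waiter
 * supplies its own node (for the generic mcs_spin_lock() below this is
 * typically a variable on the caller's stack), and the nodes are linked
 * into a queue through ->next.  The ->count field supports users such as
 * qspinlock.c, which keeps a small per-CPU array of nodes, roughly:
 *
 *	static DEFINE_PER_CPU(struct mcs_spinlock, example_nodes[4]);
 *
 * with one slot per nesting context (task, softirq, hardirq, NMI); the
 * array name above is hypothetical.
 */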

#ifndef arch_mcs_spin_lock_contended
/*
 * Using smp_load_acquire() provides a memory barrier that ensures
 * subsequent operations happen after the lock is acquired.
 */
#define arch_mcs_spin_lock_contended(l)					\
do {									\
	while (!(smp_load_acquire(l)))					\
		cpu_relax();						\
} while (0)
#endif

#ifndef arch_mcs_spin_unlock_contended
/*
 * smp_store_release() provides a memory barrier to ensure all
 * operations in the critical section have been completed before
 * unlocking.
 */
#define arch_mcs_spin_unlock_contended(l)				\
	smp_store_release((l), 1)
#endif
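
/*
 * Hedged sketch of how the two hooks above pair up in the contended case
 * (illustrative only; the real call sites are in mcs_spin_lock() and
 * mcs_spin_unlock() below):
 *
 *	waiter:  arch_mcs_spin_lock_contended(&node->locked);
 *	holder:  arch_mcs_spin_unlock_contended(&node->locked);
 *
 * The holder's store-release of 1 is observed by the waiter's
 * load-acquire, which ends the spin and orders the two critical sections.
 */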

/*
 * Note: on many architectures (x86 being an exception), the
 * smp_load_acquire()/smp_store_release() pair used by mcs_unlock and
 * mcs_lock is not sufficient to form a full memory barrier across CPUs.
 * Callers that need a full barrier across multiple CPUs for an
 * mcs_unlock/mcs_lock pair should use smp_mb__after_unlock_lock()
 * after mcs_lock.
 */
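
/*
 * Hedged example (names other than the two calls shown are placeholders):
 * a caller that relies on an mcs_spin_unlock()/mcs_spin_lock() pair acting
 * as a full barrier across CPUs would, per the note above, follow the lock
 * acquisition with smp_mb__after_unlock_lock():
 *
 *	mcs_spin_lock(&lock, &node);
 *	smp_mb__after_unlock_lock();
 *	... critical section ...
 *	mcs_spin_unlock(&lock, &node);
 */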

/*
 * In order to acquire the lock, the caller should declare a local node and
 * pass a reference to the node to this function in addition to the lock.
 * If the lock has already been acquired, this call spins on node->locked
 * until the previous lock holder sets it in mcs_spin_unlock().
 */
static inline
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	/*
	 * We rely on the full barrier with global transitivity implied by the
	 * below xchg() to order the initialization stores above against any
	 * observation of @node. And to provide the ACQUIRE ordering associated
	 * with a LOCK primitive.
	 */
	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/*
		 * Lock acquired; there is no need to set node->locked to 1.
		 * A thread only spins on its own node->locked value while
		 * waiting for the lock. Since this thread acquires the lock
		 * immediately and never spins on its own node->locked, that
		 * value is never read. If a debug mode is needed to audit
		 * lock status, set node->locked here.
		 */
		return;
	}
	WRITE_ONCE(prev->next, node);

	/* Wait until the lock holder passes the lock down. */
	arch_mcs_spin_lock_contended(&node->locked);
}

/*
 * Releases the lock. The caller should pass in the corresponding node that
 * was used to acquire the lock.
 */
static inline
void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *next = READ_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (likely(cmpxchg_release(lock, node, NULL) == node))
			return;
		/* Wait until the next pointer is set */
		while (!(next = READ_ONCE(node->next)))
			cpu_relax();
	}

	/* Pass lock to next waiter. */
	arch_mcs_spin_unlock_contended(&next->locked);
}
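
/*
 * Hedged usage sketch (editor's example; "demo_lock" and
 * "demo_critical_section" are hypothetical, not kernel symbols).  The
 * lock variable is the tail pointer of the waiter queue and starts out
 * NULL; each acquirer brings its own node, typically on the stack:
 *
 *	static struct mcs_spinlock *demo_lock;	// NULL == unlocked
 *
 *	static void demo_critical_section(void)
 *	{
 *		struct mcs_spinlock node;
 *
 *		mcs_spin_lock(&demo_lock, &node);
 *		// ... critical section ...
 *		mcs_spin_unlock(&demo_lock, &node);
 *	}
 *
 * The same node that was passed to mcs_spin_lock() must be passed to the
 * matching mcs_spin_unlock(), and it must stay valid in between.
 */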

#endif /* __LINUX_MCS_SPINLOCK_H */