xref: /openbmc/linux/kernel/locking/osq_lock.c (revision d8bcaabe)
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/osq_lock.h>

/*
 * An MCS-like lock especially tailored for the optimistic spinning done by
 * sleeping lock implementations (mutex, rwsem, etc.).
 *
 * Using a single MCS node per CPU is safe because sleeping locks should not be
 * called from interrupt context and we have preemption disabled while
 * spinning.
 */
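
/*
 * Typical usage, as an illustrative sketch only (the real callers are the
 * mutex/rwsem optimistic-spin slowpaths): the caller disables preemption,
 * spins on the underlying lock while it owns the OSQ, and falls back to
 * blocking when osq_lock() returns false:
 *
 *	preempt_disable();
 *	if (osq_lock(&lock->osq)) {
 *		while (!my_trylock(lock) && owner_is_running(lock))
 *			cpu_relax();
 *		osq_unlock(&lock->osq);
 *	}
 *	preempt_enable();
 *
 * my_trylock() and owner_is_running() are placeholders for the caller's own
 * acquisition and spinning heuristics (e.g. mutex_spin_on_owner()).
 */
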
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);

/*
 * We use the value 0 to represent "no CPU", thus the encoded value
 * will be the CPU number incremented by 1.
 */
static inline int encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;
}

static inline int node_cpu(struct optimistic_spin_node *node)
{
	return node->cpu - 1;
}

static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
	int cpu_nr = encoded_cpu_val - 1;

	return per_cpu_ptr(&osq_node, cpu_nr);
}
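
/*
 * Example of the encoding, for illustration: on CPU 2, encode_cpu() stores
 * 3 in lock->tail; decode_cpu(3) returns CPU 2's per-cpu osq_node; and
 * node_cpu() maps that node back to 2, e.g. for the vcpu_is_preempted()
 * check in osq_lock().
 */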

/*
 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
 * Can return NULL in case we were the last queued and we updated @lock instead.
 */
static inline struct optimistic_spin_node *
osq_wait_next(struct optimistic_spin_queue *lock,
	      struct optimistic_spin_node *node,
	      struct optimistic_spin_node *prev)
{
	struct optimistic_spin_node *next = NULL;
	int curr = encode_cpu(smp_processor_id());
	int old;

	/*
	 * If there is a prev node in the queue, the 'old' value will be
	 * the prev node's CPU #; else it's set to OSQ_UNLOCKED_VAL since,
	 * if we're currently last in the queue, the queue will become empty.
	 */
	old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;

	for (;;) {
		if (atomic_read(&lock->tail) == curr &&
		    atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
			/*
			 * We were the last queued, we moved @lock back. @prev
			 * will now observe @lock and will complete its
			 * unlock()/unqueue().
			 */
			break;
		}

		/*
		 * We must xchg() the @node->next value, because if we were to
		 * leave it in, a concurrent unlock()/unqueue() from
		 * @node->next might complete Step-A and think its @prev is
		 * still valid.
		 *
		 * If the concurrent unlock()/unqueue() wins the race, we'll
		 * wait for either @lock to point to us, through its Step-B, or
		 * wait for a new @node->next from its Step-C.
		 */
		if (node->next) {
			next = xchg(&node->next, NULL);
			if (next)
				break;
		}

		cpu_relax();
	}

	return next;
}

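/*
 * osq_lock() - try to join the optimistic spin queue for @lock.
 *
 * Returns true once we own the OSQ (either the queue was empty or our turn
 * arrived via node->locked), and false when we gave up and unqueued because
 * we need to reschedule or the CPU we were waiting on was preempted; the
 * caller should then block instead of spinning.
 *
 * Must be called with preemption disabled (see the comment at the top of
 * this file about the single per-CPU node).
 */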
bool osq_lock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
	struct optimistic_spin_node *prev, *next;
	int curr = encode_cpu(smp_processor_id());
	int old;

	node->locked = 0;
	node->next = NULL;
	node->cpu = curr;

	/*
	 * We need both ACQUIRE (pairs with the corresponding RELEASE in
	 * the uncontended unlock() fastpath) and RELEASE (to publish
	 * the node fields we just initialised) semantics when updating
	 * the lock tail, hence the fully ordered atomic_xchg() below.
	 */
	old = atomic_xchg(&lock->tail, curr);
	if (old == OSQ_UNLOCKED_VAL)
		return true;

	prev = decode_cpu(old);
	node->prev = prev;

	/*
	 * osq_lock()			unqueue
	 *
	 * node->prev = prev		osq_wait_next()
	 * WMB				MB
	 * prev->next = node		next->prev = prev // unqueue-C
	 *
	 * Here 'node->prev' and 'next->prev' are the same variable and we need
	 * to ensure these stores happen in-order to avoid corrupting the list.
	 */
	smp_wmb();

	WRITE_ONCE(prev->next, node);

	/*
	 * Normally @prev would be untouchable after the above store, because
	 * at that moment unlock can proceed and wipe the node element from
	 * the stack.
	 *
	 * However, since our nodes are static per-cpu storage, we're
	 * guaranteed their existence -- this allows us to apply
	 * cmpxchg() in an attempt to undo our queueing.
	 */

	while (!READ_ONCE(node->locked)) {
		/*
		 * If we need to reschedule, bail out... so we can block.
		 * Use vcpu_is_preempted() to avoid waiting for a preempted
		 * lock holder:
		 */
		if (need_resched() || vcpu_is_preempted(node_cpu(node->prev)))
			goto unqueue;

		cpu_relax();
	}
	return true;

unqueue:
	/*
	 * Step - A  -- stabilize @prev
	 *
	 * Undo our @prev->next assignment; this will make @prev's
	 * unlock()/unqueue() wait for a next pointer since @lock points to us
	 * (or later).
	 */

	for (;;) {
		if (prev->next == node &&
		    cmpxchg(&prev->next, node, NULL) == node)
			break;

		/*
		 * We can only fail the cmpxchg() racing against an unlock(),
		 * in which case we should observe @node->locked becoming
		 * true.
		 */
		if (smp_load_acquire(&node->locked))
			return true;

		cpu_relax();

		/*
		 * Or we race against a concurrent unqueue()'s step-B, in which
		 * case its step-C will write us a new @node->prev pointer.
		 */
		prev = READ_ONCE(node->prev);
	}

	/*
	 * Step - B -- stabilize @next
	 *
	 * Similar to unlock(), wait for @node->next or move @lock from @node
	 * back to @prev.
	 */

	next = osq_wait_next(lock, node, prev);
	if (!next)
		return false;

	/*
	 * Step - C -- unlink
	 *
	 * @prev is stable because it's still waiting for a new @prev->next
	 * pointer, @next is stable because our @node->next pointer is NULL and
	 * it will wait in Step-A.
	 */

	WRITE_ONCE(next->prev, prev);
	WRITE_ONCE(prev->next, next);

	return false;
}

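/*
 * osq_unlock() - release our slot in the optimistic spin queue for @lock,
 * handing ownership to the next queued spinner (by setting its node->locked)
 * if there is one.
 */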
void osq_unlock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node, *next;
	int curr = encode_cpu(smp_processor_id());

	/*
	 * Fast path for the uncontended case.
	 */
	if (likely(atomic_cmpxchg_release(&lock->tail, curr,
					  OSQ_UNLOCKED_VAL) == curr))
		return;

	/*
	 * Second most likely case: someone already queued behind us, so the
	 * fast path failed; if our successor has linked itself in, hand the
	 * lock straight to it.
	 */
	node = this_cpu_ptr(&osq_node);
	next = xchg(&node->next, NULL);
	if (next) {
		WRITE_ONCE(next->locked, 1);
		return;
	}

	next = osq_wait_next(lock, node, NULL);
	if (next)
		WRITE_ONCE(next->locked, 1);
}