/* xref: /openbmc/linux/kernel/locking/qspinlock.c (revision 5ff32883) */
/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2013-2014,2018 Red Hat, Inc.
 * (C) Copyright 2015 Intel Corp.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <longman@redhat.com>
 *          Peter Zijlstra <peterz@infradead.org>
 */

#ifndef _GEN_PV_LOCK_SLOWPATH

#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/qspinlock.h>

/*
 * Include queued spinlock statistics code
 */
#include "qspinlock_stat.h"

/*
 * The basic principle of a queue-based spinlock can best be understood
 * by studying a classic queue-based spinlock implementation called the
 * MCS lock. The paper below provides a good description for this kind
 * of lock.
 *
 * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
 *
 * This queued spinlock implementation is based on the MCS lock; however,
 * to make it fit the 4 bytes that spinlock_t is assumed to be, and to
 * preserve the existing API, we must adapt it.
 *
 * In particular, where the traditional MCS lock consists of a tail pointer
 * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
 * unlock the next pending waiter (next->locked), we compress both of these
 * into a single u32 value: {tail, next->locked}.
 *
 * A spinlock disables recursion within its own context, and there is a
 * limit to the contexts that can nest: task, softirq, hardirq and nmi.
 * As there are at most 4 nesting levels, the node index can be encoded in
 * a 2-bit number, and the tail is formed by combining that 2-bit index
 * with the cpu number. With one byte for the lock value and 3 bytes for
 * the tail, only a 32-bit word is needed. Even though we only need 1 bit
 * for the lock, we extend it to a full byte to achieve better performance
 * on architectures that support atomic byte writes.
 *
 * We also change the first spinner to spin on the lock bit instead of its
 * node, thereby avoiding the need to carry a node from lock to unlock and
 * preserving the existing lock API. This also makes the unlock code simpler
 * and faster.
 *
 * N.B. The current implementation only supports architectures that allow
 *      atomic operations on 8-bit and 16-bit data types.
 */
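
/*
 * For illustration, a rough sketch of the 32-bit word layout this code
 * relies on (qspinlock_types.h is the authoritative definition; the split
 * shown here is the NR_CPUS < 16K case):
 *
 *	bits  0- 7: locked byte
 *	bit      8: pending
 *	bits  9-15: unused
 *	bits 16-17: tail index (nesting level)
 *	bits 18-31: tail cpu (+ 1)
 *
 * A value of 0 therefore means "unlocked, not pending, empty queue", and
 * the uncontended fastpath only has to cmpxchg 0 -> _Q_LOCKED_VAL.
 */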

#include "mcs_spinlock.h"
#define MAX_NODES	4

/*
 * On 64-bit architectures, the mcs_spinlock structure will be 16 bytes in
 * size and four of them will fit nicely in one 64-byte cacheline. For
 * pvqspinlock, however, we need more space for extra data. To accommodate
 * that, we insert two more long words to pad it up to 32 bytes. IOW, only
 * two of them can fit in a cacheline in this case. That is OK as it is rare
 * to have more than 2 levels of slowpath nesting in actual use. We don't
 * want to penalize pvqspinlocks to optimize for a rare case in native
 * qspinlocks.
 */
struct qnode {
	struct mcs_spinlock mcs;
#ifdef CONFIG_PARAVIRT_SPINLOCKS
	long reserved[2];
#endif
};
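
/*
 * Back-of-the-envelope sizing, assuming the mcs_spinlock layout in
 * mcs_spinlock.h (a next pointer plus two ints): 16 bytes per node on
 * 64-bit, i.e. four nodes per 64-byte cacheline natively; with the two
 * reserved longs for PV state a qnode grows to 32 bytes, i.e. two nodes
 * per cacheline.
 */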

/*
 * The pending bit spinning loop count.
 * This heuristic is used to limit the number of lockword accesses
 * made by atomic_cond_read_relaxed when waiting for the lock to
 * transition out of the "== _Q_PENDING_VAL" state. We don't spin
 * indefinitely because there's no guarantee that we'll make forward
 * progress.
 */
#ifndef _Q_PENDING_LOOPS
#define _Q_PENDING_LOOPS	1
#endif
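
/*
 * For reference, architectures may override this bound in their
 * asm/qspinlock.h; x86, for instance, allows on the order of a few
 * hundred iterations ((1 << 9) at the time of this revision) before
 * giving up and queueing.
 */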

/*
 * Per-CPU queue node structures; we can never have more than 4 nested
 * contexts: task, softirq, hardirq, nmi.
 *
 * Exactly fits one 64-byte cacheline on a 64-bit architecture.
 *
 * PV doubles the storage and uses the second cacheline for PV state.
 */
static DEFINE_PER_CPU_ALIGNED(struct qnode, qnodes[MAX_NODES]);

/*
 * We must be able to distinguish between no-tail and the tail at 0:0,
 * therefore increment the cpu number by one.
 */

static inline __pure u32 encode_tail(int cpu, int idx)
{
	u32 tail;

#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(idx > 3);
#endif
	tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
	tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */

	return tail;
}

static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
{
	int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
	int idx = (tail &  _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;

	return per_cpu_ptr(&qnodes[idx].mcs, cpu);
}
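
/*
 * A hypothetical worked example, assuming the NR_CPUS < 16K offsets from
 * qspinlock_types.h (_Q_TAIL_IDX_OFFSET == 16, _Q_TAIL_CPU_OFFSET == 18):
 * cpu 5, idx 2 encodes to ((5 + 1) << 18) | (2 << 16). decode_tail() then
 * recovers cpu = (tail >> 18) - 1 = 5 and idx = 2, and returns a pointer
 * to the mcs field of qnodes[2] on CPU 5.
 */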

static inline __pure
struct mcs_spinlock *grab_mcs_node(struct mcs_spinlock *base, int idx)
{
	return &((struct qnode *)base + idx)->mcs;
}

#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)

#if _Q_PENDING_BITS == 8
/**
 * clear_pending - clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,* -> *,0,*
 */
static __always_inline void clear_pending(struct qspinlock *lock)
{
	WRITE_ONCE(lock->pending, 0);
}

/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 *
 * Lock stealing is not allowed if this function is used.
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
}

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail), which heads an address dependency
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	/*
	 * We can use relaxed semantics since the caller ensures that the
	 * MCS node is properly initialized before updating the tail.
	 */
	return (u32)xchg_relaxed(&lock->tail,
				 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
}
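
/*
 * A short note on why the 16-bit xchg above is sufficient: with
 * _Q_PENDING_BITS == 8 the locked byte and the pending byte together
 * occupy the low 16 bits of the word, so struct qspinlock (see
 * qspinlock_types.h) exposes the remaining 16 bits as a separate ->tail
 * field that can be exchanged on its own. This is also why the N.B. at
 * the top requires architectures with 8-bit and 16-bit atomics.
 */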

#else /* _Q_PENDING_BITS == 8 */

/**
 * clear_pending - clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,* -> *,0,*
 */
static __always_inline void clear_pending(struct qspinlock *lock)
{
	atomic_andnot(_Q_PENDING_VAL, &lock->val);
}

/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
}

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	u32 old, new, val = atomic_read(&lock->val);

	for (;;) {
		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
		/*
		 * We can use relaxed semantics since the caller ensures that
		 * the MCS node is properly initialized before updating the
		 * tail.
		 */
		old = atomic_cmpxchg_relaxed(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}
	return old;
}
#endif /* _Q_PENDING_BITS == 8 */

/**
 * queued_fetch_set_pending_acquire - fetch the whole lock value and set pending
 * @lock : Pointer to queued spinlock structure
 * Return: The previous lock value
 *
 * *,*,* -> *,1,*
 */
#ifndef queued_fetch_set_pending_acquire
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
	return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
}
#endif

/**
 * set_locked - Set the lock bit and own the lock
 * @lock: Pointer to queued spinlock structure
 *
 * *,*,0 -> *,0,1
 */
static __always_inline void set_locked(struct qspinlock *lock)
{
	WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
}


/*
 * Generate the native code for queued_spin_lock_slowpath(); provide NOPs
 * for all the PV callbacks.
 */

static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
					   struct mcs_spinlock *prev) { }
static __always_inline void __pv_kick_node(struct qspinlock *lock,
					   struct mcs_spinlock *node) { }
static __always_inline u32  __pv_wait_head_or_lock(struct qspinlock *lock,
						   struct mcs_spinlock *node)
						   { return 0; }

#define pv_enabled()		false

#define pv_init_node		__pv_init_node
#define pv_wait_node		__pv_wait_node
#define pv_kick_node		__pv_kick_node
#define pv_wait_head_or_lock	__pv_wait_head_or_lock

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
#endif

#endif /* _GEN_PV_LOCK_SLOWPATH */

/**
 * queued_spin_lock_slowpath - acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * @val: Current value of the queued spinlock 32-bit word
 *
 * (queue tail, pending bit, lock value)
 *
 *              fast     :    slow                                  :    unlock
 *                       :                                          :
 * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
 *                       :       | ^--------.------.             /  :
 *                       :       v           \      \            |  :
 * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
 *                       :       | ^--'              |           |  :
 *                       :       v                   |           |  :
 * uncontended           :    (n,x,y) +--> (n,0,0) --'           |  :
 *   queue               :       | ^--'                          |  :
 *                       :       v                               |  :
 * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
 *   queue               :         ^--'                             :
 */
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	struct mcs_spinlock *prev, *next, *node;
	u32 old, tail;
	int idx;

	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

	if (pv_enabled())
		goto pv_queue;

	if (virt_spin_lock(lock))
		return;

	/*
	 * Wait for in-progress pending->locked hand-overs with a bounded
	 * number of spins so that we guarantee forward progress.
	 *
	 * 0,1,0 -> 0,0,1
	 */
	if (val == _Q_PENDING_VAL) {
		int cnt = _Q_PENDING_LOOPS;
		val = atomic_cond_read_relaxed(&lock->val,
					       (VAL != _Q_PENDING_VAL) || !cnt--);
	}

	/*
	 * If we observe any contention; queue.
	 */
	if (val & ~_Q_LOCKED_MASK)
		goto queue;

	/*
	 * trylock || pending
	 *
	 * 0,0,* -> 0,1,* -> 0,0,1 pending, trylock
	 */
	val = queued_fetch_set_pending_acquire(lock);

	/*
	 * If we observe contention, there is a concurrent locker.
	 *
	 * Undo and queue; our setting of PENDING might have made the
	 * n,0,0 -> 0,0,0 transition fail and it will now be waiting
	 * on @next to become !NULL.
	 */
	if (unlikely(val & ~_Q_LOCKED_MASK)) {

		/* Undo PENDING if we set it. */
		if (!(val & _Q_PENDING_MASK))
			clear_pending(lock);

		goto queue;
	}

	/*
	 * We're pending, wait for the owner to go away.
	 *
	 * 0,1,1 -> 0,1,0
	 *
	 * this wait loop must be a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because not all
	 * clear_pending_set_locked() implementations imply full
	 * barriers.
	 */
	if (val & _Q_LOCKED_MASK)
		atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_MASK));

	/*
	 * take ownership and clear the pending bit.
	 *
	 * 0,1,0 -> 0,0,1
	 */
	clear_pending_set_locked(lock);
	qstat_inc(qstat_lock_pending, true);
	return;

	/*
	 * End of pending bit optimistic spinning and beginning of MCS
	 * queuing.
	 */
queue:
	qstat_inc(qstat_lock_slowpath, true);
pv_queue:
	node = this_cpu_ptr(&qnodes[0].mcs);
	idx = node->count++;
	tail = encode_tail(smp_processor_id(), idx);

	node = grab_mcs_node(node, idx);

	/*
	 * Keep counts of non-zero index values:
	 */
	qstat_inc(qstat_lock_idx1 + idx - 1, idx);

	/*
	 * Ensure that we increment the head node->count before initialising
	 * the actual node. If the compiler is kind enough to reorder these
	 * stores, then an IRQ could overwrite our assignments.
	 */
	barrier();

	node->locked = 0;
	node->next = NULL;
	pv_init_node(node);

	/*
	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
	 * attempt the trylock once more in the hope someone let go while we
	 * weren't watching.
	 */
	if (queued_spin_trylock(lock))
		goto release;

	/*
	 * Ensure that the initialisation of @node is complete before we
	 * publish the updated tail via xchg_tail() and potentially link
	 * @node into the waitqueue via WRITE_ONCE(prev->next, node) below.
	 */
	smp_wmb();

	/*
	 * Publish the updated tail.
	 * We have already touched the queueing cacheline; don't bother with
	 * pending stuff.
	 *
	 * p,*,* -> n,*,*
	 */
	old = xchg_tail(lock, tail);
	next = NULL;

	/*
	 * if there was a previous node; link it and wait until reaching the
	 * head of the waitqueue.
	 */
	if (old & _Q_TAIL_MASK) {
		prev = decode_tail(old);

		/* Link @node into the waitqueue. */
		WRITE_ONCE(prev->next, node);

		pv_wait_node(node, prev);
		arch_mcs_spin_lock_contended(&node->locked);

		/*
		 * While waiting for the MCS lock, the next pointer may have
		 * been set by another lock waiter. We optimistically load
		 * the next pointer & prefetch the cacheline for writing
		 * to reduce latency in the upcoming MCS unlock operation.
		 */
		next = READ_ONCE(node->next);
		if (next)
			prefetchw(next);
	}

	/*
	 * we're at the head of the waitqueue, wait for the owner & pending to
	 * go away.
	 *
	 * *,x,y -> *,0,0
	 *
	 * this wait loop must use a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because the set_locked() function below
	 * does not imply a full barrier.
	 *
	 * The PV pv_wait_head_or_lock function, if active, will acquire
	 * the lock and return a non-zero value. So we have to skip the
	 * atomic_cond_read_acquire() call. As the next PV queue head hasn't
	 * been designated yet, there is no way for the locked value to become
	 * _Q_SLOW_VAL. So both the set_locked() and the
	 * atomic_cmpxchg_relaxed() calls will be safe.
	 *
	 * If PV isn't active, 0 will be returned instead.
	 */
	if ((val = pv_wait_head_or_lock(lock, node)))
		goto locked;

	val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK));

locked:
	/*
	 * claim the lock:
	 *
	 * n,0,0 -> 0,0,1 : lock, uncontended
	 * *,*,0 -> *,*,1 : lock, contended
	 *
	 * If the queue head is the only one in the queue (lock value == tail)
	 * and nobody is pending, clear the tail code and grab the lock.
	 * Otherwise, we only need to grab the lock.
	 */

	/*
	 * In the PV case we might already have _Q_LOCKED_VAL set, because
	 * of lock stealing; therefore we must also allow:
	 *
	 * n,0,1 -> 0,0,1
	 *
	 * Note: at this point: (val & _Q_PENDING_MASK) == 0, because of the
	 *       above wait condition, therefore any concurrent setting of
	 *       PENDING will make the uncontended transition fail.
	 */
	if ((val & _Q_TAIL_MASK) == tail) {
		if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
			goto release; /* No contention */
	}

	/*
	 * Either somebody is queued behind us, or a concurrent locker set
	 * _Q_PENDING_VAL; that locker will then detect the remaining tail,
	 * queue behind us and thus ensure we'll see a @next.
	 */
	set_locked(lock);

	/*
	 * contended path; wait for next if not observed yet, release.
	 */
	if (!next)
		next = smp_cond_load_relaxed(&node->next, (VAL));

	arch_mcs_spin_unlock_contended(&next->locked);
	pv_kick_node(lock, next);

release:
	/*
	 * release the node
	 */
	__this_cpu_dec(qnodes[0].mcs.count);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);
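
/*
 * For reference, a sketch of how callers reach this slowpath: the generic
 * fastpath lives in include/asm-generic/qspinlock.h (reproduced here from
 * memory, not defined in this file) and tries to grab an entirely idle lock
 * word, falling back to the slowpath only on failure:
 *
 *	static __always_inline void queued_spin_lock(struct qspinlock *lock)
 *	{
 *		u32 val;
 *
 *		val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
 *		if (likely(val == 0))
 *			return;
 *
 *		queued_spin_lock_slowpath(lock, val);
 *	}
 *
 * so @val on entry above is the lock word observed by that failed cmpxchg.
 */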

/*
 * Generate the paravirt code for queued_spin_unlock_slowpath().
 */
#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
#define _GEN_PV_LOCK_SLOWPATH

#undef  pv_enabled
#define pv_enabled()	true

#undef pv_init_node
#undef pv_wait_node
#undef pv_kick_node
#undef pv_wait_head_or_lock

#undef  queued_spin_lock_slowpath
#define queued_spin_lock_slowpath	__pv_queued_spin_lock_slowpath

#include "qspinlock_paravirt.h"
#include "qspinlock.c"

#endif
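
/*
 * Note on the double compilation above: on the first pass through this file
 * with CONFIG_PARAVIRT_SPINLOCKS, the slowpath is emitted as
 * native_queued_spin_lock_slowpath() with the pv_*() hooks stubbed out; the
 * #include "qspinlock.c" above then compiles the same body a second time as
 * __pv_queued_spin_lock_slowpath(), with the paravirt callbacks from
 * qspinlock_paravirt.h in effect.
 */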