xref: /openbmc/linux/kernel/locking/qspinlock_paravirt.h (revision b802fb99ae964681d1754428f67970911e0476e9)
1 #ifndef _GEN_PV_LOCK_SLOWPATH
2 #error "do not include this file"
3 #endif
4 
5 #include <linux/hash.h>
6 #include <linux/bootmem.h>
7 #include <linux/debug_locks.h>
8 
9 /*
10  * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
11  * of spinning them.
12  *
13  * This relies on the architecture to provide two paravirt hypercalls:
14  *
15  *   pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
16  *   pv_kick(cpu)             -- wakes a suspended vcpu
17  *
18  * Using these we implement __pv_queued_spin_lock_slowpath() and
19  * __pv_queued_spin_unlock() to replace native_queued_spin_lock_slowpath() and
20  * native_queued_spin_unlock().
21  */
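/*
 * Illustrative sketch (not part of this file): roughly what an architecture
 * is expected to supply behind pv_wait()/pv_kick().  The helpers
 * hv_halt_self() and hv_wake_vcpu() below are hypothetical stand-ins for a
 * real hypervisor facility.  The re-check of *ptr before blocking is the
 * essential part; note that real implementations must also close the race
 * where a kick arrives between that check and the actual halt (typically by
 * letting the hypervisor do the final check atomically).
 *
 *	static void example_pv_wait(u8 *ptr, u8 val)
 *	{
 *		if (READ_ONCE(*ptr) != val)
 *			return;		// state already changed, don't block
 *		hv_halt_self();		// hypothetical: halt this vCPU until kicked
 *	}
 *
 *	static void example_pv_kick(int cpu)
 *	{
 *		hv_wake_vcpu(cpu);	// hypothetical: wake the target vCPU
 *	}
 */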
22 
23 #define _Q_SLOW_VAL	(3U << _Q_LOCKED_OFFSET)
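/*
 * Note on _Q_SLOW_VAL: it lives in the same (locked) byte as _Q_LOCKED_VAL
 * and still has the locked bit set, so waiters keep seeing the lock as taken.
 * Its only job is to make the cmpxchg(_Q_LOCKED_VAL, 0) in
 * __pv_queued_spin_unlock() fail, diverting the releasing CPU into
 * __pv_queued_spin_unlock_slowpath() where it unhashes the lock and kicks
 * the sleeping queue head.
 */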
24 
25 /*
26  * Queue Node Adaptive Spinning
27  *
28  * A queue node vCPU will stop spinning if the vCPU in the previous node is
29  * not running. The one lock stealing attempt allowed at slowpath entry
30  * mitigates the slight slowdown for non-overcommitted guest with this
31  * aggressive wait-early mechanism.
32  *
33  * The status of the previous node will be checked at fixed interval
34  * controlled by PV_PREV_CHECK_MASK. This is to ensure that we won't
35  * pound on the cacheline of the previous node too heavily.
36  */
37 #define PV_PREV_CHECK_MASK	0xff
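/*
 * With the mask above, and pv_wait_node() counting "loop" down from
 * SPIN_THRESHOLD, the (loop & PV_PREV_CHECK_MASK) == 0 test in
 * pv_wait_early() fires roughly once every 256 iterations, so the previous
 * node's ->state cacheline is read at most about SPIN_THRESHOLD/256 times
 * per spin episode.
 */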
38 
39 /*
40  * Queue node uses: vcpu_running & vcpu_halted.
41  * Queue head uses: vcpu_running & vcpu_hashed.
42  */
43 enum vcpu_state {
44 	vcpu_running = 0,
45 	vcpu_halted,		/* Used only in pv_wait_node */
46 	vcpu_hashed,		/* = pv_hash'ed + vcpu_halted */
47 };
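/*
 * State transitions, as implemented below:
 *
 *   vcpu_running -> vcpu_halted:   pv_wait_node() just before pv_wait()
 *   vcpu_halted  -> vcpu_running:  pv_wait_node() after waking up
 *   vcpu_halted  -> vcpu_hashed:   pv_kick_node(), when the lock holder hands
 *                                  the MCS lock to a halted successor
 *   vcpu_hashed  -> vcpu_running:  pv_wait_head_or_lock() at the start of
 *                                  each spin round
 *   vcpu_running -> vcpu_halted:   pv_wait_head_or_lock() just before
 *                                  pv_wait() on the lock word
 */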
48 
49 struct pv_node {
50 	struct mcs_spinlock	mcs;
51 	struct mcs_spinlock	__res[3];
52 
53 	int			cpu;
54 	u8			state;
55 };
56 
57 /*
58  * By replacing the regular queued_spin_trylock() with the function below,
59  * it will be called once when a lock waiter enters the PV slowpath before
60  * being queued. By allowing one lock stealing attempt here when the pending
61  * bit is off, it helps to reduce the performance impact of lock waiter
62  * preemption without the drawback of lock starvation.
63  */
64 #define queued_spin_trylock(l)	pv_queued_spin_steal_lock(l)
65 static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
66 {
67 	struct __qspinlock *l = (void *)lock;
68 
69 	return !(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
70 		(cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0);
71 }
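/*
 * Note that the check above ignores the tail field: the steal can succeed
 * even while other vCPUs are queued, as long as neither the locked byte nor
 * the pending bit is set.  This is the single "lock stealing attempt"
 * referred to in the comments above; if the steal fails, the waiter is
 * queued and plays by the normal MCS rules.
 */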
72 
73 /*
74  * The pending bit is used by the queue head vCPU to indicate that it
75  * is actively spinning on the lock and no lock stealing is allowed.
76  */
77 #if _Q_PENDING_BITS == 8
78 static __always_inline void set_pending(struct qspinlock *lock)
79 {
80 	struct __qspinlock *l = (void *)lock;
81 
82 	WRITE_ONCE(l->pending, 1);
83 }
84 
85 static __always_inline void clear_pending(struct qspinlock *lock)
86 {
87 	struct __qspinlock *l = (void *)lock;
88 
89 	WRITE_ONCE(l->pending, 0);
90 }
91 
92 /*
93  * The pending bit check in pv_queued_spin_steal_lock() isn't a memory
94  * barrier. Therefore, an atomic cmpxchg() is used to acquire the lock,
95  * making sure that the caller really does get the lock.
96  */
97 static __always_inline int trylock_clear_pending(struct qspinlock *lock)
98 {
99 	struct __qspinlock *l = (void *)lock;
100 
101 	return !READ_ONCE(l->locked) &&
102 	       (cmpxchg(&l->locked_pending, _Q_PENDING_VAL, _Q_LOCKED_VAL)
103 			== _Q_PENDING_VAL);
104 }
105 #else /* _Q_PENDING_BITS == 8 */
106 static __always_inline void set_pending(struct qspinlock *lock)
107 {
108 	atomic_set_mask(_Q_PENDING_VAL, &lock->val);
109 }
110 
111 static __always_inline void clear_pending(struct qspinlock *lock)
112 {
113 	atomic_clear_mask(_Q_PENDING_VAL, &lock->val);
114 }
115 
116 static __always_inline int trylock_clear_pending(struct qspinlock *lock)
117 {
118 	int val = atomic_read(&lock->val);
119 
120 	for (;;) {
121 		int old, new;
122 
123 		if (val & _Q_LOCKED_MASK)
124 			break;
125 
126 		/*
127 		 * Try to clear pending bit & set locked bit
128 		 */
129 		old = val;
130 		new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
131 		val = atomic_cmpxchg(&lock->val, old, new);
132 
133 		if (val == old)
134 			return 1;
135 	}
136 	return 0;
137 }
138 #endif /* _Q_PENDING_BITS == 8 */
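/*
 * Background for the two variants above: when _Q_PENDING_BITS == 8 the lock
 * word is laid out (via struct __qspinlock in kernel/locking/qspinlock.c) so
 * that the locked value and the pending bit each occupy their own byte, the
 * two together forming a 16-bit "locked_pending" halfword.  That allows the
 * single-byte WRITE_ONCE()s and the 16-bit cmpxchg() used above.  When the
 * pending bit shares its byte with tail bits (_Q_PENDING_BITS == 1), only
 * full-word atomics on lock->val are safe, hence the cmpxchg loop in the
 * #else branch.
 */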
139 
140 /*
141  * Include queued spinlock statistics code
142  */
143 #include "qspinlock_stat.h"
144 
145 /*
146  * Lock and MCS node addresses hash table for fast lookup
147  *
148  * Hashing is done on a per-cacheline basis to minimize the need to access
149  * more than one cacheline.
150  *
151  * Dynamically allocate a hash table big enough to hold at least 4X the
152  * number of possible cpus in the system. Allocation is done on page
153  * granularity. So the minimum number of hash buckets should be at least
154  * 256 (64-bit) or 512 (32-bit) to fully utilize a 4k page.
155  *
156  * Since we should not be holding locks from NMI context (very rare indeed) the
157  * max load factor is 0.75, which is around the point where open addressing
158  * breaks down.
159  *
160  */
161 struct pv_hash_entry {
162 	struct qspinlock *lock;
163 	struct pv_node   *node;
164 };
165 
166 #define PV_HE_PER_LINE	(SMP_CACHE_BYTES / sizeof(struct pv_hash_entry))
167 #define PV_HE_MIN	(PAGE_SIZE / sizeof(struct pv_hash_entry))
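/*
 * Worked example (typical 64-bit config, assuming 64-byte cachelines and 4K
 * pages): sizeof(struct pv_hash_entry) is 16 bytes, so PV_HE_PER_LINE = 4
 * entries per cacheline and PV_HE_MIN = 256 entries per page -- matching the
 * "256 (64-bit) or 512 (32-bit)" minimum mentioned above (a 32-bit entry is
 * 8 bytes).
 */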
168 
169 static struct pv_hash_entry *pv_lock_hash;
170 static unsigned int pv_lock_hash_bits __read_mostly;
171 
172 /*
173  * Allocate memory for the PV qspinlock hash buckets
174  *
175  * This function should be called from the paravirt spinlock initialization
176  * routine.
177  */
178 void __init __pv_init_lock_hash(void)
179 {
180 	int pv_hash_size = ALIGN(4 * num_possible_cpus(), PV_HE_PER_LINE);
181 
182 	if (pv_hash_size < PV_HE_MIN)
183 		pv_hash_size = PV_HE_MIN;
184 
185 	/*
186 	 * Allocate space from bootmem which should be page-size aligned
187 	 * and hence cacheline aligned.
188 	 */
189 	pv_lock_hash = alloc_large_system_hash("PV qspinlock",
190 					       sizeof(struct pv_hash_entry),
191 					       pv_hash_size, 0, HASH_EARLY,
192 					       &pv_lock_hash_bits, NULL,
193 					       pv_hash_size, pv_hash_size);
194 }
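/*
 * Rough sizing sketch (hypothetical machine, not a fixed requirement): with
 * 64 possible CPUs, pv_hash_size = ALIGN(4 * 64, 4) = 256 entries, which
 * equals PV_HE_MIN on 64-bit and thus fits in a single 4K page.
 * alloc_large_system_hash() reports the final bucket count back through
 * pv_lock_hash_bits; in this example that would be 8 (1 << 8 == 256 buckets).
 */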
195 
196 #define for_each_hash_entry(he, offset, hash)						\
197 	for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0;	\
198 	     offset < (1 << pv_lock_hash_bits);						\
199 	     offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])
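/*
 * The iterator above first masks the hash down to a PV_HE_PER_LINE-aligned
 * bucket (the start of a cacheline-sized group of entries) and then probes
 * linearly, wrapping around modulo the table size -- i.e. plain open
 * addressing with linear probing, started on a cacheline boundary so the
 * common case stays within one cacheline.
 */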
200 
201 static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
202 {
203 	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
204 	struct pv_hash_entry *he;
205 	int hopcnt = 0;
206 
207 	for_each_hash_entry(he, offset, hash) {
208 		hopcnt++;
209 		if (!cmpxchg(&he->lock, NULL, lock)) {
210 			WRITE_ONCE(he->node, node);
211 			qstat_hop(hopcnt);
212 			return &he->lock;
213 		}
214 	}
215 	/*
216 	 * Hard assume there is a free entry for us.
217 	 *
218 	 * This is guaranteed by ensuring every blocked lock only ever consumes
219 	 * a single entry, and since we only have 4 nesting levels per CPU
220 	 * a single entry, and since we only have 4 nesting levels per CPU
221 	 * and allocated 4*num_possible_cpus(), this must be so.
221 	 *
222 	 * The single entry is guaranteed by having the lock owner unhash
223 	 * before it releases.
224 	 */
225 	BUG();
226 }
227 
228 static struct pv_node *pv_unhash(struct qspinlock *lock)
229 {
230 	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
231 	struct pv_hash_entry *he;
232 	struct pv_node *node;
233 
234 	for_each_hash_entry(he, offset, hash) {
235 		if (READ_ONCE(he->lock) == lock) {
236 			node = READ_ONCE(he->node);
237 			WRITE_ONCE(he->lock, NULL);
238 			return node;
239 		}
240 	}
241 	/*
242 	 * Hard assume we'll find an entry.
243 	 *
244 	 * This guarantees a limited lookup time and is itself guaranteed by
245 	 * having the lock owner do the unhash -- IFF the unlock sees the
246 	 * SLOW flag, there MUST be a hash entry.
247 	 */
248 	BUG();
249 }
250 
251 /*
252  * Return true when it is time to check the previous node and that node is
253  * not in a running state.
254  */
255 static inline bool
256 pv_wait_early(struct pv_node *prev, int loop)
257 {
258 
259 	if ((loop & PV_PREV_CHECK_MASK) != 0)
260 		return false;
261 
262 	return READ_ONCE(prev->state) != vcpu_running;
263 }
264 
265 /*
266  * Initialize the PV part of the mcs_spinlock node.
267  */
268 static void pv_init_node(struct mcs_spinlock *node)
269 {
270 	struct pv_node *pn = (struct pv_node *)node;
271 
272 	BUILD_BUG_ON(sizeof(struct pv_node) > 5*sizeof(struct mcs_spinlock));
273 
274 	pn->cpu = smp_processor_id();
275 	pn->state = vcpu_running;
276 }
277 
278 /*
279  * Wait for node->locked to become true, halt the vcpu after a short spin.
280  * pv_kick_node() is used to set _Q_SLOW_VAL and fill in hash table on its
281  * behalf.
282  */
283 static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
284 {
285 	struct pv_node *pn = (struct pv_node *)node;
286 	struct pv_node *pp = (struct pv_node *)prev;
287 	int waitcnt = 0;
288 	int loop;
289 	bool wait_early;
290 
291 	/* waitcnt processing will be compiled out if !QUEUED_LOCK_STAT */
292 	for (;; waitcnt++) {
293 		for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) {
294 			if (READ_ONCE(node->locked))
295 				return;
296 			if (pv_wait_early(pp, loop)) {
297 				wait_early = true;
298 				break;
299 			}
300 			cpu_relax();
301 		}
302 
303 		/*
304 		 * Order pn->state vs pn->locked thusly:
305 		 *
306 		 * [S] pn->state = vcpu_halted	  [S] next->locked = 1
307 		 *     MB			      MB
308 		 * [L] pn->locked		[RmW] pn->state = vcpu_hashed
309 		 *
310 		 * Matches the cmpxchg() from pv_kick_node().
311 		 */
312 		smp_store_mb(pn->state, vcpu_halted);
313 
314 		if (!READ_ONCE(node->locked)) {
315 			qstat_inc(qstat_pv_wait_node, true);
316 			qstat_inc(qstat_pv_wait_again, waitcnt);
317 			qstat_inc(qstat_pv_wait_early, wait_early);
318 			pv_wait(&pn->state, vcpu_halted);
319 		}
320 
321 		/*
322 		 * If pv_kick_node() changed us to vcpu_hashed, retain that
323 		 * value so that pv_wait_head_or_lock() knows to not also try
324 		 * to hash this lock.
325 		 */
326 		cmpxchg(&pn->state, vcpu_halted, vcpu_running);
327 
328 		/*
329 		 * If the locked flag is still not set after wakeup, it is a
330 		 * spurious wakeup and the vCPU should wait again. However,
331 		 * there is a pretty high overhead for CPU halting and kicking.
332 		 * So it is better to spin for a while in the hope that the
333 		 * MCS lock will be released soon.
334 		 */
335 		qstat_inc(qstat_pv_spurious_wakeup, !READ_ONCE(node->locked));
336 	}
337 
338 	/*
339 	 * By now our node->locked should be 1 and our caller will not actually
340 	 * spin-wait for it. We do however rely on our caller to do a
341 	 * load-acquire for us.
342 	 */
343 }
344 
345 /*
346  * Called after setting next->locked = 1 when we're the lock owner.
347  *
348  * Instead of waking up the waiters stuck in pv_wait_node(), advance their
349  * state so that they are waiting in pv_wait_head_or_lock(); this avoids a
350  * wake/sleep cycle.
351  */
352 static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
353 {
354 	struct pv_node *pn = (struct pv_node *)node;
355 	struct __qspinlock *l = (void *)lock;
356 
357 	/*
358 	 * If the vCPU is indeed halted, advance its state to match that of
359 	 * pv_wait_node(). If OTOH this fails, the vCPU was running and will
360 	 * observe its next->locked value and advance itself.
361 	 *
362 	 * Matches with smp_store_mb() and cmpxchg() in pv_wait_node()
363 	 */
364 	if (cmpxchg(&pn->state, vcpu_halted, vcpu_hashed) != vcpu_halted)
365 		return;
366 
367 	/*
368 	 * Put the lock into the hash table and set the _Q_SLOW_VAL.
369 	 *
370 	 * As this is the same vCPU that will check the _Q_SLOW_VAL value and
371 	 * the hash table later on at unlock time, no atomic instruction is
372 	 * needed.
373 	 */
374 	WRITE_ONCE(l->locked, _Q_SLOW_VAL);
375 	(void)pv_hash(lock, pn);
376 }
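/*
 * Note that pv_kick_node() never issues a pv_kick() itself: a successor that
 * was already halted is left sleeping.  The hash entry plus the _Q_SLOW_VAL
 * planted above make the eventual unlocker take the slowpath and pv_kick()
 * that vCPU directly from __pv_queued_spin_unlock_slowpath(), which is the
 * wake/sleep cycle saving described in the comment before this function.
 */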
377 
378 /*
379  * Wait for l->locked to become clear and acquire the lock;
380  * halt the vcpu after a short spin.
381  * __pv_queued_spin_unlock() will wake us.
382  *
383  * The current value of the lock will be returned for additional processing.
384  */
385 static u32
386 pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
387 {
388 	struct pv_node *pn = (struct pv_node *)node;
389 	struct __qspinlock *l = (void *)lock;
390 	struct qspinlock **lp = NULL;
391 	int waitcnt = 0;
392 	int loop;
393 
394 	/*
395 	 * If pv_kick_node() already advanced our state, we don't need to
396 	 * insert ourselves into the hash table anymore.
397 	 */
398 	if (READ_ONCE(pn->state) == vcpu_hashed)
399 		lp = (struct qspinlock **)1;
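	/*
	 * Any non-NULL value works as an "already hashed" marker here; the
	 * constant 1 is never dereferenced.  It merely makes the "if (!lp)"
	 * test below skip the pv_hash() + _Q_SLOW_VAL step that pv_kick_node()
	 * has already performed on our behalf.
	 */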
400 
401 	for (;; waitcnt++) {
402 		/*
403 		 * Set correct vCPU state to be used by queue node wait-early
404 		 * mechanism.
405 		 */
406 		WRITE_ONCE(pn->state, vcpu_running);
407 
408 		/*
409 		 * Set the pending bit in the active lock spinning loop to
410 		 * disable lock stealing before attempting to acquire the lock.
411 		 */
412 		set_pending(lock);
413 		for (loop = SPIN_THRESHOLD; loop; loop--) {
414 			if (trylock_clear_pending(lock))
415 				goto gotlock;
416 			cpu_relax();
417 		}
418 		clear_pending(lock);
419 
420 
421 		if (!lp) { /* ONCE */
422 			lp = pv_hash(lock, pn);
423 
424 			/*
425 			 * We must hash before setting _Q_SLOW_VAL, such that
426 			 * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()
427 			 * we'll be sure to be able to observe our hash entry.
428 			 *
429 			 *   [S] <hash>                 [RmW] l->locked == _Q_SLOW_VAL
430 			 *       MB                           RMB
431 			 * [RmW] l->locked = _Q_SLOW_VAL  [L] <unhash>
432 			 *
433 			 * Matches the smp_rmb() in __pv_queued_spin_unlock().
434 			 */
435 			if (xchg(&l->locked, _Q_SLOW_VAL) == 0) {
436 				/*
437 				 * The lock was free and now we own the lock.
438 				 * Change the lock value back to _Q_LOCKED_VAL
439 				 * and unhash the table.
440 				 */
441 				WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
442 				WRITE_ONCE(*lp, NULL);
443 				goto gotlock;
444 			}
445 		}
446 		WRITE_ONCE(pn->state, vcpu_halted);
447 		qstat_inc(qstat_pv_wait_head, true);
448 		qstat_inc(qstat_pv_wait_again, waitcnt);
449 		pv_wait(&l->locked, _Q_SLOW_VAL);
450 
451 		/*
452 		 * The unlocker should have freed the lock before kicking the
453 		 * CPU. So if the lock is still not free, it is a spurious
454 		 * wakeup or another vCPU has stolen the lock. The current
455 		 * vCPU should spin again.
456 		 */
457 		qstat_inc(qstat_pv_spurious_wakeup, READ_ONCE(l->locked));
458 	}
459 
460 	/*
461 	 * The cmpxchg() or xchg() call before coming here provides the
462 	 * acquire semantics for locking. The dummy ORing of _Q_LOCKED_VAL
463 	 * here is to indicate to the compiler that the value will always
464 	 * be nonzero to enable better code optimization.
465 	 */
466 gotlock:
467 	return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
468 }
469 
470 /*
471  * PV versions of the unlock fastpath and slowpath functions to be used
472  * instead of queued_spin_unlock().
473  */
474 __visible void
475 __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
476 {
477 	struct __qspinlock *l = (void *)lock;
478 	struct pv_node *node;
479 
480 	if (unlikely(locked != _Q_SLOW_VAL)) {
481 		WARN(!debug_locks_silent,
482 		     "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
483 		     (unsigned long)lock, atomic_read(&lock->val));
484 		return;
485 	}
486 
487 	/*
488 	 * A failed cmpxchg doesn't provide any memory-ordering guarantees,
489 	 * so we need a barrier to order the read of the node data in
490 	 * pv_unhash *after* we've read the lock being _Q_SLOW_VAL.
491 	 *
492 	 * Matches the xchg() in pv_wait_head_or_lock() setting _Q_SLOW_VAL.
493 	 */
494 	smp_rmb();
495 
496 	/*
497 	 * Since the above failed to release, this must be the SLOW path.
498 	 * Therefore start by looking up the blocked node and unhashing it.
499 	 */
500 	node = pv_unhash(lock);
501 
502 	/*
503 	 * Now that we have a reference to the (likely) blocked pv_node,
504 	 * release the lock.
505 	 */
506 	smp_store_release(&l->locked, 0);
507 
508 	/*
509 	 * At this point the memory pointed at by lock can be freed/reused,
510 	 * however we can still use the pv_node to kick the CPU.
511 	 * The other vCPU may not really be halted, but kicking an active
512 	 * vCPU is harmless other than the additional latency in completing
513 	 * the unlock.
514 	 */
515 	qstat_inc(qstat_pv_kick_unlock, true);
516 	pv_kick(node->cpu);
517 }
518 
519 /*
520  * Include the architecture specific callee-save thunk of the
521  * __pv_queued_spin_unlock(). This thunk is put together with
522  * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
523  * function close to each other sharing consecutive instruction cachelines.
524  * Alternatively, architecture specific version of __pv_queued_spin_unlock()
525  * can be defined.
526  */
527 #include <asm/qspinlock_paravirt.h>
528 
529 #ifndef __pv_queued_spin_unlock
530 __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
531 {
532 	struct __qspinlock *l = (void *)lock;
533 	u8 locked;
534 
535 	/*
536 	 * We must not unlock if SLOW, because in that case we must first
537 	 * unhash. Otherwise it would be possible to have multiple @lock
538 	 * entries, which would be BAD.
539 	 */
540 	locked = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
541 	if (likely(locked == _Q_LOCKED_VAL))
542 		return;
543 
544 	__pv_queued_spin_unlock_slowpath(lock, locked);
545 }
546 #endif /* __pv_queued_spin_unlock */
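/*
 * End-to-end value trace for a contended lock whose queue head had to sleep
 * (a sketch of the common slow case, not an exhaustive enumeration):
 *
 *   l->locked: 0 -> _Q_LOCKED_VAL        owner acquires the lock
 *              -> _Q_SLOW_VAL            queue head gives up spinning, hashes
 *                                        the lock and xchg()s in
 *                                        pv_wait_head_or_lock(), or
 *                                        pv_kick_node() does it on its behalf
 *              ... queue head pv_wait()s on l->locked == _Q_SLOW_VAL ...
 *              -> 0                      unlocker's cmpxchg() fails, so the
 *                                        slowpath unhashes, releases the lock
 *                                        and pv_kick()s the queue head
 */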
547