/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _GEN_PV_LOCK_SLOWPATH
#error "do not include this file"
#endif

#include <linux/hash.h>
#include <linux/bootmem.h>
#include <linux/debug_locks.h>

/*
 * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
 * of spinning them.
 *
 * This relies on the architecture to provide two paravirt hypercalls:
 *
 *   pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
 *   pv_kick(cpu)             -- wakes a suspended vcpu
 *
 * Using these we implement __pv_queued_spin_lock_slowpath() and
 * __pv_queued_spin_unlock() to replace native_queued_spin_lock_slowpath() and
 * native_queued_spin_unlock().
 */
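/*
 * As a concrete example (not part of this file): on x86 guests running under
 * KVM these hooks are typically wired up to kvm_wait() and kvm_kick_cpu() in
 * arch/x86/kernel/kvm.c via the paravirt lock ops, while Xen provides its own
 * implementations in arch/x86/xen/spinlock.c.
 */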

#define _Q_SLOW_VAL	(3U << _Q_LOCKED_OFFSET)
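/*
 * _Q_SLOW_VAL is written into the locked byte by the queue head once the lock
 * has been hashed. Because it differs from _Q_LOCKED_VAL while still keeping
 * the lock "held", the cmpxchg() in the unlock fastpath fails and the unlock
 * is diverted into __pv_queued_spin_unlock_slowpath(), which looks the lock
 * up in the hash table and kicks the waiting vCPU.
 */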

/*
 * Queue Node Adaptive Spinning
 *
 * A queue node vCPU will stop spinning if the vCPU in the previous node is
 * not running. The one lock stealing attempt allowed at slowpath entry
 * mitigates the slight slowdown that this aggressive wait-early mechanism
 * causes for non-overcommitted guests.
 *
 * The status of the previous node will be checked at a fixed interval
 * controlled by PV_PREV_CHECK_MASK. This is to ensure that we won't
 * pound on the cacheline of the previous node too heavily.
 */
#define PV_PREV_CHECK_MASK	0xff
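/*
 * With a mask of 0xff the previous node's state is sampled once every 256
 * iterations of the spin loop in pv_wait_node() (see pv_wait_early()).
 */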

/*
 * Queue node uses: vcpu_running & vcpu_halted.
 * Queue head uses: vcpu_running & vcpu_hashed.
 */
enum vcpu_state {
	vcpu_running = 0,
	vcpu_halted,		/* Used only in pv_wait_node */
	vcpu_hashed,		/* = pv_hash'ed + vcpu_halted */
};
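/*
 * State transitions, as implemented below:
 *
 *   running -> halted   in pv_wait_node(), before the queue node blocks
 *   halted  -> running  in pv_wait_node(), after waking up
 *   halted  -> hashed   in pv_kick_node(), done on the node's behalf by the
 *                       lock holder
 *   running -> hashed   in pv_wait_head_or_lock(), when the queue head hashes
 *                       the lock itself before blocking
 */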

struct pv_node {
	struct mcs_spinlock	mcs;
	struct mcs_spinlock	__res[3];

	int			cpu;
	u8			state;
};
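/*
 * The PV node is overlaid on the mcs_spinlock node handed to us by the
 * generic qspinlock slowpath (see the casts in pv_init_node() and friends);
 * cpu and state are the PV-only additions, and the BUILD_BUG_ON() in
 * pv_init_node() bounds the overall size of the overlay.
 */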

/*
 * Include queued spinlock statistics code
 */
#include "qspinlock_stat.h"

/*
 * Hybrid PV queued/unfair lock
 *
 * By replacing the regular queued_spin_trylock() with the function below,
 * it will be called once when a lock waiter enters the PV slowpath before
 * being queued.
 *
 * The pending bit is set by the queue head vCPU of the MCS wait queue in
 * pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
 * When that bit becomes visible to the incoming waiters, no lock stealing
 * is allowed. The function will return immediately to make the waiters
 * enter the MCS wait queue. So lock starvation shouldn't happen as long
 * as the queued mode vCPUs are actively running to set the pending bit
 * and hence disable lock stealing.
 *
 * When the pending bit isn't set, the lock waiters will stay in the unfair
 * mode spinning on the lock unless the MCS wait queue is empty. In this
 * case, the lock waiters will enter the queued mode slowpath trying to
 * become the queue head and set the pending bit.
 *
 * This hybrid PV queued/unfair lock combines the best attributes of a
 * queued lock (no lock starvation) and an unfair lock (good performance
 * on not heavily contended locks).
 */
#define queued_spin_trylock(l)	pv_hybrid_queued_unfair_trylock(l)
static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	/*
	 * Stay in unfair lock mode as long as queued mode waiters are
	 * present in the MCS wait queue but the pending bit isn't set.
	 */
	for (;;) {
		int val = atomic_read(&lock->val);

		if (!(val & _Q_LOCKED_PENDING_MASK) &&
		   (cmpxchg_acquire(&l->locked, 0, _Q_LOCKED_VAL) == 0)) {
			qstat_inc(qstat_pv_lock_stealing, true);
			return true;
		}
		if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
			break;

		cpu_relax();
	}

	return false;
}

/*
 * The pending bit is used by the queue head vCPU to indicate that it
 * is actively spinning on the lock and no lock stealing is allowed.
 */
#if _Q_PENDING_BITS == 8
static __always_inline void set_pending(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	WRITE_ONCE(l->pending, 1);
}

static __always_inline void clear_pending(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	WRITE_ONCE(l->pending, 0);
}

/*
 * The pending bit check in pv_hybrid_queued_unfair_trylock() isn't a memory
 * barrier. Therefore, an atomic cmpxchg_acquire() is used to acquire the
 * lock just to be sure that it will get it.
 */
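/*
 * trylock_clear_pending() below uses a single 16-bit cmpxchg on the combined
 * locked_pending halfword to turn (pending=1, locked=0) into
 * (pending=0, locked=1), i.e. it clears our own pending bit and acquires the
 * lock in one atomic step.
 */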
static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	return !READ_ONCE(l->locked) &&
	       (cmpxchg_acquire(&l->locked_pending, _Q_PENDING_VAL,
				_Q_LOCKED_VAL) == _Q_PENDING_VAL);
}
#else /* _Q_PENDING_BITS == 8 */
static __always_inline void set_pending(struct qspinlock *lock)
{
	atomic_or(_Q_PENDING_VAL, &lock->val);
}

static __always_inline void clear_pending(struct qspinlock *lock)
{
	atomic_andnot(_Q_PENDING_VAL, &lock->val);
}

static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
	int val = atomic_read(&lock->val);

	for (;;) {
		int old, new;

		if (val & _Q_LOCKED_MASK)
			break;

		/*
		 * Try to clear pending bit & set locked bit
		 */
		old = val;
		new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
		val = atomic_cmpxchg_acquire(&lock->val, old, new);

		if (val == old)
			return 1;
	}
	return 0;
}
#endif /* _Q_PENDING_BITS == 8 */

/*
 * Lock and MCS node addresses hash table for fast lookup
 *
 * Hashing is done on a per-cacheline basis to minimize the need to access
 * more than one cacheline.
 *
 * Dynamically allocate a hash table big enough to hold at least 4X the
 * number of possible cpus in the system. Allocation is done on page
 * granularity. So the minimum number of hash buckets should be at least
 * 256 (64-bit) or 512 (32-bit) to fully utilize a 4k page.
 *
 * Since we should not be holding locks from NMI context (very rare indeed) the
 * max load factor is 0.75, which is around the point where open addressing
 * breaks down.
 */
struct pv_hash_entry {
	struct qspinlock *lock;
	struct pv_node   *node;
};

#define PV_HE_PER_LINE	(SMP_CACHE_BYTES / sizeof(struct pv_hash_entry))
#define PV_HE_MIN	(PAGE_SIZE / sizeof(struct pv_hash_entry))
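/*
 * For instance, with 64-byte cache lines and 4 KB pages on a 64-bit kernel,
 * sizeof(struct pv_hash_entry) is 16 bytes, giving PV_HE_PER_LINE = 4 and
 * PV_HE_MIN = 256 buckets (the 256/512 figures quoted above).
 */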

static struct pv_hash_entry *pv_lock_hash;
static unsigned int pv_lock_hash_bits __read_mostly;

/*
 * Allocate memory for the PV qspinlock hash buckets
 *
 * This function should be called from the paravirt spinlock initialization
 * routine.
 */
void __init __pv_init_lock_hash(void)
{
	int pv_hash_size = ALIGN(4 * num_possible_cpus(), PV_HE_PER_LINE);

	if (pv_hash_size < PV_HE_MIN)
		pv_hash_size = PV_HE_MIN;

	/*
	 * Allocate space from bootmem which should be page-size aligned
	 * and hence cacheline aligned.
	 */
	pv_lock_hash = alloc_large_system_hash("PV qspinlock",
					       sizeof(struct pv_hash_entry),
					       pv_hash_size, 0,
					       HASH_EARLY | HASH_ZERO,
					       &pv_lock_hash_bits, NULL,
					       pv_hash_size, pv_hash_size);
}
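/*
 * alloc_large_system_hash() may round the requested size up to a power of
 * two; pv_lock_hash_bits receives log2 of the final number of buckets, which
 * is what for_each_hash_entry() below uses to mask the probe offset.
 */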

#define for_each_hash_entry(he, offset, hash)						\
	for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0;	\
	     offset < (1 << pv_lock_hash_bits);						\
	     offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])
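/*
 * The iteration starts at the first entry of the cacheline that the hash
 * value falls into (hash is rounded down to a PV_HE_PER_LINE boundary) and
 * then probes linearly, wrapping around the table, until a suitable entry is
 * found or the whole table has been scanned.
 */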

static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
{
	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
	struct pv_hash_entry *he;
	int hopcnt = 0;

	for_each_hash_entry(he, offset, hash) {
		hopcnt++;
		if (!cmpxchg(&he->lock, NULL, lock)) {
			WRITE_ONCE(he->node, node);
			qstat_hop(hopcnt);
			return &he->lock;
		}
	}
	/*
	 * Hard assume there is a free entry for us.
	 *
	 * This is guaranteed by ensuring every blocked lock only ever consumes
	 * a single entry, and since we only have 4 nesting levels per CPU
	 * and allocated 4 * num_possible_cpus() entries, this must be so.
	 *
	 * The single entry is guaranteed by having the lock owner unhash
	 * before it releases.
	 */
	BUG();
}

static struct pv_node *pv_unhash(struct qspinlock *lock)
{
	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
	struct pv_hash_entry *he;
	struct pv_node *node;

	for_each_hash_entry(he, offset, hash) {
		if (READ_ONCE(he->lock) == lock) {
			node = READ_ONCE(he->node);
			WRITE_ONCE(he->lock, NULL);
			return node;
		}
	}
	/*
	 * Hard assume we'll find an entry.
	 *
	 * This guarantees a limited lookup time and is itself guaranteed by
	 * having the lock owner do the unhash -- IFF the unlock sees the
	 * SLOW flag, there MUST be a hash entry.
	 */
	BUG();
}

/*
 * Return true when it is time to check the previous node and that node is
 * not in a running state (or its vCPU has been preempted).
 */
static inline bool
pv_wait_early(struct pv_node *prev, int loop)
{
	if ((loop & PV_PREV_CHECK_MASK) != 0)
		return false;

	return READ_ONCE(prev->state) != vcpu_running || vcpu_is_preempted(prev->cpu);
}

/*
 * Initialize the PV part of the mcs_spinlock node.
 */
static void pv_init_node(struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;

	BUILD_BUG_ON(sizeof(struct pv_node) > 5*sizeof(struct mcs_spinlock));

	pn->cpu = smp_processor_id();
	pn->state = vcpu_running;
}

/*
 * Wait for node->locked to become true, halt the vcpu after a short spin.
 * pv_kick_node() is used to set _Q_SLOW_VAL and fill in the hash table on
 * its behalf.
 */
static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
{
	struct pv_node *pn = (struct pv_node *)node;
	struct pv_node *pp = (struct pv_node *)prev;
	int loop;
	bool wait_early;

	for (;;) {
		for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) {
			if (READ_ONCE(node->locked))
				return;
			if (pv_wait_early(pp, loop)) {
				wait_early = true;
				break;
			}
			cpu_relax();
		}

		/*
		 * Order pn->state vs pn->locked thusly:
		 *
		 * [S] pn->state = vcpu_halted	  [S] next->locked = 1
		 *     MB			      MB
		 * [L] pn->locked		[RmW] pn->state = vcpu_hashed
		 *
		 * Matches the cmpxchg() from pv_kick_node().
		 */
		smp_store_mb(pn->state, vcpu_halted);

		if (!READ_ONCE(node->locked)) {
			qstat_inc(qstat_pv_wait_node, true);
			qstat_inc(qstat_pv_wait_early, wait_early);
			pv_wait(&pn->state, vcpu_halted);
		}

		/*
		 * If pv_kick_node() changed us to vcpu_hashed, retain that
		 * value so that pv_wait_head_or_lock() knows to not also try
		 * to hash this lock.
		 */
		cmpxchg(&pn->state, vcpu_halted, vcpu_running);

		/*
		 * If the locked flag is still not set after wakeup, it is a
		 * spurious wakeup and the vCPU should wait again. However,
		 * there is a pretty high overhead for CPU halting and kicking.
		 * So it is better to spin for a while in the hope that the
		 * MCS lock will be released soon.
		 */
		qstat_inc(qstat_pv_spurious_wakeup, !READ_ONCE(node->locked));
	}

	/*
	 * By now our node->locked should be 1 and our caller will not actually
	 * spin-wait for it. We do however rely on our caller to do a
	 * load-acquire for us.
	 */
}

/*
 * Called after setting next->locked = 1 when we're the lock owner.
 *
 * Instead of waking the waiters stuck in pv_wait_node(), advance their state
 * such that they're waiting in pv_wait_head_or_lock(); this avoids a
 * wake/sleep cycle.
 */
static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;
	struct __qspinlock *l = (void *)lock;

	/*
	 * If the vCPU is indeed halted, advance its state to match that of
	 * pv_wait_node(). If OTOH this fails, the vCPU was running and will
	 * observe its next->locked value and advance itself.
	 *
	 * Matches with smp_store_mb() and cmpxchg() in pv_wait_node()
	 *
	 * The write to next->locked in arch_mcs_spin_unlock_contended()
	 * must be ordered before the read of pn->state in the cmpxchg()
	 * below for the code to work correctly. To guarantee full ordering
	 * irrespective of the success or failure of the cmpxchg(),
	 * a relaxed version with explicit barrier is used. The control
	 * dependency will order the reading of pn->state before any
	 * subsequent writes.
	 */
	smp_mb__before_atomic();
	if (cmpxchg_relaxed(&pn->state, vcpu_halted, vcpu_hashed)
	    != vcpu_halted)
		return;

	/*
	 * Put the lock into the hash table and set the _Q_SLOW_VAL.
	 *
	 * As this is the same vCPU that will check the _Q_SLOW_VAL value and
	 * the hash table later on at unlock time, no atomic instruction is
	 * needed.
	 */
	WRITE_ONCE(l->locked, _Q_SLOW_VAL);
	(void)pv_hash(lock, pn);
}

/*
 * Wait for l->locked to become clear and acquire the lock;
 * halt the vcpu after a short spin.
 * __pv_queued_spin_unlock() will wake us.
 *
 * The current value of the lock will be returned for additional processing.
 */
static u32
pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;
	struct __qspinlock *l = (void *)lock;
	struct qspinlock **lp = NULL;
	int waitcnt = 0;
	int loop;

	/*
	 * If pv_kick_node() already advanced our state, we don't need to
	 * insert ourselves into the hash table anymore.
	 */
	if (READ_ONCE(pn->state) == vcpu_hashed)
		lp = (struct qspinlock **)1;

	/*
	 * Tracking # of slowpath locking operations
	 */
	qstat_inc(qstat_pv_lock_slowpath, true);

	for (;; waitcnt++) {
		/*
		 * Set correct vCPU state to be used by queue node wait-early
		 * mechanism.
		 */
		WRITE_ONCE(pn->state, vcpu_running);

		/*
		 * Set the pending bit in the active lock spinning loop to
		 * disable lock stealing before attempting to acquire the lock.
		 */
		set_pending(lock);
		for (loop = SPIN_THRESHOLD; loop; loop--) {
			if (trylock_clear_pending(lock))
				goto gotlock;
			cpu_relax();
		}
		clear_pending(lock);

		if (!lp) { /* ONCE */
			lp = pv_hash(lock, pn);

			/*
			 * We must hash before setting _Q_SLOW_VAL, such that
			 * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()
			 * we'll be sure to be able to observe our hash entry.
			 *
			 *   [S] <hash>                 [RmW] l->locked == _Q_SLOW_VAL
			 *       MB                           RMB
			 * [RmW] l->locked = _Q_SLOW_VAL  [L] <unhash>
			 *
			 * Matches the smp_rmb() in __pv_queued_spin_unlock().
			 */
			if (xchg(&l->locked, _Q_SLOW_VAL) == 0) {
				/*
				 * The lock was free and now we own the lock.
				 * Change the lock value back to _Q_LOCKED_VAL
				 * and unhash the table.
				 */
				WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
				WRITE_ONCE(*lp, NULL);
				goto gotlock;
			}
		}
		WRITE_ONCE(pn->state, vcpu_hashed);
		qstat_inc(qstat_pv_wait_head, true);
		qstat_inc(qstat_pv_wait_again, waitcnt);
		pv_wait(&l->locked, _Q_SLOW_VAL);

		/*
		 * Because of lock stealing, the queue head vCPU may not be
		 * able to acquire the lock before it has to wait again.
		 */
	}

	/*
	 * The cmpxchg() or xchg() call before coming here provides the
	 * acquire semantics for locking. The dummy ORing of _Q_LOCKED_VAL
	 * here is to indicate to the compiler that the value will always
	 * be nonzero to enable better code optimization.
	 */
gotlock:
	return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
}

/*
 * PV versions of the unlock fastpath and slowpath functions to be used
 * instead of queued_spin_unlock().
 */
__visible void
__pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
{
	struct __qspinlock *l = (void *)lock;
	struct pv_node *node;

	if (unlikely(locked != _Q_SLOW_VAL)) {
		WARN(!debug_locks_silent,
		     "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
		     (unsigned long)lock, atomic_read(&lock->val));
		return;
	}

	/*
	 * A failed cmpxchg doesn't provide any memory-ordering guarantees,
	 * so we need a barrier to order the read of the node data in
	 * pv_unhash *after* we've read the lock being _Q_SLOW_VAL.
	 *
	 * Matches the xchg() in pv_wait_head_or_lock() setting _Q_SLOW_VAL.
	 */
	smp_rmb();

	/*
	 * Since the above failed to release, this must be the SLOW path.
	 * Therefore start by looking up the blocked node and unhashing it.
	 */
	node = pv_unhash(lock);

	/*
	 * Now that we have a reference to the (likely) blocked pv_node,
	 * release the lock.
	 */
	smp_store_release(&l->locked, 0);

	/*
	 * At this point the memory pointed at by lock can be freed/reused,
	 * however we can still use the pv_node to kick the CPU.
	 * The other vCPU may not really be halted, but kicking an active
	 * vCPU is harmless other than the additional latency in completing
	 * the unlock.
	 */
	qstat_inc(qstat_pv_kick_unlock, true);
	pv_kick(node->cpu);
}

/*
 * Include the architecture-specific callee-save thunk of
 * __pv_queued_spin_unlock(). The thunk is kept next to
 * __pv_queued_spin_unlock() so that the callee-save thunk and the real unlock
 * function share consecutive instruction cachelines. Alternatively, an
 * architecture-specific version of __pv_queued_spin_unlock() can be defined
 * instead.
 */
#include <asm/qspinlock_paravirt.h>

#ifndef __pv_queued_spin_unlock
__visible void __pv_queued_spin_unlock(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;
	u8 locked;

	/*
	 * We must not unlock if SLOW, because in that case we must first
	 * unhash. Otherwise it would be possible to have multiple @lock
	 * entries, which would be BAD.
	 */
	locked = cmpxchg_release(&l->locked, _Q_LOCKED_VAL, 0);
	if (likely(locked == _Q_LOCKED_VAL))
		return;

	__pv_queued_spin_unlock_slowpath(lock, locked);
}
#endif /* __pv_queued_spin_unlock */