/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _GEN_PV_LOCK_SLOWPATH
#error "do not include this file"
#endif

#include <linux/hash.h>
#include <linux/memblock.h>
#include <linux/debug_locks.h>

/*
 * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
 * of spinning them.
 *
 * This relies on the architecture to provide two paravirt hypercalls:
 *
 *   pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
 *   pv_kick(cpu)             -- wakes a suspended vcpu
 *
 * Using these we implement __pv_queued_spin_lock_slowpath() and
 * __pv_queued_spin_unlock() to replace native_queued_spin_lock_slowpath() and
 * native_queued_spin_unlock().
 */
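
/*
 * As an illustrative sketch only (not any particular architecture's code),
 * a pv_wait() implementation must re-check *ptr against val right before
 * halting, or a pv_kick() issued in between would be lost:
 *
 *	static void example_pv_wait(u8 *ptr, u8 val)
 *	{
 *		local_irq_disable();
 *		if (READ_ONCE(*ptr) == val)
 *			halt();		// resumed by pv_kick() to this cpu
 *		local_irq_enable();
 *	}
 *
 * Here example_pv_wait() and halt() are hypothetical names; x86's
 * kvm_wait() follows this same check-then-halt pattern.
 */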

#define _Q_SLOW_VAL	(3U << _Q_LOCKED_OFFSET)

/*
 * Queue Node Adaptive Spinning
 *
 * A queue node vCPU will stop spinning if the vCPU in the previous node is
 * not running. The one lock stealing attempt allowed at slowpath entry
 * mitigates the slight slowdown that this aggressive wait-early mechanism
 * causes for a non-overcommitted guest.
 *
 * The status of the previous node will be checked at a fixed interval
 * controlled by PV_PREV_CHECK_MASK. This is to ensure that we won't
 * pound on the cacheline of the previous node too heavily.
 */
#define PV_PREV_CHECK_MASK	0xff
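
/*
 * For example, with the 0xff mask, (loop & PV_PREV_CHECK_MASK) is zero once
 * every 256 iterations, so pv_wait_early() below re-reads prev->state at
 * most once per 256 spins of the wait loop.
 */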

/*
 * Queue node uses: vcpu_running & vcpu_halted.
 * Queue head uses: vcpu_running & vcpu_hashed.
 */
enum vcpu_state {
	vcpu_running = 0,
	vcpu_halted,		/* Used only in pv_wait_node */
	vcpu_hashed,		/* = pv_hash'ed + vcpu_halted */
};
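
/*
 * In summary, the state transitions driven by the code below are:
 *
 *	vcpu_running -> vcpu_halted:
 *		pv_wait_node() is about to halt the vcpu.
 *	vcpu_halted -> vcpu_running:
 *		pv_wait_node() woke up and resumed spinning.
 *	vcpu_halted -> vcpu_hashed:
 *		pv_kick_node() hashed the lock on the waiter's behalf.
 *	vcpu_running -> vcpu_hashed:
 *		pv_wait_head_or_lock() hashed the lock itself before halting.
 */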

struct pv_node {
	struct mcs_spinlock	mcs;
	int			cpu;
	u8			state;
};

/*
 * Hybrid PV queued/unfair lock
 *
 * By replacing the regular queued_spin_trylock() with the function below,
 * it will be called once when a lock waiter enters the PV slowpath before
 * being queued.
 *
 * The pending bit is set by the queue head vCPU of the MCS wait queue in
 * pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
 * When that bit becomes visible to the incoming waiters, no lock stealing
 * is allowed. The function will return immediately to make the waiters
 * enter the MCS wait queue. So lock starvation shouldn't happen as long
 * as the queued mode vCPUs are actively running to set the pending bit
 * and hence disable lock stealing.
 *
 * When the pending bit isn't set, the lock waiters will stay in the unfair
 * mode spinning on the lock unless the MCS wait queue is empty. In this
 * case, the lock waiters will enter the queued mode slowpath trying to
 * become the queue head and set the pending bit.
 *
 * This hybrid PV queued/unfair lock combines the best attributes of a
 * queued lock (no lock starvation) and an unfair lock (good performance
 * on not heavily contended locks).
 */
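
/*
 * For reference (see asm-generic/qspinlock_types.h), with
 * _Q_PENDING_BITS == 8 the 32-bit lock word tested below decomposes as:
 *
 *	bits  0- 7: locked byte
 *	bits  8-15: pending byte
 *	bits 16-17: tail index
 *	bits 18-31: tail cpu (+1)
 *
 * so _Q_LOCKED_PENDING_MASK covers bits 0-15 and _Q_TAIL_MASK bits 16-31.
 */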
#define queued_spin_trylock(l)	pv_hybrid_queued_unfair_trylock(l)
static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
{
	/*
	 * Stay in unfair lock mode as long as queued mode waiters are
	 * present in the MCS wait queue but the pending bit isn't set.
	 */
	for (;;) {
		int val = atomic_read(&lock->val);

		if (!(val & _Q_LOCKED_PENDING_MASK) &&
		   (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
			lockevent_inc(pv_lock_stealing);
			return true;
		}
		if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
			break;

		cpu_relax();
	}

	return false;
}
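
/*
 * Each pass through the loop above thus lands in one of three cases:
 *
 *	lock free and pending clear	-> attempt to steal the lock
 *	queue empty or pending set	-> give up, return false and queue
 *	locked, queued, pending clear	-> keep spinning unfairly
 */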

/*
 * The pending bit is used by the queue head vCPU to indicate that it
 * is actively spinning on the lock and no lock stealing is allowed.
 */
#if _Q_PENDING_BITS == 8
static __always_inline void set_pending(struct qspinlock *lock)
{
	WRITE_ONCE(lock->pending, 1);
}

/*
 * The pending bit check in pv_hybrid_queued_unfair_trylock() isn't a memory
 * barrier. Therefore, an atomic cmpxchg_acquire() is used to acquire the
 * lock, to make sure that we really do get it.
 */
static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
	return !READ_ONCE(lock->locked) &&
	       (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,
				_Q_LOCKED_VAL) == _Q_PENDING_VAL);
}
#else /* _Q_PENDING_BITS == 8 */
static __always_inline void set_pending(struct qspinlock *lock)
{
	atomic_or(_Q_PENDING_VAL, &lock->val);
}

static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
	int val = atomic_read(&lock->val);

	for (;;) {
		int old, new;

		if (val & _Q_LOCKED_MASK)
			break;

		/*
		 * Try to clear pending bit & set locked bit
		 */
		old = val;
		new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
		val = atomic_cmpxchg_acquire(&lock->val, old, new);

		if (val == old)
			return 1;
	}
	return 0;
}
#endif /* _Q_PENDING_BITS == 8 */
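
/*
 * The _Q_PENDING_BITS == 8 variants above exploit the fact that the locked
 * and pending bytes form a contiguous 16-bit locked_pending field, so one
 * halfword cmpxchg_acquire() can clear pending and set locked in a single
 * step; the fallback variants must instead cmpxchg the whole 32-bit word.
 */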

/*
 * Lock and MCS node addresses hash table for fast lookup
 *
 * Hashing is done on a per-cacheline basis to minimize the need to access
 * more than one cacheline.
 *
 * Dynamically allocate a hash table big enough to hold at least 4X the
 * number of possible cpus in the system. Allocation is done with page
 * granularity. So the minimum number of hash buckets should be at least
 * 256 (64-bit) or 512 (32-bit) to fully utilize a 4k page.
 *
 * Since we should not be holding locks from NMI context (very rare indeed),
 * the max load factor is 0.75, which is around the point where open
 * addressing breaks down.
 */
struct pv_hash_entry {
	struct qspinlock *lock;
	struct pv_node   *node;
};

#define PV_HE_PER_LINE	(SMP_CACHE_BYTES / sizeof(struct pv_hash_entry))
#define PV_HE_MIN	(PAGE_SIZE / sizeof(struct pv_hash_entry))
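
/*
 * Worked example: on 64-bit with 64-byte cachelines, each pv_hash_entry is
 * 16 bytes, so PV_HE_PER_LINE is 4 and PV_HE_MIN is 4096/16 = 256 for a 4k
 * page; a 128-cpu system would then request ALIGN(4 * 128, 4) = 512 hash
 * entries in __pv_init_lock_hash() below.
 */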

static struct pv_hash_entry *pv_lock_hash;
static unsigned int pv_lock_hash_bits __read_mostly;

/*
 * Allocate memory for the PV qspinlock hash buckets
 *
 * This function should be called from the paravirt spinlock initialization
 * routine.
 */
void __init __pv_init_lock_hash(void)
{
	int pv_hash_size = ALIGN(4 * num_possible_cpus(), PV_HE_PER_LINE);

	if (pv_hash_size < PV_HE_MIN)
		pv_hash_size = PV_HE_MIN;

	/*
	 * Allocate space from bootmem which should be page-size aligned
	 * and hence cacheline aligned.
	 */
	pv_lock_hash = alloc_large_system_hash("PV qspinlock",
					       sizeof(struct pv_hash_entry),
					       pv_hash_size, 0,
					       HASH_EARLY | HASH_ZERO,
					       &pv_lock_hash_bits, NULL,
					       pv_hash_size, pv_hash_size);
}

#define for_each_hash_entry(he, offset, hash)						\
	for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0;	\
	     offset < (1 << pv_lock_hash_bits);						\
	     offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])
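
/*
 * Note that the starting hash is first rounded down to a cacheline boundary
 * (hash &= ~(PV_HE_PER_LINE - 1)), so all entries sharing a line are probed
 * before the search spills into the next one, and the index wraps around
 * the table via the ((1 << pv_lock_hash_bits) - 1) mask -- plain linear
 * open addressing.
 */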

static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
{
	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
	struct pv_hash_entry *he;
	int hopcnt = 0;

	for_each_hash_entry(he, offset, hash) {
		hopcnt++;
		if (!cmpxchg(&he->lock, NULL, lock)) {
			WRITE_ONCE(he->node, node);
			lockevent_pv_hop(hopcnt);
			return &he->lock;
		}
	}
	/*
	 * Hard assume there is a free entry for us.
	 *
	 * This is guaranteed by ensuring every blocked lock only ever consumes
	 * a single entry, and since we only have 4 nesting levels per CPU
	 * and allocated 4*nr_possible_cpus(), this must be so.
	 *
	 * The single entry is guaranteed by having the lock owner unhash
	 * before it releases.
	 */
	BUG();
}

static struct pv_node *pv_unhash(struct qspinlock *lock)
{
	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
	struct pv_hash_entry *he;
	struct pv_node *node;

	for_each_hash_entry(he, offset, hash) {
		if (READ_ONCE(he->lock) == lock) {
			node = READ_ONCE(he->node);
			WRITE_ONCE(he->lock, NULL);
			return node;
		}
	}
	/*
	 * Hard assume we'll find an entry.
	 *
	 * This guarantees a limited lookup time and is itself guaranteed by
	 * having the lock owner do the unhash -- IFF the unlock sees the
	 * SLOW flag, there MUST be a hash entry.
	 */
	BUG();
}

/*
 * Return true if it is time to check the previous node and that node is
 * not in a running state.
 */
static inline bool
pv_wait_early(struct pv_node *prev, int loop)
{
	if ((loop & PV_PREV_CHECK_MASK) != 0)
		return false;

	return READ_ONCE(prev->state) != vcpu_running;
}

/*
 * Initialize the PV part of the mcs_spinlock node.
 */
static void pv_init_node(struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;

	BUILD_BUG_ON(sizeof(struct pv_node) > sizeof(struct qnode));

	pn->cpu = smp_processor_id();
	pn->state = vcpu_running;
}

/*
 * Wait for node->locked to become true, halt the vcpu after a short spin.
 * pv_kick_node() is used to set _Q_SLOW_VAL and fill in the hash table on
 * its behalf.
 */
static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
{
	struct pv_node *pn = (struct pv_node *)node;
	struct pv_node *pp = (struct pv_node *)prev;
	int loop;
	bool wait_early;

	for (;;) {
		for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) {
			if (READ_ONCE(node->locked))
				return;
			if (pv_wait_early(pp, loop)) {
				wait_early = true;
				break;
			}
			cpu_relax();
		}

		/*
		 * Order pn->state vs pn->locked thusly:
		 *
		 * [S] pn->state = vcpu_halted	  [S] next->locked = 1
		 *     MB			      MB
		 * [L] pn->locked		[RmW] pn->state = vcpu_hashed
		 *
		 * Matches the cmpxchg() from pv_kick_node().
		 */
		smp_store_mb(pn->state, vcpu_halted);

		if (!READ_ONCE(node->locked)) {
			lockevent_inc(pv_wait_node);
			lockevent_cond_inc(pv_wait_early, wait_early);
			pv_wait(&pn->state, vcpu_halted);
		}

		/*
		 * If pv_kick_node() changed us to vcpu_hashed, retain that
		 * value so that pv_wait_head_or_lock() knows to not also try
		 * to hash this lock.
		 */
		cmpxchg(&pn->state, vcpu_halted, vcpu_running);

		/*
		 * If the locked flag is still not set after wakeup, it is a
		 * spurious wakeup and the vCPU should wait again. However,
		 * there is a pretty high overhead for CPU halting and kicking.
		 * So it is better to spin for a while in the hope that the
		 * MCS lock will be released soon.
		 */
		lockevent_cond_inc(pv_spurious_wakeup,
				  !READ_ONCE(node->locked));
	}

	/*
	 * By now our node->locked should be 1 and our caller will not actually
	 * spin-wait for it. We do however rely on our caller to do a
	 * load-acquire for us.
	 */
}

/*
 * Called after setting next->locked = 1 when we're the lock owner.
 *
 * Instead of waking the waiters stuck in pv_wait_node(), advance their state
 * such that they're waiting in pv_wait_head_or_lock(); this avoids a
 * wake/sleep cycle.
 */
static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;

	/*
	 * If the vCPU is indeed halted, advance its state to match that of
	 * pv_wait_node(). If OTOH this fails, the vCPU was running and will
	 * observe its next->locked value and advance itself.
	 *
	 * Matches with smp_store_mb() and cmpxchg() in pv_wait_node()
	 *
	 * The write to next->locked in arch_mcs_spin_unlock_contended()
	 * must be ordered before the read of pn->state in the cmpxchg()
	 * below for the code to work correctly. To guarantee full ordering
	 * irrespective of the success or failure of the cmpxchg(),
	 * a relaxed version with explicit barrier is used. The control
	 * dependency will order the reading of pn->state before any
	 * subsequent writes.
	 */
	smp_mb__before_atomic();
	if (cmpxchg_relaxed(&pn->state, vcpu_halted, vcpu_hashed)
	    != vcpu_halted)
		return;

	/*
	 * Put the lock into the hash table and set the _Q_SLOW_VAL.
	 *
	 * As this is the same vCPU that will check the _Q_SLOW_VAL value and
	 * the hash table later on at unlock time, no atomic instruction is
	 * needed.
	 */
	WRITE_ONCE(lock->locked, _Q_SLOW_VAL);
	(void)pv_hash(lock, pn);
}

/*
 * Wait for l->locked to become clear and acquire the lock;
 * halt the vcpu after a short spin.
 * __pv_queued_spin_unlock() will wake us.
 *
 * The current value of the lock will be returned for additional processing.
 */
static u32
pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;
	struct qspinlock **lp = NULL;
	int waitcnt = 0;
	int loop;

	/*
	 * If pv_kick_node() already advanced our state, we don't need to
	 * insert ourselves into the hash table anymore.
	 */
	if (READ_ONCE(pn->state) == vcpu_hashed)
		lp = (struct qspinlock **)1;

	/*
	 * Tracking # of slowpath locking operations
	 */
	lockevent_inc(lock_slowpath);

	for (;; waitcnt++) {
		/*
		 * Set correct vCPU state to be used by queue node wait-early
		 * mechanism.
		 */
		WRITE_ONCE(pn->state, vcpu_running);

		/*
		 * Set the pending bit in the active lock spinning loop to
		 * disable lock stealing before attempting to acquire the lock.
		 */
		set_pending(lock);
		for (loop = SPIN_THRESHOLD; loop; loop--) {
			if (trylock_clear_pending(lock))
				goto gotlock;
			cpu_relax();
		}
		clear_pending(lock);

		if (!lp) { /* ONCE */
			lp = pv_hash(lock, pn);

			/*
			 * We must hash before setting _Q_SLOW_VAL, such that
			 * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()
			 * we'll be sure to be able to observe our hash entry.
			 *
			 *   [S] <hash>                 [RmW] l->locked == _Q_SLOW_VAL
			 *       MB                           RMB
			 * [RmW] l->locked = _Q_SLOW_VAL  [L] <unhash>
			 *
			 * Matches the smp_rmb() in __pv_queued_spin_unlock().
			 */
			if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
				/*
				 * The lock was free and now we own the lock.
				 * Change the lock value back to _Q_LOCKED_VAL
				 * and unhash the table.
				 */
				WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
				WRITE_ONCE(*lp, NULL);
				goto gotlock;
			}
		}
		WRITE_ONCE(pn->state, vcpu_hashed);
		lockevent_inc(pv_wait_head);
		lockevent_cond_inc(pv_wait_again, waitcnt);
		pv_wait(&lock->locked, _Q_SLOW_VAL);

		/*
		 * Because of lock stealing, the queue head vCPU may not be
		 * able to acquire the lock before it has to wait again.
		 */
	}

	/*
	 * The cmpxchg() or xchg() call before coming here provides the
	 * acquire semantics for locking. The dummy ORing of _Q_LOCKED_VAL
	 * here is to indicate to the compiler that the value will always
	 * be nonzero to enable better code optimization.
	 */
gotlock:
	return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
}

/*
 * Include the architecture specific callee-save thunk of the
 * __pv_queued_spin_unlock(). This thunk is put together with
 * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
 * function close to each other sharing consecutive instruction cachelines.
 * Alternatively, an architecture-specific version of __pv_queued_spin_unlock()
 * can be defined.
 */
#include <asm/qspinlock_paravirt.h>

/*
 * PV versions of the unlock fastpath and slowpath functions to be used
 * instead of queued_spin_unlock().
 */
__visible __lockfunc void
__pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
{
	struct pv_node *node;

	if (unlikely(locked != _Q_SLOW_VAL)) {
		WARN(!debug_locks_silent,
		     "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
		     (unsigned long)lock, atomic_read(&lock->val));
		return;
	}

	/*
	 * A failed cmpxchg doesn't provide any memory-ordering guarantees,
	 * so we need a barrier to order the read of the node data in
	 * pv_unhash *after* we've read the lock being _Q_SLOW_VAL.
	 *
	 * Matches the xchg() in pv_wait_head_or_lock() setting _Q_SLOW_VAL.
	 */
	smp_rmb();

	/*
	 * Since the above failed to release, this must be the SLOW path.
	 * Therefore start by looking up the blocked node and unhashing it.
	 */
	node = pv_unhash(lock);

	/*
	 * Now that we have a reference to the (likely) blocked pv_node,
	 * release the lock.
	 */
	smp_store_release(&lock->locked, 0);

	/*
	 * At this point the memory pointed at by lock can be freed/reused,
	 * however we can still use the pv_node to kick the CPU.
	 * The other vCPU may not really be halted, but kicking an active
	 * vCPU is harmless other than the additional latency in completing
	 * the unlock.
	 */
	lockevent_inc(pv_kick_unlock);
	pv_kick(node->cpu);
}

#ifndef __pv_queued_spin_unlock
__visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
{
	u8 locked;

	/*
	 * We must not unlock if SLOW, because in that case we must first
	 * unhash. Otherwise it would be possible to have multiple @lock
	 * entries, which would be BAD.
	 */
	locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);
	if (likely(locked == _Q_LOCKED_VAL))
		return;

	__pv_queued_spin_unlock_slowpath(lock, locked);
}
#endif /* __pv_queued_spin_unlock */
563