#ifndef _GEN_PV_LOCK_SLOWPATH
#error "do not include this file"
#endif

#include <linux/hash.h>
#include <linux/bootmem.h>
#include <linux/debug_locks.h>

/*
 * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
 * of spinning them.
 *
 * This relies on the architecture to provide two paravirt hypercalls:
 *
 *   pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
 *   pv_kick(cpu)             -- wakes a suspended vcpu
 *
 * Using these we implement __pv_queued_spin_lock_slowpath() and
 * __pv_queued_spin_unlock() to replace native_queued_spin_lock_slowpath() and
 * native_queued_spin_unlock().
 */
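
/*
 * Illustration only, not part of this file: given the pv_wait()/pv_kick()
 * semantics above, a minimal architecture backend could look roughly like
 * the sketch below. The hypercall wrappers hv_halt_self() and hv_kick_vcpu()
 * are made-up names; real backends (x86 KVM/Xen, for instance) also have to
 * deal with interrupt masking so that a kick arriving between the recheck
 * and the halt is not lost.
 *
 *	static void example_pv_wait(u8 *ptr, u8 val)
 *	{
 *		if (READ_ONCE(*ptr) != val)
 *			return;			// condition already changed
 *		hv_halt_self();			// block this vCPU in the hypervisor
 *	}
 *
 *	static void example_pv_kick(int cpu)
 *	{
 *		hv_kick_vcpu(cpu);		// wake the halted vCPU
 *	}
 */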

#define _Q_SLOW_VAL	(3U << _Q_LOCKED_OFFSET)

/*
 * Queue Node Adaptive Spinning
 *
 * A queue node vCPU will stop spinning if the vCPU in the previous node is
 * not running. The one lock stealing attempt allowed at slowpath entry
 * mitigates the slight slowdown for non-overcommitted guests with this
 * aggressive wait-early mechanism.
 *
 * The status of the previous node will be checked at fixed interval
 * controlled by PV_PREV_CHECK_MASK. This is to ensure that we won't
 * pound on the cacheline of the previous node too heavily.
 */
#define PV_PREV_CHECK_MASK	0xff

/*
 * Queue node uses: vcpu_running & vcpu_halted.
 * Queue head uses: vcpu_running & vcpu_hashed.
 */
enum vcpu_state {
	vcpu_running = 0,
	vcpu_halted,		/* Used only in pv_wait_node */
	vcpu_hashed,		/* = pv_hash'ed + vcpu_halted */
};

struct pv_node {
	struct mcs_spinlock	mcs;
	struct mcs_spinlock	__res[3];

	int			cpu;
	u8			state;
};

/*
 * Include queued spinlock statistics code
 */
#include "qspinlock_stat.h"

/*
 * By replacing the regular queued_spin_trylock() with the function below,
 * it will be called once when a lock waiter enters the PV slowpath before
 * being queued. By allowing one lock stealing attempt here when the pending
 * bit is off, it helps to reduce the performance impact of lock waiter
 * preemption without the drawback of lock starvation.
 */
#define queued_spin_trylock(l)	pv_queued_spin_steal_lock(l)
static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;
	int ret = !(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
		   (cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0);

	qstat_inc(qstat_pv_lock_stealing, ret);
	return ret;
}

/*
 * The pending bit is used by the queue head vCPU to indicate that it
 * is actively spinning on the lock and no lock stealing is allowed.
 */
#if _Q_PENDING_BITS == 8
static __always_inline void set_pending(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	WRITE_ONCE(l->pending, 1);
}

static __always_inline void clear_pending(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	WRITE_ONCE(l->pending, 0);
}

/*
 * The pending bit check in pv_queued_spin_steal_lock() isn't a memory
 * barrier. Therefore, an atomic cmpxchg() is used to acquire the lock
 * just to be sure that it will get it.
 */
static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	return !READ_ONCE(l->locked) &&
	       (cmpxchg(&l->locked_pending, _Q_PENDING_VAL, _Q_LOCKED_VAL)
			== _Q_PENDING_VAL);
}
#else /* _Q_PENDING_BITS == 8 */
static __always_inline void set_pending(struct qspinlock *lock)
{
	atomic_or(_Q_PENDING_VAL, &lock->val);
}

static __always_inline void clear_pending(struct qspinlock *lock)
{
	atomic_andnot(_Q_PENDING_VAL, &lock->val);
}

static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
	int val = atomic_read(&lock->val);

	for (;;) {
		int old, new;

		if (val & _Q_LOCKED_MASK)
			break;

		/*
		 * Try to clear pending bit & set locked bit
		 */
		old = val;
		new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
		val = atomic_cmpxchg(&lock->val, old, new);

		if (val == old)
			return 1;
	}
	return 0;
}
#endif /* _Q_PENDING_BITS == 8 */

/*
 * Lock and MCS node addresses hash table for fast lookup
 *
 * Hashing is done on a per-cacheline basis to minimize the need to access
 * more than one cacheline.
 *
 * Dynamically allocate a hash table big enough to hold at least 4X the
 * number of possible cpus in the system. Allocation is done on page
 * granularity. So the minimum number of hash buckets should be at least
 * 256 (64-bit) or 512 (32-bit) to fully utilize a 4k page.
 *
 * Since we should not be holding locks from NMI context (very rare indeed) the
 * max load factor is 0.75, which is around the point where open addressing
 * breaks down.
 */
struct pv_hash_entry {
	struct qspinlock *lock;
	struct pv_node   *node;
};

#define PV_HE_PER_LINE	(SMP_CACHE_BYTES / sizeof(struct pv_hash_entry))
#define PV_HE_MIN	(PAGE_SIZE / sizeof(struct pv_hash_entry))

static struct pv_hash_entry *pv_lock_hash;
static unsigned int pv_lock_hash_bits __read_mostly;

/*
 * Allocate memory for the PV qspinlock hash buckets
 *
 * This function should be called from the paravirt spinlock initialization
 * routine.
 */
void __init __pv_init_lock_hash(void)
{
	int pv_hash_size = ALIGN(4 * num_possible_cpus(), PV_HE_PER_LINE);

	if (pv_hash_size < PV_HE_MIN)
		pv_hash_size = PV_HE_MIN;

	/*
	 * Allocate space from bootmem which should be page-size aligned
	 * and hence cacheline aligned.
	 */
	pv_lock_hash = alloc_large_system_hash("PV qspinlock",
					       sizeof(struct pv_hash_entry),
					       pv_hash_size, 0, HASH_EARLY,
					       &pv_lock_hash_bits, NULL,
					       pv_hash_size, pv_hash_size);
}
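
/*
 * Sizing example (illustrative arithmetic, assuming 64-bit pointers,
 * 64-byte cachelines and 4k pages): each pv_hash_entry is two pointers,
 * i.e. 16 bytes, so PV_HE_PER_LINE is 4 and PV_HE_MIN is 4096 / 16 = 256
 * entries. A machine with 64 possible CPUs asks for 4 * 64 = 256 entries
 * and thus gets exactly the one-page minimum; with 1024 possible CPUs the
 * request grows to 4096 entries, i.e. 64KB of bootmem.
 */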

#define for_each_hash_entry(he, offset, hash)						\
	for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0;	\
	     offset < (1 << pv_lock_hash_bits);						\
	     offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])

static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
{
	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
	struct pv_hash_entry *he;
	int hopcnt = 0;

	for_each_hash_entry(he, offset, hash) {
		hopcnt++;
		if (!cmpxchg(&he->lock, NULL, lock)) {
			WRITE_ONCE(he->node, node);
			qstat_hop(hopcnt);
			return &he->lock;
		}
	}
	/*
	 * Hard assume there is a free entry for us.
	 *
	 * This is guaranteed by ensuring every blocked lock only ever consumes
	 * a single entry, and since we only have 4 nesting levels per CPU
	 * and allocated 4*nr_possible_cpus(), this must be so.
	 *
	 * The single entry is guaranteed by having the lock owner unhash
	 * before it releases.
	 */
	BUG();
}

static struct pv_node *pv_unhash(struct qspinlock *lock)
{
	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
	struct pv_hash_entry *he;
	struct pv_node *node;

	for_each_hash_entry(he, offset, hash) {
		if (READ_ONCE(he->lock) == lock) {
			node = READ_ONCE(he->node);
			WRITE_ONCE(he->lock, NULL);
			return node;
		}
	}
	/*
	 * Hard assume we'll find an entry.
	 *
	 * This guarantees a limited lookup time and is itself guaranteed by
	 * having the lock owner do the unhash -- IFF the unlock sees the
	 * SLOW flag, there MUST be a hash entry.
	 */
	BUG();
}

/*
 * Return true when it is time to check the previous node which is not
 * in a running state.
 */
static inline bool
pv_wait_early(struct pv_node *prev, int loop)
{
	if ((loop & PV_PREV_CHECK_MASK) != 0)
		return false;

	return READ_ONCE(prev->state) != vcpu_running;
}

/*
 * Initialize the PV part of the mcs_spinlock node.
 */
static void pv_init_node(struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;

	BUILD_BUG_ON(sizeof(struct pv_node) > 5*sizeof(struct mcs_spinlock));

	pn->cpu = smp_processor_id();
	pn->state = vcpu_running;
}
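
/*
 * Summary of the pn->state transitions driven by the functions below (this
 * only restates the code, it is not an additional protocol):
 *
 *   vcpu_running -> vcpu_halted   pv_wait_node(): spin threshold reached,
 *                                 about to pv_wait() on pn->state
 *   vcpu_halted  -> vcpu_hashed   pv_kick_node(): lock owner hashed the lock
 *                                 on our behalf
 *   vcpu_halted  -> vcpu_running  pv_wait_node(): woken up without having
 *                                 been hashed
 *   vcpu_running -> vcpu_hashed   pv_wait_head_or_lock(): queue head hashed
 *                                 the lock itself, about to pv_wait() on the
 *                                 lock word
 *
 * vcpu_hashed tells pv_wait_head_or_lock() that a hash entry already exists
 * and must not be created a second time.
 */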

/*
 * Wait for node->locked to become true, halt the vcpu after a short spin.
 * pv_kick_node() is used to set _Q_SLOW_VAL and fill in hash table on its
 * behalf.
 */
static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
{
	struct pv_node *pn = (struct pv_node *)node;
	struct pv_node *pp = (struct pv_node *)prev;
	int waitcnt = 0;
	int loop;
	bool wait_early;

	/* waitcnt processing will be compiled out if !QUEUED_LOCK_STAT */
	for (;; waitcnt++) {
		for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) {
			if (READ_ONCE(node->locked))
				return;
			if (pv_wait_early(pp, loop)) {
				wait_early = true;
				break;
			}
			cpu_relax();
		}

		/*
		 * Order pn->state vs pn->locked thusly:
		 *
		 * [S] pn->state = vcpu_halted	  [S] next->locked = 1
		 *     MB			      MB
		 * [L] pn->locked		  [RmW] pn->state = vcpu_hashed
		 *
		 * Matches the cmpxchg() from pv_kick_node().
		 */
		smp_store_mb(pn->state, vcpu_halted);

		if (!READ_ONCE(node->locked)) {
			qstat_inc(qstat_pv_wait_node, true);
			qstat_inc(qstat_pv_wait_again, waitcnt);
			qstat_inc(qstat_pv_wait_early, wait_early);
			pv_wait(&pn->state, vcpu_halted);
		}

		/*
		 * If pv_kick_node() changed us to vcpu_hashed, retain that
		 * value so that pv_wait_head_or_lock() knows to not also try
		 * to hash this lock.
		 */
		cmpxchg(&pn->state, vcpu_halted, vcpu_running);

		/*
		 * If the locked flag is still not set after wakeup, it is a
		 * spurious wakeup and the vCPU should wait again. However,
		 * there is a pretty high overhead for CPU halting and kicking.
		 * So it is better to spin for a while in the hope that the
		 * MCS lock will be released soon.
		 */
		qstat_inc(qstat_pv_spurious_wakeup, !READ_ONCE(node->locked));
	}

	/*
	 * By now our node->locked should be 1 and our caller will not actually
	 * spin-wait for it. We do however rely on our caller to do a
	 * load-acquire for us.
	 */
}

/*
 * Called after setting next->locked = 1 when we're the lock owner.
 *
 * Instead of waking the waiters stuck in pv_wait_node(), advance their state
 * such that they're waiting in pv_wait_head_or_lock(); this avoids a
 * wake/sleep cycle.
 */
static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;
	struct __qspinlock *l = (void *)lock;

	/*
	 * If the vCPU is indeed halted, advance its state to match that of
	 * pv_wait_node(). If OTOH this fails, the vCPU was running and will
	 * observe its next->locked value and advance itself.
	 *
	 * Matches with smp_store_mb() and cmpxchg() in pv_wait_node()
	 */
	if (cmpxchg(&pn->state, vcpu_halted, vcpu_hashed) != vcpu_halted)
		return;

	/*
	 * Put the lock into the hash table and set the _Q_SLOW_VAL.
	 *
	 * As this is the same vCPU that will check the _Q_SLOW_VAL value and
	 * the hash table later on at unlock time, no atomic instruction is
	 * needed.
	 */
	WRITE_ONCE(l->locked, _Q_SLOW_VAL);
	(void)pv_hash(lock, pn);
}
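
/*
 * How a hash entry and _Q_SLOW_VAL pair up for a contended lock (again only
 * a restatement of the code in this file):
 *
 *   queue head, in pv_wait_head_or_lock():
 *	lp = pv_hash(lock, pn);			hash first ...
 *	xchg(&l->locked, _Q_SLOW_VAL);		... then mark the lock slow
 *	pv_wait(&l->locked, _Q_SLOW_VAL);
 *
 *   unlocker, in __pv_queued_spin_unlock_slowpath():
 *	(the cmpxchg() fast path returned _Q_SLOW_VAL)
 *	smp_rmb();				pairs with the hashing above
 *	node = pv_unhash(lock);
 *	smp_store_release(&l->locked, 0);
 *	pv_kick(node->cpu);
 *
 * So whenever an unlocker observes _Q_SLOW_VAL there is a hash entry for
 * this lock, and it is removed before the lock is released.
 */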

/*
 * Wait for l->locked to become clear and acquire the lock;
 * halt the vcpu after a short spin.
 * __pv_queued_spin_unlock() will wake us.
 *
 * The current value of the lock will be returned for additional processing.
 */
static u32
pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;
	struct __qspinlock *l = (void *)lock;
	struct qspinlock **lp = NULL;
	int waitcnt = 0;
	int loop;

	/*
	 * If pv_kick_node() already advanced our state, we don't need to
	 * insert ourselves into the hash table anymore.
	 */
	if (READ_ONCE(pn->state) == vcpu_hashed)
		lp = (struct qspinlock **)1;

	/*
	 * Tracking # of slowpath locking operations
	 */
	qstat_inc(qstat_pv_lock_slowpath, true);

	for (;; waitcnt++) {
		/*
		 * Set correct vCPU state to be used by queue node wait-early
		 * mechanism.
		 */
		WRITE_ONCE(pn->state, vcpu_running);

		/*
		 * Set the pending bit in the active lock spinning loop to
		 * disable lock stealing before attempting to acquire the lock.
		 */
		set_pending(lock);
		for (loop = SPIN_THRESHOLD; loop; loop--) {
			if (trylock_clear_pending(lock))
				goto gotlock;
			cpu_relax();
		}
		clear_pending(lock);

		if (!lp) { /* ONCE */
			lp = pv_hash(lock, pn);

			/*
			 * We must hash before setting _Q_SLOW_VAL, such that
			 * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()
			 * we'll be sure to be able to observe our hash entry.
			 *
			 *   [S] <hash>                 [Rmw] l->locked == _Q_SLOW_VAL
			 *       MB                           RMB
			 *   [RmW] l->locked = _Q_SLOW_VAL  [L] <unhash>
			 *
			 * Matches the smp_rmb() in __pv_queued_spin_unlock().
			 */
			if (xchg(&l->locked, _Q_SLOW_VAL) == 0) {
				/*
				 * The lock was free and now we own the lock.
				 * Change the lock value back to _Q_LOCKED_VAL
				 * and unhash the table.
				 */
				WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
				WRITE_ONCE(*lp, NULL);
				goto gotlock;
			}
		}
		WRITE_ONCE(pn->state, vcpu_hashed);
		qstat_inc(qstat_pv_wait_head, true);
		qstat_inc(qstat_pv_wait_again, waitcnt);
		pv_wait(&l->locked, _Q_SLOW_VAL);

		/*
		 * The unlocker should have freed the lock before kicking the
		 * CPU. So if the lock is still not free, it is a spurious
		 * wakeup or another vCPU has stolen the lock. The current
		 * vCPU should spin again.
		 */
		qstat_inc(qstat_pv_spurious_wakeup, READ_ONCE(l->locked));
	}

	/*
	 * The cmpxchg() or xchg() call before coming here provides the
	 * acquire semantics for locking. The dummy ORing of _Q_LOCKED_VAL
	 * here is to indicate to the compiler that the value will always
	 * be nonzero to enable better code optimization.
	 */
gotlock:
	return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
}

/*
 * PV versions of the unlock fastpath and slowpath functions to be used
 * instead of queued_spin_unlock().
 */
__visible void
__pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
{
	struct __qspinlock *l = (void *)lock;
	struct pv_node *node;

	if (unlikely(locked != _Q_SLOW_VAL)) {
		WARN(!debug_locks_silent,
		     "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
		     (unsigned long)lock, atomic_read(&lock->val));
		return;
	}

	/*
	 * A failed cmpxchg doesn't provide any memory-ordering guarantees,
	 * so we need a barrier to order the read of the node data in
	 * pv_unhash *after* we've read the lock being _Q_SLOW_VAL.
	 *
	 * Matches the xchg() in pv_wait_head_or_lock() setting _Q_SLOW_VAL.
	 */
	smp_rmb();

	/*
	 * Since the above failed to release, this must be the SLOW path.
	 * Therefore start by looking up the blocked node and unhashing it.
	 */
	node = pv_unhash(lock);

	/*
	 * Now that we have a reference to the (likely) blocked pv_node,
	 * release the lock.
	 */
	smp_store_release(&l->locked, 0);

	/*
	 * At this point the memory pointed at by lock can be freed/reused,
	 * however we can still use the pv_node to kick the CPU.
	 * The other vCPU may not really be halted, but kicking an active
	 * vCPU is harmless other than the additional latency in completing
	 * the unlock.
	 */
	qstat_inc(qstat_pv_kick_unlock, true);
	pv_kick(node->cpu);
}

/*
 * Include the architecture specific callee-save thunk of the
 * __pv_queued_spin_unlock(). This thunk is put together with
 * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
 * function close to each other sharing consecutive instruction cachelines.
 * Alternatively, architecture specific version of __pv_queued_spin_unlock()
 * can be defined.
 */
#include <asm/qspinlock_paravirt.h>

#ifndef __pv_queued_spin_unlock
__visible void __pv_queued_spin_unlock(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;
	u8 locked;

	/*
	 * We must not unlock if SLOW, because in that case we must first
	 * unhash. Otherwise it would be possible to have multiple @lock
	 * entries, which would be BAD.
	 */
	locked = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
	if (likely(locked == _Q_LOCKED_VAL))
		return;

	__pv_queued_spin_unlock_slowpath(lock, locked);
}
#endif /* __pv_queued_spin_unlock */
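
/*
 * Illustration only: an architecture that wants these slowpaths typically
 * calls __pv_init_lock_hash() from its paravirt spinlock initialization code
 * and points its paravirt lock hooks at the functions above plus its own
 * pv_wait()/pv_kick() hypercall wrappers (see the sketch near the top of
 * this file). The hook structure and field names below are made up for the
 * example; on x86, for instance, this is done through the paravirt lock ops
 * together with a callee-save thunk for __pv_queued_spin_unlock().
 *
 *	void __init example_pv_spinlock_init(void)
 *	{
 *		__pv_init_lock_hash();
 *
 *		example_lock_ops.queued_spin_lock_slowpath =
 *					__pv_queued_spin_lock_slowpath;
 *		example_lock_ops.queued_spin_unlock = __pv_queued_spin_unlock;
 *		example_lock_ops.wait = example_pv_wait;
 *		example_lock_ops.kick = example_pv_kick;
 *	}
 */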