/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2013-2014 Red Hat, Inc.
 * (C) Copyright 2015 Intel Corp.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 *          Peter Zijlstra <peterz@infradead.org>
 */

#ifndef _GEN_PV_LOCK_SLOWPATH

#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/qspinlock.h>

/*
 * The basic principle of a queue-based spinlock can best be understood
 * by studying a classic queue-based spinlock implementation called the
 * MCS lock. The paper below provides a good description for this kind
 * of lock.
 *
 * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
 *
 * This queued spinlock implementation is based on the MCS lock; however, to
 * make it fit the 4 bytes we assume spinlock_t to be, and to preserve its
 * existing API, we must modify it somehow.
 *
 * In particular, where the traditional MCS lock consists of a tail pointer
 * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
 * unlock the next pending waiter (next->locked), we compress both of these:
 * {tail, next->locked} into a single u32 value.
 *
 * A spinlock disables recursion of its own context, and there is a limit to
 * the contexts that can nest: task, softirq, hardirq, nmi. As there are at
 * most 4 nesting levels, the nesting level can be encoded by a 2-bit number.
 * We can then encode the tail by combining the 2-bit nesting level with the
 * cpu number. With one byte for the lock value and 3 bytes for the tail,
 * only a 32-bit word is needed. Even though we only need 1 bit for the lock,
 * we extend it to a full byte to achieve better performance for
 * architectures that support atomic byte writes.
 *
 * We also change the first spinner to spin on the lock bit instead of its
 * node, thereby avoiding the need to carry a node from lock to unlock and
 * preserving the existing lock API. This also makes the unlock code simpler
 * and faster.
 *
 * N.B. The current implementation only supports architectures that allow
 * atomic operations on smaller 8-bit and 16-bit data types.
 */
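/*
 * Illustrative note (added for exposition, not part of the original file):
 * assuming the NR_CPUS < 16K layout defined in qspinlock_types.h, the 32-bit
 * lock word is split as
 *
 *       0- 7: locked byte
 *          8: pending
 *       9-15: not used
 *      16-17: tail index
 *      18-31: tail cpu (+1)
 *
 * so, for example, a queue node at per-cpu index 2 on cpu 5 would be encoded
 * by encode_tail() below as ((5 + 1) << _Q_TAIL_CPU_OFFSET) |
 * (2 << _Q_TAIL_IDX_OFFSET), and decode_tail() reverses that mapping to find
 * the corresponding mcs_spinlock node.
 */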
#include "mcs_spinlock.h"

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define MAX_NODES	8
#else
#define MAX_NODES	4
#endif

/*
 * Per-CPU queue node structures; we can never have more than 4 nested
 * contexts: task, softirq, hardirq, nmi.
 *
 * Exactly fits one 64-byte cacheline on a 64-bit architecture.
 *
 * PV doubles the storage and uses the second cacheline for PV state.
 */
static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);

/*
 * We must be able to distinguish between no-tail and the tail at 0:0,
 * therefore increment the cpu number by one.
 */

static inline __pure u32 encode_tail(int cpu, int idx)
{
	u32 tail;

#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(idx > 3);
#endif
	tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
	tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */

	return tail;
}

static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
{
	int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
	int idx = (tail &  _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;

	return per_cpu_ptr(&mcs_nodes[idx], cpu);
}

#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)

/*
 * By using the whole 2nd least significant byte for the pending bit, we
 * can allow better optimization of the lock acquisition for the pending
 * bit holder.
 *
 * This internal structure is also used by the set_locked function which
 * is not restricted to _Q_PENDING_BITS == 8.
 */
struct __qspinlock {
	union {
		atomic_t val;
#ifdef __LITTLE_ENDIAN
		struct {
			u8	locked;
			u8	pending;
		};
		struct {
			u16	locked_pending;
			u16	tail;
		};
#else
		struct {
			u16	tail;
			u16	locked_pending;
		};
		struct {
			u8	reserved[2];
			u8	pending;
			u8	locked;
		};
#endif
	};
};
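/*
 * Illustrative example (added for exposition, not part of the original file):
 * on a little-endian machine the union above overlays the atomic value so
 * that bits 0-7 are the locked byte, bits 8-15 the pending byte and bits
 * 16-31 the tail. A lock word of 0x00000101 therefore means "locked with a
 * pending waiter", and the single 16-bit store of _Q_LOCKED_VAL to
 * ->locked_pending in clear_pending_set_locked() below sets the locked byte
 * and clears the pending byte in one operation.
 */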
#if _Q_PENDING_BITS == 8
/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 *
 * Lock stealing is not allowed if this function is used.
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	WRITE_ONCE(l->locked_pending, _Q_LOCKED_VAL);
}

/*
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	struct __qspinlock *l = (void *)lock;

	/*
	 * Use release semantics to make sure that the MCS node is properly
	 * initialized before changing the tail code.
	 */
	return (u32)xchg_release(&l->tail,
				 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
}

#else /* _Q_PENDING_BITS == 8 */

/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
}

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	u32 old, new, val = atomic_read(&lock->val);

	for (;;) {
		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
		/*
		 * Use release semantics to make sure that the MCS node is
		 * properly initialized before changing the tail code.
		 */
		old = atomic_cmpxchg_release(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}
	return old;
}
#endif /* _Q_PENDING_BITS == 8 */

/**
 * set_locked - Set the lock bit and own the lock
 * @lock: Pointer to queued spinlock structure
 *
 * *,*,0 -> *,0,1
 */
static __always_inline void set_locked(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
}
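/*
 * For reference (an illustrative sketch, not part of this file): the unlock
 * fast path lives in include/asm-generic/qspinlock.h (or an arch override)
 * and, on this generation of the code, amounts to a release store of zero to
 * the locked byte, e.g.:
 *
 *	smp_store_release((u8 *)&lock->val, 0);
 *
 * The load-acquire wait loops below pair with that store-release.
 */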

/*
 * Generate the native code for queued_spin_lock_slowpath(); provide NOPs for
 * all the PV callbacks.
 */

static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
					   struct mcs_spinlock *prev) { }
static __always_inline void __pv_kick_node(struct qspinlock *lock,
					   struct mcs_spinlock *node) { }
static __always_inline u32  __pv_wait_head_or_lock(struct qspinlock *lock,
						   struct mcs_spinlock *node)
						   { return 0; }

#define pv_enabled()		false

#define pv_init_node		__pv_init_node
#define pv_wait_node		__pv_wait_node
#define pv_kick_node		__pv_kick_node
#define pv_wait_head_or_lock	__pv_wait_head_or_lock

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
#endif

/*
 * Various notes on spin_is_locked() and spin_unlock_wait(), which are
 * 'interesting' functions:
 *
 * PROBLEM: some architectures have an interesting issue with atomic ACQUIRE
 * operations in that the ACQUIRE applies to the LOAD _not_ the STORE (ARM64,
 * PPC). Also qspinlock has a similar issue per construction, the setting of
 * the locked byte can be unordered w.r.t. acquiring the lock proper.
 *
 * This gets to be 'interesting' in the following cases, where the /should/s
 * end up false because of this issue.
 *
 *
 * CASE 1:
 *
 * So the spin_is_locked() correctness issue comes from something like:
 *
 *   CPU0                               CPU1
 *
 *   global_lock();                     local_lock(i)
 *     spin_lock(&G)                      spin_lock(&L[i])
 *     for (i)                            if (!spin_is_locked(&G)) {
 *       spin_unlock_wait(&L[i]);           smp_acquire__after_ctrl_dep();
 *                                          return;
 *                                        }
 *                                        // deal with fail
 *
 * Where it is important CPU1 sees G locked or CPU0 sees L[i] locked such
 * that there is exclusion between the two critical sections.
 *
 * The load from spin_is_locked(&G) /should/ be constrained by the ACQUIRE
 * from spin_lock(&L[i]), and similarly the load(s) from
 * spin_unlock_wait(&L[i]) /should/ be constrained by the ACQUIRE from
 * spin_lock(&G).
 *
 * Similarly, later stuff is constrained by the ACQUIRE from CTRL+RMB.
 *
 *
 * CASE 2:
 *
 * For spin_unlock_wait() there is a second correctness issue, namely:
 *
 *   CPU0                               CPU1
 *
 *   flag = set;
 *   smp_mb();                          spin_lock(&l)
 *   spin_unlock_wait(&l);              if (!flag)
 *                                        // add to lockless list
 *                                      spin_unlock(&l);
 *   // iterate lockless list
 *
 * Which wants to ensure that CPU1 will stop adding bits to the list and CPU0
 * will observe the last entry on the list (if spin_unlock_wait() had ACQUIRE
 * semantics etc.)
 *
 * Where flag /should/ be ordered against the locked store of l.
 */

/*
 * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
 * issuing an _unordered_ store to set _Q_LOCKED_VAL.
 *
 * This means that the store can be delayed, but no later than the
 * store-release from the unlock. This means that simply observing
 * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
 *
 * There are two paths that can issue the unordered store:
 *
 *  (1) clear_pending_set_locked():	*,1,0 -> *,0,1
 *
 *  (2) set_locked():			t,0,0 -> t,0,1 ; t != 0
 *      atomic_cmpxchg_relaxed():	t,0,0 -> 0,0,1
 *
 * However, in both cases we have other !0 state we've set before to queue
 * ourselves:
 *
 * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL; our
 * load is constrained by that ACQUIRE to not pass before that, and thus must
 * observe the store.
 *
 * For (2) we have a more interesting scenario. We enqueue ourselves using
 * xchg_tail(), which ends up being a RELEASE. This in itself is not
 * sufficient; however, that is followed by an smp_cond_load_acquire() on the
 * same word, giving a RELEASE->ACQUIRE ordering. This again constrains our
 * load and guarantees we must observe that store.
 *
 * Therefore both cases have other !0 state that is observable before the
 * unordered locked byte store comes through. This means we can use that to
 * wait for the lock store, and then wait for an unlock.
 */
#ifndef queued_spin_unlock_wait
void queued_spin_unlock_wait(struct qspinlock *lock)
{
	u32 val;

	for (;;) {
		val = atomic_read(&lock->val);

		if (!val) /* not locked, we're done */
			goto done;

		if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
			break;

		/* not locked, but pending, wait until we observe the lock */
		cpu_relax();
	}

	/* any unlock is good */
	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
		cpu_relax();

done:
	smp_acquire__after_ctrl_dep();
}
EXPORT_SYMBOL(queued_spin_unlock_wait);
#endif

#endif /* _GEN_PV_LOCK_SLOWPATH */
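/*
 * For context, a sketch (added for exposition, not part of this file) of the
 * fast path that invokes the slowpath below; the real definition lives in
 * include/asm-generic/qspinlock.h and may differ by kernel version:
 *
 *	static __always_inline void queued_spin_lock(struct qspinlock *lock)
 *	{
 *		u32 val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
 *
 *		if (likely(val == 0))
 *			return;
 *		queued_spin_lock_slowpath(lock, val);
 *	}
 */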
/**
 * queued_spin_lock_slowpath - acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * @val: Current value of the queued spinlock 32-bit word
 *
 * (queue tail, pending bit, lock value)
 *
 *              fast     :    slow                                  :    unlock
 *                       :                                          :
 * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
 *                       :       | ^--------.------.             /  :
 *                       :       v           \      \            |  :
 * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
 *                       :       | ^--'              |           |  :
 *                       :       v                   |           |  :
 * uncontended           :    (n,x,y) +--> (n,0,0) --'            |  :
 *   queue               :       | ^--'                           |  :
 *                       :       v                                |  :
 * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
 *   queue               :         ^--'                             :
 */
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	struct mcs_spinlock *prev, *next, *node;
	u32 new, old, tail;
	int idx;

	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

	if (pv_enabled())
		goto queue;

	if (virt_spin_lock(lock))
		return;

	/*
	 * wait for in-progress pending->locked hand-overs
	 *
	 * 0,1,0 -> 0,0,1
	 */
	if (val == _Q_PENDING_VAL) {
		while ((val = atomic_read(&lock->val)) == _Q_PENDING_VAL)
			cpu_relax();
	}

	/*
	 * trylock || pending
	 *
	 * 0,0,0 -> 0,0,1 ; trylock
	 * 0,0,1 -> 0,1,1 ; pending
	 */
	for (;;) {
		/*
		 * If we observe any contention; queue.
		 */
		if (val & ~_Q_LOCKED_MASK)
			goto queue;

		new = _Q_LOCKED_VAL;
		if (val == new)
			new |= _Q_PENDING_VAL;

		/*
		 * Acquire semantics are required here as the function may
		 * return immediately if the lock was free.
		 */
		old = atomic_cmpxchg_acquire(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}

	/*
	 * we won the trylock
	 */
	if (new == _Q_LOCKED_VAL)
		return;

	/*
	 * we're pending, wait for the owner to go away.
	 *
	 * *,1,1 -> *,1,0
	 *
	 * this wait loop must be a load-acquire such that we match the
	 * store-release that clears the locked bit and creates lock
	 * sequentiality; this is because not all clear_pending_set_locked()
	 * implementations imply full barriers.
	 */
	smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_MASK));

	/*
	 * take ownership and clear the pending bit.
	 *
	 * *,1,0 -> *,0,1
	 */
	clear_pending_set_locked(lock);
	return;
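	/*
	 * Illustrative walk-through (added for exposition, not part of the
	 * original file), assuming the NR_CPUS < 16K layout where bits 0-7
	 * are the locked byte and bit 8 is the pending bit:
	 *
	 * - val == 0x00000000: the cmpxchg above installs 0x00000001 and the
	 *   trylock path returns with the lock held.
	 * - val == 0x00000001: the cmpxchg installs 0x00000101 (pending); we
	 *   then wait for the locked byte to clear, after which
	 *   clear_pending_set_locked() turns *,1,0 into *,0,1, preserving any
	 *   tail bits set by later waiters.
	 */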
	/*
	 * End of pending bit optimistic spinning and beginning of MCS
	 * queuing.
	 */
queue:
	node = this_cpu_ptr(&mcs_nodes[0]);
	idx = node->count++;
	tail = encode_tail(smp_processor_id(), idx);

	node += idx;
	node->locked = 0;
	node->next = NULL;
	pv_init_node(node);

	/*
	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
	 * attempt the trylock once more in the hope someone let go while we
	 * weren't watching.
	 */
	if (queued_spin_trylock(lock))
		goto release;

	/*
	 * We have already touched the queueing cacheline; don't bother with
	 * pending stuff.
	 *
	 * p,*,* -> n,*,*
	 *
	 * RELEASE, such that the stores to @node must be complete.
	 */
	old = xchg_tail(lock, tail);
	next = NULL;

	/*
	 * if there was a previous node; link it and wait until reaching the
	 * head of the waitqueue.
	 */
	if (old & _Q_TAIL_MASK) {
		prev = decode_tail(old);
		/*
		 * The above xchg_tail() is also a load of @lock which
		 * generates, through decode_tail(), a pointer.
		 *
		 * The address dependency matches the RELEASE of xchg_tail()
		 * such that the access to @prev must happen after.
		 */
		smp_read_barrier_depends();

		WRITE_ONCE(prev->next, node);

		pv_wait_node(node, prev);
		arch_mcs_spin_lock_contended(&node->locked);

		/*
		 * While waiting for the MCS lock, the next pointer may have
		 * been set by another lock waiter. We optimistically load
		 * the next pointer & prefetch the cacheline for writing
		 * to reduce latency in the upcoming MCS unlock operation.
		 */
		next = READ_ONCE(node->next);
		if (next)
			prefetchw(next);
	}

	/*
	 * we're at the head of the waitqueue, wait for the owner & pending to
	 * go away.
	 *
	 * *,x,y -> *,0,0
	 *
	 * this wait loop must use a load-acquire such that we match the
	 * store-release that clears the locked bit and creates lock
	 * sequentiality; this is because the set_locked() function below
	 * does not imply a full barrier.
	 *
	 * The PV pv_wait_head_or_lock function, if active, will acquire
	 * the lock and return a non-zero value. So we have to skip the
	 * smp_cond_load_acquire() call. As the next PV queue head hasn't been
	 * designated yet, there is no way for the locked value to become
	 * _Q_SLOW_VAL. So both the set_locked() and the
	 * atomic_cmpxchg_relaxed() calls will be safe.
	 *
	 * If PV isn't active, 0 will be returned instead.
	 */
	if ((val = pv_wait_head_or_lock(lock, node)))
		goto locked;

	val = smp_cond_load_acquire(&lock->val.counter,
				    !(VAL & _Q_LOCKED_PENDING_MASK));

locked:
	/*
	 * claim the lock:
	 *
	 * n,0,0 -> 0,0,1 : lock, uncontended
	 * *,0,0 -> *,0,1 : lock, contended
	 *
	 * If the queue head is the only one in the queue (lock value == tail),
	 * clear the tail code and grab the lock. Otherwise, we only need
	 * to grab the lock.
	 */
	for (;;) {
		/* In the PV case we might already have _Q_LOCKED_VAL set */
		if ((val & _Q_TAIL_MASK) != tail) {
			set_locked(lock);
			break;
		}
		/*
		 * The smp_cond_load_acquire() call above has provided the
		 * necessary acquire semantics required for locking. At most
		 * two iterations of this loop may run.
		 */
		old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL);
		if (old == val)
			goto release;	/* No contention */

		val = old;
	}

	/*
	 * contended path; wait for next if not observed yet, release.
	 */
	if (!next) {
		while (!(next = READ_ONCE(node->next)))
			cpu_relax();
	}

	arch_mcs_spin_unlock_contended(&next->locked);
	pv_kick_node(lock, next);

release:
	/*
	 * release the node
	 */
	__this_cpu_dec(mcs_nodes[0].count);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);

/*
 * Generate the paravirt code for queued_spin_lock_slowpath().
 */
#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
#define _GEN_PV_LOCK_SLOWPATH

#undef  pv_enabled
#define pv_enabled()	true

#undef pv_init_node
#undef pv_wait_node
#undef pv_kick_node
#undef pv_wait_head_or_lock

#undef  queued_spin_lock_slowpath
#define queued_spin_lock_slowpath	__pv_queued_spin_lock_slowpath

#include "qspinlock_paravirt.h"
#include "qspinlock.c"

#endif