/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2013-2014 Red Hat, Inc.
 * (C) Copyright 2015 Intel Corp.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 *          Peter Zijlstra <peterz@infradead.org>
 */

#ifndef _GEN_PV_LOCK_SLOWPATH

#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
#include <asm/byteorder.h>
#include <asm/qspinlock.h>

/*
 * The basic principle of a queue-based spinlock can best be understood
 * by studying a classic queue-based spinlock implementation called the
 * MCS lock. The paper below provides a good description of this kind
 * of lock.
 *
 * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
 *
 * This queued spinlock implementation is based on the MCS lock; however,
 * to make it fit the 4 bytes that spinlock_t is assumed to be, and to
 * preserve the existing API, it must be modified.
 *
 * In particular, where the traditional MCS lock consists of a tail pointer
 * (8 bytes) and needs the next pointer (another 8 bytes) of its own node
 * to unlock the next pending waiter (next->locked), we compress both of
 * these, {tail, next->locked}, into a single u32 value.
 *
 * Since a spinlock disables recursion of its own context, there is a limit
 * to the contexts that can nest: task, softirq, hardirq and nmi. As there
 * are at most 4 nesting levels, the nesting level can be encoded in 2 bits.
 * We can therefore encode the tail by combining the 2-bit nesting level
 * with the cpu number. With one byte for the lock value and 3 bytes for
 * the tail, only a 32-bit word is needed. Even though we only need 1 bit
 * for the lock, we extend it to a full byte to achieve better performance
 * on architectures that support atomic byte writes.
 *
 * We also change the first spinner to spin on the lock bit instead of its
 * node, thereby avoiding the need to carry a node from lock to unlock and
 * preserving the existing lock API. This also makes the unlock code simpler
 * and faster.
 *
 * N.B. The current implementation only supports architectures that allow
 * atomic operations on smaller 8-bit and 16-bit data types.
 */

#include "mcs_spinlock.h"

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define MAX_NODES	8
#else
#define MAX_NODES	4
#endif

/*
 * Per-CPU queue node structures; we can never have more than 4 nested
 * contexts: task, softirq, hardirq, nmi.
 *
 * Exactly fits one 64-byte cacheline on a 64-bit architecture.
 *
 * PV doubles the storage and uses the second cacheline for PV state.
 */
static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);
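
/*
 * Illustrative only, compiled out: a standalone sketch of how the 32-bit
 * lock word packs {tail cpu + 1, tail idx, pending, locked}. The offsets
 * below are assumptions modelled on asm-generic/qspinlock_types.h for the
 * NR_CPUS < 16K case, not the kernel's definitions.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define DEMO_TAIL_IDX_OFFSET	16	/* 2-bit nesting level */
#define DEMO_TAIL_CPU_OFFSET	18	/* 14 bits holding cpu + 1 */

int main(void)
{
	int cpu = 5, idx = 2;
	uint32_t tail = ((uint32_t)(cpu + 1) << DEMO_TAIL_CPU_OFFSET) |
			((uint32_t)idx << DEMO_TAIL_IDX_OFFSET);

	/* cpu 5, nesting level 2 -> 0x001a0000; tail == 0 means "no tail" */
	printf("tail %#x -> cpu %d idx %d\n", (unsigned)tail,
	       (int)(tail >> DEMO_TAIL_CPU_OFFSET) - 1,
	       (int)((tail >> DEMO_TAIL_IDX_OFFSET) & 3));
	return 0;
}
#endif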

/*
 * We must be able to distinguish between no-tail and the tail at 0:0,
 * therefore increment the cpu number by one.
 */
static inline __pure u32 encode_tail(int cpu, int idx)
{
	u32 tail;

#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(idx > 3);
#endif
	tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
	tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */

	return tail;
}

static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
{
	int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
	int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;

	return per_cpu_ptr(&mcs_nodes[idx], cpu);
}

#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)

/*
 * By using the whole 2nd least significant byte for the pending bit, we
 * can allow better optimization of the lock acquisition for the pending
 * bit holder.
 *
 * This internal structure is also used by the set_locked function which
 * is not restricted to _Q_PENDING_BITS == 8.
 */
struct __qspinlock {
	union {
		atomic_t val;
#ifdef __LITTLE_ENDIAN
		struct {
			u8	locked;
			u8	pending;
		};
		struct {
			u16	locked_pending;
			u16	tail;
		};
#else
		struct {
			u16	tail;
			u16	locked_pending;
		};
		struct {
			u8	reserved[2];
			u8	pending;
			u8	locked;
		};
#endif
	};
};

#if _Q_PENDING_BITS == 8
/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 *
 * Lock stealing is not allowed if this function is used.
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	WRITE_ONCE(l->locked_pending, _Q_LOCKED_VAL);
}

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	struct __qspinlock *l = (void *)lock;

	/*
	 * Use release semantics to make sure that the MCS node is properly
	 * initialized before changing the tail code.
	 */
	return (u32)xchg_release(&l->tail,
				 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
}

#else /* _Q_PENDING_BITS == 8 */

/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
}

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	u32 old, new, val = atomic_read(&lock->val);

	for (;;) {
		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
		/*
		 * Use release semantics to make sure that the MCS node is
		 * properly initialized before changing the tail code.
		 */
		old = atomic_cmpxchg_release(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}
	return old;
}
#endif /* _Q_PENDING_BITS == 8 */
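
/*
 * Illustrative only, compiled out: a standalone C11 sketch of the
 * cmpxchg-loop flavour of xchg_tail() above -- swap in a new tail code
 * while preserving the locked + pending bits. Mask values are assumptions
 * matching the layout sketched earlier, not the kernel's definitions.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

#define DEMO_TAIL_MASK		 0xffff0000u
#define DEMO_LOCKED_PENDING_MASK 0x0000ffffu

static uint32_t demo_xchg_tail(_Atomic uint32_t *val, uint32_t tail)
{
	uint32_t old = atomic_load_explicit(val, memory_order_relaxed);
	uint32_t new;

	do {
		/* keep locked + pending, replace only the tail field */
		new = (old & DEMO_LOCKED_PENDING_MASK) | tail;
		/*
		 * Release ordering: the caller's MCS node stores must be
		 * visible before the new tail is published.
		 */
	} while (!atomic_compare_exchange_weak_explicit(val, &old, new,
			memory_order_release, memory_order_relaxed));

	return old & DEMO_TAIL_MASK;
}
#endif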

/**
 * set_locked - Set the lock bit and own the lock
 * @lock: Pointer to queued spinlock structure
 *
 * *,*,0 -> *,0,1
 */
static __always_inline void set_locked(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
}

/*
 * Generate the native code for queued_spin_lock_slowpath(); provide NOPs
 * for all the PV callbacks.
 */

static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
					   struct mcs_spinlock *prev) { }
static __always_inline void __pv_kick_node(struct qspinlock *lock,
					   struct mcs_spinlock *node) { }
static __always_inline u32  __pv_wait_head_or_lock(struct qspinlock *lock,
						   struct mcs_spinlock *node)
						   { return 0; }

#define pv_enabled()		false

#define pv_init_node		__pv_init_node
#define pv_wait_node		__pv_wait_node
#define pv_kick_node		__pv_kick_node
#define pv_wait_head_or_lock	__pv_wait_head_or_lock

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
#endif

/*
 * Various notes on spin_is_locked() and spin_unlock_wait(), which are
 * 'interesting' functions:
 *
 * PROBLEM: some architectures have an interesting issue with atomic ACQUIRE
 * operations in that the ACQUIRE applies to the LOAD _not_ the STORE (ARM64,
 * PPC). Also, by construction, qspinlock has a similar issue: the setting
 * of the locked byte can be unordered with respect to acquiring the lock
 * proper.
 *
 * This gets to be 'interesting' in the following cases, where the /should/s
 * end up false because of this issue.
 *
 *
 * CASE 1:
 *
 * So the spin_is_locked() correctness issue comes from something like:
 *
 *	CPU0				CPU1
 *
 *	global_lock();			local_lock(i)
 *	  spin_lock(&G)			  spin_lock(&L[i])
 *	  for (i)			  if (!spin_is_locked(&G)) {
 *	    spin_unlock_wait(&L[i]);	    smp_acquire__after_ctrl_dep();
 *					    return;
 *					  }
 *					  // deal with fail
 *
 * Where it is important that CPU1 sees G locked or CPU0 sees L[i] locked
 * such that there is exclusion between the two critical sections.
 *
 * The load from spin_is_locked(&G) /should/ be constrained by the ACQUIRE
 * from spin_lock(&L[i]), and similarly the load(s) from
 * spin_unlock_wait(&L[i]) /should/ be constrained by the ACQUIRE from
 * spin_lock(&G).
 *
 * Similarly, later stuff is constrained by the ACQUIRE from CTRL+RMB.
 *
 *
 * CASE 2:
 *
 * For spin_unlock_wait() there is a second correctness issue, namely:
 *
 *	CPU0				CPU1
 *
 *	flag = set;
 *	smp_mb();			spin_lock(&l)
 *	spin_unlock_wait(&l);		if (!flag)
 *					  // add to lockless list
 *					spin_unlock(&l);
 *	// iterate lockless list
 *
 * Which wants to ensure that CPU1 will stop adding bits to the list and
 * CPU0 will observe the last entry on the list (if spin_unlock_wait() had
 * ACQUIRE semantics etc..)
 *
 * Where flag /should/ be ordered against the locked store of l.
 */

/*
 * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
 * issuing an _unordered_ store to set _Q_LOCKED_VAL.
 *
 * This means that the store can be delayed, but no later than the
 * store-release from the unlock. This means that simply observing
 * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
 *
 * There are two paths that can issue the unordered store:
 *
 *  (1) clear_pending_set_locked():	*,1,0 -> *,0,1
 *
 *  (2) set_locked():			t,0,0 -> t,0,1 ; t != 0
 *      atomic_cmpxchg_relaxed():	t,0,0 -> 0,0,1
 *
 * However, in both cases we have other !0 state that we've set before we
 * queued ourselves:
 *
 * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL;
 * our load is constrained by that ACQUIRE to not pass before that, and
 * thus must observe the store.
 *
 * For (2) we have a more interesting scenario. We enqueue ourselves using
 * xchg_tail(), which ends up being a RELEASE. This in itself is not
 * sufficient, however that is followed by an smp_cond_load_acquire() on
 * the same word, giving a RELEASE->ACQUIRE ordering. This again constrains
 * our load and guarantees we must observe that store.
 *
 * Therefore both cases have other !0 state that is observable before the
 * unordered locked byte store comes through. This means we can use that to
 * wait for the lock store, and then wait for an unlock.
 */
#ifndef queued_spin_unlock_wait
void queued_spin_unlock_wait(struct qspinlock *lock)
{
	u32 val;

	for (;;) {
		val = atomic_read(&lock->val);

		if (!val) /* not locked, we're done */
			goto done;

		if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
			break;

		/* not locked, but pending, wait until we observe the lock */
		cpu_relax();
	}

	/* any unlock is good */
	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
		cpu_relax();

done:
	smp_acquire__after_ctrl_dep();
}
EXPORT_SYMBOL(queued_spin_unlock_wait);
#endif

#endif /* _GEN_PV_LOCK_SLOWPATH */
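
/*
 * Illustrative only, compiled out: a standalone C11 sketch of the
 * two-phase wait in queued_spin_unlock_wait() above -- first wait out a
 * pending->locked hand-over, then wait for any unlock; the final acquire
 * fence pairs with the owner's store-release. Mask value is an assumption.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

#define DEMO_LOCKED_MASK	0x000000ffu

static void demo_unlock_wait(_Atomic uint32_t *val)
{
	uint32_t v;

	for (;;) {
		v = atomic_load_explicit(val, memory_order_relaxed);
		if (!v)				/* not locked, we're done */
			goto done;
		if (v & DEMO_LOCKED_MASK)	/* locked, wait for unlock */
			break;
		/* pending but not yet locked: wait for the hand-over */
	}

	/* any unlock is good */
	while (atomic_load_explicit(val, memory_order_relaxed) &
	       DEMO_LOCKED_MASK)
		;
done:
	atomic_thread_fence(memory_order_acquire);
}
#endif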

/**
 * queued_spin_lock_slowpath - acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * @val: Current value of the queued spinlock 32-bit word
 *
 * (queue tail, pending bit, lock value)
 *
 *              fast     :    slow                                  :    unlock
 *                       :                                          :
 * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
 *                       :       | ^--------.------.             /  :
 *                       :       v           \      \            |  :
 * pending               :    (0,1,1) +--> (0,1,0)   \            |  :
 *                       :       | ^--'              |            |  :
 *                       :       v                   |            |  :
 * uncontended           :    (n,x,y) +--> (n,0,0) --'            |  :
 *   queue               :       | ^--'                           |  :
 *                       :       v                                |  :
 * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
 *   queue               :         ^--'                             :
 */
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	struct mcs_spinlock *prev, *next, *node;
	u32 new, old, tail;
	int idx;

	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

	if (pv_enabled())
		goto queue;

	if (virt_spin_lock(lock))
		return;

	/*
	 * wait for in-progress pending->locked hand-overs
	 *
	 * 0,1,0 -> 0,0,1
	 */
	if (val == _Q_PENDING_VAL) {
		while ((val = atomic_read(&lock->val)) == _Q_PENDING_VAL)
			cpu_relax();
	}

	/*
	 * trylock || pending
	 *
	 * 0,0,0 -> 0,0,1 ; trylock
	 * 0,0,1 -> 0,1,1 ; pending
	 */
	for (;;) {
		/*
		 * If we observe any contention, queue.
		 */
		if (val & ~_Q_LOCKED_MASK)
			goto queue;

		new = _Q_LOCKED_VAL;
		if (val == new)
			new |= _Q_PENDING_VAL;

		/*
		 * Acquire semantics are required here as the function may
		 * return immediately if the lock was free.
		 */
		old = atomic_cmpxchg_acquire(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}

	/*
	 * we won the trylock
	 */
	if (new == _Q_LOCKED_VAL)
		return;

	/*
	 * we're pending, wait for the owner to go away.
	 *
	 * *,1,1 -> *,1,0
	 *
	 * this wait loop must be a load-acquire such that we match the
	 * store-release that clears the locked bit and creates lock
	 * sequentiality; this is because not all clear_pending_set_locked()
	 * implementations imply full barriers.
	 */
	smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_MASK));

	/*
	 * take ownership and clear the pending bit.
	 *
	 * *,1,0 -> *,0,1
	 */
	clear_pending_set_locked(lock);
	return;
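
	/*
	 * Illustrative only, compiled out: a standalone C11 sketch of the
	 * trylock || pending loop above. 0,0,0 -> 0,0,1 takes the lock,
	 * 0,0,1 -> 0,1,1 sets the pending bit, and anything else falls
	 * back to the queue. Constant values are assumptions.
	 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define DEMO_LOCKED_VAL		0x001u		/* 0,0,1 */
#define DEMO_PENDING_VAL	0x100u		/* 0,1,0 */
#define DEMO_LOCKED_MASK	0x0ffu

/* true: got the lock (*locked) or the pending bit; false: go queue */
static bool demo_trylock_or_pending(_Atomic uint32_t *lock, bool *locked)
{
	uint32_t val = atomic_load_explicit(lock, memory_order_relaxed);
	uint32_t new;

	for (;;) {
		if (val & ~DEMO_LOCKED_MASK)	/* tail or pending set */
			return false;

		new = DEMO_LOCKED_VAL;
		if (val == new)			/* already locked: pend */
			new |= DEMO_PENDING_VAL;

		/* acquire: we may return here as the new lock owner */
		if (atomic_compare_exchange_weak_explicit(lock, &val, new,
				memory_order_acquire, memory_order_relaxed)) {
			*locked = (new == DEMO_LOCKED_VAL);
			return true;
		}
	}
}
#endif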

	/*
	 * End of pending bit optimistic spinning and beginning of MCS
	 * queuing.
	 */
queue:
	node = this_cpu_ptr(&mcs_nodes[0]);
	idx = node->count++;
	tail = encode_tail(smp_processor_id(), idx);

	node += idx;
	node->locked = 0;
	node->next = NULL;
	pv_init_node(node);

	/*
	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
	 * attempt the trylock once more in the hope someone let go while we
	 * weren't watching.
	 */
	if (queued_spin_trylock(lock))
		goto release;

	/*
	 * We have already touched the queueing cacheline; don't bother with
	 * pending stuff.
	 *
	 * p,*,* -> n,*,*
	 *
	 * RELEASE, such that the stores to @node must be complete.
	 */
	old = xchg_tail(lock, tail);
	next = NULL;

	/*
	 * if there was a previous node; link it and wait until reaching the
	 * head of the waitqueue.
	 */
	if (old & _Q_TAIL_MASK) {
		prev = decode_tail(old);
		/*
		 * The above xchg_tail() is also a load of @lock which
		 * generates, through decode_tail(), a pointer.
		 *
		 * The address dependency matches the RELEASE of xchg_tail()
		 * such that the access to @prev must happen after.
		 */
		smp_read_barrier_depends();

		WRITE_ONCE(prev->next, node);

		pv_wait_node(node, prev);
		arch_mcs_spin_lock_contended(&node->locked);

		/*
		 * While waiting for the MCS lock, the next pointer may have
		 * been set by another lock waiter. We optimistically load
		 * the next pointer & prefetch the cacheline for writing
		 * to reduce latency in the upcoming MCS unlock operation.
		 */
		next = READ_ONCE(node->next);
		if (next)
			prefetchw(next);
	}

	/*
	 * we're at the head of the waitqueue, wait for the owner & pending
	 * to go away.
	 *
	 * *,x,y -> *,0,0
	 *
	 * this wait loop must use a load-acquire such that we match the
	 * store-release that clears the locked bit and creates lock
	 * sequentiality; this is because the set_locked() function below
	 * does not imply a full barrier.
	 *
	 * The PV pv_wait_head_or_lock function, if active, will acquire
	 * the lock and return a non-zero value. So we have to skip the
	 * smp_cond_load_acquire() call. As the next PV queue head hasn't
	 * been designated yet, there is no way for the locked value to
	 * become _Q_SLOW_VAL. So both the set_locked() and the
	 * atomic_cmpxchg_relaxed() calls will be safe.
	 *
	 * If PV isn't active, 0 will be returned instead.
	 */
	if ((val = pv_wait_head_or_lock(lock, node)))
		goto locked;

	val = smp_cond_load_acquire(&lock->val.counter,
				    !(VAL & _Q_LOCKED_PENDING_MASK));

locked:
	/*
	 * claim the lock:
	 *
	 * n,0,0 -> 0,0,1 : lock, uncontended
	 * *,0,0 -> *,0,1 : lock, contended
	 *
	 * If the queue head is the only one in the queue (lock value ==
	 * tail), clear the tail code and grab the lock. Otherwise, we only
	 * need to grab the lock.
	 */
	for (;;) {
		/* In the PV case we might already have _Q_LOCKED_VAL set */
		if ((val & _Q_TAIL_MASK) != tail) {
			set_locked(lock);
			break;
		}
		/*
		 * The smp_cond_load_acquire() call above has provided the
		 * necessary acquire semantics required for locking. At most
		 * two iterations of this loop can run.
		 */
		old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL);
		if (old == val)
			goto release; /* No contention */

		val = old;
	}

	/*
	 * contended path; wait for next if not observed yet, release.
	 */
	if (!next) {
		while (!(next = READ_ONCE(node->next)))
			cpu_relax();
	}

	arch_mcs_spin_unlock_contended(&next->locked);
	pv_kick_node(lock, next);

release:
	/*
	 * release the node
	 */
	__this_cpu_dec(mcs_nodes[0].count);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);
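
/*
 * Illustrative only, compiled out: a standalone C11 sketch of the classic
 * MCS lock that the slowpath above compresses into 4 bytes -- each waiter
 * spins on its own node and ownership is handed on through node->next.
 * All names are hypothetical; nesting levels and the pending bit are
 * omitted.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct demo_mcs_node {
	struct demo_mcs_node *_Atomic next;
	atomic_bool locked;	/* set by our predecessor at hand-over */
};

static void demo_mcs_lock(struct demo_mcs_node *_Atomic *tail,
			  struct demo_mcs_node *me)
{
	struct demo_mcs_node *prev;

	atomic_store_explicit(&me->next, NULL, memory_order_relaxed);
	atomic_store_explicit(&me->locked, false, memory_order_relaxed);

	/* publish ourselves as the new tail; orders the node init */
	prev = atomic_exchange_explicit(tail, me, memory_order_acq_rel);
	if (!prev)
		return;		/* queue was empty: lock is ours */

	/* link in behind prev, then spin on our own cacheline */
	atomic_store_explicit(&prev->next, me, memory_order_release);
	while (!atomic_load_explicit(&me->locked, memory_order_acquire))
		;
}

static void demo_mcs_unlock(struct demo_mcs_node *_Atomic *tail,
			    struct demo_mcs_node *me)
{
	struct demo_mcs_node *next =
		atomic_load_explicit(&me->next, memory_order_acquire);

	if (!next) {
		/* no known successor: try to swing the tail back to empty */
		struct demo_mcs_node *expect = me;

		if (atomic_compare_exchange_strong_explicit(tail, &expect,
				NULL, memory_order_release,
				memory_order_relaxed))
			return;

		/* a successor is mid-enqueue; wait for the link */
		while (!(next = atomic_load_explicit(&me->next,
						     memory_order_acquire)))
			;
	}
	atomic_store_explicit(&next->locked, true, memory_order_release);
}
#endif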

/*
 * Generate the paravirt code for queued_spin_lock_slowpath().
 */
#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
#define _GEN_PV_LOCK_SLOWPATH

#undef  pv_enabled
#define pv_enabled()	true

#undef pv_init_node
#undef pv_wait_node
#undef pv_kick_node
#undef pv_wait_head_or_lock

#undef  queued_spin_lock_slowpath
#define queued_spin_lock_slowpath	__pv_queued_spin_lock_slowpath

#include "qspinlock_paravirt.h"
#include "qspinlock.c"

#endif
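
/*
 * Illustrative only, compiled out: a minimal standalone sketch of the
 * self-#include trick used above -- the file is compiled twice, the second
 * time with the pv_*() hooks and the function name rebound. The file name
 * and all identifiers here are hypothetical.
 */
#if 0
/* demo_lock.c */
#ifndef _GEN_DEMO_PV

static inline void __demo_nop_hook(void) { }
#define demo_pv_hook		__demo_nop_hook		/* native: a NOP */
#define demo_lock_slowpath	demo_native_lock_slowpath

#endif /* _GEN_DEMO_PV */

void demo_lock_slowpath(void)
{
	demo_pv_hook();		/* bound differently in each compilation */
	/* ... common slowpath body ... */
}

#if !defined(_GEN_DEMO_PV) && defined(CONFIG_DEMO_PV)
#define _GEN_DEMO_PV

#undef  demo_pv_hook
#define demo_pv_hook		__demo_real_pv_hook	/* real PV hook */
#undef  demo_lock_slowpath
#define demo_lock_slowpath	__demo_pv_lock_slowpath

#include "demo_lock.c"		/* emit the slowpath again, rebound */
#endif
#endif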