/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2013-2014,2018 Red Hat, Inc.
 * (C) Copyright 2015 Intel Corp.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <longman@redhat.com>
 *          Peter Zijlstra <peterz@infradead.org>
 */

#ifndef _GEN_PV_LOCK_SLOWPATH

#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/qspinlock.h>

/*
 * Include queued spinlock statistics code
 */
#include "qspinlock_stat.h"

/*
 * The basic principle of a queue-based spinlock can best be understood
 * by studying a classic queue-based spinlock implementation called the
 * MCS lock. The paper below provides a good description for this kind
 * of lock.
 *
 * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
 *
 * This queued spinlock implementation is based on the MCS lock; however, to
 * make it fit the 4 bytes we assume spinlock_t to be, and to preserve its
 * existing API, we must modify it somehow.
 *
 * In particular, where the traditional MCS lock consists of a tail pointer
 * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
 * unlock the next pending waiter (next->locked), we compress both of these:
 * {tail, next->locked} into a single u32 value.
 *
 * A spinlock disables recursion of its own context, and there is a limit to
 * the contexts that can nest: task, softirq, hardirq, nmi. As there are at
 * most 4 nesting levels, the nesting level can be encoded by a 2-bit number.
 * We can then encode the tail by combining the 2-bit nesting level with the
 * cpu number. With one byte for the lock value and 3 bytes for the tail,
 * only a 32-bit word is now needed. Even though we only need 1 bit for the
 * lock, we extend it to a full byte to achieve better performance for
 * architectures that support atomic byte writes.
 *
 * We also change the first spinner to spin on the lock bit instead of its
 * node, thereby avoiding the need to carry a node from lock to unlock and
 * preserving the existing lock API. This also makes the unlock code simpler
 * and faster.
 *
 * N.B. The current implementation only supports architectures that allow
 *      atomic operations on smaller 8-bit and 16-bit data types.
 *
 */
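/*
 * Illustrative example (not part of the algorithm proper, and assuming the
 * usual field layout from asm-generic/qspinlock_types.h with
 * _Q_PENDING_BITS == 8, i.e. _Q_TAIL_IDX_OFFSET == 16 and
 * _Q_TAIL_CPU_OFFSET == 18): the tail code word for CPU 2 queueing at
 * nesting level 1 would be
 *
 *      encode_tail(2, 1) = ((2 + 1) << 18) | (1 << 16) = 0x000d0000
 *
 * and decode_tail(0x000d0000) maps back to the second per-CPU MCS node
 * (idx 1) of CPU 2.
 */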

#include "mcs_spinlock.h"
#define MAX_NODES       4

/*
 * On 64-bit architectures, the mcs_spinlock structure will be 16 bytes in
 * size and four of them will fit nicely in one 64-byte cacheline. For
 * pvqspinlock, however, we need more space for extra data. To accommodate
 * that, we insert two more long words to pad it up to 32 bytes. IOW, only
 * two of them can fit in a cacheline in this case. That is OK as it is rare
 * to have more than 2 levels of slowpath nesting in actual use. We don't
 * want to penalize pvqspinlocks to optimize for a rare case in native
 * qspinlocks.
 */
struct qnode {
        struct mcs_spinlock mcs;
#ifdef CONFIG_PARAVIRT_SPINLOCKS
        long reserved[2];
#endif
};

/*
 * The pending bit spinning loop count.
 * This heuristic is used to limit the number of lockword accesses
 * made by atomic_cond_read_relaxed when waiting for the lock to
 * transition out of the "== _Q_PENDING_VAL" state. We don't spin
 * indefinitely because there's no guarantee that we'll make forward
 * progress.
 */
#ifndef _Q_PENDING_LOOPS
#define _Q_PENDING_LOOPS        1
#endif

/*
 * Per-CPU queue node structures; we can never have more than 4 nested
 * contexts: task, softirq, hardirq, nmi.
 *
 * Exactly fits one 64-byte cacheline on a 64-bit architecture.
 *
 * PV doubles the storage and uses the second cacheline for PV state.
 */
static DEFINE_PER_CPU_ALIGNED(struct qnode, qnodes[MAX_NODES]);

/*
 * We must be able to distinguish between no-tail and the tail at 0:0,
 * therefore increment the cpu number by one.
 */

static inline __pure u32 encode_tail(int cpu, int idx)
{
        u32 tail;

        tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
        tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */

        return tail;
}

static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
{
        int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
        int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;

        return per_cpu_ptr(&qnodes[idx].mcs, cpu);
}

static inline __pure
struct mcs_spinlock *grab_mcs_node(struct mcs_spinlock *base, int idx)
{
        return &((struct qnode *)base + idx)->mcs;
}

#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)

#if _Q_PENDING_BITS == 8
/**
 * clear_pending - clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,* -> *,0,*
 */
static __always_inline void clear_pending(struct qspinlock *lock)
{
        WRITE_ONCE(lock->pending, 0);
}

/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 *
 * Lock stealing is not allowed if this function is used.
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
        WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
}

/*
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail), which heads an address dependency
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
        /*
         * We can use relaxed semantics since the caller ensures that the
         * MCS node is properly initialized before updating the tail.
         */
        return (u32)xchg_relaxed(&lock->tail,
                                 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
}
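/*
 * A concrete (hypothetical) illustration of the tail exchange, assuming the
 * _Q_PENDING_BITS == 8 layout used in this branch: if the lock word reads
 * 0x00080101 (tail == encode_tail(1, 0), pending and locked both set), then
 * xchg_tail(lock, encode_tail(2, 1)) swaps only the tail halfword, leaving
 * the word at 0x000d0101 and returning the previous tail code word
 * 0x00080000, which the caller can hand to decode_tail() to find its
 * predecessor's MCS node.
 */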

#else /* _Q_PENDING_BITS == 8 */

/**
 * clear_pending - clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,* -> *,0,*
 */
static __always_inline void clear_pending(struct qspinlock *lock)
{
        atomic_andnot(_Q_PENDING_VAL, &lock->val);
}

/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
        atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
}

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
        u32 old, new, val = atomic_read(&lock->val);

        for (;;) {
                new = (val & _Q_LOCKED_PENDING_MASK) | tail;
                /*
                 * We can use relaxed semantics since the caller ensures that
                 * the MCS node is properly initialized before updating the
                 * tail.
                 */
                old = atomic_cmpxchg_relaxed(&lock->val, val, new);
                if (old == val)
                        break;

                val = old;
        }
        return old;
}
#endif /* _Q_PENDING_BITS == 8 */

/**
 * queued_fetch_set_pending_acquire - fetch the whole lock value and set pending
 * @lock : Pointer to queued spinlock structure
 * Return: The previous lock value
 *
 * *,*,* -> *,1,*
 */
#ifndef queued_fetch_set_pending_acquire
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
        return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
}
#endif

/**
 * set_locked - Set the lock bit and own the lock
 * @lock: Pointer to queued spinlock structure
 *
 * *,*,0 -> *,0,1
 */
static __always_inline void set_locked(struct qspinlock *lock)
{
        WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
}


/*
 * Generate the native code for queued_spin_unlock_slowpath(); provide NOPs for
 * all the PV callbacks.
 */

static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
                                           struct mcs_spinlock *prev) { }
static __always_inline void __pv_kick_node(struct qspinlock *lock,
                                           struct mcs_spinlock *node) { }
static __always_inline u32  __pv_wait_head_or_lock(struct qspinlock *lock,
                                                   struct mcs_spinlock *node)
                                                   { return 0; }

#define pv_enabled()            false

#define pv_init_node            __pv_init_node
#define pv_wait_node            __pv_wait_node
#define pv_kick_node            __pv_kick_node
#define pv_wait_head_or_lock    __pv_wait_head_or_lock

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define queued_spin_lock_slowpath       native_queued_spin_lock_slowpath
#endif

#endif /* _GEN_PV_LOCK_SLOWPATH */
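/*
 * To make the (queue tail, pending bit, lock value) triples used below more
 * concrete (again assuming the _Q_PENDING_BITS == 8 layout): a lock word of
 * 0x000d0101 has locked == 0x01, pending == 0x01 and tail == encode_tail(2, 1),
 * i.e. it is in an (n,1,1) state: the lock is held, another CPU has set the
 * pending bit and is waiting for the owner to go away, and CPU 2's idx-1 MCS
 * node is the tail of the wait queue.
 */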

/**
 * queued_spin_lock_slowpath - acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * @val: Current value of the queued spinlock 32-bit word
 *
 * (queue tail, pending bit, lock value)
 *
 *              fast     :    slow                                  :    unlock
 *                       :                                          :
 * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
 *                       :       | ^--------.------.             /  :
 *                       :       v           \      \            |  :
 * pending               :    (0,1,1) +--> (0,1,0)   \            |  :
 *                       :       | ^--'              |            |  :
 *                       :       v                   |            |  :
 * uncontended           :    (n,x,y) +--> (n,0,0)   --'          |  :
 *   queue               :       | ^--'                           |  :
 *                       :       v                                |  :
 * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
 *   queue               :         ^--'                             :
 */
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
        struct mcs_spinlock *prev, *next, *node;
        u32 old, tail;
        int idx;

        BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

        if (pv_enabled())
                goto pv_queue;

        if (virt_spin_lock(lock))
                return;

        /*
         * Wait for in-progress pending->locked hand-overs with a bounded
         * number of spins so that we guarantee forward progress.
         *
         * 0,1,0 -> 0,0,1
         */
        if (val == _Q_PENDING_VAL) {
                int cnt = _Q_PENDING_LOOPS;
                val = atomic_cond_read_relaxed(&lock->val,
                                               (VAL != _Q_PENDING_VAL) || !cnt--);
        }

        /*
         * If we observe any contention; queue.
         */
        if (val & ~_Q_LOCKED_MASK)
                goto queue;

        /*
         * trylock || pending
         *
         * 0,0,* -> 0,1,* -> 0,0,1 pending, trylock
         */
        val = queued_fetch_set_pending_acquire(lock);

        /*
         * If we observe contention, there is a concurrent locker.
         *
         * Undo and queue; our setting of PENDING might have made the
         * n,0,0 -> 0,0,0 transition fail and it will now be waiting
         * on @next to become !NULL.
         */
        if (unlikely(val & ~_Q_LOCKED_MASK)) {

                /* Undo PENDING if we set it. */
                if (!(val & _Q_PENDING_MASK))
                        clear_pending(lock);

                goto queue;
        }

        /*
         * We're pending, wait for the owner to go away.
         *
         * 0,1,1 -> 0,1,0
         *
         * this wait loop must be a load-acquire such that we match the
         * store-release that clears the locked bit and create lock
         * sequentiality; this is because not all
         * clear_pending_set_locked() implementations imply full
         * barriers.
         */
        if (val & _Q_LOCKED_MASK)
                atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_MASK));

        /*
         * take ownership and clear the pending bit.
         *
         * 0,1,0 -> 0,0,1
         */
        clear_pending_set_locked(lock);
        lockevent_inc(lock_pending);
        return;

        /*
         * End of pending bit optimistic spinning and beginning of MCS
         * queuing.
         */
queue:
        lockevent_inc(lock_slowpath);
pv_queue:
        node = this_cpu_ptr(&qnodes[0].mcs);
        idx = node->count++;
        tail = encode_tail(smp_processor_id(), idx);

        /*
         * 4 nodes are allocated based on the assumption that there will
         * not be nested NMIs taking spinlocks. That may not be true in
         * some architectures even though the chance of needing more than
         * 4 nodes will still be extremely unlikely. When that happens,
         * we fall back to spinning on the lock directly without using
         * any MCS node. This is not the most elegant solution, but is
         * simple enough.
         */
        if (unlikely(idx >= MAX_NODES)) {
                lockevent_inc(lock_no_node);
                while (!queued_spin_trylock(lock))
                        cpu_relax();
                goto release;
        }

        node = grab_mcs_node(node, idx);

        /*
         * Keep counts of non-zero index values:
         */
        lockevent_cond_inc(lock_use_node2 + idx - 1, idx);

        /*
         * Ensure that we increment the head node->count before initialising
         * the actual node. If the compiler is kind enough to reorder these
         * stores, then an IRQ could overwrite our assignments.
         */
        barrier();

        node->locked = 0;
        node->next = NULL;
        pv_init_node(node);

        /*
         * We touched a (possibly) cold cacheline in the per-cpu queue node;
         * attempt the trylock once more in the hope someone let go while we
         * weren't watching.
         */
        if (queued_spin_trylock(lock))
                goto release;

        /*
         * Ensure that the initialisation of @node is complete before we
         * publish the updated tail via xchg_tail() and potentially link
         * @node into the waitqueue via WRITE_ONCE(prev->next, node) below.
         */
        smp_wmb();

        /*
         * Publish the updated tail.
         * We have already touched the queueing cacheline; don't bother with
         * pending stuff.
         *
         * p,*,* -> n,*,*
         */
        old = xchg_tail(lock, tail);
        next = NULL;

        /*
         * if there was a previous node; link it and wait until reaching the
         * head of the waitqueue.
         */
        if (old & _Q_TAIL_MASK) {
                prev = decode_tail(old);

                /* Link @node into the waitqueue. */
                WRITE_ONCE(prev->next, node);

                pv_wait_node(node, prev);
                arch_mcs_spin_lock_contended(&node->locked);

                /*
                 * While waiting for the MCS lock, the next pointer may have
                 * been set by another lock waiter. We optimistically load
                 * the next pointer & prefetch the cacheline for writing
                 * to reduce latency in the upcoming MCS unlock operation.
                 */
                next = READ_ONCE(node->next);
                if (next)
                        prefetchw(next);
        }

        /*
         * we're at the head of the waitqueue, wait for the owner & pending to
         * go away.
         *
         * *,x,y -> *,0,0
         *
         * this wait loop must use a load-acquire such that we match the
         * store-release that clears the locked bit and create lock
         * sequentiality; this is because the set_locked() function below
         * does not imply a full barrier.
         *
         * The PV pv_wait_head_or_lock function, if active, will acquire
         * the lock and return a non-zero value. So we have to skip the
         * atomic_cond_read_acquire() call. As the next PV queue head hasn't
         * been designated yet, there is no way for the locked value to become
         * _Q_SLOW_VAL. So both the set_locked() and the
         * atomic_cmpxchg_relaxed() calls will be safe.
         *
         * If PV isn't active, 0 will be returned instead.
         *
         */
        if ((val = pv_wait_head_or_lock(lock, node)))
                goto locked;

        val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK));

locked:
        /*
         * claim the lock:
         *
         * n,0,0 -> 0,0,1 : lock, uncontended
         * *,*,0 -> *,*,1 : lock, contended
         *
         * If the queue head is the only one in the queue (lock value == tail)
         * and nobody is pending, clear the tail code and grab the lock.
         * Otherwise, we only need to grab the lock.
         */

        /*
         * In the PV case we might already have _Q_LOCKED_VAL set, because
         * of lock stealing; therefore we must also allow:
         *
         * n,0,1 -> 0,0,1
         *
         * Note: at this point: (val & _Q_PENDING_MASK) == 0, because of the
         * above wait condition, therefore any concurrent setting of
         * PENDING will make the uncontended transition fail.
         */
        if ((val & _Q_TAIL_MASK) == tail) {
                if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
                        goto release; /* No contention */
        }

        /*
         * Either somebody is queued behind us or _Q_PENDING_VAL got set
         * which will then detect the remaining tail and queue behind us
         * ensuring we'll see a @next.
552 */ 553 set_locked(lock); 554 555 /* 556 * contended path; wait for next if not observed yet, release. 557 */ 558 if (!next) 559 next = smp_cond_load_relaxed(&node->next, (VAL)); 560 561 arch_mcs_spin_unlock_contended(&next->locked); 562 pv_kick_node(lock, next); 563 564 release: 565 /* 566 * release the node 567 */ 568 __this_cpu_dec(qnodes[0].mcs.count); 569 } 570 EXPORT_SYMBOL(queued_spin_lock_slowpath); 571 572 /* 573 * Generate the paravirt code for queued_spin_unlock_slowpath(). 574 */ 575 #if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS) 576 #define _GEN_PV_LOCK_SLOWPATH 577 578 #undef pv_enabled 579 #define pv_enabled() true 580 581 #undef pv_init_node 582 #undef pv_wait_node 583 #undef pv_kick_node 584 #undef pv_wait_head_or_lock 585 586 #undef queued_spin_lock_slowpath 587 #define queued_spin_lock_slowpath __pv_queued_spin_lock_slowpath 588 589 #include "qspinlock_paravirt.h" 590 #include "qspinlock.c" 591 592 #endif 593