1 // SPDX-License-Identifier: GPL-2.0 2 /* kernel/rwsem.c: R/W semaphores, public implementation 3 * 4 * Written by David Howells (dhowells@redhat.com). 5 * Derived from asm-i386/semaphore.h 6 * 7 * Writer lock-stealing by Alex Shi <alex.shi@intel.com> 8 * and Michel Lespinasse <walken@google.com> 9 * 10 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com> 11 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes. 12 * 13 * Rwsem count bit fields re-definition and rwsem rearchitecture by 14 * Waiman Long <longman@redhat.com> and 15 * Peter Zijlstra <peterz@infradead.org>. 16 */ 17 18 #include <linux/types.h> 19 #include <linux/kernel.h> 20 #include <linux/sched.h> 21 #include <linux/sched/rt.h> 22 #include <linux/sched/task.h> 23 #include <linux/sched/debug.h> 24 #include <linux/sched/wake_q.h> 25 #include <linux/sched/signal.h> 26 #include <linux/sched/clock.h> 27 #include <linux/export.h> 28 #include <linux/rwsem.h> 29 #include <linux/atomic.h> 30 31 #ifndef CONFIG_PREEMPT_RT 32 #include "lock_events.h" 33 34 /* 35 * The least significant 2 bits of the owner value has the following 36 * meanings when set. 37 * - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers 38 * - Bit 1: RWSEM_NONSPINNABLE - Cannot spin on a reader-owned lock 39 * 40 * When the rwsem is reader-owned and a spinning writer has timed out, 41 * the nonspinnable bit will be set to disable optimistic spinning. 42 43 * When a writer acquires a rwsem, it puts its task_struct pointer 44 * into the owner field. It is cleared after an unlock. 45 * 46 * When a reader acquires a rwsem, it will also puts its task_struct 47 * pointer into the owner field with the RWSEM_READER_OWNED bit set. 48 * On unlock, the owner field will largely be left untouched. So 49 * for a free or reader-owned rwsem, the owner value may contain 50 * information about the last reader that acquires the rwsem. 51 * 52 * That information may be helpful in debugging cases where the system 53 * seems to hang on a reader owned rwsem especially if only one reader 54 * is involved. Ideally we would like to track all the readers that own 55 * a rwsem, but the overhead is simply too big. 56 * 57 * A fast path reader optimistic lock stealing is supported when the rwsem 58 * is previously owned by a writer and the following conditions are met: 59 * - rwsem is not currently writer owned 60 * - the handoff isn't set. 61 */ 62 #define RWSEM_READER_OWNED (1UL << 0) 63 #define RWSEM_NONSPINNABLE (1UL << 1) 64 #define RWSEM_OWNER_FLAGS_MASK (RWSEM_READER_OWNED | RWSEM_NONSPINNABLE) 65 66 #ifdef CONFIG_DEBUG_RWSEMS 67 # define DEBUG_RWSEMS_WARN_ON(c, sem) do { \ 68 if (!debug_locks_silent && \ 69 WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\ 70 #c, atomic_long_read(&(sem)->count), \ 71 (unsigned long) sem->magic, \ 72 atomic_long_read(&(sem)->owner), (long)current, \ 73 list_empty(&(sem)->wait_list) ? "" : "not ")) \ 74 debug_locks_off(); \ 75 } while (0) 76 #else 77 # define DEBUG_RWSEMS_WARN_ON(c, sem) 78 #endif 79 80 /* 81 * On 64-bit architectures, the bit definitions of the count are: 82 * 83 * Bit 0 - writer locked bit 84 * Bit 1 - waiters present bit 85 * Bit 2 - lock handoff bit 86 * Bits 3-7 - reserved 87 * Bits 8-62 - 55-bit reader count 88 * Bit 63 - read fail bit 89 * 90 * On 32-bit architectures, the bit definitions of the count are: 91 * 92 * Bit 0 - writer locked bit 93 * Bit 1 - waiters present bit 94 * Bit 2 - lock handoff bit 95 * Bits 3-7 - reserved 96 * Bits 8-30 - 23-bit reader count 97 * Bit 31 - read fail bit 98 * 99 * It is not likely that the most significant bit (read fail bit) will ever 100 * be set. This guard bit is still checked anyway in the down_read() fastpath 101 * just in case we need to use up more of the reader bits for other purpose 102 * in the future. 103 * 104 * atomic_long_fetch_add() is used to obtain reader lock, whereas 105 * atomic_long_cmpxchg() will be used to obtain writer lock. 106 * 107 * There are three places where the lock handoff bit may be set or cleared. 108 * 1) rwsem_mark_wake() for readers. 109 * 2) rwsem_try_write_lock() for writers. 110 * 3) Error path of rwsem_down_write_slowpath(). 111 * 112 * For all the above cases, wait_lock will be held. A writer must also 113 * be the first one in the wait_list to be eligible for setting the handoff 114 * bit. So concurrent setting/clearing of handoff bit is not possible. 115 */ 116 #define RWSEM_WRITER_LOCKED (1UL << 0) 117 #define RWSEM_FLAG_WAITERS (1UL << 1) 118 #define RWSEM_FLAG_HANDOFF (1UL << 2) 119 #define RWSEM_FLAG_READFAIL (1UL << (BITS_PER_LONG - 1)) 120 121 #define RWSEM_READER_SHIFT 8 122 #define RWSEM_READER_BIAS (1UL << RWSEM_READER_SHIFT) 123 #define RWSEM_READER_MASK (~(RWSEM_READER_BIAS - 1)) 124 #define RWSEM_WRITER_MASK RWSEM_WRITER_LOCKED 125 #define RWSEM_LOCK_MASK (RWSEM_WRITER_MASK|RWSEM_READER_MASK) 126 #define RWSEM_READ_FAILED_MASK (RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\ 127 RWSEM_FLAG_HANDOFF|RWSEM_FLAG_READFAIL) 128 129 /* 130 * All writes to owner are protected by WRITE_ONCE() to make sure that 131 * store tearing can't happen as optimistic spinners may read and use 132 * the owner value concurrently without lock. Read from owner, however, 133 * may not need READ_ONCE() as long as the pointer value is only used 134 * for comparison and isn't being dereferenced. 135 */ 136 static inline void rwsem_set_owner(struct rw_semaphore *sem) 137 { 138 atomic_long_set(&sem->owner, (long)current); 139 } 140 141 static inline void rwsem_clear_owner(struct rw_semaphore *sem) 142 { 143 atomic_long_set(&sem->owner, 0); 144 } 145 146 /* 147 * Test the flags in the owner field. 148 */ 149 static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags) 150 { 151 return atomic_long_read(&sem->owner) & flags; 152 } 153 154 /* 155 * The task_struct pointer of the last owning reader will be left in 156 * the owner field. 157 * 158 * Note that the owner value just indicates the task has owned the rwsem 159 * previously, it may not be the real owner or one of the real owners 160 * anymore when that field is examined, so take it with a grain of salt. 161 * 162 * The reader non-spinnable bit is preserved. 163 */ 164 static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem, 165 struct task_struct *owner) 166 { 167 unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED | 168 (atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE); 169 170 atomic_long_set(&sem->owner, val); 171 } 172 173 static inline void rwsem_set_reader_owned(struct rw_semaphore *sem) 174 { 175 __rwsem_set_reader_owned(sem, current); 176 } 177 178 /* 179 * Return true if the rwsem is owned by a reader. 180 */ 181 static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem) 182 { 183 #ifdef CONFIG_DEBUG_RWSEMS 184 /* 185 * Check the count to see if it is write-locked. 186 */ 187 long count = atomic_long_read(&sem->count); 188 189 if (count & RWSEM_WRITER_MASK) 190 return false; 191 #endif 192 return rwsem_test_oflags(sem, RWSEM_READER_OWNED); 193 } 194 195 #ifdef CONFIG_DEBUG_RWSEMS 196 /* 197 * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there 198 * is a task pointer in owner of a reader-owned rwsem, it will be the 199 * real owner or one of the real owners. The only exception is when the 200 * unlock is done by up_read_non_owner(). 201 */ 202 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem) 203 { 204 unsigned long val = atomic_long_read(&sem->owner); 205 206 while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) { 207 if (atomic_long_try_cmpxchg(&sem->owner, &val, 208 val & RWSEM_OWNER_FLAGS_MASK)) 209 return; 210 } 211 } 212 #else 213 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem) 214 { 215 } 216 #endif 217 218 /* 219 * Set the RWSEM_NONSPINNABLE bits if the RWSEM_READER_OWNED flag 220 * remains set. Otherwise, the operation will be aborted. 221 */ 222 static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem) 223 { 224 unsigned long owner = atomic_long_read(&sem->owner); 225 226 do { 227 if (!(owner & RWSEM_READER_OWNED)) 228 break; 229 if (owner & RWSEM_NONSPINNABLE) 230 break; 231 } while (!atomic_long_try_cmpxchg(&sem->owner, &owner, 232 owner | RWSEM_NONSPINNABLE)); 233 } 234 235 static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp) 236 { 237 *cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count); 238 239 if (WARN_ON_ONCE(*cntp < 0)) 240 rwsem_set_nonspinnable(sem); 241 242 if (!(*cntp & RWSEM_READ_FAILED_MASK)) { 243 rwsem_set_reader_owned(sem); 244 return true; 245 } 246 247 return false; 248 } 249 250 static inline bool rwsem_write_trylock(struct rw_semaphore *sem) 251 { 252 long tmp = RWSEM_UNLOCKED_VALUE; 253 254 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) { 255 rwsem_set_owner(sem); 256 return true; 257 } 258 259 return false; 260 } 261 262 /* 263 * Return just the real task structure pointer of the owner 264 */ 265 static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem) 266 { 267 return (struct task_struct *) 268 (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK); 269 } 270 271 /* 272 * Return the real task structure pointer of the owner and the embedded 273 * flags in the owner. pflags must be non-NULL. 274 */ 275 static inline struct task_struct * 276 rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags) 277 { 278 unsigned long owner = atomic_long_read(&sem->owner); 279 280 *pflags = owner & RWSEM_OWNER_FLAGS_MASK; 281 return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK); 282 } 283 284 /* 285 * Guide to the rw_semaphore's count field. 286 * 287 * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned 288 * by a writer. 289 * 290 * The lock is owned by readers when 291 * (1) the RWSEM_WRITER_LOCKED isn't set in count, 292 * (2) some of the reader bits are set in count, and 293 * (3) the owner field has RWSEM_READ_OWNED bit set. 294 * 295 * Having some reader bits set is not enough to guarantee a readers owned 296 * lock as the readers may be in the process of backing out from the count 297 * and a writer has just released the lock. So another writer may steal 298 * the lock immediately after that. 299 */ 300 301 /* 302 * Initialize an rwsem: 303 */ 304 void __init_rwsem(struct rw_semaphore *sem, const char *name, 305 struct lock_class_key *key) 306 { 307 #ifdef CONFIG_DEBUG_LOCK_ALLOC 308 /* 309 * Make sure we are not reinitializing a held semaphore: 310 */ 311 debug_check_no_locks_freed((void *)sem, sizeof(*sem)); 312 lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP); 313 #endif 314 #ifdef CONFIG_DEBUG_RWSEMS 315 sem->magic = sem; 316 #endif 317 atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE); 318 raw_spin_lock_init(&sem->wait_lock); 319 INIT_LIST_HEAD(&sem->wait_list); 320 atomic_long_set(&sem->owner, 0L); 321 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER 322 osq_lock_init(&sem->osq); 323 #endif 324 } 325 EXPORT_SYMBOL(__init_rwsem); 326 327 enum rwsem_waiter_type { 328 RWSEM_WAITING_FOR_WRITE, 329 RWSEM_WAITING_FOR_READ 330 }; 331 332 struct rwsem_waiter { 333 struct list_head list; 334 struct task_struct *task; 335 enum rwsem_waiter_type type; 336 unsigned long timeout; 337 }; 338 #define rwsem_first_waiter(sem) \ 339 list_first_entry(&sem->wait_list, struct rwsem_waiter, list) 340 341 enum rwsem_wake_type { 342 RWSEM_WAKE_ANY, /* Wake whatever's at head of wait list */ 343 RWSEM_WAKE_READERS, /* Wake readers only */ 344 RWSEM_WAKE_READ_OWNED /* Waker thread holds the read lock */ 345 }; 346 347 enum writer_wait_state { 348 WRITER_NOT_FIRST, /* Writer is not first in wait list */ 349 WRITER_FIRST, /* Writer is first in wait list */ 350 WRITER_HANDOFF /* Writer is first & handoff needed */ 351 }; 352 353 /* 354 * The typical HZ value is either 250 or 1000. So set the minimum waiting 355 * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait 356 * queue before initiating the handoff protocol. 357 */ 358 #define RWSEM_WAIT_TIMEOUT DIV_ROUND_UP(HZ, 250) 359 360 /* 361 * Magic number to batch-wakeup waiting readers, even when writers are 362 * also present in the queue. This both limits the amount of work the 363 * waking thread must do and also prevents any potential counter overflow, 364 * however unlikely. 365 */ 366 #define MAX_READERS_WAKEUP 0x100 367 368 /* 369 * handle the lock release when processes blocked on it that can now run 370 * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must 371 * have been set. 372 * - there must be someone on the queue 373 * - the wait_lock must be held by the caller 374 * - tasks are marked for wakeup, the caller must later invoke wake_up_q() 375 * to actually wakeup the blocked task(s) and drop the reference count, 376 * preferably when the wait_lock is released 377 * - woken process blocks are discarded from the list after having task zeroed 378 * - writers are only marked woken if downgrading is false 379 */ 380 static void rwsem_mark_wake(struct rw_semaphore *sem, 381 enum rwsem_wake_type wake_type, 382 struct wake_q_head *wake_q) 383 { 384 struct rwsem_waiter *waiter, *tmp; 385 long oldcount, woken = 0, adjustment = 0; 386 struct list_head wlist; 387 388 lockdep_assert_held(&sem->wait_lock); 389 390 /* 391 * Take a peek at the queue head waiter such that we can determine 392 * the wakeup(s) to perform. 393 */ 394 waiter = rwsem_first_waiter(sem); 395 396 if (waiter->type == RWSEM_WAITING_FOR_WRITE) { 397 if (wake_type == RWSEM_WAKE_ANY) { 398 /* 399 * Mark writer at the front of the queue for wakeup. 400 * Until the task is actually later awoken later by 401 * the caller, other writers are able to steal it. 402 * Readers, on the other hand, will block as they 403 * will notice the queued writer. 404 */ 405 wake_q_add(wake_q, waiter->task); 406 lockevent_inc(rwsem_wake_writer); 407 } 408 409 return; 410 } 411 412 /* 413 * No reader wakeup if there are too many of them already. 414 */ 415 if (unlikely(atomic_long_read(&sem->count) < 0)) 416 return; 417 418 /* 419 * Writers might steal the lock before we grant it to the next reader. 420 * We prefer to do the first reader grant before counting readers 421 * so we can bail out early if a writer stole the lock. 422 */ 423 if (wake_type != RWSEM_WAKE_READ_OWNED) { 424 struct task_struct *owner; 425 426 adjustment = RWSEM_READER_BIAS; 427 oldcount = atomic_long_fetch_add(adjustment, &sem->count); 428 if (unlikely(oldcount & RWSEM_WRITER_MASK)) { 429 /* 430 * When we've been waiting "too" long (for writers 431 * to give up the lock), request a HANDOFF to 432 * force the issue. 433 */ 434 if (!(oldcount & RWSEM_FLAG_HANDOFF) && 435 time_after(jiffies, waiter->timeout)) { 436 adjustment -= RWSEM_FLAG_HANDOFF; 437 lockevent_inc(rwsem_rlock_handoff); 438 } 439 440 atomic_long_add(-adjustment, &sem->count); 441 return; 442 } 443 /* 444 * Set it to reader-owned to give spinners an early 445 * indication that readers now have the lock. 446 * The reader nonspinnable bit seen at slowpath entry of 447 * the reader is copied over. 448 */ 449 owner = waiter->task; 450 __rwsem_set_reader_owned(sem, owner); 451 } 452 453 /* 454 * Grant up to MAX_READERS_WAKEUP read locks to all the readers in the 455 * queue. We know that the woken will be at least 1 as we accounted 456 * for above. Note we increment the 'active part' of the count by the 457 * number of readers before waking any processes up. 458 * 459 * This is an adaptation of the phase-fair R/W locks where at the 460 * reader phase (first waiter is a reader), all readers are eligible 461 * to acquire the lock at the same time irrespective of their order 462 * in the queue. The writers acquire the lock according to their 463 * order in the queue. 464 * 465 * We have to do wakeup in 2 passes to prevent the possibility that 466 * the reader count may be decremented before it is incremented. It 467 * is because the to-be-woken waiter may not have slept yet. So it 468 * may see waiter->task got cleared, finish its critical section and 469 * do an unlock before the reader count increment. 470 * 471 * 1) Collect the read-waiters in a separate list, count them and 472 * fully increment the reader count in rwsem. 473 * 2) For each waiters in the new list, clear waiter->task and 474 * put them into wake_q to be woken up later. 475 */ 476 INIT_LIST_HEAD(&wlist); 477 list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) { 478 if (waiter->type == RWSEM_WAITING_FOR_WRITE) 479 continue; 480 481 woken++; 482 list_move_tail(&waiter->list, &wlist); 483 484 /* 485 * Limit # of readers that can be woken up per wakeup call. 486 */ 487 if (unlikely(woken >= MAX_READERS_WAKEUP)) 488 break; 489 } 490 491 adjustment = woken * RWSEM_READER_BIAS - adjustment; 492 lockevent_cond_inc(rwsem_wake_reader, woken); 493 if (list_empty(&sem->wait_list)) { 494 /* hit end of list above */ 495 adjustment -= RWSEM_FLAG_WAITERS; 496 } 497 498 /* 499 * When we've woken a reader, we no longer need to force writers 500 * to give up the lock and we can clear HANDOFF. 501 */ 502 if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF)) 503 adjustment -= RWSEM_FLAG_HANDOFF; 504 505 if (adjustment) 506 atomic_long_add(adjustment, &sem->count); 507 508 /* 2nd pass */ 509 list_for_each_entry_safe(waiter, tmp, &wlist, list) { 510 struct task_struct *tsk; 511 512 tsk = waiter->task; 513 get_task_struct(tsk); 514 515 /* 516 * Ensure calling get_task_struct() before setting the reader 517 * waiter to nil such that rwsem_down_read_slowpath() cannot 518 * race with do_exit() by always holding a reference count 519 * to the task to wakeup. 520 */ 521 smp_store_release(&waiter->task, NULL); 522 /* 523 * Ensure issuing the wakeup (either by us or someone else) 524 * after setting the reader waiter to nil. 525 */ 526 wake_q_add_safe(wake_q, tsk); 527 } 528 } 529 530 /* 531 * This function must be called with the sem->wait_lock held to prevent 532 * race conditions between checking the rwsem wait list and setting the 533 * sem->count accordingly. 534 * 535 * If wstate is WRITER_HANDOFF, it will make sure that either the handoff 536 * bit is set or the lock is acquired with handoff bit cleared. 537 */ 538 static inline bool rwsem_try_write_lock(struct rw_semaphore *sem, 539 enum writer_wait_state wstate) 540 { 541 long count, new; 542 543 lockdep_assert_held(&sem->wait_lock); 544 545 count = atomic_long_read(&sem->count); 546 do { 547 bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF); 548 549 if (has_handoff && wstate == WRITER_NOT_FIRST) 550 return false; 551 552 new = count; 553 554 if (count & RWSEM_LOCK_MASK) { 555 if (has_handoff || (wstate != WRITER_HANDOFF)) 556 return false; 557 558 new |= RWSEM_FLAG_HANDOFF; 559 } else { 560 new |= RWSEM_WRITER_LOCKED; 561 new &= ~RWSEM_FLAG_HANDOFF; 562 563 if (list_is_singular(&sem->wait_list)) 564 new &= ~RWSEM_FLAG_WAITERS; 565 } 566 } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new)); 567 568 /* 569 * We have either acquired the lock with handoff bit cleared or 570 * set the handoff bit. 571 */ 572 if (new & RWSEM_FLAG_HANDOFF) 573 return false; 574 575 rwsem_set_owner(sem); 576 return true; 577 } 578 579 /* 580 * The rwsem_spin_on_owner() function returns the following 4 values 581 * depending on the lock owner state. 582 * OWNER_NULL : owner is currently NULL 583 * OWNER_WRITER: when owner changes and is a writer 584 * OWNER_READER: when owner changes and the new owner may be a reader. 585 * OWNER_NONSPINNABLE: 586 * when optimistic spinning has to stop because either the 587 * owner stops running, is unknown, or its timeslice has 588 * been used up. 589 */ 590 enum owner_state { 591 OWNER_NULL = 1 << 0, 592 OWNER_WRITER = 1 << 1, 593 OWNER_READER = 1 << 2, 594 OWNER_NONSPINNABLE = 1 << 3, 595 }; 596 597 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER 598 /* 599 * Try to acquire write lock before the writer has been put on wait queue. 600 */ 601 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem) 602 { 603 long count = atomic_long_read(&sem->count); 604 605 while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) { 606 if (atomic_long_try_cmpxchg_acquire(&sem->count, &count, 607 count | RWSEM_WRITER_LOCKED)) { 608 rwsem_set_owner(sem); 609 lockevent_inc(rwsem_opt_lock); 610 return true; 611 } 612 } 613 return false; 614 } 615 616 static inline bool owner_on_cpu(struct task_struct *owner) 617 { 618 /* 619 * As lock holder preemption issue, we both skip spinning if 620 * task is not on cpu or its cpu is preempted 621 */ 622 return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner)); 623 } 624 625 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem) 626 { 627 struct task_struct *owner; 628 unsigned long flags; 629 bool ret = true; 630 631 if (need_resched()) { 632 lockevent_inc(rwsem_opt_fail); 633 return false; 634 } 635 636 preempt_disable(); 637 /* 638 * Disable preemption is equal to the RCU read-side crital section, 639 * thus the task_strcut structure won't go away. 640 */ 641 owner = rwsem_owner_flags(sem, &flags); 642 /* 643 * Don't check the read-owner as the entry may be stale. 644 */ 645 if ((flags & RWSEM_NONSPINNABLE) || 646 (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner))) 647 ret = false; 648 preempt_enable(); 649 650 lockevent_cond_inc(rwsem_opt_fail, !ret); 651 return ret; 652 } 653 654 #define OWNER_SPINNABLE (OWNER_NULL | OWNER_WRITER | OWNER_READER) 655 656 static inline enum owner_state 657 rwsem_owner_state(struct task_struct *owner, unsigned long flags) 658 { 659 if (flags & RWSEM_NONSPINNABLE) 660 return OWNER_NONSPINNABLE; 661 662 if (flags & RWSEM_READER_OWNED) 663 return OWNER_READER; 664 665 return owner ? OWNER_WRITER : OWNER_NULL; 666 } 667 668 static noinline enum owner_state 669 rwsem_spin_on_owner(struct rw_semaphore *sem) 670 { 671 struct task_struct *new, *owner; 672 unsigned long flags, new_flags; 673 enum owner_state state; 674 675 lockdep_assert_preemption_disabled(); 676 677 owner = rwsem_owner_flags(sem, &flags); 678 state = rwsem_owner_state(owner, flags); 679 if (state != OWNER_WRITER) 680 return state; 681 682 for (;;) { 683 /* 684 * When a waiting writer set the handoff flag, it may spin 685 * on the owner as well. Once that writer acquires the lock, 686 * we can spin on it. So we don't need to quit even when the 687 * handoff bit is set. 688 */ 689 new = rwsem_owner_flags(sem, &new_flags); 690 if ((new != owner) || (new_flags != flags)) { 691 state = rwsem_owner_state(new, new_flags); 692 break; 693 } 694 695 /* 696 * Ensure we emit the owner->on_cpu, dereference _after_ 697 * checking sem->owner still matches owner, if that fails, 698 * owner might point to free()d memory, if it still matches, 699 * our spinning context already disabled preemption which is 700 * equal to RCU read-side crital section ensures the memory 701 * stays valid. 702 */ 703 barrier(); 704 705 if (need_resched() || !owner_on_cpu(owner)) { 706 state = OWNER_NONSPINNABLE; 707 break; 708 } 709 710 cpu_relax(); 711 } 712 713 return state; 714 } 715 716 /* 717 * Calculate reader-owned rwsem spinning threshold for writer 718 * 719 * The more readers own the rwsem, the longer it will take for them to 720 * wind down and free the rwsem. So the empirical formula used to 721 * determine the actual spinning time limit here is: 722 * 723 * Spinning threshold = (10 + nr_readers/2)us 724 * 725 * The limit is capped to a maximum of 25us (30 readers). This is just 726 * a heuristic and is subjected to change in the future. 727 */ 728 static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem) 729 { 730 long count = atomic_long_read(&sem->count); 731 int readers = count >> RWSEM_READER_SHIFT; 732 u64 delta; 733 734 if (readers > 30) 735 readers = 30; 736 delta = (20 + readers) * NSEC_PER_USEC / 2; 737 738 return sched_clock() + delta; 739 } 740 741 static bool rwsem_optimistic_spin(struct rw_semaphore *sem) 742 { 743 bool taken = false; 744 int prev_owner_state = OWNER_NULL; 745 int loop = 0; 746 u64 rspin_threshold = 0; 747 748 preempt_disable(); 749 750 /* sem->wait_lock should not be held when doing optimistic spinning */ 751 if (!osq_lock(&sem->osq)) 752 goto done; 753 754 /* 755 * Optimistically spin on the owner field and attempt to acquire the 756 * lock whenever the owner changes. Spinning will be stopped when: 757 * 1) the owning writer isn't running; or 758 * 2) readers own the lock and spinning time has exceeded limit. 759 */ 760 for (;;) { 761 enum owner_state owner_state; 762 763 owner_state = rwsem_spin_on_owner(sem); 764 if (!(owner_state & OWNER_SPINNABLE)) 765 break; 766 767 /* 768 * Try to acquire the lock 769 */ 770 taken = rwsem_try_write_lock_unqueued(sem); 771 772 if (taken) 773 break; 774 775 /* 776 * Time-based reader-owned rwsem optimistic spinning 777 */ 778 if (owner_state == OWNER_READER) { 779 /* 780 * Re-initialize rspin_threshold every time when 781 * the owner state changes from non-reader to reader. 782 * This allows a writer to steal the lock in between 783 * 2 reader phases and have the threshold reset at 784 * the beginning of the 2nd reader phase. 785 */ 786 if (prev_owner_state != OWNER_READER) { 787 if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE)) 788 break; 789 rspin_threshold = rwsem_rspin_threshold(sem); 790 loop = 0; 791 } 792 793 /* 794 * Check time threshold once every 16 iterations to 795 * avoid calling sched_clock() too frequently so 796 * as to reduce the average latency between the times 797 * when the lock becomes free and when the spinner 798 * is ready to do a trylock. 799 */ 800 else if (!(++loop & 0xf) && (sched_clock() > rspin_threshold)) { 801 rwsem_set_nonspinnable(sem); 802 lockevent_inc(rwsem_opt_nospin); 803 break; 804 } 805 } 806 807 /* 808 * An RT task cannot do optimistic spinning if it cannot 809 * be sure the lock holder is running or live-lock may 810 * happen if the current task and the lock holder happen 811 * to run in the same CPU. However, aborting optimistic 812 * spinning while a NULL owner is detected may miss some 813 * opportunity where spinning can continue without causing 814 * problem. 815 * 816 * There are 2 possible cases where an RT task may be able 817 * to continue spinning. 818 * 819 * 1) The lock owner is in the process of releasing the 820 * lock, sem->owner is cleared but the lock has not 821 * been released yet. 822 * 2) The lock was free and owner cleared, but another 823 * task just comes in and acquire the lock before 824 * we try to get it. The new owner may be a spinnable 825 * writer. 826 * 827 * To take advantage of two scenarios listed above, the RT 828 * task is made to retry one more time to see if it can 829 * acquire the lock or continue spinning on the new owning 830 * writer. Of course, if the time lag is long enough or the 831 * new owner is not a writer or spinnable, the RT task will 832 * quit spinning. 833 * 834 * If the owner is a writer, the need_resched() check is 835 * done inside rwsem_spin_on_owner(). If the owner is not 836 * a writer, need_resched() check needs to be done here. 837 */ 838 if (owner_state != OWNER_WRITER) { 839 if (need_resched()) 840 break; 841 if (rt_task(current) && 842 (prev_owner_state != OWNER_WRITER)) 843 break; 844 } 845 prev_owner_state = owner_state; 846 847 /* 848 * The cpu_relax() call is a compiler barrier which forces 849 * everything in this loop to be re-loaded. We don't need 850 * memory barriers as we'll eventually observe the right 851 * values at the cost of a few extra spins. 852 */ 853 cpu_relax(); 854 } 855 osq_unlock(&sem->osq); 856 done: 857 preempt_enable(); 858 lockevent_cond_inc(rwsem_opt_fail, !taken); 859 return taken; 860 } 861 862 /* 863 * Clear the owner's RWSEM_NONSPINNABLE bit if it is set. This should 864 * only be called when the reader count reaches 0. 865 */ 866 static inline void clear_nonspinnable(struct rw_semaphore *sem) 867 { 868 if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE)) 869 atomic_long_andnot(RWSEM_NONSPINNABLE, &sem->owner); 870 } 871 872 #else 873 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem) 874 { 875 return false; 876 } 877 878 static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem) 879 { 880 return false; 881 } 882 883 static inline void clear_nonspinnable(struct rw_semaphore *sem) { } 884 885 static inline enum owner_state 886 rwsem_spin_on_owner(struct rw_semaphore *sem) 887 { 888 return OWNER_NONSPINNABLE; 889 } 890 #endif 891 892 /* 893 * Wait for the read lock to be granted 894 */ 895 static struct rw_semaphore __sched * 896 rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int state) 897 { 898 long adjustment = -RWSEM_READER_BIAS; 899 long rcnt = (count >> RWSEM_READER_SHIFT); 900 struct rwsem_waiter waiter; 901 DEFINE_WAKE_Q(wake_q); 902 bool wake = false; 903 904 /* 905 * To prevent a constant stream of readers from starving a sleeping 906 * waiter, don't attempt optimistic lock stealing if the lock is 907 * currently owned by readers. 908 */ 909 if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) && 910 (rcnt > 1) && !(count & RWSEM_WRITER_LOCKED)) 911 goto queue; 912 913 /* 914 * Reader optimistic lock stealing. 915 */ 916 if (!(count & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF))) { 917 rwsem_set_reader_owned(sem); 918 lockevent_inc(rwsem_rlock_steal); 919 920 /* 921 * Wake up other readers in the wait queue if it is 922 * the first reader. 923 */ 924 if ((rcnt == 1) && (count & RWSEM_FLAG_WAITERS)) { 925 raw_spin_lock_irq(&sem->wait_lock); 926 if (!list_empty(&sem->wait_list)) 927 rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, 928 &wake_q); 929 raw_spin_unlock_irq(&sem->wait_lock); 930 wake_up_q(&wake_q); 931 } 932 return sem; 933 } 934 935 queue: 936 waiter.task = current; 937 waiter.type = RWSEM_WAITING_FOR_READ; 938 waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT; 939 940 raw_spin_lock_irq(&sem->wait_lock); 941 if (list_empty(&sem->wait_list)) { 942 /* 943 * In case the wait queue is empty and the lock isn't owned 944 * by a writer or has the handoff bit set, this reader can 945 * exit the slowpath and return immediately as its 946 * RWSEM_READER_BIAS has already been set in the count. 947 */ 948 if (!(atomic_long_read(&sem->count) & 949 (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) { 950 /* Provide lock ACQUIRE */ 951 smp_acquire__after_ctrl_dep(); 952 raw_spin_unlock_irq(&sem->wait_lock); 953 rwsem_set_reader_owned(sem); 954 lockevent_inc(rwsem_rlock_fast); 955 return sem; 956 } 957 adjustment += RWSEM_FLAG_WAITERS; 958 } 959 list_add_tail(&waiter.list, &sem->wait_list); 960 961 /* we're now waiting on the lock, but no longer actively locking */ 962 count = atomic_long_add_return(adjustment, &sem->count); 963 964 /* 965 * If there are no active locks, wake the front queued process(es). 966 * 967 * If there are no writers and we are first in the queue, 968 * wake our own waiter to join the existing active readers ! 969 */ 970 if (!(count & RWSEM_LOCK_MASK)) { 971 clear_nonspinnable(sem); 972 wake = true; 973 } 974 if (wake || (!(count & RWSEM_WRITER_MASK) && 975 (adjustment & RWSEM_FLAG_WAITERS))) 976 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q); 977 978 raw_spin_unlock_irq(&sem->wait_lock); 979 wake_up_q(&wake_q); 980 981 /* wait to be given the lock */ 982 for (;;) { 983 set_current_state(state); 984 if (!smp_load_acquire(&waiter.task)) { 985 /* Matches rwsem_mark_wake()'s smp_store_release(). */ 986 break; 987 } 988 if (signal_pending_state(state, current)) { 989 raw_spin_lock_irq(&sem->wait_lock); 990 if (waiter.task) 991 goto out_nolock; 992 raw_spin_unlock_irq(&sem->wait_lock); 993 /* Ordered by sem->wait_lock against rwsem_mark_wake(). */ 994 break; 995 } 996 schedule(); 997 lockevent_inc(rwsem_sleep_reader); 998 } 999 1000 __set_current_state(TASK_RUNNING); 1001 lockevent_inc(rwsem_rlock); 1002 return sem; 1003 1004 out_nolock: 1005 list_del(&waiter.list); 1006 if (list_empty(&sem->wait_list)) { 1007 atomic_long_andnot(RWSEM_FLAG_WAITERS|RWSEM_FLAG_HANDOFF, 1008 &sem->count); 1009 } 1010 raw_spin_unlock_irq(&sem->wait_lock); 1011 __set_current_state(TASK_RUNNING); 1012 lockevent_inc(rwsem_rlock_fail); 1013 return ERR_PTR(-EINTR); 1014 } 1015 1016 /* 1017 * Wait until we successfully acquire the write lock 1018 */ 1019 static struct rw_semaphore * 1020 rwsem_down_write_slowpath(struct rw_semaphore *sem, int state) 1021 { 1022 long count; 1023 enum writer_wait_state wstate; 1024 struct rwsem_waiter waiter; 1025 struct rw_semaphore *ret = sem; 1026 DEFINE_WAKE_Q(wake_q); 1027 1028 /* do optimistic spinning and steal lock if possible */ 1029 if (rwsem_can_spin_on_owner(sem) && rwsem_optimistic_spin(sem)) { 1030 /* rwsem_optimistic_spin() implies ACQUIRE on success */ 1031 return sem; 1032 } 1033 1034 /* 1035 * Optimistic spinning failed, proceed to the slowpath 1036 * and block until we can acquire the sem. 1037 */ 1038 waiter.task = current; 1039 waiter.type = RWSEM_WAITING_FOR_WRITE; 1040 waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT; 1041 1042 raw_spin_lock_irq(&sem->wait_lock); 1043 1044 /* account for this before adding a new element to the list */ 1045 wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST; 1046 1047 list_add_tail(&waiter.list, &sem->wait_list); 1048 1049 /* we're now waiting on the lock */ 1050 if (wstate == WRITER_NOT_FIRST) { 1051 count = atomic_long_read(&sem->count); 1052 1053 /* 1054 * If there were already threads queued before us and: 1055 * 1) there are no active locks, wake the front 1056 * queued process(es) as the handoff bit might be set. 1057 * 2) there are no active writers and some readers, the lock 1058 * must be read owned; so we try to wake any read lock 1059 * waiters that were queued ahead of us. 1060 */ 1061 if (count & RWSEM_WRITER_MASK) 1062 goto wait; 1063 1064 rwsem_mark_wake(sem, (count & RWSEM_READER_MASK) 1065 ? RWSEM_WAKE_READERS 1066 : RWSEM_WAKE_ANY, &wake_q); 1067 1068 if (!wake_q_empty(&wake_q)) { 1069 /* 1070 * We want to minimize wait_lock hold time especially 1071 * when a large number of readers are to be woken up. 1072 */ 1073 raw_spin_unlock_irq(&sem->wait_lock); 1074 wake_up_q(&wake_q); 1075 wake_q_init(&wake_q); /* Used again, reinit */ 1076 raw_spin_lock_irq(&sem->wait_lock); 1077 } 1078 } else { 1079 atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count); 1080 } 1081 1082 wait: 1083 /* wait until we successfully acquire the lock */ 1084 set_current_state(state); 1085 for (;;) { 1086 if (rwsem_try_write_lock(sem, wstate)) { 1087 /* rwsem_try_write_lock() implies ACQUIRE on success */ 1088 break; 1089 } 1090 1091 raw_spin_unlock_irq(&sem->wait_lock); 1092 1093 /* 1094 * After setting the handoff bit and failing to acquire 1095 * the lock, attempt to spin on owner to accelerate lock 1096 * transfer. If the previous owner is a on-cpu writer and it 1097 * has just released the lock, OWNER_NULL will be returned. 1098 * In this case, we attempt to acquire the lock again 1099 * without sleeping. 1100 */ 1101 if (wstate == WRITER_HANDOFF) { 1102 enum owner_state owner_state; 1103 1104 preempt_disable(); 1105 owner_state = rwsem_spin_on_owner(sem); 1106 preempt_enable(); 1107 1108 if (owner_state == OWNER_NULL) 1109 goto trylock_again; 1110 } 1111 1112 /* Block until there are no active lockers. */ 1113 for (;;) { 1114 if (signal_pending_state(state, current)) 1115 goto out_nolock; 1116 1117 schedule(); 1118 lockevent_inc(rwsem_sleep_writer); 1119 set_current_state(state); 1120 /* 1121 * If HANDOFF bit is set, unconditionally do 1122 * a trylock. 1123 */ 1124 if (wstate == WRITER_HANDOFF) 1125 break; 1126 1127 if ((wstate == WRITER_NOT_FIRST) && 1128 (rwsem_first_waiter(sem) == &waiter)) 1129 wstate = WRITER_FIRST; 1130 1131 count = atomic_long_read(&sem->count); 1132 if (!(count & RWSEM_LOCK_MASK)) 1133 break; 1134 1135 /* 1136 * The setting of the handoff bit is deferred 1137 * until rwsem_try_write_lock() is called. 1138 */ 1139 if ((wstate == WRITER_FIRST) && (rt_task(current) || 1140 time_after(jiffies, waiter.timeout))) { 1141 wstate = WRITER_HANDOFF; 1142 lockevent_inc(rwsem_wlock_handoff); 1143 break; 1144 } 1145 } 1146 trylock_again: 1147 raw_spin_lock_irq(&sem->wait_lock); 1148 } 1149 __set_current_state(TASK_RUNNING); 1150 list_del(&waiter.list); 1151 raw_spin_unlock_irq(&sem->wait_lock); 1152 lockevent_inc(rwsem_wlock); 1153 1154 return ret; 1155 1156 out_nolock: 1157 __set_current_state(TASK_RUNNING); 1158 raw_spin_lock_irq(&sem->wait_lock); 1159 list_del(&waiter.list); 1160 1161 if (unlikely(wstate == WRITER_HANDOFF)) 1162 atomic_long_add(-RWSEM_FLAG_HANDOFF, &sem->count); 1163 1164 if (list_empty(&sem->wait_list)) 1165 atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count); 1166 else 1167 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q); 1168 raw_spin_unlock_irq(&sem->wait_lock); 1169 wake_up_q(&wake_q); 1170 lockevent_inc(rwsem_wlock_fail); 1171 1172 return ERR_PTR(-EINTR); 1173 } 1174 1175 /* 1176 * handle waking up a waiter on the semaphore 1177 * - up_read/up_write has decremented the active part of count if we come here 1178 */ 1179 static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) 1180 { 1181 unsigned long flags; 1182 DEFINE_WAKE_Q(wake_q); 1183 1184 raw_spin_lock_irqsave(&sem->wait_lock, flags); 1185 1186 if (!list_empty(&sem->wait_list)) 1187 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q); 1188 1189 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 1190 wake_up_q(&wake_q); 1191 1192 return sem; 1193 } 1194 1195 /* 1196 * downgrade a write lock into a read lock 1197 * - caller incremented waiting part of count and discovered it still negative 1198 * - just wake up any readers at the front of the queue 1199 */ 1200 static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem) 1201 { 1202 unsigned long flags; 1203 DEFINE_WAKE_Q(wake_q); 1204 1205 raw_spin_lock_irqsave(&sem->wait_lock, flags); 1206 1207 if (!list_empty(&sem->wait_list)) 1208 rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q); 1209 1210 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 1211 wake_up_q(&wake_q); 1212 1213 return sem; 1214 } 1215 1216 /* 1217 * lock for reading 1218 */ 1219 static inline int __down_read_common(struct rw_semaphore *sem, int state) 1220 { 1221 long count; 1222 1223 if (!rwsem_read_trylock(sem, &count)) { 1224 if (IS_ERR(rwsem_down_read_slowpath(sem, count, state))) 1225 return -EINTR; 1226 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem); 1227 } 1228 return 0; 1229 } 1230 1231 static inline void __down_read(struct rw_semaphore *sem) 1232 { 1233 __down_read_common(sem, TASK_UNINTERRUPTIBLE); 1234 } 1235 1236 static inline int __down_read_interruptible(struct rw_semaphore *sem) 1237 { 1238 return __down_read_common(sem, TASK_INTERRUPTIBLE); 1239 } 1240 1241 static inline int __down_read_killable(struct rw_semaphore *sem) 1242 { 1243 return __down_read_common(sem, TASK_KILLABLE); 1244 } 1245 1246 static inline int __down_read_trylock(struct rw_semaphore *sem) 1247 { 1248 long tmp; 1249 1250 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem); 1251 1252 /* 1253 * Optimize for the case when the rwsem is not locked at all. 1254 */ 1255 tmp = RWSEM_UNLOCKED_VALUE; 1256 do { 1257 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, 1258 tmp + RWSEM_READER_BIAS)) { 1259 rwsem_set_reader_owned(sem); 1260 return 1; 1261 } 1262 } while (!(tmp & RWSEM_READ_FAILED_MASK)); 1263 return 0; 1264 } 1265 1266 /* 1267 * lock for writing 1268 */ 1269 static inline int __down_write_common(struct rw_semaphore *sem, int state) 1270 { 1271 if (unlikely(!rwsem_write_trylock(sem))) { 1272 if (IS_ERR(rwsem_down_write_slowpath(sem, state))) 1273 return -EINTR; 1274 } 1275 1276 return 0; 1277 } 1278 1279 static inline void __down_write(struct rw_semaphore *sem) 1280 { 1281 __down_write_common(sem, TASK_UNINTERRUPTIBLE); 1282 } 1283 1284 static inline int __down_write_killable(struct rw_semaphore *sem) 1285 { 1286 return __down_write_common(sem, TASK_KILLABLE); 1287 } 1288 1289 static inline int __down_write_trylock(struct rw_semaphore *sem) 1290 { 1291 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem); 1292 return rwsem_write_trylock(sem); 1293 } 1294 1295 /* 1296 * unlock after reading 1297 */ 1298 static inline void __up_read(struct rw_semaphore *sem) 1299 { 1300 long tmp; 1301 1302 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem); 1303 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem); 1304 1305 rwsem_clear_reader_owned(sem); 1306 tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count); 1307 DEBUG_RWSEMS_WARN_ON(tmp < 0, sem); 1308 if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) == 1309 RWSEM_FLAG_WAITERS)) { 1310 clear_nonspinnable(sem); 1311 rwsem_wake(sem); 1312 } 1313 } 1314 1315 /* 1316 * unlock after writing 1317 */ 1318 static inline void __up_write(struct rw_semaphore *sem) 1319 { 1320 long tmp; 1321 1322 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem); 1323 /* 1324 * sem->owner may differ from current if the ownership is transferred 1325 * to an anonymous writer by setting the RWSEM_NONSPINNABLE bits. 1326 */ 1327 DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) && 1328 !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem); 1329 1330 rwsem_clear_owner(sem); 1331 tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count); 1332 if (unlikely(tmp & RWSEM_FLAG_WAITERS)) 1333 rwsem_wake(sem); 1334 } 1335 1336 /* 1337 * downgrade write lock to read lock 1338 */ 1339 static inline void __downgrade_write(struct rw_semaphore *sem) 1340 { 1341 long tmp; 1342 1343 /* 1344 * When downgrading from exclusive to shared ownership, 1345 * anything inside the write-locked region cannot leak 1346 * into the read side. In contrast, anything in the 1347 * read-locked region is ok to be re-ordered into the 1348 * write side. As such, rely on RELEASE semantics. 1349 */ 1350 DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem); 1351 tmp = atomic_long_fetch_add_release( 1352 -RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count); 1353 rwsem_set_reader_owned(sem); 1354 if (tmp & RWSEM_FLAG_WAITERS) 1355 rwsem_downgrade_wake(sem); 1356 } 1357 1358 #else /* !CONFIG_PREEMPT_RT */ 1359 1360 #define RT_MUTEX_BUILD_MUTEX 1361 #include "rtmutex.c" 1362 1363 #define rwbase_set_and_save_current_state(state) \ 1364 set_current_state(state) 1365 1366 #define rwbase_restore_current_state() \ 1367 __set_current_state(TASK_RUNNING) 1368 1369 #define rwbase_rtmutex_lock_state(rtm, state) \ 1370 __rt_mutex_lock(rtm, state) 1371 1372 #define rwbase_rtmutex_slowlock_locked(rtm, state) \ 1373 __rt_mutex_slowlock_locked(rtm, NULL, state) 1374 1375 #define rwbase_rtmutex_unlock(rtm) \ 1376 __rt_mutex_unlock(rtm) 1377 1378 #define rwbase_rtmutex_trylock(rtm) \ 1379 __rt_mutex_trylock(rtm) 1380 1381 #define rwbase_signal_pending_state(state, current) \ 1382 signal_pending_state(state, current) 1383 1384 #define rwbase_schedule() \ 1385 schedule() 1386 1387 #include "rwbase_rt.c" 1388 1389 void __init_rwsem(struct rw_semaphore *sem, const char *name, 1390 struct lock_class_key *key) 1391 { 1392 init_rwbase_rt(&(sem)->rwbase); 1393 1394 #ifdef CONFIG_DEBUG_LOCK_ALLOC 1395 debug_check_no_locks_freed((void *)sem, sizeof(*sem)); 1396 lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP); 1397 #endif 1398 } 1399 EXPORT_SYMBOL(__init_rwsem); 1400 1401 static inline void __down_read(struct rw_semaphore *sem) 1402 { 1403 rwbase_read_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE); 1404 } 1405 1406 static inline int __down_read_interruptible(struct rw_semaphore *sem) 1407 { 1408 return rwbase_read_lock(&sem->rwbase, TASK_INTERRUPTIBLE); 1409 } 1410 1411 static inline int __down_read_killable(struct rw_semaphore *sem) 1412 { 1413 return rwbase_read_lock(&sem->rwbase, TASK_KILLABLE); 1414 } 1415 1416 static inline int __down_read_trylock(struct rw_semaphore *sem) 1417 { 1418 return rwbase_read_trylock(&sem->rwbase); 1419 } 1420 1421 static inline void __up_read(struct rw_semaphore *sem) 1422 { 1423 rwbase_read_unlock(&sem->rwbase, TASK_NORMAL); 1424 } 1425 1426 static inline void __sched __down_write(struct rw_semaphore *sem) 1427 { 1428 rwbase_write_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE); 1429 } 1430 1431 static inline int __sched __down_write_killable(struct rw_semaphore *sem) 1432 { 1433 return rwbase_write_lock(&sem->rwbase, TASK_KILLABLE); 1434 } 1435 1436 static inline int __down_write_trylock(struct rw_semaphore *sem) 1437 { 1438 return rwbase_write_trylock(&sem->rwbase); 1439 } 1440 1441 static inline void __up_write(struct rw_semaphore *sem) 1442 { 1443 rwbase_write_unlock(&sem->rwbase); 1444 } 1445 1446 static inline void __downgrade_write(struct rw_semaphore *sem) 1447 { 1448 rwbase_write_downgrade(&sem->rwbase); 1449 } 1450 1451 /* Debug stubs for the common API */ 1452 #define DEBUG_RWSEMS_WARN_ON(c, sem) 1453 1454 static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem, 1455 struct task_struct *owner) 1456 { 1457 } 1458 1459 static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem) 1460 { 1461 int count = atomic_read(&sem->rwbase.readers); 1462 1463 return count < 0 && count != READER_BIAS; 1464 } 1465 1466 #endif /* CONFIG_PREEMPT_RT */ 1467 1468 /* 1469 * lock for reading 1470 */ 1471 void __sched down_read(struct rw_semaphore *sem) 1472 { 1473 might_sleep(); 1474 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); 1475 1476 LOCK_CONTENDED(sem, __down_read_trylock, __down_read); 1477 } 1478 EXPORT_SYMBOL(down_read); 1479 1480 int __sched down_read_interruptible(struct rw_semaphore *sem) 1481 { 1482 might_sleep(); 1483 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); 1484 1485 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) { 1486 rwsem_release(&sem->dep_map, _RET_IP_); 1487 return -EINTR; 1488 } 1489 1490 return 0; 1491 } 1492 EXPORT_SYMBOL(down_read_interruptible); 1493 1494 int __sched down_read_killable(struct rw_semaphore *sem) 1495 { 1496 might_sleep(); 1497 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); 1498 1499 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) { 1500 rwsem_release(&sem->dep_map, _RET_IP_); 1501 return -EINTR; 1502 } 1503 1504 return 0; 1505 } 1506 EXPORT_SYMBOL(down_read_killable); 1507 1508 /* 1509 * trylock for reading -- returns 1 if successful, 0 if contention 1510 */ 1511 int down_read_trylock(struct rw_semaphore *sem) 1512 { 1513 int ret = __down_read_trylock(sem); 1514 1515 if (ret == 1) 1516 rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_); 1517 return ret; 1518 } 1519 EXPORT_SYMBOL(down_read_trylock); 1520 1521 /* 1522 * lock for writing 1523 */ 1524 void __sched down_write(struct rw_semaphore *sem) 1525 { 1526 might_sleep(); 1527 rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_); 1528 LOCK_CONTENDED(sem, __down_write_trylock, __down_write); 1529 } 1530 EXPORT_SYMBOL(down_write); 1531 1532 /* 1533 * lock for writing 1534 */ 1535 int __sched down_write_killable(struct rw_semaphore *sem) 1536 { 1537 might_sleep(); 1538 rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_); 1539 1540 if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, 1541 __down_write_killable)) { 1542 rwsem_release(&sem->dep_map, _RET_IP_); 1543 return -EINTR; 1544 } 1545 1546 return 0; 1547 } 1548 EXPORT_SYMBOL(down_write_killable); 1549 1550 /* 1551 * trylock for writing -- returns 1 if successful, 0 if contention 1552 */ 1553 int down_write_trylock(struct rw_semaphore *sem) 1554 { 1555 int ret = __down_write_trylock(sem); 1556 1557 if (ret == 1) 1558 rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_); 1559 1560 return ret; 1561 } 1562 EXPORT_SYMBOL(down_write_trylock); 1563 1564 /* 1565 * release a read lock 1566 */ 1567 void up_read(struct rw_semaphore *sem) 1568 { 1569 rwsem_release(&sem->dep_map, _RET_IP_); 1570 __up_read(sem); 1571 } 1572 EXPORT_SYMBOL(up_read); 1573 1574 /* 1575 * release a write lock 1576 */ 1577 void up_write(struct rw_semaphore *sem) 1578 { 1579 rwsem_release(&sem->dep_map, _RET_IP_); 1580 __up_write(sem); 1581 } 1582 EXPORT_SYMBOL(up_write); 1583 1584 /* 1585 * downgrade write lock to read lock 1586 */ 1587 void downgrade_write(struct rw_semaphore *sem) 1588 { 1589 lock_downgrade(&sem->dep_map, _RET_IP_); 1590 __downgrade_write(sem); 1591 } 1592 EXPORT_SYMBOL(downgrade_write); 1593 1594 #ifdef CONFIG_DEBUG_LOCK_ALLOC 1595 1596 void down_read_nested(struct rw_semaphore *sem, int subclass) 1597 { 1598 might_sleep(); 1599 rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_); 1600 LOCK_CONTENDED(sem, __down_read_trylock, __down_read); 1601 } 1602 EXPORT_SYMBOL(down_read_nested); 1603 1604 int down_read_killable_nested(struct rw_semaphore *sem, int subclass) 1605 { 1606 might_sleep(); 1607 rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_); 1608 1609 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) { 1610 rwsem_release(&sem->dep_map, _RET_IP_); 1611 return -EINTR; 1612 } 1613 1614 return 0; 1615 } 1616 EXPORT_SYMBOL(down_read_killable_nested); 1617 1618 void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest) 1619 { 1620 might_sleep(); 1621 rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_); 1622 LOCK_CONTENDED(sem, __down_write_trylock, __down_write); 1623 } 1624 EXPORT_SYMBOL(_down_write_nest_lock); 1625 1626 void down_read_non_owner(struct rw_semaphore *sem) 1627 { 1628 might_sleep(); 1629 __down_read(sem); 1630 __rwsem_set_reader_owned(sem, NULL); 1631 } 1632 EXPORT_SYMBOL(down_read_non_owner); 1633 1634 void down_write_nested(struct rw_semaphore *sem, int subclass) 1635 { 1636 might_sleep(); 1637 rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_); 1638 LOCK_CONTENDED(sem, __down_write_trylock, __down_write); 1639 } 1640 EXPORT_SYMBOL(down_write_nested); 1641 1642 int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass) 1643 { 1644 might_sleep(); 1645 rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_); 1646 1647 if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, 1648 __down_write_killable)) { 1649 rwsem_release(&sem->dep_map, _RET_IP_); 1650 return -EINTR; 1651 } 1652 1653 return 0; 1654 } 1655 EXPORT_SYMBOL(down_write_killable_nested); 1656 1657 void up_read_non_owner(struct rw_semaphore *sem) 1658 { 1659 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem); 1660 __up_read(sem); 1661 } 1662 EXPORT_SYMBOL(up_read_non_owner); 1663 1664 #endif 1665