/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>
#include <linux/kthread.h>

void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)
{
        spin_lock_init(&q->lock);
        lockdep_set_class_and_name(&q->lock, key, name);
        INIT_LIST_HEAD(&q->task_list);
}

EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        wait->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue_tail(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        __remove_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);


/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
                             int nr_exclusive, int wake_flags, void *key)
{
        wait_queue_t *curr, *next;

        list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
                unsigned flags = curr->flags;

                if (curr->func(curr, mode, wake_flags, key) &&
                                (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                        break;
        }
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(wait_queue_head_t *q, unsigned int mode,
               int nr_exclusive, void *key)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        __wake_up_common(q, mode, nr_exclusive, 0, key);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(__wake_up);

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
{
        __wake_up_common(q, mode, nr, 0, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
{
        __wake_up_common(q, mode, 1, 0, key);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);
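
/*
 * Illustrative sketch (not part of this file's API): how the wake_up*()
 * wrappers in <linux/wait.h> map onto __wake_up() and its nr_exclusive
 * argument. The "my_wq" and "work_done" names below are hypothetical
 * caller-side state, shown only to make the exclusive/non-exclusive
 * distinction concrete.
 *
 *      static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *      static int work_done;
 *
 *      work_done = 1;
 *      wake_up(&my_wq);        // ~ __wake_up(&my_wq, TASK_NORMAL, 1, NULL):
 *                              //   wakes all non-exclusive waiters plus at
 *                              //   most one exclusive waiter
 *      wake_up_all(&my_wq);    // ~ __wake_up(&my_wq, TASK_NORMAL, 0, NULL):
 *                              //   nr_exclusive == 0, so every waiter is
 *                              //   woken, exclusive or not
 */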

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
                        int nr_exclusive, void *key)
{
        unsigned long flags;
        int wake_flags = 1; /* XXX WF_SYNC */

        if (unlikely(!q))
                return;

        if (unlikely(nr_exclusive != 1))
                wake_flags = 0;

        spin_lock_irqsave(&q->lock, flags);
        __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
        __wake_up_sync_key(q, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);      /* For internal use only */

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue(q, wait);
        set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

void
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        wait->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue_tail(q, wait);
        set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(wait_queue_t *wait, int flags)
{
        wait->flags = flags;
        wait->private = current;
        wait->func = autoremove_wake_function;
        INIT_LIST_HEAD(&wait->task_list);
}
EXPORT_SYMBOL(init_wait_entry);
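
/*
 * Illustrative sketch of the open-coded wait loop that prepare_to_wait()
 * and finish_wait() are designed for. "my_wq" and "condition" are
 * hypothetical names standing in for a caller's own waitqueue and state.
 *
 *      DEFINE_WAIT(wait);
 *
 *      for (;;) {
 *              prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *              if (condition)
 *                      break;
 *              schedule();
 *      }
 *      finish_wait(&my_wq, &wait);
 *
 * The condition test happens after the task is on the queue and its state
 * has been set, so a concurrent "condition = true; wake_up(&my_wq);" cannot
 * be missed: either the test sees the condition, or the wakeup sees the
 * waiter.
 */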

long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;
        long ret = 0;

        spin_lock_irqsave(&q->lock, flags);
        if (unlikely(signal_pending_state(state, current))) {
                /*
                 * An exclusive waiter must not fail if it was selected by a
                 * wakeup; it should "consume" the condition we were waiting
                 * for.
                 *
                 * The caller will recheck the condition and return success if
                 * we were already woken up; we cannot miss the event because
                 * wakeup locks/unlocks the same q->lock.
                 *
                 * But we need to ensure that a set-condition + wakeup done
                 * after this point can't see us; it should wake up another
                 * exclusive waiter if we fail.
                 */
                list_del_init(&wait->task_list);
                ret = -ERESTARTSYS;
        } else {
                if (list_empty(&wait->task_list)) {
                        if (wait->flags & WQ_FLAG_EXCLUSIVE)
                                __add_wait_queue_tail(q, wait);
                        else
                                __add_wait_queue(q, wait);
                }
                set_current_state(state);
        }
        spin_unlock_irqrestore(&q->lock, flags);

        return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);

/**
 * finish_wait - clean up after waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        /*
         * We can check for list emptiness outside the lock
         * IFF:
         *  - we use the "careful" check that verifies both
         *    the next and prev pointers, so that there cannot
         *    be any half-pending updates in progress on other
         *    CPU's that we haven't seen yet (and that might
         *    still change the stack area)
         * and
         *  - all other users take the lock (ie we can only
         *    have _one_ other CPU that looks at or modifies
         *    the list).
         */
        if (!list_empty_careful(&wait->task_list)) {
                spin_lock_irqsave(&q->lock, flags);
                list_del_init(&wait->task_list);
                spin_unlock_irqrestore(&q->lock, flags);
        }
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        int ret = default_wake_function(wait, mode, sync, key);

        if (ret)
                list_del_init(&wait->task_list);
        return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
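
/*
 * Illustrative sketch (simplified) of how the wait_event_interruptible()
 * macro family in <linux/wait.h> builds on init_wait_entry() and
 * prepare_to_wait_event(); the real ___wait_event() macro is more general.
 * "my_wq" and "condition" are hypothetical caller names.
 *
 *      long err = 0;
 *      wait_queue_t wait;
 *
 *      init_wait_entry(&wait, 0);
 *      for (;;) {
 *              long intr = prepare_to_wait_event(&my_wq, &wait,
 *                                                TASK_INTERRUPTIBLE);
 *              if (condition)
 *                      break;                  // success, err stays 0
 *              if (intr) {                     // signal: already dequeued
 *                      err = intr;             // -ERESTARTSYS
 *                      goto out;
 *              }
 *              schedule();
 *      }
 *      finish_wait(&my_wq, &wait);
 * out:
 *      return err;
 */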

static inline bool is_kthread_should_stop(void)
{
        return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     p->state = mode;                         condition = true;
 *     smp_mb(); // A                           smp_wmb(); // C
 *     if (!(wait->flags & WQ_FLAG_WOKEN))      wait->flags |= WQ_FLAG_WOKEN;
 *         schedule()                           try_to_wake_up();
 *     p->state = TASK_RUNNING;                     ~~~~~~~~~~~~~~~~~~
 *     wait->flags &= ~WQ_FLAG_WOKEN;           condition = true;
 *     smp_mb() // B                            smp_wmb(); // C
 *                                              wait->flags |= WQ_FLAG_WOKEN;
 * }
 * remove_wait_queue(&wq, &wait);
 *
 */
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout)
{
        set_current_state(mode); /* A */
        /*
         * The above implies an smp_mb(), which matches with the smp_wmb() from
         * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
         * also observe all state before the wakeup.
         */
        if (!(wait->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
                timeout = schedule_timeout(timeout);
        __set_current_state(TASK_RUNNING);

        /*
         * The below implies an smp_mb(), it too pairs with the smp_wmb() from
         * woken_wake_function() such that we must either observe the wait
         * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
         * an event.
         */
        smp_store_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN); /* B */

        return timeout;
}
EXPORT_SYMBOL(wait_woken);

int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        /*
         * Although this function is called under the waitqueue lock, LOCK
         * doesn't imply a write barrier and the users expect write
         * barrier semantics on wakeup functions. The following
         * smp_wmb() is equivalent to the smp_wmb() in try_to_wake_up()
         * and is paired with smp_store_mb() in wait_woken().
         */
        smp_wmb(); /* C */
        wait->flags |= WQ_FLAG_WOKEN;

        return default_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);

int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
        struct wait_bit_key *key = arg;
        struct wait_bit_queue *wait_bit
                = container_of(wait, struct wait_bit_queue, wait);

        if (wait_bit->key.flags != key->flags ||
                        wait_bit->key.bit_nr != key->bit_nr ||
                        test_bit(key->bit_nr, key->flags))
                return 0;
        else
                return autoremove_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the action functions passed to __wait_on_bit() and
 * __wait_on_bit_lock() may return nonzero codes. A nonzero return code
 * halts waiting and is returned to the caller.
 */
int __sched
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
              wait_bit_action_f *action, unsigned mode)
{
        int ret = 0;

        do {
                prepare_to_wait(wq, &q->wait, mode);
                if (test_bit(q->key.bit_nr, q->key.flags))
                        ret = (*action)(&q->key, mode);
        } while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
        finish_wait(wq, &q->wait);
        return ret;
}
EXPORT_SYMBOL(__wait_on_bit);

int __sched out_of_line_wait_on_bit(void *word, int bit,
                                    wait_bit_action_f *action, unsigned mode)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);

int __sched out_of_line_wait_on_bit_timeout(
        void *word, int bit, wait_bit_action_f *action,
        unsigned mode, unsigned long timeout)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        wait.key.timeout = jiffies + timeout;
        return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout);
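
/*
 * Illustrative sketch of the waiter side of the bit-wait API. The inline
 * wait_on_bit() and wait_on_bit_timeout() helpers in <linux/wait.h> test
 * the bit first and only fall back to the out-of-line paths above when it
 * is set. "MY_FLAG_BUSY" and "obj->flags" are hypothetical names.
 *
 *      // Sleep uninterruptibly until MY_FLAG_BUSY is clear; this uses the
 *      // default bit_wait() action defined later in this file.
 *      wait_on_bit(&obj->flags, MY_FLAG_BUSY, TASK_UNINTERRUPTIBLE);
 *
 *      // Same, but give up after a timeout; this uses bit_wait_timeout(),
 *      // which returns -EAGAIN once the deadline passes.
 *      err = wait_on_bit_timeout(&obj->flags, MY_FLAG_BUSY,
 *                                TASK_INTERRUPTIBLE, HZ);
 */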

int __sched
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
                   wait_bit_action_f *action, unsigned mode)
{
        int ret = 0;

        for (;;) {
                prepare_to_wait_exclusive(wq, &q->wait, mode);
                if (test_bit(q->key.bit_nr, q->key.flags)) {
                        ret = action(&q->key, mode);
                        /*
                         * See the comment in prepare_to_wait_event().
                         * finish_wait() does not necessarily take wq->lock,
                         * but test_and_set_bit() implies mb() which pairs with
                         * smp_mb__after_atomic() before wake_up_page().
                         */
                        if (ret)
                                finish_wait(wq, &q->wait);
                }
                if (!test_and_set_bit(q->key.bit_nr, q->key.flags)) {
                        if (!ret)
                                finish_wait(wq, &q->wait);
                        return 0;
                } else if (ret) {
                        return ret;
                }
        }
}
EXPORT_SYMBOL(__wait_on_bit_lock);

int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
                                         wait_bit_action_f *action, unsigned mode)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        return __wait_on_bit_lock(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);

void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
        struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
        if (waitqueue_active(wq))
                __wake_up(wq, TASK_NORMAL, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);

/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be done prior to calling
 * this. Typically, this will be smp_mb__after_atomic(), but in some
 * cases where bitflags are manipulated non-atomically under a lock, one
 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
 * because spin_unlock() does not guarantee a memory barrier.
 */
void wake_up_bit(void *word, int bit)
{
        __wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);

wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
        const int shift = BITS_PER_LONG == 32 ? 5 : 6;
        const struct zone *zone = page_zone(virt_to_page(word));
        unsigned long val = (unsigned long)word << shift | bit;

        return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
}
EXPORT_SYMBOL(bit_waitqueue);
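
/*
 * Illustrative sketch of the waker side described in the wake_up_bit()
 * comment above. "MY_FLAG_BUSY" and "obj->flags" are hypothetical names.
 *
 *      // Atomic flag manipulation: clear_bit() is not a barrier, so an
 *      // explicit smp_mb__after_atomic() is needed before the
 *      // waitqueue_active() check inside __wake_up_bit().
 *      clear_bit(MY_FLAG_BUSY, &obj->flags);
 *      smp_mb__after_atomic();
 *      wake_up_bit(&obj->flags, MY_FLAG_BUSY);
 *
 * Non-atomic manipulation under a lock needs a full smp_mb() instead (see
 * the fs/inode.c case cited above), because spin_unlock() alone is not a
 * full barrier.
 */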

/*
 * Manipulate the atomic_t address to produce a better bit waitqueue table hash
 * index (we're keying off bit -1, but that would produce a horrible hash
 * value).
 */
static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
{
        if (BITS_PER_LONG == 64) {
                unsigned long q = (unsigned long)p;
                return bit_waitqueue((void *)(q & ~1), q & 1);
        }
        return bit_waitqueue(p, 0);
}

static int wake_atomic_t_function(wait_queue_t *wait, unsigned mode, int sync,
                                  void *arg)
{
        struct wait_bit_key *key = arg;
        struct wait_bit_queue *wait_bit
                = container_of(wait, struct wait_bit_queue, wait);
        atomic_t *val = key->flags;

        if (wait_bit->key.flags != key->flags ||
                        wait_bit->key.bit_nr != key->bit_nr ||
                        atomic_read(val) != 0)
                return 0;
        return autoremove_wake_function(wait, mode, sync, key);
}

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting,
 * the action function passed to __wait_on_atomic_t() may return nonzero
 * codes. A nonzero return code halts waiting and is returned to the caller.
 */
static __sched
int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q,
                       int (*action)(atomic_t *), unsigned mode)
{
        atomic_t *val;
        int ret = 0;

        do {
                prepare_to_wait(wq, &q->wait, mode);
                val = q->key.flags;
                if (atomic_read(val) == 0)
                        break;
                ret = (*action)(val);
        } while (!ret && atomic_read(val) != 0);
        finish_wait(wq, &q->wait);
        return ret;
}

#define DEFINE_WAIT_ATOMIC_T(name, p)                                   \
        struct wait_bit_queue name = {                                  \
                .key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p),              \
                .wait = {                                               \
                        .private        = current,                      \
                        .func           = wake_atomic_t_function,       \
                        .task_list      =                               \
                                LIST_HEAD_INIT((name).wait.task_list),  \
                },                                                      \
        }

__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
                                         unsigned mode)
{
        wait_queue_head_t *wq = atomic_t_waitqueue(p);
        DEFINE_WAIT_ATOMIC_T(wait, p);

        return __wait_on_atomic_t(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);

/**
 * wake_up_atomic_t - Wake up a waiter on an atomic_t
 * @p: The atomic_t being waited on, a kernel virtual address
 *
 * Wake up anyone waiting for the atomic_t to go to zero.
 *
 * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
 * check is done by the waiter's wake function, not by the waker itself).
 */
void wake_up_atomic_t(atomic_t *p)
{
        __wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
}
EXPORT_SYMBOL(wake_up_atomic_t);

__sched int bit_wait(struct wait_bit_key *word, int mode)
{
        schedule();
        if (signal_pending_state(mode, current))
                return -EINTR;
        return 0;
}
EXPORT_SYMBOL(bit_wait);

__sched int bit_wait_io(struct wait_bit_key *word, int mode)
{
        io_schedule();
        if (signal_pending_state(mode, current))
                return -EINTR;
        return 0;
}
EXPORT_SYMBOL(bit_wait_io);

__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
{
        unsigned long now = READ_ONCE(jiffies);
        if (time_after_eq(now, word->timeout))
                return -EAGAIN;
        schedule_timeout(word->timeout - now);
        if (signal_pending_state(mode, current))
                return -EINTR;
        return 0;
}
EXPORT_SYMBOL_GPL(bit_wait_timeout);

__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
{
        unsigned long now = READ_ONCE(jiffies);
        if (time_after_eq(now, word->timeout))
                return -EAGAIN;
        io_schedule_timeout(word->timeout - now);
        if (signal_pending_state(mode, current))
                return -EINTR;
        return 0;
}
EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
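
/*
 * Illustrative sketch of the atomic_t wait/wake pattern built on the
 * helpers above, e.g. waiting for a reference count to reach zero.
 * "my_wait_action" and "obj->refs" are hypothetical caller-side names;
 * the action is called with the task state already set and typically
 * just schedules.
 *
 *      static int my_wait_action(atomic_t *p)
 *      {
 *              schedule();
 *              return 0;
 *      }
 *
 *      // Waiter: sleep until obj->refs reaches zero; wait_on_atomic_t()
 *      // in <linux/wait.h> falls back to out_of_line_wait_on_atomic_t().
 *      wait_on_atomic_t(&obj->refs, my_wait_action, TASK_UNINTERRUPTIBLE);
 *
 *      // Releasing a reference: if it was the last one, wake the waiters.
 *      // atomic_dec_and_test() is a full barrier, which is what the
 *      // waitqueue_active() check in __wake_up_bit() relies on.
 *      if (atomic_dec_and_test(&obj->refs))
 *              wake_up_atomic_t(&obj->refs);
 */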