// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Fast Userspace Mutexes (which I call "Futexes!").
 * (C) Rusty Russell, IBM 2002
 *
 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 * Removed page pinning, fix privately mapped COW pages and other cleanups
 * (C) Copyright 2003, 2004 Jamie Lokier
 *
 * Robust futex support started by Ingo Molnar
 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 * PI-futex support started by Ingo Molnar and Thomas Gleixner
 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * PRIVATE futexes by Eric Dumazet
 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 * Copyright (C) IBM Corporation, 2009
 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 * enough at me, Linus for the original (flawed) idea, Matthew
 * Kirkwood for proof-of-concept implementation.
 *
 * "The futexes are also cursed."
 * "But they come in a choice of three flavours!"
 */
#include <linux/compat.h>
#include <linux/jhash.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/fault-inject.h>
#include <linux/slab.h>

#include "futex.h"
#include "../locking/rtmutex_common.h"

/*
 * The base of the bucket array and its size are always used together
 * (after initialization only in futex_hash()), so ensure that they
 * reside in the same cacheline.
 */
static struct {
	struct futex_hash_bucket *queues;
	unsigned long hashsize;
} __futex_data __read_mostly __aligned(2*sizeof(long));
#define futex_queues	(__futex_data.queues)
#define futex_hashsize	(__futex_data.hashsize)


/*
 * Fault injections for futexes.
 */
#ifdef CONFIG_FAIL_FUTEX

static struct {
	struct fault_attr attr;

	bool ignore_private;
} fail_futex = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_private = false,
};

static int __init setup_fail_futex(char *str)
{
	return setup_fault_attr(&fail_futex.attr, str);
}
__setup("fail_futex=", setup_fail_futex);

bool should_fail_futex(bool fshared)
{
	if (fail_futex.ignore_private && !fshared)
		return false;

	return should_fail(&fail_futex.attr, 1);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_futex_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_futex", NULL,
					&fail_futex.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	debugfs_create_bool("ignore-private", mode, dir,
			    &fail_futex.ignore_private);
	return 0;
}

late_initcall(fail_futex_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#endif /* CONFIG_FAIL_FUTEX */

/**
 * futex_hash - Return the hash bucket in the global hash
 * @key:	Pointer to the futex key for which the hash is calculated
 *
 * We hash on the keys returned from get_futex_key (see below) and return the
 * corresponding hash bucket in the global hash.
 */
struct futex_hash_bucket *futex_hash(union futex_key *key)
{
	u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4,
			  key->both.offset);

	return &futex_queues[hash & (futex_hashsize - 1)];
}

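/*
 * Illustrative note (hypothetical numbers, not part of the algorithm itself):
 * futex_hashsize is always a power of two (see futex_init() below), so masking
 * with (futex_hashsize - 1) is a cheap equivalent of taking the hash modulo the
 * table size. For example, with futex_hashsize == 4096 and jhash2() returning
 * 0x9c0fa1d3, the bucket index would be 0x9c0fa1d3 & 0xfff == 0x1d3.
 */
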
/**
 * futex_setup_timer - set up the sleeping hrtimer.
 * @time:	ptr to the given timeout value
 * @timeout:	the hrtimer_sleeper structure to be set up
 * @flags:	futex flags
 * @range_ns:	optional range in ns
 *
 * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
 *	   value given
 */
struct hrtimer_sleeper *
futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
		  int flags, u64 range_ns)
{
	if (!time)
		return NULL;

	hrtimer_init_sleeper_on_stack(timeout, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
	/*
	 * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
	 * effectively the same as calling hrtimer_set_expires().
	 */
	hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);

	return timeout;
}

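/*
 * A minimal usage sketch (condensed from how the futex_wait()-style paths use
 * this helper; the actual wait and error handling are omitted):
 *
 *	struct hrtimer_sleeper timeout, *to;
 *
 *	to = futex_setup_timer(abs_time, &timeout, flags,
 *			       current->timer_slack_ns);
 *	...					// arm the timer and sleep
 *	if (to) {
 *		hrtimer_cancel(&to->timer);
 *		destroy_hrtimer_on_stack(&to->timer);
 *	}
 *
 * The on-stack sleeper must be destroyed by the caller because
 * hrtimer_init_sleeper_on_stack() is used above.
 */
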
/*
 * Generate a machine wide unique identifier for this inode.
 *
 * This relies on u64 not wrapping in the lifetime of the machine; which with
 * 1ns resolution means almost 585 years.
 *
 * This further relies on the fact that a well formed program will not unmap
 * the file while it has a (shared) futex waiting on it. This mapping will have
 * a file reference which pins the mount and inode.
 *
 * If for some reason an inode gets evicted and read back in again, it will get
 * a new sequence number and will _NOT_ match, even though it is the exact same
 * file.
 *
 * It is important that futex_match() will never have a false-positive, esp.
 * for PI futexes that can mess up the state. The above argues that
 * false-negatives are only possible for malformed programs.
 */
static u64 get_inode_sequence_number(struct inode *inode)
{
	static atomic64_t i_seq;
	u64 old;

	/* Does the inode already have a sequence number? */
	old = atomic64_read(&inode->i_sequence);
	if (likely(old))
		return old;

	for (;;) {
		u64 new = atomic64_add_return(1, &i_seq);
		if (WARN_ON_ONCE(!new))
			continue;

		old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new);
		if (old)
			return old;
		return new;
	}
}

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	false for a PROCESS_PRIVATE futex, true for PROCESS_SHARED
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: FUTEX_READ,
 *		FUTEX_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in @key on success.
 *
 * For shared mappings (when @fshared), the key is:
 *
 *   ( inode->i_sequence, page->index, offset_within_page )
 *
 * [ also see get_inode_sequence_number() ]
 *
 * For private mappings (or when !@fshared), the key is:
 *
 *   ( current->mm, address, 0 )
 *
 * This allows (cross process, where applicable) identification of the futex
 * without keeping the page pinned for the duration of the FUTEX_WAIT.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
		  enum futex_access rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page, *tail;
	struct address_space *mapping;
	int err, ro = 0;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	if (unlikely(!access_ok(uaddr, sizeof(u32))))
		return -EFAULT;

	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * virtual address, we don't even have to find the underlying vma.
	 * Note: We do have to check 'uaddr' is a valid user address,
	 * but access_ok() should be faster than find_vma().
	 */
	if (!fshared) {
		/*
		 * On no-MMU, shared futexes are treated as private, therefore
		 * we must not include the current process in the key. Since
		 * there is only one address space, the address is a unique key
		 * on its own.
		 */
		if (IS_ENABLED(CONFIG_MMU))
			key->private.mm = mm;
		else
			key->private.mm = NULL;

		key->private.address = address;
		return 0;
	}

again:
	/* Ignore any VERIFY_READ mapping (futex common case) */
	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
	/*
	 * If write access is not required (eg. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == FUTEX_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
	if (err < 0)
		return err;
	else
		err = 0;

	/*
	 * The treatment of mapping from this point on is critical. The page
	 * lock protects many things but in this context the page lock
	 * stabilizes mapping, prevents inode freeing in the shared
	 * file-backed region case and guards against movement to swap cache.
	 *
	 * Strictly speaking the page lock is not needed in all cases being
	 * considered here and page lock forces unnecessary serialization.
	 * From this point on, mapping will be re-verified if necessary and
	 * page lock will be acquired only if it is unavoidable.
	 *
	 * Mapping checks require the head page for any compound page so the
	 * head page and mapping are looked up now. For anonymous pages, it
	 * does not matter if the page splits in the future as the key is
	 * based on the address. For filesystem-backed pages, the tail is
	 * required as the index of the page determines the key. For
	 * base pages, there is no tail page and tail == page.
	 */
	tail = page;
	page = compound_head(page);
	mapping = READ_ONCE(page->mapping);

	/*
	 * If page->mapping is NULL, then it cannot be a PageAnon
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the page lock (also
	 * cases which we are happy to fail). And we hold a reference,
	 * so refcount care in invalidate_inode_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
	 * an unlikely race, but we do need to retry for page->mapping.
	 */
	if (unlikely(!mapping)) {
		int shmem_swizzled;

		/*
		 * Page lock is required to identify which special case above
		 * applies. If this is really a shmem page then the page lock
		 * will prevent unexpected transitions.
		 */
		lock_page(page);
		shmem_swizzled = PageSwapCache(page) || page->mapping;
		unlock_page(page);
		put_page(page);

		if (shmem_swizzled)
			goto again;

		return -EFAULT;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * If the futex key is stored on an anonymous page, then the associated
	 * object is the mm which is implicitly pinned by the calling process.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (PageAnon(page)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (unlikely(should_fail_futex(true)) || ro) {
			err = -EFAULT;
			goto out;
		}

		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;

	} else {
		struct inode *inode;

		/*
		 * The associated futex object in this case is the inode and
		 * the page->mapping must be traversed. Ordinarily this should
		 * be stabilised under page lock but it's not strictly
		 * necessary in this case as we just want to pin the inode, not
		 * update the radix tree or anything like that.
		 *
		 * The RCU read lock is taken as the inode is finally freed
		 * under RCU. If the mapping still matches expectations then the
		 * mapping->host can be safely accessed as being a valid inode.
		 */
		rcu_read_lock();

		if (READ_ONCE(page->mapping) != mapping) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		inode = READ_ONCE(mapping->host);
		if (!inode) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.i_seq = get_inode_sequence_number(inode);
		key->shared.pgoff = page_to_pgoff(tail);
		rcu_read_unlock();
	}

out:
	put_page(page);
	return err;
}

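/*
 * Worked example (hypothetical addresses, for illustration only): for a
 * PROCESS_PRIVATE futex at uaddr 0x7f0312345abc the key becomes
 *
 *	key->private.mm      = current->mm
 *	key->private.address = 0x7f0312345000	(uaddr rounded down to a page)
 *	key->both.offset     = 0xabc		(offset within the page)
 *
 * whereas a PROCESS_SHARED futex at the same address in a file-backed
 * MAP_SHARED mapping would instead yield
 *
 *	key->shared.i_seq    = get_inode_sequence_number(inode)
 *	key->shared.pgoff    = page_to_pgoff(tail)
 *	key->both.offset     = 0xabc | FUT_OFF_INODE
 *
 * so two processes mapping the same file at different addresses still produce
 * matching keys in futex_match() and hash to the same bucket.
 */
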
/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we might as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	mmap_read_lock(mm);
	ret = fixup_user_fault(mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, NULL);
	mmap_read_unlock(mm);

	return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (futex_match(&this->key, key))
			return this;
	}
	return NULL;
}

int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

int futex_get_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __get_user(*dest, from);
	pagefault_enable();

	return ret ? -EFAULT : 0;
}

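/*
 * These *_locked() helpers are meant to be called with a hash bucket lock
 * held, hence the pagefault_disable()/enable() pair: a fault must not be
 * handled while holding the spinlock. A rough sketch of the usual caller
 * pattern (modelled on handle_futex_death() further down; details vary per
 * call site):
 *
 *	retry:
 *		ret = futex_cmpxchg_value_locked(&curval, uaddr, uval, newval);
 *		if (ret == -EFAULT) {
 *			// drop any hb lock here, then fault the page in
 *			if (fault_in_user_writeable(uaddr))
 *				return -EFAULT;
 *			goto retry;
 *		}
 */
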
/**
 * wait_for_owner_exiting - Block until the owner has exited
 * @ret:	owner's current futex lock status
 * @exiting:	Pointer to the exiting task
 *
 * Caller must hold a refcount on @exiting.
 */
void wait_for_owner_exiting(int ret, struct task_struct *exiting)
{
	if (ret != -EBUSY) {
		WARN_ON_ONCE(exiting);
		return;
	}

	if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
		return;

	mutex_lock(&exiting->futex_exit_mutex);
	/*
	 * No point in doing state checking here. If the waiter got here
	 * while the task was in exec()->exec_futex_release() then it can
	 * have any FUTEX_STATE_* value when the waiter has acquired the
	 * mutex. OK, if running, EXITING or DEAD if it reached exit()
	 * already. Highly unlikely and not a problem. Just one more round
	 * through the futex maze.
	 */
	mutex_unlock(&exiting->futex_exit_mutex);

	put_task_struct(exiting);
}

/**
 * __futex_unqueue() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
void __futex_unqueue(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
		return;
	lockdep_assert_held(q->lock_ptr);

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
	futex_hb_waiters_dec(hb);
}

/* The key must be already stored in q->key. */
struct futex_hash_bucket *futex_q_lock(struct futex_q *q)
	__acquires(&hb->lock)
{
	struct futex_hash_bucket *hb;

	hb = futex_hash(&q->key);

	/*
	 * Increment the counter before taking the lock so that
	 * a potential waker won't miss a to-be-slept task that is
	 * waiting for the spinlock. This is safe as all futex_q_lock()
	 * users end up calling futex_queue(). Similarly, for housekeeping,
	 * decrement the counter at futex_q_unlock() when some error has
	 * occurred and we don't end up adding the task to the list.
	 */
	futex_hb_waiters_inc(hb); /* implies smp_mb(); (A) */

	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock);
	return hb;
}

void futex_q_unlock(struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	spin_unlock(&hb->lock);
	futex_hb_waiters_dec(hb);
}

void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 *   (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
	plist_add(&q->list, &hb->chain);
	q->task = current;
}

/**
 * futex_unqueue() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to futex_unqueue() must
 * be paired with exactly one earlier call to futex_queue().
 *
 * Return:
 *  - 1 - if the futex_q was still queued (and we unqueued it);
 *  - 0 - if the futex_q was already removed by the waking thread
 */
int futex_unqueue(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	/*
	 * q->lock_ptr can change between this read and the following spin_lock.
	 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
	 * optimizing lock_ptr out of the logic below.
	 */
	lock_ptr = READ_ONCE(q->lock_ptr);
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock. This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock(). It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock(). It cannot,
		 * however, change back to the original value. Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		__futex_unqueue(q);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	return ret;
}

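/*
 * For orientation, a rough sketch of how a waiter typically strings these
 * helpers together (the real flow lives in the futex_wait*() paths and
 * handles many more cases; this is only an illustration and "expected_val"
 * is a made-up name):
 *
 *	hb = futex_q_lock(&q);			// hb->lock held, waiters++
 *	ret = futex_get_value_locked(&uval, uaddr);
 *	if (ret || uval != expected_val) {
 *		futex_q_unlock(hb);		// bail out, nothing queued
 *	} else {
 *		futex_queue(&q, hb);		// enqueue (futex.h), drops hb->lock
 *		...				// schedule() until woken or timed out
 *		if (!futex_unqueue(&q))
 *			;			// 0: a waker already removed us
 *	}
 */
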
/*
 * PI futexes can not be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held.
 */
void futex_unqueue_pi(struct futex_q *q)
{
	__futex_unqueue(q);

	BUG_ON(!q->pi_state);
	put_pi_state(q->pi_state);
	q->pi_state = NULL;
}

/* Constants for the pending_op argument of handle_futex_death */
#define HANDLE_DEATH_PENDING	true
#define HANDLE_DEATH_LIST	false

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
			      bool pi, bool pending_op)
{
	u32 uval, nval, mval;
	pid_t owner;
	int err;

	/* Futex address must be 32bit aligned */
	if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
		return -1;

retry:
	if (get_user(uval, uaddr))
		return -1;

	/*
	 * Special case for regular (non PI) futexes. The unlock path in
	 * user space has two race scenarios:
	 *
	 * 1. The unlock path releases the user space futex value and
	 *    before it can execute the futex() syscall to wake up
	 *    waiters it is killed.
	 *
	 * 2. A woken up waiter is killed before it can acquire the
	 *    futex in user space.
	 *
	 * In the second case, the wake up notification could be generated
	 * by the unlock path in user space after setting the futex value
	 * to zero or by the kernel after setting the OWNER_DIED bit below.
	 *
	 * In both cases the TID validation below prevents a wakeup of
	 * potential waiters which can cause these waiters to block
	 * forever.
	 *
	 * In both cases the following conditions are met:
	 *
	 *	1) task->robust_list->list_op_pending != NULL
	 *	   @pending_op == true
	 *	2) The owner part of user space futex value == 0
	 *	3) Regular futex: @pi == false
	 *
	 * If these conditions are met, it is safe to attempt waking up a
	 * potential waiter without touching the user space futex value and
	 * trying to set the OWNER_DIED bit. If the futex value is zero,
	 * the rest of the user space mutex state is consistent, so a woken
	 * waiter will just take over the uncontended futex. Setting the
	 * OWNER_DIED bit would create inconsistent state and malfunction
	 * of the user space owner died handling. Otherwise, the OWNER_DIED
	 * bit is already set, and the woken waiter is expected to deal with
	 * this.
	 */
	owner = uval & FUTEX_TID_MASK;

	if (pending_op && !pi && !owner) {
		futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
		return 0;
	}

	if (owner != task_pid_vnr(curr))
		return 0;

	/*
	 * Ok, this dying thread is truly holding a futex
	 * of interest. Set the OWNER_DIED bit atomically
	 * via cmpxchg, and if the value had FUTEX_WAITERS
	 * set, wake up a waiter (if any). (We have to do a
	 * futex_wake() even if OWNER_DIED is already set -
	 * to handle the rare but possible case of recursive
	 * thread-death.) The rest of the cleanup is done in
	 * userspace.
	 */
	mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;

	/*
	 * We are not holding a lock here, but we want to have
	 * the pagefault_disable/enable() protection because
	 * we want to handle the fault gracefully. If the
	 * access fails we try to fault in the futex with R/W
	 * verification via get_user_pages. get_user() above
	 * does not guarantee R/W access. If that fails we
	 * give up and leave the futex locked.
	 */
	if ((err = futex_cmpxchg_value_locked(&nval, uaddr, uval, mval))) {
		switch (err) {
		case -EFAULT:
			if (fault_in_user_writeable(uaddr))
				return -1;
			goto retry;

		case -EAGAIN:
			cond_resched();
			goto retry;

		default:
			WARN_ON_ONCE(1);
			return err;
		}
	}

	if (nval != uval)
		goto retry;

	/*
	 * Wake robust non-PI futexes here. The wakeup of
	 * PI futexes happens in exit_pi_state():
	 */
	if (!pi && (uval & FUTEX_WAITERS))
		futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);

	return 0;
}

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     unsigned int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
static void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int next_pi;
	unsigned long futex_offset;
	int rc;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending) {
			if (handle_futex_death((void __user *)entry + futex_offset,
					       curr, pi, HANDLE_DEATH_LIST))
				return;
		}
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending) {
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip, HANDLE_DEATH_PENDING);
	}
}

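/*
 * For reference, a rough sketch of the user space side that exit_robust_list()
 * walks (illustrative only; field names follow the uapi struct robust_list_head
 * in <linux/futex.h>, while "struct my_robust_mutex" is a hypothetical user
 * type):
 *
 *	struct my_robust_mutex {
 *		struct robust_list list;	// links the held mutexes
 *		u32 futex_word;			// owner TID plus waiter bits
 *	};
 *
 *	static struct robust_list_head head = {
 *		.list		 = { .next = &head.list },	// empty, circular
 *		.futex_offset	 = offsetof(struct my_robust_mutex, futex_word) -
 *				   offsetof(struct my_robust_mutex, list),
 *		.list_op_pending = NULL,
 *	};
 *
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * Each lock operation links the mutex into head.list (setting list_op_pending
 * around the update), so that on sudden exit the kernel can find every held
 * futex word at "entry + futex_offset" and set FUTEX_OWNER_DIED in it.
 */
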
#ifdef CONFIG_COMPAT
static void __user *futex_uaddr(struct robust_list __user *entry,
				compat_long_t futex_offset)
{
	compat_uptr_t base = ptr_to_compat(entry);
	void __user *uaddr = compat_ptr(base + futex_offset);

	return uaddr;
}

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int
compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
			  compat_uptr_t __user *head, unsigned int *pi)
{
	if (get_user(*uentry, head))
		return -EFAULT;

	*entry = compat_ptr((*uentry) & ~1);
	*pi = (unsigned int)(*uentry) & 1;

	return 0;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
static void compat_exit_robust_list(struct task_struct *curr)
{
	struct compat_robust_list_head __user *head = curr->compat_robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int next_pi;
	compat_uptr_t uentry, next_uentry, upending;
	compat_long_t futex_offset;
	int rc;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (compat_fetch_robust_entry(&upending, &pending,
				      &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != (struct robust_list __user *) &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
					       (compat_uptr_t __user *)&entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending) {
			void __user *uaddr = futex_uaddr(entry, futex_offset);

			if (handle_futex_death(uaddr, curr, pi,
					       HANDLE_DEATH_LIST))
				return;
		}
		if (rc)
			return;
		uentry = next_uentry;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}
	if (pending) {
		void __user *uaddr = futex_uaddr(pending, futex_offset);

		handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
	}
}
#endif

#ifdef CONFIG_FUTEX_PI

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
static void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {
		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = futex_hash(&key);

		/*
		 * We can race against put_pi_state() removing itself from the
		 * list (a waiter going away).
		 * put_pi_state() will first decrement the reference count and
		 * then modify the list, so it's possible to see the list
		 * entry but fail this reference acquire.
		 *
		 * In that case; drop the locks to let put_pi_state() make
		 * progress and retry the loop.
		 */
		if (!refcount_inc_not_zero(&pi_state->refcount)) {
			raw_spin_unlock_irq(&curr->pi_lock);
			cpu_relax();
			raw_spin_lock_irq(&curr->pi_lock);
			continue;
		}
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		raw_spin_lock(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			/* retain curr->pi_lock for the loop invariant */
			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
			spin_unlock(&hb->lock);
			put_pi_state(pi_state);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;

		raw_spin_unlock(&curr->pi_lock);
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
		spin_unlock(&hb->lock);

		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}
#else
static inline void exit_pi_state_list(struct task_struct *curr) { }
#endif

static void futex_cleanup(struct task_struct *tsk)
{
	if (unlikely(tsk->robust_list)) {
		exit_robust_list(tsk);
		tsk->robust_list = NULL;
	}

#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list)) {
		compat_exit_robust_list(tsk);
		tsk->compat_robust_list = NULL;
	}
#endif

	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
}

/**
 * futex_exit_recursive - Set the task's futex state to FUTEX_STATE_DEAD
 * @tsk:	task to set the state on
 *
 * Set the futex exit state of the task lockless. The futex waiter code
 * observes that state when a task is exiting and loops until the task has
 * actually finished the futex cleanup. The worst case for this is that the
 * waiter runs through the wait loop until the state becomes visible.
 *
 * This is called from the recursive fault handling path in make_task_dead().
 *
 * This is best effort. Either the futex exit code has run already or
 * not. If the OWNER_DIED bit has been set on the futex then the waiter can
 * take it over. If not, the problem is pushed back to user space. If the
 * futex exit code did not run yet, then an already queued waiter might
 * block forever, but there is nothing which can be done about that.
 */
void futex_exit_recursive(struct task_struct *tsk)
{
	/* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
	if (tsk->futex_state == FUTEX_STATE_EXITING)
		mutex_unlock(&tsk->futex_exit_mutex);
	tsk->futex_state = FUTEX_STATE_DEAD;
}

static void futex_cleanup_begin(struct task_struct *tsk)
{
	/*
	 * Prevent various race issues against a concurrent incoming waiter
	 * including live locks by forcing the waiter to block on
	 * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
	 * attach_to_pi_owner().
	 */
	mutex_lock(&tsk->futex_exit_mutex);

	/*
	 * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
	 *
	 * This ensures that all subsequent checks of tsk->futex_state in
	 * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
	 * tsk->pi_lock held.
	 *
	 * It guarantees also that a pi_state which was queued right before
	 * the state change under tsk->pi_lock by a concurrent waiter must
	 * be observed in exit_pi_state_list().
	 */
	raw_spin_lock_irq(&tsk->pi_lock);
	tsk->futex_state = FUTEX_STATE_EXITING;
	raw_spin_unlock_irq(&tsk->pi_lock);
}

static void futex_cleanup_end(struct task_struct *tsk, int state)
{
	/*
	 * Lockless store. The only side effect is that an observer might
	 * take another loop until it becomes visible.
	 */
	tsk->futex_state = state;
	/*
	 * Drop the exit protection. This unblocks waiters which observed
	 * FUTEX_STATE_EXITING to reevaluate the state.
	 */
	mutex_unlock(&tsk->futex_exit_mutex);
}

void futex_exec_release(struct task_struct *tsk)
{
	/*
	 * The state handling is done for consistency, but in the case of
	 * exec() there is no way to prevent further damage as the PID stays
	 * the same. But for the unlikely and arguably buggy case that a
	 * futex is held on exec(), this provides at least as much state
	 * consistency protection as is possible.
	 */
	futex_cleanup_begin(tsk);
	futex_cleanup(tsk);
	/*
	 * Reset the state to FUTEX_STATE_OK. The task is alive and about to
	 * exec a new binary.
	 */
	futex_cleanup_end(tsk, FUTEX_STATE_OK);
}

void futex_exit_release(struct task_struct *tsk)
{
	futex_cleanup_begin(tsk);
	futex_cleanup(tsk);
	futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
}

static int __init futex_init(void)
{
	unsigned int futex_shift;
	unsigned long i;

	/*
	 * Scale the hash with the number of possible CPUs, e.g. 64 possible
	 * CPUs give 256 * 64 = 16384 buckets (already a power of two);
	 * CONFIG_BASE_SMALL systems get away with 16.
	 */
#if CONFIG_BASE_SMALL
	futex_hashsize = 16;
#else
	futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
#endif

	futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
					       futex_hashsize, 0, 0,
					       &futex_shift, NULL,
					       futex_hashsize, futex_hashsize);
	futex_hashsize = 1UL << futex_shift;

	for (i = 0; i < futex_hashsize; i++) {
		atomic_set(&futex_queues[i].waiters, 0);
		plist_head_init(&futex_queues[i].chain);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
core_initcall(futex_init);