Lines Matching +full:block +full:- +full:fetch

1 // SPDX-License-Identifier: GPL-2.0-or-later
16 * PI-futex support started by Ingo Molnar and Thomas Gleixner
23 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
29 * Kirkwood for proof-of-concept implementation.
38 #include <linux/fault-inject.h>
97 debugfs_create_bool("ignore-private", mode, dir, in fail_futex_debugfs()
109 * futex_hash - Return the hash bucket in the global hash
118 key->both.offset); in futex_hash()
120 return &futex_queues[hash & (futex_hashsize - 1)]; in futex_hash()
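Lines 118 and 120 are the whole bucket-selection step: the key words are hashed, and because futex_hashsize is a power of two, masking with (futex_hashsize - 1) is a cheap modulo. A hedged reconstruction of the helper, assuming the mainline jhash2() hash and field layout (neither is shown in the matches above):

    static struct futex_hash_bucket *futex_hash(union futex_key *key)
    {
            /* Hash all key words; seed with the in-page offset. */
            u32 hash = jhash2((u32 *)key,
                              offsetof(typeof(*key), both.offset) / 4,
                              key->both.offset);

            /* futex_hashsize is a power of two: masking == modulo. */
            return &futex_queues[hash & (futex_hashsize - 1)];
    }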
125 * futex_setup_timer - set up the sleeping hrtimer.
148 hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns); in futex_setup_timer()
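Line 148 programs an absolute expiry plus range_ns of slack, which lets the hrtimer core coalesce nearby wakeups. A sketch of how the helper fits together, assuming the mainline sleeper setup and FLAGS_CLOCKRT clock selection (not visible in the matches):

    static struct hrtimer_sleeper *
    futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
                      int flags, u64 range_ns)
    {
            if (!time)
                    return NULL;            /* no timeout requested */

            hrtimer_init_sleeper_on_stack(timeout,
                                          (flags & FLAGS_CLOCKRT) ?
                                          CLOCK_REALTIME : CLOCK_MONOTONIC,
                                          HRTIMER_MODE_ABS);
            /* Absolute expiry plus slack for wakeup coalescing. */
            hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);
            return timeout;
    }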
156 * This relies on u64 not wrapping in the lifetime of the machine, which with
167 * It is important that futex_match() will never have a false-positive, esp.
168 * for PI futexes that can mess up the state. The above argues that false-negatives
177 old = atomic64_read(&inode->i_sequence); in get_inode_sequence_number()
186 old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new); in get_inode_sequence_number()
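Lines 177 and 186 show a lockless publish: read the inode's sequence number first, and only race to install a fresh one if it is still zero. A hedged sketch of the whole helper (the global counter i_seq is assumed from mainline):

    static u64 get_inode_sequence_number(struct inode *inode)
    {
            static atomic64_t i_seq;        /* global; 0 is never handed out */
            u64 old;

            /* Fast path: a sequence number was already published. */
            old = atomic64_read(&inode->i_sequence);
            if (likely(old))
                    return old;

            for (;;) {
                    u64 new = atomic64_add_return(1, &i_seq);
                    if (WARN_ON_ONCE(!new)) /* u64 assumed never to wrap */
                            continue;

                    /* Whoever wins the cmpxchg defines the value. */
                    old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new);
                    if (old)
                            return old;     /* lost the race; use the winner's */
                    return new;
            }
    }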
194 * get_futex_key() - Get parameters which are the keys for a futex
207 * ( inode->i_sequence, page->index, offset_within_page )
213 * ( current->mm, address, 0 )
224 struct mm_struct *mm = current->mm; in get_futex_key()
232 key->both.offset = address % PAGE_SIZE; in get_futex_key()
234 return -EINVAL; in get_futex_key()
235 address -= key->both.offset; in get_futex_key()
238 return -EFAULT; in get_futex_key()
241 return -EFAULT; in get_futex_key()
252 * On no-MMU, shared futexes are treated as private, therefore in get_futex_key()
258 key->private.mm = mm; in get_futex_key()
260 key->private.mm = NULL; in get_futex_key()
262 key->private.address = address; in get_futex_key()
269 return -EFAULT; in get_futex_key()
274 * and get read-only access. in get_futex_key()
276 if (err == -EFAULT && rw == FUTEX_READ) { in get_futex_key()
289 * file-backed region case and guards against movement to swap cache. in get_futex_key()
293 * From this point on, mapping will be re-verified if necessary and in get_futex_key()
299 * based on the address. For filesystem-backed pages, the tail is in get_futex_key()
305 mapping = READ_ONCE(page->mapping); in get_futex_key()
308 * If page->mapping is NULL, then it cannot be a PageAnon in get_futex_key()
320 * an unlikely race, but we do need to retry for page->mapping. in get_futex_key()
331 shmem_swizzled = PageSwapCache(page) || page->mapping; in get_futex_key()
338 return -EFAULT; in get_futex_key()
348 * it's a read-only handle, it's expected that futexes attach to in get_futex_key()
357 err = -EFAULT; in get_futex_key()
361 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */ in get_futex_key()
362 key->private.mm = mm; in get_futex_key()
363 key->private.address = address; in get_futex_key()
370 * the page->mapping must be traversed. Ordinarily this should in get_futex_key()
377 * mapping->host can be safely accessed as being a valid inode. in get_futex_key()
381 if (READ_ONCE(page->mapping) != mapping) { in get_futex_key()
388 inode = READ_ONCE(mapping->host); in get_futex_key()
396 key->both.offset |= FUT_OFF_INODE; /* inode-based key */ in get_futex_key()
397 key->shared.i_seq = get_inode_sequence_number(inode); in get_futex_key()
398 key->shared.pgoff = page_to_pgoff(tail); in get_futex_key()
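The doc comment at lines 207 and 213 names the two key shapes, and lines 361-363 and 396-398 show them being filled in. A hedged sketch of the union holding them; field names follow the accesses above, but the exact mainline layout (padding, an anonymous union in private) may differ:

    union futex_key {
            struct {                        /* shared, inode-based */
                    u64 i_seq;              /* inode->i_sequence */
                    unsigned long pgoff;    /* page offset in the mapping */
                    unsigned int offset;    /* in-page offset | FUT_OFF_INODE */
            } shared;
            struct {                        /* private / MM-shared */
                    struct mm_struct *mm;   /* current->mm */
                    unsigned long address;
                    unsigned int offset;    /* may carry FUT_OFF_MMSHARED */
            } private;
            struct {                        /* type-erased view for hashing */
                    u64 ptr;
                    unsigned long word;
                    unsigned int offset;
            } both;
    };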
408 * fault_in_user_writeable() - Fault in user address and verify RW access
414 * We have no generic implementation of a non-destructive write to the
421 struct mm_struct *mm = current->mm; in fault_in_user_writeable()
433 * futex_top_waiter() - Return the highest priority waiter on a futex
443 plist_for_each_entry(this, &hb->chain, list) { in futex_top_waiter()
444 if (futex_match(&this->key, key)) in futex_top_waiter()
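hb->chain is a priority-sorted plist, so the first entry whose key matches is by construction the highest-priority waiter. The helper is short enough to reconstruct in full (a sketch that closely tracks mainline):

    struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
                                     union futex_key *key)
    {
            struct futex_q *this;

            /* The chain is sorted by priority; the first match wins. */
            plist_for_each_entry(this, &hb->chain, list) {
                    if (futex_match(&this->key, key))
                            return this;
            }
            return NULL;
    }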
469 return ret ? -EFAULT : 0; in futex_get_value_locked()
473 * wait_for_owner_exiting - Block until the owner has exited
481 if (ret != -EBUSY) { in wait_for_owner_exiting()
486 if (WARN_ON_ONCE(ret == -EBUSY && !exiting)) in wait_for_owner_exiting()
489 mutex_lock(&exiting->futex_exit_mutex); in wait_for_owner_exiting()
492 * while the task was in exec()->exec_futex_release() then it can in wait_for_owner_exiting()
498 mutex_unlock(&exiting->futex_exit_mutex); in wait_for_owner_exiting()
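Note that the helper never inspects the exiting owner's state: taking and immediately releasing futex_exit_mutex is the whole synchronization, because the exiting task holds that mutex for the entire cleanup. A hedged sketch (the final put_task_struct() is assumed from mainline, dropping the reference the caller took):

    void wait_for_owner_exiting(int ret, struct task_struct *exiting)
    {
            if (ret != -EBUSY) {
                    WARN_ON_ONCE(exiting);
                    return;
            }

            if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
                    return;

            /* Block until the exiting owner finished its futex cleanup. */
            mutex_lock(&exiting->futex_exit_mutex);
            mutex_unlock(&exiting->futex_exit_mutex);

            put_task_struct(exiting);
    }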
504 * __futex_unqueue() - Remove the futex_q from its futex_hash_bucket
507 * The q->lock_ptr must not be NULL and must be held by the caller.
513 if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list))) in __futex_unqueue()
515 lockdep_assert_held(q->lock_ptr); in __futex_unqueue()
517 hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock); in __futex_unqueue()
518 plist_del(&q->list, &hb->chain); in __futex_unqueue()
522 /* The key must already be stored in q->key. */
524 __acquires(&hb->lock) in futex_q_lock()
528 hb = futex_hash(&q->key); in futex_q_lock()
532 * a potential waker won't miss a to-be-slept task that is in futex_q_lock()
540 q->lock_ptr = &hb->lock; in futex_q_lock()
542 spin_lock(&hb->lock); in futex_q_lock()
547 __releases(&hb->lock) in futex_q_unlock()
549 spin_unlock(&hb->lock); in futex_q_unlock()
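The truncated comment at line 532 is about waiter accounting: the per-bucket waiter count must be raised, with a full memory barrier, before the bucket lock is taken, so a concurrent waker can never observe zero waiters for a task that is about to sleep. A hedged sketch of the pattern (hb_waiters_inc() follows the mainline helper name):

    struct futex_hash_bucket *futex_q_lock(struct futex_q *q)
            __acquires(&hb->lock)
    {
            struct futex_hash_bucket *hb = futex_hash(&q->key);

            /*
             * Bump the waiter count before taking the lock so that a
             * potential waker won't miss a to-be-slept task still
             * spinning on hb->lock.
             */
            hb_waiters_inc(hb);             /* implies smp_mb() */

            q->lock_ptr = &hb->lock;
            spin_lock(&hb->lock);
            return hb;
    }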
559 * - either the real thread-priority for the real-time threads in __futex_queue()
561 * - or MAX_RT_PRIO for non-RT threads. in __futex_queue()
562 * Thus, all RT-threads are woken first in priority order, and in __futex_queue()
565 prio = min(current->normal_prio, MAX_RT_PRIO); in __futex_queue()
567 plist_node_init(&q->list, prio); in __futex_queue()
568 plist_add(&q->list, &hb->chain); in __futex_queue()
569 q->task = current; in __futex_queue()
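Line 565 is the ordering rule in one expression: plist sorts by ascending prio, RT tasks keep normal_prio (below MAX_RT_PRIO), and everyone else is clamped to MAX_RT_PRIO, so non-RT waiters fall back to FIFO order among themselves. Assembled as a sketch:

    static void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb)
    {
            /*
             * RT tasks keep their real priority (lower value = higher
             * priority); all non-RT tasks share MAX_RT_PRIO and thus
             * wake in FIFO order of insertion.
             */
            int prio = min(current->normal_prio, MAX_RT_PRIO);

            plist_node_init(&q->list, prio);
            plist_add(&q->list, &hb->chain);
            q->task = current;
    }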
573 * futex_unqueue() - Remove the futex_q from its futex_hash_bucket
576 * The q->lock_ptr must not be held by the caller. A call to futex_unqueue() must
581 - 1 - if the futex_q was still queued (and we removed it);
581 * - 0 - if the futex_q was already removed by the waking thread
591 * q->lock_ptr can change between this read and the following spin_lock. in futex_unqueue()
592 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and in futex_unqueue()
595 lock_ptr = READ_ONCE(q->lock_ptr); in futex_unqueue()
599 * q->lock_ptr can change between reading it and in futex_unqueue()
604 * q->lock_ptr must have changed (maybe several times) in futex_unqueue()
611 if (unlikely(lock_ptr != q->lock_ptr)) { in futex_unqueue()
617 BUG_ON(q->pi_state); in futex_unqueue()
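Lines 591-611 describe the classic unlocked-pointer dance: snapshot q->lock_ptr with READ_ONCE(), take the lock, then re-check that the pointer did not change underneath (a waker may have woken or requeued the task in between). A hedged reconstruction:

    int futex_unqueue(struct futex_q *q)
    {
            spinlock_t *lock_ptr;
            int ret = 0;

    retry:
            /* READ_ONCE() keeps the compiler from reloading lock_ptr. */
            lock_ptr = READ_ONCE(q->lock_ptr);
            if (lock_ptr != NULL) {
                    spin_lock(lock_ptr);
                    /* A waker may have moved or woken us meanwhile. */
                    if (unlikely(lock_ptr != q->lock_ptr)) {
                            spin_unlock(lock_ptr);
                            goto retry;
                    }
                    __futex_unqueue(q);
                    BUG_ON(q->pi_state);
                    spin_unlock(lock_ptr);
                    ret = 1;        /* we were still queued and removed it */
            }
            return ret;
    }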
634 BUG_ON(!q->pi_state); in futex_unqueue_pi()
635 put_pi_state(q->pi_state); in futex_unqueue_pi()
636 q->pi_state = NULL; in futex_unqueue_pi()
644 * Process a futex-list entry, check whether it's owned by the
656 return -1; in handle_futex_death()
660 return -1; in handle_futex_death()
678 * potential waiters which can cause these waiters to block in handle_futex_death()
683 * 1) task->robust_list->list_op_pending != NULL in handle_futex_death()
713 * futex_wake() even if OWNER_DIED is already set - in handle_futex_death()
715 * thread-death.) The rest of the cleanup is done in in handle_futex_death()
731 case -EFAULT: in handle_futex_death()
733 return -1; in handle_futex_death()
736 case -EAGAIN: in handle_futex_death()
750 * Wake robust non-PI futexes here. The wakeup of in handle_futex_death()
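The fragments above outline the recovery protocol: only a lock word still carrying the dead owner's TID qualifies, and it is rewritten with a cmpxchg that preserves FUTEX_WAITERS while setting FUTEX_OWNER_DIED, retrying on -EAGAIN and faulting the page in writable on -EFAULT. A hedged sketch of that core (robust_death_update() is a hypothetical name for the excerpt; mainline does this inline in handle_futex_death()):

    static int robust_death_update(u32 __user *uaddr, struct task_struct *curr)
    {
            u32 uval, nval, mval;
            int err;

    retry:
            if (get_user(uval, uaddr))
                    return -1;

            /* Only a lock still recording the dead owner's TID qualifies. */
            if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
                    return 0;

            /* Keep FUTEX_WAITERS, clear the TID, mark the owner dead. */
            mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
            err = futex_cmpxchg_value_locked(&nval, uaddr, uval, mval);
            if (err == -EFAULT) {
                    if (fault_in_user_writeable(uaddr))
                            return -1;      /* address is truly bad */
                    goto retry;
            }
            if (err == -EAGAIN) {
                    cond_resched();
                    goto retry;
            }
            if (nval != uval)
                    goto retry;             /* raced with userspace */

            return nval & FUTEX_WAITERS;    /* nonzero: caller should wake */
    }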
760 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
769 return -EFAULT; in fetch_robust_entry()
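robust_list entries are at least 4-byte aligned, so bit 0 of the user pointer is free to mark PI futexes; the fetch masks it out and reports it separately. A sketch consistent with the fragments above:

    static inline int fetch_robust_entry(struct robust_list __user **entry,
                                         struct robust_list __user * __user *head,
                                         unsigned int *pi)
    {
            unsigned long uentry;

            if (get_user(uentry, (unsigned long __user *)head))
                    return -EFAULT;

            *entry = (void __user *)(uentry & ~1UL); /* strip the PI bit */
            *pi = uentry & 1;                        /* bit 0: PI futex */

            return 0;
    }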
778 * Walk curr->robust_list (very carefully, it's a userspace list!)
781 * We silently return on any sign of a list-walking problem.
785 struct robust_list_head __user *head = curr->robust_list; in exit_robust_list()
793 * Fetch the list head (which was registered earlier, via in exit_robust_list()
796 if (fetch_robust_entry(&entry, &head->list.next, &pi)) in exit_robust_list()
799 * Fetch the relative futex offset: in exit_robust_list()
801 if (get_user(futex_offset, &head->futex_offset)) in exit_robust_list()
804 * Fetch any possibly pending lock-add first, and handle it in exit_robust_list()
807 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) in exit_robust_list()
811 while (entry != &head->list) { in exit_robust_list()
813 * Fetch the next entry in the list before calling in exit_robust_list()
816 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi); in exit_robust_list()
833 if (!--limit) in exit_robust_list()
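The walk at lines 811-833 is defensive at every step: the next pointer is fetched before the current entry is handled (handle_futex_death() may fault), the pending lock-add is skipped so it is not processed twice, and a decrementing limit bounds the iteration so a circular user list cannot wedge the exiting task. A condensed, hedged skeleton of the loop (HANDLE_DEATH_LIST follows the mainline constant):

    while (entry != &head->list) {
            /* Fetch the next entry before handle_futex_death() can fault. */
            rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);

            /* The pending lock-add is handled separately; skip it here. */
            if (entry != pending) {
                    if (handle_futex_death((void __user *)entry + futex_offset,
                                           curr, pi, HANDLE_DEATH_LIST))
                            return;
            }
            if (rc)
                    return;
            entry = next_entry;
            pi = next_pi;

            /* Avoid excessively long or circular user lists. */
            if (!--limit)
                    break;

            cond_resched();
    }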
856 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
863 return -EFAULT; in compat_fetch_robust_entry()
872 * Walk curr->robust_list (very carefully, it's a userspace list!)
875 * We silently return on any sign of a list-walking problem.
879 struct compat_robust_list_head __user *head = curr->compat_robust_list; in compat_exit_robust_list()
888 * Fetch the list head (which was registered earlier, via in compat_exit_robust_list()
891 if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) in compat_exit_robust_list()
894 * Fetch the relative futex offset: in compat_exit_robust_list()
896 if (get_user(futex_offset, &head->futex_offset)) in compat_exit_robust_list()
899 * Fetch any possibly pending lock-add first, and handle it in compat_exit_robust_list()
903 &head->list_op_pending, &pip)) in compat_exit_robust_list()
907 while (entry != (struct robust_list __user *) &head->list) { in compat_exit_robust_list()
909 * Fetch the next entry in the list before calling in compat_exit_robust_list()
913 (compat_uptr_t __user *)&entry->next, &next_pi); in compat_exit_robust_list()
933 if (!--limit) in compat_exit_robust_list()
950 * Kernel cleans up PI-state, but userspace is likely hosed.
951 * (Robust-futex cleanup is separate and might save the day for userspace.)
955 struct list_head *next, *head = &curr->pi_state_list; in exit_pi_state_list()
965 raw_spin_lock_irq(&curr->pi_lock); in exit_pi_state_list()
967 next = head->next; in exit_pi_state_list()
969 key = pi_state->key; in exit_pi_state_list()
982 if (!refcount_inc_not_zero(&pi_state->refcount)) { in exit_pi_state_list()
983 raw_spin_unlock_irq(&curr->pi_lock); in exit_pi_state_list()
985 raw_spin_lock_irq(&curr->pi_lock); in exit_pi_state_list()
988 raw_spin_unlock_irq(&curr->pi_lock); in exit_pi_state_list()
990 spin_lock(&hb->lock); in exit_pi_state_list()
991 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); in exit_pi_state_list()
992 raw_spin_lock(&curr->pi_lock); in exit_pi_state_list()
994 * We dropped the pi-lock, so re-check whether this in exit_pi_state_list()
995 * task still owns the PI-state: in exit_pi_state_list()
997 if (head->next != next) { in exit_pi_state_list()
998 /* retain curr->pi_lock for the loop invariant */ in exit_pi_state_list()
999 raw_spin_unlock(&pi_state->pi_mutex.wait_lock); in exit_pi_state_list()
1000 spin_unlock(&hb->lock); in exit_pi_state_list()
1005 WARN_ON(pi_state->owner != curr); in exit_pi_state_list()
1006 WARN_ON(list_empty(&pi_state->list)); in exit_pi_state_list()
1007 list_del_init(&pi_state->list); in exit_pi_state_list()
1008 pi_state->owner = NULL; in exit_pi_state_list()
1010 raw_spin_unlock(&curr->pi_lock); in exit_pi_state_list()
1011 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); in exit_pi_state_list()
1012 spin_unlock(&hb->lock); in exit_pi_state_list()
1014 rt_mutex_futex_unlock(&pi_state->pi_mutex); in exit_pi_state_list()
1017 raw_spin_lock_irq(&curr->pi_lock); in exit_pi_state_list()
1019 raw_spin_unlock_irq(&curr->pi_lock); in exit_pi_state_list()
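Lines 982-985 are the subtle part of this walk: the pi_state may already be headed for destruction, so the loop proceeds only if refcount_inc_not_zero() succeeds, and otherwise drops curr->pi_lock briefly so the concurrent free can finish before retrying. The idiom in isolation (a hedged excerpt, with curr->pi_lock held on entry):

    if (!refcount_inc_not_zero(&pi_state->refcount)) {
            /* Being freed: back off, let put_pi_state() complete, retry. */
            raw_spin_unlock_irq(&curr->pi_lock);
            cpu_relax();
            raw_spin_lock_irq(&curr->pi_lock);
            continue;                       /* re-read head->next */
    }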
1027 if (unlikely(tsk->robust_list)) { in futex_cleanup()
1029 tsk->robust_list = NULL; in futex_cleanup()
1033 if (unlikely(tsk->compat_robust_list)) { in futex_cleanup()
1035 tsk->compat_robust_list = NULL; in futex_cleanup()
1039 if (unlikely(!list_empty(&tsk->pi_state_list))) in futex_cleanup()
1044 * futex_exit_recursive - Set the task's futex state to FUTEX_STATE_DEAD
1058 * block forever, but there is nothing which can be done about that.
1063 if (tsk->futex_state == FUTEX_STATE_EXITING) in futex_exit_recursive()
1064 mutex_unlock(&tsk->futex_exit_mutex); in futex_exit_recursive()
1065 tsk->futex_state = FUTEX_STATE_DEAD; in futex_exit_recursive()
1072 * including live locks by forcing the waiter to block on in futex_cleanup_begin()
1073 * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in in futex_cleanup_begin()
1076 mutex_lock(&tsk->futex_exit_mutex); in futex_cleanup_begin()
1079 * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock. in futex_cleanup_begin()
1081 * This ensures that all subsequent checks of tsk->futex_state in in futex_cleanup_begin()
1083 * tsk->pi_lock held. in futex_cleanup_begin()
1086 * the state change under tsk->pi_lock by a concurrent waiter must in futex_cleanup_begin()
1089 raw_spin_lock_irq(&tsk->pi_lock); in futex_cleanup_begin()
1090 tsk->futex_state = FUTEX_STATE_EXITING; in futex_cleanup_begin()
1091 raw_spin_unlock_irq(&tsk->pi_lock); in futex_cleanup_begin()
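Lines 1072-1091 describe a two-part publication protocol: hold futex_exit_mutex for the entire cleanup so waiters that see FUTEX_STATE_EXITING have something to block on, and flip the state under pi_lock so any waiter checking futex_state under the same lock gets a consistent snapshot. Assembled as a hedged sketch:

    static void futex_cleanup_begin(struct task_struct *tsk)
    {
            /* Held across the whole cleanup; waiters block on it. */
            mutex_lock(&tsk->futex_exit_mutex);

            /*
             * Publish the state change under pi_lock so a waiter that
             * observes FUTEX_STATE_EXITING under tsk->pi_lock also
             * observes everything written before the transition.
             */
            raw_spin_lock_irq(&tsk->pi_lock);
            tsk->futex_state = FUTEX_STATE_EXITING;
            raw_spin_unlock_irq(&tsk->pi_lock);
    }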
1100 tsk->futex_state = state; in futex_cleanup_end()
1105 mutex_unlock(&tsk->futex_exit_mutex); in futex_cleanup_end()