xref: /openbmc/linux/kernel/futex/core.c (revision 479965a2)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Fast Userspace Mutexes (which I call "Futexes!").
4  *  (C) Rusty Russell, IBM 2002
5  *
6  *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
7  *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
8  *
9  *  Removed page pinning, fix privately mapped COW pages and other cleanups
10  *  (C) Copyright 2003, 2004 Jamie Lokier
11  *
12  *  Robust futex support started by Ingo Molnar
13  *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
14  *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
15  *
16  *  PI-futex support started by Ingo Molnar and Thomas Gleixner
17  *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
18  *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
19  *
20  *  PRIVATE futexes by Eric Dumazet
21  *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
22  *
23  *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
24  *  Copyright (C) IBM Corporation, 2009
25  *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
26  *
27  *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
28  *  enough at me, Linus for the original (flawed) idea, Matthew
29  *  Kirkwood for proof-of-concept implementation.
30  *
31  *  "The futexes are also cursed."
32  *  "But they come in a choice of three flavours!"
33  */
34 #include <linux/compat.h>
35 #include <linux/jhash.h>
36 #include <linux/pagemap.h>
37 #include <linux/memblock.h>
38 #include <linux/fault-inject.h>
39 #include <linux/slab.h>
40 
41 #include "futex.h"
42 #include "../locking/rtmutex_common.h"
43 
44 /*
45  * The base of the bucket array and its size are always used together
46  * (after initialization only in futex_hash()), so ensure that they
47  * reside in the same cacheline.
48  */
49 static struct {
50 	struct futex_hash_bucket *queues;
51 	unsigned long            hashsize;
52 } __futex_data __read_mostly __aligned(2*sizeof(long));
53 #define futex_queues   (__futex_data.queues)
54 #define futex_hashsize (__futex_data.hashsize)
55 
56 
57 /*
58  * Fault injections for futexes.
59  */
60 #ifdef CONFIG_FAIL_FUTEX
61 
62 static struct {
63 	struct fault_attr attr;
64 
65 	bool ignore_private;
66 } fail_futex = {
67 	.attr = FAULT_ATTR_INITIALIZER,
68 	.ignore_private = false,
69 };
70 
71 static int __init setup_fail_futex(char *str)
72 {
73 	return setup_fault_attr(&fail_futex.attr, str);
74 }
75 __setup("fail_futex=", setup_fail_futex);
76 
77 bool should_fail_futex(bool fshared)
78 {
79 	if (fail_futex.ignore_private && !fshared)
80 		return false;
81 
82 	return should_fail(&fail_futex.attr, 1);
83 }
84 
85 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
86 
87 static int __init fail_futex_debugfs(void)
88 {
89 	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
90 	struct dentry *dir;
91 
92 	dir = fault_create_debugfs_attr("fail_futex", NULL,
93 					&fail_futex.attr);
94 	if (IS_ERR(dir))
95 		return PTR_ERR(dir);
96 
97 	debugfs_create_bool("ignore-private", mode, dir,
98 			    &fail_futex.ignore_private);
99 	return 0;
100 }
101 
102 late_initcall(fail_futex_debugfs);
103 
104 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
105 
106 #endif /* CONFIG_FAIL_FUTEX */
107 
108 /**
109  * futex_hash - Return the hash bucket in the global hash
110  * @key:	Pointer to the futex key for which the hash is calculated
111  *
112  * We hash on the keys returned from get_futex_key (see below) and return the
113  * corresponding hash bucket in the global hash.
114  */
115 struct futex_hash_bucket *futex_hash(union futex_key *key)
116 {
117 	u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4,
118 			  key->both.offset);
119 
120 	return &futex_queues[hash & (futex_hashsize - 1)];
121 }
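
/*
 * Illustrative sketch of the lookup pattern built on futex_hash() (a
 * condensed model of what the wait/wake paths do, not a verbatim copy of
 * them; uaddr and flags stand for the caller's arguments): waiters and
 * wakers that computed futex_match()ing keys hash to the same bucket and
 * therefore serialize on the same hb->lock.
 *
 *	union futex_key key = FUTEX_KEY_INIT;
 *	struct futex_hash_bucket *hb;
 *	int ret;
 *
 *	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_READ);
 *	if (ret)
 *		return ret;
 *	hb = futex_hash(&key);
 *	spin_lock(&hb->lock);
 *	... inspect or modify hb->chain under the lock ...
 *	spin_unlock(&hb->lock);
 */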
122 
123 
124 /**
125  * futex_setup_timer - set up the sleeping hrtimer.
126  * @time:	ptr to the given timeout value
127  * @timeout:	the hrtimer_sleeper structure to be set up
128  * @flags:	futex flags
129  * @range_ns:	optional range in ns
130  *
131  * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
132  *	   value given
133  */
134 struct hrtimer_sleeper *
135 futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
136 		  int flags, u64 range_ns)
137 {
138 	if (!time)
139 		return NULL;
140 
141 	hrtimer_init_sleeper_on_stack(timeout, (flags & FLAGS_CLOCKRT) ?
142 				      CLOCK_REALTIME : CLOCK_MONOTONIC,
143 				      HRTIMER_MODE_ABS);
144 	/*
145 	 * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
146 	 * effectively the same as calling hrtimer_set_expires().
147 	 */
148 	hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);
149 
150 	return timeout;
151 }
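
/*
 * Typical use of futex_setup_timer() in a wait path (a condensed sketch
 * modelled on futex_wait(); the real callers live in the wait/wake and PI
 * code, and abs_time/flags stand for the caller's parameters):
 *
 *	struct hrtimer_sleeper timeout, *to;
 *
 *	to = futex_setup_timer(abs_time, &timeout, flags,
 *			       current->timer_slack_ns);
 *	if (to)
 *		hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);
 *	... sleep and handle the wakeup ...
 *	if (to) {
 *		hrtimer_cancel(&to->timer);
 *		destroy_hrtimer_on_stack(&to->timer);
 *	}
 */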
152 
153 /*
154  * Generate a machine-wide unique identifier for this inode.
155  *
156  * This relies on u64 not wrapping in the lifetime of the machine, which with
157  * 1ns resolution means almost 585 years.
158  *
159  * This further relies on the fact that a well-formed program will not unmap
160  * the file while it has a (shared) futex waiting on it. This mapping will have
161  * a file reference which pins the mount and inode.
162  *
163  * If for some reason an inode gets evicted and read back in again, it will get
164  * a new sequence number and will _NOT_ match, even though it is the exact same
165  * file.
166  *
167  * It is important that futex_match() never returns a false positive,
168  * especially for PI futexes, where that could corrupt the PI state. The
169  * above argues that false negatives are only possible for malformed programs.
170  */
171 static u64 get_inode_sequence_number(struct inode *inode)
172 {
173 	static atomic64_t i_seq;
174 	u64 old;
175 
176 	/* Does the inode already have a sequence number? */
177 	old = atomic64_read(&inode->i_sequence);
178 	if (likely(old))
179 		return old;
180 
181 	for (;;) {
182 		u64 new = atomic64_add_return(1, &i_seq);
183 		if (WARN_ON_ONCE(!new))
184 			continue;
185 
186 		old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new);
187 		if (old)
188 			return old;
189 		return new;
190 	}
191 }
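
/*
 * Back-of-the-envelope check for the "almost 585 years" figure in the
 * comment above: a u64 holds 2^64 ~= 1.8 * 10^19 distinct values, and
 * 2^64 ns / (10^9 ns/s * 31,557,600 s/yr) ~= 584.5 years. So even if a
 * fresh sequence number were consumed every nanosecond, the counter would
 * not wrap within any realistic machine lifetime.
 */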
192 
193 /**
194  * get_futex_key() - Get parameters which are the keys for a futex
195  * @uaddr:	virtual address of the futex
196  * @fshared:	false for a PROCESS_PRIVATE futex, true for PROCESS_SHARED
197  * @key:	address where result is stored.
198  * @rw:		mapping needs to be read/write (values: FUTEX_READ,
199  *              FUTEX_WRITE)
200  *
201  * Return: a negative error code or 0
202  *
203  * The key words are stored in @key on success.
204  *
205  * For shared mappings (when @fshared), the key is:
206  *
207  *   ( inode->i_sequence, page->index, offset_within_page )
208  *
209  * [ also see get_inode_sequence_number() ]
210  *
211  * For private mappings (or when !@fshared), the key is:
212  *
213  *   ( current->mm, address, 0 )
214  *
215  * This allows (cross process, where applicable) identification of the futex
216  * without keeping the page pinned for the duration of the FUTEX_WAIT.
217  *
218  * lock_page() might sleep, the caller should not hold a spinlock.
219  */
220 int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
221 		  enum futex_access rw)
222 {
223 	unsigned long address = (unsigned long)uaddr;
224 	struct mm_struct *mm = current->mm;
225 	struct page *page, *tail;
226 	struct address_space *mapping;
227 	int err, ro = 0;
228 
229 	/*
230 	 * The futex address must be "naturally" aligned.
231 	 */
232 	key->both.offset = address % PAGE_SIZE;
233 	if (unlikely((address % sizeof(u32)) != 0))
234 		return -EINVAL;
235 	address -= key->both.offset;
236 
237 	if (unlikely(!access_ok(uaddr, sizeof(u32))))
238 		return -EFAULT;
239 
240 	if (unlikely(should_fail_futex(fshared)))
241 		return -EFAULT;
242 
243 	/*
244 	 * PROCESS_PRIVATE futexes are fast.
245 	 * As the mm cannot disappear under us and the 'key' only needs the
246 	 * virtual address, we don't even have to find the underlying vma.
247 	 * Note: We do have to check that 'uaddr' is a valid user address,
248 	 *       but access_ok() should be faster than find_vma().
249 	 */
250 	if (!fshared) {
251 		key->private.mm = mm;
252 		key->private.address = address;
253 		return 0;
254 	}
255 
256 again:
257 	/* Ignore any VERIFY_READ mapping (futex common case) */
258 	if (unlikely(should_fail_futex(true)))
259 		return -EFAULT;
260 
261 	err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
262 	/*
263 	 * If write access is not required (e.g. FUTEX_WAIT), try
264 	 * to get read-only access.
265 	 */
266 	if (err == -EFAULT && rw == FUTEX_READ) {
267 		err = get_user_pages_fast(address, 1, 0, &page);
268 		ro = 1;
269 	}
270 	if (err < 0)
271 		return err;
272 	else
273 		err = 0;
274 
275 	/*
276 	 * The treatment of mapping from this point on is critical. The page
277 	 * lock protects many things but in this context the page lock
278 	 * stabilizes mapping, prevents inode freeing in the shared
279 	 * file-backed region case and guards against movement to swap cache.
280 	 *
281 	 * Strictly speaking the page lock is not needed in all cases being
282 	 * considered here and the page lock forces unnecessary serialization.
283 	 * From this point on, mapping will be re-verified if necessary and
284 	 * the page lock will be acquired only if it is unavoidable.
285 	 *
286 	 * Mapping checks require the head page for any compound page so the
287 	 * head page and mapping are looked up now. For anonymous pages, it
288 	 * does not matter if the page splits in the future as the key is
289 	 * based on the address. For filesystem-backed pages, the tail is
290 	 * required as the index of the page determines the key. For
291 	 * base pages, there is no tail page and tail == page.
292 	 */
293 	tail = page;
294 	page = compound_head(page);
295 	mapping = READ_ONCE(page->mapping);
296 
297 	/*
298 	 * If page->mapping is NULL, then it cannot be a PageAnon
299 	 * page; but it might be the ZERO_PAGE or in the gate area or
300 	 * in a special mapping (all cases which we are happy to fail);
301 	 * or it may have been a good file page when get_user_pages_fast
302 	 * found it, but truncated or holepunched or subjected to
303 	 * invalidate_complete_page2 before we got the page lock (also
304 	 * cases which we are happy to fail).  And we hold a reference,
305 	 * so refcount care in invalidate_inode_page's remove_mapping
306 	 * prevents drop_caches from setting mapping to NULL beneath us.
307 	 *
308 	 * The case we do have to guard against is when memory pressure made
309 	 * shmem_writepage move it from filecache to swapcache beneath us:
310 	 * an unlikely race, but we do need to retry for page->mapping.
311 	 */
312 	if (unlikely(!mapping)) {
313 		int shmem_swizzled;
314 
315 		/*
316 		 * Page lock is required to identify which special case above
317 		 * applies. If this is really a shmem page then the page lock
318 		 * will prevent unexpected transitions.
319 		 */
320 		lock_page(page);
321 		shmem_swizzled = PageSwapCache(page) || page->mapping;
322 		unlock_page(page);
323 		put_page(page);
324 
325 		if (shmem_swizzled)
326 			goto again;
327 
328 		return -EFAULT;
329 	}
330 
331 	/*
332 	 * Private mappings are handled in a simple way.
333 	 *
334 	 * If the futex key is stored on an anonymous page, then the associated
335 	 * object is the mm which is implicitly pinned by the calling process.
336 	 *
337 	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
338 	 * it's a read-only handle, it's expected that futexes attach to
339 	 * the object not the particular process.
340 	 */
341 	if (PageAnon(page)) {
342 		/*
343 		 * A RO anonymous page will never change and thus doesn't make
344 		 * sense for futex operations.
345 		 */
346 		if (unlikely(should_fail_futex(true)) || ro) {
347 			err = -EFAULT;
348 			goto out;
349 		}
350 
351 		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
352 		key->private.mm = mm;
353 		key->private.address = address;
354 
355 	} else {
356 		struct inode *inode;
357 
358 		/*
359 		 * The associated futex object in this case is the inode and
360 		 * the page->mapping must be traversed. Ordinarily this should
361 		 * be stabilised under page lock but it's not strictly
362 		 * necessary in this case as we just want to pin the inode, not
363 		 * update the radix tree or anything like that.
364 		 *
365 		 * The RCU read lock is taken as the inode is finally freed
366 		 * under RCU. If the mapping still matches expectations then the
367 		 * mapping->host can be safely accessed as being a valid inode.
368 		 */
369 		rcu_read_lock();
370 
371 		if (READ_ONCE(page->mapping) != mapping) {
372 			rcu_read_unlock();
373 			put_page(page);
374 
375 			goto again;
376 		}
377 
378 		inode = READ_ONCE(mapping->host);
379 		if (!inode) {
380 			rcu_read_unlock();
381 			put_page(page);
382 
383 			goto again;
384 		}
385 
386 		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
387 		key->shared.i_seq = get_inode_sequence_number(inode);
388 		key->shared.pgoff = page_to_pgoff(tail);
389 		rcu_read_unlock();
390 	}
391 
392 out:
393 	put_page(page);
394 	return err;
395 }
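
/*
 * Summary of the key contents produced above (an illustrative recap of the
 * three cases in terms of the union futex_key fields; no new ABI implied):
 *
 *   !fshared (fast path):   key->private.mm      = current->mm
 *                           key->private.address = page-aligned address
 *
 *   fshared, PageAnon:      key->private.mm      = current->mm
 *                           key->private.address = page-aligned address
 *                           key->both.offset    |= FUT_OFF_MMSHARED
 *
 *   fshared, file-backed:   key->shared.i_seq    = inode sequence number
 *                           key->shared.pgoff    = page_to_pgoff(tail)
 *                           key->both.offset    |= FUT_OFF_INODE
 *
 * Two processes mapping the same file page therefore derive
 * futex_match()ing keys even though their virtual addresses differ, while
 * private futexes match on (mm, address) alone.
 */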
396 
397 /**
398  * fault_in_user_writeable() - Fault in user address and verify RW access
399  * @uaddr:	pointer to faulting user space address
400  *
401  * Slow path to fixup the fault we just took in the atomic write
402  * access to @uaddr.
403  *
404  * We have no generic implementation of a non-destructive write to the
405  * user address. We know that we faulted in the atomic pagefault
406  * disabled section so we might as well avoid the #PF overhead by
407  * calling get_user_pages() right away.
408  */
409 int fault_in_user_writeable(u32 __user *uaddr)
410 {
411 	struct mm_struct *mm = current->mm;
412 	int ret;
413 
414 	mmap_read_lock(mm);
415 	ret = fixup_user_fault(mm, (unsigned long)uaddr,
416 			       FAULT_FLAG_WRITE, NULL);
417 	mmap_read_unlock(mm);
418 
419 	return ret < 0 ? ret : 0;
420 }
421 
422 /**
423  * futex_top_waiter() - Return the highest priority waiter on a futex
424  * @hb:		the hash bucket the futex_q's reside in
425  * @key:	the futex key (to distinguish it from the futex_q's of other futexes)
426  *
427  * Must be called with the hb lock held.
428  */
429 struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key)
430 {
431 	struct futex_q *this;
432 
433 	plist_for_each_entry(this, &hb->chain, list) {
434 		if (futex_match(&this->key, key))
435 			return this;
436 	}
437 	return NULL;
438 }
439 
440 int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32 uval, u32 newval)
441 {
442 	int ret;
443 
444 	pagefault_disable();
445 	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
446 	pagefault_enable();
447 
448 	return ret;
449 }
450 
451 int futex_get_value_locked(u32 *dest, u32 __user *from)
452 {
453 	int ret;
454 
455 	pagefault_disable();
456 	ret = __get_user(*dest, from);
457 	pagefault_enable();
458 
459 	return ret ? -EFAULT : 0;
460 }
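
/*
 * The *_locked() accessors above are used in a common retry pattern: do
 * the access with page faults disabled while holding the hash bucket
 * lock, and on failure drop the lock, fault the page in legitimately and
 * start over. A condensed sketch (modelled on futex_wait_setup() and
 * simplified; hb, uaddr and the retry label belong to the caller):
 *
 *	ret = futex_get_value_locked(&uval, uaddr);
 *	if (ret) {
 *		futex_q_unlock(hb);
 *		if (get_user(uval, uaddr))
 *			return -EFAULT;
 *		goto retry;
 *	}
 */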
461 
462 /**
463  * wait_for_owner_exiting - Block until the owner has exited
464  * @ret: owner's current futex lock status
465  * @exiting:	Pointer to the exiting task
466  *
467  * Caller must hold a refcount on @exiting.
468  */
469 void wait_for_owner_exiting(int ret, struct task_struct *exiting)
470 {
471 	if (ret != -EBUSY) {
472 		WARN_ON_ONCE(exiting);
473 		return;
474 	}
475 
476 	if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
477 		return;
478 
479 	mutex_lock(&exiting->futex_exit_mutex);
480 	/*
481 	 * No point in doing state checking here. If the waiter got here
482 	 * while the task was in exec()->exec_futex_release() then it can
483 	 * have any FUTEX_STATE_* value when the waiter has acquired the
484 	 * mutex: OK if the task is still running, EXITING or DEAD if it
485 	 * already reached exit(). Highly unlikely and not a problem. Just
486 	 * one more round through the futex maze.
487 	 */
488 	mutex_unlock(&exiting->futex_exit_mutex);
489 
490 	put_task_struct(exiting);
491 }
492 
493 /**
494  * __futex_unqueue() - Remove the futex_q from its futex_hash_bucket
495  * @q:	The futex_q to unqueue
496  *
497  * The q->lock_ptr must not be NULL and must be held by the caller.
498  */
499 void __futex_unqueue(struct futex_q *q)
500 {
501 	struct futex_hash_bucket *hb;
502 
503 	if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
504 		return;
505 	lockdep_assert_held(q->lock_ptr);
506 
507 	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
508 	plist_del(&q->list, &hb->chain);
509 	futex_hb_waiters_dec(hb);
510 }
511 
512 /* The key must be already stored in q->key. */
513 struct futex_hash_bucket *futex_q_lock(struct futex_q *q)
514 	__acquires(&hb->lock)
515 {
516 	struct futex_hash_bucket *hb;
517 
518 	hb = futex_hash(&q->key);
519 
520 	/*
521 	 * Increment the counter before taking the lock so that
522 	 * a potential waker won't miss a task that is about to sleep but is
523 	 * still waiting for the spinlock. This is safe as all futex_q_lock()
524 	 * users end up calling futex_queue(). Similarly, for housekeeping,
525 	 * decrement the counter at futex_q_unlock() when some error has
526 	 * occurred and we don't end up adding the task to the list.
527 	 */
528 	futex_hb_waiters_inc(hb); /* implies smp_mb(); (A) */
529 
530 	q->lock_ptr = &hb->lock;
531 
532 	spin_lock(&hb->lock);
533 	return hb;
534 }
535 
536 void futex_q_unlock(struct futex_hash_bucket *hb)
537 	__releases(&hb->lock)
538 {
539 	spin_unlock(&hb->lock);
540 	futex_hb_waiters_dec(hb);
541 }
542 
543 void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb)
544 {
545 	int prio;
546 
547 	/*
548 	 * The priority used to register this element is
549 	 * - either the real thread-priority for the real-time threads
550 	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
551 	 * - or MAX_RT_PRIO for non-RT threads.
552 	 * Thus, all RT-threads are woken first in priority order, and
553 	 * the others are woken last, in FIFO order.
554 	 */
555 	prio = min(current->normal_prio, MAX_RT_PRIO);
556 
557 	plist_node_init(&q->list, prio);
558 	plist_add(&q->list, &hb->chain);
559 	q->task = current;
560 }
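
/*
 * Wake-order example for the priority rule above (illustrative numbers):
 * an RT waiter with normal_prio 10 is queued at plist priority 10, while
 * a SCHED_OTHER waiter is queued at MAX_RT_PRIO (100). The plist keeps
 * entries sorted by ascending priority value, so the RT waiter is found
 * first by the wake path; non-RT waiters all share priority MAX_RT_PRIO
 * and therefore remain in the FIFO order in which they were queued.
 */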
561 
562 /**
563  * futex_unqueue() - Remove the futex_q from its futex_hash_bucket
564  * @q:	The futex_q to unqueue
565  *
566  * The q->lock_ptr must not be held by the caller. A call to futex_unqueue() must
567  * be paired with exactly one earlier call to futex_queue().
568  *
569  * Return:
570  *  - 1 - if the futex_q was still queued (and we unqueued it);
571  *  - 0 - if the futex_q was already removed by the waking thread
572  */
573 int futex_unqueue(struct futex_q *q)
574 {
575 	spinlock_t *lock_ptr;
576 	int ret = 0;
577 
578 	/* In the common case we don't take the spinlock, which is nice. */
579 retry:
580 	/*
581 	 * q->lock_ptr can change between this read and the following spin_lock.
582 	 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
583 	 * optimizing lock_ptr out of the logic below.
584 	 */
585 	lock_ptr = READ_ONCE(q->lock_ptr);
586 	if (lock_ptr != NULL) {
587 		spin_lock(lock_ptr);
588 		/*
589 		 * q->lock_ptr can change between reading it and
590 		 * spin_lock(), causing us to take the wrong lock.  This
591 		 * corrects the race condition.
592 		 *
593 		 * Reasoning goes like this: if we have the wrong lock,
594 		 * q->lock_ptr must have changed (maybe several times)
595 		 * between reading it and the spin_lock().  It can
596 		 * change again after the spin_lock() but only if it was
597 		 * already changed before the spin_lock().  It cannot,
598 		 * however, change back to the original value.  Therefore
599 		 * we can detect whether we acquired the correct lock.
600 		 */
601 		if (unlikely(lock_ptr != q->lock_ptr)) {
602 			spin_unlock(lock_ptr);
603 			goto retry;
604 		}
605 		__futex_unqueue(q);
606 
607 		BUG_ON(q->pi_state);
608 
609 		spin_unlock(lock_ptr);
610 		ret = 1;
611 	}
612 
613 	return ret;
614 }
615 
616 /*
617  * PI futexes cannot be requeued and must remove themselves from the
618  * hash bucket. The hash bucket lock (i.e. lock_ptr) is held.
619  */
620 void futex_unqueue_pi(struct futex_q *q)
621 {
622 	__futex_unqueue(q);
623 
624 	BUG_ON(!q->pi_state);
625 	put_pi_state(q->pi_state);
626 	q->pi_state = NULL;
627 }
628 
629 /* Constants for the pending_op argument of handle_futex_death */
630 #define HANDLE_DEATH_PENDING	true
631 #define HANDLE_DEATH_LIST	false
632 
633 /*
634  * Process a futex-list entry, check whether it's owned by the
635  * dying task, and do notification if so:
636  */
637 static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
638 			      bool pi, bool pending_op)
639 {
640 	u32 uval, nval, mval;
641 	pid_t owner;
642 	int err;
643 
644 	/* Futex address must be 32-bit aligned */
645 	if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
646 		return -1;
647 
648 retry:
649 	if (get_user(uval, uaddr))
650 		return -1;
651 
652 	/*
653 	 * Special case for regular (non PI) futexes. The unlock path in
654 	 * user space has two race scenarios:
655 	 *
656 	 * 1. The unlock path releases the user space futex value and
657 	 *    before it can execute the futex() syscall to wake up
658 	 *    waiters it is killed.
659 	 *
660 	 * 2. A woken up waiter is killed before it can acquire the
661 	 *    futex in user space.
662 	 *
663 	 * In the second case, the wake up notification could be generated
664 	 * by the unlock path in user space after setting the futex value
665 	 * to zero or by the kernel after setting the OWNER_DIED bit below.
666 	 *
667 	 * In both cases the TID validation below prevents a wakeup of
668 	 * potential waiters which can cause these waiters to block
669 	 * forever.
670 	 *
671 	 * In both cases the following conditions are met:
672 	 *
673 	 *	1) task->robust_list->list_op_pending != NULL
674 	 *	   @pending_op == true
675 	 *	2) The owner part of user space futex value == 0
676 	 *	3) Regular futex: @pi == false
677 	 *
678 	 * If these conditions are met, it is safe to attempt waking up a
679 	 * potential waiter without touching the user space futex value and
680 	 * trying to set the OWNER_DIED bit. If the futex value is zero,
681 	 * the rest of the user space mutex state is consistent, so a woken
682 	 * waiter will just take over the uncontended futex. Setting the
683 	 * OWNER_DIED bit would create inconsistent state and malfunction
684 	 * of the user space owner died handling. Otherwise, the OWNER_DIED
685 	 * bit is already set, and the woken waiter is expected to deal with
686 	 * this.
687 	 */
688 	owner = uval & FUTEX_TID_MASK;
689 
690 	if (pending_op && !pi && !owner) {
691 		futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
692 		return 0;
693 	}
694 
695 	if (owner != task_pid_vnr(curr))
696 		return 0;
697 
698 	/*
699 	 * Ok, this dying thread is truly holding a futex
700 	 * of interest. Set the OWNER_DIED bit atomically
701 	 * via cmpxchg, and if the value had FUTEX_WAITERS
702 	 * set, wake up a waiter (if any). (We have to do a
703 	 * futex_wake() even if OWNER_DIED is already set -
704 	 * to handle the rare but possible case of recursive
705 	 * thread-death.) The rest of the cleanup is done in
706 	 * userspace.
707 	 */
708 	mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
709 
710 	/*
711 	 * We are not holding a lock here, but we want to have
712 	 * the pagefault_disable/enable() protection because
713 	 * we want to handle the fault gracefully. If the
714 	 * access fails we try to fault in the futex with R/W
715 	 * verification via get_user_pages. get_user() above
716 	 * does not guarantee R/W access. If that fails we
717 	 * give up and leave the futex locked.
718 	 */
719 	if ((err = futex_cmpxchg_value_locked(&nval, uaddr, uval, mval))) {
720 		switch (err) {
721 		case -EFAULT:
722 			if (fault_in_user_writeable(uaddr))
723 				return -1;
724 			goto retry;
725 
726 		case -EAGAIN:
727 			cond_resched();
728 			goto retry;
729 
730 		default:
731 			WARN_ON_ONCE(1);
732 			return err;
733 		}
734 	}
735 
736 	if (nval != uval)
737 		goto retry;
738 
739 	/*
740 	 * Wake robust non-PI futexes here. The wakeup of
741 	 * PI futexes happens in exit_pi_state():
742 	 */
743 	if (!pi && (uval & FUTEX_WAITERS))
744 		futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
745 
746 	return 0;
747 }
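
/*
 * Worked example of the value transition above (illustrative TID): the
 * dying owner's TID is 1234 and the futex word is FUTEX_WAITERS | 1234,
 * i.e. 0x800004d2. The cmpxchg replaces it with FUTEX_WAITERS |
 * FUTEX_OWNER_DIED (0xc0000000), clearing the TID bits, and because
 * FUTEX_WAITERS was set one waiter is woken. That waiter observes
 * FUTEX_OWNER_DIED and performs owner-died recovery in user space.
 */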
748 
749 /*
750  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
751  */
752 static inline int fetch_robust_entry(struct robust_list __user **entry,
753 				     struct robust_list __user * __user *head,
754 				     unsigned int *pi)
755 {
756 	unsigned long uentry;
757 
758 	if (get_user(uentry, (unsigned long __user *)head))
759 		return -EFAULT;
760 
761 	*entry = (void __user *)(uentry & ~1UL);
762 	*pi = uentry & 1;
763 
764 	return 0;
765 }
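
/*
 * Encoding example (illustrative address): if user space linked an entry
 * at 0x7ffff7a3c0e0 that describes a PI futex, it stores the pointer with
 * bit 0 set, i.e. 0x7ffff7a3c0e1. fetch_robust_entry() then yields
 * *entry = 0x7ffff7a3c0e0 and *pi = 1.
 */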
766 
767 /*
768  * Walk curr->robust_list (very carefully, it's a userspace list!)
769  * and mark any locks found there dead, and notify any waiters.
770  *
771  * We silently return on any sign of list-walking problem.
772  */
773 static void exit_robust_list(struct task_struct *curr)
774 {
775 	struct robust_list_head __user *head = curr->robust_list;
776 	struct robust_list __user *entry, *next_entry, *pending;
777 	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
778 	unsigned int next_pi;
779 	unsigned long futex_offset;
780 	int rc;
781 
782 	/*
783 	 * Fetch the list head (which was registered earlier, via
784 	 * sys_set_robust_list()):
785 	 */
786 	if (fetch_robust_entry(&entry, &head->list.next, &pi))
787 		return;
788 	/*
789 	 * Fetch the relative futex offset:
790 	 */
791 	if (get_user(futex_offset, &head->futex_offset))
792 		return;
793 	/*
794 	 * Fetch any possibly pending lock-add first, and handle it
795 	 * if it exists:
796 	 */
797 	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
798 		return;
799 
800 	next_entry = NULL;	/* avoid warning with gcc */
801 	while (entry != &head->list) {
802 		/*
803 		 * Fetch the next entry in the list before calling
804 		 * handle_futex_death:
805 		 */
806 		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
807 		/*
808 		 * A pending lock might already be on the list, so
809 		 * don't process it twice:
810 		 */
811 		if (entry != pending) {
812 			if (handle_futex_death((void __user *)entry + futex_offset,
813 						curr, pi, HANDLE_DEATH_LIST))
814 				return;
815 		}
816 		if (rc)
817 			return;
818 		entry = next_entry;
819 		pi = next_pi;
820 		/*
821 		 * Avoid excessively long or circular lists:
822 		 */
823 		if (!--limit)
824 			break;
825 
826 		cond_resched();
827 	}
828 
829 	if (pending) {
830 		handle_futex_death((void __user *)pending + futex_offset,
831 				   curr, pip, HANDLE_DEATH_PENDING);
832 	}
833 }
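
/*
 * User space counterpart, for orientation (an illustrative sketch of what
 * a C library does, one head per thread; "struct my_robust_mutex" and its
 * "futex_word"/"list" members are hypothetical names, the head layout
 * comes from include/uapi/linux/futex.h):
 *
 *	static struct robust_list_head head;
 *
 *	head.list.next	     = &head.list;
 *	head.futex_offset    = offsetof(struct my_robust_mutex, futex_word)
 *			       - offsetof(struct my_robust_mutex, list);
 *	head.list_op_pending = NULL;
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * Each robust mutex the thread holds is linked into head.list via its
 * embedded list entry, and list_op_pending points at the entry whose
 * lock/unlock is currently in flight, which is what the
 * HANDLE_DEATH_PENDING case above deals with.
 */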
834 
835 #ifdef CONFIG_COMPAT
836 static void __user *futex_uaddr(struct robust_list __user *entry,
837 				compat_long_t futex_offset)
838 {
839 	compat_uptr_t base = ptr_to_compat(entry);
840 	void __user *uaddr = compat_ptr(base + futex_offset);
841 
842 	return uaddr;
843 }
844 
845 /*
846  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
847  */
848 static inline int
849 compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
850 		   compat_uptr_t __user *head, unsigned int *pi)
851 {
852 	if (get_user(*uentry, head))
853 		return -EFAULT;
854 
855 	*entry = compat_ptr((*uentry) & ~1);
856 	*pi = (unsigned int)(*uentry) & 1;
857 
858 	return 0;
859 }
860 
861 /*
862  * Walk curr->robust_list (very carefully, it's a userspace list!)
863  * and mark any locks found there dead, and notify any waiters.
864  *
865  * We silently return on any sign of list-walking problem.
866  */
867 static void compat_exit_robust_list(struct task_struct *curr)
868 {
869 	struct compat_robust_list_head __user *head = curr->compat_robust_list;
870 	struct robust_list __user *entry, *next_entry, *pending;
871 	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
872 	unsigned int next_pi;
873 	compat_uptr_t uentry, next_uentry, upending;
874 	compat_long_t futex_offset;
875 	int rc;
876 
877 	/*
878 	 * Fetch the list head (which was registered earlier, via
879 	 * sys_set_robust_list()):
880 	 */
881 	if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
882 		return;
883 	/*
884 	 * Fetch the relative futex offset:
885 	 */
886 	if (get_user(futex_offset, &head->futex_offset))
887 		return;
888 	/*
889 	 * Fetch any possibly pending lock-add first, and handle it
890 	 * if it exists:
891 	 */
892 	if (compat_fetch_robust_entry(&upending, &pending,
893 			       &head->list_op_pending, &pip))
894 		return;
895 
896 	next_entry = NULL;	/* avoid warning with gcc */
897 	while (entry != (struct robust_list __user *) &head->list) {
898 		/*
899 		 * Fetch the next entry in the list before calling
900 		 * handle_futex_death:
901 		 */
902 		rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
903 			(compat_uptr_t __user *)&entry->next, &next_pi);
904 		/*
905 		 * A pending lock might already be on the list, so
906 		 * don't process it twice:
907 		 */
908 		if (entry != pending) {
909 			void __user *uaddr = futex_uaddr(entry, futex_offset);
910 
911 			if (handle_futex_death(uaddr, curr, pi,
912 					       HANDLE_DEATH_LIST))
913 				return;
914 		}
915 		if (rc)
916 			return;
917 		uentry = next_uentry;
918 		entry = next_entry;
919 		pi = next_pi;
920 		/*
921 		 * Avoid excessively long or circular lists:
922 		 */
923 		if (!--limit)
924 			break;
925 
926 		cond_resched();
927 	}
928 	if (pending) {
929 		void __user *uaddr = futex_uaddr(pending, futex_offset);
930 
931 		handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
932 	}
933 }
934 #endif
935 
936 #ifdef CONFIG_FUTEX_PI
937 
938 /*
939  * This task is holding PI mutexes at exit time => bad.
940  * Kernel cleans up PI-state, but userspace is likely hosed.
941  * (Robust-futex cleanup is separate and might save the day for userspace.)
942  */
943 static void exit_pi_state_list(struct task_struct *curr)
944 {
945 	struct list_head *next, *head = &curr->pi_state_list;
946 	struct futex_pi_state *pi_state;
947 	struct futex_hash_bucket *hb;
948 	union futex_key key = FUTEX_KEY_INIT;
949 
950 	/*
951 	 * We are a ZOMBIE and nobody can enqueue itself on
952 	 * pi_state_list anymore, but we have to be careful
953 	 * versus waiters unqueueing themselves:
954 	 */
955 	raw_spin_lock_irq(&curr->pi_lock);
956 	while (!list_empty(head)) {
957 		next = head->next;
958 		pi_state = list_entry(next, struct futex_pi_state, list);
959 		key = pi_state->key;
960 		hb = futex_hash(&key);
961 
962 		/*
963 		 * We can race against put_pi_state() removing itself from the
964 		 * list (a waiter going away). put_pi_state() will first
965 		 * decrement the reference count and then modify the list, so
966 		 * it's possible to see the list entry but fail this reference
967 		 * acquire.
968 		 *
969 		 * In that case, drop the locks to let put_pi_state() make
970 		 * progress and retry the loop.
971 		 */
972 		if (!refcount_inc_not_zero(&pi_state->refcount)) {
973 			raw_spin_unlock_irq(&curr->pi_lock);
974 			cpu_relax();
975 			raw_spin_lock_irq(&curr->pi_lock);
976 			continue;
977 		}
978 		raw_spin_unlock_irq(&curr->pi_lock);
979 
980 		spin_lock(&hb->lock);
981 		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
982 		raw_spin_lock(&curr->pi_lock);
983 		/*
984 		 * We dropped the pi-lock, so re-check whether this
985 		 * task still owns the PI-state:
986 		 */
987 		if (head->next != next) {
988 			/* retain curr->pi_lock for the loop invariant */
989 			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
990 			spin_unlock(&hb->lock);
991 			put_pi_state(pi_state);
992 			continue;
993 		}
994 
995 		WARN_ON(pi_state->owner != curr);
996 		WARN_ON(list_empty(&pi_state->list));
997 		list_del_init(&pi_state->list);
998 		pi_state->owner = NULL;
999 
1000 		raw_spin_unlock(&curr->pi_lock);
1001 		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1002 		spin_unlock(&hb->lock);
1003 
1004 		rt_mutex_futex_unlock(&pi_state->pi_mutex);
1005 		put_pi_state(pi_state);
1006 
1007 		raw_spin_lock_irq(&curr->pi_lock);
1008 	}
1009 	raw_spin_unlock_irq(&curr->pi_lock);
1010 }
1011 #else
1012 static inline void exit_pi_state_list(struct task_struct *curr) { }
1013 #endif
1014 
1015 static void futex_cleanup(struct task_struct *tsk)
1016 {
1017 	if (unlikely(tsk->robust_list)) {
1018 		exit_robust_list(tsk);
1019 		tsk->robust_list = NULL;
1020 	}
1021 
1022 #ifdef CONFIG_COMPAT
1023 	if (unlikely(tsk->compat_robust_list)) {
1024 		compat_exit_robust_list(tsk);
1025 		tsk->compat_robust_list = NULL;
1026 	}
1027 #endif
1028 
1029 	if (unlikely(!list_empty(&tsk->pi_state_list)))
1030 		exit_pi_state_list(tsk);
1031 }
1032 
1033 /**
1034  * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD
1035  * @tsk:	task to set the state on
1036  *
1037  * Set the futex exit state of the task locklessly. The futex waiter code
1038  * observes that state when a task is exiting and loops until the task has
1039  * actually finished the futex cleanup. The worst case for this is that the
1040  * waiter runs through the wait loop until the state becomes visible.
1041  *
1042  * This is called from the recursive fault handling path in make_task_dead().
1043  *
1044  * This is best effort. Either the futex exit code has run already or
1045  * not. If the OWNER_DIED bit has been set on the futex then the waiter can
1046  * take it over. If not, the problem is pushed back to user space. If the
1047  * futex exit code did not run yet, then an already queued waiter might
1048  * block forever, but there is nothing which can be done about that.
1049  */
1050 void futex_exit_recursive(struct task_struct *tsk)
1051 {
1052 	/* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
1053 	if (tsk->futex_state == FUTEX_STATE_EXITING)
1054 		mutex_unlock(&tsk->futex_exit_mutex);
1055 	tsk->futex_state = FUTEX_STATE_DEAD;
1056 }
1057 
1058 static void futex_cleanup_begin(struct task_struct *tsk)
1059 {
1060 	/*
1061 	 * Prevent various race issues against a concurrent incoming waiter
1062 	 * including live locks by forcing the waiter to block on
1063 	 * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
1064 	 * attach_to_pi_owner().
1065 	 */
1066 	mutex_lock(&tsk->futex_exit_mutex);
1067 
1068 	/*
1069 	 * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
1070 	 *
1071 	 * This ensures that all subsequent checks of tsk->futex_state in
1072 	 * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
1073 	 * tsk->pi_lock held.
1074 	 *
1075 	 * It guarantees also that a pi_state which was queued right before
1076 	 * the state change under tsk->pi_lock by a concurrent waiter must
1077 	 * be observed in exit_pi_state_list().
1078 	 */
1079 	raw_spin_lock_irq(&tsk->pi_lock);
1080 	tsk->futex_state = FUTEX_STATE_EXITING;
1081 	raw_spin_unlock_irq(&tsk->pi_lock);
1082 }
1083 
1084 static void futex_cleanup_end(struct task_struct *tsk, int state)
1085 {
1086 	/*
1087 	 * Lockless store. The only side effect is that an observer might
1088 	 * take another loop until it becomes visible.
1089 	 */
1090 	tsk->futex_state = state;
1091 	/*
1092 	 * Drop the exit protection. This unblocks waiters which observed
1093 	 * FUTEX_STATE_EXITING to reevaluate the state.
1094 	 */
1095 	mutex_unlock(&tsk->futex_exit_mutex);
1096 }
1097 
1098 void futex_exec_release(struct task_struct *tsk)
1099 {
1100 	/*
1101 	 * The state handling is done for consistency, but in the case of
1102 	 * exec() there is no way to prevent further damage as the PID stays
1103 	 * the same. But for the unlikely and arguably buggy case that a
1104 	 * futex is held across exec(), this provides as much state
1105 	 * consistency protection as is possible.
1106 	 */
1107 	futex_cleanup_begin(tsk);
1108 	futex_cleanup(tsk);
1109 	/*
1110 	 * Reset the state to FUTEX_STATE_OK. The task is alive and about to
1111 	 * exec a new binary.
1112 	 */
1113 	futex_cleanup_end(tsk, FUTEX_STATE_OK);
1114 }
1115 
1116 void futex_exit_release(struct task_struct *tsk)
1117 {
1118 	futex_cleanup_begin(tsk);
1119 	futex_cleanup(tsk);
1120 	futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
1121 }
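
/*
 * Resulting exit-state transitions, summarizing the helpers above:
 *
 *	FUTEX_STATE_OK      --futex_cleanup_begin()--> FUTEX_STATE_EXITING
 *	FUTEX_STATE_EXITING --futex_exit_release()---> FUTEX_STATE_DEAD
 *	FUTEX_STATE_EXITING --futex_exec_release()---> FUTEX_STATE_OK
 *
 * futex_exit_recursive() forces FUTEX_STATE_DEAD on the recursive-fault
 * path. A waiter that observes FUTEX_STATE_EXITING in attach_to_pi_owner()
 * blocks on futex_exit_mutex (see wait_for_owner_exiting()) and retries
 * once the final state is visible.
 */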
1122 
1123 static int __init futex_init(void)
1124 {
1125 	unsigned int futex_shift;
1126 	unsigned long i;
1127 
1128 #if CONFIG_BASE_SMALL
1129 	futex_hashsize = 16;
1130 #else
1131 	futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
1132 #endif
1133 
1134 	futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
1135 					       futex_hashsize, 0, 0,
1136 					       &futex_shift, NULL,
1137 					       futex_hashsize, futex_hashsize);
1138 	futex_hashsize = 1UL << futex_shift;
1139 
1140 	for (i = 0; i < futex_hashsize; i++) {
1141 		atomic_set(&futex_queues[i].waiters, 0);
1142 		plist_head_init(&futex_queues[i].chain);
1143 		spin_lock_init(&futex_queues[i].lock);
1144 	}
1145 
1146 	return 0;
1147 }
1148 core_initcall(futex_init);
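
/*
 * Sizing example for futex_init() (illustrative CPU count): with 8
 * possible CPUs the default table is roundup_pow_of_two(256 * 8) = 2048
 * buckets; with CONFIG_BASE_SMALL it is fixed at 16.
 * alloc_large_system_hash() reports the size it actually allocated via
 * futex_shift, hence the futex_hashsize = 1UL << futex_shift update above.
 */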
1149