// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  fs/eventpoll.c (Efficient event retrieval implementation)
 *  Copyright (C) 2001,...,2009	 Davide Libenzi
 *
 *  Davide Libenzi <davidel@xmailserver.org>
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/rbtree.h>
#include <linux/wait.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/mman.h>
#include <linux/atomic.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/rculist.h>
#include <net/busy_poll.h>

/*
 * LOCKING:
 * There are three levels of locking required by epoll:
 *
 * 1) epnested_mutex (mutex)
 * 2) ep->mtx (mutex)
 * 3) ep->lock (rwlock)
 *
 * The acquire order is the one listed above, from 1 to 3.
 * We need a rwlock (ep->lock) because we manipulate objects
 * from inside the poll callback, which might be triggered from
 * a wake_up() that in turn might be called from IRQ context.
 * So we can't sleep inside the poll callback and hence we need
 * a spinlock. During the event transfer loop (from kernel to
 * user space) we could end up sleeping due to a copy_to_user(), so
 * we need a lock that will allow us to sleep. This lock is a
 * mutex (ep->mtx). It is acquired during the event transfer loop,
 * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
 * The epnested_mutex is acquired when inserting an epoll fd onto another
 * epoll fd. We do this so that we can walk the epoll tree and ensure that
 * the insertion does not create a cycle of epoll file descriptors, which
 * could lead to deadlock. We need a global mutex to prevent two
 * simultaneous inserts (A into B and B into A) from racing and
 * constructing a cycle without either insert observing that it is
 * about to do so.
 * It is necessary to acquire multiple "ep->mtx"es at once in the
 * case when one epoll fd is added to another. In this case, we
 * always acquire the locks in the order of nesting (i.e. after
 * epoll_ctl(e1, EPOLL_CTL_ADD, e2), e1->mtx will always be acquired
 * before e2->mtx). Since we disallow cycles of epoll file
 * descriptors, this ensures that the mutexes are well-ordered. In
 * order to communicate this nesting to lockdep, when walking a tree
 * of epoll file descriptors, we use the current recursion depth as
 * the lockdep subkey.
 * It is possible to drop the "ep->mtx" and to use the global
 * mutex "epnested_mutex" (together with "ep->lock") to have it working,
 * but having "ep->mtx" makes the interface more scalable.
 * Events that require holding "epnested_mutex" are very rare, while for
 * normal operations the epoll private "ep->mtx" guarantees
 * better scalability.
 */
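
/*
 * For illustration only, a minimal sketch of the nested acquire order
 * described above (the "e1_ep"/"e2_ep" names are hypothetical, not code in
 * this file). After epoll_ctl(e1, EPOLL_CTL_ADD, e2, &ev) from userspace,
 * the locks are taken roughly as:
 *
 *	mutex_lock(&e1_ep->mtx);		// outer epoll fd, depth 0
 *	mutex_lock_nested(&e2_ep->mtx, 1);	// inner epoll fd, depth 1
 *	...
 *	mutex_unlock(&e2_ep->mtx);
 *	mutex_unlock(&e1_ep->mtx);
 *
 * The recursion-depth-as-subkey scheme (see ep_insert() and
 * __ep_eventpoll_poll()) is what lets lockdep tell the otherwise
 * identical mutex classes apart.
 */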

/* Epoll private bits inside the event mask */
#define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET | EPOLLEXCLUSIVE)

#define EPOLLINOUT_BITS (EPOLLIN | EPOLLOUT)

#define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | EPOLLERR | EPOLLHUP | \
				EPOLLWAKEUP | EPOLLET | EPOLLEXCLUSIVE)

/* Maximum number of nesting allowed inside epoll sets */
#define EP_MAX_NESTS 4

#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))

#define EP_UNACTIVE_PTR ((void *) -1L)

#define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))

struct epoll_filefd {
	struct file *file;
	int fd;
} __packed;

/* Wait structure used by the poll hooks */
struct eppoll_entry {
	/* List header used to link this structure to the "struct epitem" */
	struct eppoll_entry *next;

	/* The "base" pointer is set to the container "struct epitem" */
	struct epitem *base;

	/*
	 * Wait queue item that will be linked to the target file wait
	 * queue head.
	 */
	wait_queue_entry_t wait;

	/* The wait queue head that linked the "wait" wait queue item */
	wait_queue_head_t *whead;
};

/*
 * Each file descriptor added to the eventpoll interface will
 * have an entry of this type linked to the "rbr" RB tree.
 * Avoid increasing the size of this struct, there can be many thousands
 * of these on a server and we do not want this to take another cache line.
 */
struct epitem {
	union {
		/* RB tree node links this structure to the eventpoll RB tree */
		struct rb_node rbn;
		/* Used to free the struct epitem */
		struct rcu_head rcu;
	};

	/* List header used to link this structure to the eventpoll ready list */
	struct list_head rdllink;

	/*
	 * Works together with "struct eventpoll"->ovflist in keeping the
	 * single linked chain of items.
	 */
	struct epitem *next;

	/* The file descriptor information this item refers to */
	struct epoll_filefd ffd;

	/*
	 * Protected by file->f_lock; true for a to-be-released epitem already
	 * removed from the "struct file" items list. Together with
	 * eventpoll->refcount it orchestrates "struct eventpoll" disposal.
	 */
	bool dying;

	/* List containing poll wait queues */
	struct eppoll_entry *pwqlist;

	/* The "container" of this item */
	struct eventpoll *ep;

	/* List header used to link this item to the "struct file" items list */
	struct hlist_node fllink;

	/* wakeup_source used when EPOLLWAKEUP is set */
	struct wakeup_source __rcu *ws;

	/* The structure that describes the interested events and the source fd */
	struct epoll_event event;
};

/*
 * This structure is stored inside the "private_data" member of the file
 * structure and represents the main data structure for the eventpoll
 * interface.
 */
struct eventpoll {
	/*
	 * This mutex is used to ensure that files are not removed
	 * while epoll is using them. This is held during the event
	 * collection loop, the file cleanup path, the epoll file exit
	 * code and the ctl operations.
	 */
	struct mutex mtx;

	/* Wait queue used by sys_epoll_wait() */
	wait_queue_head_t wq;

	/* Wait queue used by file->poll() */
	wait_queue_head_t poll_wait;

	/* List of ready file descriptors */
	struct list_head rdllist;

	/* Lock which protects rdllist and ovflist */
	rwlock_t lock;

	/* RB tree root used to store monitored fd structs */
	struct rb_root_cached rbr;

	/*
	 * This is a single linked list that chains all the "struct epitem" that
	 * had events happen while we were transferring ready events to
	 * userspace without holding ->lock.
	 */
	struct epitem *ovflist;

	/* wakeup_source used during the event transfer loop (ep_start_scan()/ep_done_scan()) */
	struct wakeup_source *ws;

	/* The user that created the eventpoll descriptor */
	struct user_struct *user;

	struct file *file;

	/* used to optimize loop detection check */
	u64 gen;
	struct hlist_head refs;

	/*
	 * usage count, used together with epitem->dying to
	 * orchestrate the disposal of this struct
	 */
	refcount_t refcount;

#ifdef CONFIG_NET_RX_BUSY_POLL
	/* used to track busy poll napi_id */
	unsigned int napi_id;
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* tracks wakeup nests for lockdep validation */
	u8 nests;
#endif
};

/* Wrapper struct used by poll queueing */
struct ep_pqueue {
	poll_table pt;
	struct epitem *epi;
};

/*
 * Configuration options available inside /proc/sys/fs/epoll/
 */
/* Maximum number of epoll watched descriptors, per user */
static long max_user_watches __read_mostly;

/* Used for cycle detection */
static DEFINE_MUTEX(epnested_mutex);

static u64 loop_check_gen = 0;

/* Used to check for epoll file descriptor inclusion loops */
static struct eventpoll *inserting_into;

/* Slab cache used to allocate "struct epitem" */
static struct kmem_cache *epi_cache __read_mostly;

/* Slab cache used to allocate "struct eppoll_entry" */
static struct kmem_cache *pwq_cache __read_mostly;

/*
 * List of files with newly added links, where we may need to limit the number
 * of emanating paths. Protected by the epnested_mutex.
 */
struct epitems_head {
	struct hlist_head epitems;
	struct epitems_head *next;
};
static struct epitems_head *tfile_check_list = EP_UNACTIVE_PTR;

static struct kmem_cache *ephead_cache __read_mostly;

static inline void free_ephead(struct epitems_head *head)
{
	if (head)
		kmem_cache_free(ephead_cache, head);
}

static void list_file(struct file *file)
{
	struct epitems_head *head;

	head = container_of(file->f_ep, struct epitems_head, epitems);
	if (!head->next) {
		head->next = tfile_check_list;
		tfile_check_list = head;
	}
}

static void unlist_file(struct epitems_head *head)
{
	struct epitems_head *to_free = head;
	struct hlist_node *p = rcu_dereference(hlist_first_rcu(&head->epitems));

	if (p) {
		struct epitem *epi = container_of(p, struct epitem, fllink);

		spin_lock(&epi->ffd.file->f_lock);
		if (!hlist_empty(&head->epitems))
			to_free = NULL;
		head->next = NULL;
		spin_unlock(&epi->ffd.file->f_lock);
	}
	free_ephead(to_free);
}

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static long long_zero;
static long long_max = LONG_MAX;

static struct ctl_table epoll_table[] = {
	{
		.procname	= "max_user_watches",
		.data		= &max_user_watches,
		.maxlen		= sizeof(max_user_watches),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &long_zero,
		.extra2		= &long_max,
	},
	{ }
};

static void __init epoll_sysctls_init(void)
{
	register_sysctl("fs/epoll", epoll_table);
}
#else
#define epoll_sysctls_init() do { } while (0)
#endif /* CONFIG_SYSCTL */
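
/*
 * For illustration: with CONFIG_SYSCTL enabled, the limit registered above
 * is visible from userspace, e.g.:
 *
 *	$ cat /proc/sys/fs/epoll/max_user_watches
 *	# echo 1000000 > /proc/sys/fs/epoll/max_user_watches
 *
 * (Values shown are examples only; the boot-time default is derived from
 * available low memory, see epoll(7).)
 */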

static const struct file_operations eventpoll_fops;

static inline int is_file_epoll(struct file *f)
{
	return f->f_op == &eventpoll_fops;
}

/* Setup the structure that is used as key for the RB tree */
static inline void ep_set_ffd(struct epoll_filefd *ffd,
			      struct file *file, int fd)
{
	ffd->file = file;
	ffd->fd = fd;
}

/* Compare RB tree keys */
static inline int ep_cmp_ffd(struct epoll_filefd *p1,
			     struct epoll_filefd *p2)
{
	return (p1->file > p2->file ? +1 :
	        (p1->file < p2->file ? -1 : p1->fd - p2->fd));
}

/* Tells us if the item is currently linked */
static inline int ep_is_linked(struct epitem *epi)
{
	return !list_empty(&epi->rdllink);
}

static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_entry_t *p)
{
	return container_of(p, struct eppoll_entry, wait);
}

/* Get the "struct epitem" from a wait queue pointer */
static inline struct epitem *ep_item_from_wait(wait_queue_entry_t *p)
{
	return container_of(p, struct eppoll_entry, wait)->base;
}

/**
 * ep_events_available - Checks if ready events might be available.
 *
 * @ep: Pointer to the eventpoll context.
 *
 * Return: a value different from %zero if ready events are available,
 *         or %zero otherwise.
 */
static inline int ep_events_available(struct eventpoll *ep)
{
	return !list_empty_careful(&ep->rdllist) ||
		READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
static bool ep_busy_loop_end(void *p, unsigned long start_time)
{
	struct eventpoll *ep = p;

	return ep_events_available(ep) || busy_loop_timeout(start_time);
}

/*
 * Busy poll if busy polling is globally enabled, a supporting socket has
 * been found and no events are available. The busy loop returns on
 * need_resched() or when ep_events_available() becomes true.
 *
 * We must do our busy polling with irqs enabled.
 */
static bool ep_busy_loop(struct eventpoll *ep, int nonblock)
{
	unsigned int napi_id = READ_ONCE(ep->napi_id);

	if ((napi_id >= MIN_NAPI_ID) && net_busy_loop_on()) {
		napi_busy_loop(napi_id, nonblock ? NULL : ep_busy_loop_end, ep, false,
			       BUSY_POLL_BUDGET);
		if (ep_events_available(ep))
			return true;
		/*
		 * Busy poll timed out.  Drop NAPI ID for now, we can add
		 * it back in when we have moved a socket with a valid NAPI
		 * ID onto the ready list.
		 */
		ep->napi_id = 0;
		return false;
	}
	return false;
}

/*
 * Set epoll busy poll NAPI ID from sk.
 */
static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
{
	struct eventpoll *ep;
	unsigned int napi_id;
	struct socket *sock;
	struct sock *sk;

	if (!net_busy_loop_on())
		return;

	sock = sock_from_file(epi->ffd.file);
	if (!sock)
		return;

	sk = sock->sk;
	if (!sk)
		return;

	napi_id = READ_ONCE(sk->sk_napi_id);
	ep = epi->ep;

	/*
	 * Non-NAPI IDs can be rejected, and there is nothing to do if
	 * we already have this ID.
	 */
	if (napi_id < MIN_NAPI_ID || napi_id == ep->napi_id)
		return;

	/* record NAPI ID for use in next busy poll */
	ep->napi_id = napi_id;
}

#else

static inline bool ep_busy_loop(struct eventpoll *ep, int nonblock)
{
	return false;
}

static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
{
}

#endif /* CONFIG_NET_RX_BUSY_POLL */

/*
 * As described in commit 0ccf831cb ("lockdep: annotate epoll"), the use of
 * wait queues used by epoll is done in a very controlled manner. Wake ups
 * can nest inside each other, but are never done with the same locking.
 * For example:
 *
 *   dfd = socket(...);
 *   efd1 = epoll_create();
 *   efd2 = epoll_create();
 *   epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, ...);
 *   epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...);
 *
 * When a packet arrives to the device underneath "dfd", the net code will
 * issue a wake_up() on its poll wake list. Epoll (efd1) has installed a
 * callback wakeup entry on that queue, and the wake_up() performed by the
 * "dfd" net code will end up in ep_poll_callback(). At this point epoll
 * (efd1) notices that it may have some event ready, so it needs to wake up
 * the waiters on its poll wait list (efd2). So it calls ep_poll_safewake()
 * that ends up in another wake_up(), after having checked the recursion
 * constraints: no more than EP_MAX_NESTS levels, to avoid stack blasting.
 *
 * When CONFIG_DEBUG_LOCK_ALLOC is enabled, make sure lockdep can handle
 * this special case of epoll.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC

static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
			     unsigned pollflags)
{
	struct eventpoll *ep_src;
	unsigned long flags;
	u8 nests = 0;

	/*
	 * To set the subclass or nesting level for spin_lock_irqsave_nested()
	 * it might be natural to create a per-cpu nest count. However, since
	 * we can recurse on ep->poll_wait.lock, and a non-raw spinlock can
	 * schedule() in the -rt kernel, the per-cpu variables are no longer
	 * protected. Thus, we are introducing a per-eventpoll nest field.
	 * If we are not being called from ep_poll_callback(), epi is NULL and
	 * we are at the first level of nesting, 0. Otherwise, we are being
	 * called from ep_poll_callback() and if a previous wakeup source is
	 * not an epoll file itself, we are at depth 1 since the wakeup source
	 * is depth 0. If the wakeup source is a previous epoll file in the
	 * wakeup chain then we use its nests value and record ours as
	 * nests + 1. The previous epoll file nests value is stable since it
	 * is already holding its own poll_wait.lock.
	 */
	if (epi) {
		if ((is_file_epoll(epi->ffd.file))) {
			ep_src = epi->ffd.file->private_data;
			nests = ep_src->nests;
		} else {
			nests = 1;
		}
	}
	spin_lock_irqsave_nested(&ep->poll_wait.lock, flags, nests);
	ep->nests = nests + 1;
	wake_up_locked_poll(&ep->poll_wait, EPOLLIN | pollflags);
	ep->nests = 0;
	spin_unlock_irqrestore(&ep->poll_wait.lock, flags);
}

#else

static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
			     __poll_t pollflags)
{
	wake_up_poll(&ep->poll_wait, EPOLLIN | pollflags);
}

#endif

static void ep_remove_wait_queue(struct eppoll_entry *pwq)
{
	wait_queue_head_t *whead;

	rcu_read_lock();
	/*
	 * If it is cleared by POLLFREE, it should be rcu-safe.
	 * If we read NULL we need a barrier paired with
	 * smp_store_release() in ep_poll_callback(), otherwise
	 * we rely on whead->lock.
	 */
	whead = smp_load_acquire(&pwq->whead);
	if (whead)
		remove_wait_queue(whead, &pwq->wait);
	rcu_read_unlock();
}

/*
 * This function unregisters poll callbacks from the associated file
 * descriptor.  Must be called with "mtx" held.
 */
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
	struct eppoll_entry **p = &epi->pwqlist;
	struct eppoll_entry *pwq;

	while ((pwq = *p) != NULL) {
		*p = pwq->next;
		ep_remove_wait_queue(pwq);
		kmem_cache_free(pwq_cache, pwq);
	}
}

/* call only when ep->mtx is held */
static inline struct wakeup_source *ep_wakeup_source(struct epitem *epi)
{
	return rcu_dereference_check(epi->ws, lockdep_is_held(&epi->ep->mtx));
}

/* call only when ep->mtx is held */
static inline void ep_pm_stay_awake(struct epitem *epi)
{
	struct wakeup_source *ws = ep_wakeup_source(epi);

	if (ws)
		__pm_stay_awake(ws);
}

static inline bool ep_has_wakeup_source(struct epitem *epi)
{
	return rcu_access_pointer(epi->ws) ? true : false;
}

/* call when ep->mtx cannot be held (ep_poll_callback) */
static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
{
	struct wakeup_source *ws;

	rcu_read_lock();
	ws = rcu_dereference(epi->ws);
	if (ws)
		__pm_stay_awake(ws);
	rcu_read_unlock();
}


/*
 * ep->mtx needs to be held because we could be hit by
 * eventpoll_release_file() and epoll_ctl().
 */
static void ep_start_scan(struct eventpoll *ep, struct list_head *txlist)
{
	/*
	 * Steal the ready list, and re-init the original one to the
	 * empty list. Also, set ep->ovflist to NULL so that events
	 * happening while we loop without locks are not lost. We cannot
	 * let the poll callback queue directly on ep->rdllist, because
	 * we want the "sproc" callback to be able to do it in a lockless
	 * way.
	 */
	lockdep_assert_irqs_enabled();
	write_lock_irq(&ep->lock);
	list_splice_init(&ep->rdllist, txlist);
	WRITE_ONCE(ep->ovflist, NULL);
	write_unlock_irq(&ep->lock);
}

static void ep_done_scan(struct eventpoll *ep,
			 struct list_head *txlist)
{
	struct epitem *epi, *nepi;

	write_lock_irq(&ep->lock);
	/*
	 * During the time we spent inside the "sproc" callback, some
	 * other events might have been queued by the poll callback.
	 * We re-insert them inside the main ready-list here.
	 */
	for (nepi = READ_ONCE(ep->ovflist); (epi = nepi) != NULL;
	     nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
		/*
		 * We need to check if the item is already in the list.
		 * During the "sproc" callback execution time, items are
		 * queued into ->ovflist but the "txlist" might already
		 * contain them, and the list_splice() below takes care of them.
		 */
		if (!ep_is_linked(epi)) {
			/*
			 * ->ovflist is LIFO, so we have to reverse it in order
			 * to keep it in FIFO order.
			 */
			list_add(&epi->rdllink, &ep->rdllist);
			ep_pm_stay_awake(epi);
		}
	}
	/*
	 * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
	 * releasing the lock, events will be queued in the normal way inside
	 * ep->rdllist.
	 */
	WRITE_ONCE(ep->ovflist, EP_UNACTIVE_PTR);

	/*
	 * Quickly re-inject items left on "txlist".
	 */
	list_splice(txlist, &ep->rdllist);
	__pm_relax(ep->ws);

	if (!list_empty(&ep->rdllist)) {
		if (waitqueue_active(&ep->wq))
			wake_up(&ep->wq);
	}

	write_unlock_irq(&ep->lock);
}
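
/*
 * Worked example of the re-injection above (illustrative only): if events
 * fired on items A, B, C in that order while ->ovflist was active, the
 * lockless chaining in ep_poll_callback() leaves ->ovflist as C -> B -> A.
 * Walking that chain with list_add() (head insertion) yields rdllist order
 * A, B, C again, restoring FIFO, before the leftover "txlist" entries are
 * spliced back in at the list head.
 */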

static void epi_rcu_free(struct rcu_head *head)
{
	struct epitem *epi = container_of(head, struct epitem, rcu);

	kmem_cache_free(epi_cache, epi);
}

static void ep_get(struct eventpoll *ep)
{
	refcount_inc(&ep->refcount);
}

/*
 * Returns true if the event poll can be disposed of.
 */
static bool ep_refcount_dec_and_test(struct eventpoll *ep)
{
	if (!refcount_dec_and_test(&ep->refcount))
		return false;

	WARN_ON_ONCE(!RB_EMPTY_ROOT(&ep->rbr.rb_root));
	return true;
}

static void ep_free(struct eventpoll *ep)
{
	mutex_destroy(&ep->mtx);
	free_uid(ep->user);
	wakeup_source_unregister(ep->ws);
	kfree(ep);
}

/*
 * Removes a "struct epitem" from the eventpoll RB tree and deallocates
 * all the associated resources. Must be called with "mtx" held.
 * If the dying flag is set, do the removal only if force is true.
 * This prevents ep_clear_and_put() from dropping all the ep references
 * while running concurrently with eventpoll_release_file().
 * Returns true if the eventpoll can be disposed of.
 */
static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
{
	struct file *file = epi->ffd.file;
	struct epitems_head *to_free;
	struct hlist_head *head;

	lockdep_assert_irqs_enabled();

	/*
	 * Removes poll wait queue hooks.
	 */
	ep_unregister_pollwait(ep, epi);

	/* Remove the current item from the list of epoll hooks */
	spin_lock(&file->f_lock);
	if (epi->dying && !force) {
		spin_unlock(&file->f_lock);
		return false;
	}

	to_free = NULL;
	head = file->f_ep;
	if (head->first == &epi->fllink && !epi->fllink.next) {
		/* See eventpoll_release() for details. */
		WRITE_ONCE(file->f_ep, NULL);
		if (!is_file_epoll(file)) {
			struct epitems_head *v;

			v = container_of(head, struct epitems_head, epitems);
			if (!smp_load_acquire(&v->next))
				to_free = v;
		}
	}
	hlist_del_rcu(&epi->fllink);
	spin_unlock(&file->f_lock);
	free_ephead(to_free);

	rb_erase_cached(&epi->rbn, &ep->rbr);

	write_lock_irq(&ep->lock);
	if (ep_is_linked(epi))
		list_del_init(&epi->rdllink);
	write_unlock_irq(&ep->lock);

	wakeup_source_unregister(ep_wakeup_source(epi));
	/*
	 * At this point it is safe to free the eventpoll item. Use the union
	 * field epi->rcu, since we are trying to minimize the size of
	 * 'struct epitem'. The 'rbn' field is no longer in use. Protected by
	 * ep->mtx. The rcu read side, reverse_path_check_proc(), does not make
	 * use of the rbn field.
	 */
	call_rcu(&epi->rcu, epi_rcu_free);

	percpu_counter_dec(&ep->user->epoll_watches);
	return ep_refcount_dec_and_test(ep);
}

/*
 * ep_remove variant for callers owning an additional reference to the ep
 */
static void ep_remove_safe(struct eventpoll *ep, struct epitem *epi)
{
	WARN_ON_ONCE(__ep_remove(ep, epi, false));
}

static void ep_clear_and_put(struct eventpoll *ep)
{
	struct rb_node *rbp, *next;
	struct epitem *epi;
	bool dispose;

	/* We need to release all tasks waiting for this file */
	if (waitqueue_active(&ep->poll_wait))
		ep_poll_safewake(ep, NULL, 0);

	mutex_lock(&ep->mtx);

	/*
	 * Walks through the whole tree, unregistering poll callbacks.
	 */
	for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		epi = rb_entry(rbp, struct epitem, rbn);

		ep_unregister_pollwait(ep, epi);
		cond_resched();
	}

	/*
	 * Walks through the whole tree and tries to free each "struct epitem".
	 * Note that ep_remove_safe() will not remove the epitem in case of a
	 * racing eventpoll_release_file(); the latter will do the removal.
	 * At this point we are sure no poll callbacks will be lingering around.
	 * Since we still own a reference to the eventpoll struct, the loop can't
	 * dispose of it.
	 */
	for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = next) {
		next = rb_next(rbp);
		epi = rb_entry(rbp, struct epitem, rbn);
		ep_remove_safe(ep, epi);
		cond_resched();
	}

	dispose = ep_refcount_dec_and_test(ep);
	mutex_unlock(&ep->mtx);

	if (dispose)
		ep_free(ep);
}

static int ep_eventpoll_release(struct inode *inode, struct file *file)
{
	struct eventpoll *ep = file->private_data;

	if (ep)
		ep_clear_and_put(ep);

	return 0;
}

static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt, int depth);

static __poll_t __ep_eventpoll_poll(struct file *file, poll_table *wait, int depth)
{
	struct eventpoll *ep = file->private_data;
	LIST_HEAD(txlist);
	struct epitem *epi, *tmp;
	poll_table pt;
	__poll_t res = 0;

	init_poll_funcptr(&pt, NULL);

	/* Insert inside our poll wait queue */
	poll_wait(file, &ep->poll_wait, wait);

	/*
	 * Proceed to find out if wanted events are really available inside
	 * the ready list.
	 */
	mutex_lock_nested(&ep->mtx, depth);
	ep_start_scan(ep, &txlist);
	list_for_each_entry_safe(epi, tmp, &txlist, rdllink) {
		if (ep_item_poll(epi, &pt, depth + 1)) {
			res = EPOLLIN | EPOLLRDNORM;
			break;
		} else {
			/*
			 * Item has been dropped into the ready list by the poll
			 * callback, but it's not actually ready, as far as the
			 * caller-requested events go. We can remove it here.
			 */
			__pm_relax(ep_wakeup_source(epi));
			list_del_init(&epi->rdllink);
		}
	}
	ep_done_scan(ep, &txlist);
	mutex_unlock(&ep->mtx);
	return res;
}

/*
 * The ffd.file pointer may be in the process of being torn down due to
 * being closed, but we may not have finished eventpoll_release() yet.
 *
 * Normally, even with the atomic_long_inc_not_zero, the file may have
 * been free'd and then gotten re-allocated to something else (since
 * files are not RCU-delayed, they are SLAB_TYPESAFE_BY_RCU).
 *
 * But for epoll, users hold the ep->mtx mutex, and as such any file in
 * the process of being free'd will block in eventpoll_release_file()
 * and thus the underlying file allocation will not be free'd, and the
 * file re-use cannot happen.
 *
 * For the same reason we can avoid a rcu_read_lock() around the
 * operation - 'ffd.file' cannot go away even if the refcount has
 * reached zero (but we must still not call out to ->poll() functions
 * etc).
 */
static struct file *epi_fget(const struct epitem *epi)
{
	struct file *file;

	file = epi->ffd.file;
	if (!atomic_long_inc_not_zero(&file->f_count))
		file = NULL;
	return file;
}

/*
 * Differs from ep_eventpoll_poll() in that internal callers already have
 * the ep->mtx so we need to start from depth=1, such that mutex_lock_nested()
 * is correctly annotated.
 */
static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
			     int depth)
{
	struct file *file = epi_fget(epi);
	__poll_t res;

	/*
	 * We could return EPOLLERR | EPOLLHUP or something, but let's
	 * treat this more as "file doesn't exist, poll didn't happen".
	 */
	if (!file)
		return 0;

	pt->_key = epi->event.events;
	if (!is_file_epoll(file))
		res = vfs_poll(file, pt);
	else
		res = __ep_eventpoll_poll(file, pt, depth);
	fput(file);
	return res & epi->event.events;
}

static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
{
	return __ep_eventpoll_poll(file, wait, 0);
}

#ifdef CONFIG_PROC_FS
static void ep_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct eventpoll *ep = f->private_data;
	struct rb_node *rbp;

	mutex_lock(&ep->mtx);
	for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		struct epitem *epi = rb_entry(rbp, struct epitem, rbn);
		struct inode *inode = file_inode(epi->ffd.file);

		seq_printf(m, "tfd: %8d events: %8x data: %16llx "
			   " pos:%lli ino:%lx sdev:%x\n",
			   epi->ffd.fd, epi->event.events,
			   (long long)epi->event.data,
			   (long long)epi->ffd.file->f_pos,
			   inode->i_ino, inode->i_sb->s_dev);
		if (seq_has_overflowed(m))
			break;
	}
	mutex_unlock(&ep->mtx);
}
#endif

/* File callbacks that implement the eventpoll file behaviour */
static const struct file_operations eventpoll_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= ep_show_fdinfo,
#endif
	.release	= ep_eventpoll_release,
	.poll		= ep_eventpoll_poll,
	.llseek		= noop_llseek,
};

/*
 * This is called from eventpoll_release() to unlink files from the eventpoll
 * interface. We need this facility to correctly clean up files that are
 * closed without being removed from the eventpoll interface.
 */
void eventpoll_release_file(struct file *file)
{
	struct eventpoll *ep;
	struct epitem *epi;
	bool dispose;

	/*
	 * Use the 'dying' flag to prevent a concurrent ep_clear_and_put() from
	 * touching the epitems list before eventpoll_release_file() can access
	 * the ep->mtx.
	 */
again:
	spin_lock(&file->f_lock);
	if (file->f_ep && file->f_ep->first) {
		epi = hlist_entry(file->f_ep->first, struct epitem, fllink);
		epi->dying = true;
		spin_unlock(&file->f_lock);

		/*
		 * ep access is safe as we still own a reference to the ep
		 * struct
		 */
		ep = epi->ep;
		mutex_lock(&ep->mtx);
		dispose = __ep_remove(ep, epi, true);
		mutex_unlock(&ep->mtx);

		if (dispose)
			ep_free(ep);
		goto again;
	}
	spin_unlock(&file->f_lock);
}

static int ep_alloc(struct eventpoll **pep)
{
	struct eventpoll *ep;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (unlikely(!ep))
		return -ENOMEM;

	mutex_init(&ep->mtx);
	rwlock_init(&ep->lock);
	init_waitqueue_head(&ep->wq);
	init_waitqueue_head(&ep->poll_wait);
	INIT_LIST_HEAD(&ep->rdllist);
	ep->rbr = RB_ROOT_CACHED;
	ep->ovflist = EP_UNACTIVE_PTR;
	ep->user = get_current_user();
	refcount_set(&ep->refcount, 1);

	*pep = ep;

	return 0;
}

/*
 * Search the file inside the eventpoll tree. The RB tree operations
 * are protected by the "mtx" mutex, and ep_find() must be called with
 * "mtx" held.
 */
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
	int kcmp;
	struct rb_node *rbp;
	struct epitem *epi, *epir = NULL;
	struct epoll_filefd ffd;

	ep_set_ffd(&ffd, file, fd);
	for (rbp = ep->rbr.rb_root.rb_node; rbp; ) {
		epi = rb_entry(rbp, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
		if (kcmp > 0)
			rbp = rbp->rb_right;
		else if (kcmp < 0)
			rbp = rbp->rb_left;
		else {
			epir = epi;
			break;
		}
	}

	return epir;
}

#ifdef CONFIG_KCMP
static struct epitem *ep_find_tfd(struct eventpoll *ep, int tfd, unsigned long toff)
{
	struct rb_node *rbp;
	struct epitem *epi;

	for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		epi = rb_entry(rbp, struct epitem, rbn);
		if (epi->ffd.fd == tfd) {
			if (toff == 0)
				return epi;
			else
				toff--;
		}
		cond_resched();
	}

	return NULL;
}

struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
				     unsigned long toff)
{
	struct file *file_raw;
	struct eventpoll *ep;
	struct epitem *epi;

	if (!is_file_epoll(file))
		return ERR_PTR(-EINVAL);

	ep = file->private_data;

	mutex_lock(&ep->mtx);
	epi = ep_find_tfd(ep, tfd, toff);
	if (epi)
		file_raw = epi->ffd.file;
	else
		file_raw = ERR_PTR(-ENOENT);
	mutex_unlock(&ep->mtx);

	return file_raw;
}
#endif /* CONFIG_KCMP */
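
/*
 * Illustrative userspace sketch (an assumption based on kcmp(2), not code
 * from this file): the CONFIG_KCMP hook above backs kcmp(2)'s KCMP_EPOLL_TFD
 * mode, which lets a checkpoint tool ask whether an fd of one process refers
 * to the same file as a target slot in another process's epoll set:
 *
 *	struct kcmp_epoll_slot slot = { .efd = epfd, .tfd = tfd, .toff = 0 };
 *	int ret = syscall(SYS_kcmp, pid1, pid2, KCMP_EPOLL_TFD,
 *			  fd1, (unsigned long)&slot);
 *
 * Field names are from include/uapi/linux/kcmp.h; "toff" disambiguates
 * duplicate tfd entries, matching the toff-- walk in ep_find_tfd() above.
 */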

/*
 * Adds a new entry to the tail of the list in a lockless way, i.e.
 * multiple CPUs are allowed to call this function concurrently.
 *
 * Beware: it is necessary to prevent any other modifications of the
 *         existing list until all changes are completed, in other words
 *         concurrent list_add_tail_lockless() calls should be protected
 *         with a read lock, where write lock acts as a barrier which
 *         makes sure all list_add_tail_lockless() calls are fully
 *         completed.
 *
 *         Also an element can be locklessly added to the list only in one
 *         direction, i.e. either to the tail or to the head, otherwise
 *         concurrent access will corrupt the list.
 *
 * Return: %false if the element has already been added to the list, %true
 * otherwise.
 */
static inline bool list_add_tail_lockless(struct list_head *new,
					  struct list_head *head)
{
	struct list_head *prev;

	/*
	 * This is a simple 'new->next = head' operation, but cmpxchg()
	 * is used in order to detect that the same element has just been
	 * added to the list from another CPU: the winner observes
	 * new->next == new.
	 */
	if (!try_cmpxchg(&new->next, &new, head))
		return false;

	/*
	 * Initially ->next of a new element must be updated with the head
	 * (we are inserting to the tail) and only then pointers are atomically
	 * exchanged.  XCHG guarantees memory ordering, thus ->next should be
	 * updated before pointers are actually swapped and pointers are
	 * swapped before prev->next is updated.
	 */

	prev = xchg(&head->prev, new);

	/*
	 * It is safe to modify prev->next and new->prev, because a new element
	 * is added only to the tail and new->next is updated before XCHG.
	 */

	prev->next = new;
	new->prev = prev;

	return true;
}
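
/*
 * Worked example (illustrative): suppose CPUs 0 and 1 race to add the same
 * "new" entry. Both initially see new->next == new (the entry is
 * self-linked after list_del_init()). Only one try_cmpxchg() above can flip
 * new->next from "new" to "head"; the loser observes new->next != new and
 * returns false, so the entry is queued exactly once. Distinct entries
 * added concurrently serialize on the xchg() of head->prev, which hands
 * each inserter a unique "prev" to link behind.
 */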

/*
 * Chains a new epi entry to the tail of the ep->ovflist in a lockless way,
 * i.e. multiple CPUs are allowed to call this function concurrently.
 *
 * Return: %false if the epi element has already been chained, %true otherwise.
 */
static inline bool chain_epi_lockless(struct epitem *epi)
{
	struct eventpoll *ep = epi->ep;

	/* Fast preliminary check */
	if (epi->next != EP_UNACTIVE_PTR)
		return false;

	/* Check that the same epi has not been just chained from another CPU */
	if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
		return false;

	/* Atomically exchange tail */
	epi->next = xchg(&ep->ovflist, epi);

	return true;
}

/*
 * This is the callback that is passed to the wait queue wakeup
 * mechanism. It is called by the stored file descriptors when they
 * have events to report.
 *
 * This callback takes a read lock in order not to contend with concurrent
 * events from another file descriptor, thus all modifications to ->rdllist
 * or ->ovflist are lockless.  The read lock is paired with the write lock
 * from ep_start_scan()/ep_done_scan(), which stops all list modifications
 * and guarantees that the state of the lists is seen correctly.
 *
 * Another thing worth mentioning is that ep_poll_callback() can be called
 * concurrently for the same @epi from different CPUs if the poll table was
 * initialized with several wait queue entries.  Plural wakeups from
 * different CPUs of a single wait queue are serialized by wq.lock, but the
 * case when multiple wait queues are used should be detected accordingly.
 * This is detected using the cmpxchg() operation.
 */
static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	int pwake = 0;
	struct epitem *epi = ep_item_from_wait(wait);
	struct eventpoll *ep = epi->ep;
	__poll_t pollflags = key_to_poll(key);
	unsigned long flags;
	int ewake = 0;

	read_lock_irqsave(&ep->lock, flags);

	ep_set_busy_poll_napi_id(epi);

	/*
	 * If the event mask does not contain any poll(2) event, we consider the
	 * descriptor to be disabled. This condition is likely the effect of the
	 * EPOLLONESHOT bit that disables the descriptor when an event is received,
	 * until the next EPOLL_CTL_MOD is issued.
	 */
	if (!(epi->event.events & ~EP_PRIVATE_BITS))
		goto out_unlock;

	/*
	 * Check the events coming with the callback. At this stage, not
	 * every device reports the events in the "key" parameter of the
	 * callback. We need to be able to handle both cases here, hence the
	 * test for "key" != NULL before the event match test.
	 */
	if (pollflags && !(pollflags & epi->event.events))
		goto out_unlock;

	/*
	 * If we are transferring events to userspace, we can hold no locks
	 * (because we're accessing user memory, and because of linux f_op->poll()
	 * semantics). All the events that happen during that period of time are
	 * chained in ep->ovflist and requeued later on.
	 */
	if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
		if (chain_epi_lockless(epi))
			ep_pm_stay_awake_rcu(epi);
	} else if (!ep_is_linked(epi)) {
		/* In the usual case, add event to ready list. */
		if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
			ep_pm_stay_awake_rcu(epi);
	}

	/*
	 * Wake up ( if active ) both the eventpoll wait list and the ->poll()
	 * wait list.
	 */
	if (waitqueue_active(&ep->wq)) {
		if ((epi->event.events & EPOLLEXCLUSIVE) &&
					!(pollflags & POLLFREE)) {
			switch (pollflags & EPOLLINOUT_BITS) {
			case EPOLLIN:
				if (epi->event.events & EPOLLIN)
					ewake = 1;
				break;
			case EPOLLOUT:
				if (epi->event.events & EPOLLOUT)
					ewake = 1;
				break;
			case 0:
				ewake = 1;
				break;
			}
		}
		if (sync)
			wake_up_sync(&ep->wq);
		else
			wake_up(&ep->wq);
	}
	if (waitqueue_active(&ep->poll_wait))
		pwake++;

out_unlock:
	read_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(ep, epi, pollflags & EPOLL_URING_WAKE);

	if (!(epi->event.events & EPOLLEXCLUSIVE))
		ewake = 1;

	if (pollflags & POLLFREE) {
		/*
		 * If we race with ep_remove_wait_queue() it can miss
		 * ->whead = NULL and do another remove_wait_queue() after
		 * us, so we can't use __remove_wait_queue().
		 */
		list_del_init(&wait->entry);
		/*
		 * ->whead != NULL protects us from the race with
		 * ep_clear_and_put() or ep_remove(), ep_remove_wait_queue()
		 * takes whead->lock held by the caller. Once we nullify it,
		 * nothing protects ep/epi or even wait.
		 */
		smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL);
	}

	return ewake;
}
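
/*
 * Illustrative note on the EPOLLEXCLUSIVE handling above (a userspace
 * sketch; "epfd[i]" and "listen_fd" are hypothetical names): with several
 * threads each doing
 *
 *	ev.events = EPOLLIN | EPOLLEXCLUSIVE;
 *	epoll_ctl(epfd[i], EPOLL_CTL_ADD, listen_fd, &ev);
 *
 * the wait entries are added with add_wait_queue_exclusive() (see
 * ep_ptable_queue_proc() below), so a single wake_up() on listen_fd stops
 * after the first exclusive waiter reporting a successful wakeup (nonzero
 * "ewake" above) instead of waking every epoll set - the usual mitigation
 * for thundering-herd accept() loops. See epoll_ctl(2) for the exact
 * semantics.
 */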

/*
 * This is the callback that is used to add our wait queue to the
 * target file wakeup lists.
 */
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
				 poll_table *pt)
{
	struct ep_pqueue *epq = container_of(pt, struct ep_pqueue, pt);
	struct epitem *epi = epq->epi;
	struct eppoll_entry *pwq;

	if (unlikely(!epi))	// an earlier allocation has failed
		return;

	pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL);
	if (unlikely(!pwq)) {
		epq->epi = NULL;
		return;
	}

	init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
	pwq->whead = whead;
	pwq->base = epi;
	if (epi->event.events & EPOLLEXCLUSIVE)
		add_wait_queue_exclusive(whead, &pwq->wait);
	else
		add_wait_queue(whead, &pwq->wait);
	pwq->next = epi->pwqlist;
	epi->pwqlist = pwq;
}

static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
{
	int kcmp;
	struct rb_node **p = &ep->rbr.rb_root.rb_node, *parent = NULL;
	struct epitem *epic;
	bool leftmost = true;

	while (*p) {
		parent = *p;
		epic = rb_entry(parent, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
		if (kcmp > 0) {
			p = &parent->rb_right;
			leftmost = false;
		} else
			p = &parent->rb_left;
	}
	rb_link_node(&epi->rbn, parent, p);
	rb_insert_color_cached(&epi->rbn, &ep->rbr, leftmost);
}



#define PATH_ARR_SIZE 5
/*
 * These are the numbers of paths of length 1 to 5 that we allow to emanate
 * from a single file of interest. For example, we allow 1000 paths of length
 * 1 to emanate from each file of interest. This essentially represents the
 * potential wakeup paths, which need to be limited in order to avoid massive
 * uncontrolled wakeup storms. The common use case should be a single ep which
 * is connected to n file sources. In this case each file source has 1 path
 * of length 1. Thus, the numbers below should be more than sufficient. These
 * path limits are enforced during an EPOLL_CTL_ADD operation, since a modify
 * and delete can't add additional paths. Protected by the epnested_mutex.
 */
static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
static int path_count[PATH_ARR_SIZE];
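
/*
 * Worked example (illustrative): if epoll fd e1 watches target file f,
 * e2 watches e1, and e3 watches e2, then f has one wakeup path of length 1
 * (f->e1), one of length 2 (f->e1->e2) and one of length 3 (f->e1->e2->e3).
 * reverse_path_check_proc() below walks these reverse paths from f and
 * charges each one against path_limits[] at its depth; note that in the
 * current code depth-0 (length-1) paths are never charged, see
 * path_count_inc(), so only fan-out through nested epoll files is capped.
 */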

static int path_count_inc(int nests)
{
	/* Allow an arbitrary number of depth 1 paths */
	if (nests == 0)
		return 0;

	if (++path_count[nests] > path_limits[nests])
		return -1;
	return 0;
}

static void path_count_init(void)
{
	int i;

	for (i = 0; i < PATH_ARR_SIZE; i++)
		path_count[i] = 0;
}

static int reverse_path_check_proc(struct hlist_head *refs, int depth)
{
	int error = 0;
	struct epitem *epi;

	if (depth > EP_MAX_NESTS) /* too deep nesting */
		return -1;

	/* CTL_DEL can remove links here, but that can't increase our count */
	hlist_for_each_entry_rcu(epi, refs, fllink) {
		struct hlist_head *refs = &epi->ep->refs;

		if (hlist_empty(refs))
			error = path_count_inc(depth);
		else
			error = reverse_path_check_proc(refs, depth + 1);
		if (error != 0)
			break;
	}
	return error;
}

/**
 * reverse_path_check - The tfile_check_list is a list of epitems_head, which
 *                      have links that are proposed to be newly added. We
 *                      need to make sure that those added links don't add too
 *                      many paths such that we will spend all our time waking
 *                      up eventpoll objects.
 *
 * Return: %zero if the proposed links don't create too many paths,
 *	   %-1 otherwise.
 */
static int reverse_path_check(void)
{
	struct epitems_head *p;

	for (p = tfile_check_list; p != EP_UNACTIVE_PTR; p = p->next) {
		int error;

		path_count_init();
		rcu_read_lock();
		error = reverse_path_check_proc(&p->epitems, 0);
		rcu_read_unlock();
		if (error)
			return error;
	}
	return 0;
}

static int ep_create_wakeup_source(struct epitem *epi)
{
	struct name_snapshot n;
	struct wakeup_source *ws;

	if (!epi->ep->ws) {
		epi->ep->ws = wakeup_source_register(NULL, "eventpoll");
		if (!epi->ep->ws)
			return -ENOMEM;
	}

	take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry);
	ws = wakeup_source_register(NULL, n.name.name);
	release_dentry_name_snapshot(&n);

	if (!ws)
		return -ENOMEM;
	rcu_assign_pointer(epi->ws, ws);

	return 0;
}

/* rare code path, only used when EPOLL_CTL_MOD removes a wakeup source */
static noinline void ep_destroy_wakeup_source(struct epitem *epi)
{
	struct wakeup_source *ws = ep_wakeup_source(epi);

	RCU_INIT_POINTER(epi->ws, NULL);

	/*
	 * wait for ep_pm_stay_awake_rcu to finish, synchronize_rcu is
	 * used internally by wakeup_source_remove, too (called by
	 * wakeup_source_unregister), so we cannot use call_rcu
	 */
	synchronize_rcu();
	wakeup_source_unregister(ws);
}

static int attach_epitem(struct file *file, struct epitem *epi)
{
	struct epitems_head *to_free = NULL;
	struct hlist_head *head = NULL;
	struct eventpoll *ep = NULL;

	if (is_file_epoll(file))
		ep = file->private_data;

	if (ep) {
		head = &ep->refs;
	} else if (!READ_ONCE(file->f_ep)) {
allocate:
		to_free = kmem_cache_zalloc(ephead_cache, GFP_KERNEL);
		if (!to_free)
			return -ENOMEM;
		head = &to_free->epitems;
	}
	spin_lock(&file->f_lock);
	if (!file->f_ep) {
		if (unlikely(!head)) {
			spin_unlock(&file->f_lock);
			goto allocate;
		}
		/* See eventpoll_release() for details. */
		WRITE_ONCE(file->f_ep, head);
		to_free = NULL;
	}
	hlist_add_head_rcu(&epi->fllink, file->f_ep);
	spin_unlock(&file->f_lock);
	free_ephead(to_free);
	return 0;
}

/*
 * Must be called with "mtx" held.
 */
static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
		     struct file *tfile, int fd, int full_check)
{
	int error, pwake = 0;
	__poll_t revents;
	struct epitem *epi;
	struct ep_pqueue epq;
	struct eventpoll *tep = NULL;

	if (is_file_epoll(tfile))
		tep = tfile->private_data;

	lockdep_assert_irqs_enabled();

	if (unlikely(percpu_counter_compare(&ep->user->epoll_watches,
					    max_user_watches) >= 0))
		return -ENOSPC;
	percpu_counter_inc(&ep->user->epoll_watches);

	if (!(epi = kmem_cache_zalloc(epi_cache, GFP_KERNEL))) {
		percpu_counter_dec(&ep->user->epoll_watches);
		return -ENOMEM;
	}

	/* Item initialization follows here ... */
	INIT_LIST_HEAD(&epi->rdllink);
	epi->ep = ep;
	ep_set_ffd(&epi->ffd, tfile, fd);
	epi->event = *event;
	epi->next = EP_UNACTIVE_PTR;

	if (tep)
		mutex_lock_nested(&tep->mtx, 1);
	/* Add the current item to the list of active epoll hooks for this file */
	if (unlikely(attach_epitem(tfile, epi) < 0)) {
		if (tep)
			mutex_unlock(&tep->mtx);
		kmem_cache_free(epi_cache, epi);
		percpu_counter_dec(&ep->user->epoll_watches);
		return -ENOMEM;
	}

	if (full_check && !tep)
		list_file(tfile);

	/*
	 * Add the current item to the RB tree. All RB tree operations are
	 * protected by "mtx", and ep_insert() is called with "mtx" held.
	 */
	ep_rbtree_insert(ep, epi);
	if (tep)
		mutex_unlock(&tep->mtx);

	/*
	 * ep_remove_safe() calls in the later error paths can't lead to
	 * ep_free() as the ep file itself still holds an ep reference.
	 */
	ep_get(ep);

	/* now check if we've created too many backpaths */
	if (unlikely(full_check && reverse_path_check())) {
		ep_remove_safe(ep, epi);
		return -EINVAL;
	}

	if (epi->event.events & EPOLLWAKEUP) {
		error = ep_create_wakeup_source(epi);
		if (error) {
			ep_remove_safe(ep, epi);
			return error;
		}
	}

	/* Initialize the poll table using the queue callback */
	epq.epi = epi;
	init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);

	/*
	 * Attach the item to the poll hooks and get current event bits.
	 * We can safely use the file* here because its usage count has
	 * been increased by the caller of this function. Note that after
	 * this operation completes, the poll callback can start hitting
	 * the new item.
	 */
	revents = ep_item_poll(epi, &epq.pt, 1);

	/*
	 * We have to check if something went wrong during the poll wait queue
	 * install process. Namely, an allocation for a wait queue failed due
	 * to high memory pressure.
	 */
	if (unlikely(!epq.epi)) {
		ep_remove_safe(ep, epi);
		return -ENOMEM;
	}

	/* We have to drop the new item inside our item list to keep track of it */
	write_lock_irq(&ep->lock);

	/* record NAPI ID of new item if present */
	ep_set_busy_poll_napi_id(epi);

	/* If the file is already "ready" we drop it inside the ready list */
	if (revents && !ep_is_linked(epi)) {
		list_add_tail(&epi->rdllink, &ep->rdllist);
		ep_pm_stay_awake(epi);

		/* Notify waiting tasks that events are available */
		if (waitqueue_active(&ep->wq))
			wake_up(&ep->wq);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}

	write_unlock_irq(&ep->lock);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(ep, NULL, 0);

	return 0;
}
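
/*
 * For illustration, the userspace side of the insert path above (a sketch
 * of the standard epoll API, see epoll_ctl(2); "epfd" and "sock_fd" are
 * placeholder names):
 *
 *	struct epoll_event ev = {
 *		.events = EPOLLIN | EPOLLET,
 *		.data.fd = sock_fd,
 *	};
 *	if (epoll_ctl(epfd, EPOLL_CTL_ADD, sock_fd, &ev) < 0)
 *		perror("epoll_ctl");
 *
 * This lands in ep_insert() with "mtx" held: the epitem is allocated,
 * hooked into sock_fd's wait queue via ep_ptable_queue_proc(), and queued
 * on the ready list immediately if the fd is already readable.
 */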
1640 
1641 /*
1642  * Modify the interest event mask by dropping an event if the new mask
1643  * has a match in the current file status. Must be called with "mtx" held.
1644  */
ep_modify(struct eventpoll * ep,struct epitem * epi,const struct epoll_event * event)1645 static int ep_modify(struct eventpoll *ep, struct epitem *epi,
1646 		     const struct epoll_event *event)
1647 {
1648 	int pwake = 0;
1649 	poll_table pt;
1650 
1651 	lockdep_assert_irqs_enabled();
1652 
1653 	init_poll_funcptr(&pt, NULL);
1654 
1655 	/*
1656 	 * Set the new event interest mask before calling f_op->poll();
1657 	 * otherwise we might miss an event that happens between the
1658 	 * f_op->poll() call and the new event set registering.
1659 	 */
1660 	epi->event.events = event->events; /* need barrier below */
1661 	epi->event.data = event->data; /* protected by mtx */
1662 	if (epi->event.events & EPOLLWAKEUP) {
1663 		if (!ep_has_wakeup_source(epi))
1664 			ep_create_wakeup_source(epi);
1665 	} else if (ep_has_wakeup_source(epi)) {
1666 		ep_destroy_wakeup_source(epi);
1667 	}
1668 
1669 	/*
1670 	 * The following barrier has two effects:
1671 	 *
1672 	 * 1) Flush epi changes above to other CPUs.  This ensures
1673 	 *    we do not miss events from ep_poll_callback if an
1674 	 *    event occurs immediately after we call f_op->poll().
1675 	 *    We need this because we did not take ep->lock while
1676 	 *    changing epi above (but ep_poll_callback does take
1677 	 *    ep->lock).
1678 	 *
1679 	 * 2) We also need to ensure we do not miss _past_ events
1680 	 *    when calling f_op->poll().  This barrier also
1681 	 *    pairs with the barrier in wq_has_sleeper (see
1682 	 *    comments for wq_has_sleeper).
1683 	 *
1684 	 * This barrier will now guarantee ep_poll_callback or f_op->poll
1685 	 * (or both) will notice the readiness of an item.
1686 	 */
1687 	smp_mb();
1688 
1689 	/*
1690 	 * Get current event bits. We can safely use the file* here because
1691 	 * its usage count has been increased by the caller of this function.
1692 	 * If the item is "hot" and it is not registered inside the ready
1693 	 * list, push it inside.
1694 	 */
1695 	if (ep_item_poll(epi, &pt, 1)) {
1696 		write_lock_irq(&ep->lock);
1697 		if (!ep_is_linked(epi)) {
1698 			list_add_tail(&epi->rdllink, &ep->rdllist);
1699 			ep_pm_stay_awake(epi);
1700 
1701 			/* Notify waiting tasks that events are available */
1702 			if (waitqueue_active(&ep->wq))
1703 				wake_up(&ep->wq);
1704 			if (waitqueue_active(&ep->poll_wait))
1705 				pwake++;
1706 		}
1707 		write_unlock_irq(&ep->lock);
1708 	}
1709 
1710 	/* We have to call this outside the lock */
1711 	if (pwake)
1712 		ep_poll_safewake(ep, NULL, 0);
1713 
1714 	return 0;
1715 }
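/*
 * [Editor's sketch, not part of the kernel source] A minimal userspace
 * analogue of the store/smp_mb()/load protocol used by ep_modify() and
 * ep_poll_callback() above: each side publishes its write, issues a full
 * fence, then reads the other side's state, so at least one of them
 * observes the readiness. All names below are hypothetical; C11
 * atomic_thread_fence(memory_order_seq_cst) stands in for smp_mb().
 */
#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned interest_mask;	/* plays the role of epi->event.events */
static _Atomic unsigned file_state;	/* plays the role of the file readiness */

/* ep_modify() side: update the mask, fence, then re-poll the file state. */
static bool modify_side(unsigned new_mask)
{
	atomic_store_explicit(&interest_mask, new_mask, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the smp_mb() above */
	return atomic_load_explicit(&file_state, memory_order_relaxed) & new_mask;
}

/* ep_poll_callback() side: mark the file ready, fence, then check the mask. */
static bool callback_side(unsigned events)
{
	atomic_store_explicit(&file_state, events, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with the fence above */
	return atomic_load_explicit(&interest_mask, memory_order_relaxed) & events;
}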
1716 
1717 static int ep_send_events(struct eventpoll *ep,
1718 			  struct epoll_event __user *events, int maxevents)
1719 {
1720 	struct epitem *epi, *tmp;
1721 	LIST_HEAD(txlist);
1722 	poll_table pt;
1723 	int res = 0;
1724 
1725 	/*
1726 	 * Always short-circuit for fatal signals to allow threads to make a
1727 	 * timely exit without the chance of finding more events available and
1728 	 * fetching repeatedly.
1729 	 */
1730 	if (fatal_signal_pending(current))
1731 		return -EINTR;
1732 
1733 	init_poll_funcptr(&pt, NULL);
1734 
1735 	mutex_lock(&ep->mtx);
1736 	ep_start_scan(ep, &txlist);
1737 
1738 	/*
1739 	 * We can loop without lock because we are passed a task-private list.
1740 	 * Items cannot vanish during the loop because we are holding ep->mtx.
1741 	 */
1742 	list_for_each_entry_safe(epi, tmp, &txlist, rdllink) {
1743 		struct wakeup_source *ws;
1744 		__poll_t revents;
1745 
1746 		if (res >= maxevents)
1747 			break;
1748 
1749 		/*
1750 		 * Activate ep->ws before deactivating epi->ws to prevent
1751 		 * triggering auto-suspend here (in case we reactivate epi->ws
1752 		 * below).
1753 		 *
1754 		 * This could be rearranged to delay the deactivation of epi->ws
1755 		 * instead, but then epi->ws would temporarily be out of sync
1756 		 * with ep_is_linked().
1757 		 */
1758 		ws = ep_wakeup_source(epi);
1759 		if (ws) {
1760 			if (ws->active)
1761 				__pm_stay_awake(ep->ws);
1762 			__pm_relax(ws);
1763 		}
1764 
1765 		list_del_init(&epi->rdllink);
1766 
1767 		/*
1768 		 * If the event mask intersects the caller-requested one,
1769 		 * deliver the event to userspace. Again, we are holding ep->mtx,
1770 		 * so no operations coming from userspace can change the item.
1771 		 */
1772 		revents = ep_item_poll(epi, &pt, 1);
1773 		if (!revents)
1774 			continue;
1775 
1776 		events = epoll_put_uevent(revents, epi->event.data, events);
1777 		if (!events) {
1778 			list_add(&epi->rdllink, &txlist);
1779 			ep_pm_stay_awake(epi);
1780 			if (!res)
1781 				res = -EFAULT;
1782 			break;
1783 		}
1784 		res++;
1785 		if (epi->event.events & EPOLLONESHOT)
1786 			epi->event.events &= EP_PRIVATE_BITS;
1787 		else if (!(epi->event.events & EPOLLET)) {
1788 			/*
1789 			 * If this file has been added with Level
1790 			 * Trigger mode, we need to insert back inside
1791 			 * the ready list, so that the next call to
1792 			 * epoll_wait() will check again the events
1793 			 * availability. At this point, no one can insert
1794 			 * into ep->rdllist besides us. The epoll_ctl()
1795 			 * callers are locked out by
1796 			 * ep_send_events() holding "mtx" and the
1797 			 * poll callback will queue them in ep->ovflist.
1798 			 */
1799 			list_add_tail(&epi->rdllink, &ep->rdllist);
1800 			ep_pm_stay_awake(epi);
1801 		}
1802 	}
1803 	ep_done_scan(ep, &txlist);
1804 	mutex_unlock(&ep->mtx);
1805 
1806 	return res;
1807 }
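/*
 * [Editor's sketch, not part of the kernel source] The level-triggered
 * re-queue above is what makes a still-ready fd show up on every
 * epoll_wait() call, while EPOLLET reports it only once per readiness
 * edge. A minimal userspace demonstration (error handling omitted):
 */
#include <stdbool.h>
#include <sys/epoll.h>
#include <unistd.h>

static void demo_lt_vs_et(int readable_fd, bool edge_triggered)
{
	struct epoll_event ev = {
		.events = EPOLLIN | (edge_triggered ? EPOLLET : 0),
		.data.fd = readable_fd,
	};
	int epfd = epoll_create1(0);

	epoll_ctl(epfd, EPOLL_CTL_ADD, readable_fd, &ev);

	/*
	 * With data pending and EPOLLET clear, both waits report the fd,
	 * because ep_send_events() re-inserted it into ep->rdllist. With
	 * EPOLLET set, the second wait returns 0 instead.
	 */
	epoll_wait(epfd, &ev, 1, 0);
	epoll_wait(epfd, &ev, 1, 0);
	close(epfd);
}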
1808 
1809 static struct timespec64 *ep_timeout_to_timespec(struct timespec64 *to, long ms)
1810 {
1811 	struct timespec64 now;
1812 
1813 	if (ms < 0)
1814 		return NULL;
1815 
1816 	if (!ms) {
1817 		to->tv_sec = 0;
1818 		to->tv_nsec = 0;
1819 		return to;
1820 	}
1821 
1822 	to->tv_sec = ms / MSEC_PER_SEC;
1823 	to->tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC);
1824 
1825 	ktime_get_ts64(&now);
1826 	*to = timespec64_add_safe(now, *to);
1827 	return to;
1828 }
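/*
 * [Editor's worked example] For ms = 2500 the code above yields
 * tv_sec = 2500 / 1000 = 2 and tv_nsec = 500 * NSEC_PER_MSEC = 500000000,
 * which is then added to the current time, so the caller receives an
 * absolute deadline. ms < 0 returns NULL ("block indefinitely"), and
 * ms == 0 returns a zero timespec ("do not block at all").
 */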
1829 
1830 /*
1831  * autoremove_wake_function, but remove even on failure to wake up, because we
1832  * know that default_wake_function/ttwu will only fail if the thread is already
1833  * woken, and in that case the ep_poll loop will remove the entry anyway, not
1834  * try to reuse it.
1835  */
1836 static int ep_autoremove_wake_function(struct wait_queue_entry *wq_entry,
1837 				       unsigned int mode, int sync, void *key)
1838 {
1839 	int ret = default_wake_function(wq_entry, mode, sync, key);
1840 
1841 	/*
1842 	 * Pairs with list_empty_careful in ep_poll, and ensures future loop
1843 	 * iterations see the cause of this wakeup.
1844 	 */
1845 	list_del_init_careful(&wq_entry->entry);
1846 	return ret;
1847 }
1848 
1849 /**
1850  * ep_poll - Retrieves ready events, and delivers them to the caller-supplied
1851  *           event buffer.
1852  *
1853  * @ep: Pointer to the eventpoll context.
1854  * @events: Pointer to the userspace buffer where the ready events should be
1855  *          stored.
1856  * @maxevents: Size (in terms of number of events) of the caller event buffer.
1857  * @timeout: Maximum timeout for the ready events fetch operation, in
1858  *           timespec. If the timeout is zero, the function will not block,
1859  *           while if the @timeout ptr is NULL, the function will block
1860  *           until at least one event has been retrieved (or an error
1861  *           occurred).
1862  *
1863  * Return: the number of ready events which have been fetched, or an
1864  *          error code, in case of error.
1865  */
1866 static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
1867 		   int maxevents, struct timespec64 *timeout)
1868 {
1869 	int res, eavail, timed_out = 0;
1870 	u64 slack = 0;
1871 	wait_queue_entry_t wait;
1872 	ktime_t expires, *to = NULL;
1873 
1874 	lockdep_assert_irqs_enabled();
1875 
1876 	if (timeout && (timeout->tv_sec | timeout->tv_nsec)) {
1877 		slack = select_estimate_accuracy(timeout);
1878 		to = &expires;
1879 		*to = timespec64_to_ktime(*timeout);
1880 	} else if (timeout) {
1881 		/*
1882 		 * Avoid the unnecessary trip to the wait queue loop if the
1883 		 * caller specified a non-blocking operation.
1884 		 */
1885 		timed_out = 1;
1886 	}
1887 
1888 	/*
1889 	 * This call is racy: We may or may not see events that are being added
1890 	 * to the ready list under the lock (e.g., in IRQ callbacks). For cases
1891 	 * with a non-zero timeout, this thread will check the ready list under
1892 	 * lock and will add to the wait queue.  For cases with a zero
1893 	 * timeout, the user by definition should not care and will have to
1894 	 * recheck again.
1895 	 */
1896 	eavail = ep_events_available(ep);
1897 
1898 	while (1) {
1899 		if (eavail) {
1900 			/*
1901 			 * Try to transfer events to user space. In case we get
1902 			 * 0 events and there's still timeout left over, we try
1903 			 * again in search of more luck.
1904 			 */
1905 			res = ep_send_events(ep, events, maxevents);
1906 			if (res)
1907 				return res;
1908 		}
1909 
1910 		if (timed_out)
1911 			return 0;
1912 
1913 		eavail = ep_busy_loop(ep, timed_out);
1914 		if (eavail)
1915 			continue;
1916 
1917 		if (signal_pending(current))
1918 			return -EINTR;
1919 
1920 		/*
1921 		 * Internally init_wait() uses autoremove_wake_function(),
1922 		 * thus the wait entry is removed from the wait queue on each
1923 		 * wakeup. Why is this important? With several waiters, each
1924 		 * new wakeup will hit the next waiter, giving it the chance
1925 		 * to harvest new events; otherwise a wakeup can be lost.
1926 		 * This is also good performance-wise, because on the normal
1927 		 * wakeup path there is no need to call __remove_wait_queue()
1928 		 * explicitly, so ep->lock is not taken, which would stall
1929 		 * event delivery.
1930 		 *
1931 		 * In fact, we now use an even more aggressive function that
1932 		 * unconditionally removes, because we don't reuse the wait
1933 		 * entry between loop iterations. This lets us also avoid the
1934 		 * performance issue if a process is killed, causing all of its
1935 		 * threads to wake up without being removed normally.
1936 		 */
1937 		init_wait(&wait);
1938 		wait.func = ep_autoremove_wake_function;
1939 
1940 		write_lock_irq(&ep->lock);
1941 		/*
1942 		 * Barrierless variant, waitqueue_active() is called under
1943 		 * the same lock on wakeup ep_poll_callback() side, so it
1944 		 * is safe to avoid an explicit barrier.
1945 		 */
1946 		__set_current_state(TASK_INTERRUPTIBLE);
1947 
1948 		/*
1949 		 * Do the final check under the lock. The transfer loop between
1950 		 * ep_start_scan() and ep_done_scan() plays with two lists
1951 		 * (->rdllist and ->ovflist), and there is always a window in
1952 		 * which both lists are empty for a short period of time even
1953 		 * though events are pending, so the lock is important.
1954 		 */
1955 		eavail = ep_events_available(ep);
1956 		if (!eavail)
1957 			__add_wait_queue_exclusive(&ep->wq, &wait);
1958 
1959 		write_unlock_irq(&ep->lock);
1960 
1961 		if (!eavail)
1962 			timed_out = !schedule_hrtimeout_range(to, slack,
1963 							      HRTIMER_MODE_ABS);
1964 		__set_current_state(TASK_RUNNING);
1965 
1966 		/*
1967 		 * We were woken up, thus go and try to harvest some events.
1968 		 * If timed out and still on the wait queue, recheck eavail
1969 		 * carefully under lock, below.
1970 		 */
1971 		eavail = 1;
1972 
1973 		if (!list_empty_careful(&wait.entry)) {
1974 			write_lock_irq(&ep->lock);
1975 			/*
1976 			 * If the thread timed out and is not on the wait queue,
1977 			 * it means that the thread was woken up after its
1978 			 * timeout expired before it could reacquire the lock.
1979 			 * Thus, when wait.entry is empty, it needs to harvest
1980 			 * events.
1981 			 */
1982 			if (timed_out)
1983 				eavail = list_empty(&wait.entry);
1984 			__remove_wait_queue(&ep->wq, &wait);
1985 			write_unlock_irq(&ep->lock);
1986 		}
1987 	}
1988 }
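/*
 * [Editor's sketch, not part of the kernel source] The three timeout modes
 * that ep_poll() distinguishes, as seen from userspace:
 */
#include <sys/epoll.h>

static void demo_timeouts(int epfd, struct epoll_event *evs, int max)
{
	epoll_wait(epfd, evs, max, 0);		/* timed_out = 1: poll once, return */
	epoll_wait(epfd, evs, max, 250);	/* absolute deadline ~250ms from now */
	epoll_wait(epfd, evs, max, -1);		/* timeout == NULL: block until an event */
}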
1989 
1990 /**
1991  * ep_loop_check_proc - verify that adding an epoll file inside another
1992  *                      epoll structure does not violate the constraints, in
1993  *                      terms of closed loops, or too deep chains (which can
1994  *                      result in excessive stack usage).
1995  *
1996  * @ep: the &struct eventpoll to be currently checked.
1997  * @depth: Current depth of the path being checked.
1998  *
1999  * Return: %zero if adding an epoll file inside the current epoll
2000  *          structure @ep does not violate the constraints, or %-1 otherwise.
2001  */
2002 static int ep_loop_check_proc(struct eventpoll *ep, int depth)
2003 {
2004 	int error = 0;
2005 	struct rb_node *rbp;
2006 	struct epitem *epi;
2007 
2008 	mutex_lock_nested(&ep->mtx, depth + 1);
2009 	ep->gen = loop_check_gen;
2010 	for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
2011 		epi = rb_entry(rbp, struct epitem, rbn);
2012 		if (unlikely(is_file_epoll(epi->ffd.file))) {
2013 			struct eventpoll *ep_tovisit;
2014 			ep_tovisit = epi->ffd.file->private_data;
2015 			if (ep_tovisit->gen == loop_check_gen)
2016 				continue;
2017 			if (ep_tovisit == inserting_into || depth > EP_MAX_NESTS)
2018 				error = -1;
2019 			else
2020 				error = ep_loop_check_proc(ep_tovisit, depth + 1);
2021 			if (error != 0)
2022 				break;
2023 		} else {
2024 			/*
2025 			 * If we've reached a file that is not associated with
2026 			 * an ep, then we need to check if the newly added
2027 			 * links are going to add too many wakeup paths. We do
2028 			 * this by adding it to the tfile_check_list, if it's
2029 			 * not already there, and calling reverse_path_check()
2030 			 * during ep_insert().
2031 			 */
2032 			list_file(epi->ffd.file);
2033 		}
2034 	}
2035 	mutex_unlock(&ep->mtx);
2036 
2037 	return error;
2038 }
2039 
2040 /**
2041  * ep_loop_check - Performs a check to verify that adding an epoll file (@to)
2042  *                 into another epoll file (represented by @ep) does not create
2043  *                 closed loops or too deep chains.
2044  *
2045  * @ep: Pointer to the epoll we are inserting into.
2046  * @to: Pointer to the epoll to be inserted.
2047  *
2048  * Return: %zero if adding the epoll @to inside the epoll @ep
2049  * does not violate the constraints, or %-1 otherwise.
2050  */
2051 static int ep_loop_check(struct eventpoll *ep, struct eventpoll *to)
2052 {
2053 	inserting_into = ep;
2054 	return ep_loop_check_proc(to, 0);
2055 }
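/*
 * [Editor's sketch, not part of the kernel source] The kind of cycle that
 * ep_loop_check() rejects: watch ep2 from ep1, then try to watch ep1 from
 * ep2. The second EPOLL_CTL_ADD fails with errno == ELOOP.
 */
#include <sys/epoll.h>

static void demo_eloop(void)
{
	int ep1 = epoll_create1(0);
	int ep2 = epoll_create1(0);
	struct epoll_event ev = { .events = EPOLLIN };

	epoll_ctl(ep1, EPOLL_CTL_ADD, ep2, &ev);	/* succeeds */
	epoll_ctl(ep2, EPOLL_CTL_ADD, ep1, &ev);	/* fails: ELOOP */
}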
2056 
2057 static void clear_tfile_check_list(void)
2058 {
2059 	rcu_read_lock();
2060 	while (tfile_check_list != EP_UNACTIVE_PTR) {
2061 		struct epitems_head *head = tfile_check_list;
2062 		tfile_check_list = head->next;
2063 		unlist_file(head);
2064 	}
2065 	rcu_read_unlock();
2066 }
2067 
2068 /*
2069  * Open an eventpoll file descriptor.
2070  */
2071 static int do_epoll_create(int flags)
2072 {
2073 	int error, fd;
2074 	struct eventpoll *ep = NULL;
2075 	struct file *file;
2076 
2077 	/* Check the EPOLL_* constant for consistency.  */
2078 	BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
2079 
2080 	if (flags & ~EPOLL_CLOEXEC)
2081 		return -EINVAL;
2082 	/*
2083 	 * Create the internal data structure ("struct eventpoll").
2084 	 */
2085 	error = ep_alloc(&ep);
2086 	if (error < 0)
2087 		return error;
2088 	/*
2089 	 * Creates all the items needed to set up an eventpoll file. That is,
2090 	 * a file structure and a free file descriptor.
2091 	 */
2092 	fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
2093 	if (fd < 0) {
2094 		error = fd;
2095 		goto out_free_ep;
2096 	}
2097 	file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
2098 				 O_RDWR | (flags & O_CLOEXEC));
2099 	if (IS_ERR(file)) {
2100 		error = PTR_ERR(file);
2101 		goto out_free_fd;
2102 	}
2103 	ep->file = file;
2104 	fd_install(fd, file);
2105 	return fd;
2106 
2107 out_free_fd:
2108 	put_unused_fd(fd);
2109 out_free_ep:
2110 	ep_clear_and_put(ep);
2111 	return error;
2112 }
2113 
2114 SYSCALL_DEFINE1(epoll_create1, int, flags)
2115 {
2116 	return do_epoll_create(flags);
2117 }
2118 
2119 SYSCALL_DEFINE1(epoll_create, int, size)
2120 {
2121 	if (size <= 0)
2122 		return -EINVAL;
2123 
2124 	return do_epoll_create(0);
2125 }
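/*
 * [Editor's sketch, not part of the kernel source] Both entry points end up
 * in do_epoll_create(); the legacy size argument is only validated:
 */
#include <sys/epoll.h>

static void demo_create(void)
{
	int epfd1 = epoll_create1(EPOLL_CLOEXEC);	/* only EPOLL_CLOEXEC is accepted */
	int epfd2 = epoll_create(42);			/* size must be > 0, then is ignored */
	int bad = epoll_create(0);			/* fails: errno == EINVAL */

	(void)epfd1; (void)epfd2; (void)bad;
}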
2126 
2127 #ifdef CONFIG_PM_SLEEP
2128 static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
2129 {
2130 	if ((epev->events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
2131 		epev->events &= ~EPOLLWAKEUP;
2132 }
2133 #else
2134 static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
2135 {
2136 	epev->events &= ~EPOLLWAKEUP;
2137 }
2138 #endif
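/*
 * [Editor's sketch, not part of the kernel source] EPOLLWAKEUP is silently
 * cleared rather than rejected when the caller lacks CAP_BLOCK_SUSPEND (or
 * when CONFIG_PM_SLEEP is off):
 */
#include <sys/epoll.h>

static void demo_epollwakeup(int epfd, int timerfd)
{
	struct epoll_event ev = { .events = EPOLLIN | EPOLLWAKEUP };

	/*
	 * Without CAP_BLOCK_SUSPEND this still succeeds, but the kernel
	 * strips EPOLLWAKEUP, so no suspend-blocking wakeup source is armed.
	 */
	epoll_ctl(epfd, EPOLL_CTL_ADD, timerfd, &ev);
}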
2139 
2140 static inline int epoll_mutex_lock(struct mutex *mutex, int depth,
2141 				   bool nonblock)
2142 {
2143 	if (!nonblock) {
2144 		mutex_lock_nested(mutex, depth);
2145 		return 0;
2146 	}
2147 	if (mutex_trylock(mutex))
2148 		return 0;
2149 	return -EAGAIN;
2150 }
2151 
2152 int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
2153 		 bool nonblock)
2154 {
2155 	int error;
2156 	int full_check = 0;
2157 	struct fd f, tf;
2158 	struct eventpoll *ep;
2159 	struct epitem *epi;
2160 	struct eventpoll *tep = NULL;
2161 
2162 	error = -EBADF;
2163 	f = fdget(epfd);
2164 	if (!f.file)
2165 		goto error_return;
2166 
2167 	/* Get the "struct file *" for the target file */
2168 	tf = fdget(fd);
2169 	if (!tf.file)
2170 		goto error_fput;
2171 
2172 	/* The target file descriptor must support poll */
2173 	error = -EPERM;
2174 	if (!file_can_poll(tf.file))
2175 		goto error_tgt_fput;
2176 
2177 	/* Check if EPOLLWAKEUP is allowed */
2178 	if (ep_op_has_event(op))
2179 		ep_take_care_of_epollwakeup(epds);
2180 
2181 	/*
2182 	 * We have to check that the file structure underneath the file descriptor
2183 	 * the user passed to us _is_ an eventpoll file. We also do not permit
2184 	 * adding an epoll file descriptor inside itself.
2185 	 */
2186 	error = -EINVAL;
2187 	if (f.file == tf.file || !is_file_epoll(f.file))
2188 		goto error_tgt_fput;
2189 
2190 	/*
2191 	 * epoll adds to the wakeup queue at EPOLL_CTL_ADD time only,
2192 	 * so EPOLLEXCLUSIVE is not allowed for an EPOLL_CTL_MOD operation.
2193 	 * Also, we do not currently support nested exclusive wakeups.
2194 	 */
2195 	if (ep_op_has_event(op) && (epds->events & EPOLLEXCLUSIVE)) {
2196 		if (op == EPOLL_CTL_MOD)
2197 			goto error_tgt_fput;
2198 		if (op == EPOLL_CTL_ADD && (is_file_epoll(tf.file) ||
2199 				(epds->events & ~EPOLLEXCLUSIVE_OK_BITS)))
2200 			goto error_tgt_fput;
2201 	}
2202 
2203 	/*
2204 	 * At this point it is safe to assume that the "private_data" contains
2205 	 * our own data structure.
2206 	 */
2207 	ep = f.file->private_data;
2208 
2209 	/*
2210 	 * When we insert an epoll file descriptor inside another epoll file
2211 	 * descriptor, there is the chance of creating closed loops, which are
2212 	 * better be handled here, than in more critical paths. While we are
2213 	 * better handled here than in more critical paths. While we are
2214 	 * and hang them on the tfile_check_list, so we can check that we
2215 	 * haven't created too many possible wakeup paths.
2216 	 *
2217 	 * We do not need to take the global 'epnested_mutex' on EPOLL_CTL_ADD when
2218 	 * the epoll file descriptor is attaching directly to a wakeup source,
2219 	 * unless the epoll file descriptor is nested. The purpose of taking the
2220 	 * 'epnested_mutex' on add is to prevent complex topologies such as loops and
2221 	 * deep wakeup paths from forming in parallel through multiple
2222 	 * EPOLL_CTL_ADD operations.
2223 	 */
2224 	error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
2225 	if (error)
2226 		goto error_tgt_fput;
2227 	if (op == EPOLL_CTL_ADD) {
2228 		if (READ_ONCE(f.file->f_ep) || ep->gen == loop_check_gen ||
2229 		    is_file_epoll(tf.file)) {
2230 			mutex_unlock(&ep->mtx);
2231 			error = epoll_mutex_lock(&epnested_mutex, 0, nonblock);
2232 			if (error)
2233 				goto error_tgt_fput;
2234 			loop_check_gen++;
2235 			full_check = 1;
2236 			if (is_file_epoll(tf.file)) {
2237 				tep = tf.file->private_data;
2238 				error = -ELOOP;
2239 				if (ep_loop_check(ep, tep) != 0)
2240 					goto error_tgt_fput;
2241 			}
2242 			error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
2243 			if (error)
2244 				goto error_tgt_fput;
2245 		}
2246 	}
2247 
2248 	/*
2249 	 * Try to lookup the file inside our RB tree. Since we grabbed "mtx"
2250 	 * above, we can be sure to be able to use the item looked up by
2251 	 * ep_find() till we release the mutex.
2252 	 */
2253 	epi = ep_find(ep, tf.file, fd);
2254 
2255 	error = -EINVAL;
2256 	switch (op) {
2257 	case EPOLL_CTL_ADD:
2258 		if (!epi) {
2259 			epds->events |= EPOLLERR | EPOLLHUP;
2260 			error = ep_insert(ep, epds, tf.file, fd, full_check);
2261 		} else
2262 			error = -EEXIST;
2263 		break;
2264 	case EPOLL_CTL_DEL:
2265 		if (epi) {
2266 			/*
2267 			 * The eventpoll itself is still alive: the refcount
2268 			 * can't go to zero here.
2269 			 */
2270 			ep_remove_safe(ep, epi);
2271 			error = 0;
2272 		} else {
2273 			error = -ENOENT;
2274 		}
2275 		break;
2276 	case EPOLL_CTL_MOD:
2277 		if (epi) {
2278 			if (!(epi->event.events & EPOLLEXCLUSIVE)) {
2279 				epds->events |= EPOLLERR | EPOLLHUP;
2280 				error = ep_modify(ep, epi, epds);
2281 			}
2282 		} else
2283 			error = -ENOENT;
2284 		break;
2285 	}
2286 	mutex_unlock(&ep->mtx);
2287 
2288 error_tgt_fput:
2289 	if (full_check) {
2290 		clear_tfile_check_list();
2291 		loop_check_gen++;
2292 		mutex_unlock(&epnested_mutex);
2293 	}
2294 
2295 	fdput(tf);
2296 error_fput:
2297 	fdput(f);
2298 error_return:
2299 
2300 	return error;
2301 }
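/*
 * [Editor's sketch, not part of the kernel source] The EPOLLEXCLUSIVE rules
 * enforced above, seen from userspace:
 */
#include <sys/epoll.h>

static void demo_exclusive(int epfd, int sockfd)
{
	struct epoll_event ev = { .events = EPOLLIN | EPOLLEXCLUSIVE };

	epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &ev);	/* allowed at ADD time */
	epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &ev);	/* rejected: errno == EINVAL */
}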
2302 
2303 /*
2304  * The following function implements the controller interface for
2305  * the eventpoll file that enables the insertion/removal/change of
2306  * file descriptors inside the interest set.
2307  */
2308 SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
2309 		struct epoll_event __user *, event)
2310 {
2311 	struct epoll_event epds;
2312 
2313 	if (ep_op_has_event(op) &&
2314 	    copy_from_user(&epds, event, sizeof(struct epoll_event)))
2315 		return -EFAULT;
2316 
2317 	return do_epoll_ctl(epfd, op, fd, &epds, false);
2318 }
2319 
2320 /*
2321  * Implement the event wait interface for the eventpoll file. It is the kernel
2322  * part of the user space epoll_wait(2).
2323  */
2324 static int do_epoll_wait(int epfd, struct epoll_event __user *events,
2325 			 int maxevents, struct timespec64 *to)
2326 {
2327 	int error;
2328 	struct fd f;
2329 	struct eventpoll *ep;
2330 
2331 	/* The maximum number of events must be greater than zero */
2332 	if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
2333 		return -EINVAL;
2334 
2335 	/* Verify that the area passed by the user is writeable */
2336 	if (!access_ok(events, maxevents * sizeof(struct epoll_event)))
2337 		return -EFAULT;
2338 
2339 	/* Get the "struct file *" for the eventpoll file */
2340 	f = fdget(epfd);
2341 	if (!f.file)
2342 		return -EBADF;
2343 
2344 	/*
2345 	 * We have to check that the file structure underneath the fd
2346 	 * the user passed to us _is_ an eventpoll file.
2347 	 */
2348 	error = -EINVAL;
2349 	if (!is_file_epoll(f.file))
2350 		goto error_fput;
2351 
2352 	/*
2353 	 * At this point it is safe to assume that the "private_data" contains
2354 	 * our own data structure.
2355 	 */
2356 	ep = f.file->private_data;
2357 
2358 	/* Time to fish for events ... */
2359 	error = ep_poll(ep, events, maxevents, to);
2360 
2361 error_fput:
2362 	fdput(f);
2363 	return error;
2364 }
2365 
2366 SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
2367 		int, maxevents, int, timeout)
2368 {
2369 	struct timespec64 to;
2370 
2371 	return do_epoll_wait(epfd, events, maxevents,
2372 			     ep_timeout_to_timespec(&to, timeout));
2373 }
2374 
2375 /*
2376  * Implement the event wait interface for the eventpoll file. It is the kernel
2377  * part of the user space epoll_pwait(2).
2378  */
2379 static int do_epoll_pwait(int epfd, struct epoll_event __user *events,
2380 			  int maxevents, struct timespec64 *to,
2381 			  const sigset_t __user *sigmask, size_t sigsetsize)
2382 {
2383 	int error;
2384 
2385 	/*
2386 	 * If the caller wants a certain signal mask to be set during the wait,
2387 	 * we apply it here.
2388 	 */
2389 	error = set_user_sigmask(sigmask, sigsetsize);
2390 	if (error)
2391 		return error;
2392 
2393 	error = do_epoll_wait(epfd, events, maxevents, to);
2394 
2395 	restore_saved_sigmask_unless(error == -EINTR);
2396 
2397 	return error;
2398 }
2399 
2400 SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
2401 		int, maxevents, int, timeout, const sigset_t __user *, sigmask,
2402 		size_t, sigsetsize)
2403 {
2404 	struct timespec64 to;
2405 
2406 	return do_epoll_pwait(epfd, events, maxevents,
2407 			      ep_timeout_to_timespec(&to, timeout),
2408 			      sigmask, sigsetsize);
2409 }
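/*
 * [Editor's sketch, not part of the kernel source] epoll_pwait() applies
 * the caller's signal mask only for the duration of the wait, closing the
 * classic unblock-then-wait race:
 */
#include <signal.h>
#include <sys/epoll.h>

static int demo_pwait(int epfd, struct epoll_event *evs, int max)
{
	sigset_t mask;

	sigfillset(&mask);
	sigdelset(&mask, SIGINT);	/* only SIGINT may interrupt the wait */

	/*
	 * Atomically: set the mask, wait, restore the old mask. On -EINTR,
	 * restore_saved_sigmask_unless() defers the restore until the
	 * signal is actually delivered.
	 */
	return epoll_pwait(epfd, evs, max, -1, &mask);
}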
2410 
2411 SYSCALL_DEFINE6(epoll_pwait2, int, epfd, struct epoll_event __user *, events,
2412 		int, maxevents, const struct __kernel_timespec __user *, timeout,
2413 		const sigset_t __user *, sigmask, size_t, sigsetsize)
2414 {
2415 	struct timespec64 ts, *to = NULL;
2416 
2417 	if (timeout) {
2418 		if (get_timespec64(&ts, timeout))
2419 			return -EFAULT;
2420 		to = &ts;
2421 		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
2422 			return -EINVAL;
2423 	}
2424 
2425 	return do_epoll_pwait(epfd, events, maxevents, to,
2426 			      sigmask, sigsetsize);
2427 }
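/*
 * [Editor's sketch, not part of the kernel source; assumes a libc that
 * exposes epoll_pwait2(), e.g. glibc >= 2.35] epoll_pwait2() takes a
 * struct timespec instead of milliseconds, allowing sub-millisecond
 * timeouts; a NULL timeout still means "block indefinitely":
 */
#include <sys/epoll.h>
#include <time.h>

static int demo_pwait2(int epfd, struct epoll_event *evs, int max)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 250000 };	/* 250us */

	return epoll_pwait2(epfd, evs, max, &ts, NULL);
}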
2428 
2429 #ifdef CONFIG_COMPAT
2430 static int do_compat_epoll_pwait(int epfd, struct epoll_event __user *events,
2431 				 int maxevents, struct timespec64 *timeout,
2432 				 const compat_sigset_t __user *sigmask,
2433 				 compat_size_t sigsetsize)
2434 {
2435 	long err;
2436 
2437 	/*
2438 	 * If the caller wants a certain signal mask to be set during the wait,
2439 	 * we apply it here.
2440 	 */
2441 	err = set_compat_user_sigmask(sigmask, sigsetsize);
2442 	if (err)
2443 		return err;
2444 
2445 	err = do_epoll_wait(epfd, events, maxevents, timeout);
2446 
2447 	restore_saved_sigmask_unless(err == -EINTR);
2448 
2449 	return err;
2450 }
2451 
2452 COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd,
2453 		       struct epoll_event __user *, events,
2454 		       int, maxevents, int, timeout,
2455 		       const compat_sigset_t __user *, sigmask,
2456 		       compat_size_t, sigsetsize)
2457 {
2458 	struct timespec64 to;
2459 
2460 	return do_compat_epoll_pwait(epfd, events, maxevents,
2461 				     ep_timeout_to_timespec(&to, timeout),
2462 				     sigmask, sigsetsize);
2463 }
2464 
2465 COMPAT_SYSCALL_DEFINE6(epoll_pwait2, int, epfd,
2466 		       struct epoll_event __user *, events,
2467 		       int, maxevents,
2468 		       const struct __kernel_timespec __user *, timeout,
2469 		       const compat_sigset_t __user *, sigmask,
2470 		       compat_size_t, sigsetsize)
2471 {
2472 	struct timespec64 ts, *to = NULL;
2473 
2474 	if (timeout) {
2475 		if (get_timespec64(&ts, timeout))
2476 			return -EFAULT;
2477 		to = &ts;
2478 		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
2479 			return -EINVAL;
2480 	}
2481 
2482 	return do_compat_epoll_pwait(epfd, events, maxevents, to,
2483 				     sigmask, sigsetsize);
2484 }
2485 
2486 #endif
2487 
2488 static int __init eventpoll_init(void)
2489 {
2490 	struct sysinfo si;
2491 
2492 	si_meminfo(&si);
2493 	/*
2494 	 * Allows top 4% of lomem to be allocated for epoll watches (per user).
2495 	 */
2496 	max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
2497 		EP_ITEM_COST;
2498 	BUG_ON(max_user_watches < 0);
2499 
2500 	/*
2501 	 * We can have many thousands of epitems, so prevent this from
2502 	 * using an extra cache line on 64-bit (and smaller) CPUs
2503 	 */
2504 	BUILD_BUG_ON(sizeof(void *) <= 8 && sizeof(struct epitem) > 128);
2505 
2506 	/* Allocates slab cache used to allocate "struct epitem" items */
2507 	epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
2508 			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);
2509 
2510 	/* Allocates slab cache used to allocate "struct eppoll_entry" */
2511 	pwq_cache = kmem_cache_create("eventpoll_pwq",
2512 		sizeof(struct eppoll_entry), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);
2513 	epoll_sysctls_init();
2514 
2515 	ephead_cache = kmem_cache_create("ep_head",
2516 		sizeof(struct epitems_head), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);
2517 
2518 	return 0;
2519 }
2520 fs_initcall(eventpoll_init);
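/*
 * [Editor's worked example] (si.totalram - si.totalhigh) is low memory in
 * pages; dividing by 25 keeps 1/25 = 4% of it, "<< PAGE_SHIFT" converts
 * pages to bytes, and dividing by EP_ITEM_COST (one epitem plus one
 * eppoll_entry) yields a watch count. For instance, with 4 GiB of low
 * memory and an assumed EP_ITEM_COST of 256 bytes:
 * 4 GiB * 4% / 256 ≈ 671,000 watches per user.
 */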
2521