/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/sched/rt.h>
#include <linux/freezer.h>
#include <net/busy_poll.h>

#include <asm/uaccess.h>


/*
 * Estimate expected accuracy in ns from a timespec.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK	(100 * NSEC_PER_MSEC)

static long __estimate_accuracy(struct timespec *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}
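
/*
 * Worked example (editorial addition, not in the original source):
 * with the default divfactor of 1000 (0.1%), a 2 s timeout yields
 * 2 ms of slack, and anything beyond 100 s is clamped to MAX_SLACK
 * (100 ms).  A positively niced task uses divfactor 200 (0.5%), so
 * 2 s yields 10 ms and the clamp is hit at 20 s.
 */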

long select_estimate_accuracy(struct timespec *tv)
{
	unsigned long ret;
	struct timespec now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts(&now);
	now = timespec_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}



struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))

/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the Linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait(), do all the
 * work.  poll_wait() is an inline function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
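
/*
 * Illustrative sketch (editorial addition, not in the original
 * source) of how a driver's ->poll method typically participates,
 * assuming a hypothetical device with wait queue 'my_wq' and a
 * 'my_data_ready' flag:
 *
 *	static unsigned int my_poll(struct file *file, poll_table *wait)
 *	{
 *		poll_wait(file, &my_wq, wait);	// registers via __pollwait()
 *		return my_data_ready ? (POLLIN | POLLRDNORM) : 0;
 *	}
 *
 * poll_wait() is a no-op when the table's _qproc is NULL, which is
 * how the loops below suppress re-registration on later iterations.
 */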
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}
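
/*
 * Note (editorial): the first N_INLINE_POLL_ENTRIES waiters live inside
 * struct poll_wqueues itself; only once those are used up do we chain
 * whole pages of entries, with POLL_TABLE_FULL() deciding when the
 * current page is exhausted.
 */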

static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with set_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !((unsigned long)key & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
				poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	entry->filp = get_file(filp);
	entry->wait_address = wait_address;
	entry->key = p->_key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = freezable_schedule_hrtimeout_range(expires, slack,
							HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following set_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	set_mb(pwq->triggered, 0);

	return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);

/**
 * poll_select_set_timeout - helper function to set up the timeout value
 * @to:		pointer to timespec variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec *to, long sec, long nsec)
{
	struct timespec ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts(to);
		*to = timespec_add_safe(*to, ts);
	}
	return 0;
}
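
/*
 * Example (editorial addition): a caller passing a 5 s relative
 * timeout, e.g. poll_select_set_timeout(&to, 5, 0), gets back 'to'
 * holding "monotonic now + 5 s"; a zero timeout leaves *to zeroed so
 * the pollers below can detect the no-wait case cheaply.
 */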

static int poll_select_copy_remaining(struct timespec *end_time, void __user *p,
				      int timeval, int ret)
{
	struct timespec rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts(&rts);
	rts = timespec_sub(*end_time, rts);
	if (rts.tv_sec < 0)
		rts.tv_sec = rts.tv_nsec = 0;

	if (timeval) {
		if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
			memset(&rtv, 0, sizeof(rtv));
		rtv.tv_sec = rts.tv_sec;
		rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;

	} else if (!copy_to_user(p, &rts, sizeof(rts)))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle the last incomplete long-word first */
	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
	n /= BITS_PER_LONG;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds + n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * BITS_PER_LONG;
	}

	return max;
}
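
/*
 * Worked example (editorial addition), BITS_PER_LONG == 64: for
 * n == 70 the partial-word mask covers bits 0-5 of word 1 (fds
 * 64-69).  The loop then walks the words downward, returning -EBADF
 * for any watched bit whose fd is not open, and otherwise yielding
 * the highest watched fd plus one as the new 'n' for do_select().
 */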

#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit,
				unsigned int ll_flag)
{
	wait->_key = POLLEX_SET | ll_flag;
	if (in & bit)
		wait->_key |= POLLIN_SET;
	if (out & bit)
		wait->_key |= POLLOUT_SET;
}

int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	unsigned long slack = 0;
	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_end = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
		bool can_busy_loop = false;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += BITS_PER_LONG;
				continue;
			}

			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
				struct fd f;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				f = fdget(i);
				if (f.file) {
					const struct file_operations *f_op;
					f_op = f.file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op && f_op->poll) {
						wait_key_set(wait, in, out,
							     bit, busy_flag);
						mask = (*f_op->poll)(f.file, wait);
					}
					fdput(f);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					/* got something, stop busy polling */
					if (retval) {
						can_busy_loop = false;
						busy_flag = 0;

					/*
					 * only remember a returned
					 * POLL_BUSY_LOOP if we asked for it
					 */
					} else if (busy_flag & mask)
						can_busy_loop = true;

				}
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait->_qproc = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_end) {
				busy_end = busy_loop_end_time();
				continue;
			}
			if (!busy_loop_timeout(busy_end))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND, which restarts only when you want to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	unsigned int size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing);
	 * since we use fd_sets we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		bits = kmalloc(6 * size, GFP_KERNEL);
		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	struct timespec end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}
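
/*
 * Userspace view (illustrative, not part of this file): the classic
 * pattern served by the syscall above, assuming an open descriptor fd:
 *
 *	fd_set rfds;
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *	FD_ZERO(&rfds);
 *	FD_SET(fd, &rfds);
 *	int n = select(fd + 1, &rfds, NULL, NULL, &tv);
 *
 * On return, tv is normally updated to the time remaining, unless the
 * task's personality has STICKY_TIMEOUTS set (see
 * poll_select_copy_remaining() above).
 */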

static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
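
/*
 * Arithmetic note (editorial; assumes 4 KiB pages, a 16-byte
 * poll_list header and 8-byte pollfds): POLLFD_PER_PAGE works out to
 * 510 entries per chained page.  The exact value is arch-dependent.
 */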

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
				     bool *can_busy_poll,
				     unsigned int busy_flag)
{
	unsigned int mask;
	int fd;

	mask = 0;
	fd = pollfd->fd;
	if (fd >= 0) {
		struct fd f = fdget(fd);
		mask = POLLNVAL;
		if (f.file) {
			mask = DEFAULT_POLLMASK;
			if (f.file->f_op && f.file->f_op->poll) {
				pwait->_key = pollfd->events|POLLERR|POLLHUP;
				pwait->_key |= busy_flag;
				mask = f.file->f_op->poll(f.file, pwait);
				if (mask & busy_flag)
					*can_busy_poll = true;
			}
			/* Mask out unneeded events. */
			mask &= pollfd->events | POLLERR | POLLHUP;
			fdput(f);
		}
	}
	pollfd->revents = mask;

	return mask;
}

static int do_poll(unsigned int nfds, struct poll_list *list,
		   struct poll_wqueues *wait, struct timespec *end_time)
{
	poll_table *pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	unsigned long slack = 0;
	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_end = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;
		bool can_busy_loop = false;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill poll_table->_qproc, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt, &can_busy_loop,
					      busy_flag)) {
					count++;
					pt->_qproc = NULL;
					/* found something, stop busy polling */
					busy_flag = 0;
					can_busy_loop = false;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table->_qproc to them on the next loop iteration.
		 */
		pt->_qproc = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_end) {
				busy_end = busy_loop_end_time();
				continue;
			}
			if (!busy_loop_timeout(busy_end))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list)) / \
			sizeof(struct pollfd))
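
/*
 * Sizing note (editorial; assumes POLL_STACK_ALLOC == 256, its usual
 * value): stack_pps then holds one poll_list header plus
 * N_STACK_PPS == 30 pollfds before do_sys_poll() has to kmalloc()
 * further chunks.
 */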

int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		struct timespec *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len, size;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > rlimit(RLIMIT_NOFILE))
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
		walk = walk->next = kmalloc(size, GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(nfds, head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}

static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		restart_block->fn = do_restart_poll;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		int, timeout_msecs)
{
	struct timespec end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		struct restart_block *restart_block;

		restart_block = &current_thread_info()->restart_block;
		restart_block->fn = do_restart_poll;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}
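
/*
 * Note (editorial): poll(2) cannot simply be restarted with its
 * original arguments after a signal, because the timeout argument is
 * relative.  The restart block above instead re-enters
 * do_restart_poll() with the already-computed absolute end_time, so
 * the total wait time is preserved across the restart.
 */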

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}
1042