/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>

#include <asm/uaccess.h>


/*
 * Estimate expected accuracy in ns from a timespec.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK	(100 * NSEC_PER_MSEC)

static long __estimate_accuracy(struct timespec *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}
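
/*
 * Worked example (illustrative only): for a normal-priority task with
 * 2 s of timeout remaining, divfactor is 1000, so the slack comes to
 * 2 * (NSEC_PER_SEC / 1000) = 2 ms.  A "nice" task (divfactor 200)
 * would get 10 ms for the same timeout, and any timeout long enough
 * to produce more than 100 ms of slack is capped at MAX_SLACK.
 */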

long select_estimate_accuracy(struct timespec *tv)
{
	unsigned long ret;
	struct timespec now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts(&now);
	now = timespec_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}



struct poll_table_page {
	struct poll_table_page *next;
	struct poll_table_entry *entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))

/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the Linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait() do all the
 * work.  poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page *p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry *entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}

static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with set_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !((unsigned long)key & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
				poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	entry->filp = get_file(filp);
	entry->wait_address = wait_address;
	entry->key = p->_key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following set_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	set_mb(pwq->triggered, 0);

	return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);
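
/*
 * Typical caller pattern (a sketch mirroring do_select()/do_poll()
 * below, not an additional API):
 *
 *	poll_initwait(&table);
 *	for (;;) {
 *		... scan fds, registering waiters through table.pt ...
 *		if (found || timed_out || signal_pending(current))
 *			break;
 *		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
 *					   to, slack))
 *			timed_out = 1;	// one final scan, then return
 *	}
 *	poll_freewait(&table);
 */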

/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:		pointer to timespec variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec *to, long sec, long nsec)
{
	struct timespec ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts(to);
		*to = timespec_add_safe(*to, ts);
	}
	return 0;
}
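
/*
 * Worked example (illustrative only): a caller holding a relative
 * timeout of 1.5 s passes sec = 1, nsec = 500000000; the function adds
 * that to the current monotonic time, so *to ends up holding the
 * absolute expiry.  sec = 1, nsec = 1500000000 would fail
 * timespec_valid() and return -EINVAL.
 */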

static int poll_select_copy_remaining(struct timespec *end_time, void __user *p,
				      int timeval, int ret)
{
	struct timespec rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts(&rts);
	rts = timespec_sub(*end_time, rts);
	if (rts.tv_sec < 0)
		rts.tv_sec = rts.tv_nsec = 0;

	if (timeval) {
		if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
			memset(&rtv, 0, sizeof(rtv));
		rtv.tv_sec = rts.tv_sec;
		rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;

	} else if (!copy_to_user(p, &rts, sizeof(rts)))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle last incomplete long-word first */
	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
	n /= BITS_PER_LONG;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds + n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * BITS_PER_LONG;
	}

	return max;
}
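
/*
 * Illustrative example: for n = 5 the first step computes
 * set = ~(~0UL << 5) = 0x1f, masking the five interesting bits of the
 * last (partial) word.  If any of those bits is set in in/out/ex but
 * clear in the open-fds bitmap, that fd is not open and we return
 * -EBADF; otherwise max ends up as the highest watched fd plus one.
 */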

#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit)
{
	wait->_key = POLLEX_SET;
	if (in & bit)
		wait->_key |= POLLIN_SET;
	if (out & bit)
		wait->_key |= POLLOUT_SET;
}
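
/*
 * Example (illustrative only): an fd watched only for reading gets
 * _key = POLLEX_SET | POLLIN_SET, so a keyed wakeup carrying only
 * write events is filtered out in pollwake(); POLLERR and POLLHUP
 * are part of POLLIN_SET and always get through for such an fd.
 */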

int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	unsigned long slack = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += BITS_PER_LONG;
				continue;
			}

			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
				struct fd f;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				f = fdget(i);
				if (f.file) {
					const struct file_operations *f_op;
					f_op = f.file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op && f_op->poll) {
						wait_key_set(wait, in, out, bit);
						mask = (*f_op->poll)(f.file, wait);
					}
					fdput(f);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
						wait->_qproc = NULL;
					}
				}
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait->_qproc = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	unsigned int size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * and since we use fd_sets we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		bits = kmalloc(6 * size, GFP_KERNEL);
		if (!bits)
			goto out_nofds;
	}
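	/*
	 * Sizing note (a sketch, assuming SELECT_STACK_ALLOC is 256 as
	 * in <linux/poll.h>): stack_fds is then 256 bytes, each of the
	 * six bitmaps may take up to 42 bytes, and since FDS_BYTES()
	 * rounds to whole longs, n up to about 320 descriptors is
	 * served without kmalloc().
	 */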
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	struct timespec end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}

static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}
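
/*
 * Illustrative user-space view of the sixth pselect6() argument (a
 * sketch of the layout read above, not a kernel definition):
 *
 *	struct {
 *		const sigset_t *ss;	// signal mask, may be NULL
 *		size_t ss_len;		// sizeof(sigset_t)
 *	};
 */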

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
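
/*
 * Sizing note (illustrative, assuming 4 KiB pages and a 64-bit arch):
 * sizeof(struct poll_list) is 16 bytes (pointer + int + padding) and
 * sizeof(struct pollfd) is 8, so POLLFD_PER_PAGE works out to
 * (4096 - 16) / 8 = 510 entries per chained page.
 */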

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait)
{
	unsigned int mask;
	int fd;

	mask = 0;
	fd = pollfd->fd;
	if (fd >= 0) {
		struct fd f = fdget(fd);
		mask = POLLNVAL;
		if (f.file) {
			mask = DEFAULT_POLLMASK;
			if (f.file->f_op && f.file->f_op->poll) {
				pwait->_key = pollfd->events|POLLERR|POLLHUP;
				mask = f.file->f_op->poll(f.file, pwait);
			}
			/* Mask out unneeded events. */
			mask &= pollfd->events | POLLERR | POLLHUP;
			fdput(f);
		}
	}
	pollfd->revents = mask;

	return mask;
}
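
/*
 * Example (illustrative only): for a socket polled with
 * events = POLLIN, a ->poll() result of POLLIN | POLLOUT is masked
 * down to POLLIN before being stored in revents; a negative or
 * closed fd yields POLLNVAL regardless of the requested events.
 */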

static int do_poll(unsigned int nfds, struct poll_list *list,
		   struct poll_wqueues *wait, struct timespec *end_time)
{
	poll_table *pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	unsigned long slack = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd *pfd, *pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill poll_table->_qproc, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt)) {
					count++;
					pt->_qproc = NULL;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table->_qproc to them on the next loop iteration.
		 */
		pt->_qproc = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
			sizeof(struct pollfd))
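
/*
 * Sizing note (illustrative, assuming POLL_STACK_ALLOC is 256 as in
 * <linux/poll.h>, and a 64-bit arch): stack_pps is then 256 bytes and
 * the poll_list header takes 16, so N_STACK_PPS = (256 - 16) / 8 = 30
 * pollfds are handled without any allocation; larger sets fall back
 * to the page-sized kmalloc chunks built below.
 */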

int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		struct timespec *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len, size;
	/*
	 * Allocate small arguments on the stack to save memory and be
	 * faster - use long to make sure the buffer is aligned properly
	 * on 64 bit archs to avoid unaligned access.
	 */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > rlimit(RLIMIT_NOFILE))
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
		walk = walk->next = kmalloc(size, GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(nfds, head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}

static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		restart_block->fn = do_restart_poll;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		int, timeout_msecs)
{
	struct timespec end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}
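	/*
	 * Example (illustrative only): timeout_msecs = 2500 becomes
	 * sec = 2, nsec = 500 * NSEC_PER_MSEC, i.e. an absolute expiry
	 * 2.5 s from now; a negative timeout means wait indefinitely
	 * and leaves to == NULL.
	 */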

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		struct restart_block *restart_block;

		restart_block = &current_thread_info()->restart_block;
		restart_block->fn = do_restart_poll;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}
987