/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome the nfds < 16390 descriptor limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/sched/rt.h>

#include <asm/uaccess.h>


/*
 * Estimate expected accuracy in ns from a timespec.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions.
 */
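
/*
 * For illustration: with the default 0.1% factor a 2 s timeout gets
 * 2 ms of slack, and the MAX_SLACK cap kicks in for timeouts above
 * 100 s.  A positive-nice task divides by 200 instead, so it hits
 * the cap already at 20 s.
 */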

#define MAX_SLACK	(100 * NSEC_PER_MSEC)

static long __estimate_accuracy(struct timespec *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}

long select_estimate_accuracy(struct timespec *tv)
{
	unsigned long ret;
	struct timespec now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts(&now);
	now = timespec_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}



struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))
/*
 * Ok, Peter made a complicated but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the Linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait(), do all the
 * work.  poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
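
/*
 * For illustration, a typical ->poll method in a driver first calls
 * poll_wait() with its wait queue and then reports what is currently
 * ready (sketch only; the mydev_* names are hypothetical):
 *
 *	static unsigned int mydev_poll(struct file *file, poll_table *wait)
 *	{
 *		struct mydev *dev = file->private_data;
 *		unsigned int mask = 0;
 *
 *		poll_wait(file, &dev->waitq, wait);
 *		if (mydev_data_ready(dev))
 *			mask |= POLLIN | POLLRDNORM;
 *		if (mydev_can_write(dev))
 *			mask |= POLLOUT | POLLWRNORM;
 *		return mask;
 *	}
 */
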
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}

static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with set_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !((unsigned long)key & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
				poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	entry->filp = get_file(filp);
	entry->wait_address = wait_address;
	entry->key = p->_key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following set_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	set_mb(pwq->triggered, 0);

	return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);
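
/*
 * Taken together, the helpers above implement the wait loop that both
 * do_select() and do_poll() below follow (simplified sketch):
 *
 *	poll_initwait(&table);
 *	for (;;) {
 *		poll every fd; the first pass registers us on each waitqueue
 *		if (ready || timed_out || signal_pending(current))
 *			break;
 *		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
 *					   to, slack))
 *			timed_out = 1;
 *	}
 *	poll_freewait(&table);
 */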

/**
 * poll_select_set_timeout - helper function to set up the timeout value
 * @to:		pointer to timespec variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec *to, long sec, long nsec)
{
	struct timespec ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts(to);
		*to = timespec_add_safe(*to, ts);
	}
	return 0;
}
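
/*
 * For illustration: sys_poll() below feeds a relative timeout in
 * milliseconds through this helper, so a 1500 ms timeout arrives as
 * sec=1, nsec=500000000 and *to becomes "now + 1.5s":
 *
 *	poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
 *		NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
 */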

static int poll_select_copy_remaining(struct timespec *end_time, void __user *p,
				      int timeval, int ret)
{
	struct timespec rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts(&rts);
	rts = timespec_sub(*end_time, rts);
	if (rts.tv_sec < 0)
		rts.tv_sec = rts.tv_nsec = 0;

	if (timeval) {
		if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
			memset(&rtv, 0, sizeof(rtv));
		rtv.tv_sec = rts.tv_sec;
		rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;

	} else if (!copy_to_user(p, &rts, sizeof(rts)))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

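/*
 * Check that every fd the caller asked about is actually open, and
 * compute how far the bitmaps really need to be scanned: returns the
 * highest set fd plus one, or -EBADF if a set refers to a descriptor
 * that isn't open.
 */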
static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle last incomplete long-word first */
	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
	n /= BITS_PER_LONG;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds + n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * BITS_PER_LONG;
	}

	return max;
}

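/*
 * The event masks that select() folds into "readable", "writable" and
 * "exceptional": an fd with POLLERR reports as both readable and
 * writable, POLLHUP counts as readable, and only POLLPRI counts as
 * exceptional.
 */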
#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit)
{
	wait->_key = POLLEX_SET;
	if (in & bit)
		wait->_key |= POLLIN_SET;
	if (out & bit)
		wait->_key |= POLLOUT_SET;
}

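/*
 * The core of select(): call ->poll() on every fd named in the three
 * bitmaps.  The first sweep registers us on each fd's waitqueue via
 * __pollwait(); as soon as an event is found (or on all later sweeps)
 * _qproc is cleared so no further waiters are added.  Between sweeps
 * we sleep in poll_schedule_timeout() until an event, a timeout or a
 * signal ends the loop.
 */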
int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	unsigned long slack = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += BITS_PER_LONG;
				continue;
			}

			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
				struct fd f;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				f = fdget(i);
				if (f.file) {
					const struct file_operations *f_op;
					f_op = f.file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op && f_op->poll) {
						wait_key_set(wait, in, out, bit);
						mask = (*f_op->poll)(f.file, wait);
					}
					fdput(f);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
						wait->_qproc = NULL;
					}
				}
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait->_qproc = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	unsigned int size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing);
	 * since we use fd sets, we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		bits = kmalloc(6 * size, GFP_KERNEL);
		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	struct timespec end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}

static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
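/*
 * For illustration, user space (typically the C library) packs the
 * sixth argument as a pointer-plus-size pair, conceptually:
 *
 *	struct {
 *		const sigset_t *ss;
 *		size_t ss_len;
 *	};
 *
 * which is exactly what the two __get_user() reads below pull apart.
 */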
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
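
/*
 * The pollfd array is kept as a chain of poll_list chunks: the first
 * chunk lives on the stack of do_sys_poll() (see N_STACK_PPS below),
 * and every further chunk is one page holding up to POLLFD_PER_PAGE
 * entries.  This is the PAGE_SIZE chunk-based allocation mentioned in
 * the changelog at the top of this file.
 */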

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait)
{
	unsigned int mask;
	int fd;

	mask = 0;
	fd = pollfd->fd;
	if (fd >= 0) {
		struct fd f = fdget(fd);
		mask = POLLNVAL;
		if (f.file) {
			mask = DEFAULT_POLLMASK;
			if (f.file->f_op && f.file->f_op->poll) {
				pwait->_key = pollfd->events|POLLERR|POLLHUP;
				mask = f.file->f_op->poll(f.file, pwait);
			}
			/* Mask out unneeded events. */
			mask &= pollfd->events | POLLERR | POLLHUP;
			fdput(f);
		}
	}
	pollfd->revents = mask;

	return mask;
}

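/*
 * The poll() counterpart of do_select(): sweep the poll_list chain,
 * calling do_pollfd() on each entry, until an event is found, the
 * timeout expires or a signal is pending.  As in do_select(), only
 * the first sweep registers waiters; afterwards _qproc stays NULL.
 */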
static int do_poll(unsigned int nfds, struct poll_list *list,
		   struct poll_wqueues *wait, struct timespec *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	unsigned long slack = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill poll_table->_qproc, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt)) {
					count++;
					pt->_qproc = NULL;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table->_qproc to them on the next loop iteration.
		 */
		pt->_qproc = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
			sizeof(struct pollfd))

int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		struct timespec *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len, size;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > rlimit(RLIMIT_NOFILE))
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
		walk = walk->next = kmalloc(size, GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(nfds, head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}

static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		restart_block->fn = do_restart_poll;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		int, timeout_msecs)
{
	struct timespec end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		struct restart_block *restart_block;

		restart_block = &current_thread_info()->restart_block;
		restart_block->fn = do_restart_poll;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}