/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/freezer.h>
#include <net/busy_poll.h>
#include <linux/vmalloc.h>

#include <linux/uaccess.h>


/*
 * Estimate expected accuracy in ns from a timeval.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK	(100 * NSEC_PER_MSEC)

static long __estimate_accuracy(struct timespec64 *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}

u64 select_estimate_accuracy(struct timespec64 *tv)
{
	u64 ret;
	struct timespec64 now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts64(&now);
	now = timespec64_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}
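
/*
 * Worked example (illustrative, not part of the original file): a
 * normal task selecting with a 10 second timeout gets 0.1% slack,
 * i.e. 10 msec.  A 5 minute timeout would yield 300 msec and is
 * therefore capped at MAX_SLACK (100 msec).  Tasks with a positive
 * nice value use divfactor = 200, i.e. 0.5% slack.
 */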

struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))

/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait(), do all the
 * work.  poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}

static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with smp_store_mb() in poll_schedule_timeout().
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}
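
/*
 * Ordering sketch for the barrier comments above (illustrative, not
 * part of the original file):
 *
 *	waker (pollwake)		poller (poll_schedule_timeout)
 *	----------------		------------------------------
 *	make event data visible		smp_store_mb(pwq->triggered, 0)
 *	smp_wmb()			recheck fds and/or sleep
 *	pwq->triggered = 1		observe triggered == 1, which
 *					implies the event data is visible
 */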

static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !((unsigned long)key & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	entry->filp = get_file(filp);
	entry->wait_address = wait_address;
	entry->key = p->_key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following smp_store_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	smp_store_mb(pwq->triggered, 0);

	return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);
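
/*
 * Condensed sketch (illustrative only, not part of the original file)
 * of the wait cycle the helpers above implement; do_select() and
 * do_poll() below are the real, more careful versions.  "file" and the
 * readiness test are placeholders.
 */
#if 0
	struct poll_wqueues table;
	ktime_t *to = NULL;	/* NULL: wait forever, else absolute expiry */
	unsigned int mask;

	poll_initwait(&table);
	for (;;) {
		/* first pass registers us on the waitqueue via __pollwait */
		mask = file->f_op->poll(file, &table.pt);
		if (mask /* ready */ || signal_pending(current))
			break;
		/* later passes must not register again */
		table.pt._qproc = NULL;
		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE, to, 0))
			break;	/* timed out */
	}
	poll_freewait(&table);
#endif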

/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:		pointer to timespec64 variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here.  That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec)
{
	struct timespec64 ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec64_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts64(to);
		*to = timespec64_add_safe(*to, ts);
	}
	return 0;
}

static int poll_select_copy_remaining(struct timespec64 *end_time,
				      void __user *p,
				      int timeval, int ret)
{
	struct timespec64 rts64;
	struct timespec rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts64(&rts64);
	rts64 = timespec64_sub(*end_time, rts64);
	if (rts64.tv_sec < 0)
		rts64.tv_sec = rts64.tv_nsec = 0;

	rts = timespec64_to_timespec(rts64);

	if (timeval) {
		if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
			memset(&rtv, 0, sizeof(rtv));
		rtv.tv_sec = rts64.tv_sec;
		rtv.tv_usec = rts64.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;

	} else if (!copy_to_user(p, &rts, sizeof(rts)))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

/*
 * Scalable version of the fd_set.
 */

typedef struct {
	unsigned long *in, *out, *ex;
	unsigned long *res_in, *res_out, *res_ex;
} fd_set_bits;

/*
 * How many longwords for "nr" bits?
 */
#define FDS_BITPERLONG	(8*sizeof(long))
#define FDS_LONGS(nr)	(((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
#define FDS_BYTES(nr)	(FDS_LONGS(nr)*sizeof(long))

/*
 * We do a VERIFY_WRITE here even though we are only reading this time:
 * we'll write to it eventually..
 *
 * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
 */
static inline
int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	nr = FDS_BYTES(nr);
	if (ufdset)
		return copy_from_user(fdset, ufdset, nr) ? -EFAULT : 0;

	memset(fdset, 0, nr);
	return 0;
}
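
/*
 * Worked example (illustrative, not part of the original file): with
 * 64-bit longs and n = 100 descriptors, FDS_LONGS(100) is
 * (100 + 63) / 64 = 2, so FDS_BYTES(100) = 16 and get_fd_set()
 * copies 16 bytes per set.
 */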

static inline unsigned long __must_check
set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	if (ufdset)
		return __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
	return 0;
}

static inline
void zero_fd_set(unsigned long nr, unsigned long *fdset)
{
	memset(fdset, 0, FDS_BYTES(nr));
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle last incomplete long-word first */
	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
	n /= BITS_PER_LONG;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds + n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * BITS_PER_LONG;
	}

	return max;
}

#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit,
				unsigned int ll_flag)
{
	wait->_key = POLLEX_SET | ll_flag;
	if (in & bit)
		wait->_key |= POLLIN_SET;
	if (out & bit)
		wait->_key |= POLLOUT_SET;
}
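
/*
 * Illustrative example (not part of the original file): for an fd that
 * is only in the read set, wait_key_set() leaves _key as
 * POLLEX_SET | POLLIN_SET (plus POLL_BUSY_LOOP when busy polling), so
 * pollwake() will filter out wakeups that carry none of those bits,
 * e.g. a pure write-space event.
 */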

static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	u64 slack = 0;
	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_start = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
		bool can_busy_loop = false;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += BITS_PER_LONG;
				continue;
			}

			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
				struct fd f;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				f = fdget(i);
				if (f.file) {
					const struct file_operations *f_op;
					f_op = f.file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op->poll) {
						wait_key_set(wait, in, out,
							     bit, busy_flag);
						mask = (*f_op->poll)(f.file, wait);
					}
					fdput(f);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					/* got something, stop busy polling */
					if (retval) {
						can_busy_loop = false;
						busy_flag = 0;

					/*
					 * only remember a returned
					 * POLL_BUSY_LOOP if we asked for it
					 */
					} else if (busy_flag & mask)
						can_busy_loop = true;

				}
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait->_qproc = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_start) {
				busy_start = busy_loop_current_time();
				continue;
			}
			if (!busy_loop_timeout(busy_start))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec64 *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	size_t size, alloc_size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * since we used fdset we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		if (size > (SIZE_MAX / 6))
			goto out_nofds;

		alloc_size = 6 * size;
		bits = kmalloc(alloc_size, GFP_KERNEL|__GFP_NOWARN);
		if (!bits && alloc_size > PAGE_SIZE)
			bits = vmalloc(alloc_size);

		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kvfree(bits);
out_nofds:
	return ret;
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	struct timespec64 end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}

static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts;
	struct timespec64 ts64, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;
		ts64 = timespec_to_timespec64(ts);

		to = &end_time;
		if (poll_select_set_timeout(to, ts64.tv_sec, ts64.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's. */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}
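
/*
 * Illustrative note (not part of the original file): the sigmask dance
 * in do_pselect() is what makes pselect() atomic.  A userspace
 * sigprocmask() followed by select() could miss a signal delivered
 * between the two calls; here the temporary mask is installed, the
 * wait happens, and on -ERESTARTNOHAND the original mask is parked in
 * current->saved_sigmask so that do_signal() delivers the pending
 * signal with the temporary mask still in effect before the original
 * one is restored.
 */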

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
				     bool *can_busy_poll,
				     unsigned int busy_flag)
{
	unsigned int mask;
	int fd;

	mask = 0;
	fd = pollfd->fd;
	if (fd >= 0) {
		struct fd f = fdget(fd);
		mask = POLLNVAL;
		if (f.file) {
			mask = DEFAULT_POLLMASK;
			if (f.file->f_op->poll) {
				pwait->_key = pollfd->events|POLLERR|POLLHUP;
				pwait->_key |= busy_flag;
				mask = f.file->f_op->poll(f.file, pwait);
				if (mask & busy_flag)
					*can_busy_poll = true;
			}
			/* Mask out unneeded events. */
			mask &= pollfd->events | POLLERR | POLLHUP;
			fdput(f);
		}
	}
	pollfd->revents = mask;

	return mask;
}
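
/*
 * Worked example (illustrative, not part of the original file): for a
 * pollfd with events = POLLIN on a socket whose ->poll returns
 * POLLIN | POLLOUT | POLLWRNORM, the mask is narrowed to
 * events | POLLERR | POLLHUP, so revents ends up as just POLLIN.
 * POLLERR and POLLHUP would pass through even though they were never
 * asked for, as POSIX requires.
 */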

static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
		   struct timespec64 *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	u64 slack = 0;
	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_start = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;
		bool can_busy_loop = false;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill poll_table->_qproc, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt, &can_busy_loop,
					      busy_flag)) {
					count++;
					pt->_qproc = NULL;
					/* found something, stop busy polling */
					busy_flag = 0;
					can_busy_loop = false;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table->_qproc to them on the next loop iteration.
		 */
		pt->_qproc = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_start) {
				busy_start = busy_loop_current_time();
				continue;
			}
			if (!busy_loop_timeout(busy_start))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}
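
/*
 * Illustrative note (not part of the original file): the busy-poll
 * fallback above (and the identical one in do_select()) escalates in
 * three steps.  The first pass that sees a POLL_BUSY_LOOP-capable
 * socket records busy_start; subsequent passes keep spinning until
 * busy_loop_timeout(busy_start) fires; only then is busy_flag dropped
 * and the task put to sleep in poll_schedule_timeout().
 */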

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list)) / \
			sizeof(struct pollfd))

static int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		struct timespec64 *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len, size;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > rlimit(RLIMIT_NOFILE))
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
		walk = walk->next = kmalloc(size, GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}

static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec64 *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		restart_block->fn = do_restart_poll;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		int, timeout_msecs)
{
	struct timespec64 end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		struct restart_block *restart_block;

		restart_block = &current->restart_block;
		restart_block->fn = do_restart_poll;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}
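
/*
 * Illustrative note (not part of the original file): poll() uses
 * ERESTART_RESTARTBLOCK rather than a plain restart because its
 * timeout argument is relative.  Redoing the syscall with the original
 * timeout_msecs would start the wait over from scratch, so the
 * already-computed absolute end_time is stashed in restart_block->poll
 * and do_restart_poll() resumes from that expiry instead.
 */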

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts;
	struct timespec64 end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's. */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}

#ifdef CONFIG_COMPAT
#define __COMPAT_NFDBITS       (8 * sizeof(compat_ulong_t))

static
int compat_poll_select_copy_remaining(struct timespec64 *end_time,
				      void __user *p,
				      int timeval, int ret)
{
	struct timespec64 ts;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts64(&ts);
	ts = timespec64_sub(*end_time, ts);
	if (ts.tv_sec < 0)
		ts.tv_sec = ts.tv_nsec = 0;

	if (timeval) {
		struct compat_timeval rtv;

		rtv.tv_sec = ts.tv_sec;
		rtv.tv_usec = ts.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;
	} else {
		struct compat_timespec rts;

		rts.tv_sec = ts.tv_sec;
		rts.tv_nsec = ts.tv_nsec;

		if (!copy_to_user(p, &rts, sizeof(rts)))
			return ret;
	}
	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

/*
 * Ooo, nasty.  We need to frob 32-bit unsigned longs into
 * 64-bit unsigned longs.
 */
static
int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
			unsigned long *fdset)
{
	nr = DIV_ROUND_UP(nr, __COMPAT_NFDBITS);
	if (ufdset) {
		unsigned long odd;

		if (!access_ok(VERIFY_WRITE, ufdset, nr*sizeof(compat_ulong_t)))
			return -EFAULT;

		odd = nr & 1UL;
		nr &= ~1UL;
		while (nr) {
			unsigned long h, l;
			if (__get_user(l, ufdset) || __get_user(h, ufdset+1))
				return -EFAULT;
			ufdset += 2;
			*fdset++ = h << 32 | l;
			nr -= 2;
		}
		if (odd && __get_user(*fdset, ufdset))
			return -EFAULT;
	} else {
		/* Tricky: round nr up to an even number of compat longs,
		 * so that the full trailing unsigned long in the kernel
		 * fdset is cleared as well.
		 */
		memset(fdset, 0, ((nr + 1) & ~1)*sizeof(compat_ulong_t));
	}
	return 0;
}

static
int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
		      unsigned long *fdset)
{
	unsigned long odd;
	nr = DIV_ROUND_UP(nr, __COMPAT_NFDBITS);

	if (!ufdset)
		return 0;

	odd = nr & 1UL;
	nr &= ~1UL;
	while (nr) {
		unsigned long h, l;
		l = *fdset++;
		h = l >> 32;
		if (__put_user(l, ufdset) || __put_user(h, ufdset+1))
			return -EFAULT;
		ufdset += 2;
		nr -= 2;
	}
	if (odd && __put_user(*fdset, ufdset))
		return -EFAULT;
	return 0;
}
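
/*
 * Worked example (illustrative, not part of the original file): with
 * compat longs l = 0x00000001 and h = 0x80000000 read from userspace,
 * compat_get_fd_set() stores the single kernel long
 * 0x8000000000000001; compat_set_fd_set() performs the inverse split
 * on the way out.
 */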


/*
 * This is a virtual copy of sys_select from fs/select.c and probably
 * should be compared to it from time to time
 */

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
static int compat_core_sys_select(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	struct timespec64 *end_time)
{
	fd_set_bits fds;
	void *bits;
	int size, max_fds, ret = -EINVAL;
	struct fdtable *fdt;
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * since we used fdset we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		bits = kmalloc(6 * size, GFP_KERNEL);
		ret = -ENOMEM;
		if (!bits)
			goto out_nofds;
	}
	fds.in      = (unsigned long *)  bits;
	fds.out     = (unsigned long *) (bits +   size);
	fds.ex      = (unsigned long *) (bits + 2*size);
	fds.res_in  = (unsigned long *) (bits + 3*size);
	fds.res_out = (unsigned long *) (bits + 4*size);
	fds.res_ex  = (unsigned long *) (bits + 5*size);

	if ((ret = compat_get_fd_set(n, inp, fds.in)) ||
	    (ret = compat_get_fd_set(n, outp, fds.out)) ||
	    (ret = compat_get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (compat_set_fd_set(n, inp, fds.res_in) ||
	    compat_set_fd_set(n, outp, fds.res_out) ||
	    compat_set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;
out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}

COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp,
	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
	struct compat_timeval __user *, tvp)
{
	struct timespec64 end_time, *to = NULL;
	struct compat_timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = compat_core_sys_select(n, inp, outp, exp, to);
	ret = compat_poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}

struct compat_sel_arg_struct {
	compat_ulong_t n;
	compat_uptr_t inp;
	compat_uptr_t outp;
	compat_uptr_t exp;
	compat_uptr_t tvp;
};

COMPAT_SYSCALL_DEFINE1(old_select, struct compat_sel_arg_struct __user *, arg)
{
	struct compat_sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return compat_sys_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
				 compat_ptr(a.exp), compat_ptr(a.tvp));
}

static long do_compat_pselect(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	struct compat_timespec __user *tsp, compat_sigset_t __user *sigmask,
	compat_size_t sigsetsize)
{
	compat_sigset_t ss32;
	sigset_t ksigmask, sigsaved;
	struct compat_timespec ts;
	struct timespec64 end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		if (sigsetsize != sizeof(compat_sigset_t))
			return -EINVAL;
		if (copy_from_user(&ss32, sigmask, sizeof(ss32)))
			return -EFAULT;
		sigset_from_compat(&ksigmask, &ss32);

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = compat_core_sys_select(n, inp, outp, exp, to);
	ret = compat_poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

COMPAT_SYSCALL_DEFINE6(pselect6, int, n, compat_ulong_t __user *, inp,
	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
	struct compat_timespec __user *, tsp, void __user *, sig)
{
	compat_size_t sigsetsize = 0;
	compat_uptr_t up = 0;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig,
				sizeof(compat_uptr_t)+sizeof(compat_size_t)) ||
		    __get_user(up, (compat_uptr_t __user *)sig) ||
		    __get_user(sigsetsize,
				(compat_size_t __user *)(sig+sizeof(up))))
			return -EFAULT;
	}
	return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up),
				 sigsetsize);
}

COMPAT_SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds,
	unsigned int,  nfds, struct compat_timespec __user *, tsp,
	const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
{
	compat_sigset_t ss32;
	sigset_t ksigmask, sigsaved;
	struct compat_timespec ts;
	struct timespec64 end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		if (sigsetsize != sizeof(compat_sigset_t))
			return -EINVAL;
		if (copy_from_user(&ss32, sigmask, sizeof(ss32)))
			return -EFAULT;
		sigset_from_compat(&ksigmask, &ss32);

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = compat_poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}
#endif