// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/freezer.h>
#include <net/busy_poll.h>
#include <linux/vmalloc.h>

#include <linux/uaccess.h>


/*
 * Estimate expected accuracy in ns from a timeval.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 * (For example, a 2 s timeout gets 2 ms of slack; anything
 * longer than 100 s is capped at MAX_SLACK.)
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK	(100 * NSEC_PER_MSEC)

static long __estimate_accuracy(struct timespec64 *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}

u64 select_estimate_accuracy(struct timespec64 *tv)
{
	u64 ret;
	struct timespec64 now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts64(&now);
	now = timespec64_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}


struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))

/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait() do all the
 * work. poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
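
/*
 * For illustration only (not part of this file): a typical driver ->poll
 * method cooperates with the machinery below roughly like this, where
 * "struct my_dev" and my_dev_readable() are hypothetical:
 *
 *	static __poll_t my_poll(struct file *file, poll_table *wait)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		poll_wait(file, &dev->waitq, wait);
 *		return my_dev_readable(dev) ? EPOLLIN | EPOLLRDNORM : 0;
 *	}
 *
 * poll_wait() funnels into __pollwait() below, which queues an entry on
 * dev->waitq; a later wake_up(&dev->waitq) then ends up in pollwake().
 */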

static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}
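
/*
 * Entry allocation sketch: poll_get_entry() above first hands out the
 * N_INLINE_POLL_ENTRIES slots embedded in struct poll_wqueues, then falls
 * back to whole pages chained through poll_table_page->next, filling each
 * with entries until POLL_TABLE_FULL() trips and a fresh page is needed.
 */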

static int __pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with smp_store_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !(key_to_poll(key) & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	entry->filp = get_file(filp);
	entry->wait_address = wait_address;
	entry->key = p->_key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following smp_store_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	smp_store_mb(pwq->triggered, 0);

	return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);
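
/*
 * Typical caller pattern (see do_select() and do_poll() below): scan all
 * fds, and only if nothing fired, sleep via poll_schedule_timeout().
 * Because pollwake() sets ->triggered, a wakeup that arrives between the
 * scan and the sleep is not lost: poll_schedule_timeout() checks the flag
 * before scheduling.
 */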

/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:		pointer to timespec64 variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec)
{
	struct timespec64 ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec64_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts64(to);
		*to = timespec64_add_safe(*to, ts);
	}
	return 0;
}

static int poll_select_copy_remaining(struct timespec64 *end_time,
				      void __user *p,
				      int timeval, int ret)
{
	struct timespec64 rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts64(&rts);
	rts = timespec64_sub(*end_time, rts);
	if (rts.tv_sec < 0)
		rts.tv_sec = rts.tv_nsec = 0;


	if (timeval) {
		if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
			memset(&rtv, 0, sizeof(rtv));
		rtv.tv_sec = rts.tv_sec;
		rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;

	} else if (!put_timespec64(&rts, p))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

/*
 * Scalable version of the fd_set.
 */

typedef struct {
	unsigned long *in, *out, *ex;
	unsigned long *res_in, *res_out, *res_ex;
} fd_set_bits;

/*
 * How many longwords for "nr" bits?
 */
#define FDS_BITPERLONG	(8*sizeof(long))
#define FDS_LONGS(nr)	(((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
#define FDS_BYTES(nr)	(FDS_LONGS(nr)*sizeof(long))
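
/*
 * Worked example, assuming 64-bit longs: nr = 1024 descriptors needs
 * FDS_LONGS(1024) = 16 longwords, i.e. FDS_BYTES(1024) = 128 bytes per
 * bitmap; an odd nr such as 100 rounds up to 2 longwords (16 bytes).
 */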

/*
 * We do a VERIFY_WRITE here even though we are only reading this time:
 * we'll write to it eventually..
 *
 * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
 */
static inline
int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	nr = FDS_BYTES(nr);
	if (ufdset)
		return copy_from_user(fdset, ufdset, nr) ? -EFAULT : 0;

	memset(fdset, 0, nr);
	return 0;
}

static inline unsigned long __must_check
set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	if (ufdset)
		return __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
	return 0;
}

static inline
void zero_fd_set(unsigned long nr, unsigned long *fdset)
{
	memset(fdset, 0, FDS_BYTES(nr));
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle last incomplete long-word first */
	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
	n /= BITS_PER_LONG;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds + n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * BITS_PER_LONG;
	}

	return max;
}

#define POLLIN_SET (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | EPOLLHUP | EPOLLERR)
#define POLLOUT_SET (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT | EPOLLERR)
#define POLLEX_SET (EPOLLPRI)

static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit,
				__poll_t ll_flag)
{
	wait->_key = POLLEX_SET | ll_flag;
	if (in & bit)
		wait->_key |= POLLIN_SET;
	if (out & bit)
		wait->_key |= POLLOUT_SET;
}
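
/*
 * Example: for a descriptor present only in the "in" set, wait_key_set()
 * yields _key == POLLEX_SET | POLLIN_SET (plus ll_flag), so pollwake()
 * filters out wakeups that only report writability on that descriptor.
 */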

static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	u64 slack = 0;
	__poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_start = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
		bool can_busy_loop = false;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;
			__poll_t mask;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += BITS_PER_LONG;
				continue;
			}

			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
				struct fd f;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				f = fdget(i);
				if (f.file) {
					const struct file_operations *f_op;
					f_op = f.file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op->poll) {
						wait_key_set(wait, in, out,
							     bit, busy_flag);
						mask = (*f_op->poll)(f.file, wait);
					}
					fdput(f);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					/* got something, stop busy polling */
					if (retval) {
						can_busy_loop = false;
						busy_flag = 0;

					/*
					 * only remember a returned
					 * POLL_BUSY_LOOP if we asked for it
					 */
					} else if (busy_flag & mask)
						can_busy_loop = true;

				}
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait->_qproc = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_start) {
				busy_start = busy_loop_current_time();
				continue;
			}
			if (!busy_loop_timeout(busy_start))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}
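
/*
 * Layout of the single allocation that core_sys_select() below carves up
 * (six equally sized bitmaps of FDS_BYTES(n) bytes each):
 *
 *	bits:	[ in | out | ex | res_in | res_out | res_ex ]
 */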

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
		    fd_set __user *exp, struct timespec64 *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	size_t size, alloc_size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * since we used fdset we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		if (size > (SIZE_MAX / 6))
			goto out_nofds;

		alloc_size = 6 * size;
		bits = kvmalloc(alloc_size, GFP_KERNEL);
		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kvfree(bits);
out_nofds:
	return ret;
}

static int kern_select(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timeval __user *tvp)
{
	struct timespec64 end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	return kern_select(n, inp, outp, exp, tvp);
}
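
/*
 * Example of the timeval folding done by kern_select() above: a
 * non-normalized tv = { .tv_sec = 1, .tv_usec = 2500000 } becomes
 * sec = 1 + 2 = 3 and nsec = 500000 * NSEC_PER_USEC = 500000000,
 * which poll_select_set_timeout() then accepts as normalized.
 */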

static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (get_timespec64(&ts, tsp))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return kern_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
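
/*
 * Worked example, assuming 4 KiB pages and a 64-bit kernel (16-byte
 * struct poll_list header, 8-byte struct pollfd):
 * POLLFD_PER_PAGE = (4096 - 16) / 8 = 510 entries per chained page.
 */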

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline __poll_t do_pollfd(struct pollfd *pollfd, poll_table *pwait,
				 bool *can_busy_poll,
				 __poll_t busy_flag)
{
	__poll_t mask;
	int fd;

	mask = 0;
	fd = pollfd->fd;
	if (fd >= 0) {
		struct fd f = fdget(fd);
		mask = EPOLLNVAL;
		if (f.file) {
			/* userland u16 ->events contains POLL... bitmap */
			__poll_t filter = demangle_poll(pollfd->events) |
						EPOLLERR | EPOLLHUP;
			mask = DEFAULT_POLLMASK;
			if (f.file->f_op->poll) {
				pwait->_key = filter;
				pwait->_key |= busy_flag;
				mask = f.file->f_op->poll(f.file, pwait);
				if (mask & busy_flag)
					*can_busy_poll = true;
			}
			/* Mask out unneeded events. */
			mask &= filter;
			fdput(f);
		}
	}
	/* ... and so does ->revents */
	pollfd->revents = mangle_poll(mask);

	return mask;
}
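
/*
 * Note the asymmetry in do_pollfd() above: a negative pollfd->fd yields a
 * mask of 0, while a non-negative fd that no longer resolves to a file
 * yields EPOLLNVAL, which deliberately bypasses the "mask &= filter" step.
 */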

static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
		   struct timespec64 *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	u64 slack = 0;
	__poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_start = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;
		bool can_busy_loop = false;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill poll_table->_qproc, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt, &can_busy_loop,
					      busy_flag)) {
					count++;
					pt->_qproc = NULL;
					/* found something, stop busy polling */
					busy_flag = 0;
					can_busy_loop = false;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table->_qproc to them on the next loop iteration.
		 */
		pt->_qproc = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_start) {
				busy_start = busy_loop_current_time();
				continue;
			}
			if (!busy_loop_timeout(busy_start))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list)) / \
			sizeof(struct pollfd))

static int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		struct timespec64 *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len, size;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > rlimit(RLIMIT_NOFILE))
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
		walk = walk->next = kmalloc(size, GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}

static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec64 *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		restart_block->fn = do_restart_poll;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		int, timeout_msecs)
{
	struct timespec64 end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		struct restart_block *restart_block;

		restart_block = &current->restart_block;
		restart_block->fn = do_restart_poll;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}
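
/*
 * Example of the msec split above: timeout_msecs = 2500 becomes
 * poll_select_set_timeout(to, 2, 500000000), i.e. 2500 / MSEC_PER_SEC = 2 s
 * plus (2500 % MSEC_PER_SEC) * NSEC_PER_MSEC = 500000000 ns of expiry.
 */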

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (get_timespec64(&ts, tsp))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}

#ifdef CONFIG_COMPAT
#define __COMPAT_NFDBITS       (8 * sizeof(compat_ulong_t))

static
int compat_poll_select_copy_remaining(struct timespec64 *end_time, void __user *p,
				      int timeval, int ret)
{
	struct timespec64 ts;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts64(&ts);
	ts = timespec64_sub(*end_time, ts);
	if (ts.tv_sec < 0)
		ts.tv_sec = ts.tv_nsec = 0;

	if (timeval) {
		struct compat_timeval rtv;

		rtv.tv_sec = ts.tv_sec;
		rtv.tv_usec = ts.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;
	} else {
		if (!compat_put_timespec64(&ts, p))
			return ret;
	}
	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}
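
/*
 * Sketch of the frobbing handled below: a 32-bit task's fd_set stores fds
 * 0-31 in its first compat_ulong_t and fds 32-63 in its second;
 * compat_get_bitmap() merges each such pair into one 64-bit longword on a
 * 64-bit kernel, and compat_put_bitmap() splits them back out.
 */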

/*
 * Ooo, nasty.  We need here to frob 32-bit unsigned longs to
 * 64-bit unsigned longs.
 */
static
int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
		      unsigned long *fdset)
{
	if (ufdset) {
		return compat_get_bitmap(fdset, ufdset, nr);
	} else {
		zero_fd_set(nr, fdset);
		return 0;
	}
}

static
int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
		      unsigned long *fdset)
{
	if (!ufdset)
		return 0;
	return compat_put_bitmap(ufdset, fdset, nr);
}


/*
 * This is a virtual copy of sys_select from fs/select.c and probably
 * should be compared to it from time to time
 */

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
static int compat_core_sys_select(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	struct timespec64 *end_time)
{
	fd_set_bits fds;
	void *bits;
	int size, max_fds, ret = -EINVAL;
	struct fdtable *fdt;
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * since we used fdset we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		bits = kmalloc(6 * size, GFP_KERNEL);
		ret = -ENOMEM;
		if (!bits)
			goto out_nofds;
	}
	fds.in      = (unsigned long *)  bits;
	fds.out     = (unsigned long *) (bits +   size);
	fds.ex      = (unsigned long *) (bits + 2*size);
	fds.res_in  = (unsigned long *) (bits + 3*size);
	fds.res_out = (unsigned long *) (bits + 4*size);
	fds.res_ex  = (unsigned long *) (bits + 5*size);

	if ((ret = compat_get_fd_set(n, inp, fds.in)) ||
	    (ret = compat_get_fd_set(n, outp, fds.out)) ||
	    (ret = compat_get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (compat_set_fd_set(n, inp, fds.res_in) ||
	    compat_set_fd_set(n, outp, fds.res_out) ||
	    compat_set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;
out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}

static int do_compat_select(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	struct compat_timeval __user *tvp)
{
	struct timespec64 end_time, *to = NULL;
	struct compat_timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = compat_core_sys_select(n, inp, outp, exp, to);
	ret = compat_poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}

COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp,
	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
	struct compat_timeval __user *, tvp)
{
	return do_compat_select(n, inp, outp, exp, tvp);
}

struct compat_sel_arg_struct {
	compat_ulong_t n;
	compat_uptr_t inp;
	compat_uptr_t outp;
	compat_uptr_t exp;
	compat_uptr_t tvp;
};

COMPAT_SYSCALL_DEFINE1(old_select, struct compat_sel_arg_struct __user *, arg)
{
	struct compat_sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return do_compat_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
				compat_ptr(a.exp), compat_ptr(a.tvp));
}

static long do_compat_pselect(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	struct compat_timespec __user *tsp, compat_sigset_t __user *sigmask,
	compat_size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (compat_get_timespec64(&ts, tsp))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		if (sigsetsize != sizeof(compat_sigset_t))
			return -EINVAL;
		if (get_compat_sigset(&ksigmask, sigmask))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = compat_core_sys_select(n, inp, outp, exp, to);
	ret = compat_poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

COMPAT_SYSCALL_DEFINE6(pselect6, int, n, compat_ulong_t __user *, inp,
	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
	struct compat_timespec __user *, tsp, void __user *, sig)
{
	compat_size_t sigsetsize = 0;
	compat_uptr_t up = 0;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig,
				sizeof(compat_uptr_t)+sizeof(compat_size_t)) ||
				__get_user(up, (compat_uptr_t __user *)sig) ||
				__get_user(sigsetsize,
				(compat_size_t __user *)(sig+sizeof(up))))
			return -EFAULT;
	}
	return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up),
				 sigsetsize);
}

COMPAT_SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds,
	unsigned int,  nfds, struct compat_timespec __user *, tsp,
	const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (compat_get_timespec64(&ts, tsp))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		if (sigsetsize != sizeof(compat_sigset_t))
			return -EINVAL;
		if (get_compat_sigset(&ksigmask, sigmask))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = compat_poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}
#endif