1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * linux/fs/file.c 4 * 5 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes 6 * 7 * Manage the dynamic fd arrays in the process files_struct. 8 */ 9 10 #include <linux/syscalls.h> 11 #include <linux/export.h> 12 #include <linux/fs.h> 13 #include <linux/kernel.h> 14 #include <linux/mm.h> 15 #include <linux/sched/signal.h> 16 #include <linux/slab.h> 17 #include <linux/file.h> 18 #include <linux/fdtable.h> 19 #include <linux/bitops.h> 20 #include <linux/spinlock.h> 21 #include <linux/rcupdate.h> 22 #include <linux/close_range.h> 23 #include <net/sock.h> 24 #include <linux/io_uring.h> 25 26 unsigned int sysctl_nr_open __read_mostly = 1024*1024; 27 unsigned int sysctl_nr_open_min = BITS_PER_LONG; 28 /* our min() is unusable in constant expressions ;-/ */ 29 #define __const_min(x, y) ((x) < (y) ? (x) : (y)) 30 unsigned int sysctl_nr_open_max = 31 __const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG; 32 33 static void __free_fdtable(struct fdtable *fdt) 34 { 35 kvfree(fdt->fd); 36 kvfree(fdt->open_fds); 37 kfree(fdt); 38 } 39 40 static void free_fdtable_rcu(struct rcu_head *rcu) 41 { 42 __free_fdtable(container_of(rcu, struct fdtable, rcu)); 43 } 44 45 #define BITBIT_NR(nr) BITS_TO_LONGS(BITS_TO_LONGS(nr)) 46 #define BITBIT_SIZE(nr) (BITBIT_NR(nr) * sizeof(long)) 47 48 /* 49 * Copy 'count' fd bits from the old table to the new table and clear the extra 50 * space if any. This does not copy the file pointers. Called with the files 51 * spinlock held for write. 52 */ 53 static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt, 54 unsigned int count) 55 { 56 unsigned int cpy, set; 57 58 cpy = count / BITS_PER_BYTE; 59 set = (nfdt->max_fds - count) / BITS_PER_BYTE; 60 memcpy(nfdt->open_fds, ofdt->open_fds, cpy); 61 memset((char *)nfdt->open_fds + cpy, 0, set); 62 memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy); 63 memset((char *)nfdt->close_on_exec + cpy, 0, set); 64 65 cpy = BITBIT_SIZE(count); 66 set = BITBIT_SIZE(nfdt->max_fds) - cpy; 67 memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy); 68 memset((char *)nfdt->full_fds_bits + cpy, 0, set); 69 } 70 71 /* 72 * Copy all file descriptors from the old table to the new, expanded table and 73 * clear the extra space. Called with the files spinlock held for write. 74 */ 75 static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt) 76 { 77 size_t cpy, set; 78 79 BUG_ON(nfdt->max_fds < ofdt->max_fds); 80 81 cpy = ofdt->max_fds * sizeof(struct file *); 82 set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *); 83 memcpy(nfdt->fd, ofdt->fd, cpy); 84 memset((char *)nfdt->fd + cpy, 0, set); 85 86 copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds); 87 } 88 89 static struct fdtable * alloc_fdtable(unsigned int nr) 90 { 91 struct fdtable *fdt; 92 void *data; 93 94 /* 95 * Figure out how many fds we actually want to support in this fdtable. 96 * Allocation steps are keyed to the size of the fdarray, since it 97 * grows far faster than any of the other dynamic data. We try to fit 98 * the fdarray into comfortable page-tuned chunks: starting at 1024B 99 * and growing in powers of two from there on. 100 */ 101 nr /= (1024 / sizeof(struct file *)); 102 nr = roundup_pow_of_two(nr + 1); 103 nr *= (1024 / sizeof(struct file *)); 104 /* 105 * Note that this can drive nr *below* what we had passed if sysctl_nr_open 106 * had been set lower between the check in expand_files() and here. Deal 107 * with that in caller, it's cheaper that way. 108 * 109 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise 110 * bitmaps handling below becomes unpleasant, to put it mildly... 111 */ 112 if (unlikely(nr > sysctl_nr_open)) 113 nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1; 114 115 fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT); 116 if (!fdt) 117 goto out; 118 fdt->max_fds = nr; 119 data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT); 120 if (!data) 121 goto out_fdt; 122 fdt->fd = data; 123 124 data = kvmalloc(max_t(size_t, 125 2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES), 126 GFP_KERNEL_ACCOUNT); 127 if (!data) 128 goto out_arr; 129 fdt->open_fds = data; 130 data += nr / BITS_PER_BYTE; 131 fdt->close_on_exec = data; 132 data += nr / BITS_PER_BYTE; 133 fdt->full_fds_bits = data; 134 135 return fdt; 136 137 out_arr: 138 kvfree(fdt->fd); 139 out_fdt: 140 kfree(fdt); 141 out: 142 return NULL; 143 } 144 145 /* 146 * Expand the file descriptor table. 147 * This function will allocate a new fdtable and both fd array and fdset, of 148 * the given size. 149 * Return <0 error code on error; 1 on successful completion. 150 * The files->file_lock should be held on entry, and will be held on exit. 151 */ 152 static int expand_fdtable(struct files_struct *files, unsigned int nr) 153 __releases(files->file_lock) 154 __acquires(files->file_lock) 155 { 156 struct fdtable *new_fdt, *cur_fdt; 157 158 spin_unlock(&files->file_lock); 159 new_fdt = alloc_fdtable(nr); 160 161 /* make sure all fd_install() have seen resize_in_progress 162 * or have finished their rcu_read_lock_sched() section. 163 */ 164 if (atomic_read(&files->count) > 1) 165 synchronize_rcu(); 166 167 spin_lock(&files->file_lock); 168 if (!new_fdt) 169 return -ENOMEM; 170 /* 171 * extremely unlikely race - sysctl_nr_open decreased between the check in 172 * caller and alloc_fdtable(). Cheaper to catch it here... 173 */ 174 if (unlikely(new_fdt->max_fds <= nr)) { 175 __free_fdtable(new_fdt); 176 return -EMFILE; 177 } 178 cur_fdt = files_fdtable(files); 179 BUG_ON(nr < cur_fdt->max_fds); 180 copy_fdtable(new_fdt, cur_fdt); 181 rcu_assign_pointer(files->fdt, new_fdt); 182 if (cur_fdt != &files->fdtab) 183 call_rcu(&cur_fdt->rcu, free_fdtable_rcu); 184 /* coupled with smp_rmb() in fd_install() */ 185 smp_wmb(); 186 return 1; 187 } 188 189 /* 190 * Expand files. 191 * This function will expand the file structures, if the requested size exceeds 192 * the current capacity and there is room for expansion. 193 * Return <0 error code on error; 0 when nothing done; 1 when files were 194 * expanded and execution may have blocked. 195 * The files->file_lock should be held on entry, and will be held on exit. 196 */ 197 static int expand_files(struct files_struct *files, unsigned int nr) 198 __releases(files->file_lock) 199 __acquires(files->file_lock) 200 { 201 struct fdtable *fdt; 202 int expanded = 0; 203 204 repeat: 205 fdt = files_fdtable(files); 206 207 /* Do we need to expand? */ 208 if (nr < fdt->max_fds) 209 return expanded; 210 211 /* Can we expand? */ 212 if (nr >= sysctl_nr_open) 213 return -EMFILE; 214 215 if (unlikely(files->resize_in_progress)) { 216 spin_unlock(&files->file_lock); 217 expanded = 1; 218 wait_event(files->resize_wait, !files->resize_in_progress); 219 spin_lock(&files->file_lock); 220 goto repeat; 221 } 222 223 /* All good, so we try */ 224 files->resize_in_progress = true; 225 expanded = expand_fdtable(files, nr); 226 files->resize_in_progress = false; 227 228 wake_up_all(&files->resize_wait); 229 return expanded; 230 } 231 232 static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt) 233 { 234 __set_bit(fd, fdt->close_on_exec); 235 } 236 237 static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt) 238 { 239 if (test_bit(fd, fdt->close_on_exec)) 240 __clear_bit(fd, fdt->close_on_exec); 241 } 242 243 static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt) 244 { 245 __set_bit(fd, fdt->open_fds); 246 fd /= BITS_PER_LONG; 247 if (!~fdt->open_fds[fd]) 248 __set_bit(fd, fdt->full_fds_bits); 249 } 250 251 static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt) 252 { 253 __clear_bit(fd, fdt->open_fds); 254 __clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits); 255 } 256 257 static unsigned int count_open_files(struct fdtable *fdt) 258 { 259 unsigned int size = fdt->max_fds; 260 unsigned int i; 261 262 /* Find the last open fd */ 263 for (i = size / BITS_PER_LONG; i > 0; ) { 264 if (fdt->open_fds[--i]) 265 break; 266 } 267 i = (i + 1) * BITS_PER_LONG; 268 return i; 269 } 270 271 static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds) 272 { 273 unsigned int count; 274 275 count = count_open_files(fdt); 276 if (max_fds < NR_OPEN_DEFAULT) 277 max_fds = NR_OPEN_DEFAULT; 278 return min(count, max_fds); 279 } 280 281 /* 282 * Allocate a new files structure and copy contents from the 283 * passed in files structure. 284 * errorp will be valid only when the returned files_struct is NULL. 285 */ 286 struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int *errorp) 287 { 288 struct files_struct *newf; 289 struct file **old_fds, **new_fds; 290 unsigned int open_files, i; 291 struct fdtable *old_fdt, *new_fdt; 292 293 *errorp = -ENOMEM; 294 newf = kmem_cache_alloc(files_cachep, GFP_KERNEL); 295 if (!newf) 296 goto out; 297 298 atomic_set(&newf->count, 1); 299 300 spin_lock_init(&newf->file_lock); 301 newf->resize_in_progress = false; 302 init_waitqueue_head(&newf->resize_wait); 303 newf->next_fd = 0; 304 new_fdt = &newf->fdtab; 305 new_fdt->max_fds = NR_OPEN_DEFAULT; 306 new_fdt->close_on_exec = newf->close_on_exec_init; 307 new_fdt->open_fds = newf->open_fds_init; 308 new_fdt->full_fds_bits = newf->full_fds_bits_init; 309 new_fdt->fd = &newf->fd_array[0]; 310 311 spin_lock(&oldf->file_lock); 312 old_fdt = files_fdtable(oldf); 313 open_files = sane_fdtable_size(old_fdt, max_fds); 314 315 /* 316 * Check whether we need to allocate a larger fd array and fd set. 317 */ 318 while (unlikely(open_files > new_fdt->max_fds)) { 319 spin_unlock(&oldf->file_lock); 320 321 if (new_fdt != &newf->fdtab) 322 __free_fdtable(new_fdt); 323 324 new_fdt = alloc_fdtable(open_files - 1); 325 if (!new_fdt) { 326 *errorp = -ENOMEM; 327 goto out_release; 328 } 329 330 /* beyond sysctl_nr_open; nothing to do */ 331 if (unlikely(new_fdt->max_fds < open_files)) { 332 __free_fdtable(new_fdt); 333 *errorp = -EMFILE; 334 goto out_release; 335 } 336 337 /* 338 * Reacquire the oldf lock and a pointer to its fd table 339 * who knows it may have a new bigger fd table. We need 340 * the latest pointer. 341 */ 342 spin_lock(&oldf->file_lock); 343 old_fdt = files_fdtable(oldf); 344 open_files = sane_fdtable_size(old_fdt, max_fds); 345 } 346 347 copy_fd_bitmaps(new_fdt, old_fdt, open_files); 348 349 old_fds = old_fdt->fd; 350 new_fds = new_fdt->fd; 351 352 for (i = open_files; i != 0; i--) { 353 struct file *f = *old_fds++; 354 if (f) { 355 get_file(f); 356 } else { 357 /* 358 * The fd may be claimed in the fd bitmap but not yet 359 * instantiated in the files array if a sibling thread 360 * is partway through open(). So make sure that this 361 * fd is available to the new process. 362 */ 363 __clear_open_fd(open_files - i, new_fdt); 364 } 365 rcu_assign_pointer(*new_fds++, f); 366 } 367 spin_unlock(&oldf->file_lock); 368 369 /* clear the remainder */ 370 memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *)); 371 372 rcu_assign_pointer(newf->fdt, new_fdt); 373 374 return newf; 375 376 out_release: 377 kmem_cache_free(files_cachep, newf); 378 out: 379 return NULL; 380 } 381 382 static struct fdtable *close_files(struct files_struct * files) 383 { 384 /* 385 * It is safe to dereference the fd table without RCU or 386 * ->file_lock because this is the last reference to the 387 * files structure. 388 */ 389 struct fdtable *fdt = rcu_dereference_raw(files->fdt); 390 unsigned int i, j = 0; 391 392 for (;;) { 393 unsigned long set; 394 i = j * BITS_PER_LONG; 395 if (i >= fdt->max_fds) 396 break; 397 set = fdt->open_fds[j++]; 398 while (set) { 399 if (set & 1) { 400 struct file * file = xchg(&fdt->fd[i], NULL); 401 if (file) { 402 filp_close(file, files); 403 cond_resched(); 404 } 405 } 406 i++; 407 set >>= 1; 408 } 409 } 410 411 return fdt; 412 } 413 414 void put_files_struct(struct files_struct *files) 415 { 416 if (atomic_dec_and_test(&files->count)) { 417 struct fdtable *fdt = close_files(files); 418 419 /* free the arrays if they are not embedded */ 420 if (fdt != &files->fdtab) 421 __free_fdtable(fdt); 422 kmem_cache_free(files_cachep, files); 423 } 424 } 425 426 void exit_files(struct task_struct *tsk) 427 { 428 struct files_struct * files = tsk->files; 429 430 if (files) { 431 io_uring_files_cancel(files); 432 task_lock(tsk); 433 tsk->files = NULL; 434 task_unlock(tsk); 435 put_files_struct(files); 436 } 437 } 438 439 struct files_struct init_files = { 440 .count = ATOMIC_INIT(1), 441 .fdt = &init_files.fdtab, 442 .fdtab = { 443 .max_fds = NR_OPEN_DEFAULT, 444 .fd = &init_files.fd_array[0], 445 .close_on_exec = init_files.close_on_exec_init, 446 .open_fds = init_files.open_fds_init, 447 .full_fds_bits = init_files.full_fds_bits_init, 448 }, 449 .file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock), 450 .resize_wait = __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait), 451 }; 452 453 static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start) 454 { 455 unsigned int maxfd = fdt->max_fds; 456 unsigned int maxbit = maxfd / BITS_PER_LONG; 457 unsigned int bitbit = start / BITS_PER_LONG; 458 459 bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG; 460 if (bitbit > maxfd) 461 return maxfd; 462 if (bitbit > start) 463 start = bitbit; 464 return find_next_zero_bit(fdt->open_fds, maxfd, start); 465 } 466 467 /* 468 * allocate a file descriptor, mark it busy. 469 */ 470 static int alloc_fd(unsigned start, unsigned end, unsigned flags) 471 { 472 struct files_struct *files = current->files; 473 unsigned int fd; 474 int error; 475 struct fdtable *fdt; 476 477 spin_lock(&files->file_lock); 478 repeat: 479 fdt = files_fdtable(files); 480 fd = start; 481 if (fd < files->next_fd) 482 fd = files->next_fd; 483 484 if (fd < fdt->max_fds) 485 fd = find_next_fd(fdt, fd); 486 487 /* 488 * N.B. For clone tasks sharing a files structure, this test 489 * will limit the total number of files that can be opened. 490 */ 491 error = -EMFILE; 492 if (fd >= end) 493 goto out; 494 495 error = expand_files(files, fd); 496 if (error < 0) 497 goto out; 498 499 /* 500 * If we needed to expand the fs array we 501 * might have blocked - try again. 502 */ 503 if (error) 504 goto repeat; 505 506 if (start <= files->next_fd) 507 files->next_fd = fd + 1; 508 509 __set_open_fd(fd, fdt); 510 if (flags & O_CLOEXEC) 511 __set_close_on_exec(fd, fdt); 512 else 513 __clear_close_on_exec(fd, fdt); 514 error = fd; 515 #if 1 516 /* Sanity check */ 517 if (rcu_access_pointer(fdt->fd[fd]) != NULL) { 518 printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd); 519 rcu_assign_pointer(fdt->fd[fd], NULL); 520 } 521 #endif 522 523 out: 524 spin_unlock(&files->file_lock); 525 return error; 526 } 527 528 int __get_unused_fd_flags(unsigned flags, unsigned long nofile) 529 { 530 return alloc_fd(0, nofile, flags); 531 } 532 533 int get_unused_fd_flags(unsigned flags) 534 { 535 return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE)); 536 } 537 EXPORT_SYMBOL(get_unused_fd_flags); 538 539 static void __put_unused_fd(struct files_struct *files, unsigned int fd) 540 { 541 struct fdtable *fdt = files_fdtable(files); 542 __clear_open_fd(fd, fdt); 543 if (fd < files->next_fd) 544 files->next_fd = fd; 545 } 546 547 void put_unused_fd(unsigned int fd) 548 { 549 struct files_struct *files = current->files; 550 spin_lock(&files->file_lock); 551 __put_unused_fd(files, fd); 552 spin_unlock(&files->file_lock); 553 } 554 555 EXPORT_SYMBOL(put_unused_fd); 556 557 /* 558 * Install a file pointer in the fd array. 559 * 560 * The VFS is full of places where we drop the files lock between 561 * setting the open_fds bitmap and installing the file in the file 562 * array. At any such point, we are vulnerable to a dup2() race 563 * installing a file in the array before us. We need to detect this and 564 * fput() the struct file we are about to overwrite in this case. 565 * 566 * It should never happen - if we allow dup2() do it, _really_ bad things 567 * will follow. 568 * 569 * This consumes the "file" refcount, so callers should treat it 570 * as if they had called fput(file). 571 */ 572 573 void fd_install(unsigned int fd, struct file *file) 574 { 575 struct files_struct *files = current->files; 576 struct fdtable *fdt; 577 578 rcu_read_lock_sched(); 579 580 if (unlikely(files->resize_in_progress)) { 581 rcu_read_unlock_sched(); 582 spin_lock(&files->file_lock); 583 fdt = files_fdtable(files); 584 BUG_ON(fdt->fd[fd] != NULL); 585 rcu_assign_pointer(fdt->fd[fd], file); 586 spin_unlock(&files->file_lock); 587 return; 588 } 589 /* coupled with smp_wmb() in expand_fdtable() */ 590 smp_rmb(); 591 fdt = rcu_dereference_sched(files->fdt); 592 BUG_ON(fdt->fd[fd] != NULL); 593 rcu_assign_pointer(fdt->fd[fd], file); 594 rcu_read_unlock_sched(); 595 } 596 597 EXPORT_SYMBOL(fd_install); 598 599 static struct file *pick_file(struct files_struct *files, unsigned fd) 600 { 601 struct file *file = NULL; 602 struct fdtable *fdt; 603 604 spin_lock(&files->file_lock); 605 fdt = files_fdtable(files); 606 if (fd >= fdt->max_fds) 607 goto out_unlock; 608 file = fdt->fd[fd]; 609 if (!file) 610 goto out_unlock; 611 rcu_assign_pointer(fdt->fd[fd], NULL); 612 __put_unused_fd(files, fd); 613 614 out_unlock: 615 spin_unlock(&files->file_lock); 616 return file; 617 } 618 619 int close_fd(unsigned fd) 620 { 621 struct files_struct *files = current->files; 622 struct file *file; 623 624 file = pick_file(files, fd); 625 if (!file) 626 return -EBADF; 627 628 return filp_close(file, files); 629 } 630 EXPORT_SYMBOL(close_fd); /* for ksys_close() */ 631 632 static inline void __range_cloexec(struct files_struct *cur_fds, 633 unsigned int fd, unsigned int max_fd) 634 { 635 struct fdtable *fdt; 636 637 if (fd > max_fd) 638 return; 639 640 spin_lock(&cur_fds->file_lock); 641 fdt = files_fdtable(cur_fds); 642 bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1); 643 spin_unlock(&cur_fds->file_lock); 644 } 645 646 static inline void __range_close(struct files_struct *cur_fds, unsigned int fd, 647 unsigned int max_fd) 648 { 649 while (fd <= max_fd) { 650 struct file *file; 651 652 file = pick_file(cur_fds, fd++); 653 if (!file) 654 continue; 655 656 filp_close(file, cur_fds); 657 cond_resched(); 658 } 659 } 660 661 /** 662 * __close_range() - Close all file descriptors in a given range. 663 * 664 * @fd: starting file descriptor to close 665 * @max_fd: last file descriptor to close 666 * 667 * This closes a range of file descriptors. All file descriptors 668 * from @fd up to and including @max_fd are closed. 669 */ 670 int __close_range(unsigned fd, unsigned max_fd, unsigned int flags) 671 { 672 unsigned int cur_max; 673 struct task_struct *me = current; 674 struct files_struct *cur_fds = me->files, *fds = NULL; 675 676 if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC)) 677 return -EINVAL; 678 679 if (fd > max_fd) 680 return -EINVAL; 681 682 rcu_read_lock(); 683 cur_max = files_fdtable(cur_fds)->max_fds; 684 rcu_read_unlock(); 685 686 /* cap to last valid index into fdtable */ 687 cur_max--; 688 689 if (flags & CLOSE_RANGE_UNSHARE) { 690 int ret; 691 unsigned int max_unshare_fds = NR_OPEN_MAX; 692 693 /* 694 * If the requested range is greater than the current maximum, 695 * we're closing everything so only copy all file descriptors 696 * beneath the lowest file descriptor. 697 * If the caller requested all fds to be made cloexec copy all 698 * of the file descriptors since they still want to use them. 699 */ 700 if (!(flags & CLOSE_RANGE_CLOEXEC) && (max_fd >= cur_max)) 701 max_unshare_fds = fd; 702 703 ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds); 704 if (ret) 705 return ret; 706 707 /* 708 * We used to share our file descriptor table, and have now 709 * created a private one, make sure we're using it below. 710 */ 711 if (fds) 712 swap(cur_fds, fds); 713 } 714 715 max_fd = min(max_fd, cur_max); 716 717 if (flags & CLOSE_RANGE_CLOEXEC) 718 __range_cloexec(cur_fds, fd, max_fd); 719 else 720 __range_close(cur_fds, fd, max_fd); 721 722 if (fds) { 723 /* 724 * We're done closing the files we were supposed to. Time to install 725 * the new file descriptor table and drop the old one. 726 */ 727 task_lock(me); 728 me->files = cur_fds; 729 task_unlock(me); 730 put_files_struct(fds); 731 } 732 733 return 0; 734 } 735 736 /* 737 * variant of close_fd that gets a ref on the file for later fput. 738 * The caller must ensure that filp_close() called on the file, and then 739 * an fput(). 740 */ 741 int close_fd_get_file(unsigned int fd, struct file **res) 742 { 743 struct files_struct *files = current->files; 744 struct file *file; 745 struct fdtable *fdt; 746 747 spin_lock(&files->file_lock); 748 fdt = files_fdtable(files); 749 if (fd >= fdt->max_fds) 750 goto out_unlock; 751 file = fdt->fd[fd]; 752 if (!file) 753 goto out_unlock; 754 rcu_assign_pointer(fdt->fd[fd], NULL); 755 __put_unused_fd(files, fd); 756 spin_unlock(&files->file_lock); 757 get_file(file); 758 *res = file; 759 return 0; 760 761 out_unlock: 762 spin_unlock(&files->file_lock); 763 *res = NULL; 764 return -ENOENT; 765 } 766 767 void do_close_on_exec(struct files_struct *files) 768 { 769 unsigned i; 770 struct fdtable *fdt; 771 772 /* exec unshares first */ 773 spin_lock(&files->file_lock); 774 for (i = 0; ; i++) { 775 unsigned long set; 776 unsigned fd = i * BITS_PER_LONG; 777 fdt = files_fdtable(files); 778 if (fd >= fdt->max_fds) 779 break; 780 set = fdt->close_on_exec[i]; 781 if (!set) 782 continue; 783 fdt->close_on_exec[i] = 0; 784 for ( ; set ; fd++, set >>= 1) { 785 struct file *file; 786 if (!(set & 1)) 787 continue; 788 file = fdt->fd[fd]; 789 if (!file) 790 continue; 791 rcu_assign_pointer(fdt->fd[fd], NULL); 792 __put_unused_fd(files, fd); 793 spin_unlock(&files->file_lock); 794 filp_close(file, files); 795 cond_resched(); 796 spin_lock(&files->file_lock); 797 } 798 799 } 800 spin_unlock(&files->file_lock); 801 } 802 803 static struct file *__fget_files(struct files_struct *files, unsigned int fd, 804 fmode_t mask, unsigned int refs) 805 { 806 struct file *file; 807 808 rcu_read_lock(); 809 loop: 810 file = files_lookup_fd_rcu(files, fd); 811 if (file) { 812 /* File object ref couldn't be taken. 813 * dup2() atomicity guarantee is the reason 814 * we loop to catch the new file (or NULL pointer) 815 */ 816 if (file->f_mode & mask) 817 file = NULL; 818 else if (!get_file_rcu_many(file, refs)) 819 goto loop; 820 } 821 rcu_read_unlock(); 822 823 return file; 824 } 825 826 static inline struct file *__fget(unsigned int fd, fmode_t mask, 827 unsigned int refs) 828 { 829 return __fget_files(current->files, fd, mask, refs); 830 } 831 832 struct file *fget_many(unsigned int fd, unsigned int refs) 833 { 834 return __fget(fd, FMODE_PATH, refs); 835 } 836 837 struct file *fget(unsigned int fd) 838 { 839 return __fget(fd, FMODE_PATH, 1); 840 } 841 EXPORT_SYMBOL(fget); 842 843 struct file *fget_raw(unsigned int fd) 844 { 845 return __fget(fd, 0, 1); 846 } 847 EXPORT_SYMBOL(fget_raw); 848 849 struct file *fget_task(struct task_struct *task, unsigned int fd) 850 { 851 struct file *file = NULL; 852 853 task_lock(task); 854 if (task->files) 855 file = __fget_files(task->files, fd, 0, 1); 856 task_unlock(task); 857 858 return file; 859 } 860 861 struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd) 862 { 863 /* Must be called with rcu_read_lock held */ 864 struct files_struct *files; 865 struct file *file = NULL; 866 867 task_lock(task); 868 files = task->files; 869 if (files) 870 file = files_lookup_fd_rcu(files, fd); 871 task_unlock(task); 872 873 return file; 874 } 875 876 struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *ret_fd) 877 { 878 /* Must be called with rcu_read_lock held */ 879 struct files_struct *files; 880 unsigned int fd = *ret_fd; 881 struct file *file = NULL; 882 883 task_lock(task); 884 files = task->files; 885 if (files) { 886 for (; fd < files_fdtable(files)->max_fds; fd++) { 887 file = files_lookup_fd_rcu(files, fd); 888 if (file) 889 break; 890 } 891 } 892 task_unlock(task); 893 *ret_fd = fd; 894 return file; 895 } 896 897 /* 898 * Lightweight file lookup - no refcnt increment if fd table isn't shared. 899 * 900 * You can use this instead of fget if you satisfy all of the following 901 * conditions: 902 * 1) You must call fput_light before exiting the syscall and returning control 903 * to userspace (i.e. you cannot remember the returned struct file * after 904 * returning to userspace). 905 * 2) You must not call filp_close on the returned struct file * in between 906 * calls to fget_light and fput_light. 907 * 3) You must not clone the current task in between the calls to fget_light 908 * and fput_light. 909 * 910 * The fput_needed flag returned by fget_light should be passed to the 911 * corresponding fput_light. 912 */ 913 static unsigned long __fget_light(unsigned int fd, fmode_t mask) 914 { 915 struct files_struct *files = current->files; 916 struct file *file; 917 918 if (atomic_read(&files->count) == 1) { 919 file = files_lookup_fd_raw(files, fd); 920 if (!file || unlikely(file->f_mode & mask)) 921 return 0; 922 return (unsigned long)file; 923 } else { 924 file = __fget(fd, mask, 1); 925 if (!file) 926 return 0; 927 return FDPUT_FPUT | (unsigned long)file; 928 } 929 } 930 unsigned long __fdget(unsigned int fd) 931 { 932 return __fget_light(fd, FMODE_PATH); 933 } 934 EXPORT_SYMBOL(__fdget); 935 936 unsigned long __fdget_raw(unsigned int fd) 937 { 938 return __fget_light(fd, 0); 939 } 940 941 unsigned long __fdget_pos(unsigned int fd) 942 { 943 unsigned long v = __fdget(fd); 944 struct file *file = (struct file *)(v & ~3); 945 946 if (file && (file->f_mode & FMODE_ATOMIC_POS)) { 947 if (file_count(file) > 1) { 948 v |= FDPUT_POS_UNLOCK; 949 mutex_lock(&file->f_pos_lock); 950 } 951 } 952 return v; 953 } 954 955 void __f_unlock_pos(struct file *f) 956 { 957 mutex_unlock(&f->f_pos_lock); 958 } 959 960 /* 961 * We only lock f_pos if we have threads or if the file might be 962 * shared with another process. In both cases we'll have an elevated 963 * file count (done either by fdget() or by fork()). 964 */ 965 966 void set_close_on_exec(unsigned int fd, int flag) 967 { 968 struct files_struct *files = current->files; 969 struct fdtable *fdt; 970 spin_lock(&files->file_lock); 971 fdt = files_fdtable(files); 972 if (flag) 973 __set_close_on_exec(fd, fdt); 974 else 975 __clear_close_on_exec(fd, fdt); 976 spin_unlock(&files->file_lock); 977 } 978 979 bool get_close_on_exec(unsigned int fd) 980 { 981 struct files_struct *files = current->files; 982 struct fdtable *fdt; 983 bool res; 984 rcu_read_lock(); 985 fdt = files_fdtable(files); 986 res = close_on_exec(fd, fdt); 987 rcu_read_unlock(); 988 return res; 989 } 990 991 static int do_dup2(struct files_struct *files, 992 struct file *file, unsigned fd, unsigned flags) 993 __releases(&files->file_lock) 994 { 995 struct file *tofree; 996 struct fdtable *fdt; 997 998 /* 999 * We need to detect attempts to do dup2() over allocated but still 1000 * not finished descriptor. NB: OpenBSD avoids that at the price of 1001 * extra work in their equivalent of fget() - they insert struct 1002 * file immediately after grabbing descriptor, mark it larval if 1003 * more work (e.g. actual opening) is needed and make sure that 1004 * fget() treats larval files as absent. Potentially interesting, 1005 * but while extra work in fget() is trivial, locking implications 1006 * and amount of surgery on open()-related paths in VFS are not. 1007 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution" 1008 * deadlocks in rather amusing ways, AFAICS. All of that is out of 1009 * scope of POSIX or SUS, since neither considers shared descriptor 1010 * tables and this condition does not arise without those. 1011 */ 1012 fdt = files_fdtable(files); 1013 tofree = fdt->fd[fd]; 1014 if (!tofree && fd_is_open(fd, fdt)) 1015 goto Ebusy; 1016 get_file(file); 1017 rcu_assign_pointer(fdt->fd[fd], file); 1018 __set_open_fd(fd, fdt); 1019 if (flags & O_CLOEXEC) 1020 __set_close_on_exec(fd, fdt); 1021 else 1022 __clear_close_on_exec(fd, fdt); 1023 spin_unlock(&files->file_lock); 1024 1025 if (tofree) 1026 filp_close(tofree, files); 1027 1028 return fd; 1029 1030 Ebusy: 1031 spin_unlock(&files->file_lock); 1032 return -EBUSY; 1033 } 1034 1035 int replace_fd(unsigned fd, struct file *file, unsigned flags) 1036 { 1037 int err; 1038 struct files_struct *files = current->files; 1039 1040 if (!file) 1041 return close_fd(fd); 1042 1043 if (fd >= rlimit(RLIMIT_NOFILE)) 1044 return -EBADF; 1045 1046 spin_lock(&files->file_lock); 1047 err = expand_files(files, fd); 1048 if (unlikely(err < 0)) 1049 goto out_unlock; 1050 return do_dup2(files, file, fd, flags); 1051 1052 out_unlock: 1053 spin_unlock(&files->file_lock); 1054 return err; 1055 } 1056 1057 /** 1058 * __receive_fd() - Install received file into file descriptor table 1059 * 1060 * @fd: fd to install into (if negative, a new fd will be allocated) 1061 * @file: struct file that was received from another process 1062 * @ufd: __user pointer to write new fd number to 1063 * @o_flags: the O_* flags to apply to the new fd entry 1064 * 1065 * Installs a received file into the file descriptor table, with appropriate 1066 * checks and count updates. Optionally writes the fd number to userspace, if 1067 * @ufd is non-NULL. 1068 * 1069 * This helper handles its own reference counting of the incoming 1070 * struct file. 1071 * 1072 * Returns newly install fd or -ve on error. 1073 */ 1074 int __receive_fd(int fd, struct file *file, int __user *ufd, unsigned int o_flags) 1075 { 1076 int new_fd; 1077 int error; 1078 1079 error = security_file_receive(file); 1080 if (error) 1081 return error; 1082 1083 if (fd < 0) { 1084 new_fd = get_unused_fd_flags(o_flags); 1085 if (new_fd < 0) 1086 return new_fd; 1087 } else { 1088 new_fd = fd; 1089 } 1090 1091 if (ufd) { 1092 error = put_user(new_fd, ufd); 1093 if (error) { 1094 if (fd < 0) 1095 put_unused_fd(new_fd); 1096 return error; 1097 } 1098 } 1099 1100 if (fd < 0) { 1101 fd_install(new_fd, get_file(file)); 1102 } else { 1103 error = replace_fd(new_fd, file, o_flags); 1104 if (error) 1105 return error; 1106 } 1107 1108 /* Bump the sock usage counts, if any. */ 1109 __receive_sock(file); 1110 return new_fd; 1111 } 1112 1113 static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags) 1114 { 1115 int err = -EBADF; 1116 struct file *file; 1117 struct files_struct *files = current->files; 1118 1119 if ((flags & ~O_CLOEXEC) != 0) 1120 return -EINVAL; 1121 1122 if (unlikely(oldfd == newfd)) 1123 return -EINVAL; 1124 1125 if (newfd >= rlimit(RLIMIT_NOFILE)) 1126 return -EBADF; 1127 1128 spin_lock(&files->file_lock); 1129 err = expand_files(files, newfd); 1130 file = files_lookup_fd_locked(files, oldfd); 1131 if (unlikely(!file)) 1132 goto Ebadf; 1133 if (unlikely(err < 0)) { 1134 if (err == -EMFILE) 1135 goto Ebadf; 1136 goto out_unlock; 1137 } 1138 return do_dup2(files, file, newfd, flags); 1139 1140 Ebadf: 1141 err = -EBADF; 1142 out_unlock: 1143 spin_unlock(&files->file_lock); 1144 return err; 1145 } 1146 1147 SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags) 1148 { 1149 return ksys_dup3(oldfd, newfd, flags); 1150 } 1151 1152 SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd) 1153 { 1154 if (unlikely(newfd == oldfd)) { /* corner case */ 1155 struct files_struct *files = current->files; 1156 int retval = oldfd; 1157 1158 rcu_read_lock(); 1159 if (!files_lookup_fd_rcu(files, oldfd)) 1160 retval = -EBADF; 1161 rcu_read_unlock(); 1162 return retval; 1163 } 1164 return ksys_dup3(oldfd, newfd, 0); 1165 } 1166 1167 SYSCALL_DEFINE1(dup, unsigned int, fildes) 1168 { 1169 int ret = -EBADF; 1170 struct file *file = fget_raw(fildes); 1171 1172 if (file) { 1173 ret = get_unused_fd_flags(0); 1174 if (ret >= 0) 1175 fd_install(ret, file); 1176 else 1177 fput(file); 1178 } 1179 return ret; 1180 } 1181 1182 int f_dupfd(unsigned int from, struct file *file, unsigned flags) 1183 { 1184 unsigned long nofile = rlimit(RLIMIT_NOFILE); 1185 int err; 1186 if (from >= nofile) 1187 return -EINVAL; 1188 err = alloc_fd(from, nofile, flags); 1189 if (err >= 0) { 1190 get_file(file); 1191 fd_install(err, file); 1192 } 1193 return err; 1194 } 1195 1196 int iterate_fd(struct files_struct *files, unsigned n, 1197 int (*f)(const void *, struct file *, unsigned), 1198 const void *p) 1199 { 1200 struct fdtable *fdt; 1201 int res = 0; 1202 if (!files) 1203 return 0; 1204 spin_lock(&files->file_lock); 1205 for (fdt = files_fdtable(files); n < fdt->max_fds; n++) { 1206 struct file *file; 1207 file = rcu_dereference_check_fdtable(files, fdt->fd[n]); 1208 if (!file) 1209 continue; 1210 res = f(p, file, n); 1211 if (res) 1212 break; 1213 } 1214 spin_unlock(&files->file_lock); 1215 return res; 1216 } 1217 EXPORT_SYMBOL(iterate_fd); 1218