// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/close_range.h>
#include <net/sock.h>

#include "internal.h"

unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;

static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))

/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any. This does not copy the file pointers. Called with the files
 * spinlock held for write.
 */
static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
			    unsigned int count)
{
	unsigned int cpy, set;

	cpy = count / BITS_PER_BYTE;
	set = (nfdt->max_fds - count) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)nfdt->open_fds + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)nfdt->close_on_exec + cpy, 0, set);

	cpy = BITBIT_SIZE(count);
	set = BITBIT_SIZE(nfdt->max_fds) - cpy;
	memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
	memset((char *)nfdt->full_fds_bits + cpy, 0, set);
}

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space. Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	size_t cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
}

/*
 * Note how the fdtable bitmap allocations very much have to be a multiple of
 * BITS_PER_LONG. This is not only because we walk those things in chunks of
 * 'unsigned long' in some places, but simply because that is how the Linux
 * kernel bitmaps are defined to work: they are not "bits in an array of bytes",
 * they are very much "bits in an array of unsigned long".
 *
 * The ALIGN(nr, BITS_PER_LONG) here is for clarity: since we just multiplied
 * by that "1024/sizeof(ptr)" before, we already know there are sufficient
 * clear low bits. Clang seems to realize that, gcc ends up being confused.
 *
 * On a 128-bit machine, the ALIGN() would actually matter. In the meantime,
 * let's consider it documentation (and maybe a test-case for gcc to improve
 * its code generation ;)
 */
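
/*
 * A worked example of the sizing logic below, assuming a 64-bit build
 * (BITS_PER_LONG == 64, sizeof(struct file *) == 8, so 128 fds per
 * 1024-byte chunk): a request to reach fd 256 becomes
 *
 *	nr = 256 / 128 = 2;
 *	nr = roundup_pow_of_two(2 + 1) = 4;
 *	nr = 4 * 128 = 512;		// ALIGN(512, 64) changes nothing
 *
 * i.e. a 512-slot table whose fd array fills exactly one 4096-byte page.
 */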
static struct fdtable *alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	nr = ALIGN(nr, BITS_PER_LONG);
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here. Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = kvmalloc(max_t(size_t,
			      2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
			GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);

	/*
	 * Make sure all fd_install() calls have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_rcu();

	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * the caller and alloc_fdtable(). Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in fd_install() */
	smp_wmb();
	return 1;
}
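
/*
 * A sketch of how the barrier above pairs with the lockless side of
 * fd_install() (see below); the store to ->resize_in_progress is done
 * by the caller, expand_files():
 *
 *	expand path				fd_install() fast path
 *	-----------				----------------------
 *	rcu_assign_pointer(files->fdt, new)
 *	smp_wmb()
 *	files->resize_in_progress = false
 *						reads resize_in_progress == false
 *						smp_rmb()
 *						rcu_dereference_sched(files->fdt)
 *						  -> guaranteed to see the new table
 */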

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}

static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	if (test_bit(fd, fdt->close_on_exec))
		__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}

static unsigned int count_open_files(struct fdtable *fdt)
{
	unsigned int size = fdt->max_fds;
	unsigned int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}

/*
 * Note that a sane fdtable size always has to be a multiple of
 * BITS_PER_LONG, since we have bitmaps that are sized by this.
 *
 * 'max_fds' will normally already be properly aligned, but it
 * turns out that in the close_range() -> __close_range() ->
 * unshare_fd() -> dup_fd() -> sane_fdtable_size() we can end
 * up having a 'max_fds' value that isn't already aligned.
 *
 * Rather than make close_range() have to worry about this,
 * just make that BITS_PER_LONG alignment be part of a sane
 * fdtable size. Because that's really what it is.
 */
static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds)
{
	unsigned int count;

	count = count_open_files(fdt);
	if (max_fds < NR_OPEN_DEFAULT)
		max_fds = NR_OPEN_DEFAULT;
	return ALIGN(min(count, max_fds), BITS_PER_LONG);
}
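
/*
 * Illustrative numbers for sane_fdtable_size() above, on a 64-bit build
 * (NR_OPEN_DEFAULT == BITS_PER_LONG == 64): with 192 slots in use
 * according to count_open_files() and a close_range()-derived max_fds
 * of 100, min(192, 100) == 100 gets rounded up to ALIGN(100, 64) == 128.
 * Note that count_open_files() already returns a multiple of
 * BITS_PER_LONG; the ALIGN() is there for the unaligned max_fds case.
 */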

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	unsigned int open_files, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = sane_fdtable_size(old_fdt, max_fds);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table; it
		 * may have a new, bigger fd table by now, and we need the
		 * latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = sane_fdtable_size(old_fdt, max_fds);
	}

	copy_fd_bitmaps(new_fdt, old_fdt, open_files);

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open(). So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}
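
/*
 * Roughly how dup_fd() gets used (a sketch, not verbatim from the
 * callers): fork() duplicates the whole table via copy_files(), while
 * unshare(CLONE_FILES) and close_range(..., CLOSE_RANGE_UNSHARE) go
 * through unshare_fd(), along the lines of:
 *
 *	int error;
 *	struct files_struct *copy;
 *
 *	copy = dup_fd(current->files, max_fds, &error);
 *	if (!copy)
 *		return error;
 */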

static struct fdtable *close_files(struct files_struct *files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file *file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct *files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
	.resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};

static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit >= maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}
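
/*
 * Example of the two-level search in find_next_fd() above: if fds 0-63
 * are all open, word 0 of ->open_fds is all ones, so bit 0 of
 * ->full_fds_bits is set. A search starting at fd 10 then jumps
 * straight to fd 64 via the full_fds_bits lookup instead of scanning
 * the 54 set bits above fd 10 in ->open_fds.
 */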

/*
 * allocate a file descriptor, mark it busy.
 */
static int alloc_fd(unsigned start, unsigned end, unsigned flags)
{
	struct files_struct *files = current->files;
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
{
	return alloc_fd(0, nofile, flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
}
EXPORT_SYMBOL(get_unused_fd_flags);

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array. At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us. We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 *
 * This consumes the "file" refcount, so callers should treat it
 * as if they had called fput(file).
 */

void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		spin_lock(&files->file_lock);
		fdt = files_fdtable(files);
		BUG_ON(fdt->fd[fd] != NULL);
		rcu_assign_pointer(fdt->fd[fd], file);
		spin_unlock(&files->file_lock);
		return;
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

EXPORT_SYMBOL(fd_install);
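
/*
 * The canonical reserve-then-publish pattern, sketched (the file
 * constructor here is just a placeholder): the descriptor is visible
 * in the bitmap from get_unused_fd_flags() on, but other threads see
 * a NULL slot until fd_install() makes the file live.
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = some_file_constructor();		// e.g. anon_inode_getfile()
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);
 *	return fd;
 */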

/**
 * pick_file - return file associated with fd
 * @files: file struct to retrieve file from
 * @fd: file descriptor to retrieve file for
 *
 * Context: files_lock must be held.
 *
 * Returns: The file associated with @fd (NULL if @fd is not open)
 */
static struct file *pick_file(struct files_struct *files, unsigned fd)
{
	struct fdtable *fdt = files_fdtable(files);
	struct file *file;

	if (fd >= fdt->max_fds)
		return NULL;

	fd = array_index_nospec(fd, fdt->max_fds);
	file = fdt->fd[fd];
	if (file) {
		rcu_assign_pointer(fdt->fd[fd], NULL);
		__put_unused_fd(files, fd);
	}
	return file;
}

int close_fd(unsigned fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = pick_file(files, fd);
	spin_unlock(&files->file_lock);
	if (!file)
		return -EBADF;

	return filp_close(file, files);
}
EXPORT_SYMBOL(close_fd); /* for ksys_close() */

/**
 * last_fd - return last valid index into fd table
 * @fdt: File descriptor table.
 *
 * Context: Either rcu read lock or files_lock must be held.
 *
 * Returns: Last valid index into fdtable.
 */
static inline unsigned last_fd(struct fdtable *fdt)
{
	return fdt->max_fds - 1;
}

static inline void __range_cloexec(struct files_struct *cur_fds,
				   unsigned int fd, unsigned int max_fd)
{
	struct fdtable *fdt;

	/* make sure we're using the correct maximum value */
	spin_lock(&cur_fds->file_lock);
	fdt = files_fdtable(cur_fds);
	max_fd = min(last_fd(fdt), max_fd);
	if (fd <= max_fd)
		bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
	spin_unlock(&cur_fds->file_lock);
}

static inline void __range_close(struct files_struct *files, unsigned int fd,
				 unsigned int max_fd)
{
	struct file *file;
	unsigned n;

	spin_lock(&files->file_lock);
	n = last_fd(files_fdtable(files));
	max_fd = min(max_fd, n);

	for (; fd <= max_fd; fd++) {
		file = pick_file(files, fd);
		if (file) {
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		} else if (need_resched()) {
			spin_unlock(&files->file_lock);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}
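
/*
 * For orientation, the userspace-visible semantics implemented below:
 * close_range(3, ~0U, 0) closes every descriptor from 3 upwards,
 * close_range(3, ~0U, CLOSE_RANGE_CLOEXEC) marks the same range
 * close-on-exec instead, and adding CLOSE_RANGE_UNSHARE makes the
 * operation act on a private copy of the descriptor table first.
 */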

/**
 * __close_range() - Close all file descriptors in a given range.
 *
 * @fd:     starting file descriptor to close
 * @max_fd: last file descriptor to close
 * @flags:  CLOSE_RANGE flags.
 *
 * This closes a range of file descriptors. All file descriptors
 * from @fd up to and including @max_fd are closed.
 */
int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
{
	struct task_struct *me = current;
	struct files_struct *cur_fds = me->files, *fds = NULL;

	if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC))
		return -EINVAL;

	if (fd > max_fd)
		return -EINVAL;

	if (flags & CLOSE_RANGE_UNSHARE) {
		int ret;
		unsigned int max_unshare_fds = NR_OPEN_MAX;

		/*
		 * If the caller requested all fds to be made cloexec we always
		 * copy all of the file descriptors since they still want to
		 * use them.
		 */
		if (!(flags & CLOSE_RANGE_CLOEXEC)) {
			/*
			 * If the requested range is greater than the current
			 * maximum, we're closing everything, so we only need
			 * to copy the file descriptors beneath the lowest
			 * file descriptor being closed.
			 */
			rcu_read_lock();
			if (max_fd >= last_fd(files_fdtable(cur_fds)))
				max_unshare_fds = fd;
			rcu_read_unlock();
		}

		ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds);
		if (ret)
			return ret;

		/*
		 * We used to share our file descriptor table, and have now
		 * created a private one, make sure we're using it below.
		 */
		if (fds)
			swap(cur_fds, fds);
	}

	if (flags & CLOSE_RANGE_CLOEXEC)
		__range_cloexec(cur_fds, fd, max_fd);
	else
		__range_close(cur_fds, fd, max_fd);

	if (fds) {
		/*
		 * We're done closing the files we were supposed to. Time to
		 * install the new file descriptor table and drop the old one.
		 */
		task_lock(me);
		me->files = cur_fds;
		task_unlock(me);
		put_files_struct(fds);
	}

	return 0;
}

/*
 * See close_fd_get_file() below, this variant assumes current->files->file_lock
 * is held.
 */
struct file *__close_fd_get_file(unsigned int fd)
{
	return pick_file(current->files, fd);
}

/*
 * variant of close_fd that gets a ref on the file for later fput.
 * The caller must ensure that filp_close() is called on the file.
 */
struct file *close_fd_get_file(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = pick_file(files, fd);
	spin_unlock(&files->file_lock);

	return file;
}

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}

	}
	spin_unlock(&files->file_lock);
}
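
/*
 * The race that the lockless lookup below defends against, as a rough
 * timeline (thread B shares our files_struct):
 *
 *	A: fdt = rcu_dereference_raw(files->fdt);
 *	A: file = rcu_dereference_raw(*fdentry);
 *	B: close(fd)  ->  rcu_assign_pointer(fdt->fd[fd], NULL);
 *	B:                filp_close() / fput(), possibly the last ref
 *	A: get_file_rcu(file)  ->  fails, case (a) below, or
 *	A: *fdentry changed    ->  drop our ref and retry, case (b)
 */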

static inline struct file *__fget_files_rcu(struct files_struct *files,
					    unsigned int fd, fmode_t mask)
{
	for (;;) {
		struct file *file;
		struct fdtable *fdt = rcu_dereference_raw(files->fdt);
		struct file __rcu **fdentry;

		if (unlikely(fd >= fdt->max_fds))
			return NULL;

		fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds);
		file = rcu_dereference_raw(*fdentry);
		if (unlikely(!file))
			return NULL;

		if (unlikely(file->f_mode & mask))
			return NULL;

		/*
		 * Ok, we have a file pointer. However, because we do
		 * this all locklessly under RCU, we may be racing with
		 * that file being closed.
		 *
		 * Such a race can take two forms:
		 *
		 *  (a) the file ref already went down to zero,
		 *      and get_file_rcu() fails. Just try again:
		 */
		if (unlikely(!get_file_rcu(file)))
			continue;

		/*
		 *  (b) the file table entry has changed under us.
		 *      Note that we don't need to re-check the 'fdt->fd'
		 *      pointer having changed, because it always goes
		 *      hand-in-hand with 'fdt'.
		 *
		 * If so, we need to put our ref and try again.
		 */
		if (unlikely(rcu_dereference_raw(files->fdt) != fdt) ||
		    unlikely(rcu_dereference_raw(*fdentry) != file)) {
			fput(file);
			continue;
		}

		/*
		 * Ok, we have a ref to the file, and checked that it
		 * still exists.
		 */
		return file;
	}
}

static struct file *__fget_files(struct files_struct *files, unsigned int fd,
				 fmode_t mask)
{
	struct file *file;

	rcu_read_lock();
	file = __fget_files_rcu(files, fd, mask);
	rcu_read_unlock();

	return file;
}

static inline struct file *__fget(unsigned int fd, fmode_t mask)
{
	return __fget_files(current->files, fd, mask);
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);

struct file *fget_task(struct task_struct *task, unsigned int fd)
{
	struct file *file = NULL;

	task_lock(task);
	if (task->files)
		file = __fget_files(task->files, fd, 0);
	task_unlock(task);

	return file;
}

struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files)
		file = files_lookup_fd_rcu(files, fd);
	task_unlock(task);

	return file;
}

struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *ret_fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	unsigned int fd = *ret_fd;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files) {
		for (; fd < files_fdtable(files)->max_fds; fd++) {
			file = files_lookup_fd_rcu(files, fd);
			if (file)
				break;
		}
	}
	task_unlock(task);
	*ret_fd = fd;
	return file;
}
EXPORT_SYMBOL(task_lookup_next_fd_rcu);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	/*
	 * If another thread is concurrently calling close_fd() followed
	 * by put_files_struct(), we must not observe the old table
	 * entry combined with the new refcount - otherwise we could
	 * return a file that is concurrently being freed.
	 *
	 * atomic_read_acquire() pairs with atomic_dec_and_test() in
	 * put_files_struct().
	 */
	if (atomic_read_acquire(&files->count) == 1) {
		file = files_lookup_fd_raw(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}

unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}
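
/*
 * Consumers normally reach __fdget()/__fdget_raw() through the fdget()
 * wrappers in <linux/file.h>; a sketch of the usual shape, where the
 * struct fd must not outlive the syscall:
 *
 *	struct fd f = fdget(fd);
 *
 *	if (!f.file)
 *		return -EBADF;
 *	...
 *	fdput(f);
 */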

/*
 * Try to avoid f_pos locking. We only need it if the
 * file is marked for FMODE_ATOMIC_POS, and it can be
 * accessed multiple ways.
 *
 * Always do it for directories, because pidfd_getfd()
 * can make a file accessible even if it otherwise would
 * not be, and for directories this is a correctness
 * issue, not a "POSIX requirement".
 */
static inline bool file_needs_f_pos_lock(struct file *file)
{
	return (file->f_mode & FMODE_ATOMIC_POS) &&
		(file_count(file) > 1 || file->f_op->iterate_shared);
}

unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && file_needs_f_pos_lock(file)) {
		v |= FDPUT_POS_UNLOCK;
		mutex_lock(&file->f_pos_lock);
	}
	return v;
}

void __f_unlock_pos(struct file *f)
{
	mutex_unlock(&f->f_pos_lock);
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}
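
/*
 * A sketch of the window do_dup2() below refuses to touch: with two
 * threads sharing one descriptor table,
 *
 *	A: fd = get_unused_fd_flags(0);	// bitmap bit set, fd[] slot still NULL
 *	B: dup2(otherfd, fd);		// sees fd_is_open() but tofree == NULL
 *	A: fd_install(fd, file);
 *
 * B gets -EBUSY rather than installing over a slot that A has reserved
 * but not yet filled.
 */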

static int do_dup2(struct files_struct *files,
		   struct file *file, unsigned fd, unsigned flags)
	__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over an allocated but still
	 * not finished descriptor. NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent. Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS. All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	fd = array_index_nospec(fd, fdt->max_fds);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return close_fd(fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

/**
 * __receive_fd() - Install received file into file descriptor table
 * @file: struct file that was received from another process
 * @ufd: __user pointer to write new fd number to
 * @o_flags: the O_* flags to apply to the new fd entry
 *
 * Installs a received file into the file descriptor table, with appropriate
 * checks and count updates. Optionally writes the fd number to userspace, if
 * @ufd is non-NULL.
 *
 * This helper handles its own reference counting of the incoming
 * struct file.
 *
 * Returns the newly installed fd or a negative error code on failure.
 */
int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags)
{
	int new_fd;
	int error;

	error = security_file_receive(file);
	if (error)
		return error;

	new_fd = get_unused_fd_flags(o_flags);
	if (new_fd < 0)
		return new_fd;

	if (ufd) {
		error = put_user(new_fd, ufd);
		if (error) {
			put_unused_fd(new_fd);
			return error;
		}
	}

	fd_install(new_fd, get_file(file));
	__receive_sock(file);
	return new_fd;
}

int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags)
{
	int error;

	error = security_file_receive(file);
	if (error)
		return error;
	error = replace_fd(new_fd, file, o_flags);
	if (error)
		return error;
	__receive_sock(file);
	return new_fd;
}

int receive_fd(struct file *file, unsigned int o_flags)
{
	return __receive_fd(file, NULL, o_flags);
}
EXPORT_SYMBOL_GPL(receive_fd);

static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = files_lookup_fd_locked(files, oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}
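
/*
 * Note the deliberate asymmetry between the two syscalls below:
 * dup3(fd, fd, 0) fails with -EINVAL, while dup2(fd, fd) succeeds and
 * returns fd as long as fd is open - hence the corner-case check in
 * dup2() instead of a straight tail call into ksys_dup3().
 */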
SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	return ksys_dup3(oldfd, newfd, flags);
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!files_lookup_fd_rcu(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return ksys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	unsigned long nofile = rlimit(RLIMIT_NOFILE);
	int err;

	if (from >= nofile)
		return -EINVAL;
	err = alloc_fd(from, nofile, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;

	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);
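
/*
 * A sketch of how iterate_fd() above is meant to be driven - the
 * callback's nonzero return value both stops the walk and becomes
 * iterate_fd()'s return value (the callback name here is made up):
 *
 *	static int match_file(const void *p, struct file *file, unsigned fd)
 *	{
 *		return file == p ? fd + 1 : 0;	// 0 keeps iterating
 *	}
 *
 *	res = iterate_fd(files, 0, match_file, filp);
 */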