// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/close_range.h>
#include <net/sock.h>

#include "internal.h"

unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;

static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))

/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any. This does not copy the file pointers. Called with the files
 * spinlock held for write.
 */
static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
			    unsigned int count)
{
	unsigned int cpy, set;

	cpy = count / BITS_PER_BYTE;
	set = (nfdt->max_fds - count) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)nfdt->open_fds + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)nfdt->close_on_exec + cpy, 0, set);

	cpy = BITBIT_SIZE(count);
	set = BITBIT_SIZE(nfdt->max_fds) - cpy;
	memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
	memset((char *)nfdt->full_fds_bits + cpy, 0, set);
}

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space. Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	size_t cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
}
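/*
 * Worked example of the bitmap sizing above (illustration only, assuming a
 * 64-bit machine, i.e. BITS_PER_LONG == 64): for max_fds == 256, the open_fds
 * and close_on_exec bitmaps each need 256/8 == 32 bytes, while the
 * second-level bitmap needs BITBIT_SIZE(256) ==
 * BITS_TO_LONGS(BITS_TO_LONGS(256)) * sizeof(long) == BITS_TO_LONGS(4) * 8 ==
 * 8 bytes: one bit of full_fds_bits summarizes one 64-bit word of open_fds.
 */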
/*
 * Note how the fdtable bitmap allocations very much have to be a multiple of
 * BITS_PER_LONG. This is not only because we walk those things in chunks of
 * 'unsigned long' in some places, but simply because that is how the Linux
 * kernel bitmaps are defined to work: they are not "bits in an array of bytes",
 * they are very much "bits in an array of unsigned long".
 *
 * The ALIGN(nr, BITS_PER_LONG) here is for clarity: since we just multiplied
 * by that "1024/sizeof(ptr)" before, we already know there are sufficient
 * clear low bits. Clang seems to realize that, gcc ends up being confused.
 *
 * On a 128-bit machine, the ALIGN() would actually matter. In the meantime,
 * let's consider it documentation (and maybe a test-case for gcc to improve
 * its code generation ;)
 */
static struct fdtable * alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	nr = ALIGN(nr, BITS_PER_LONG);
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here. Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = kvmalloc(max_t(size_t,
			      2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
			      GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);

	/* make sure all fd_install() have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_rcu();

	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable(). Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in fd_install() */
	smp_wmb();
	return 1;
}
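/*
 * Illustration of the sizing logic in alloc_fdtable() (hypothetical numbers,
 * assuming 64-bit, so sizeof(struct file *) == 8 and 1024/8 == 128 fds per
 * 1024-byte chunk): a request for nr == 300 gives 300/128 == 2, then
 * roundup_pow_of_two(3) == 4, then 4 * 128 == 512, so the new table holds
 * 512 fds and its fd array occupies exactly 4096 bytes - one 4KiB page.
 */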
/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}

static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	if (test_bit(fd, fdt->close_on_exec))
		__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}

static unsigned int count_open_files(struct fdtable *fdt)
{
	unsigned int size = fdt->max_fds;
	unsigned int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}

/*
 * Note that a sane fdtable size always has to be a multiple of
 * BITS_PER_LONG, since we have bitmaps that are sized by this.
 *
 * 'max_fds' will normally already be properly aligned, but it
 * turns out that in the close_range() -> __close_range() ->
 * unshare_fd() -> dup_fd() -> sane_fdtable_size() we can end
 * up having a 'max_fds' value that isn't already aligned.
 *
 * Rather than make close_range() have to worry about this,
 * just make that BITS_PER_LONG alignment be part of a sane
 * fdtable size. Because that's really what it is.
 */
static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds)
{
	unsigned int count;

	count = count_open_files(fdt);
	if (max_fds < NR_OPEN_DEFAULT)
		max_fds = NR_OPEN_DEFAULT;
	return ALIGN(min(count, max_fds), BITS_PER_LONG);
}
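/*
 * Example (illustrative, 64-bit, so NR_OPEN_DEFAULT == BITS_PER_LONG == 64):
 * if count_open_files() reports 256 slots in use but the caller asked for at
 * most max_fds == 100, we return ALIGN(min(256, 100), 64) == 128 - large
 * enough for the requested range, and still bitmap-aligned.
 */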
/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	unsigned int open_files, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = sane_fdtable_size(old_fdt, max_fds);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table.
		 * Who knows, it may have grown a new, bigger fd table in
		 * the meantime - we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = sane_fdtable_size(old_fdt, max_fds);
	}

	copy_fd_bitmaps(new_fdt, old_fdt, open_files);

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open(). So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}
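/*
 * Sketch of how dup_fd() is typically reached (a summary of the callers as
 * of this code, not something enforced here): fork()/clone() without
 * CLONE_FILES goes through copy_files() in kernel/fork.c as
 * dup_fd(oldf, NR_OPEN_MAX, &error), copying everything; whereas
 * close_range(..., CLOSE_RANGE_UNSHARE) goes through unshare_fd() with a
 * possibly smaller max_fds, so descriptors about to be closed anyway are
 * never copied into the new private table.
 */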
static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
	.resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};

static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds;
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit > maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}
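/*
 * Example of the two-level search above (illustrative, 64-bit): if fds 0-127
 * are all open, words 0 and 1 of open_fds are full, so bits 0 and 1 of
 * full_fds_bits are set. A search starting at fd 10 consults full_fds_bits
 * first and jumps straight to word 2 (fd 128), instead of scanning 118 set
 * bits in open_fds one at a time.
 */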
/*
 * allocate a file descriptor, mark it busy.
 */
static int alloc_fd(unsigned start, unsigned end, unsigned flags)
{
	struct files_struct *files = current->files;
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
{
	return alloc_fd(0, nofile, flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
}
EXPORT_SYMBOL(get_unused_fd_flags);

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);
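/*
 * Typical pattern for code that hands a new file to userspace (a sketch, not
 * code from this file; anon_inode_getfile() is just one example of a
 * struct file producer):
 *
 *	int fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = anon_inode_getfile(...);
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);
 *	return fd;
 *
 * The reserved slot stays NULL until fd_install() below publishes the file;
 * put_unused_fd() releases the slot again on the error path.
 */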
/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array. At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us. We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 *
 * This consumes the "file" refcount, so callers should treat it
 * as if they had called fput(file).
 */

void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		spin_lock(&files->file_lock);
		fdt = files_fdtable(files);
		BUG_ON(fdt->fd[fd] != NULL);
		rcu_assign_pointer(fdt->fd[fd], file);
		spin_unlock(&files->file_lock);
		return;
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

EXPORT_SYMBOL(fd_install);

/**
 * pick_file - return file associated with fd
 * @files: file struct to retrieve file from
 * @fd: file descriptor to retrieve file for
 *
 * Context: files_lock must be held.
 *
 * Returns: The file associated with @fd (NULL if @fd is not open)
 */
static struct file *pick_file(struct files_struct *files, unsigned fd)
{
	struct fdtable *fdt = files_fdtable(files);
	struct file *file;

	if (fd >= fdt->max_fds)
		return NULL;

	fd = array_index_nospec(fd, fdt->max_fds);
	file = fdt->fd[fd];
	if (file) {
		rcu_assign_pointer(fdt->fd[fd], NULL);
		__put_unused_fd(files, fd);
	}
	return file;
}

int close_fd(unsigned fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = pick_file(files, fd);
	spin_unlock(&files->file_lock);
	if (!file)
		return -EBADF;

	return filp_close(file, files);
}
EXPORT_SYMBOL(close_fd); /* for ksys_close() */
/**
 * last_fd - return last valid index into fd table
 * @fdt: the file descriptor table
 *
 * Context: Either rcu read lock or files_lock must be held.
 *
 * Returns: Last valid index into fdtable.
 */
static inline unsigned last_fd(struct fdtable *fdt)
{
	return fdt->max_fds - 1;
}

static inline void __range_cloexec(struct files_struct *cur_fds,
				   unsigned int fd, unsigned int max_fd)
{
	struct fdtable *fdt;

	/* make sure we're using the correct maximum value */
	spin_lock(&cur_fds->file_lock);
	fdt = files_fdtable(cur_fds);
	max_fd = min(last_fd(fdt), max_fd);
	if (fd <= max_fd)
		bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
	spin_unlock(&cur_fds->file_lock);
}

static inline void __range_close(struct files_struct *cur_fds, unsigned int fd,
				 unsigned int max_fd)
{
	unsigned n;

	rcu_read_lock();
	n = last_fd(files_fdtable(cur_fds));
	rcu_read_unlock();
	max_fd = min(max_fd, n);

	while (fd <= max_fd) {
		struct file *file;

		spin_lock(&cur_fds->file_lock);
		file = pick_file(cur_fds, fd++);
		spin_unlock(&cur_fds->file_lock);

		if (file) {
			/* found a valid file to close */
			filp_close(file, cur_fds);
			cond_resched();
		}
	}
}

/**
 * __close_range() - Close all file descriptors in a given range.
 *
 * @fd:     starting file descriptor to close
 * @max_fd: last file descriptor to close
 * @flags:  CLOSE_RANGE flags
 *
 * This closes a range of file descriptors. All file descriptors
 * from @fd up to and including @max_fd are closed.
 */
int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
{
	struct task_struct *me = current;
	struct files_struct *cur_fds = me->files, *fds = NULL;

	if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC))
		return -EINVAL;

	if (fd > max_fd)
		return -EINVAL;

	if (flags & CLOSE_RANGE_UNSHARE) {
		int ret;
		unsigned int max_unshare_fds = NR_OPEN_MAX;

		/*
		 * If the caller requested all fds to be made cloexec we always
		 * copy all of the file descriptors since they still want to
		 * use them.
		 */
		if (!(flags & CLOSE_RANGE_CLOEXEC)) {
			/*
			 * If the requested range is greater than the current
			 * maximum, we're closing everything so only copy all
			 * file descriptors beneath the lowest file descriptor.
			 */
			rcu_read_lock();
			if (max_fd >= last_fd(files_fdtable(cur_fds)))
				max_unshare_fds = fd;
			rcu_read_unlock();
		}

		ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds);
		if (ret)
			return ret;

		/*
		 * We used to share our file descriptor table, and have now
		 * created a private one, make sure we're using it below.
		 */
		if (fds)
			swap(cur_fds, fds);
	}

	if (flags & CLOSE_RANGE_CLOEXEC)
		__range_cloexec(cur_fds, fd, max_fd);
	else
		__range_close(cur_fds, fd, max_fd);

	if (fds) {
		/*
		 * We're done closing the files we were supposed to. Time to install
		 * the new file descriptor table and drop the old one.
		 */
		task_lock(me);
		me->files = cur_fds;
		task_unlock(me);
		put_files_struct(fds);
	}

	return 0;
}
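/*
 * Userspace view of the above (illustrative): a sandbox that wants to drop
 * every inherited descriptor above stderr, without disturbing a possibly
 * shared fd table, can call
 *
 *	close_range(3, ~0U, CLOSE_RANGE_UNSHARE);
 *
 * which lands here with max_fd == UINT_MAX; since that exceeds last_fd(),
 * max_unshare_fds becomes fd (3), so only fds 0-2 are copied into the new
 * private table and nothing above them ever has to be closed one by one.
 */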
/*
 * See close_fd_get_file() below, this variant assumes current->files->file_lock
 * is held.
 */
struct file *__close_fd_get_file(unsigned int fd)
{
	return pick_file(current->files, fd);
}

/*
 * variant of close_fd that gets a ref on the file for later fput.
 * The caller must ensure that filp_close() is called on the file.
 */
struct file *close_fd_get_file(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = pick_file(files, fd);
	spin_unlock(&files->file_lock);

	return file;
}

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}

	}
	spin_unlock(&files->file_lock);
}

static inline struct file *__fget_files_rcu(struct files_struct *files,
	unsigned int fd, fmode_t mask)
{
	for (;;) {
		struct file *file;
		struct fdtable *fdt = rcu_dereference_raw(files->fdt);
		struct file __rcu **fdentry;

		if (unlikely(fd >= fdt->max_fds))
			return NULL;

		fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds);
		file = rcu_dereference_raw(*fdentry);
		if (unlikely(!file))
			return NULL;

		if (unlikely(file->f_mode & mask))
			return NULL;

		/*
		 * Ok, we have a file pointer. However, because we do
		 * this all locklessly under RCU, we may be racing with
		 * that file being closed.
		 *
		 * Such a race can take two forms:
		 *
		 *  (a) the file ref already went down to zero,
		 *      and get_file_rcu() fails. Just try again:
		 */
		if (unlikely(!get_file_rcu(file)))
			continue;

		/*
		 *  (b) the file table entry has changed under us.
		 *      Note that we don't need to re-check the 'fdt->fd'
		 *      pointer having changed, because it always goes
		 *      hand-in-hand with 'fdt'.
		 *
		 * If so, we need to put our ref and try again.
		 */
		if (unlikely(rcu_dereference_raw(files->fdt) != fdt) ||
		    unlikely(rcu_dereference_raw(*fdentry) != file)) {
			fput(file);
			continue;
		}

		/*
		 * Ok, we have a ref to the file, and checked that it
		 * still exists.
		 */
		return file;
	}
}
static struct file *__fget_files(struct files_struct *files, unsigned int fd,
				 fmode_t mask)
{
	struct file *file;

	rcu_read_lock();
	file = __fget_files_rcu(files, fd, mask);
	rcu_read_unlock();

	return file;
}

static inline struct file *__fget(unsigned int fd, fmode_t mask)
{
	return __fget_files(current->files, fd, mask);
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);

struct file *fget_task(struct task_struct *task, unsigned int fd)
{
	struct file *file = NULL;

	task_lock(task);
	if (task->files)
		file = __fget_files(task->files, fd, 0);
	task_unlock(task);

	return file;
}

struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files)
		file = files_lookup_fd_rcu(files, fd);
	task_unlock(task);

	return file;
}

struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *ret_fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	unsigned int fd = *ret_fd;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files) {
		for (; fd < files_fdtable(files)->max_fds; fd++) {
			file = files_lookup_fd_rcu(files, fd);
			if (file)
				break;
		}
	}
	task_unlock(task);
	*ret_fd = fd;
	return file;
}
EXPORT_SYMBOL(task_lookup_next_fd_rcu);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	/*
	 * If another thread is concurrently calling close_fd() followed
	 * by put_files_struct(), we must not observe the old table
	 * entry combined with the new refcount - otherwise we could
	 * return a file that is concurrently being freed.
	 *
	 * atomic_read_acquire() pairs with atomic_dec_and_test() in
	 * put_files_struct().
	 */
	if (atomic_read_acquire(&files->count) == 1) {
		file = files_lookup_fd_raw(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}
unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}

unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
		if (file_count(file) > 1) {
			v |= FDPUT_POS_UNLOCK;
			mutex_lock(&file->f_pos_lock);
		}
	}
	return v;
}

void __f_unlock_pos(struct file *f)
{
	mutex_unlock(&f->f_pos_lock);
}
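/*
 * How the packing above works (illustrative): struct file comes from a slab
 * cache, so its address is at least word-aligned and the low two bits of the
 * returned value are free to carry FDPUT_FPUT and FDPUT_POS_UNLOCK. Callers
 * normally unpack through the struct fd helpers in <linux/file.h>:
 *
 *	struct fd f = fdget_pos(fd);	// wraps __fdget_pos()
 *	if (!f.file)
 *		return -EBADF;
 *	...
 *	fdput_pos(f);			// fputs/unlocks only if flagged
 */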
/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over an allocated but still
	 * not finished descriptor. NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent. Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS. All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return close_fd(fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

/**
 * __receive_fd() - Install received file into file descriptor table
 * @file: struct file that was received from another process
 * @ufd: __user pointer to write new fd number to
 * @o_flags: the O_* flags to apply to the new fd entry
 *
 * Installs a received file into the file descriptor table, with appropriate
 * checks and count updates. Optionally writes the fd number to userspace, if
 * @ufd is non-NULL.
 *
 * This helper handles its own reference counting of the incoming
 * struct file.
 *
 * Returns the newly installed fd or -ve on error.
 */
int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags)
{
	int new_fd;
	int error;

	error = security_file_receive(file);
	if (error)
		return error;

	new_fd = get_unused_fd_flags(o_flags);
	if (new_fd < 0)
		return new_fd;

	if (ufd) {
		error = put_user(new_fd, ufd);
		if (error) {
			put_unused_fd(new_fd);
			return error;
		}
	}

	fd_install(new_fd, get_file(file));
	__receive_sock(file);
	return new_fd;
}

int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags)
{
	int error;

	error = security_file_receive(file);
	if (error)
		return error;
	error = replace_fd(new_fd, file, o_flags);
	if (error)
		return error;
	__receive_sock(file);
	return new_fd;
}

int receive_fd(struct file *file, unsigned int o_flags)
{
	return __receive_fd(file, NULL, o_flags);
}
EXPORT_SYMBOL_GPL(receive_fd);

static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = files_lookup_fd_locked(files, oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}
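/*
 * Note on the dup2()/dup3() split below: dup3() rejects oldfd == newfd with
 * -EINVAL (checked in ksys_dup3() above), while dup2() must keep the
 * historical behaviour of returning newfd unchanged when the two are equal
 * and oldfd is valid - hence the special case in SYSCALL_DEFINE2(dup2)
 * rather than a straight call to ksys_dup3().
 */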
SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	return ksys_dup3(oldfd, newfd, flags);
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!files_lookup_fd_rcu(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return ksys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	unsigned long nofile = rlimit(RLIMIT_NOFILE);
	int err;
	if (from >= nofile)
		return -EINVAL;
	err = alloc_fd(from, nofile, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);
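/*
 * Sketch of an iterate_fd() callback (hypothetical example, not from this
 * file): iteration stops at the first non-zero return, so a caller can search
 * for a matching file and pass state in through @p:
 *
 *	static int match_file(const void *p, struct file *file, unsigned fd)
 *	{
 *		return file == p ? fd + 1 : 0;	// +1 so fd 0 isn't "no match"
 *	}
 *
 *	int fd_plus_one = iterate_fd(files, 0, match_file, filp);
 *
 * Callbacks run under ->file_lock, so they must not sleep.
 */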