/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

int sysctl_nr_open __read_mostly = 1024*1024;
int sysctl_nr_open_min = BITS_PER_LONG;
/* our max() is unusable in constant expressions ;-/ */
#define __const_max(x, y) ((x) < (y) ? (x) : (y))
int sysctl_nr_open_max = __const_max(INT_MAX, ~(size_t)0/sizeof(void *)) &
			 -BITS_PER_LONG;

static void *alloc_fdmem(size_t size)
{
	/*
	 * Very large allocations can stress page reclaim, so fall back to
	 * vmalloc() if the allocation size will be considered "large" by
	 * the VM.
	 */
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN|__GFP_NORETRY);
		if (data != NULL)
			return data;
	}
	return vmalloc(size);
}

static void __free_fdtable(struct fdtable *fdt)
{
	/* kvfree() copes with both kmalloc()ed and vmalloc()ed memory */
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

/*
 * Copy the existing fdtable into the new, larger one, zeroing the extra
 * slots. Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)(nfdt->fd) + cpy, 0, set);

	cpy = ofdt->max_fds / BITS_PER_BYTE;
	set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)(nfdt->open_fds) + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
}
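/*
 * Worked example of the sizing logic in alloc_fdtable() below
 * (illustrative only, assuming a 64-bit machine with 4K pages, where
 * sizeof(struct file *) == 8 and 1024 / 8 == 128): a request for
 * nr = 300 becomes 300 / 128 = 2, then roundup_pow_of_two(2 + 1) = 4,
 * then 4 * 128 = 512 fds.  The fd array then occupies 512 * 8 = 4096
 * bytes - exactly one page.
 */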
static struct fdtable *alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = alloc_fdmem(nr * sizeof(struct file *));
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	/* the open_fds and close_on_exec bitmaps share one allocation */
	data = alloc_fdmem(max_t(size_t,
				 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);

	/* make sure all __fd_install() have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_sched();

	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in __fd_install() */
	smp_wmb();
	return 1;
}
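/*
 * Illustrative caller pattern for expand_files() below (see __alloc_fd()
 * further down for the real thing): a negative return is an error, 0
 * means the table was already big enough, and 1 means the lock may have
 * been dropped to expand, so any fdtable state read earlier must be
 * revalidated:
 *
 *	error = expand_files(files, fd);
 *	if (error < 0)
 *		goto out;
 *	if (error)
 *		goto repeat;
 */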
/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}

static inline void __set_close_on_exec(int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
}

static inline void __clear_open_fd(int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
}

static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fds;
	int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}
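/*
 * Worked example (illustrative, 64-bit): with fds 0 and 70 open, the
 * highest non-zero bitmap word is open_fds[1], so count_open_files()
 * returns (1 + 1) * 64 = 128 - i.e. the count is rounded up to a whole
 * bitmap word rather than being the exact number of open descriptors.
 */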
/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table; it
		 * may have grown a new, bigger fd table meanwhile, and we
		 * need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = count_open_files(old_fdt);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	memcpy(new_fdt->open_fds, old_fdt->open_fds, open_files / 8);
	memcpy(new_fdt->close_on_exec, old_fdt->close_on_exec, open_files / 8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long word aligned, thus could use an optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fds > open_files) {
		int left = (new_fdt->max_fds - open_files) / 8;
		int start = open_files / BITS_PER_LONG;

		memset(&new_fdt->open_fds[start], 0, left);
		memset(&new_fdt->close_on_exec[start], 0, left);
	}

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static struct fdtable *close_files(struct files_struct *files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file *file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched_rcu_qs();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct *files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
};
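/*
 * Typical use of get_files_struct()/put_files_struct() above when
 * peeking at another task's descriptor table (illustrative sketch,
 * error handling elided):
 *
 *	struct files_struct *files = get_files_struct(task);
 *	if (files) {
 *		... inspect the table, e.g. under rcu_read_lock() ...
 *		put_files_struct(files);
 *	}
 */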
/*
 * allocate a file descriptor, mark it busy.
 */
int __alloc_fd(struct files_struct *files,
	       unsigned start, unsigned end, unsigned flags)
{
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

static int alloc_fd(unsigned start, unsigned flags)
{
	return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
}
EXPORT_SYMBOL(get_unused_fd_flags);

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);
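/*
 * The canonical way to hand a new file to userspace is to pair
 * get_unused_fd_flags() above with fd_install() below, undoing the
 * reservation with put_unused_fd() on failure.  Illustrative sketch
 * only; "example_fops" and "priv" are placeholders, not part of this
 * file:
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = anon_inode_getfile("[example]", &example_fops, priv, O_RDWR);
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);
 *	return fd;
 */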
/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 *
 * NOTE: __fd_install() variant is really, really low-level; don't
 * use it unless you are forced to by truly lousy API shoved down
 * your throat.  'files' *MUST* be either current->files or obtained
 * by get_files_struct(current) done by whoever had given it to you,
 * or really bad things will happen.  Normally you want to use
 * fd_install() instead.
 */

void __fd_install(struct files_struct *files, unsigned int fd,
		  struct file *file)
{
	struct fdtable *fdt;

	might_sleep();
	rcu_read_lock_sched();

	while (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		wait_event(files->resize_wait, !files->resize_in_progress);
		rcu_read_lock_sched();
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

void fd_install(unsigned int fd, struct file *file)
{
	__fd_install(current->files, fd, file);
}

EXPORT_SYMBOL(fd_install);

/*
 * The same warnings as for __alloc_fd()/__fd_install() apply here...
 */
int __close_fd(struct files_struct *files, unsigned fd)
{
	struct file *file;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	file = fdt->fd[fd];
	if (!file)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__clear_close_on_exec(fd, fdt);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	return filp_close(file, files);

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

static struct file *__fget(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	rcu_read_lock();
loop:
	file = fcheck_files(files, fd);
	if (file) {
		/* File object ref couldn't be taken.
		 * dup2() atomicity guarantee is the reason
		 * we loop to catch the new file (or NULL pointer)
		 */
		if (file->f_mode & mask)
			file = NULL;
		else if (!get_file_rcu(file))
			goto loop;
	}
	rcu_read_unlock();

	return file;
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);
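/*
 * fget() takes a real reference on the struct file, so every successful
 * call must be paired with fput() (illustrative sketch):
 *
 *	struct file *file = fget(fd);
 *	if (!file)
 *		return -EBADF;
 *	... use file; it cannot go away under us ...
 *	fput(file);
 */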
/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	if (atomic_read(&files->count) == 1) {
		file = __fcheck_files(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}

unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}

unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
		if (file_count(file) > 1) {
			v |= FDPUT_POS_UNLOCK;
			mutex_lock(&file->f_pos_lock);
		}
	}
	return v;
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process.  In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}
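/*
 * Most syscalls should use the fdget()/fdput() wrappers (defined in
 * <linux/file.h>) around __fdget() above rather than raw fget()/fput(),
 * since they skip the refcount when the table is unshared.
 * Illustrative sketch:
 *
 *	struct fd f = fdget(fd);
 *	if (!f.file)
 *		return -EBADF;
 *	... use f.file within this syscall only ...
 *	fdput(f);
 */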
static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
	__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over an allocated but still
	 * not finished descriptor.  NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent.  Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return __close_fd(files, fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = fcheck(oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!fcheck_files(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return sys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	int err;
	if (from >= rlimit(RLIMIT_NOFILE))
		return -EINVAL;
	err = alloc_fd(from, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);
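/*
 * Example iterate_fd() callback (illustrative, not part of this file):
 * find the first descriptor open on a given inode.  Returning non-zero
 * from the callback stops the walk; fd + 1 is returned instead of fd so
 * that descriptor 0 is not mistaken for "not found":
 *
 *	static int match_inode(const void *p, struct file *file, unsigned fd)
 *	{
 *		return file_inode(file) == p ? fd + 1 : 0;
 *	}
 *
 *	res = iterate_fd(files, 0, match_inode, inode);
 */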