1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/fs/exec.c 4 * 5 * Copyright (C) 1991, 1992 Linus Torvalds 6 */ 7 8 /* 9 * #!-checking implemented by tytso. 10 */ 11 /* 12 * Demand-loading implemented 01.12.91 - no need to read anything but 13 * the header into memory. The inode of the executable is put into 14 * "current->executable", and page faults do the actual loading. Clean. 15 * 16 * Once more I can proudly say that linux stood up to being changed: it 17 * was less than 2 hours work to get demand-loading completely implemented. 18 * 19 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead, 20 * current->executable is only used by the procfs. This allows a dispatch 21 * table to check for several different types of binary formats. We keep 22 * trying until we recognize the file or we run out of supported binary 23 * formats. 24 */ 25 26 #include <linux/kernel_read_file.h> 27 #include <linux/slab.h> 28 #include <linux/file.h> 29 #include <linux/fdtable.h> 30 #include <linux/mm.h> 31 #include <linux/vmacache.h> 32 #include <linux/stat.h> 33 #include <linux/fcntl.h> 34 #include <linux/swap.h> 35 #include <linux/string.h> 36 #include <linux/init.h> 37 #include <linux/sched/mm.h> 38 #include <linux/sched/coredump.h> 39 #include <linux/sched/signal.h> 40 #include <linux/sched/numa_balancing.h> 41 #include <linux/sched/task.h> 42 #include <linux/pagemap.h> 43 #include <linux/perf_event.h> 44 #include <linux/highmem.h> 45 #include <linux/spinlock.h> 46 #include <linux/key.h> 47 #include <linux/personality.h> 48 #include <linux/binfmts.h> 49 #include <linux/utsname.h> 50 #include <linux/pid_namespace.h> 51 #include <linux/module.h> 52 #include <linux/namei.h> 53 #include <linux/mount.h> 54 #include <linux/security.h> 55 #include <linux/syscalls.h> 56 #include <linux/tsacct_kern.h> 57 #include <linux/cn_proc.h> 58 #include <linux/audit.h> 59 #include <linux/kmod.h> 60 #include <linux/fsnotify.h> 61 #include <linux/fs_struct.h> 62 #include <linux/oom.h> 63 #include <linux/compat.h> 64 #include <linux/vmalloc.h> 65 #include <linux/io_uring.h> 66 #include <linux/syscall_user_dispatch.h> 67 #include <linux/coredump.h> 68 #include <linux/time_namespace.h> 69 70 #include <linux/uaccess.h> 71 #include <asm/mmu_context.h> 72 #include <asm/tlb.h> 73 74 #include <trace/events/task.h> 75 #include "internal.h" 76 77 #include <trace/events/sched.h> 78 79 static int bprm_creds_from_file(struct linux_binprm *bprm); 80 81 int suid_dumpable = 0; 82 83 static LIST_HEAD(formats); 84 static DEFINE_RWLOCK(binfmt_lock); 85 86 void __register_binfmt(struct linux_binfmt * fmt, int insert) 87 { 88 write_lock(&binfmt_lock); 89 insert ? list_add(&fmt->lh, &formats) : 90 list_add_tail(&fmt->lh, &formats); 91 write_unlock(&binfmt_lock); 92 } 93 94 EXPORT_SYMBOL(__register_binfmt); 95 96 void unregister_binfmt(struct linux_binfmt * fmt) 97 { 98 write_lock(&binfmt_lock); 99 list_del(&fmt->lh); 100 write_unlock(&binfmt_lock); 101 } 102 103 EXPORT_SYMBOL(unregister_binfmt); 104 105 static inline void put_binfmt(struct linux_binfmt * fmt) 106 { 107 module_put(fmt->module); 108 } 109 110 bool path_noexec(const struct path *path) 111 { 112 return (path->mnt->mnt_flags & MNT_NOEXEC) || 113 (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC); 114 } 115 116 #ifdef CONFIG_USELIB 117 /* 118 * Note that a shared library must be both readable and executable due to 119 * security reasons. 120 * 121 * Also note that we take the address to load from the file itself. 122 */ 123 SYSCALL_DEFINE1(uselib, const char __user *, library) 124 { 125 struct linux_binfmt *fmt; 126 struct file *file; 127 struct filename *tmp = getname(library); 128 int error = PTR_ERR(tmp); 129 static const struct open_flags uselib_flags = { 130 .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC, 131 .acc_mode = MAY_READ | MAY_EXEC, 132 .intent = LOOKUP_OPEN, 133 .lookup_flags = LOOKUP_FOLLOW, 134 }; 135 136 if (IS_ERR(tmp)) 137 goto out; 138 139 file = do_filp_open(AT_FDCWD, tmp, &uselib_flags); 140 putname(tmp); 141 error = PTR_ERR(file); 142 if (IS_ERR(file)) 143 goto out; 144 145 /* 146 * may_open() has already checked for this, so it should be 147 * impossible to trip now. But we need to be extra cautious 148 * and check again at the very end too. 149 */ 150 error = -EACCES; 151 if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) || 152 path_noexec(&file->f_path))) 153 goto exit; 154 155 fsnotify_open(file); 156 157 error = -ENOEXEC; 158 159 read_lock(&binfmt_lock); 160 list_for_each_entry(fmt, &formats, lh) { 161 if (!fmt->load_shlib) 162 continue; 163 if (!try_module_get(fmt->module)) 164 continue; 165 read_unlock(&binfmt_lock); 166 error = fmt->load_shlib(file); 167 read_lock(&binfmt_lock); 168 put_binfmt(fmt); 169 if (error != -ENOEXEC) 170 break; 171 } 172 read_unlock(&binfmt_lock); 173 exit: 174 fput(file); 175 out: 176 return error; 177 } 178 #endif /* #ifdef CONFIG_USELIB */ 179 180 #ifdef CONFIG_MMU 181 /* 182 * The nascent bprm->mm is not visible until exec_mmap() but it can 183 * use a lot of memory, account these pages in current->mm temporary 184 * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we 185 * change the counter back via acct_arg_size(0). 186 */ 187 static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages) 188 { 189 struct mm_struct *mm = current->mm; 190 long diff = (long)(pages - bprm->vma_pages); 191 192 if (!mm || !diff) 193 return; 194 195 bprm->vma_pages = pages; 196 add_mm_counter(mm, MM_ANONPAGES, diff); 197 } 198 199 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, 200 int write) 201 { 202 struct page *page; 203 int ret; 204 unsigned int gup_flags = FOLL_FORCE; 205 206 #ifdef CONFIG_STACK_GROWSUP 207 if (write) { 208 ret = expand_downwards(bprm->vma, pos); 209 if (ret < 0) 210 return NULL; 211 } 212 #endif 213 214 if (write) 215 gup_flags |= FOLL_WRITE; 216 217 /* 218 * We are doing an exec(). 'current' is the process 219 * doing the exec and bprm->mm is the new process's mm. 220 */ 221 mmap_read_lock(bprm->mm); 222 ret = get_user_pages_remote(bprm->mm, pos, 1, gup_flags, 223 &page, NULL, NULL); 224 mmap_read_unlock(bprm->mm); 225 if (ret <= 0) 226 return NULL; 227 228 if (write) 229 acct_arg_size(bprm, vma_pages(bprm->vma)); 230 231 return page; 232 } 233 234 static void put_arg_page(struct page *page) 235 { 236 put_page(page); 237 } 238 239 static void free_arg_pages(struct linux_binprm *bprm) 240 { 241 } 242 243 static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos, 244 struct page *page) 245 { 246 flush_cache_page(bprm->vma, pos, page_to_pfn(page)); 247 } 248 249 static int __bprm_mm_init(struct linux_binprm *bprm) 250 { 251 int err; 252 struct vm_area_struct *vma = NULL; 253 struct mm_struct *mm = bprm->mm; 254 255 bprm->vma = vma = vm_area_alloc(mm); 256 if (!vma) 257 return -ENOMEM; 258 vma_set_anonymous(vma); 259 260 if (mmap_write_lock_killable(mm)) { 261 err = -EINTR; 262 goto err_free; 263 } 264 265 /* 266 * Place the stack at the largest stack address the architecture 267 * supports. Later, we'll move this to an appropriate place. We don't 268 * use STACK_TOP because that can depend on attributes which aren't 269 * configured yet. 270 */ 271 BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP); 272 vma->vm_end = STACK_TOP_MAX; 273 vma->vm_start = vma->vm_end - PAGE_SIZE; 274 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; 275 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 276 277 err = insert_vm_struct(mm, vma); 278 if (err) 279 goto err; 280 281 mm->stack_vm = mm->total_vm = 1; 282 mmap_write_unlock(mm); 283 bprm->p = vma->vm_end - sizeof(void *); 284 return 0; 285 err: 286 mmap_write_unlock(mm); 287 err_free: 288 bprm->vma = NULL; 289 vm_area_free(vma); 290 return err; 291 } 292 293 static bool valid_arg_len(struct linux_binprm *bprm, long len) 294 { 295 return len <= MAX_ARG_STRLEN; 296 } 297 298 #else 299 300 static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages) 301 { 302 } 303 304 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, 305 int write) 306 { 307 struct page *page; 308 309 page = bprm->page[pos / PAGE_SIZE]; 310 if (!page && write) { 311 page = alloc_page(GFP_HIGHUSER|__GFP_ZERO); 312 if (!page) 313 return NULL; 314 bprm->page[pos / PAGE_SIZE] = page; 315 } 316 317 return page; 318 } 319 320 static void put_arg_page(struct page *page) 321 { 322 } 323 324 static void free_arg_page(struct linux_binprm *bprm, int i) 325 { 326 if (bprm->page[i]) { 327 __free_page(bprm->page[i]); 328 bprm->page[i] = NULL; 329 } 330 } 331 332 static void free_arg_pages(struct linux_binprm *bprm) 333 { 334 int i; 335 336 for (i = 0; i < MAX_ARG_PAGES; i++) 337 free_arg_page(bprm, i); 338 } 339 340 static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos, 341 struct page *page) 342 { 343 } 344 345 static int __bprm_mm_init(struct linux_binprm *bprm) 346 { 347 bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *); 348 return 0; 349 } 350 351 static bool valid_arg_len(struct linux_binprm *bprm, long len) 352 { 353 return len <= bprm->p; 354 } 355 356 #endif /* CONFIG_MMU */ 357 358 /* 359 * Create a new mm_struct and populate it with a temporary stack 360 * vm_area_struct. We don't have enough context at this point to set the stack 361 * flags, permissions, and offset, so we use temporary values. We'll update 362 * them later in setup_arg_pages(). 363 */ 364 static int bprm_mm_init(struct linux_binprm *bprm) 365 { 366 int err; 367 struct mm_struct *mm = NULL; 368 369 bprm->mm = mm = mm_alloc(); 370 err = -ENOMEM; 371 if (!mm) 372 goto err; 373 374 /* Save current stack limit for all calculations made during exec. */ 375 task_lock(current->group_leader); 376 bprm->rlim_stack = current->signal->rlim[RLIMIT_STACK]; 377 task_unlock(current->group_leader); 378 379 err = __bprm_mm_init(bprm); 380 if (err) 381 goto err; 382 383 return 0; 384 385 err: 386 if (mm) { 387 bprm->mm = NULL; 388 mmdrop(mm); 389 } 390 391 return err; 392 } 393 394 struct user_arg_ptr { 395 #ifdef CONFIG_COMPAT 396 bool is_compat; 397 #endif 398 union { 399 const char __user *const __user *native; 400 #ifdef CONFIG_COMPAT 401 const compat_uptr_t __user *compat; 402 #endif 403 } ptr; 404 }; 405 406 static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr) 407 { 408 const char __user *native; 409 410 #ifdef CONFIG_COMPAT 411 if (unlikely(argv.is_compat)) { 412 compat_uptr_t compat; 413 414 if (get_user(compat, argv.ptr.compat + nr)) 415 return ERR_PTR(-EFAULT); 416 417 return compat_ptr(compat); 418 } 419 #endif 420 421 if (get_user(native, argv.ptr.native + nr)) 422 return ERR_PTR(-EFAULT); 423 424 return native; 425 } 426 427 /* 428 * count() counts the number of strings in array ARGV. 429 */ 430 static int count(struct user_arg_ptr argv, int max) 431 { 432 int i = 0; 433 434 if (argv.ptr.native != NULL) { 435 for (;;) { 436 const char __user *p = get_user_arg_ptr(argv, i); 437 438 if (!p) 439 break; 440 441 if (IS_ERR(p)) 442 return -EFAULT; 443 444 if (i >= max) 445 return -E2BIG; 446 ++i; 447 448 if (fatal_signal_pending(current)) 449 return -ERESTARTNOHAND; 450 cond_resched(); 451 } 452 } 453 return i; 454 } 455 456 static int count_strings_kernel(const char *const *argv) 457 { 458 int i; 459 460 if (!argv) 461 return 0; 462 463 for (i = 0; argv[i]; ++i) { 464 if (i >= MAX_ARG_STRINGS) 465 return -E2BIG; 466 if (fatal_signal_pending(current)) 467 return -ERESTARTNOHAND; 468 cond_resched(); 469 } 470 return i; 471 } 472 473 static int bprm_stack_limits(struct linux_binprm *bprm) 474 { 475 unsigned long limit, ptr_size; 476 477 /* 478 * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM 479 * (whichever is smaller) for the argv+env strings. 480 * This ensures that: 481 * - the remaining binfmt code will not run out of stack space, 482 * - the program will have a reasonable amount of stack left 483 * to work from. 484 */ 485 limit = _STK_LIM / 4 * 3; 486 limit = min(limit, bprm->rlim_stack.rlim_cur / 4); 487 /* 488 * We've historically supported up to 32 pages (ARG_MAX) 489 * of argument strings even with small stacks 490 */ 491 limit = max_t(unsigned long, limit, ARG_MAX); 492 /* 493 * We must account for the size of all the argv and envp pointers to 494 * the argv and envp strings, since they will also take up space in 495 * the stack. They aren't stored until much later when we can't 496 * signal to the parent that the child has run out of stack space. 497 * Instead, calculate it here so it's possible to fail gracefully. 498 * 499 * In the case of argc = 0, make sure there is space for adding a 500 * empty string (which will bump argc to 1), to ensure confused 501 * userspace programs don't start processing from argv[1], thinking 502 * argc can never be 0, to keep them from walking envp by accident. 503 * See do_execveat_common(). 504 */ 505 ptr_size = (max(bprm->argc, 1) + bprm->envc) * sizeof(void *); 506 if (limit <= ptr_size) 507 return -E2BIG; 508 limit -= ptr_size; 509 510 bprm->argmin = bprm->p - limit; 511 return 0; 512 } 513 514 /* 515 * 'copy_strings()' copies argument/environment strings from the old 516 * processes's memory to the new process's stack. The call to get_user_pages() 517 * ensures the destination page is created and not swapped out. 518 */ 519 static int copy_strings(int argc, struct user_arg_ptr argv, 520 struct linux_binprm *bprm) 521 { 522 struct page *kmapped_page = NULL; 523 char *kaddr = NULL; 524 unsigned long kpos = 0; 525 int ret; 526 527 while (argc-- > 0) { 528 const char __user *str; 529 int len; 530 unsigned long pos; 531 532 ret = -EFAULT; 533 str = get_user_arg_ptr(argv, argc); 534 if (IS_ERR(str)) 535 goto out; 536 537 len = strnlen_user(str, MAX_ARG_STRLEN); 538 if (!len) 539 goto out; 540 541 ret = -E2BIG; 542 if (!valid_arg_len(bprm, len)) 543 goto out; 544 545 /* We're going to work our way backwards. */ 546 pos = bprm->p; 547 str += len; 548 bprm->p -= len; 549 #ifdef CONFIG_MMU 550 if (bprm->p < bprm->argmin) 551 goto out; 552 #endif 553 554 while (len > 0) { 555 int offset, bytes_to_copy; 556 557 if (fatal_signal_pending(current)) { 558 ret = -ERESTARTNOHAND; 559 goto out; 560 } 561 cond_resched(); 562 563 offset = pos % PAGE_SIZE; 564 if (offset == 0) 565 offset = PAGE_SIZE; 566 567 bytes_to_copy = offset; 568 if (bytes_to_copy > len) 569 bytes_to_copy = len; 570 571 offset -= bytes_to_copy; 572 pos -= bytes_to_copy; 573 str -= bytes_to_copy; 574 len -= bytes_to_copy; 575 576 if (!kmapped_page || kpos != (pos & PAGE_MASK)) { 577 struct page *page; 578 579 page = get_arg_page(bprm, pos, 1); 580 if (!page) { 581 ret = -E2BIG; 582 goto out; 583 } 584 585 if (kmapped_page) { 586 flush_dcache_page(kmapped_page); 587 kunmap_local(kaddr); 588 put_arg_page(kmapped_page); 589 } 590 kmapped_page = page; 591 kaddr = kmap_local_page(kmapped_page); 592 kpos = pos & PAGE_MASK; 593 flush_arg_page(bprm, kpos, kmapped_page); 594 } 595 if (copy_from_user(kaddr+offset, str, bytes_to_copy)) { 596 ret = -EFAULT; 597 goto out; 598 } 599 } 600 } 601 ret = 0; 602 out: 603 if (kmapped_page) { 604 flush_dcache_page(kmapped_page); 605 kunmap_local(kaddr); 606 put_arg_page(kmapped_page); 607 } 608 return ret; 609 } 610 611 /* 612 * Copy and argument/environment string from the kernel to the processes stack. 613 */ 614 int copy_string_kernel(const char *arg, struct linux_binprm *bprm) 615 { 616 int len = strnlen(arg, MAX_ARG_STRLEN) + 1 /* terminating NUL */; 617 unsigned long pos = bprm->p; 618 619 if (len == 0) 620 return -EFAULT; 621 if (!valid_arg_len(bprm, len)) 622 return -E2BIG; 623 624 /* We're going to work our way backwards. */ 625 arg += len; 626 bprm->p -= len; 627 if (IS_ENABLED(CONFIG_MMU) && bprm->p < bprm->argmin) 628 return -E2BIG; 629 630 while (len > 0) { 631 unsigned int bytes_to_copy = min_t(unsigned int, len, 632 min_not_zero(offset_in_page(pos), PAGE_SIZE)); 633 struct page *page; 634 635 pos -= bytes_to_copy; 636 arg -= bytes_to_copy; 637 len -= bytes_to_copy; 638 639 page = get_arg_page(bprm, pos, 1); 640 if (!page) 641 return -E2BIG; 642 flush_arg_page(bprm, pos & PAGE_MASK, page); 643 memcpy_to_page(page, offset_in_page(pos), arg, bytes_to_copy); 644 put_arg_page(page); 645 } 646 647 return 0; 648 } 649 EXPORT_SYMBOL(copy_string_kernel); 650 651 static int copy_strings_kernel(int argc, const char *const *argv, 652 struct linux_binprm *bprm) 653 { 654 while (argc-- > 0) { 655 int ret = copy_string_kernel(argv[argc], bprm); 656 if (ret < 0) 657 return ret; 658 if (fatal_signal_pending(current)) 659 return -ERESTARTNOHAND; 660 cond_resched(); 661 } 662 return 0; 663 } 664 665 #ifdef CONFIG_MMU 666 667 /* 668 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once 669 * the binfmt code determines where the new stack should reside, we shift it to 670 * its final location. The process proceeds as follows: 671 * 672 * 1) Use shift to calculate the new vma endpoints. 673 * 2) Extend vma to cover both the old and new ranges. This ensures the 674 * arguments passed to subsequent functions are consistent. 675 * 3) Move vma's page tables to the new range. 676 * 4) Free up any cleared pgd range. 677 * 5) Shrink the vma to cover only the new range. 678 */ 679 static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) 680 { 681 struct mm_struct *mm = vma->vm_mm; 682 unsigned long old_start = vma->vm_start; 683 unsigned long old_end = vma->vm_end; 684 unsigned long length = old_end - old_start; 685 unsigned long new_start = old_start - shift; 686 unsigned long new_end = old_end - shift; 687 struct mmu_gather tlb; 688 689 BUG_ON(new_start > new_end); 690 691 /* 692 * ensure there are no vmas between where we want to go 693 * and where we are 694 */ 695 if (vma != find_vma(mm, new_start)) 696 return -EFAULT; 697 698 /* 699 * cover the whole range: [new_start, old_end) 700 */ 701 if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL)) 702 return -ENOMEM; 703 704 /* 705 * move the page tables downwards, on failure we rely on 706 * process cleanup to remove whatever mess we made. 707 */ 708 if (length != move_page_tables(vma, old_start, 709 vma, new_start, length, false)) 710 return -ENOMEM; 711 712 lru_add_drain(); 713 tlb_gather_mmu(&tlb, mm); 714 if (new_end > old_start) { 715 /* 716 * when the old and new regions overlap clear from new_end. 717 */ 718 free_pgd_range(&tlb, new_end, old_end, new_end, 719 vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING); 720 } else { 721 /* 722 * otherwise, clean from old_start; this is done to not touch 723 * the address space in [new_end, old_start) some architectures 724 * have constraints on va-space that make this illegal (IA64) - 725 * for the others its just a little faster. 726 */ 727 free_pgd_range(&tlb, old_start, old_end, new_end, 728 vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING); 729 } 730 tlb_finish_mmu(&tlb); 731 732 /* 733 * Shrink the vma to just the new range. Always succeeds. 734 */ 735 vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL); 736 737 return 0; 738 } 739 740 /* 741 * Finalizes the stack vm_area_struct. The flags and permissions are updated, 742 * the stack is optionally relocated, and some extra space is added. 743 */ 744 int setup_arg_pages(struct linux_binprm *bprm, 745 unsigned long stack_top, 746 int executable_stack) 747 { 748 unsigned long ret; 749 unsigned long stack_shift; 750 struct mm_struct *mm = current->mm; 751 struct vm_area_struct *vma = bprm->vma; 752 struct vm_area_struct *prev = NULL; 753 unsigned long vm_flags; 754 unsigned long stack_base; 755 unsigned long stack_size; 756 unsigned long stack_expand; 757 unsigned long rlim_stack; 758 struct mmu_gather tlb; 759 760 #ifdef CONFIG_STACK_GROWSUP 761 /* Limit stack size */ 762 stack_base = bprm->rlim_stack.rlim_max; 763 764 stack_base = calc_max_stack_size(stack_base); 765 766 /* Add space for stack randomization. */ 767 stack_base += (STACK_RND_MASK << PAGE_SHIFT); 768 769 /* Make sure we didn't let the argument array grow too large. */ 770 if (vma->vm_end - vma->vm_start > stack_base) 771 return -ENOMEM; 772 773 stack_base = PAGE_ALIGN(stack_top - stack_base); 774 775 stack_shift = vma->vm_start - stack_base; 776 mm->arg_start = bprm->p - stack_shift; 777 bprm->p = vma->vm_end - stack_shift; 778 #else 779 stack_top = arch_align_stack(stack_top); 780 stack_top = PAGE_ALIGN(stack_top); 781 782 if (unlikely(stack_top < mmap_min_addr) || 783 unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr)) 784 return -ENOMEM; 785 786 stack_shift = vma->vm_end - stack_top; 787 788 bprm->p -= stack_shift; 789 mm->arg_start = bprm->p; 790 #endif 791 792 if (bprm->loader) 793 bprm->loader -= stack_shift; 794 bprm->exec -= stack_shift; 795 796 if (mmap_write_lock_killable(mm)) 797 return -EINTR; 798 799 vm_flags = VM_STACK_FLAGS; 800 801 /* 802 * Adjust stack execute permissions; explicitly enable for 803 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone 804 * (arch default) otherwise. 805 */ 806 if (unlikely(executable_stack == EXSTACK_ENABLE_X)) 807 vm_flags |= VM_EXEC; 808 else if (executable_stack == EXSTACK_DISABLE_X) 809 vm_flags &= ~VM_EXEC; 810 vm_flags |= mm->def_flags; 811 vm_flags |= VM_STACK_INCOMPLETE_SETUP; 812 813 tlb_gather_mmu(&tlb, mm); 814 ret = mprotect_fixup(&tlb, vma, &prev, vma->vm_start, vma->vm_end, 815 vm_flags); 816 tlb_finish_mmu(&tlb); 817 818 if (ret) 819 goto out_unlock; 820 BUG_ON(prev != vma); 821 822 if (unlikely(vm_flags & VM_EXEC)) { 823 pr_warn_once("process '%pD4' started with executable stack\n", 824 bprm->file); 825 } 826 827 /* Move stack pages down in memory. */ 828 if (stack_shift) { 829 ret = shift_arg_pages(vma, stack_shift); 830 if (ret) 831 goto out_unlock; 832 } 833 834 /* mprotect_fixup is overkill to remove the temporary stack flags */ 835 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP; 836 837 stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */ 838 stack_size = vma->vm_end - vma->vm_start; 839 /* 840 * Align this down to a page boundary as expand_stack 841 * will align it up. 842 */ 843 rlim_stack = bprm->rlim_stack.rlim_cur & PAGE_MASK; 844 #ifdef CONFIG_STACK_GROWSUP 845 if (stack_size + stack_expand > rlim_stack) 846 stack_base = vma->vm_start + rlim_stack; 847 else 848 stack_base = vma->vm_end + stack_expand; 849 #else 850 if (stack_size + stack_expand > rlim_stack) 851 stack_base = vma->vm_end - rlim_stack; 852 else 853 stack_base = vma->vm_start - stack_expand; 854 #endif 855 current->mm->start_stack = bprm->p; 856 ret = expand_stack(vma, stack_base); 857 if (ret) 858 ret = -EFAULT; 859 860 out_unlock: 861 mmap_write_unlock(mm); 862 return ret; 863 } 864 EXPORT_SYMBOL(setup_arg_pages); 865 866 #else 867 868 /* 869 * Transfer the program arguments and environment from the holding pages 870 * onto the stack. The provided stack pointer is adjusted accordingly. 871 */ 872 int transfer_args_to_stack(struct linux_binprm *bprm, 873 unsigned long *sp_location) 874 { 875 unsigned long index, stop, sp; 876 int ret = 0; 877 878 stop = bprm->p >> PAGE_SHIFT; 879 sp = *sp_location; 880 881 for (index = MAX_ARG_PAGES - 1; index >= stop; index--) { 882 unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0; 883 char *src = kmap_local_page(bprm->page[index]) + offset; 884 sp -= PAGE_SIZE - offset; 885 if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0) 886 ret = -EFAULT; 887 kunmap_local(src); 888 if (ret) 889 goto out; 890 } 891 892 *sp_location = sp; 893 894 out: 895 return ret; 896 } 897 EXPORT_SYMBOL(transfer_args_to_stack); 898 899 #endif /* CONFIG_MMU */ 900 901 static struct file *do_open_execat(int fd, struct filename *name, int flags) 902 { 903 struct file *file; 904 int err; 905 struct open_flags open_exec_flags = { 906 .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC, 907 .acc_mode = MAY_EXEC, 908 .intent = LOOKUP_OPEN, 909 .lookup_flags = LOOKUP_FOLLOW, 910 }; 911 912 if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0) 913 return ERR_PTR(-EINVAL); 914 if (flags & AT_SYMLINK_NOFOLLOW) 915 open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW; 916 if (flags & AT_EMPTY_PATH) 917 open_exec_flags.lookup_flags |= LOOKUP_EMPTY; 918 919 file = do_filp_open(fd, name, &open_exec_flags); 920 if (IS_ERR(file)) 921 goto out; 922 923 /* 924 * may_open() has already checked for this, so it should be 925 * impossible to trip now. But we need to be extra cautious 926 * and check again at the very end too. 927 */ 928 err = -EACCES; 929 if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) || 930 path_noexec(&file->f_path))) 931 goto exit; 932 933 err = deny_write_access(file); 934 if (err) 935 goto exit; 936 937 if (name->name[0] != '\0') 938 fsnotify_open(file); 939 940 out: 941 return file; 942 943 exit: 944 fput(file); 945 return ERR_PTR(err); 946 } 947 948 struct file *open_exec(const char *name) 949 { 950 struct filename *filename = getname_kernel(name); 951 struct file *f = ERR_CAST(filename); 952 953 if (!IS_ERR(filename)) { 954 f = do_open_execat(AT_FDCWD, filename, 0); 955 putname(filename); 956 } 957 return f; 958 } 959 EXPORT_SYMBOL(open_exec); 960 961 #if defined(CONFIG_HAVE_AOUT) || defined(CONFIG_BINFMT_FLAT) || \ 962 defined(CONFIG_BINFMT_ELF_FDPIC) 963 ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len) 964 { 965 ssize_t res = vfs_read(file, (void __user *)addr, len, &pos); 966 if (res > 0) 967 flush_icache_user_range(addr, addr + len); 968 return res; 969 } 970 EXPORT_SYMBOL(read_code); 971 #endif 972 973 /* 974 * Maps the mm_struct mm into the current task struct. 975 * On success, this function returns with exec_update_lock 976 * held for writing. 977 */ 978 static int exec_mmap(struct mm_struct *mm) 979 { 980 struct task_struct *tsk; 981 struct mm_struct *old_mm, *active_mm; 982 bool vfork; 983 int ret; 984 985 /* Notify parent that we're no longer interested in the old VM */ 986 tsk = current; 987 vfork = !!tsk->vfork_done; 988 old_mm = current->mm; 989 exec_mm_release(tsk, old_mm); 990 if (old_mm) 991 sync_mm_rss(old_mm); 992 993 ret = down_write_killable(&tsk->signal->exec_update_lock); 994 if (ret) 995 return ret; 996 997 if (old_mm) { 998 /* 999 * If there is a pending fatal signal perhaps a signal 1000 * whose default action is to create a coredump get 1001 * out and die instead of going through with the exec. 1002 */ 1003 ret = mmap_read_lock_killable(old_mm); 1004 if (ret) { 1005 up_write(&tsk->signal->exec_update_lock); 1006 return ret; 1007 } 1008 } 1009 1010 task_lock(tsk); 1011 membarrier_exec_mmap(mm); 1012 1013 local_irq_disable(); 1014 active_mm = tsk->active_mm; 1015 tsk->active_mm = mm; 1016 tsk->mm = mm; 1017 lru_gen_add_mm(mm); 1018 /* 1019 * This prevents preemption while active_mm is being loaded and 1020 * it and mm are being updated, which could cause problems for 1021 * lazy tlb mm refcounting when these are updated by context 1022 * switches. Not all architectures can handle irqs off over 1023 * activate_mm yet. 1024 */ 1025 if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM)) 1026 local_irq_enable(); 1027 activate_mm(active_mm, mm); 1028 if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM)) 1029 local_irq_enable(); 1030 tsk->mm->vmacache_seqnum = 0; 1031 vmacache_flush(tsk); 1032 task_unlock(tsk); 1033 lru_gen_use_mm(mm); 1034 1035 if (vfork) 1036 timens_on_fork(tsk->nsproxy, tsk); 1037 1038 if (old_mm) { 1039 mmap_read_unlock(old_mm); 1040 BUG_ON(active_mm != old_mm); 1041 setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm); 1042 mm_update_next_owner(old_mm); 1043 mmput(old_mm); 1044 return 0; 1045 } 1046 mmdrop(active_mm); 1047 return 0; 1048 } 1049 1050 static int de_thread(struct task_struct *tsk) 1051 { 1052 struct signal_struct *sig = tsk->signal; 1053 struct sighand_struct *oldsighand = tsk->sighand; 1054 spinlock_t *lock = &oldsighand->siglock; 1055 1056 if (thread_group_empty(tsk)) 1057 goto no_thread_group; 1058 1059 /* 1060 * Kill all other threads in the thread group. 1061 */ 1062 spin_lock_irq(lock); 1063 if ((sig->flags & SIGNAL_GROUP_EXIT) || sig->group_exec_task) { 1064 /* 1065 * Another group action in progress, just 1066 * return so that the signal is processed. 1067 */ 1068 spin_unlock_irq(lock); 1069 return -EAGAIN; 1070 } 1071 1072 sig->group_exec_task = tsk; 1073 sig->notify_count = zap_other_threads(tsk); 1074 if (!thread_group_leader(tsk)) 1075 sig->notify_count--; 1076 1077 while (sig->notify_count) { 1078 __set_current_state(TASK_KILLABLE); 1079 spin_unlock_irq(lock); 1080 schedule(); 1081 if (__fatal_signal_pending(tsk)) 1082 goto killed; 1083 spin_lock_irq(lock); 1084 } 1085 spin_unlock_irq(lock); 1086 1087 /* 1088 * At this point all other threads have exited, all we have to 1089 * do is to wait for the thread group leader to become inactive, 1090 * and to assume its PID: 1091 */ 1092 if (!thread_group_leader(tsk)) { 1093 struct task_struct *leader = tsk->group_leader; 1094 1095 for (;;) { 1096 cgroup_threadgroup_change_begin(tsk); 1097 write_lock_irq(&tasklist_lock); 1098 /* 1099 * Do this under tasklist_lock to ensure that 1100 * exit_notify() can't miss ->group_exec_task 1101 */ 1102 sig->notify_count = -1; 1103 if (likely(leader->exit_state)) 1104 break; 1105 __set_current_state(TASK_KILLABLE); 1106 write_unlock_irq(&tasklist_lock); 1107 cgroup_threadgroup_change_end(tsk); 1108 schedule(); 1109 if (__fatal_signal_pending(tsk)) 1110 goto killed; 1111 } 1112 1113 /* 1114 * The only record we have of the real-time age of a 1115 * process, regardless of execs it's done, is start_time. 1116 * All the past CPU time is accumulated in signal_struct 1117 * from sister threads now dead. But in this non-leader 1118 * exec, nothing survives from the original leader thread, 1119 * whose birth marks the true age of this process now. 1120 * When we take on its identity by switching to its PID, we 1121 * also take its birthdate (always earlier than our own). 1122 */ 1123 tsk->start_time = leader->start_time; 1124 tsk->start_boottime = leader->start_boottime; 1125 1126 BUG_ON(!same_thread_group(leader, tsk)); 1127 /* 1128 * An exec() starts a new thread group with the 1129 * TGID of the previous thread group. Rehash the 1130 * two threads with a switched PID, and release 1131 * the former thread group leader: 1132 */ 1133 1134 /* Become a process group leader with the old leader's pid. 1135 * The old leader becomes a thread of the this thread group. 1136 */ 1137 exchange_tids(tsk, leader); 1138 transfer_pid(leader, tsk, PIDTYPE_TGID); 1139 transfer_pid(leader, tsk, PIDTYPE_PGID); 1140 transfer_pid(leader, tsk, PIDTYPE_SID); 1141 1142 list_replace_rcu(&leader->tasks, &tsk->tasks); 1143 list_replace_init(&leader->sibling, &tsk->sibling); 1144 1145 tsk->group_leader = tsk; 1146 leader->group_leader = tsk; 1147 1148 tsk->exit_signal = SIGCHLD; 1149 leader->exit_signal = -1; 1150 1151 BUG_ON(leader->exit_state != EXIT_ZOMBIE); 1152 leader->exit_state = EXIT_DEAD; 1153 1154 /* 1155 * We are going to release_task()->ptrace_unlink() silently, 1156 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees 1157 * the tracer won't block again waiting for this thread. 1158 */ 1159 if (unlikely(leader->ptrace)) 1160 __wake_up_parent(leader, leader->parent); 1161 write_unlock_irq(&tasklist_lock); 1162 cgroup_threadgroup_change_end(tsk); 1163 1164 release_task(leader); 1165 } 1166 1167 sig->group_exec_task = NULL; 1168 sig->notify_count = 0; 1169 1170 no_thread_group: 1171 /* we have changed execution domain */ 1172 tsk->exit_signal = SIGCHLD; 1173 1174 BUG_ON(!thread_group_leader(tsk)); 1175 return 0; 1176 1177 killed: 1178 /* protects against exit_notify() and __exit_signal() */ 1179 read_lock(&tasklist_lock); 1180 sig->group_exec_task = NULL; 1181 sig->notify_count = 0; 1182 read_unlock(&tasklist_lock); 1183 return -EAGAIN; 1184 } 1185 1186 1187 /* 1188 * This function makes sure the current process has its own signal table, 1189 * so that flush_signal_handlers can later reset the handlers without 1190 * disturbing other processes. (Other processes might share the signal 1191 * table via the CLONE_SIGHAND option to clone().) 1192 */ 1193 static int unshare_sighand(struct task_struct *me) 1194 { 1195 struct sighand_struct *oldsighand = me->sighand; 1196 1197 if (refcount_read(&oldsighand->count) != 1) { 1198 struct sighand_struct *newsighand; 1199 /* 1200 * This ->sighand is shared with the CLONE_SIGHAND 1201 * but not CLONE_THREAD task, switch to the new one. 1202 */ 1203 newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); 1204 if (!newsighand) 1205 return -ENOMEM; 1206 1207 refcount_set(&newsighand->count, 1); 1208 memcpy(newsighand->action, oldsighand->action, 1209 sizeof(newsighand->action)); 1210 1211 write_lock_irq(&tasklist_lock); 1212 spin_lock(&oldsighand->siglock); 1213 rcu_assign_pointer(me->sighand, newsighand); 1214 spin_unlock(&oldsighand->siglock); 1215 write_unlock_irq(&tasklist_lock); 1216 1217 __cleanup_sighand(oldsighand); 1218 } 1219 return 0; 1220 } 1221 1222 char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk) 1223 { 1224 task_lock(tsk); 1225 /* Always NUL terminated and zero-padded */ 1226 strscpy_pad(buf, tsk->comm, buf_size); 1227 task_unlock(tsk); 1228 return buf; 1229 } 1230 EXPORT_SYMBOL_GPL(__get_task_comm); 1231 1232 /* 1233 * These functions flushes out all traces of the currently running executable 1234 * so that a new one can be started 1235 */ 1236 1237 void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec) 1238 { 1239 task_lock(tsk); 1240 trace_task_rename(tsk, buf); 1241 strscpy_pad(tsk->comm, buf, sizeof(tsk->comm)); 1242 task_unlock(tsk); 1243 perf_event_comm(tsk, exec); 1244 } 1245 1246 /* 1247 * Calling this is the point of no return. None of the failures will be 1248 * seen by userspace since either the process is already taking a fatal 1249 * signal (via de_thread() or coredump), or will have SEGV raised 1250 * (after exec_mmap()) by search_binary_handler (see below). 1251 */ 1252 int begin_new_exec(struct linux_binprm * bprm) 1253 { 1254 struct task_struct *me = current; 1255 int retval; 1256 1257 /* Once we are committed compute the creds */ 1258 retval = bprm_creds_from_file(bprm); 1259 if (retval) 1260 return retval; 1261 1262 /* 1263 * Ensure all future errors are fatal. 1264 */ 1265 bprm->point_of_no_return = true; 1266 1267 /* 1268 * Make this the only thread in the thread group. 1269 */ 1270 retval = de_thread(me); 1271 if (retval) 1272 goto out; 1273 1274 /* 1275 * Cancel any io_uring activity across execve 1276 */ 1277 io_uring_task_cancel(); 1278 1279 /* Ensure the files table is not shared. */ 1280 retval = unshare_files(); 1281 if (retval) 1282 goto out; 1283 1284 /* 1285 * Must be called _before_ exec_mmap() as bprm->mm is 1286 * not visible until then. This also enables the update 1287 * to be lockless. 1288 */ 1289 retval = set_mm_exe_file(bprm->mm, bprm->file); 1290 if (retval) 1291 goto out; 1292 1293 /* If the binary is not readable then enforce mm->dumpable=0 */ 1294 would_dump(bprm, bprm->file); 1295 if (bprm->have_execfd) 1296 would_dump(bprm, bprm->executable); 1297 1298 /* 1299 * Release all of the old mmap stuff 1300 */ 1301 acct_arg_size(bprm, 0); 1302 retval = exec_mmap(bprm->mm); 1303 if (retval) 1304 goto out; 1305 1306 bprm->mm = NULL; 1307 1308 #ifdef CONFIG_POSIX_TIMERS 1309 spin_lock_irq(&me->sighand->siglock); 1310 posix_cpu_timers_exit(me); 1311 spin_unlock_irq(&me->sighand->siglock); 1312 exit_itimers(me); 1313 flush_itimer_signals(); 1314 #endif 1315 1316 /* 1317 * Make the signal table private. 1318 */ 1319 retval = unshare_sighand(me); 1320 if (retval) 1321 goto out_unlock; 1322 1323 me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | 1324 PF_NOFREEZE | PF_NO_SETAFFINITY); 1325 flush_thread(); 1326 me->personality &= ~bprm->per_clear; 1327 1328 clear_syscall_work_syscall_user_dispatch(me); 1329 1330 /* 1331 * We have to apply CLOEXEC before we change whether the process is 1332 * dumpable (in setup_new_exec) to avoid a race with a process in userspace 1333 * trying to access the should-be-closed file descriptors of a process 1334 * undergoing exec(2). 1335 */ 1336 do_close_on_exec(me->files); 1337 1338 if (bprm->secureexec) { 1339 /* Make sure parent cannot signal privileged process. */ 1340 me->pdeath_signal = 0; 1341 1342 /* 1343 * For secureexec, reset the stack limit to sane default to 1344 * avoid bad behavior from the prior rlimits. This has to 1345 * happen before arch_pick_mmap_layout(), which examines 1346 * RLIMIT_STACK, but after the point of no return to avoid 1347 * needing to clean up the change on failure. 1348 */ 1349 if (bprm->rlim_stack.rlim_cur > _STK_LIM) 1350 bprm->rlim_stack.rlim_cur = _STK_LIM; 1351 } 1352 1353 me->sas_ss_sp = me->sas_ss_size = 0; 1354 1355 /* 1356 * Figure out dumpability. Note that this checking only of current 1357 * is wrong, but userspace depends on it. This should be testing 1358 * bprm->secureexec instead. 1359 */ 1360 if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP || 1361 !(uid_eq(current_euid(), current_uid()) && 1362 gid_eq(current_egid(), current_gid()))) 1363 set_dumpable(current->mm, suid_dumpable); 1364 else 1365 set_dumpable(current->mm, SUID_DUMP_USER); 1366 1367 perf_event_exec(); 1368 __set_task_comm(me, kbasename(bprm->filename), true); 1369 1370 /* An exec changes our domain. We are no longer part of the thread 1371 group */ 1372 WRITE_ONCE(me->self_exec_id, me->self_exec_id + 1); 1373 flush_signal_handlers(me, 0); 1374 1375 retval = set_cred_ucounts(bprm->cred); 1376 if (retval < 0) 1377 goto out_unlock; 1378 1379 /* 1380 * install the new credentials for this executable 1381 */ 1382 security_bprm_committing_creds(bprm); 1383 1384 commit_creds(bprm->cred); 1385 bprm->cred = NULL; 1386 1387 /* 1388 * Disable monitoring for regular users 1389 * when executing setuid binaries. Must 1390 * wait until new credentials are committed 1391 * by commit_creds() above 1392 */ 1393 if (get_dumpable(me->mm) != SUID_DUMP_USER) 1394 perf_event_exit_task(me); 1395 /* 1396 * cred_guard_mutex must be held at least to this point to prevent 1397 * ptrace_attach() from altering our determination of the task's 1398 * credentials; any time after this it may be unlocked. 1399 */ 1400 security_bprm_committed_creds(bprm); 1401 1402 /* Pass the opened binary to the interpreter. */ 1403 if (bprm->have_execfd) { 1404 retval = get_unused_fd_flags(0); 1405 if (retval < 0) 1406 goto out_unlock; 1407 fd_install(retval, bprm->executable); 1408 bprm->executable = NULL; 1409 bprm->execfd = retval; 1410 } 1411 return 0; 1412 1413 out_unlock: 1414 up_write(&me->signal->exec_update_lock); 1415 out: 1416 return retval; 1417 } 1418 EXPORT_SYMBOL(begin_new_exec); 1419 1420 void would_dump(struct linux_binprm *bprm, struct file *file) 1421 { 1422 struct inode *inode = file_inode(file); 1423 struct user_namespace *mnt_userns = file_mnt_user_ns(file); 1424 if (inode_permission(mnt_userns, inode, MAY_READ) < 0) { 1425 struct user_namespace *old, *user_ns; 1426 bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP; 1427 1428 /* Ensure mm->user_ns contains the executable */ 1429 user_ns = old = bprm->mm->user_ns; 1430 while ((user_ns != &init_user_ns) && 1431 !privileged_wrt_inode_uidgid(user_ns, mnt_userns, inode)) 1432 user_ns = user_ns->parent; 1433 1434 if (old != user_ns) { 1435 bprm->mm->user_ns = get_user_ns(user_ns); 1436 put_user_ns(old); 1437 } 1438 } 1439 } 1440 EXPORT_SYMBOL(would_dump); 1441 1442 void setup_new_exec(struct linux_binprm * bprm) 1443 { 1444 /* Setup things that can depend upon the personality */ 1445 struct task_struct *me = current; 1446 1447 arch_pick_mmap_layout(me->mm, &bprm->rlim_stack); 1448 1449 arch_setup_new_exec(); 1450 1451 /* Set the new mm task size. We have to do that late because it may 1452 * depend on TIF_32BIT which is only updated in flush_thread() on 1453 * some architectures like powerpc 1454 */ 1455 me->mm->task_size = TASK_SIZE; 1456 up_write(&me->signal->exec_update_lock); 1457 mutex_unlock(&me->signal->cred_guard_mutex); 1458 } 1459 EXPORT_SYMBOL(setup_new_exec); 1460 1461 /* Runs immediately before start_thread() takes over. */ 1462 void finalize_exec(struct linux_binprm *bprm) 1463 { 1464 /* Store any stack rlimit changes before starting thread. */ 1465 task_lock(current->group_leader); 1466 current->signal->rlim[RLIMIT_STACK] = bprm->rlim_stack; 1467 task_unlock(current->group_leader); 1468 } 1469 EXPORT_SYMBOL(finalize_exec); 1470 1471 /* 1472 * Prepare credentials and lock ->cred_guard_mutex. 1473 * setup_new_exec() commits the new creds and drops the lock. 1474 * Or, if exec fails before, free_bprm() should release ->cred 1475 * and unlock. 1476 */ 1477 static int prepare_bprm_creds(struct linux_binprm *bprm) 1478 { 1479 if (mutex_lock_interruptible(¤t->signal->cred_guard_mutex)) 1480 return -ERESTARTNOINTR; 1481 1482 bprm->cred = prepare_exec_creds(); 1483 if (likely(bprm->cred)) 1484 return 0; 1485 1486 mutex_unlock(¤t->signal->cred_guard_mutex); 1487 return -ENOMEM; 1488 } 1489 1490 static void free_bprm(struct linux_binprm *bprm) 1491 { 1492 if (bprm->mm) { 1493 acct_arg_size(bprm, 0); 1494 mmput(bprm->mm); 1495 } 1496 free_arg_pages(bprm); 1497 if (bprm->cred) { 1498 mutex_unlock(¤t->signal->cred_guard_mutex); 1499 abort_creds(bprm->cred); 1500 } 1501 if (bprm->file) { 1502 allow_write_access(bprm->file); 1503 fput(bprm->file); 1504 } 1505 if (bprm->executable) 1506 fput(bprm->executable); 1507 /* If a binfmt changed the interp, free it. */ 1508 if (bprm->interp != bprm->filename) 1509 kfree(bprm->interp); 1510 kfree(bprm->fdpath); 1511 kfree(bprm); 1512 } 1513 1514 static struct linux_binprm *alloc_bprm(int fd, struct filename *filename) 1515 { 1516 struct linux_binprm *bprm = kzalloc(sizeof(*bprm), GFP_KERNEL); 1517 int retval = -ENOMEM; 1518 if (!bprm) 1519 goto out; 1520 1521 if (fd == AT_FDCWD || filename->name[0] == '/') { 1522 bprm->filename = filename->name; 1523 } else { 1524 if (filename->name[0] == '\0') 1525 bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd); 1526 else 1527 bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s", 1528 fd, filename->name); 1529 if (!bprm->fdpath) 1530 goto out_free; 1531 1532 bprm->filename = bprm->fdpath; 1533 } 1534 bprm->interp = bprm->filename; 1535 1536 retval = bprm_mm_init(bprm); 1537 if (retval) 1538 goto out_free; 1539 return bprm; 1540 1541 out_free: 1542 free_bprm(bprm); 1543 out: 1544 return ERR_PTR(retval); 1545 } 1546 1547 int bprm_change_interp(const char *interp, struct linux_binprm *bprm) 1548 { 1549 /* If a binfmt changed the interp, free it first. */ 1550 if (bprm->interp != bprm->filename) 1551 kfree(bprm->interp); 1552 bprm->interp = kstrdup(interp, GFP_KERNEL); 1553 if (!bprm->interp) 1554 return -ENOMEM; 1555 return 0; 1556 } 1557 EXPORT_SYMBOL(bprm_change_interp); 1558 1559 /* 1560 * determine how safe it is to execute the proposed program 1561 * - the caller must hold ->cred_guard_mutex to protect against 1562 * PTRACE_ATTACH or seccomp thread-sync 1563 */ 1564 static void check_unsafe_exec(struct linux_binprm *bprm) 1565 { 1566 struct task_struct *p = current, *t; 1567 unsigned n_fs; 1568 1569 if (p->ptrace) 1570 bprm->unsafe |= LSM_UNSAFE_PTRACE; 1571 1572 /* 1573 * This isn't strictly necessary, but it makes it harder for LSMs to 1574 * mess up. 1575 */ 1576 if (task_no_new_privs(current)) 1577 bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS; 1578 1579 t = p; 1580 n_fs = 1; 1581 spin_lock(&p->fs->lock); 1582 rcu_read_lock(); 1583 while_each_thread(p, t) { 1584 if (t->fs == p->fs) 1585 n_fs++; 1586 } 1587 rcu_read_unlock(); 1588 1589 if (p->fs->users > n_fs) 1590 bprm->unsafe |= LSM_UNSAFE_SHARE; 1591 else 1592 p->fs->in_exec = 1; 1593 spin_unlock(&p->fs->lock); 1594 } 1595 1596 static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file) 1597 { 1598 /* Handle suid and sgid on files */ 1599 struct user_namespace *mnt_userns; 1600 struct inode *inode; 1601 unsigned int mode; 1602 kuid_t uid; 1603 kgid_t gid; 1604 1605 if (!mnt_may_suid(file->f_path.mnt)) 1606 return; 1607 1608 if (task_no_new_privs(current)) 1609 return; 1610 1611 inode = file->f_path.dentry->d_inode; 1612 mode = READ_ONCE(inode->i_mode); 1613 if (!(mode & (S_ISUID|S_ISGID))) 1614 return; 1615 1616 mnt_userns = file_mnt_user_ns(file); 1617 1618 /* Be careful if suid/sgid is set */ 1619 inode_lock(inode); 1620 1621 /* reload atomically mode/uid/gid now that lock held */ 1622 mode = inode->i_mode; 1623 uid = i_uid_into_mnt(mnt_userns, inode); 1624 gid = i_gid_into_mnt(mnt_userns, inode); 1625 inode_unlock(inode); 1626 1627 /* We ignore suid/sgid if there are no mappings for them in the ns */ 1628 if (!kuid_has_mapping(bprm->cred->user_ns, uid) || 1629 !kgid_has_mapping(bprm->cred->user_ns, gid)) 1630 return; 1631 1632 if (mode & S_ISUID) { 1633 bprm->per_clear |= PER_CLEAR_ON_SETID; 1634 bprm->cred->euid = uid; 1635 } 1636 1637 if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) { 1638 bprm->per_clear |= PER_CLEAR_ON_SETID; 1639 bprm->cred->egid = gid; 1640 } 1641 } 1642 1643 /* 1644 * Compute brpm->cred based upon the final binary. 1645 */ 1646 static int bprm_creds_from_file(struct linux_binprm *bprm) 1647 { 1648 /* Compute creds based on which file? */ 1649 struct file *file = bprm->execfd_creds ? bprm->executable : bprm->file; 1650 1651 bprm_fill_uid(bprm, file); 1652 return security_bprm_creds_from_file(bprm, file); 1653 } 1654 1655 /* 1656 * Fill the binprm structure from the inode. 1657 * Read the first BINPRM_BUF_SIZE bytes 1658 * 1659 * This may be called multiple times for binary chains (scripts for example). 1660 */ 1661 static int prepare_binprm(struct linux_binprm *bprm) 1662 { 1663 loff_t pos = 0; 1664 1665 memset(bprm->buf, 0, BINPRM_BUF_SIZE); 1666 return kernel_read(bprm->file, bprm->buf, BINPRM_BUF_SIZE, &pos); 1667 } 1668 1669 /* 1670 * Arguments are '\0' separated strings found at the location bprm->p 1671 * points to; chop off the first by relocating brpm->p to right after 1672 * the first '\0' encountered. 1673 */ 1674 int remove_arg_zero(struct linux_binprm *bprm) 1675 { 1676 int ret = 0; 1677 unsigned long offset; 1678 char *kaddr; 1679 struct page *page; 1680 1681 if (!bprm->argc) 1682 return 0; 1683 1684 do { 1685 offset = bprm->p & ~PAGE_MASK; 1686 page = get_arg_page(bprm, bprm->p, 0); 1687 if (!page) { 1688 ret = -EFAULT; 1689 goto out; 1690 } 1691 kaddr = kmap_local_page(page); 1692 1693 for (; offset < PAGE_SIZE && kaddr[offset]; 1694 offset++, bprm->p++) 1695 ; 1696 1697 kunmap_local(kaddr); 1698 put_arg_page(page); 1699 } while (offset == PAGE_SIZE); 1700 1701 bprm->p++; 1702 bprm->argc--; 1703 ret = 0; 1704 1705 out: 1706 return ret; 1707 } 1708 EXPORT_SYMBOL(remove_arg_zero); 1709 1710 #define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e)) 1711 /* 1712 * cycle the list of binary formats handler, until one recognizes the image 1713 */ 1714 static int search_binary_handler(struct linux_binprm *bprm) 1715 { 1716 bool need_retry = IS_ENABLED(CONFIG_MODULES); 1717 struct linux_binfmt *fmt; 1718 int retval; 1719 1720 retval = prepare_binprm(bprm); 1721 if (retval < 0) 1722 return retval; 1723 1724 retval = security_bprm_check(bprm); 1725 if (retval) 1726 return retval; 1727 1728 retval = -ENOENT; 1729 retry: 1730 read_lock(&binfmt_lock); 1731 list_for_each_entry(fmt, &formats, lh) { 1732 if (!try_module_get(fmt->module)) 1733 continue; 1734 read_unlock(&binfmt_lock); 1735 1736 retval = fmt->load_binary(bprm); 1737 1738 read_lock(&binfmt_lock); 1739 put_binfmt(fmt); 1740 if (bprm->point_of_no_return || (retval != -ENOEXEC)) { 1741 read_unlock(&binfmt_lock); 1742 return retval; 1743 } 1744 } 1745 read_unlock(&binfmt_lock); 1746 1747 if (need_retry) { 1748 if (printable(bprm->buf[0]) && printable(bprm->buf[1]) && 1749 printable(bprm->buf[2]) && printable(bprm->buf[3])) 1750 return retval; 1751 if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0) 1752 return retval; 1753 need_retry = false; 1754 goto retry; 1755 } 1756 1757 return retval; 1758 } 1759 1760 static int exec_binprm(struct linux_binprm *bprm) 1761 { 1762 pid_t old_pid, old_vpid; 1763 int ret, depth; 1764 1765 /* Need to fetch pid before load_binary changes it */ 1766 old_pid = current->pid; 1767 rcu_read_lock(); 1768 old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent)); 1769 rcu_read_unlock(); 1770 1771 /* This allows 4 levels of binfmt rewrites before failing hard. */ 1772 for (depth = 0;; depth++) { 1773 struct file *exec; 1774 if (depth > 5) 1775 return -ELOOP; 1776 1777 ret = search_binary_handler(bprm); 1778 if (ret < 0) 1779 return ret; 1780 if (!bprm->interpreter) 1781 break; 1782 1783 exec = bprm->file; 1784 bprm->file = bprm->interpreter; 1785 bprm->interpreter = NULL; 1786 1787 allow_write_access(exec); 1788 if (unlikely(bprm->have_execfd)) { 1789 if (bprm->executable) { 1790 fput(exec); 1791 return -ENOEXEC; 1792 } 1793 bprm->executable = exec; 1794 } else 1795 fput(exec); 1796 } 1797 1798 audit_bprm(bprm); 1799 trace_sched_process_exec(current, old_pid, bprm); 1800 ptrace_event(PTRACE_EVENT_EXEC, old_vpid); 1801 proc_exec_connector(current); 1802 return 0; 1803 } 1804 1805 /* 1806 * sys_execve() executes a new program. 1807 */ 1808 static int bprm_execve(struct linux_binprm *bprm, 1809 int fd, struct filename *filename, int flags) 1810 { 1811 struct file *file; 1812 int retval; 1813 1814 retval = prepare_bprm_creds(bprm); 1815 if (retval) 1816 return retval; 1817 1818 check_unsafe_exec(bprm); 1819 current->in_execve = 1; 1820 1821 file = do_open_execat(fd, filename, flags); 1822 retval = PTR_ERR(file); 1823 if (IS_ERR(file)) 1824 goto out_unmark; 1825 1826 sched_exec(); 1827 1828 bprm->file = file; 1829 /* 1830 * Record that a name derived from an O_CLOEXEC fd will be 1831 * inaccessible after exec. This allows the code in exec to 1832 * choose to fail when the executable is not mmaped into the 1833 * interpreter and an open file descriptor is not passed to 1834 * the interpreter. This makes for a better user experience 1835 * than having the interpreter start and then immediately fail 1836 * when it finds the executable is inaccessible. 1837 */ 1838 if (bprm->fdpath && get_close_on_exec(fd)) 1839 bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE; 1840 1841 /* Set the unchanging part of bprm->cred */ 1842 retval = security_bprm_creds_for_exec(bprm); 1843 if (retval) 1844 goto out; 1845 1846 retval = exec_binprm(bprm); 1847 if (retval < 0) 1848 goto out; 1849 1850 /* execve succeeded */ 1851 current->fs->in_exec = 0; 1852 current->in_execve = 0; 1853 rseq_execve(current); 1854 acct_update_integrals(current); 1855 task_numa_free(current, false); 1856 return retval; 1857 1858 out: 1859 /* 1860 * If past the point of no return ensure the code never 1861 * returns to the userspace process. Use an existing fatal 1862 * signal if present otherwise terminate the process with 1863 * SIGSEGV. 1864 */ 1865 if (bprm->point_of_no_return && !fatal_signal_pending(current)) 1866 force_fatal_sig(SIGSEGV); 1867 1868 out_unmark: 1869 current->fs->in_exec = 0; 1870 current->in_execve = 0; 1871 1872 return retval; 1873 } 1874 1875 static int do_execveat_common(int fd, struct filename *filename, 1876 struct user_arg_ptr argv, 1877 struct user_arg_ptr envp, 1878 int flags) 1879 { 1880 struct linux_binprm *bprm; 1881 int retval; 1882 1883 if (IS_ERR(filename)) 1884 return PTR_ERR(filename); 1885 1886 /* 1887 * We move the actual failure in case of RLIMIT_NPROC excess from 1888 * set*uid() to execve() because too many poorly written programs 1889 * don't check setuid() return code. Here we additionally recheck 1890 * whether NPROC limit is still exceeded. 1891 */ 1892 if ((current->flags & PF_NPROC_EXCEEDED) && 1893 is_ucounts_overlimit(current_ucounts(), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) { 1894 retval = -EAGAIN; 1895 goto out_ret; 1896 } 1897 1898 /* We're below the limit (still or again), so we don't want to make 1899 * further execve() calls fail. */ 1900 current->flags &= ~PF_NPROC_EXCEEDED; 1901 1902 bprm = alloc_bprm(fd, filename); 1903 if (IS_ERR(bprm)) { 1904 retval = PTR_ERR(bprm); 1905 goto out_ret; 1906 } 1907 1908 retval = count(argv, MAX_ARG_STRINGS); 1909 if (retval == 0) 1910 pr_warn_once("process '%s' launched '%s' with NULL argv: empty string added\n", 1911 current->comm, bprm->filename); 1912 if (retval < 0) 1913 goto out_free; 1914 bprm->argc = retval; 1915 1916 retval = count(envp, MAX_ARG_STRINGS); 1917 if (retval < 0) 1918 goto out_free; 1919 bprm->envc = retval; 1920 1921 retval = bprm_stack_limits(bprm); 1922 if (retval < 0) 1923 goto out_free; 1924 1925 retval = copy_string_kernel(bprm->filename, bprm); 1926 if (retval < 0) 1927 goto out_free; 1928 bprm->exec = bprm->p; 1929 1930 retval = copy_strings(bprm->envc, envp, bprm); 1931 if (retval < 0) 1932 goto out_free; 1933 1934 retval = copy_strings(bprm->argc, argv, bprm); 1935 if (retval < 0) 1936 goto out_free; 1937 1938 /* 1939 * When argv is empty, add an empty string ("") as argv[0] to 1940 * ensure confused userspace programs that start processing 1941 * from argv[1] won't end up walking envp. See also 1942 * bprm_stack_limits(). 1943 */ 1944 if (bprm->argc == 0) { 1945 retval = copy_string_kernel("", bprm); 1946 if (retval < 0) 1947 goto out_free; 1948 bprm->argc = 1; 1949 } 1950 1951 retval = bprm_execve(bprm, fd, filename, flags); 1952 out_free: 1953 free_bprm(bprm); 1954 1955 out_ret: 1956 putname(filename); 1957 return retval; 1958 } 1959 1960 int kernel_execve(const char *kernel_filename, 1961 const char *const *argv, const char *const *envp) 1962 { 1963 struct filename *filename; 1964 struct linux_binprm *bprm; 1965 int fd = AT_FDCWD; 1966 int retval; 1967 1968 /* It is non-sense for kernel threads to call execve */ 1969 if (WARN_ON_ONCE(current->flags & PF_KTHREAD)) 1970 return -EINVAL; 1971 1972 filename = getname_kernel(kernel_filename); 1973 if (IS_ERR(filename)) 1974 return PTR_ERR(filename); 1975 1976 bprm = alloc_bprm(fd, filename); 1977 if (IS_ERR(bprm)) { 1978 retval = PTR_ERR(bprm); 1979 goto out_ret; 1980 } 1981 1982 retval = count_strings_kernel(argv); 1983 if (WARN_ON_ONCE(retval == 0)) 1984 retval = -EINVAL; 1985 if (retval < 0) 1986 goto out_free; 1987 bprm->argc = retval; 1988 1989 retval = count_strings_kernel(envp); 1990 if (retval < 0) 1991 goto out_free; 1992 bprm->envc = retval; 1993 1994 retval = bprm_stack_limits(bprm); 1995 if (retval < 0) 1996 goto out_free; 1997 1998 retval = copy_string_kernel(bprm->filename, bprm); 1999 if (retval < 0) 2000 goto out_free; 2001 bprm->exec = bprm->p; 2002 2003 retval = copy_strings_kernel(bprm->envc, envp, bprm); 2004 if (retval < 0) 2005 goto out_free; 2006 2007 retval = copy_strings_kernel(bprm->argc, argv, bprm); 2008 if (retval < 0) 2009 goto out_free; 2010 2011 retval = bprm_execve(bprm, fd, filename, 0); 2012 out_free: 2013 free_bprm(bprm); 2014 out_ret: 2015 putname(filename); 2016 return retval; 2017 } 2018 2019 static int do_execve(struct filename *filename, 2020 const char __user *const __user *__argv, 2021 const char __user *const __user *__envp) 2022 { 2023 struct user_arg_ptr argv = { .ptr.native = __argv }; 2024 struct user_arg_ptr envp = { .ptr.native = __envp }; 2025 return do_execveat_common(AT_FDCWD, filename, argv, envp, 0); 2026 } 2027 2028 static int do_execveat(int fd, struct filename *filename, 2029 const char __user *const __user *__argv, 2030 const char __user *const __user *__envp, 2031 int flags) 2032 { 2033 struct user_arg_ptr argv = { .ptr.native = __argv }; 2034 struct user_arg_ptr envp = { .ptr.native = __envp }; 2035 2036 return do_execveat_common(fd, filename, argv, envp, flags); 2037 } 2038 2039 #ifdef CONFIG_COMPAT 2040 static int compat_do_execve(struct filename *filename, 2041 const compat_uptr_t __user *__argv, 2042 const compat_uptr_t __user *__envp) 2043 { 2044 struct user_arg_ptr argv = { 2045 .is_compat = true, 2046 .ptr.compat = __argv, 2047 }; 2048 struct user_arg_ptr envp = { 2049 .is_compat = true, 2050 .ptr.compat = __envp, 2051 }; 2052 return do_execveat_common(AT_FDCWD, filename, argv, envp, 0); 2053 } 2054 2055 static int compat_do_execveat(int fd, struct filename *filename, 2056 const compat_uptr_t __user *__argv, 2057 const compat_uptr_t __user *__envp, 2058 int flags) 2059 { 2060 struct user_arg_ptr argv = { 2061 .is_compat = true, 2062 .ptr.compat = __argv, 2063 }; 2064 struct user_arg_ptr envp = { 2065 .is_compat = true, 2066 .ptr.compat = __envp, 2067 }; 2068 return do_execveat_common(fd, filename, argv, envp, flags); 2069 } 2070 #endif 2071 2072 void set_binfmt(struct linux_binfmt *new) 2073 { 2074 struct mm_struct *mm = current->mm; 2075 2076 if (mm->binfmt) 2077 module_put(mm->binfmt->module); 2078 2079 mm->binfmt = new; 2080 if (new) 2081 __module_get(new->module); 2082 } 2083 EXPORT_SYMBOL(set_binfmt); 2084 2085 /* 2086 * set_dumpable stores three-value SUID_DUMP_* into mm->flags. 2087 */ 2088 void set_dumpable(struct mm_struct *mm, int value) 2089 { 2090 if (WARN_ON((unsigned)value > SUID_DUMP_ROOT)) 2091 return; 2092 2093 set_mask_bits(&mm->flags, MMF_DUMPABLE_MASK, value); 2094 } 2095 2096 SYSCALL_DEFINE3(execve, 2097 const char __user *, filename, 2098 const char __user *const __user *, argv, 2099 const char __user *const __user *, envp) 2100 { 2101 return do_execve(getname(filename), argv, envp); 2102 } 2103 2104 SYSCALL_DEFINE5(execveat, 2105 int, fd, const char __user *, filename, 2106 const char __user *const __user *, argv, 2107 const char __user *const __user *, envp, 2108 int, flags) 2109 { 2110 return do_execveat(fd, 2111 getname_uflags(filename, flags), 2112 argv, envp, flags); 2113 } 2114 2115 #ifdef CONFIG_COMPAT 2116 COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename, 2117 const compat_uptr_t __user *, argv, 2118 const compat_uptr_t __user *, envp) 2119 { 2120 return compat_do_execve(getname(filename), argv, envp); 2121 } 2122 2123 COMPAT_SYSCALL_DEFINE5(execveat, int, fd, 2124 const char __user *, filename, 2125 const compat_uptr_t __user *, argv, 2126 const compat_uptr_t __user *, envp, 2127 int, flags) 2128 { 2129 return compat_do_execveat(fd, 2130 getname_uflags(filename, flags), 2131 argv, envp, flags); 2132 } 2133 #endif 2134 2135 #ifdef CONFIG_SYSCTL 2136 2137 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write, 2138 void *buffer, size_t *lenp, loff_t *ppos) 2139 { 2140 int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 2141 2142 if (!error) 2143 validate_coredump_safety(); 2144 return error; 2145 } 2146 2147 static struct ctl_table fs_exec_sysctls[] = { 2148 { 2149 .procname = "suid_dumpable", 2150 .data = &suid_dumpable, 2151 .maxlen = sizeof(int), 2152 .mode = 0644, 2153 .proc_handler = proc_dointvec_minmax_coredump, 2154 .extra1 = SYSCTL_ZERO, 2155 .extra2 = SYSCTL_TWO, 2156 }, 2157 { } 2158 }; 2159 2160 static int __init init_fs_exec_sysctls(void) 2161 { 2162 register_sysctl_init("fs", fs_exec_sysctls); 2163 return 0; 2164 } 2165 2166 fs_initcall(init_fs_exec_sysctls); 2167 #endif /* CONFIG_SYSCTL */ 2168