// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/exec.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale.   Use mmap instead,
 * current->executable is only used by the procfs.  This allows a dispatch
 * table to check for several different types of binary formats.  We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */

#include <linux/kernel_read_file.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/signal.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>
#include <linux/syscall_user_dispatch.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>

static int bprm_creds_from_file(struct linux_binprm *bprm);

int suid_dumpable = 0;

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);

void __register_binfmt(struct linux_binfmt * fmt, int insert)
{
	write_lock(&binfmt_lock);
	insert ? list_add(&fmt->lh, &formats) :
		 list_add_tail(&fmt->lh, &formats);
	write_unlock(&binfmt_lock);
}
EXPORT_SYMBOL(__register_binfmt);

void unregister_binfmt(struct linux_binfmt * fmt)
{
	write_lock(&binfmt_lock);
	list_del(&fmt->lh);
	write_unlock(&binfmt_lock);
}
EXPORT_SYMBOL(unregister_binfmt);

static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}

bool path_noexec(const struct path *path)
{
	return (path->mnt->mnt_flags & MNT_NOEXEC) ||
	       (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
}
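
/*
 * Illustrative sketch (not part of this file): the registration API above
 * is normally reached through the register_binfmt()/insert_binfmt()
 * wrappers declared in <linux/binfmts.h>, which call __register_binfmt().
 * A format-handler module, with a hypothetical example_load_binary(),
 * would look roughly like:
 *
 *	static struct linux_binfmt example_format = {
 *		.module      = THIS_MODULE,
 *		.load_binary = example_load_binary,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		register_binfmt(&example_format);
 *		return 0;
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_binfmt(&example_format);
 *	}
 */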

#ifdef CONFIG_USELIB
/*
 * Note that a shared library must be both readable and executable for
 * security reasons.
 *
 * Also note that we take the address to load from out of the file itself.
 */
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
	struct linux_binfmt *fmt;
	struct file *file;
	struct filename *tmp = getname(library);
	int error = PTR_ERR(tmp);
	static const struct open_flags uselib_flags = {
		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
		.acc_mode = MAY_READ | MAY_EXEC,
		.intent = LOOKUP_OPEN,
		.lookup_flags = LOOKUP_FOLLOW,
	};

	if (IS_ERR(tmp))
		goto out;

	file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
	putname(tmp);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	/*
	 * may_open() has already checked for this, so it should be
	 * impossible to trip now. But we need to be extra cautious
	 * and check again at the very end too.
	 */
	error = -EACCES;
	if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
			 path_noexec(&file->f_path)))
		goto exit;

	fsnotify_open(file);

	error = -ENOEXEC;

	read_lock(&binfmt_lock);
	list_for_each_entry(fmt, &formats, lh) {
		if (!fmt->load_shlib)
			continue;
		if (!try_module_get(fmt->module))
			continue;
		read_unlock(&binfmt_lock);
		error = fmt->load_shlib(file);
		read_lock(&binfmt_lock);
		put_binfmt(fmt);
		if (error != -ENOEXEC)
			break;
	}
	read_unlock(&binfmt_lock);
exit:
	fput(file);
out:
	return error;
}
#endif /* #ifdef CONFIG_USELIB */

#ifdef CONFIG_MMU
/*
 * The nascent bprm->mm is not visible until exec_mmap(), but it can
 * use a lot of memory, so account these pages in current->mm temporarily
 * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we change
 * the counter back via acct_arg_size(0).
 */
static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
	struct mm_struct *mm = current->mm;
	long diff = (long)(pages - bprm->vma_pages);

	if (!mm || !diff)
		return;

	bprm->vma_pages = pages;
	add_mm_counter(mm, MM_ANONPAGES, diff);
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;
	int ret;
	unsigned int gup_flags = FOLL_FORCE;

#ifdef CONFIG_STACK_GROWSUP
	if (write) {
		ret = expand_downwards(bprm->vma, pos);
		if (ret < 0)
			return NULL;
	}
#endif

	if (write)
		gup_flags |= FOLL_WRITE;

	/*
	 * We are doing an exec().  'current' is the process
	 * doing the exec and 'bprm->mm' is the new process's mm.
	 */
	mmap_read_lock(bprm->mm);
	ret = get_user_pages_remote(bprm->mm, pos, 1, gup_flags,
			&page, NULL, NULL);
	mmap_read_unlock(bprm->mm);
	if (ret <= 0)
		return NULL;

	if (write)
		acct_arg_size(bprm, vma_pages(bprm->vma));

	return page;
}

static void put_arg_page(struct page *page)
{
	put_page(page);
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = bprm->mm;

	bprm->vma = vma = vm_area_alloc(mm);
	if (!vma)
		return -ENOMEM;
	vma_set_anonymous(vma);

	if (mmap_write_lock_killable(mm)) {
		err = -EINTR;
		goto err_free;
	}

	/*
	 * Place the stack at the largest stack address the architecture
	 * supports. Later, we'll move this to an appropriate place. We don't
	 * use STACK_TOP because that can depend on attributes which aren't
	 * configured yet.
	 */
	BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
	vma->vm_end = STACK_TOP_MAX;
	vma->vm_start = vma->vm_end - PAGE_SIZE;
	vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	err = insert_vm_struct(mm, vma);
	if (err)
		goto err;

	mm->stack_vm = mm->total_vm = 1;
	mmap_write_unlock(mm);
	bprm->p = vma->vm_end - sizeof(void *);
	return 0;
err:
	mmap_write_unlock(mm);
err_free:
	bprm->vma = NULL;
	vm_area_free(vma);
	return err;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= MAX_ARG_STRLEN;
}

#else

static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;

	page = bprm->page[pos / PAGE_SIZE];
	if (!page && write) {
		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
		if (!page)
			return NULL;
		bprm->page[pos / PAGE_SIZE] = page;
	}

	return page;
}

static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
	if (bprm->page[i]) {
		__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

static void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++)
		free_arg_page(bprm, i);
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
	return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= bprm->p;
}

#endif /* CONFIG_MMU */

/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct.  We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values.  We'll update
 * them later in setup_arg_pages().
 */
static int bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct mm_struct *mm = NULL;

	bprm->mm = mm = mm_alloc();
	err = -ENOMEM;
	if (!mm)
		goto err;

	/* Save current stack limit for all calculations made during exec. */
	task_lock(current->group_leader);
	bprm->rlim_stack = current->signal->rlim[RLIMIT_STACK];
	task_unlock(current->group_leader);

	err = __bprm_mm_init(bprm);
	if (err)
		goto err;

	return 0;

err:
	if (mm) {
		bprm->mm = NULL;
		mmdrop(mm);
	}

	return err;
}

struct user_arg_ptr {
#ifdef CONFIG_COMPAT
	bool is_compat;
#endif
	union {
		const char __user *const __user *native;
#ifdef CONFIG_COMPAT
		const compat_uptr_t __user *compat;
#endif
	} ptr;
};

static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
{
	const char __user *native;

#ifdef CONFIG_COMPAT
	if (unlikely(argv.is_compat)) {
		compat_uptr_t compat;

		if (get_user(compat, argv.ptr.compat + nr))
			return ERR_PTR(-EFAULT);

		return compat_ptr(compat);
	}
#endif

	if (get_user(native, argv.ptr.native + nr))
		return ERR_PTR(-EFAULT);

	return native;
}

/*
 * count() counts the number of strings in array ARGV.
 */
static int count(struct user_arg_ptr argv, int max)
{
	int i = 0;

	if (argv.ptr.native != NULL) {
		for (;;) {
			const char __user *p = get_user_arg_ptr(argv, i);

			if (!p)
				break;

			if (IS_ERR(p))
				return -EFAULT;

			if (i >= max)
				return -E2BIG;
			++i;

			if (fatal_signal_pending(current))
				return -ERESTARTNOHAND;
			cond_resched();
		}
	}
	return i;
}

static int count_strings_kernel(const char *const *argv)
{
	int i;

	if (!argv)
		return 0;

	for (i = 0; argv[i]; ++i) {
		if (i >= MAX_ARG_STRINGS)
			return -E2BIG;
		if (fatal_signal_pending(current))
			return -ERESTARTNOHAND;
		cond_resched();
	}
	return i;
}

static int bprm_stack_limits(struct linux_binprm *bprm)
{
	unsigned long limit, ptr_size;

	/*
	 * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
	 * (whichever is smaller) for the argv+env strings.
	 * This ensures that:
	 *  - the remaining binfmt code will not run out of stack space,
	 *  - the program will have a reasonable amount of stack left
	 *    to work from.
	 */
	limit = _STK_LIM / 4 * 3;
	limit = min(limit, bprm->rlim_stack.rlim_cur / 4);
	/*
	 * We've historically supported up to 32 pages (ARG_MAX)
	 * of argument strings even with small stacks
	 */
	limit = max_t(unsigned long, limit, ARG_MAX);
	/*
	 * We must account for the size of all the argv and envp pointers to
	 * the argv and envp strings, since they will also take up space in
	 * the stack. They aren't stored until much later when we can't
	 * signal to the parent that the child has run out of stack space.
	 * Instead, calculate it here so it's possible to fail gracefully.
	 */
	ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
	if (limit <= ptr_size)
		return -E2BIG;
	limit -= ptr_size;

	bprm->argmin = bprm->p - limit;
	return 0;
}
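
/*
 * Worked example for bprm_stack_limits() above (illustrative numbers,
 * assuming the common _STK_LIM of 8 MiB and 4 KiB pages): with
 * RLIMIT_STACK also at 8 MiB, limit = min(6 MiB, 2 MiB) = 2 MiB, which
 * already exceeds ARG_MAX (32 pages = 128 KiB), so the max_t() changes
 * nothing; after subtracting (argc + envc) * sizeof(void *) for the
 * pointer arrays, bprm->argmin lands roughly 2 MiB below the initial
 * bprm->p.
 */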

/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack.  The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, struct user_arg_ptr argv,
			struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	unsigned long kpos = 0;
	int ret;

	while (argc-- > 0) {
		const char __user *str;
		int len;
		unsigned long pos;

		ret = -EFAULT;
		str = get_user_arg_ptr(argv, argc);
		if (IS_ERR(str))
			goto out;

		len = strnlen_user(str, MAX_ARG_STRLEN);
		if (!len)
			goto out;

		ret = -E2BIG;
		if (!valid_arg_len(bprm, len))
			goto out;

		/* We're going to work our way backwards. */
		pos = bprm->p;
		str += len;
		bprm->p -= len;
#ifdef CONFIG_MMU
		if (bprm->p < bprm->argmin)
			goto out;
#endif

		while (len > 0) {
			int offset, bytes_to_copy;

			if (fatal_signal_pending(current)) {
				ret = -ERESTARTNOHAND;
				goto out;
			}
			cond_resched();

			offset = pos % PAGE_SIZE;
			if (offset == 0)
				offset = PAGE_SIZE;

			bytes_to_copy = offset;
			if (bytes_to_copy > len)
				bytes_to_copy = len;

			offset -= bytes_to_copy;
			pos -= bytes_to_copy;
			str -= bytes_to_copy;
			len -= bytes_to_copy;

			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
				struct page *page;

				page = get_arg_page(bprm, pos, 1);
				if (!page) {
					ret = -E2BIG;
					goto out;
				}

				if (kmapped_page) {
					flush_dcache_page(kmapped_page);
					kunmap(kmapped_page);
					put_arg_page(kmapped_page);
				}
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
				kpos = pos & PAGE_MASK;
				flush_arg_page(bprm, kpos, kmapped_page);
			}
			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
				ret = -EFAULT;
				goto out;
			}
		}
	}
	ret = 0;
out:
	if (kmapped_page) {
		flush_dcache_page(kmapped_page);
		kunmap(kmapped_page);
		put_arg_page(kmapped_page);
	}
	return ret;
}

/*
 * Copy an argument/environment string from the kernel to the process's stack.
 */
int copy_string_kernel(const char *arg, struct linux_binprm *bprm)
{
	int len = strnlen(arg, MAX_ARG_STRLEN) + 1 /* terminating NUL */;
	unsigned long pos = bprm->p;

	if (len == 0)
		return -EFAULT;
	if (!valid_arg_len(bprm, len))
		return -E2BIG;

	/* We're going to work our way backwards. */
	arg += len;
	bprm->p -= len;
	if (IS_ENABLED(CONFIG_MMU) && bprm->p < bprm->argmin)
		return -E2BIG;

	while (len > 0) {
		unsigned int bytes_to_copy = min_t(unsigned int, len,
				min_not_zero(offset_in_page(pos), PAGE_SIZE));
		struct page *page;
		char *kaddr;

		pos -= bytes_to_copy;
		arg -= bytes_to_copy;
		len -= bytes_to_copy;

		page = get_arg_page(bprm, pos, 1);
		if (!page)
			return -E2BIG;
		kaddr = kmap_atomic(page);
		flush_arg_page(bprm, pos & PAGE_MASK, page);
		memcpy(kaddr + offset_in_page(pos), arg, bytes_to_copy);
		flush_dcache_page(page);
		kunmap_atomic(kaddr);
		put_arg_page(page);
	}

	return 0;
}
EXPORT_SYMBOL(copy_string_kernel);

static int copy_strings_kernel(int argc, const char *const *argv,
			       struct linux_binprm *bprm)
{
	while (argc-- > 0) {
		int ret = copy_string_kernel(argv[argc], bprm);
		if (ret < 0)
			return ret;
		if (fatal_signal_pending(current))
			return -ERESTARTNOHAND;
		cond_resched();
	}
	return 0;
}
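
/*
 * Illustrative layout of the copy loops above: strings are written
 * top-down from the end of the temporary stack, so after copying "arg"
 * the area looks like
 *
 *	bprm->p (new) -> | 'a' | 'r' | 'g' | '\0' | <- bprm->p (old)
 *
 * bprm->p always points at the lowest byte written so far, and the next
 * string lands immediately below it.
 */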

#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location.  The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges.  This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_start = vma->vm_start;
	unsigned long old_end = vma->vm_end;
	unsigned long length = old_end - old_start;
	unsigned long new_start = old_start - shift;
	unsigned long new_end = old_end - shift;
	struct mmu_gather tlb;

	BUG_ON(new_start > new_end);

	/*
	 * ensure there are no vmas between where we want to go
	 * and where we are
	 */
	if (vma != find_vma(mm, new_start))
		return -EFAULT;

	/*
	 * cover the whole range: [new_start, old_end)
	 */
	if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
		return -ENOMEM;

	/*
	 * move the page tables downwards, on failure we rely on
	 * process cleanup to remove whatever mess we made.
	 */
	if (length != move_page_tables(vma, old_start,
				       vma, new_start, length, false))
		return -ENOMEM;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	if (new_end > old_start) {
		/*
		 * when the old and new regions overlap, clear from new_end.
		 */
		free_pgd_range(&tlb, new_end, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
	} else {
		/*
		 * otherwise, clean from old_start; this is done to not touch
		 * the address space in [new_end, old_start), since some
		 * architectures have constraints on va-space that make this
		 * illegal (IA64); for the others it's just a little faster.
		 */
		free_pgd_range(&tlb, old_start, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
	}
	tlb_finish_mmu(&tlb);

	/*
	 * Shrink the vma to just the new range.  Always succeeds.
	 */
	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

	return 0;
}

/*
 * Finalizes the stack vm_area_struct.  The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long ret;
	unsigned long stack_shift;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = bprm->vma;
	struct vm_area_struct *prev = NULL;
	unsigned long vm_flags;
	unsigned long stack_base;
	unsigned long stack_size;
	unsigned long stack_expand;
	unsigned long rlim_stack;

#ifdef CONFIG_STACK_GROWSUP
	/* Limit stack size */
	stack_base = bprm->rlim_stack.rlim_max;

	stack_base = calc_max_stack_size(stack_base);

	/* Add space for stack randomization. */
	stack_base += (STACK_RND_MASK << PAGE_SHIFT);

	/* Make sure we didn't let the argument array grow too large. */
	if (vma->vm_end - vma->vm_start > stack_base)
		return -ENOMEM;

	stack_base = PAGE_ALIGN(stack_top - stack_base);

	stack_shift = vma->vm_start - stack_base;
	mm->arg_start = bprm->p - stack_shift;
	bprm->p = vma->vm_end - stack_shift;
#else
	stack_top = arch_align_stack(stack_top);
	stack_top = PAGE_ALIGN(stack_top);

	if (unlikely(stack_top < mmap_min_addr) ||
	    unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
		return -ENOMEM;

	stack_shift = vma->vm_end - stack_top;

	bprm->p -= stack_shift;
	mm->arg_start = bprm->p;
#endif

	if (bprm->loader)
		bprm->loader -= stack_shift;
	bprm->exec -= stack_shift;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	vm_flags = VM_STACK_FLAGS;

	/*
	 * Adjust stack execute permissions; explicitly enable for
	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
	 * (arch default) otherwise.
	 */
	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
		vm_flags |= VM_EXEC;
	else if (executable_stack == EXSTACK_DISABLE_X)
		vm_flags &= ~VM_EXEC;
	vm_flags |= mm->def_flags;
	vm_flags |= VM_STACK_INCOMPLETE_SETUP;

	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
			vm_flags);
	if (ret)
		goto out_unlock;
	BUG_ON(prev != vma);

	if (unlikely(vm_flags & VM_EXEC)) {
		pr_warn_once("process '%pD4' started with executable stack\n",
			     bprm->file);
	}

	/* Move stack pages down in memory. */
	if (stack_shift) {
		ret = shift_arg_pages(vma, stack_shift);
		if (ret)
			goto out_unlock;
	}

	/* mprotect_fixup is overkill to remove the temporary stack flags */
	vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;

	stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
	stack_size = vma->vm_end - vma->vm_start;
	/*
	 * Align this down to a page boundary as expand_stack
	 * will align it up.
	 */
	rlim_stack = bprm->rlim_stack.rlim_cur & PAGE_MASK;
#ifdef CONFIG_STACK_GROWSUP
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_start + rlim_stack;
	else
		stack_base = vma->vm_end + stack_expand;
#else
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_end - rlim_stack;
	else
		stack_base = vma->vm_start - stack_expand;
#endif
	current->mm->start_stack = bprm->p;
	ret = expand_stack(vma, stack_base);
	if (ret)
		ret = -EFAULT;

out_unlock:
	mmap_write_unlock(mm);
	return ret;
}
EXPORT_SYMBOL(setup_arg_pages);
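
/*
 * Worked example for the grows-down branch of setup_arg_pages() above
 * (illustrative addresses): if the temporary stack page ends at
 * vma->vm_end == 0x7ffffffff000 and the binfmt chose
 * stack_top == 0x7ffff7fff000, then stack_shift == 0x8000000 (128 MiB),
 * and bprm->p, bprm->exec and mm->arg_start all move down by exactly
 * that amount before shift_arg_pages() relocates the pages themselves.
 */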

#else

/*
 * Transfer the program arguments and environment from the holding pages
 * onto the stack.  The provided stack pointer is adjusted accordingly.
 */
int transfer_args_to_stack(struct linux_binprm *bprm,
			   unsigned long *sp_location)
{
	unsigned long index, stop, sp;
	int ret = 0;

	stop = bprm->p >> PAGE_SHIFT;
	sp = *sp_location;

	for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
		unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
		char *src = kmap(bprm->page[index]) + offset;
		sp -= PAGE_SIZE - offset;
		if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
			ret = -EFAULT;
		kunmap(bprm->page[index]);
		if (ret)
			goto out;
	}

	*sp_location = sp;

out:
	return ret;
}
EXPORT_SYMBOL(transfer_args_to_stack);

#endif /* CONFIG_MMU */

static struct file *do_open_execat(int fd, struct filename *name, int flags)
{
	struct file *file;
	int err;
	struct open_flags open_exec_flags = {
		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
		.acc_mode = MAY_EXEC,
		.intent = LOOKUP_OPEN,
		.lookup_flags = LOOKUP_FOLLOW,
	};

	if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
		return ERR_PTR(-EINVAL);
	if (flags & AT_SYMLINK_NOFOLLOW)
		open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
	if (flags & AT_EMPTY_PATH)
		open_exec_flags.lookup_flags |= LOOKUP_EMPTY;

	file = do_filp_open(fd, name, &open_exec_flags);
	if (IS_ERR(file))
		goto out;

	/*
	 * may_open() has already checked for this, so it should be
	 * impossible to trip now. But we need to be extra cautious
	 * and check again at the very end too.
	 */
	err = -EACCES;
	if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
			 path_noexec(&file->f_path)))
		goto exit;

	err = deny_write_access(file);
	if (err)
		goto exit;

	if (name->name[0] != '\0')
		fsnotify_open(file);

out:
	return file;

exit:
	fput(file);
	return ERR_PTR(err);
}

struct file *open_exec(const char *name)
{
	struct filename *filename = getname_kernel(name);
	struct file *f = ERR_CAST(filename);

	if (!IS_ERR(filename)) {
		f = do_open_execat(AT_FDCWD, filename, 0);
		putname(filename);
	}
	return f;
}
EXPORT_SYMBOL(open_exec);

#if defined(CONFIG_HAVE_AOUT) || defined(CONFIG_BINFMT_FLAT) || \
    defined(CONFIG_BINFMT_ELF_FDPIC)
ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
{
	ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
	if (res > 0)
		flush_icache_user_range(addr, addr + len);
	return res;
}
EXPORT_SYMBOL(read_code);
#endif

/*
 * Maps the mm_struct mm into the current task struct.
 * On success, this function returns with exec_update_lock
 * held for writing.
 */
static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct *old_mm, *active_mm;
	int ret;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	exec_mm_release(tsk, old_mm);
	if (old_mm)
		sync_mm_rss(old_mm);

	ret = down_write_killable(&tsk->signal->exec_update_lock);
	if (ret)
		return ret;

	if (old_mm) {
		/*
		 * If there is a pending fatal signal, perhaps one whose
		 * default action is to create a coredump, get out and die
		 * instead of going through with the exec.
		 */
		ret = mmap_read_lock_killable(old_mm);
		if (ret) {
			up_write(&tsk->signal->exec_update_lock);
			return ret;
		}
	}

	task_lock(tsk);
	membarrier_exec_mmap(mm);

	local_irq_disable();
	active_mm = tsk->active_mm;
	tsk->active_mm = mm;
	tsk->mm = mm;
	/*
	 * This prevents preemption while active_mm is being loaded and
	 * it and mm are being updated, which could cause problems for
	 * lazy tlb mm refcounting when these are updated by context
	 * switches. Not all architectures can handle irqs off over
	 * activate_mm yet.
	 */
	if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
		local_irq_enable();
	activate_mm(active_mm, mm);
	if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
		local_irq_enable();
	tsk->mm->vmacache_seqnum = 0;
	vmacache_flush(tsk);
	task_unlock(tsk);
	if (old_mm) {
		mmap_read_unlock(old_mm);
		BUG_ON(active_mm != old_mm);
		setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
		mm_update_next_owner(old_mm);
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}

static int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;

	if (thread_group_empty(tsk))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 */
	spin_lock_irq(lock);
	if ((sig->flags & SIGNAL_GROUP_EXIT) || sig->group_exec_task) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		return -EAGAIN;
	}

	sig->group_exec_task = tsk;
	sig->notify_count = zap_other_threads(tsk);
	if (!thread_group_leader(tsk))
		sig->notify_count--;

	while (sig->notify_count) {
		__set_current_state(TASK_KILLABLE);
		spin_unlock_irq(lock);
		schedule();
		if (__fatal_signal_pending(tsk))
			goto killed;
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(tsk)) {
		struct task_struct *leader = tsk->group_leader;

		for (;;) {
			cgroup_threadgroup_change_begin(tsk);
			write_lock_irq(&tasklist_lock);
			/*
			 * Do this under tasklist_lock to ensure that
			 * exit_notify() can't miss ->group_exec_task
			 */
			sig->notify_count = -1;
			if (likely(leader->exit_state))
				break;
			__set_current_state(TASK_KILLABLE);
			write_unlock_irq(&tasklist_lock);
			cgroup_threadgroup_change_end(tsk);
			schedule();
			if (__fatal_signal_pending(tsk))
				goto killed;
		}

		/*
		 * The only record we have of the real-time age of a
		 * process, regardless of execs it's done, is start_time.
		 * All the past CPU time is accumulated in signal_struct
		 * from sister threads now dead.  But in this non-leader
		 * exec, nothing survives from the original leader thread,
		 * whose birth marks the true age of this process now.
		 * When we take on its identity by switching to its PID, we
		 * also take its birthdate (always earlier than our own).
		 */
		tsk->start_time = leader->start_time;
		tsk->start_boottime = leader->start_boottime;

		BUG_ON(!same_thread_group(leader, tsk));
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */

		/* Become a process group leader with the old leader's pid.
		 * The old leader becomes a thread of this thread group.
		 */
		exchange_tids(tsk, leader);
		transfer_pid(leader, tsk, PIDTYPE_TGID);
		transfer_pid(leader, tsk, PIDTYPE_PGID);
		transfer_pid(leader, tsk, PIDTYPE_SID);

		list_replace_rcu(&leader->tasks, &tsk->tasks);
		list_replace_init(&leader->sibling, &tsk->sibling);

		tsk->group_leader = tsk;
		leader->group_leader = tsk;

		tsk->exit_signal = SIGCHLD;
		leader->exit_signal = -1;

		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
		leader->exit_state = EXIT_DEAD;

		/*
		 * We are going to release_task()->ptrace_unlink() silently,
		 * the tracer can sleep in do_wait().  EXIT_DEAD guarantees
		 * the tracer won't block again waiting for this thread.
		 */
		if (unlikely(leader->ptrace))
			__wake_up_parent(leader, leader->parent);
		write_unlock_irq(&tasklist_lock);
		cgroup_threadgroup_change_end(tsk);

		release_task(leader);
	}

	sig->group_exec_task = NULL;
	sig->notify_count = 0;

no_thread_group:
	/* we have changed execution domain */
	tsk->exit_signal = SIGCHLD;

	BUG_ON(!thread_group_leader(tsk));
	return 0;

killed:
	/* protects against exit_notify() and __exit_signal() */
	read_lock(&tasklist_lock);
	sig->group_exec_task = NULL;
	sig->notify_count = 0;
	read_unlock(&tasklist_lock);
	return -EAGAIN;
}


/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int unshare_sighand(struct task_struct *me)
{
	struct sighand_struct *oldsighand = me->sighand;

	if (refcount_read(&oldsighand->count) != 1) {
		struct sighand_struct *newsighand;
		/*
		 * This ->sighand is shared with the CLONE_SIGHAND
		 * but not CLONE_THREAD task, switch to the new one.
		 */
		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
		if (!newsighand)
			return -ENOMEM;

		refcount_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		rcu_assign_pointer(me->sighand, newsighand);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		__cleanup_sighand(oldsighand);
	}
	return 0;
}

char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
{
	task_lock(tsk);
	/* Always NUL terminated and zero-padded */
	strscpy_pad(buf, tsk->comm, buf_size);
	task_unlock(tsk);
	return buf;
}
EXPORT_SYMBOL_GPL(__get_task_comm);

/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */

void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
{
	task_lock(tsk);
	trace_task_rename(tsk, buf);
	strscpy_pad(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
	perf_event_comm(tsk, exec);
}

/*
 * Calling this is the point of no return. None of the failures will be
 * seen by userspace since either the process is already taking a fatal
 * signal (via de_thread() or coredump), or will have SEGV raised
 * (after exec_mmap()) by search_binary_handler (see below).
 */
int begin_new_exec(struct linux_binprm * bprm)
{
	struct task_struct *me = current;
	int retval;

	/* Once we are committed compute the creds */
	retval = bprm_creds_from_file(bprm);
	if (retval)
		return retval;

	/*
	 * Ensure all future errors are fatal.
	 */
	bprm->point_of_no_return = true;

	/*
	 * Make this the only thread in the thread group.
	 */
	retval = de_thread(me);
	if (retval)
		goto out;

	/*
	 * Cancel any io_uring activity across execve
	 */
	io_uring_task_cancel();

	/* Ensure the files table is not shared. */
	retval = unshare_files();
	if (retval)
		goto out;

	/*
	 * Must be called _before_ exec_mmap() as bprm->mm is
	 * not visible until then. This also enables the update
	 * to be lockless.
	 */
	retval = set_mm_exe_file(bprm->mm, bprm->file);
	if (retval)
		goto out;

	/* If the binary is not readable then enforce mm->dumpable=0 */
	would_dump(bprm, bprm->file);
	if (bprm->have_execfd)
		would_dump(bprm, bprm->executable);

	/*
	 * Release all of the old mmap stuff
	 */
	acct_arg_size(bprm, 0);
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto out;

	bprm->mm = NULL;

#ifdef CONFIG_POSIX_TIMERS
	exit_itimers(me->signal);
	flush_itimer_signals();
#endif

	/*
	 * Make the signal table private.
	 */
	retval = unshare_sighand(me);
	if (retval)
		goto out_unlock;

	/*
	 * Ensure that the uaccess routines can actually operate on userspace
	 * pointers:
	 */
	force_uaccess_begin();

	if (me->flags & PF_KTHREAD)
		free_kthread_struct(me);
	me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
					PF_NOFREEZE | PF_NO_SETAFFINITY);
	flush_thread();
	me->personality &= ~bprm->per_clear;

	clear_syscall_work_syscall_user_dispatch(me);

	/*
	 * We have to apply CLOEXEC before we change whether the process is
	 * dumpable (in setup_new_exec) to avoid a race with a process in userspace
	 * trying to access the should-be-closed file descriptors of a process
	 * undergoing exec(2).
	 */
	do_close_on_exec(me->files);

	if (bprm->secureexec) {
		/* Make sure parent cannot signal privileged process. */
		me->pdeath_signal = 0;

		/*
		 * For secureexec, reset the stack limit to sane default to
		 * avoid bad behavior from the prior rlimits. This has to
		 * happen before arch_pick_mmap_layout(), which examines
		 * RLIMIT_STACK, but after the point of no return to avoid
		 * needing to clean up the change on failure.
		 */
		if (bprm->rlim_stack.rlim_cur > _STK_LIM)
			bprm->rlim_stack.rlim_cur = _STK_LIM;
	}

	me->sas_ss_sp = me->sas_ss_size = 0;

	/*
	 * Figure out dumpability. Note that checking only current is wrong,
	 * but userspace depends on it; this should be testing
	 * bprm->secureexec instead.
	 */
	if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
	    !(uid_eq(current_euid(), current_uid()) &&
	      gid_eq(current_egid(), current_gid())))
		set_dumpable(current->mm, suid_dumpable);
	else
		set_dumpable(current->mm, SUID_DUMP_USER);

	perf_event_exec();
	__set_task_comm(me, kbasename(bprm->filename), true);

	/* An exec changes our domain. We are no longer part of the thread
	   group */
	WRITE_ONCE(me->self_exec_id, me->self_exec_id + 1);
	flush_signal_handlers(me, 0);

	retval = set_cred_ucounts(bprm->cred);
	if (retval < 0)
		goto out_unlock;

	/*
	 * install the new credentials for this executable
	 */
	security_bprm_committing_creds(bprm);

	commit_creds(bprm->cred);
	bprm->cred = NULL;

	/*
	 * Disable monitoring for regular users
	 * when executing setuid binaries. Must
	 * wait until new credentials are committed
	 * by commit_creds() above.
	 */
	if (get_dumpable(me->mm) != SUID_DUMP_USER)
		perf_event_exit_task(me);
	/*
	 * cred_guard_mutex must be held at least to this point to prevent
	 * ptrace_attach() from altering our determination of the task's
	 * credentials; any time after this it may be unlocked.
	 */
	security_bprm_committed_creds(bprm);

	/* Pass the opened binary to the interpreter. */
	if (bprm->have_execfd) {
		retval = get_unused_fd_flags(0);
		if (retval < 0)
			goto out_unlock;
		fd_install(retval, bprm->executable);
		bprm->executable = NULL;
		bprm->execfd = retval;
	}
	return 0;

out_unlock:
	up_write(&me->signal->exec_update_lock);
out:
	return retval;
}
EXPORT_SYMBOL(begin_new_exec);

void would_dump(struct linux_binprm *bprm, struct file *file)
{
	struct inode *inode = file_inode(file);
	struct user_namespace *mnt_userns = file_mnt_user_ns(file);
	if (inode_permission(mnt_userns, inode, MAY_READ) < 0) {
		struct user_namespace *old, *user_ns;
		bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;

		/* Ensure mm->user_ns contains the executable */
		user_ns = old = bprm->mm->user_ns;
		while ((user_ns != &init_user_ns) &&
		       !privileged_wrt_inode_uidgid(user_ns, mnt_userns, inode))
			user_ns = user_ns->parent;

		if (old != user_ns) {
			bprm->mm->user_ns = get_user_ns(user_ns);
			put_user_ns(old);
		}
	}
}
EXPORT_SYMBOL(would_dump);

void setup_new_exec(struct linux_binprm * bprm)
{
	/* Setup things that can depend upon the personality */
	struct task_struct *me = current;

	arch_pick_mmap_layout(me->mm, &bprm->rlim_stack);

	arch_setup_new_exec();

	/* Set the new mm task size. We have to do that late because it may
	 * depend on TIF_32BIT which is only updated in flush_thread() on
	 * some architectures like powerpc.
	 */
	me->mm->task_size = TASK_SIZE;
	up_write(&me->signal->exec_update_lock);
	mutex_unlock(&me->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(setup_new_exec);

/* Runs immediately before start_thread() takes over. */
void finalize_exec(struct linux_binprm *bprm)
{
	/* Store any stack rlimit changes before starting thread. */
	task_lock(current->group_leader);
	current->signal->rlim[RLIMIT_STACK] = bprm->rlim_stack;
	task_unlock(current->group_leader);
}
EXPORT_SYMBOL(finalize_exec);

/*
 * Prepare credentials and lock ->cred_guard_mutex.
 * setup_new_exec() commits the new creds and drops the lock.
 * Or, if exec fails before, free_bprm() should release ->cred
 * and unlock.
 */
static int prepare_bprm_creds(struct linux_binprm *bprm)
{
	if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
		return -ERESTARTNOINTR;

	bprm->cred = prepare_exec_creds();
	if (likely(bprm->cred))
		return 0;

	mutex_unlock(&current->signal->cred_guard_mutex);
	return -ENOMEM;
}

static void free_bprm(struct linux_binprm *bprm)
{
	if (bprm->mm) {
		acct_arg_size(bprm, 0);
		mmput(bprm->mm);
	}
	free_arg_pages(bprm);
	if (bprm->cred) {
		mutex_unlock(&current->signal->cred_guard_mutex);
		abort_creds(bprm->cred);
	}
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}
	if (bprm->executable)
		fput(bprm->executable);
	/* If a binfmt changed the interp, free it. */
	if (bprm->interp != bprm->filename)
		kfree(bprm->interp);
	kfree(bprm->fdpath);
	kfree(bprm);
}

static struct linux_binprm *alloc_bprm(int fd, struct filename *filename)
{
	struct linux_binprm *bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
	int retval = -ENOMEM;
	if (!bprm)
		goto out;

	if (fd == AT_FDCWD || filename->name[0] == '/') {
		bprm->filename = filename->name;
	} else {
		if (filename->name[0] == '\0')
			bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd);
		else
			bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s",
						  fd, filename->name);
		if (!bprm->fdpath)
			goto out_free;

		bprm->filename = bprm->fdpath;
	}
	bprm->interp = bprm->filename;

	retval = bprm_mm_init(bprm);
	if (retval)
		goto out_free;
	return bprm;

out_free:
	free_bprm(bprm);
out:
	return ERR_PTR(retval);
}

int bprm_change_interp(const char *interp, struct linux_binprm *bprm)
{
	/* If a binfmt changed the interp, free it first. */
	if (bprm->interp != bprm->filename)
		kfree(bprm->interp);
	bprm->interp = kstrdup(interp, GFP_KERNEL);
	if (!bprm->interp)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(bprm_change_interp);

/*
 * determine how safe it is to execute the proposed program
 * - the caller must hold ->cred_guard_mutex to protect against
 *   PTRACE_ATTACH or seccomp thread-sync
 */
static void check_unsafe_exec(struct linux_binprm *bprm)
{
	struct task_struct *p = current, *t;
	unsigned n_fs;

	if (p->ptrace)
		bprm->unsafe |= LSM_UNSAFE_PTRACE;

	/*
	 * This isn't strictly necessary, but it makes it harder for LSMs to
	 * mess up.
	 */
	if (task_no_new_privs(current))
		bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;

	t = p;
	n_fs = 1;
	spin_lock(&p->fs->lock);
	rcu_read_lock();
	while_each_thread(p, t) {
		if (t->fs == p->fs)
			n_fs++;
	}
	rcu_read_unlock();

	if (p->fs->users > n_fs)
		bprm->unsafe |= LSM_UNSAFE_SHARE;
	else
		p->fs->in_exec = 1;
	spin_unlock(&p->fs->lock);
}

static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
{
	/* Handle suid and sgid on files */
	struct user_namespace *mnt_userns;
	struct inode *inode;
	unsigned int mode;
	kuid_t uid;
	kgid_t gid;

	if (!mnt_may_suid(file->f_path.mnt))
		return;

	if (task_no_new_privs(current))
		return;

	inode = file->f_path.dentry->d_inode;
	mode = READ_ONCE(inode->i_mode);
	if (!(mode & (S_ISUID|S_ISGID)))
		return;

	mnt_userns = file_mnt_user_ns(file);

	/* Be careful if suid/sgid is set */
	inode_lock(inode);

	/* Reload mode/uid/gid atomically now that the lock is held */
	mode = inode->i_mode;
	uid = i_uid_into_mnt(mnt_userns, inode);
	gid = i_gid_into_mnt(mnt_userns, inode);
	inode_unlock(inode);

	/* We ignore suid/sgid if there are no mappings for them in the ns */
	if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
	    !kgid_has_mapping(bprm->cred->user_ns, gid))
		return;

	if (mode & S_ISUID) {
		bprm->per_clear |= PER_CLEAR_ON_SETID;
		bprm->cred->euid = uid;
	}

	if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
		bprm->per_clear |= PER_CLEAR_ON_SETID;
		bprm->cred->egid = gid;
	}
}

/*
 * Compute bprm->cred based upon the final binary.
 */
static int bprm_creds_from_file(struct linux_binprm *bprm)
{
	/* Compute creds based on which file? */
	struct file *file = bprm->execfd_creds ? bprm->executable : bprm->file;

	bprm_fill_uid(bprm, file);
	return security_bprm_creds_from_file(bprm, file);
}

/*
 * Fill the binprm structure from the inode.
 * Read the first BINPRM_BUF_SIZE bytes.
 *
 * This may be called multiple times for binary chains (scripts for example).
 */
static int prepare_binprm(struct linux_binprm *bprm)
{
	loff_t pos = 0;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, bprm->buf, BINPRM_BUF_SIZE, &pos);
}
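
/*
 * Note on the buffer filled by prepare_binprm() above: those
 * BINPRM_BUF_SIZE bytes are all that format handlers get to inspect up
 * front; e.g. binfmt_script has to find the complete "#!" interpreter
 * line inside this buffer, while binfmt_elf only needs the e_ident
 * magic at the very start.
 */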

/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
	int ret = 0;
	unsigned long offset;
	char *kaddr;
	struct page *page;

	if (!bprm->argc)
		return 0;

	do {
		offset = bprm->p & ~PAGE_MASK;
		page = get_arg_page(bprm, bprm->p, 0);
		if (!page) {
			ret = -EFAULT;
			goto out;
		}
		kaddr = kmap_atomic(page);

		for (; offset < PAGE_SIZE && kaddr[offset];
				offset++, bprm->p++)
			;

		kunmap_atomic(kaddr);
		put_arg_page(page);
	} while (offset == PAGE_SIZE);

	bprm->p++;
	bprm->argc--;
	ret = 0;

out:
	return ret;
}
EXPORT_SYMBOL(remove_arg_zero);

#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
/*
 * cycle through the list of binary format handlers, until one recognizes
 * the image
 */
static int search_binary_handler(struct linux_binprm *bprm)
{
	bool need_retry = IS_ENABLED(CONFIG_MODULES);
	struct linux_binfmt *fmt;
	int retval;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		return retval;

	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	retval = -ENOENT;
 retry:
	read_lock(&binfmt_lock);
	list_for_each_entry(fmt, &formats, lh) {
		if (!try_module_get(fmt->module))
			continue;
		read_unlock(&binfmt_lock);

		retval = fmt->load_binary(bprm);

		read_lock(&binfmt_lock);
		put_binfmt(fmt);
		if (bprm->point_of_no_return || (retval != -ENOEXEC)) {
			read_unlock(&binfmt_lock);
			return retval;
		}
	}
	read_unlock(&binfmt_lock);

	if (need_retry) {
		if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
		    printable(bprm->buf[2]) && printable(bprm->buf[3]))
			return retval;
		if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
			return retval;
		need_retry = false;
		goto retry;
	}

	return retval;
}

static int exec_binprm(struct linux_binprm *bprm)
{
	pid_t old_pid, old_vpid;
	int ret, depth;

	/* Need to fetch pid before load_binary changes it */
	old_pid = current->pid;
	rcu_read_lock();
	old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
	rcu_read_unlock();

	/* This bounds the number of binfmt rewrites (see the depth check
	 * below) before failing hard.
	 */
	for (depth = 0;; depth++) {
		struct file *exec;
		if (depth > 5)
			return -ELOOP;

		ret = search_binary_handler(bprm);
		if (ret < 0)
			return ret;
		if (!bprm->interpreter)
			break;

		exec = bprm->file;
		bprm->file = bprm->interpreter;
		bprm->interpreter = NULL;

		allow_write_access(exec);
		if (unlikely(bprm->have_execfd)) {
			if (bprm->executable) {
				fput(exec);
				return -ENOEXEC;
			}
			bprm->executable = exec;
		} else
			fput(exec);
	}

	audit_bprm(bprm);
	trace_sched_process_exec(current, old_pid, bprm);
	ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
	proc_exec_connector(current);
	return 0;
}
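
/*
 * Example of the rewrite loop in exec_binprm() above (illustrative):
 * executing a script that starts with
 *
 *	#!/bin/sh
 *
 * first matches binfmt_script, which opens /bin/sh and stores it in
 * bprm->interpreter; the next pass through search_binary_handler() then
 * loads the shell itself via binfmt_elf.  Each such interpreter switch
 * consumes one level of the depth budget.
 */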

/*
 * sys_execve() executes a new program.
 */
static int bprm_execve(struct linux_binprm *bprm,
		       int fd, struct filename *filename, int flags)
{
	struct file *file;
	int retval;

	retval = prepare_bprm_creds(bprm);
	if (retval)
		return retval;

	check_unsafe_exec(bprm);
	current->in_execve = 1;

	file = do_open_execat(fd, filename, flags);
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_unmark;

	sched_exec();

	bprm->file = file;
	/*
	 * Record that a name derived from an O_CLOEXEC fd will be
	 * inaccessible after exec.  This allows the code in exec to
	 * choose to fail when the executable is not mmaped into the
	 * interpreter and an open file descriptor is not passed to
	 * the interpreter.  This makes for a better user experience
	 * than having the interpreter start and then immediately fail
	 * when it finds the executable is inaccessible.
	 */
	if (bprm->fdpath && get_close_on_exec(fd))
		bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;

	/* Set the unchanging part of bprm->cred */
	retval = security_bprm_creds_for_exec(bprm);
	if (retval)
		goto out;

	retval = exec_binprm(bprm);
	if (retval < 0)
		goto out;

	/* execve succeeded */
	current->fs->in_exec = 0;
	current->in_execve = 0;
	rseq_execve(current);
	acct_update_integrals(current);
	task_numa_free(current, false);
	return retval;

out:
	/*
	 * If past the point of no return ensure the code never
	 * returns to the userspace process.  Use an existing fatal
	 * signal if present otherwise terminate the process with
	 * SIGSEGV.
	 */
	if (bprm->point_of_no_return && !fatal_signal_pending(current))
		force_fatal_sig(SIGSEGV);

out_unmark:
	current->fs->in_exec = 0;
	current->in_execve = 0;

	return retval;
}

static int do_execveat_common(int fd, struct filename *filename,
			      struct user_arg_ptr argv,
			      struct user_arg_ptr envp,
			      int flags)
{
	struct linux_binprm *bprm;
	int retval;

	if (IS_ERR(filename))
		return PTR_ERR(filename);

	/*
	 * We move the actual failure in case of RLIMIT_NPROC excess from
	 * set*uid() to execve() because too many poorly written programs
	 * don't check setuid() return code.  Here we additionally recheck
	 * whether NPROC limit is still exceeded.
	 */
	if ((current->flags & PF_NPROC_EXCEEDED) &&
	    is_ucounts_overlimit(current_ucounts(), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
		retval = -EAGAIN;
		goto out_ret;
	}

	/*
	 * We're below the limit (still or again), so we don't want to make
	 * further execve() calls fail.
	 */
	current->flags &= ~PF_NPROC_EXCEEDED;

	bprm = alloc_bprm(fd, filename);
	if (IS_ERR(bprm)) {
		retval = PTR_ERR(bprm);
		goto out_ret;
	}

	retval = count(argv, MAX_ARG_STRINGS);
	if (retval < 0)
		goto out_free;
	bprm->argc = retval;

	retval = count(envp, MAX_ARG_STRINGS);
	if (retval < 0)
		goto out_free;
	bprm->envc = retval;

	retval = bprm_stack_limits(bprm);
	if (retval < 0)
		goto out_free;

	retval = copy_string_kernel(bprm->filename, bprm);
	if (retval < 0)
		goto out_free;
	bprm->exec = bprm->p;

	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out_free;

	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out_free;

	retval = bprm_execve(bprm, fd, filename, flags);
out_free:
	free_bprm(bprm);

out_ret:
	putname(filename);
	return retval;
}

int kernel_execve(const char *kernel_filename,
		  const char *const *argv, const char *const *envp)
{
	struct filename *filename;
	struct linux_binprm *bprm;
	int fd = AT_FDCWD;
	int retval;

	filename = getname_kernel(kernel_filename);
	if (IS_ERR(filename))
		return PTR_ERR(filename);

	bprm = alloc_bprm(fd, filename);
	if (IS_ERR(bprm)) {
		retval = PTR_ERR(bprm);
		goto out_ret;
	}

	retval = count_strings_kernel(argv);
	if (retval < 0)
		goto out_free;
	bprm->argc = retval;

	retval = count_strings_kernel(envp);
	if (retval < 0)
		goto out_free;
	bprm->envc = retval;

	retval = bprm_stack_limits(bprm);
	if (retval < 0)
		goto out_free;

	retval = copy_string_kernel(bprm->filename, bprm);
	if (retval < 0)
		goto out_free;
	bprm->exec = bprm->p;

	retval = copy_strings_kernel(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out_free;

	retval = copy_strings_kernel(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out_free;

	retval = bprm_execve(bprm, fd, filename, 0);
out_free:
	free_bprm(bprm);
out_ret:
	putname(filename);
	return retval;
}

static int do_execve(struct filename *filename,
	const char __user *const __user *__argv,
	const char __user *const __user *__envp)
{
	struct user_arg_ptr argv = { .ptr.native = __argv };
	struct user_arg_ptr envp = { .ptr.native = __envp };
	return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
}

static int do_execveat(int fd, struct filename *filename,
		const char __user *const __user *__argv,
		const char __user *const __user *__envp,
		int flags)
{
	struct user_arg_ptr argv = { .ptr.native = __argv };
	struct user_arg_ptr envp = { .ptr.native = __envp };

	return do_execveat_common(fd, filename, argv, envp, flags);
}

#ifdef CONFIG_COMPAT
static int compat_do_execve(struct filename *filename,
	const compat_uptr_t __user *__argv,
	const compat_uptr_t __user *__envp)
{
	struct user_arg_ptr argv = {
		.is_compat = true,
		.ptr.compat = __argv,
	};
	struct user_arg_ptr envp = {
		.is_compat = true,
		.ptr.compat = __envp,
	};
	return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
}

static int compat_do_execveat(int fd, struct filename *filename,
			      const compat_uptr_t __user *__argv,
			      const compat_uptr_t __user *__envp,
			      int flags)
{
	struct user_arg_ptr argv = {
		.is_compat = true,
		.ptr.compat = __argv,
	};
	struct user_arg_ptr envp = {
		.is_compat = true,
		.ptr.compat = __envp,
	};
	return do_execveat_common(fd, filename, argv, envp, flags);
}
#endif

void set_binfmt(struct linux_binfmt *new)
{
	struct mm_struct *mm = current->mm;

	if (mm->binfmt)
		module_put(mm->binfmt->module);

	mm->binfmt = new;
	if (new)
		__module_get(new->module);
}
EXPORT_SYMBOL(set_binfmt);

/*
 * set_dumpable stores three-value SUID_DUMP_* into mm->flags.
 */
void set_dumpable(struct mm_struct *mm, int value)
{
	if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
		return;

	set_mask_bits(&mm->flags, MMF_DUMPABLE_MASK, value);
}

SYSCALL_DEFINE3(execve,
		const char __user *, filename,
		const char __user *const __user *, argv,
		const char __user *const __user *, envp)
{
	return do_execve(getname(filename), argv, envp);
}

SYSCALL_DEFINE5(execveat,
		int, fd, const char __user *, filename,
		const char __user *const __user *, argv,
		const char __user *const __user *, envp,
		int, flags)
{
	return do_execveat(fd,
			   getname_uflags(filename, flags),
			   argv, envp, flags);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
	const compat_uptr_t __user *, argv,
	const compat_uptr_t __user *, envp)
{
	return compat_do_execve(getname(filename), argv, envp);
}

COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
		       const char __user *, filename,
		       const compat_uptr_t __user *, argv,
		       const compat_uptr_t __user *, envp,
		       int, flags)
{
	return compat_do_execveat(fd,
				  getname_uflags(filename, flags),
				  argv, envp, flags);
}
#endif
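
/*
 * Userspace usage sketch for the execveat() entry point above
 * (illustrative, not kernel code); glibc's fexecve() is built on this:
 *
 *	int fd = open("/usr/bin/true", O_PATH | O_CLOEXEC);
 *	char *argv[] = { "true", NULL };
 *	char *envp[] = { NULL };
 *	syscall(SYS_execveat, fd, "", argv, envp, AT_EMPTY_PATH);
 */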