// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const.
 *
 * Return: source string if it is in .rodata section, otherwise fall back
 * to kstrdup().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
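/*
 * Illustrative usage sketch (not part of the original file): kstrdup_const()
 * pairs with kfree_const(), so a caller that may be handed either a .rodata
 * literal or a runtime-built string can treat both the same way.  The
 * attr_name variable below is hypothetical.
 *
 *	const char *name = kstrdup_const(attr_name, GFP_KERNEL);
 *	if (!name)
 *		return -ENOMEM;
 *	...
 *	kfree_const(name);
 */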
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result may not be
 * physically contiguous.  Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);
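/*
 * Illustrative usage sketch (not part of the original file): vmemdup_user()
 * suits potentially large, user-sized copies because it may fall back to
 * vmalloc; the result must therefore be freed with kvfree(), not kfree().
 * The ubuf/len arguments below are hypothetical.
 *
 *	void *buf = vmemdup_user(ubuf, len);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	...
 *	kvfree(buf);
 */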
/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use
	 * GFP_NOFS or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		next = mm->mmap;
		mm->mmap = vma;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev, *next;

	next = vma->vm_next;
	prev = vma->vm_prev;
	if (prev)
		prev->vm_next = next;
	else
		mm->mmap = next;
	if (next)
		next->vm_prev = prev;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 * @task: task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against, may be NULL
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Note that any use of gfp flags outside of GFP_KERNEL will never fall back
 * to vmalloc; such requests are passed straight to kmalloc.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
	 * so the given set of flags has to be compatible.
	 */
	if ((flags & GFP_KERNEL) != GFP_KERNEL)
		return kmalloc_node(size, flags, node);

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contribute to a long term fragmentation less than vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	return __vmalloc_node(size, 1, flags, node,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < compound_nr(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains the total number of mappings
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}
int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with loose policy
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to
	 * the strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
	 * with the strict "NEVER", and to avoid a possible race condition (even
	 * though users usually won't switch to OVERCOMMIT_NEVER very frequently),
	 * the switch is done in the following order:
	 *	1. changing the batch
	 *	2. sync percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

/*
 * Make sure vm_committed_as is in one cacheline and does not share a
 * cacheline with other variables. It can be updated by several CPUs
 * frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for a big
 * platform like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task: the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}
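/*
 * Worked example (illustrative, not part of the original file) for the
 * OVERCOMMIT_NEVER path in __vm_enough_memory() above: with 8 GiB of RAM,
 * no hugetlb pages, 2 GiB of swap and the default overcommit_ratio of 50,
 * vm_commit_limit() is 8 GiB * 50% + 2 GiB = 6 GiB.  A non-root caller then
 * loses the 8 MiB admin reserve, and a process with 1 GiB of total_vm loses
 * min(1 GiB / 32, 128 MiB) = 32 MiB of user reserve, so a new allocation
 * succeeds only while the committed total stays below roughly 5.96 GiB.
 */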