#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

#include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
{
	return addr >= (unsigned long)__start_rodata &&
		addr < (unsigned long)__end_rodata;
}

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * This function calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Returns the source string if it is in the .rodata section, otherwise
 * falls back to kstrdup().
 * Strings allocated by kstrdup_const() should be freed by kfree_const().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

/**
 * kstrndup - allocate space for and copy an existing string, up to @max chars
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
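/*
 * Usage sketch (illustrative only, not part of this file): because
 * kstrdup_const() may return the .rodata pointer itself instead of a
 * fresh allocation, its result must be released with kfree_const()
 * rather than plain kfree(). "src_name" below is hypothetical:
 *
 *	const char *name = kstrdup_const(src_name, GFP_KERNEL);
 *
 *	if (!name)
 *		return -ENOMEM;
 *	...
 *	kfree_const(name);
 */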
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
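/*
 * Usage sketch (illustrative only): unlike plain kmalloc(), the
 * memdup_user() helpers above never return NULL; failures come back as
 * ERR_PTR() values, so callers must test with IS_ERR(). "ubuf" and "len"
 * below are hypothetical:
 *
 *	char *kbuf = memdup_user_nul(ubuf, len);
 *
 *	if (IS_ERR(kbuf))
 *		return PTR_ERR(kbuf);
 *	...
 *	kfree(kbuf);
 */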
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower, so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	return get_user_pages_unlocked(start, nr_pages, pages,
				       write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
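/*
 * Usage sketch (illustrative only): a typical caller pins a range, uses
 * the pages, and drops the references with put_page(). The return value
 * may be fewer pages than requested. "NR_PIN" and "start" below are
 * hypothetical:
 *
 *	struct page *pages[NR_PIN];
 *	int i, pinned;
 *
 *	pinned = get_user_pages_fast(start, NR_PIN, 1, pages);
 *	if (pinned < 0)
 *		return pinned;
 *	... access the pinned pages ...
 *	for (i = 0; i < pinned; i++)
 *		put_page(pages[i]);
 */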
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate, &uf);
		up_write(&mm->mmap_sem);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible with (a superset
 * of) GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory, but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_REPEAT is supported only for large (>32kB) allocations, and it should
 * be used only if kmalloc is preferable to the vmalloc fallback, due to
 * visible performance drawbacks.
 *
 * Any use of gfp flags outside of GFP_KERNEL should be discussed with the
 * mm people first.
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page
	 * tables), so the given set of flags has to be compatible.
	 */
	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);

	/*
	 * Make sure that larger requests are not too disruptive - no OOM
	 * killer and no allocation failure warnings, as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		/*
		 * We have to override __GFP_REPEAT by __GFP_NORETRY for !costly
		 * requests because there is no other way to tell the allocator
		 * that we want to fail rather than retry endlessly.
		 */
		if (!(kmalloc_flags & __GFP_REPEAT) ||
				(size <= PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub-page
	 * requests.
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	return __vmalloc_node_flags_caller(size, node, flags,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);

void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);
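/*
 * Usage sketch (illustrative only): kvmalloc(), the NUMA_NO_NODE wrapper
 * around kvmalloc_node(), suits allocations whose size may be too large
 * or too fragmented for kmalloc; a single kvfree() handles both outcomes.
 * "struct entry" and "nr" are hypothetical, and a real caller should also
 * guard the multiplication against overflow:
 *
 *	struct entry *table = kvmalloc(nr * sizeof(*table), GFP_KERNEL);
 *
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	kvfree(table);
 */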
static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For a compound page, true is returned if any sub-page of the compound
 * page is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < hpage_nr_pages(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on a slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP, page->_mapcount contains the total number of mappings
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);
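/*
 * Usage sketch (illustrative only): the common entry point is
 * page_mapcount() in <linux/mm.h>, which handles order-0 pages inline and
 * only calls __page_mapcount() above for compound pages:
 *
 *	if (page_mapcount(page) > 1)
 *		... the page is mapped more than once ...
 */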
int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

/*
 * Make sure vm_committed_as is in its own cacheline and not shared with
 * other variables; it can be updated frequently by several CPUs.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system is a metric that can be
 * used to drive ballooning decisions when Linux is hosted as a guest. On
 * Hyper-V, the host implements a policy engine for dynamically balancing
 * memory across the competing virtual machines it hosts. Several metrics
 * drive this policy engine, including the guest-reported memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long free, allowed, reserve;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_node_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_node_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure. The dentry
		 * cache and most inode caches should fall into this
		 * category.
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave reserved pages out; they are not available for
		 * anonymous use.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}
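/*
 * Worked example (assuming 4 KiB pages and no hugetlb pages): with 4 GiB
 * of RAM (1048576 pages), 2 GiB of swap (524288 pages) and the default
 * overcommit_ratio of 50, vm_commit_limit() under OVERCOMMIT_NEVER is
 *
 *	1048576 * 50 / 100 + 524288 = 1048576 pages (4 GiB),
 *
 * and a non-root caller of __vm_enough_memory() additionally loses the
 * default admin reserve of 8192 kB >> (12 - 10) = 2048 pages (8 MiB).
 */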
/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	down_read(&mm->mmap_sem);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	up_read(&mm->mmap_sem);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the NUL at the end of args has been overwritten, then
	 * assume the application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
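/*
 * Usage sketch (illustrative only): since the copy is not guaranteed to
 * be NUL-terminated, a caller must terminate it explicitly. "task" below
 * is a hypothetical task_struct pointer:
 *
 *	char buf[256];
 *	int n = get_cmdline(task, buf, sizeof(buf) - 1);
 *
 *	buf[n] = '\0';
 */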