#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

#include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
{
        return addr >= (unsigned long)__start_rodata &&
                addr < (unsigned long)__end_rodata;
}

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
        if (!is_kernel_rodata((unsigned long)x))
                kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strlen(s) + 1;
        buf = kmalloc_track_caller(len, gfp);
        if (buf)
                memcpy(buf, s, len);
        return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Returns the source string if it is in the .rodata section, otherwise
 * falls back to kstrdup().
 * Strings allocated by kstrdup_const() should be freed by kfree_const().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
        if (is_kernel_rodata((unsigned long)s))
                return s;

        return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strnlen(s, max);
        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
        void *p;

        p = kmalloc_track_caller(len, gfp);
        if (p)
                memcpy(p, src, len);
        return p;
}
EXPORT_SYMBOL(kmemdup);
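/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * object whose name is frequently a string literal living in .rodata.
 * Pairing kstrdup_const() with kfree_const() avoids a heap copy in that
 * common case while still handling dynamically built names.
 */
struct example_object {
        const char *name;
};

static int example_object_set_name(struct example_object *obj,
                                   const char *name)
{
        const char *new_name = kstrdup_const(name, GFP_KERNEL);

        if (!new_name)
                return -ENOMEM;
        kfree_const(obj->name);         /* no-op for .rodata strings */
        obj->name = new_name;
        return 0;
}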
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
        void *p;

        /*
         * Always use GFP_KERNEL, since copy_from_user() can sleep and
         * cause a pagefault, which makes it pointless to use GFP_NOFS
         * or GFP_ATOMIC.
         */
        p = kmalloc_track_caller(len, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(memdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: the string to duplicate
 * @n: maximum number of bytes to copy, including the trailing NUL
 *
 * Returns an ERR_PTR() on failure.
 */
char *strndup_user(const char __user *s, long n)
{
        char *p;
        long length;

        length = strnlen_user(s, n);

        if (!length)
                return ERR_PTR(-EFAULT);

        if (length > n)
                return ERR_PTR(-EINVAL);

        p = memdup_user(s, length);

        if (IS_ERR(p))
                return p;

        p[length - 1] = '\0';

        return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
        char *p;

        /*
         * Always use GFP_KERNEL, since copy_from_user() can sleep and
         * cause a pagefault, which makes it pointless to use GFP_NOFS
         * or GFP_ATOMIC.
         */
        p = kmalloc_track_caller(len + 1, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }
        p[len] = '\0';

        return p;
}
EXPORT_SYMBOL(memdup_user_nul);
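/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * procfs-style write handler using memdup_user_nul() so the user buffer
 * can be parsed with ordinary NUL-terminated string helpers.
 */
static ssize_t example_write(struct file *file, const char __user *ubuf,
                             size_t count, loff_t *ppos)
{
        char *kbuf;
        int val, err;

        kbuf = memdup_user_nul(ubuf, count);
        if (IS_ERR(kbuf))
                return PTR_ERR(kbuf);

        err = kstrtoint(strim(kbuf), 0, &val);
        kfree(kbuf);
        if (err)
                return err;

        /* ... act on val ... */
        return count;
}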
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
{
        struct vm_area_struct *next;

        vma->vm_prev = prev;
        if (prev) {
                next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
                        next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
                        next = NULL;
        }
        vma->vm_next = next;
        if (next)
                next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
        struct task_struct * __maybe_unused t = current;

        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        mm->mmap_base = TASK_UNMAPPED_BASE;
        mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, it simply returns
 * with no pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
                                 int nr_pages, int write, struct page **pages)
{
        return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:      starting user address
 * @nr_pages:   number of pages from start to pin
 * @write:      whether pages will be written to
 * @pages:      array that receives pointers to the pages pinned.
 *              Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower, so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
                                int nr_pages, int write, struct page **pages)
{
        return get_user_pages_unlocked(start, nr_pages, pages,
                                       write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
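/*
 * Example (illustrative sketch, not part of this file): pinning a user
 * buffer for a short-lived kernel access.  Pages returned by
 * get_user_pages_fast() must be released with put_page() when done.
 */
static int example_pin_user_buf(unsigned long uaddr, int nr_pages,
                                struct page **pages)
{
        int pinned, i;

        pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);
        if (pinned < 0)
                return pinned;

        /* ... access the pinned pages, e.g. via kmap()/kunmap() ... */

        for (i = 0; i < pinned; i++)
                put_page(pages[i]);
        return pinned == nr_pages ? 0 : -EFAULT;
}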
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long pgoff)
{
        unsigned long ret;
        struct mm_struct *mm = current->mm;
        unsigned long populate;
        LIST_HEAD(uf);

        ret = security_mmap_file(file, prot, flag);
        if (!ret) {
                if (down_write_killable(&mm->mmap_sem))
                        return -EINTR;
                ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
                                    &populate, &uf);
                up_write(&mm->mmap_sem);
                userfaultfd_unmap_complete(mm, &uf);
                if (populate)
                        mm_populate(ret, populate);
        }
        return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long offset)
{
        if (unlikely(offset + PAGE_ALIGN(len) < offset))
                return -EINVAL;
        if (unlikely(offset_in_page(offset)))
                return -EINVAL;

        return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory, but if the allocation fails it falls back
 * to the vmalloc allocator. Use kvfree() for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_REPEAT is supported only for large (>32kB) allocations, and it should
 * be used only if kmalloc is preferable to the vmalloc fallback, due to
 * visible performance drawbacks.
 *
 * Any use of gfp flags outside of GFP_KERNEL should be consulted with mm people.
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
        gfp_t kmalloc_flags = flags;
        void *ret;

        /*
         * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
         * so the given set of flags has to be compatible.
         */
        WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);

        /*
         * Make sure that larger requests are not too disruptive - no OOM
         * killer and no allocation failure warnings as we have a fallback.
         */
        if (size > PAGE_SIZE) {
                kmalloc_flags |= __GFP_NOWARN;

                /*
                 * We have to override __GFP_REPEAT by __GFP_NORETRY for !costly
                 * requests because there is no other way to tell the allocator
                 * that we want to fail rather than retry endlessly.
                 */
                if (!(kmalloc_flags & __GFP_REPEAT) ||
                                (size <= PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
                        kmalloc_flags |= __GFP_NORETRY;
        }

        ret = kmalloc_node(size, kmalloc_flags, node);

        /*
         * It doesn't really make sense to fall back to vmalloc for sub-page
         * requests.
         */
        if (ret || size <= PAGE_SIZE)
                return ret;

        return __vmalloc_node_flags(size, node, flags | __GFP_HIGHMEM);
}
EXPORT_SYMBOL(kvmalloc_node);

void kvfree(const void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
EXPORT_SYMBOL(kvfree);
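/*
 * Example (illustrative sketch, not part of this file): a table whose
 * size depends on untrusted input, where physically contiguous memory
 * is preferred but not required.  kvmalloc_node() tries kmalloc first
 * and transparently falls back to vmalloc; kvfree() picks the matching
 * free path either way.
 */
struct example_entry {
        u64 key;
        u64 val;
};

static struct example_entry *example_alloc_table(size_t nr)
{
        if (nr > SIZE_MAX / sizeof(struct example_entry))
                return NULL;    /* reject multiplication overflow */

        /* free the result with kvfree() */
        return kvmalloc_node(nr * sizeof(struct example_entry),
                             GFP_KERNEL, NUMA_NO_NODE);
}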
static inline void *__page_rmapping(struct page *page)
{
        unsigned long mapping;

        mapping = (unsigned long)page->mapping;
        mapping &= ~PAGE_MAPPING_FLAGS;

        return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
        page = compound_head(page);
        return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For a compound page, return true if any subpage is mapped.
 */
bool page_mapped(struct page *page)
{
        int i;

        if (likely(!PageCompound(page)))
                return atomic_read(&page->_mapcount) >= 0;
        page = compound_head(page);
        if (atomic_read(compound_mapcount_ptr(page)) >= 0)
                return true;
        if (PageHuge(page))
                return false;
        for (i = 0; i < hpage_nr_pages(page); i++) {
                if (atomic_read(&page[i]._mapcount) >= 0)
                        return true;
        }
        return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
        unsigned long mapping;

        page = compound_head(page);
        mapping = (unsigned long)page->mapping;
        if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                return NULL;
        return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
        struct address_space *mapping;

        page = compound_head(page);

        /* This happens if someone calls flush_dcache_page on a slab page */
        if (unlikely(PageSlab(page)))
                return NULL;

        if (unlikely(PageSwapCache(page))) {
                swp_entry_t entry;

                entry.val = page_private(page);
                return swap_address_space(entry);
        }

        mapping = page->mapping;
        if ((unsigned long)mapping & PAGE_MAPPING_ANON)
                return NULL;

        return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
        int ret;

        ret = atomic_read(&page->_mapcount) + 1;
        /*
         * For file THP pages, page->_mapcount contains the total number of
         * mappings of the page: no need to look at compound_mapcount.
         */
        if (!PageAnon(page) && !PageHuge(page))
                return ret;
        page = compound_head(page);
        ret += atomic_read(compound_mapcount_ptr(page)) + 1;
        if (PageDoubleMap(page))
                ret--;
        return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos)
{
        int ret;

        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_kbytes = 0;
        return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
                              void __user *buffer, size_t *lenp,
                              loff_t *ppos)
{
        int ret;

        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_ratio = 0;
        return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
        unsigned long allowed;

        if (sysctl_overcommit_kbytes)
                allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
        else
                allowed = ((totalram_pages - hugetlb_total_pages())
                           * sysctl_overcommit_ratio / 100);
        allowed += total_swap_pages;

        return allowed;
}

/*
 * Make sure vm_committed_as sits in a cacheline of its own, not shared
 * with other variables: it can be updated frequently by several CPUs.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine, including the guest-reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
        return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);
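/*
 * Worked example for vm_commit_limit() above (illustrative numbers):
 * with the defaults overcommit_kbytes == 0 and overcommit_ratio == 50
 * on a machine with 8GiB of RAM (2097152 4KiB pages), no hugetlb pages
 * and 2GiB of swap (524288 pages):
 *
 *      allowed = 2097152 * 50 / 100 + 524288 = 1572864 pages = 6GiB
 *
 * This is the CommitLimit value reported in /proc/meminfo and enforced
 * under OVERCOMMIT_NEVER.
 */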
/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
        long free, allowed, reserve;

        VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
                        -(s64)vm_committed_as_batch * num_online_cpus(),
                        "memory commitment underflow");

        vm_acct_memory(pages);

        /*
         * Sometimes we want to use more memory than we have
         */
        if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
                return 0;

        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
                free = global_page_state(NR_FREE_PAGES);
                free += global_node_page_state(NR_FILE_PAGES);

                /*
                 * shmem pages shouldn't be counted as free in this
                 * case, they can't be purged, only swapped out, and
                 * that won't affect the overall amount of available
                 * memory in the system.
                 */
                free -= global_node_page_state(NR_SHMEM);

                free += get_nr_swap_pages();

                /*
                 * Any slabs which are created with the
                 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
                 * which are reclaimable, under pressure. The dentry
                 * cache and most inode caches should fall into this.
                 */
                free += global_page_state(NR_SLAB_RECLAIMABLE);

                /*
                 * Leave reserved pages. The pages are not for anonymous pages.
                 */
                if (free <= totalreserve_pages)
                        goto error;
                else
                        free -= totalreserve_pages;

                /*
                 * Reserve some for root
                 */
                if (!cap_sys_admin)
                        free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

                if (free > pages)
                        return 0;

                goto error;
        }

        allowed = vm_commit_limit();
        /*
         * Reserve some for root
         */
        if (!cap_sys_admin)
                allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

        /*
         * Don't let a single process grow so big a user can't recover
         */
        if (mm) {
                reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
                allowed -= min_t(long, mm->total_vm / 32, reserve);
        }

        if (percpu_counter_read_positive(&vm_committed_as) < allowed)
                return 0;
error:
        vm_unacct_memory(pages);

        return -ENOMEM;
}
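/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * caller charging @nr_pages of anonymous memory against the overcommit
 * accounting before committing to a mapping.  Most code reaches
 * __vm_enough_memory() via security_vm_enough_memory_mm(), which lets
 * the LSM decide how cap_sys_admin is derived.
 */
static int example_charge(struct mm_struct *mm, long nr_pages)
{
        if (security_vm_enough_memory_mm(mm, nr_pages))
                return -ENOMEM;

        /* on any later failure path: vm_unacct_memory(nr_pages); */
        return 0;
}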
/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:   the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
        int res = 0;
        unsigned int len;
        struct mm_struct *mm = get_task_mm(task);
        unsigned long arg_start, arg_end, env_start, env_end;

        if (!mm)
                goto out;
        if (!mm->arg_end)
                goto out_mm;    /* Shh! No looking before we're done */

        down_read(&mm->mmap_sem);
        arg_start = mm->arg_start;
        arg_end = mm->arg_end;
        env_start = mm->env_start;
        env_end = mm->env_end;
        up_read(&mm->mmap_sem);

        len = arg_end - arg_start;

        if (len > buflen)
                len = buflen;

        res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

        /*
         * If the NUL at the end of args has been overwritten, then
         * assume the application is using setproctitle(3).
         */
        if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
                len = strnlen(buffer, res);
                if (len < res) {
                        res = len;
                } else {
                        len = env_end - env_start;
                        if (len > buflen - res)
                                len = buflen - res;
                        res += access_process_vm(task, env_start,
                                                 buffer+res, len,
                                                 FOLL_FORCE);
                        res = strnlen(buffer, res);
                }
        }
out_mm:
        mmput(mm);
out:
        return res;
}
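/*
 * Example (illustrative sketch, not part of this file): dumping a
 * task's command line from a hypothetical debugging helper.  The copy
 * is not guaranteed to be NUL-terminated, so terminate it explicitly
 * before printing.
 */
static void example_print_cmdline(struct task_struct *task)
{
        char buf[256];
        int len = get_cmdline(task, buf, sizeof(buf) - 1);

        buf[len] = '\0';
        pr_info("pid %d cmdline: %s\n", task_pid_nr(task), buf);
}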