#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/sections.h>
#include <asm/uaccess.h>

#include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
{
	return addr >= (unsigned long)__start_rodata &&
		addr < (unsigned long)__end_rodata;
}

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Function returns the source string if it is in the .rodata section,
 * otherwise it falls back to kstrdup.
 * Strings allocated by kstrdup_const should be freed by kfree_const.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

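/*
 * Illustrative sketch (not part of the original file): one way a caller
 * might pair kstrdup_const() with kfree_const().  A string literal lives
 * in .rodata, so kstrdup_const() can return it without allocating, while
 * a runtime-built name is duplicated via kstrdup() underneath.  The
 * example_name_usage() helper is hypothetical.
 */
#if 0	/* example only, not compiled */
static void example_name_usage(const char *runtime_name)
{
	/* "default" is in .rodata, so no allocation happens for it. */
	const char *name = kstrdup_const(runtime_name ? runtime_name
						       : "default", GFP_KERNEL);

	if (!name)
		return;

	/* ... use name ... */

	/* Frees only if kstrdup() actually allocated a copy. */
	kfree_const(name);
}
#endif
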
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

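/*
 * Illustrative sketch (not part of the original file): a typical caller
 * pattern for memdup_user_nul() in a hypothetical write() handler, shown
 * to make the ERR_PTR() convention concrete.  The handler name and its
 * arguments are assumptions for the example only.
 */
#if 0	/* example only, not compiled */
static ssize_t example_write(struct file *file, const char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	char *kbuf = memdup_user_nul(ubuf, count);

	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	/* kbuf is NUL-terminated, so string helpers are safe here. */
	pr_debug("got: %s\n", kbuf);

	kfree(kbuf);
	return count;
}
#endif
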
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t)
{
	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However,
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	return get_user_pages_unlocked(start, nr_pages, write, 0, pages);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate);
		up_write(&mm->mmap_sem);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

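/*
 * Illustrative sketch (not part of the original file): kvfree() pairs with
 * the common "try kmalloc, fall back to vmalloc" allocation pattern, since
 * it picks vfree() or kfree() based on the address.  The helper name and
 * GFP flag choice below are assumptions for the example only.
 */
#if 0	/* example only, not compiled */
static void *example_alloc_table(size_t size)
{
	/* Avoid noisy warnings and long reclaim stalls for large sizes. */
	void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);

	if (!p)
		p = vmalloc(size);
	return p;		/* free with kvfree() either way */
}
#endif
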
static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For a compound page it returns true if any subpage of the compound page
 * is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < hpage_nr_pages(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains the total number of mappings
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

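/*
 * Worked example (not part of the original file), assuming 4 KiB pages
 * (PAGE_SHIFT == 12); the figures below are illustrative only:
 *
 *   - "kbytes >> (PAGE_SHIFT - 10)" converts kilobytes to pages, i.e.
 *     kbytes / 4 here, so sysctl_overcommit_kbytes = 131072 (128 MiB)
 *     yields a limit of 32768 pages (plus total_swap_pages).
 *   - Otherwise, with 1000000 non-hugetlb RAM pages and the default
 *     overcommit_ratio of 50, the limit is 500000 pages plus swap.
 */
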
/*
 * Make sure vm_committed_as is in one cacheline and not cacheline shared
 * with other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long free, allowed, reserve;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_node_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_node_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure. The dentry
		 * cache and most inode caches should fall into this
		 * category.
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

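/*
 * Illustrative sketch (not part of the original file): how an LSM-style
 * hook might delegate to __vm_enough_memory(), mapping CAP_SYS_ADMIN to
 * the cap_sys_admin argument.  The wrapper name and the use of capable()
 * are assumptions for the example; real LSMs are reached through the
 * security_vm_enough_memory_mm() hook.
 */
#if 0	/* example only, not compiled */
static int example_vm_enough_memory(struct mm_struct *mm, long pages)
{
	int cap_sys_admin = capable(CAP_SYS_ADMIN) ? 1 : 0;

	return __vm_enough_memory(mm, pages, cap_sys_admin);
}
#endif
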
/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	down_read(&mm->mmap_sem);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	up_read(&mm->mmap_sem);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, 0);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len, 0);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
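
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller of get_cmdline().  Because the copy is not guaranteed to be
 * NUL-terminated, the caller terminates the buffer itself before using
 * it as a string.  The function name and buffer size are assumptions.
 */
#if 0	/* example only, not compiled */
static void example_log_cmdline(struct task_struct *task)
{
	char buf[128];
	int n = get_cmdline(task, buf, sizeof(buf) - 1);

	buf[n < 0 ? 0 : n] = '\0';	/* ensure termination */
	pr_info("cmdline of %d: %s\n", task->pid, buf);
}
#endif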