#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

#include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
{
	return addr >= (unsigned long)__start_rodata &&
		addr < (unsigned long)__end_rodata;
}

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Returns the source string if it is in the .rodata section, otherwise it
 * falls back to kstrdup().
 * Strings allocated by kstrdup_const should be freed by kfree_const.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
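
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * typical kstrdup_const()/kfree_const() pairing. Because kstrdup_const() may
 * return the .rodata string itself rather than a copy, the result must be
 * released with kfree_const(), never plain kfree(). The example_* helper
 * name is hypothetical.
 */
static const char * __maybe_unused example_dup_attr_name(const char *name)
{
	const char *copy;

	copy = kstrdup_const(name, GFP_KERNEL);
	if (!copy)
		return NULL;

	/* ... use the name; the owner later releases it with kfree_const(copy) ... */
	return copy;
}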

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure. Result may not be
 * physically contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
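
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * callers of the memdup_user() family above check for an ERR_PTR() rather
 * than NULL, since these helpers never return NULL. The example_* helper
 * name is hypothetical.
 */
static long __maybe_unused example_copy_user_blob(const void __user *ubuf,
						  size_t len)
{
	void *kbuf;

	kbuf = memdup_user(ubuf, len);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);	/* -ENOMEM or -EFAULT */

	/* ... operate on the kernel-space copy ... */

	kfree(kbuf);
	return 0;
}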

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start: starting user address
 * @nr_pages: number of pages from start to pin
 * @write: whether pages will be written to
 * @pages: array that receives pointers to the pages pinned.
 *         Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	return get_user_pages_unlocked(start, nr_pages, pages,
				       write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
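
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a typical get_user_pages_fast() call pins the pages, works on the
 * resulting struct page pointers, and then drops each reference with
 * put_page(). The example_* helper name is hypothetical.
 */
static int __maybe_unused example_pin_user_range(unsigned long start, int nr)
{
	struct page **pages;
	int pinned, i;

	pages = kmalloc_array(nr, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	pinned = get_user_pages_fast(start, nr, 1, pages);	/* write access */

	/* ... access the pinned pages here ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);

	kfree(pages);
	return pinned < 0 ? pinned : 0;
}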

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate, &uf);
		up_write(&mm->mmap_sem);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Any use of gfp flags outside of GFP_KERNEL should be discussed with the
 * mm people first.
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
	 * so the given set of flags has to be compatible.
	 */
	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contributes less to long term fragmentation than the vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	return __vmalloc_node_flags_caller(size, node, flags,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);

void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);
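
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a possibly-large table allocated with kvmalloc_array() must be freed with
 * kvfree(), which handles both the kmalloc and the vmalloc case. The
 * example_* helper name is hypothetical.
 */
static void __maybe_unused example_kvmalloc_table(size_t nr_entries)
{
	u64 *table;

	table = kvmalloc_array(nr_entries, sizeof(*table),
			       GFP_KERNEL | __GFP_ZERO);
	if (!table)
		return;

	/* ... populate and use the table ... */

	kvfree(table);
}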

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < hpage_nr_pages(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains the total number of mappings
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
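
/*
 * Worked example (editorial note, not in the original source): with the
 * default overcommit_ratio of 50, 8 GiB of RAM, no hugetlb pages and 2 GiB
 * of swap, OVERCOMMIT_NEVER enforces a CommitLimit of
 * 8 GiB * 50% + 2 GiB = 6 GiB. Setting vm.overcommit_kbytes replaces the
 * ratio-based term entirely.
 */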

/*
 * Make sure vm_committed_as is in one cacheline and not cacheline shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long free, allowed, reserve;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_zone_page_state(NR_FREE_PAGES);
		free += global_node_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_node_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure. The dentry
		 * cache and most inode caches should fall into this
		 * category.
		 */
		free += global_node_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Part of the kernel memory, which can be released
		 * under memory pressure.
		 */
		free += global_node_page_state(
			NR_INDIRECTLY_RECLAIMABLE_BYTES) >> PAGE_SHIFT;

		/*
		 * Leave the reserved pages alone; they are not for
		 * anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task: the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	down_read(&mm->mmap_sem);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	up_read(&mm->mmap_sem);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}