/* xref: /openbmc/linux/mm/util.c (revision f1770e3c) */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>

#include <linux/uaccess.h>

#include "internal.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);
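
/*
 * Illustrative usage sketch (not part of this file): duplicating a
 * caller-supplied string so it outlives the caller's buffer.
 * "user_supplied" is a hypothetical name for this example.
 *
 *	char *label = kstrdup(user_supplied, GFP_KERNEL);
 *
 *	if (!label)
 *		return -ENOMEM;
 *	...
 *	kfree(label);
 */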

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Returns the source string if it is in the .rodata section; otherwise
 * it falls back to kstrdup().
 * Strings allocated by kstrdup_const() should be freed by kfree_const().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
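
/*
 * Illustrative pairing sketch (not part of this file): when the name
 * passed in is a string literal (and therefore in .rodata), no copy is
 * made and the later kfree_const() is a no-op.  "obj" and its "name"
 * field are hypothetical.
 *
 *	obj->name = kstrdup_const(name, GFP_KERNEL);
 *	if (!obj->name)
 *		return -ENOMEM;
 *	...
 *	kfree_const(obj->name);
 */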

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
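
/*
 * Illustrative usage sketch (not part of this file): taking a private
 * copy of a caller's template structure.  "tmpl" and "struct foo_cfg"
 * are hypothetical.
 *
 *	struct foo_cfg *cfg = kmemdup(tmpl, sizeof(*tmpl), GFP_KERNEL);
 *
 *	if (!cfg)
 *		return -ENOMEM;
 */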

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);
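
/*
 * Illustrative usage sketch (not part of this file): turning a
 * length-delimited token into a proper C string, e.g. while parsing an
 * option string.  "tok" and "tok_len" are hypothetical.
 *
 *	char *opt = kmemdup_nul(tok, tok_len, GFP_KERNEL);
 *
 *	if (!opt)
 *		return -ENOMEM;
 */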

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.  Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
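
/*
 * Illustrative error-handling sketch (not part of this file): unlike
 * the k*dup() helpers above, failure is reported via ERR_PTR(), so
 * callers test with IS_ERR()/PTR_ERR() rather than for NULL.  "ubuf"
 * and "count" are hypothetical ioctl arguments.
 *
 *	void *buf = memdup_user(ubuf, count);
 *
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	...
 *	kfree(buf);
 */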

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.  Result may not be
 * physically contiguous.  Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);
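
/*
 * Illustrative sketch (not part of this file): vmemdup_user() suits
 * potentially large, user-sized buffers, since the kvmalloc() backing
 * may come from vmalloc; the copy must therefore be released with
 * kvfree(), never plain kfree().  "uarg" and "nbytes" are hypothetical.
 *
 *	void *tbl = vmemdup_user(uarg, nbytes);
 *
 *	if (IS_ERR(tbl))
 *		return PTR_ERR(tbl);
 *	...
 *	kvfree(tbl);
 */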

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
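
/*
 * Illustrative usage sketch (not part of this file): the classic
 * consumer is a procfs/sysfs-style write handler that wants the user
 * buffer as a C string.  The handler below is hypothetical.
 *
 *	static ssize_t foo_write(struct file *file, const char __user *ubuf,
 *				 size_t count, loff_t *ppos)
 *	{
 *		char *cmd = memdup_user_nul(ubuf, count);
 *
 *		if (IS_ERR(cmd))
 *			return PTR_ERR(cmd);
 *		...
 *		kfree(cmd);
 *		return count;
 *	}
 */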

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * Note a difference from get_user_pages_fast(): this always returns the
 * number of pages pinned, 0 if no pages were pinned.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	return get_user_pages_unlocked(start, nr_pages, pages,
				       write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
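
/*
 * Illustrative pin/unpin sketch (not part of this file): every page
 * actually pinned must later be released with put_page(), including on
 * the partial-success path.  "uaddr" and "npages" are hypothetical.
 *
 *	struct page **pages = kvmalloc_array(npages, sizeof(*pages),
 *					     GFP_KERNEL);
 *	int i, pinned;
 *
 *	if (!pages)
 *		return -ENOMEM;
 *	pinned = get_user_pages_fast(uaddr, npages, 1, pages);
 *	if (pinned < 0)
 *		goto out;	// -errno: nothing was pinned
 *	...
 *	for (i = 0; i < pinned; i++)
 *		put_page(pages[i]);
 * out:
 *	kvfree(pages);
 */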

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate, &uf);
		up_write(&mm->mmap_sem);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory, but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Note that any gfp mask that is not a superset of GFP_KERNEL goes straight
 * to kmalloc and never falls back to vmalloc.
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
	 * so the given set of flags has to be compatible.
	 */
	if ((flags & GFP_KERNEL) != GFP_KERNEL)
		return kmalloc_node(size, flags, node);

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks, and therefore
	 * contributes less to long-term fragmentation than the vmalloc
	 * fallback. However, make sure that larger requests are not too
	 * disruptive - no OOM killer and no allocation failure warnings, as we
	 * have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub-page
	 * requests.
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	return __vmalloc_node_flags_caller(size, node, flags,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
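
/*
 * Illustrative usage sketch (not part of this file): kvmalloc() (the
 * NUMA_NO_NODE wrapper around kvmalloc_node()) is the usual choice for
 * tables whose size scales with user input, where a kmalloc-only
 * allocation might fail under fragmentation.  "struct entry" and
 * "nentries" are hypothetical.
 *
 *	struct entry *tbl = kvmalloc_array(nentries, sizeof(*tbl),
 *					   GFP_KERNEL);
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	kvfree(tbl);
 */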

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For a compound page, it returns true if any subpage of the compound page
 * is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < hpage_nr_pages(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on a slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP, page->_mapcount contains the total number of mappings
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
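
/*
 * Worked example (illustrative, assuming 4KiB pages): on a machine with
 * 4GiB of RAM (1048576 pages), no hugetlb pages, 2GiB of swap (524288
 * pages) and the default overcommit_ratio of 50, OVERCOMMIT_NEVER
 * enforces
 *
 *	limit = 1048576 * 50 / 100 + 524288 = 1048576 pages (4GiB)
 *
 * of committed address space.  Setting overcommit_kbytes instead
 * replaces the ratio term with overcommit_kbytes >> (PAGE_SHIFT - 10)
 * pages.
 */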

/*
 * Make sure vm_committed_as is in its own cacheline, not sharing a
 * cacheline with other variables.  It can be updated frequently by
 * several CPUs.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long free, allowed, reserve;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_zone_page_state(NR_FREE_PAGES);
		free += global_node_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_node_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 * category.
		 */
		free += global_node_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Part of the kernel memory, which can be released
		 * under memory pressure.
		 */
		free += global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);

		/*
		 * Leave the reserved pages alone; they are not available
		 * for anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;

	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	down_read(&mm->mmap_sem);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	up_read(&mm->mmap_sem);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
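
/*
 * Illustrative usage sketch (not part of this file): because the copy
 * is not guaranteed to be NUL-terminated, callers should terminate it
 * themselves, as /proc readers do.  "tsk" is a hypothetical task.
 *
 *	char buf[256];
 *	int n = get_cmdline(tsk, buf, sizeof(buf) - 1);
 *
 *	buf[n] = '\0';
 */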