xref: /openbmc/linux/mm/util.c (revision 174cd4b1)
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

#include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
{
	return addr >= (unsigned long)__start_rodata &&
		addr < (unsigned long)__end_rodata;
}

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * This function calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Returns the source string if it is in the .rodata section; otherwise it
 * falls back to kstrdup().
 * Strings allocated by kstrdup_const() should be freed by kfree_const().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

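/*
 * Illustrative sketch, not part of this file: a typical caller pairs
 * kstrdup_const() with kfree_const() so that string literals living in
 * .rodata are never copied or freed.  The "attr" structure and its
 * ->name member below are hypothetical.
 *
 *	attr->name = kstrdup_const(name, GFP_KERNEL);
 *	if (!attr->name)
 *		return -ENOMEM;
 *	...
 *	kfree_const(attr->name);
 */
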
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

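/*
 * Illustrative sketch, not part of this file: kmemdup() is the usual way
 * to take a private, modifiable copy of a template object.  The
 * "defaults" structure below is hypothetical.
 *
 *	cfg = kmemdup(&defaults, sizeof(defaults), GFP_KERNEL);
 *	if (!cfg)
 *		return -ENOMEM;
 *	...modify and use cfg, then kfree(cfg)...
 */
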
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

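/*
 * Illustrative sketch, not part of this file: an ioctl handler copying a
 * fixed-size argument block from user space.  "struct foo_args" and
 * "uarg" are hypothetical.
 *
 *	struct foo_args *args;
 *
 *	args = memdup_user(uarg, sizeof(*args));
 *	if (IS_ERR(args))
 *		return PTR_ERR(args);
 *	...use *args...
 *	kfree(args);
 */
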
/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

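/*
 * Illustrative sketch, not part of this file: a write() handler that wants
 * the user buffer as a NUL-terminated string.  "ubuf", "count" and
 * foo_parse() are hypothetical.
 *
 *	char *buf;
 *
 *	buf = memdup_user_nul(ubuf, count);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	ret = foo_parse(buf);
 *	kfree(buf);
 */
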
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it is IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	return get_user_pages_unlocked(start, nr_pages, pages,
				       write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);

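/*
 * Illustrative sketch, not part of this file: a driver pinning a small user
 * buffer for short-term access.  "uaddr" and FOO_NR_PAGES are hypothetical;
 * every successfully pinned page must be released with put_page().
 *
 *	struct page *pages[FOO_NR_PAGES];
 *	int i, nr;
 *
 *	nr = get_user_pages_fast(uaddr, FOO_NR_PAGES, 1, pages);
 *	if (nr <= 0)
 *		return nr ? nr : -EFAULT;
 *	...access the pinned pages, e.g. via kmap()...
 *	for (i = 0; i < nr; i++)
 *		put_page(pages[i]);
 */
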
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate, &uf);
		up_write(&mm->mmap_sem);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

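/*
 * Illustrative sketch, not part of this file: kvfree() lets callers hide
 * whether a buffer came from kmalloc() or vmalloc() behind one free path,
 * e.g. the common "try kmalloc, fall back to vmalloc" pattern:
 *
 *	p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
 *	if (!p)
 *		p = vmalloc(size);
 *	if (!p)
 *		return -ENOMEM;
 *	...
 *	kvfree(p);
 */
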
static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For compound pages it returns true if any subpage of the compound page
 * is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < hpage_nr_pages(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP, page->_mapcount contains the total number of
	 * mappings of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

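/*
 * Worked example with assumed numbers (PAGE_SIZE == 4K): if
 * sysctl_overcommit_kbytes == 0, sysctl_overcommit_ratio == 50,
 * totalram_pages == 1048576 (4 GiB), there are no hugetlb pages and
 * total_swap_pages == 262144 (1 GiB), then
 *
 *	allowed = 1048576 * 50 / 100 + 262144 = 786432 pages,
 *
 * i.e. roughly 3 GiB of commit is allowed under OVERCOMMIT_NEVER.
 */
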
/*
 * Make sure vm_committed_as is in its own cacheline and not shared with
 * other variables. It can be updated frequently by several CPUs.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long free, allowed, reserve;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_node_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case: they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_node_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 * category.
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave the reserved pages alone; they are not meant for
		 * anonymous mappings.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	down_read(&mm->mmap_sem);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	up_read(&mm->mmap_sem);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
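
/*
 * Illustrative sketch, not part of this file: a /proc-style reader of a
 * task's command line.  Note the result is a '\0'-separated argv block
 * with no guaranteed trailing NUL, so treat buf[0..n) as a byte range,
 * not a C string.
 *
 *	char buf[256];
 *	int n;
 *
 *	n = get_cmdline(task, buf, sizeof(buf));
 *	...use buf[0..n)...
 */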