#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/uaccess.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

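/*
 * Usage sketch (hypothetical caller, not part of this file): duplicate a
 * caller-supplied string in process context, where GFP_KERNEL may sleep.
 * The copy is owned by the caller and must be released with kfree():
 *
 *	char *name = kstrdup(src, GFP_KERNEL);
 *	if (!name)
 *		return -ENOMEM;
 *	...
 *	kfree(name);
 */
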
/**
 * kstrndup - allocate space for and copy an existing string, bounded by @max
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

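/*
 * Usage sketch (hypothetical, with an assumed 16-byte limit): kstrndup()
 * bounds the copy, so a source that is overlong or lacks a terminating
 * NUL still yields at most @max characters plus a forced terminator:
 *
 *	char *label = kstrndup(user_supplied, 16, GFP_KERNEL);
 *	if (!label)
 *		return -ENOMEM;
 */
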
/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

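/*
 * Usage sketch (hypothetical struct foo): kmemdup() copies raw bytes,
 * so it suits flat objects with no pointers that would need a deep copy:
 *
 *	struct foo *copy = kmemdup(orig, sizeof(*orig), GFP_KERNEL);
 *	if (!copy)
 *		return -ENOMEM;
 */
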
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

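/*
 * Usage sketch (hypothetical ioctl handler): unlike kmemdup(), failure
 * is reported via ERR_PTR(), so test with IS_ERR()/PTR_ERR() rather
 * than comparing against NULL:
 *
 *	void *buf = memdup_user(uarg, len);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	...
 *	kfree(buf);
 */
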
static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size)
		return (void *)p;

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

/**
 * __krealloc - like krealloc() but doesn't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you cannot free the buffer immediately,
 * for example when it is still referenced under RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);
}
EXPORT_SYMBOL(__krealloc);

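/*
 * Usage sketch (hypothetical RCU-protected pointer): because
 * __krealloc() never frees the old buffer, readers may keep using it
 * until a grace period has elapsed:
 *
 *	new = __krealloc(old, new_size, GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;
 *	if (new != old) {
 *		rcu_assign_pointer(dev->config, new);
 *		synchronize_rcu();
 *		kfree(old);
 *	}
 */
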
/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.  If @p is %NULL, krealloc()
 * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);

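/*
 * Usage sketch: on failure krealloc() returns NULL but leaves the old
 * buffer allocated, so assign through a temporary to avoid leaking it:
 *
 *	tmp = krealloc(buf, new_size, GFP_KERNEL);
 *	if (!tmp)
 *		return -ENOMEM;
 *	buf = tmp;
 */
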
/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer, which can be a
 * good deal bigger than the requested size passed to kmalloc(), so be
 * careful when using this function in performance-sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);

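/*
 * Usage sketch (hypothetical key buffer): kzfree() suits buffers that
 * held secrets, so their contents do not linger in freed slab memory:
 *
 *	key = kmalloc(key_len, GFP_KERNEL);
 *	...
 *	kzfree(key);
 */
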
/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

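/*
 * Usage sketch (hypothetical syscall path): as with memdup_user(),
 * failure comes back as an ERR_PTR(), and the result is always
 * NUL-terminated:
 *
 *	char *name = strndup_user(uname, PAGE_SIZE);
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);
 *	...
 *	kfree(name);
 */
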
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
static int vm_is_stack_for_task(struct task_struct *t,
				struct vm_area_struct *vma)
{
	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Check if the vma is being used as a stack.
 * If in_group is non-zero, check the entire thread group rather than
 * just the current task. Returns the pid of the task that the vma is
 * stack for, or 0 if it is not a stack.
 */
pid_t vm_is_stack(struct task_struct *task,
		  struct vm_area_struct *vma, int in_group)
{
	pid_t ret = 0;

	if (vm_is_stack_for_task(task, vma))
		return task->pid;

	if (in_group) {
		struct task_struct *t;
		rcu_read_lock();
		if (!pid_alive(task))
			goto done;

		t = task;
		do {
			if (vm_is_stack_for_task(t, vma)) {
				ret = t->pid;
				goto done;
			}
		} while_each_thread(task, t);
done:
		rcu_read_unlock();
	}

	return ret;
}

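/*
 * Usage sketch (hypothetical /proc-style reader): pass in_group=1 to
 * scan every thread in the group for a matching stack pointer:
 *
 *	pid_t tid = vm_is_stack(task, vma, 1);
 *	if (tid)
 *		seq_printf(m, "[stack:%d]", tid);
 */
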
#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, it simply returns
 * with no pages pinned.
 */
int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However,
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However, if the
 * pages have to be faulted in, it may turn out to be slightly slower, so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __attribute__((weak)) get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, start, nr_pages,
					write, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);

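/*
 * Usage sketch (hypothetical driver pinning a user buffer): the return
 * value may be short of nr_pages, and every page actually pinned must
 * later be released with put_page():
 *
 *	ret = get_user_pages_fast(start, nr_pages, 1, pages);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	for (i = 0; i < ret; i++)
 *		put_page(pages[i]);
 */
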
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		down_write(&mm->mmap_sem);
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate);
		up_write(&mm->mmap_sem);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset & ~PAGE_MASK))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

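/*
 * Usage sketch (hypothetical in-kernel mapping): @offset is a byte
 * offset and must be page-aligned; vm_mmap() converts it to a page
 * offset before calling vm_mmap_pgoff():
 *
 *	addr = vm_mmap(file, 0, size, PROT_READ, MAP_SHARED, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return addr;
 */
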
struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;

	VM_BUG_ON(PageSlab(page));
	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		mapping = swap_address_space(entry);
	} else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		mapping = NULL;
	return mapping;
}

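/*
 * Usage sketch: page_mapping() distinguishes the three cases a caller
 * may see: a swapcache page (its swap address space), an anonymous page
 * (NULL), or an ordinary pagecache page (its file's address_space):
 *
 *	struct address_space *mapping = page_mapping(page);
 *	if (!mapping)
 *		...page is anonymous...
 */
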
/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);