/* xref: /openbmc/linux/mm/util.c (revision d670b479) */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <asm/uaccess.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);
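
/*
 * Usage sketch (illustrative only, excluded from the build): a caller
 * taking its own copy of a string it does not own. "struct example_dev"
 * and its ->name field are hypothetical.
 */
#if 0
static int example_set_name(struct example_dev *dev, const char *name)
{
	char *copy = kstrdup(name, GFP_KERNEL);

	if (!copy)
		return -ENOMEM;
	kfree(dev->name);		/* safe: kfree(NULL) is a no-op */
	dev->name = copy;
	return 0;
}
#endif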

/**
 * kstrndup - allocate space for and copy an existing string, with a size limit
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);
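
/*
 * Usage sketch (illustrative only, excluded from the build): taking a
 * bounded, guaranteed NUL-terminated copy of a label that may not be
 * terminated in its source buffer. EXAMPLE_LABEL_MAX is hypothetical.
 */
#if 0
#define EXAMPLE_LABEL_MAX 16

static char *example_copy_label(const char *raw)
{
	/* result is at most EXAMPLE_LABEL_MAX bytes plus the NUL */
	return kstrndup(raw, EXAMPLE_LABEL_MAX, GFP_KERNEL);
}
#endif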

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
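
/*
 * Usage sketch (illustrative only, excluded from the build): duplicating
 * a fixed-size header so it outlives the buffer it was parsed from.
 * "struct example_hdr" is hypothetical.
 */
#if 0
static struct example_hdr *example_dup_hdr(const struct example_hdr *src)
{
	return kmemdup(src, sizeof(*src), GFP_KERNEL);
}
#endif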

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
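
/*
 * Usage sketch (illustrative only, excluded from the build): the common
 * ioctl pattern of pulling a fixed-size argument block in from user
 * space. "struct example_args" and example_apply() are hypothetical.
 */
#if 0
static long example_ioctl(void __user *uarg)
{
	struct example_args *args;
	long ret;

	args = memdup_user(uarg, sizeof(*args));
	if (IS_ERR(args))
		return PTR_ERR(args);	/* -ENOMEM or -EFAULT */

	ret = example_apply(args);
	kfree(args);
	return ret;
}
#endif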

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want the buffer freed
 * immediately, for example when the old buffer may still be in use by
 * RCU readers and must only be freed after a grace period.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	if (p)
		ks = ksize(p);

	if (ks >= new_size)
		return (void *)p;

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}
EXPORT_SYMBOL(__krealloc);
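
/*
 * Usage sketch (illustrative only, excluded from the build): growing a
 * buffer that RCU readers may still be dereferencing. __krealloc() is
 * used so the old buffer is not freed before a grace period has
 * elapsed. "struct example_obj" and its __rcu ->buf field are
 * hypothetical; updater-side locking is elided.
 */
#if 0
static int example_grow(struct example_obj *obj, size_t new_size)
{
	void *old = obj->buf;
	void *new = __krealloc(old, new_size, GFP_KERNEL);

	if (!new)
		return -ENOMEM;

	if (new != old) {
		/* publish the copy, then free the old buffer after a GP */
		rcu_assign_pointer(obj->buf, new);
		synchronize_rcu();
		kfree(old);
	}
	return 0;
}
#endif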

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.  If @p is %NULL, krealloc()
 * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
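
/*
 * Usage sketch (illustrative only, excluded from the build): growing a
 * dynamic array. On failure krealloc() leaves the old allocation
 * intact, so the result must be checked before overwriting the old
 * pointer. "struct example_vec" is hypothetical.
 */
#if 0
static int example_push(struct example_vec *v, int item)
{
	if (v->nr == v->cap) {
		size_t new_cap = v->cap ? 2 * v->cap : 4;
		int *tmp = krealloc(v->data, new_cap * sizeof(*tmp),
				    GFP_KERNEL);

		if (!tmp)
			return -ENOMEM;	/* v->data is still valid */
		v->data = tmp;
		v->cap = new_cap;
	}
	v->data[v->nr++] = item;
	return 0;
}
#endif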

/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer, which can be a
 * good deal bigger than the requested buffer size passed to kmalloc().
 * So be careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);
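
/*
 * Usage sketch (illustrative only, excluded from the build): releasing
 * key material, where leaving the plaintext bytes behind in the slab
 * would be an information leak. "struct example_key" is hypothetical.
 */
#if 0
static void example_destroy_key(struct example_key *key)
{
	kzfree(key->secret);
	key->secret = NULL;
}
#endif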

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Returns a NUL-terminated copy of the string, or an ERR_PTR() on failure.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
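
/*
 * Usage sketch (illustrative only, excluded from the build): copying a
 * user-supplied name in a syscall-style entry point; strndup_user()
 * bounds the copy and guarantees NUL termination. example_lookup() is
 * hypothetical.
 */
#if 0
static long example_syscall(const char __user *uname)
{
	char *name = strndup_user(uname, PATH_MAX);
	long ret;

	if (IS_ERR(name))
		return PTR_ERR(name);
	ret = example_lookup(name);
	kfree(name);
	return ret;
}
#endif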
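
/*
 * Link @vma into @mm's address-ordered VMA list, immediately after
 * @prev. If @prev is NULL, the vma becomes the new list head and
 * @rb_parent, when set, locates the successor via the rbtree. The
 * caller must hold mmap_sem for writing.
 */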
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
static int vm_is_stack_for_task(struct task_struct *t,
				struct vm_area_struct *vma)
{
	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Check if the vma is being used as a stack.
 * If in_group is non-zero, check the entire thread group; otherwise
 * check only the given task. Returns the pid of the task whose stack
 * the vma is, or 0 if it is no task's stack.
 */
pid_t vm_is_stack(struct task_struct *task,
		  struct vm_area_struct *vma, int in_group)
{
	pid_t ret = 0;

	if (vm_is_stack_for_task(task, vma))
		return task->pid;

	if (in_group) {
		struct task_struct *t;

		rcu_read_lock();
		if (!pid_alive(task))
			goto done;

		t = task;
		do {
			if (vm_is_stack_for_task(t, vma)) {
				ret = t->pid;
				goto done;
			}
		} while_each_thread(task, t);
done:
		rcu_read_unlock();
	}

	return ret;
}
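
/*
 * Usage sketch (illustrative only, excluded from the build): how a
 * /proc/<pid>/maps-style dumper might annotate a stack VMA; a non-zero
 * return identifies the thread the stack belongs to.
 */
#if 0
static void example_show_stack_annotation(struct seq_file *m,
					  struct task_struct *task,
					  struct vm_area_struct *vma)
{
	pid_t tid = vm_is_stack(task, vma, 1);

	if (tid)
		seq_printf(m, "[stack:%d]", tid);
}
#endif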

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
	mm->unmap_area = arch_unmap_area;
}
#endif

/*
 * Like get_user_pages_fast() except it is IRQ-safe in that it won't
 * fall back to the regular GUP. If the architecture does not support
 * this function, it simply returns with no pages pinned.
 */
int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However,
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However, if the
 * pages have to be faulted in, it may turn out to be slightly slower, so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __attribute__((weak)) get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, start, nr_pages,
					write, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
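
/*
 * Usage sketch (illustrative only, excluded from the build): pinning a
 * user buffer for writing and dropping the references afterwards. Note
 * that fewer pages than requested may be pinned, and every pinned page
 * must eventually be released with put_page().
 */
#if 0
static int example_pin_and_release(unsigned long uaddr, int nr_pages,
				   struct page **pages)
{
	int i, pinned;

	pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);
	if (pinned < 0)
		return pinned;

	/* ... operate on pages[0..pinned-1] here ... */

	for (i = 0; i < pinned; i++) {
		set_page_dirty_lock(pages[i]);	/* we may have written */
		put_page(pages[i]);
	}
	return pinned == nr_pages ? 0 : -EFAULT;
}
#endif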

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		down_write(&mm->mmap_sem);
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff);
		up_write(&mm->mmap_sem);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset & ~PAGE_MASK))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
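
/*
 * Usage sketch (illustrative only, excluded from the build): an
 * in-kernel caller mapping one page of a file read-only into the
 * current process. The two checks above reject offsets that are
 * unaligned or would wrap around when @len is added.
 */
#if 0
static unsigned long example_map_file(struct file *filp)
{
	return vm_mmap(filp, 0, PAGE_SIZE, PROT_READ, MAP_SHARED, 0);
}
#endif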

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);