/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing.  Others, which simply traverse vmas, need
 * only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

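	/*
	 * Try to merge the range with an adjacent vma first; failing
	 * that, split this vma so that the new flags apply to
	 * [start, end) only.
	 */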
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
				vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}


#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;
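
		/*
		 * Map and lock the page table just long enough to sample
		 * one pte: read_swap_cache_async() below can sleep, so the
		 * lock must not be held across it.
		 */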
		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte) || pte_file(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
								vma, index);
		if (page)
			page_cache_release(page);
	}

	return 0;
}

static void force_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_walk walk = {
		.mm = vma->vm_mm,
		.pmd_entry = swapin_walk_pmd_entry,
		.private = vma,
	};

	walk_page_range(start, end, &walk);

	lru_add_drain();	/* Push any new pages onto the LRU now */
}

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	pgoff_t index;
	struct page *page;
	swp_entry_t swap;

	for (; start < end; start += PAGE_SIZE) {
		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

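		/*
		 * shmem keeps swap entries as exceptional entries in its
		 * page-cache radix tree; a regular page (or no page) at
		 * this offset means there is nothing to swap in.
		 */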
		page = find_get_page(mapping, index);
		if (!radix_tree_exceptional_entry(page)) {
			if (page)
				page_cache_release(page);
			continue;
		}
		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
								NULL, 0);
		if (page)
			page_cache_release(page);
	}

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif		/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

#ifdef CONFIG_SWAP
	if (!file || mapping_cap_swap_backed(file->f_mapping)) {
		*prev = vma;
		if (!file)
			force_swapin_readahead(vma, start, end);
		else
			force_shm_swapin_readahead(vma, start, end,
						file->f_mapping);
		return 0;
	}
#endif

	if (!file)
		return -EBADF;

	if (file->f_mapping->a_ops->get_xip_mem) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
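	/* Convert [start, end) from virtual addresses to page-cache offsets. */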
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
		struct zap_details details = {
			.nonlinear_vma = vma,
			.last_index = ULONG_MAX,
		};
		zap_page_range(vma, start, end - start, &details);
	} else
		zap_page_range(vma, start, end - start, NULL);
	return 0;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 *
 * NOTE: Currently, only shmfs/tmpfs is supported for this operation.
 * Other filesystems return -ENOSYS.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host)
		return -EINVAL;

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	up_read(&current->mm->mmap_sem);
	error = do_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	for (; start < end; start += PAGE_SIZE) {
		struct page *p;
		int ret;

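		/*
		 * Pin exactly one page at this address.  The poison and
		 * soft-offline paths consume the extra reference (hence
		 * MF_COUNT_INCREASED); already-poisoned pages are
		 * released here instead.
		 */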
		ret = get_user_pages_fast(start, 1, 0, &p);
		if (ret != 1)
			return ret;

		if (PageHWPoison(p)) {
			put_page(p);
			continue;
		}
		if (bhv == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining page %#lx at %#lx\n",
				page_to_pfn(p), start);
			ret = soft_offline_page(p, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}
		pr_info("Injecting memory failure for page %#lx at %#lx\n",
		       page_to_pfn(p), start);
		/* Ignore return value for now */
		memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
	}
	return 0;
}
#endif

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_DONTNEED:
		return madvise_dontneed(vma, prev, start, end);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static int
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
		return 1;

	default:
		return 0;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the
 *		application will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from the child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or the application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
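/*
 * A minimal userspace sketch (illustrative only, not part of this file's
 * build, and assuming a 4 KB page size): advise the kernel about an
 * anonymous mapping, then discard its pages once the data is no longer
 * needed.
 *
 *	size_t len = 8 * 4096;
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(buf, len, MADV_SEQUENTIAL);	read ahead aggressively
 *	... use buf ...
 *	madvise(buf, len, MADV_DONTNEED);	zap the pages, keep the mapping
 */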
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_hwpoison(behavior, start, start+len_in);
#endif
	if (!madvise_behavior_valid(behavior))
		return error;

	if (start & ~PAGE_MASK)
		return error;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

	write = madvise_need_mmap_write(behavior);
	if (write)
		down_write(&current->mm->mmap_sem);
	else
		down_read(&current->mm->mmap_sem);

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

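	/*
	 * Plug the block layer so that I/O submitted below (e.g. the
	 * readahead issued for MADV_WILLNEED) can be batched before it
	 * is dispatched to the device.
	 */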
	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}
556