xref: /openbmc/linux/mm/madvise.c (revision dea54fba)
/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, start, 1);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
	}

	if (end != vma->vm_end) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, end, 0);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;
out:
	return error;
}
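
/*
 * Illustrative userspace sketch (not part of the kernel source; page size
 * assumed to be sysconf(_SC_PAGESIZE)): advising only the middle of a
 * mapping with a flag-changing behavior such as MADV_DONTFORK exercises the
 * split path above, leaving up to three vmas where there was one, which can
 * be observed in /proc/self/maps.
 *
 *	size_t pg = sysconf(_SC_PAGESIZE);
 *	char *p = mmap(NULL, 4 * pg, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(p + pg, 2 * pg, MADV_DONTFORK);
 */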

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
							vma, index, false);
		if (page)
			put_page(page);
	}

	return 0;
}

static void force_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_walk walk = {
		.mm = vma->vm_mm,
		.pmd_entry = swapin_walk_pmd_entry,
		.private = vma,
	};

	walk_page_range(start, end, &walk);

	lru_add_drain();	/* Push any new pages onto the LRU now */
}

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	pgoff_t index;
	struct page *page;
	swp_entry_t swap;

	for (; start < end; start += PAGE_SIZE) {
		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

		page = find_get_entry(mapping, index);
		if (!radix_tree_exceptional_entry(page)) {
			if (page)
				put_page(page);
			continue;
		}
		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
							NULL, 0, false);
		if (page)
			put_page(page);
	}

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif		/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

#ifdef CONFIG_SWAP
	if (!file) {
		*prev = vma;
		force_swapin_readahead(vma, start, end);
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		*prev = vma;
		force_shm_swapin_readahead(vma, start, end,
					file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}
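
/*
 * Illustrative userspace sketch (assumes fd is an open regular file and st
 * holds its fstat() result): MADV_WILLNEED only schedules readahead, so the
 * call returns before the I/O completes.
 *
 *	char *p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
 *	madvise(p, st.st_size, MADV_WILLNEED);
 *	(later reads of p[] are then more likely to hit the page cache)
 */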

static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct page *page;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte holds a swap entry, just clear the page table
		 * entry to prevent swap-in, which is more expensive than
		 * page allocation + zeroing.
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (non_swap_entry(entry))
				continue;
			nr_swap--;
			free_swap_and_cache(entry);
			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			continue;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * If pmd isn't transhuge but the page is THP and
		 * is owned by only this process, split it and
		 * deactivate all pages.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				goto out;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			unlock_page(page);
			put_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (PageSwapCache(page) || PageDirty(page)) {
			if (!trylock_page(page))
				continue;
			/*
			 * If the page is shared with others, we can't clear
			 * PG_dirty of the page.
			 */
			if (page_mapcount(page) != 1) {
				unlock_page(page);
				continue;
			}

			if (PageSwapCache(page) && !try_to_free_swap(page)) {
				unlock_page(page);
				continue;
			}

			ClearPageDirty(page);
			unlock_page(page);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) don't update the TLB
			 * with set_pte_at and tlb_remove_tlb_entry, so for
			 * portability, remap the pte as old and clean after
			 * clearing it.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
		mark_page_lazyfree(page);
	}
out:
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static void madvise_free_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct mm_walk free_walk = {
		.pmd_entry = madvise_free_pte_range,
		.mm = vma->vm_mm,
		.private = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(addr, end, &free_walk);
	tlb_end_vma(tlb, vma);
}

static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	unsigned long start, end;
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	/* MADV_FREE works for only anon vma at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	start = max(vma->vm_start, start_addr);
	if (start >= vma->vm_end)
		return -EINVAL;
	end = min(vma->vm_end, end_addr);
	if (end <= vma->vm_start)
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start, end);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(mm, start, end);
	madvise_free_page_range(&tlb, vma, start, end);
	mmu_notifier_invalidate_range_end(mm, start, end);
	tlb_finish_mmu(&tlb, start, end);

	return 0;
}
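
/*
 * Illustrative userspace sketch (private anonymous memory only, as enforced
 * above): after MADV_FREE the kernel may reclaim the pages at any point
 * under memory pressure; a page that was reclaimed reads back as zero, while
 * writing to a page again cancels its lazy free.
 *
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	memset(p, 0xaa, len);
 *	madvise(p, len, MADV_FREE);
 *	p[0] = 1;	(this page is kept; untouched pages may be purged)
 */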

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	zap_page_range(vma, start, end - start);
	return 0;
}
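
/*
 * Illustrative userspace sketch (private anonymous mapping assumed): after
 * MADV_DONTNEED the range stays mapped, but the old contents are discarded
 * and the next access to each page sees zero-filled memory.
 *
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	memset(p, 0xff, len);
 *	madvise(p, len, MADV_DONTNEED);
 *	assert(p[0] == 0);
 */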

static long madvise_dontneed_free(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end,
				  int behavior)
{
	*prev = vma;
	if (!can_madv_dontneed_vma(vma))
		return -EINVAL;

	if (!userfaultfd_remove(vma, start, end)) {
		*prev = NULL; /* mmap_sem has been dropped, prev is stale */

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma)
			return -ENOMEM;
		if (start < vma->vm_start) {
			/*
			 * This "vma" under revalidation is the one
			 * with the lowest vma->vm_start where start
			 * is also < vma->vm_end. If start <
			 * vma->vm_start it means a hole materialized
			 * in the user address space within the
			 * virtual range passed to MADV_DONTNEED
			 * or MADV_FREE.
			 */
			return -ENOMEM;
		}
		if (!can_madv_dontneed_vma(vma))
			return -EINVAL;
		if (end > vma->vm_end) {
			/*
			 * Don't fail if end > vma->vm_end. If the old
			 * vma was split while the mmap_sem was
			 * released, the effect of the concurrent
			 * operation should not cause madvise() to
			 * have an undefined result. There may be an
			 * adjacent next vma that we'll walk
			 * next. userfaultfd_remove() will generate an
			 * UFFD_EVENT_REMOVE repetition on the
			 * end-vma->vm_end range, but the manager can
			 * handle a repetition fine.
			 */
			end = vma->vm_end;
		}
		VM_WARN_ON(start >= end);
	}

	if (behavior == MADV_DONTNEED)
		return madvise_dontneed_single_vma(vma, start, end);
	else if (behavior == MADV_FREE)
		return madvise_free_single_vma(vma, start, end);
	else
		return -EINVAL;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host)
		return -EINVAL;

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	if (userfaultfd_remove(vma, start, end)) {
		/* mmap_sem was not released by userfaultfd_remove() */
		up_read(&current->mm->mmap_sem);
	}
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}
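
/*
 * Illustrative userspace sketch (assumes a shared, writable mapping of an
 * open file fd): MADV_REMOVE on such a mapping has roughly the same effect
 * as punching a hole in the underlying file, which is what the
 * vfs_fallocate() call above does.
 *
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	madvise(p, len, MADV_REMOVE);
 *	(comparable to)
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, len);
 */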

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_inject_error(int behavior,
		unsigned long start, unsigned long end)
{
	struct page *page;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	for (; start < end; start += PAGE_SIZE <<
				compound_order(compound_head(page))) {
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &page);
		if (ret != 1)
			return ret;

		if (PageHWPoison(page)) {
			put_page(page);
			continue;
		}

		if (behavior == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
						page_to_pfn(page), start);

			ret = soft_offline_page(page, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}
		pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
						page_to_pfn(page), start);

		ret = memory_failure(page_to_pfn(page), 0, MF_COUNT_INCREASED);
		if (ret)
			return ret;
	}
	return 0;
}
#endif
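
/*
 * Illustrative userspace sketch (requires CAP_SYS_ADMIN and a kernel built
 * with CONFIG_MEMORY_FAILURE): poisoning one page of an anonymous mapping to
 * exercise the error-injection path above; the process should then expect a
 * SIGBUS, either immediately or on its next access to that page, depending
 * on vm.memory_failure_early_kill.
 *
 *	size_t pg = sysconf(_SC_PAGESIZE);
 *	char *p = mmap(NULL, pg, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(p, pg, MADV_HWPOISON);
 */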

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_FREE:
	case MADV_DONTNEED:
		return madvise_dontneed_free(vma, prev, start, end, behavior);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
#ifdef CONFIG_MEMORY_FAILURE
	case MADV_SOFT_OFFLINE:
	case MADV_HWPOISON:
#endif
		return true;

	default:
		return false;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the
 *		application will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_HWPOISON - trigger memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

	if (!madvise_behavior_valid(behavior))
		return error;

	if (start & ~PAGE_MASK)
		return error;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_inject_error(behavior, start, start + len_in);
#endif

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (down_write_killable(&current->mm->mmap_sem))
			return -EINTR;
	} else {
		down_read(&current->mm->mmap_sem);
	}

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}
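
/*
 * Illustrative userspace sketch of the contract implemented above: start
 * must be page aligned, len_in is rounded up to a whole number of pages, and
 * unmapped holes in the range are skipped but reported as ENOMEM after the
 * advice has been applied to the mapped parts.
 *
 *	size_t pg = sysconf(_SC_PAGESIZE);
 *	char *p = mmap(NULL, 4 * pg, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	munmap(p + pg, pg);
 *	if (madvise(p, 4 * pg, MADV_DONTNEED) == -1 && errno == ENOMEM)
 *		;	(the advice still took effect on the mapped pages)
 */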