/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/hugetlb.h>

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct * vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct * mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	int new_flags = vma->vm_flags & ~VM_READHINTMASK;

	switch (behavior) {
	case MADV_SEQUENTIAL:
		new_flags |= VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags |= VM_RAND_READ;
		break;
	default:
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
				vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}
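
/*
 * Illustrative userspace sketch (not part of this file): advising a
 * page-aligned sub-range of a larger mapping is what triggers the vma
 * splitting above.  Assuming 4 kB pages:
 *
 *	char *buf = mmap(NULL, 16 * 4096, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	// Hint only the middle four pages; the single vma is split into
 *	// up to three, and only the middle one gets VM_SEQ_READ set.
 *	madvise(buf + 4 * 4096, 4 * 4096, MADV_SEQUENTIAL);
 */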

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct * vma,
			     struct vm_area_struct ** prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

	if (!file)
		return -EBADF;

	if (file->f_mapping->a_ops->get_xip_page) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping,
			file, start, max_sane_readahead(end - start));
	return 0;
}
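
/*
 * Illustrative userspace sketch (not part of this file): MADV_WILLNEED
 * is typically used to start readahead on a file-backed mapping before
 * the data is touched, so that later faults are served from the page
 * cache.  "data.bin", len, do_other_work() and use() are hypothetical:
 *
 *	int fd = open("data.bin", O_RDONLY);
 *	char *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 *	madvise(p, len, MADV_WILLNEED);	// schedule I/O, do not wait
 *	do_other_work();		// overlaps with the readahead
 *	use(p);				// faults now likely hit the cache
 */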

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for refill_inactive to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * refill_inactive to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct * vma,
			     struct vm_area_struct ** prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_RESERVED))
		return -EINVAL;

	if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
		struct zap_details details = {
			.nonlinear_vma = vma,
			.last_index = ULONG_MAX,
		};
		zap_page_range(vma, start, end - start, &details);
	} else
		zap_page_range(vma, start, end - start, NULL);
	return 0;
}
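
/*
 * Illustrative userspace sketch (not part of this file): an allocator
 * can hand scratch memory back to the kernel without unmapping it.
 * Dirty anonymous pages are simply discarded rather than swapped out,
 * and the range reads back as zero-filled pages on the next touch
 * (scratch and scratch_len are hypothetical):
 *
 *	madvise(scratch, scratch_len, MADV_DONTNEED);
 *	// scratch stays mapped; touching it again faults in fresh
 *	// zero pages instead of bringing the old contents back in.
 */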

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	long error;

	switch (behavior) {
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
		error = madvise_behavior(vma, prev, start, end, behavior);
		break;

	case MADV_WILLNEED:
		error = madvise_willneed(vma, prev, start, end);
		break;

	case MADV_DONTNEED:
		error = madvise_dontneed(vma, prev, start, end);
		break;

	default:
		error = -EINVAL;
		break;
	}
	return error;
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the
 *		application will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
asmlinkage long sys_madvise(unsigned long start, size_t len_in, int behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct * vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	size_t len;

	down_write(&current->mm->mmap_sem);

	if (start & ~PAGE_MASK)
		goto out;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		goto out;
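
	/*
	 * Worked example, assuming 4 kB pages (so ~PAGE_MASK == 0xfff):
	 * len_in == 0x1001 rounds up to len == 0x2000, while len_in ==
	 * (size_t)-1 wraps around to len == 0 and is caught above.
	 */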

	end = start + len;
	if (end < start)
		goto out;

	error = 0;
	if (end == start)
		goto out;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		vma = prev->vm_next;
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
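
/*
 * Illustrative userspace sketch (not part of this file): the return
 * value contract documented above, as seen through errno.  Note the
 * partial-success rule: a hole inside [start, start + len) only
 * surfaces as ENOMEM after every mapped vma in the range has been
 * processed.  addr, len and advice are hypothetical:
 *
 *	if (madvise(addr, len, advice) < 0) {
 *		switch (errno) {
 *		case EINVAL:	// bad alignment/advice, or locked pages
 *		case ENOMEM:	// range included unmapped addresses
 *		case EBADF:	// MADV_WILLNEED on a non-file mapping
 *		case EAGAIN:	// transient shortage; caller may retry
 *			break;
 *		}
 *	}
 */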