/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/hugetlb.h>
#include <linux/sched.h>

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		new_flags &= ~VM_DONTCOPY;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

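	/*
	 * First try to merge the new flags into an adjacent vma; pgoff is
	 * the file offset (in pages) that 'start' has within this mapping.
	 */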
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
				vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}
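
/*
 * For example, advising only the middle pages of a mapping with
 * MADV_SEQUENTIAL leaves three vmas behind: the unadvised head and
 * tail, plus an advised middle vma carrying VM_SEQ_READ.
 */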

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

	if (!file)
		return -EBADF;

	/*
	 * Page cache readahead assumes page cache pages are order-0 which
	 * is not the case for hugetlbfs. Do not give a bad return value
	 * but ignore the advice.
	 */
	if (vma->vm_flags & VM_HUGETLB)
		return 0;

	if (file->f_mapping->a_ops->get_xip_mem) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
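	/*
	 * Convert the virtual address range into page-cache offsets in the
	 * file so the readahead below operates on the right pages.
	 */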
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping,
			file, start, max_sane_readahead(end - start));
	return 0;
}

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
		struct zap_details details = {
			.nonlinear_vma = vma,
			.last_index = ULONG_MAX,
		};
		zap_page_range(vma, start, end - start, &details);
	} else {
		zap_page_range(vma, start, end - start, NULL);
	}
	return 0;
}
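
/*
 * Illustrative userspace usage (a sketch, not part of this file): a
 * malloc implementation can return freed memory to the kernel with
 *
 *	madvise(ptr, size, MADV_DONTNEED);
 *
 * Pages in a private anonymous range read back as zero-filled if they
 * are touched again afterwards.
 */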

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 *
 * NOTE: Currently, only shmfs/tmpfs is supported for this operation.
 * Other filesystems return -ENOSYS.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	struct address_space *mapping;
	loff_t offset, endoff;
	int error;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
		return -EINVAL;

	if (!vma->vm_file || !vma->vm_file->f_mapping ||
	    !vma->vm_file->f_mapping->host)
		return -EINVAL;

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	mapping = vma->vm_file->f_mapping;

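	/*
	 * Translate the user address range into byte offsets in the backing
	 * file; endoff is inclusive, hence the "- 1".
	 */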
	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	endoff = (loff_t)(end - vma->vm_start - 1)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/* vmtruncate_range needs to take i_mutex and i_alloc_sem */
	up_read(&current->mm->mmap_sem);
	error = vmtruncate_range(mapping->host, offset, endoff);
	down_read(&current->mm->mmap_sem);
	return error;
}

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	long error;

	switch (behavior) {
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			break;
		}
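		/* fall through */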
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
		error = madvise_behavior(vma, prev, start, end, behavior);
		break;
	case MADV_REMOVE:
		error = madvise_remove(vma, prev, start, end);
		break;
	case MADV_WILLNEED:
		error = madvise_willneed(vma, prev, start, end);
		break;
	case MADV_DONTNEED:
		error = madvise_dontneed(vma, prev, start, end);
		break;
	default:
		error = -EINVAL;
		break;
	}
	return error;
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the
 *		application will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;

	write = madvise_need_mmap_write(behavior);
	if (write)
		down_write(&current->mm->mmap_sem);
	else
		down_read(&current->mm->mmap_sem);

	if (start & ~PAGE_MASK)
		goto out;
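	/* Round len up to a whole number of pages. */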
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from a small negative to zero */
	if (len_in && !len)
		goto out;

	end = start + len;
	if (end < start)
		goto out;

	error = 0;
	if (end == start)
		goto out;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}
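
/*
 * Illustrative userspace usage of this syscall (a minimal sketch, not
 * part of the kernel source):
 *
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 16 * sysconf(_SC_PAGESIZE);
 *		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		if (p == MAP_FAILED)
 *			return 1;
 *		// Hint that the range will be read sequentially.
 *		if (madvise(p, len, MADV_SEQUENTIAL))
 *			perror("madvise");
 *		// ... use the mapping ...
 *		// Done with it: let the kernel reclaim the pages.
 *		if (madvise(p, len, MADV_DONTNEED))
 *			perror("madvise");
 *		return 0;
 *	}
 */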