xref: /openbmc/linux/mm/mincore.c (revision e8f6f3b4)
/*
 *	linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

static void mincore_hugetlb_page_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				unsigned char *vec)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct hstate *h;

	h = hstate_vma(vma);
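	/*
	 * Outer loop: one iteration per huge page covering [addr, end).
	 * Inner loop: one vec byte per PAGE_SIZE piece of that huge page,
	 * since the caller's vector is in small-page units.
	 */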
	while (1) {
		unsigned char present;
		pte_t *ptep;
		/*
		 * Huge pages are always in RAM for now, but
		 * in theory this should be checked as well.
		 */
		ptep = huge_pte_offset(current->mm,
				       addr & huge_page_mask(h));
		present = ptep && !huge_pte_none(huge_ptep_get(ptep));
		while (1) {
			*vec = present;
			vec++;
			addr += PAGE_SIZE;
			if (addr == end)
				return;
			/* check hugepage border */
			if (!(addr & ~huge_page_mask(h)))
				break;
		}
	}
#else
	BUG();
#endif
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
{
	unsigned char present = 0;
	struct page *page;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (i.e. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 */
#ifdef CONFIG_SWAP
	if (shmem_mapping(mapping)) {
		page = find_get_entry(mapping, pgoff);
		/*
		 * shmem/tmpfs may return swap: account for swapcache
		 * page too.
		 */
		if (radix_tree_exceptional_entry(page)) {
			swp_entry_t swp = radix_to_swp_entry(page);
			page = find_get_page(swap_address_space(swp), swp.val);
		}
	} else
		page = find_get_page(mapping, pgoff);
#else
	page = find_get_page(mapping, pgoff);
#endif
	if (page) {
		present = PageUptodate(page);
		page_cache_release(page);
	}

	return present;
}

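/*
 * For ranges with no page table entry, residency can only come from the
 * page cache of the backing file; anonymous pages that have never been
 * faulted in are by definition not resident.
 */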
static void mincore_unmapped_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
}

static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	spinlock_t *ptl;
	pte_t *ptep;

	ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		pte_t pte = *ptep;
		pgoff_t pgoff;

		next = addr + PAGE_SIZE;
		if (pte_none(pte))
			mincore_unmapped_range(vma, addr, next, vec);
		else if (pte_present(pte))
			*vec = 1;
		else if (pte_file(pte)) {
			pgoff = pte_to_pgoff(pte);
			*vec = mincore_page(vma->vm_file->f_mapping, pgoff);
		} else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (non_swap_entry(entry)) {
				/*
				 * migration or hwpoison entries are always
				 * uptodate
				 */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
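				/*
				 * The swap cache is indexed by the raw
				 * swp_entry_t value, so the entry itself
				 * doubles as the page offset here.
				 */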
				pgoff = entry.val;
				*vec = mincore_page(swap_address_space(entry),
					pgoff);
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec++;
	} while (ptep++, addr = next, addr != end);
	pte_unmap_unlock(ptep - 1, ptl);
}

static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			if (mincore_huge_pmd(vma, pmd, addr, next, vec)) {
				vec += (next - addr) >> PAGE_SHIFT;
				continue;
			}
			/* fall through */
		}
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pte_range(vma, pmd, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pmd++, addr = next, addr != end);
}

static void mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pud_t *pud;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pmd_range(vma, pud, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pud++, addr = next, addr != end);
}

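/*
 * Walk the page tables for [addr, end) in the usual pgd/pud/pmd/pte order,
 * filling one byte of *vec per small page.
 */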
static void mincore_page_range(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pud_range(vma, pgd, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pgd++, addr = next, addr != end);
}

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments and we hold the mmap semaphore: we should
 * just return the amount of info we're asked for.  The result is
 * clamped to the end of the first vma found, so the caller may have
 * to call us again for the rest of the range.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;

	vma = find_vma(current->mm, addr);
	if (!vma || addr < vma->vm_start)
		return -ENOMEM;

	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));

	if (is_vm_hugetlb_page(vma))
		mincore_hugetlb_page_range(vma, addr, end, vec);
	else
		mincore_page_range(vma, addr, end, vec);

	return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len) are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
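/*
 * A minimal user-space sketch of how this call is typically used
 * (illustrative only, error handling omitted, file name hypothetical):
 *
 *	size_t page = sysconf(_SC_PAGESIZE);
 *	struct stat st;
 *	int fd = open("data.bin", O_RDONLY);
 *
 *	fstat(fd, &st);
 *	void *map = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
 *	size_t pages = (st.st_size + page - 1) / page;
 *	unsigned char *vec = malloc(pages);
 *
 *	if (mincore(map, st.st_size, vec) == 0) {
 *		size_t resident = 0, i;
 *
 *		for (i = 0; i < pages; i++)
 *			resident += vec[i] & 1;
 *		printf("%zu of %zu pages resident\n", resident, pages);
 *	}
 */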
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	/* Check the start address: needs to be page-aligned.. */
	if (start & ~PAGE_CACHE_MASK)
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok(VERIFY_READ, (void __user *) start, len))
		return -ENOMEM;

	/*
	 * Round len up to a whole number of pages.  Doing it this way
	 * also avoids the overflow that PAGE_CACHE_ALIGN(len) could hit
	 * for a len near the top of the address space.
	 */
	pages = len >> PAGE_SHIFT;
	pages += (len & ~PAGE_MASK) != 0;
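	/* e.g. len == 2 * PAGE_SIZE + 1 results in pages == 3 */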

	if (!access_ok(VERIFY_WRITE, vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		down_read(&current->mm->mmap_sem);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		up_read(&current->mm->mmap_sem);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}