/*
 *	linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
{
	unsigned char present = 0;
	struct page *page;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (i.e. marked !present and faulted in with
	 * tmpfs's .fault). So swapped-out tmpfs mappings are tested here.
	 *
	 * However, when tmpfs moves the page from pagecache into swapcache,
	 * it is still in core, but the find_get_page below won't find it.
	 * No big deal, but make a note of it.
	 */
	page = find_get_page(mapping, pgoff);
	if (page) {
		present = PageUptodate(page);
		page_cache_release(page);
	}

	return present;
}

/*
 * Do a chunk of "sys_mincore()". The caller has already checked all
 * the arguments and holds the mmap semaphore, so we just return the
 * amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned char *vec, unsigned long pages)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;
	spinlock_t *ptl;
	unsigned long nr;
	int i;
	pgoff_t pgoff;
	struct vm_area_struct *vma = find_vma(current->mm, addr);

	/*
	 * find_vma() didn't find anything above us, or we're
	 * in an unmapped hole in the address space: ENOMEM.
	 */
	if (!vma || addr < vma->vm_start)
		return -ENOMEM;

#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma)) {
		struct hstate *h;
		unsigned long nr_huge;
		unsigned char present;

		i = 0;
		nr = min(pages, (vma->vm_end - addr) >> PAGE_SHIFT);
		h = hstate_vma(vma);
		nr_huge = ((addr + pages * PAGE_SIZE - 1) >> huge_page_shift(h))
			  - (addr >> huge_page_shift(h)) + 1;
		nr_huge = min(nr_huge,
			      (vma->vm_end - addr) >> huge_page_shift(h));
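		/*
		 * Note: nr_huge is computed above but never consulted by the
		 * loops below, which terminate through the i == nr check
		 * alone.  Each hugepage yields one residency result,
		 * replicated once per base page it covers (e.g. 512 vec
		 * entries per 2MB hugepage with 4K base pages).
		 */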
		while (1) {
			/* hugepage always in RAM for now,
			 * but in general it needs to be checked */
			ptep = huge_pte_offset(current->mm,
					       addr & huge_page_mask(h));
			present = !!(ptep &&
				     !huge_pte_none(huge_ptep_get(ptep)));
			while (1) {
				vec[i++] = present;
				addr += PAGE_SIZE;
				/* reached the buffer limit? */
				if (i == nr)
					return nr;
				/* crossed a hugepage boundary? */
				if (!((addr & ~huge_page_mask(h))
				      >> PAGE_SHIFT))
					break;
			}
		}
		return nr;
	}
#endif

	/*
	 * Calculate how many pages there are left in the last level of the
	 * PTE array for our address.
	 */
	nr = PTRS_PER_PTE - ((addr >> PAGE_SHIFT) & (PTRS_PER_PTE-1));
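	/*
	 * For example, with 4K pages and PTRS_PER_PTE == 512, an address
	 * whose page sits at index 510 of its PTE page leaves
	 * nr = 512 - 510 = 2 entries before the next PTE page begins.
	 */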

	/*
	 * Don't overrun this vma
	 */
	nr = min(nr, (vma->vm_end - addr) >> PAGE_SHIFT);

	/*
	 * Don't return more than the caller asked for
	 */
	nr = min(nr, pages);

	pgd = pgd_offset(vma->vm_mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		goto none_mapped;
	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		goto none_mapped;
	pmd = pmd_offset(pud, addr);
	if (pmd_none_or_clear_bad(pmd))
		goto none_mapped;

	ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (i = 0; i < nr; i++, ptep++, addr += PAGE_SIZE) {
		unsigned char present;
		pte_t pte = *ptep;

		if (pte_present(pte)) {
			present = 1;

		} else if (pte_none(pte)) {
			if (vma->vm_file) {
				pgoff = linear_page_index(vma, addr);
				present = mincore_page(vma->vm_file->f_mapping,
							pgoff);
			} else
				present = 0;

		} else if (pte_file(pte)) {
			pgoff = pte_to_pgoff(pte);
			present = mincore_page(vma->vm_file->f_mapping, pgoff);

		} else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);
			if (is_migration_entry(entry)) {
				/* migration entries are always uptodate */
				present = 1;
			} else {
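				/*
				 * The page is swapped out, but it may still
				 * be resident in the swap cache, so look it
				 * up there before reporting it absent.
				 */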
#ifdef CONFIG_SWAP
				pgoff = entry.val;
				present = mincore_page(&swapper_space, pgoff);
#else
				WARN_ON(1);
				present = 1;
#endif
			}
		}

		vec[i] = present;
	}
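	/* ptep was post-incremented past the last entry, so unlock via ptep-1 */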
	pte_unmap_unlock(ptep-1, ptl);

	return nr;

none_mapped:
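	/*
	 * No page table is populated here, but pages of a file mapping may
	 * still be resident in the page cache, so consult it rather than
	 * reporting them absent outright.
	 */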
	if (vma->vm_file) {
		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}

	return nr;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len) are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	/* Check the start address: needs to be page-aligned.. */
	if (start & ~PAGE_CACHE_MASK)
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok(VERIFY_READ, (void __user *) start, len))
		return -ENOMEM;

	/* Rounding up this way, rather than with PAGE_CACHE_ALIGN(len),
	 * avoids any overflow for a len near the top of the address space */
	pages = len >> PAGE_SHIFT;
	pages += (len & ~PAGE_MASK) != 0;
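	/* e.g. with 4K pages, len == 10000 gives pages == 2 + 1 == 3 */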

	if (!access_ok(VERIFY_WRITE, vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration: one status
		 * byte per page, so the page-sized temporary buffer holds
		 * exactly PAGE_SIZE of them.
		 */
		down_read(&current->mm->mmap_sem);
		retval = do_mincore(start, tmp, min(pages, PAGE_SIZE));
		up_read(&current->mm->mmap_sem);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}
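
/*
 * Usage sketch (illustration only, not kernel code; kept under #if 0 so it
 * is never built).  It assumes a typical glibc environment where mincore()
 * is declared in <sys/mman.h>: map eight anonymous pages, touch only the
 * first, and print one character of residency status per page.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = 8 * page;
	size_t i;
	unsigned char *status;
	char *buf;

	/* Anonymous mapping: pages are not resident until touched. */
	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;
	buf[0] = 1;	/* fault in the first page only */

	status = malloc(len / page);	/* one status byte per page */
	if (!status)
		return 1;

	if (mincore(buf, len, status) == 0) {
		for (i = 0; i < len / page; i++)
			putchar((status[i] & 1) ? 'R' : '.');
		putchar('\n');	/* typically prints "R......." */
	}
	free(status);
	munmap(buf, len);
	return 0;
}
#endif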