xref: /openbmc/linux/fs/proc/page.c (revision 1edd0337)
// SPDX-License-Identifier: GPL-2.0
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/huge_mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/kernel-page-flags.h>
#include <linux/uaccess.h>
#include "internal.h"

#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
#define KPMBITS (KPMSIZE * BITS_PER_BYTE)

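/*
 * Each array entry is a single u64, so the entry for pfn N lives at file
 * offset N * KPMSIZE; offsets and read sizes that are not multiples of
 * KPMSIZE are rejected with -EINVAL by the read handlers below.
 */
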
static inline unsigned long get_max_dump_pfn(void)
{
#ifdef CONFIG_SPARSEMEM
	/*
	 * The memmap of early sections is completely populated and marked
	 * online even if max_pfn does not fall on a section boundary -
	 * pfn_to_online_page() will succeed on all pages. Allow inspecting
	 * these memmaps.
	 */
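	/*
	 * E.g. on x86-64 with 4 KiB pages, PAGES_PER_SECTION is 32768
	 * (128 MiB sections), so a max_pfn of 0x23456 is rounded up
	 * to 0x28000 here.
	 */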
	return round_up(max_pfn, PAGES_PER_SECTION);
#else
	return max_pfn;
#endif
}

/* /proc/kpagecount - an array exposing page map counts
 *
 * Each entry is a u64 holding the number of times the
 * corresponding physical page is mapped.
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 pcount;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires identifying
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (!ppage || PageSlab(ppage) || page_has_type(ppage))
			pcount = 0;
		else
			pcount = page_mapcount(ppage);

		if (put_user(pcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

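	/* advance the file position past the entries actually copied out */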
	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecount_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpagecount_read,
};
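
/*
 * Minimal userspace sketch (illustrative, not part of this file): read the
 * map count for one pfn. read_kpagecount() is a hypothetical helper; note
 * the file is created with mode 0400 below, so this needs root.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	uint64_t read_kpagecount(unsigned long pfn)
 *	{
 *		uint64_t count = 0;
 *		int fd = open("/proc/kpagecount", O_RDONLY);
 *
 *		if (fd >= 0) {
 *			pread(fd, &count, sizeof(count),
 *			      (off_t)pfn * sizeof(uint64_t));
 *			close(fd);
 *		}
 *		return count;
 *	}
 */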

/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the flags of the
 * corresponding physical page.
 */

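/*
 * Copy bit @kbit of the kernel page flags word into bit @ubit of the
 * user-visible value, e.g. PG_dirty into KPF_DIRTY.
 */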
static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}

u64 stable_page_flags(struct page *page)
{
	u64 k;
	u64 u;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;

	k = page->flags;
	u = 0;

	/*
	 * pseudo flags for the well-known (anonymous) memory-mapped pages
	 *
	 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
	 * simple test in page_mapped() is not enough.
	 */
	if (!PageSlab(page) && page_mapped(page))
		u |= 1 << KPF_MMAP;
	if (PageAnon(page))
		u |= 1 << KPF_ANON;
	if (PageKsm(page))
		u |= 1 << KPF_KSM;

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (PageHead(page))
		u |= 1 << KPF_COMPOUND_HEAD;
	if (PageTail(page))
		u |= 1 << KPF_COMPOUND_TAIL;
	if (PageHuge(page))
		u |= 1 << KPF_HUGE;
	/*
	 * PageTransCompound can be true for non-huge compound pages (slab
	 * pages or pages allocated by drivers with __GFP_COMP) because it
	 * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
	 * to make sure a given page is a thp, not a non-huge compound page.
	 */
	else if (PageTransCompound(page)) {
		struct page *head = compound_head(page);

		if (PageLRU(head) || PageAnon(head))
			u |= 1 << KPF_THP;
		else if (is_huge_zero_page(head)) {
			u |= 1 << KPF_ZERO_PAGE;
			u |= 1 << KPF_THP;
		}
	} else if (is_zero_pfn(page_to_pfn(page)))
		u |= 1 << KPF_ZERO_PAGE;

	/*
	 * Caveats on high order pages: page->_refcount will only be set
	 * to -1 on the head page; SLUB/SLQB do the same for PG_slab;
	 * SLOB won't set PG_slab at all on compound pages.
	 */
	if (PageBuddy(page))
		u |= 1 << KPF_BUDDY;
	else if (page_count(page) == 0 && is_free_buddy_page(page))
		u |= 1 << KPF_BUDDY;

	if (PageOffline(page))
		u |= 1 << KPF_OFFLINE;
	if (PageTable(page))
		u |= 1 << KPF_PGTABLE;

	if (page_is_idle(page))
		u |= 1 << KPF_IDLE;

	u |= kpf_copy_bit(k, KPF_LOCKED,	PG_locked);

	u |= kpf_copy_bit(k, KPF_SLAB,		PG_slab);
	if (PageTail(page) && PageSlab(compound_head(page)))
		u |= 1 << KPF_SLAB;

	u |= kpf_copy_bit(k, KPF_ERROR,		PG_error);
	u |= kpf_copy_bit(k, KPF_DIRTY,		PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE,	PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK,	PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU,		PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED,	PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE,	PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM,	PG_reclaim);

	if (PageSwapCache(page))
		u |= 1 << KPF_SWAPCACHE;
	u |= kpf_copy_bit(k, KPF_SWAPBACKED,	PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE,	PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED,	PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
	u |= kpf_copy_bit(k, KPF_HWPOISON,	PG_hwpoison);
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	u |= kpf_copy_bit(k, KPF_UNCACHED,	PG_uncached);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED,	PG_reserved);
	u |= kpf_copy_bit(k, KPF_MAPPEDTODISK,	PG_mappedtodisk);
	u |= kpf_copy_bit(k, KPF_PRIVATE,	PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2,	PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE,	PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH,		PG_arch_1);
#ifdef CONFIG_ARCH_USES_PG_ARCH_X
	u |= kpf_copy_bit(k, KPF_ARCH_2,	PG_arch_2);
	u |= kpf_copy_bit(k, KPF_ARCH_3,	PG_arch_3);
#endif

	return u;
}

static ssize_t kpageflags_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires identifying
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (put_user(stable_page_flags(ppage), out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpageflags_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpageflags_read,
};
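
/*
 * Minimal userspace sketch (illustrative, not part of this file): test a
 * single KPF_* bit for a pfn. The KPF_* values come from the uapi header
 * <linux/kernel-page-flags.h>; pfn_is_thp() is a hypothetical helper.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *	#include <linux/kernel-page-flags.h>
 *
 *	int pfn_is_thp(unsigned long pfn)
 *	{
 *		uint64_t flags = 0;
 *		int fd = open("/proc/kpageflags", O_RDONLY);
 *
 *		if (fd >= 0) {
 *			pread(fd, &flags, sizeof(flags),
 *			      (off_t)pfn * sizeof(uint64_t));
 *			close(fd);
 *		}
 *		return (flags >> KPF_THP) & 1;
 *	}
 */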

#ifdef CONFIG_MEMCG
static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 ino;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires identifying
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (ppage)
			ino = page_cgroup_ino(ppage);
		else
			ino = 0;

		if (put_user(ino, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecgroup_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpagecgroup_read,
};
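
/*
 * Userspace note (illustrative): each u64 read from /proc/kpagecgroup is
 * the inode number of the memory cgroup the page is charged to, so it can
 * be matched against st_ino from stat() on a cgroup directory. Here ino
 * is a u64 read from the file and "/sys/fs/cgroup/mygroup" stands in for
 * a real cgroup path:
 *
 *	struct stat st;
 *
 *	if (stat("/sys/fs/cgroup/mygroup", &st) == 0 && ino == st.st_ino)
 *		printf("page is charged to mygroup\n");
 */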
#endif /* CONFIG_MEMCG */

static int __init proc_page_init(void)
{
	proc_create("kpagecount", S_IRUSR, NULL, &kpagecount_proc_ops);
	proc_create("kpageflags", S_IRUSR, NULL, &kpageflags_proc_ops);
#ifdef CONFIG_MEMCG
	proc_create("kpagecgroup", S_IRUSR, NULL, &kpagecgroup_proc_ops);
#endif
	return 0;
}
fs_initcall(proc_page_init);