xref: /openbmc/linux/fs/proc/page.c (revision ee65728e)
// SPDX-License-Identifier: GPL-2.0
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/huge_mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/kernel-page-flags.h>
#include <linux/uaccess.h>
#include "internal.h"

#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
#define KPMBITS (KPMSIZE * BITS_PER_BYTE)

static inline unsigned long get_max_dump_pfn(void)
{
#ifdef CONFIG_SPARSEMEM
	/*
	 * The memmap of early sections is completely populated and marked
	 * online even if max_pfn does not fall on a section boundary -
	 * pfn_to_online_page() will succeed on all pages. Allow inspecting
	 * these memmaps.
	 */
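	/*
	 * Worked example (illustrative; assumes x86-64 defaults, where
	 * SECTION_SIZE_BITS == 27 and PAGE_SHIFT == 12, giving
	 * PAGES_PER_SECTION == 0x8000):
	 *
	 *	round_up(0x4a234, 0x8000) == (0x4a234 + 0x7fff) & ~0x7fff
	 *	                          == 0x52233 & ~0x7fff
	 *	                          == 0x50000
	 */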
	return round_up(max_pfn, PAGES_PER_SECTION);
#else
	return max_pfn;
#endif
}

/* /proc/kpagecount - an array exposing page counts
 *
 * Each entry is a u64 representing how many times the
 * corresponding physical page is mapped.
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 pcount;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires identifying
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (!ppage || PageSlab(ppage) || page_has_type(ppage))
			pcount = 0;
		else
			pcount = page_mapcount(ppage);

		if (put_user(pcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecount_proc_ops = {
	.proc_lseek	= mem_lseek,
	.proc_read	= kpagecount_read,
};
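
/*
 * Userspace usage sketch (illustrative only, not part of this file;
 * assumes a privileged reader, since the file is created with S_IRUSR
 * below, and takes the pfn from e.g. /proc/pid/pagemap). Each 8-byte
 * entry at offset pfn * 8 holds the map count of that pfn:
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	static uint64_t read_kpagecount(uint64_t pfn)
 *	{
 *		uint64_t count = 0;
 *		int fd = open("/proc/kpagecount", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 0;
 *		if (pread(fd, &count, sizeof(count), pfn * 8) != sizeof(count))
 *			count = 0;
 *		close(fd);
 *		return count;
 *	}
 */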

/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the flags of the
 * corresponding physical page.
 */

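/*
 * Move one page flag from its kernel-internal bit position @kbit to
 * its ABI-stable userspace position @ubit: the result has bit @ubit
 * set iff bit @kbit is set in @kflags. E.g. kpf_copy_bit(k, KPF_DIRTY,
 * PG_dirty) reports PG_dirty at the stable KPF_DIRTY position,
 * whatever value the kernel's internal PG_dirty happens to have.
 */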
static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}

u64 stable_page_flags(struct page *page)
{
	u64 k;
	u64 u;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;

	k = page->flags;
	u = 0;

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 *
	 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
	 * simple test in page_mapped() is not enough.
	 */
	if (!PageSlab(page) && page_mapped(page))
		u |= 1 << KPF_MMAP;
	if (PageAnon(page))
		u |= 1 << KPF_ANON;
	if (PageKsm(page))
		u |= 1 << KPF_KSM;

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (PageHead(page))
		u |= 1 << KPF_COMPOUND_HEAD;
	if (PageTail(page))
		u |= 1 << KPF_COMPOUND_TAIL;
	if (PageHuge(page))
		u |= 1 << KPF_HUGE;
	/*
	 * PageTransCompound can be true for non-huge compound pages (slab
	 * pages or pages allocated by drivers with __GFP_COMP) because it
	 * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
	 * to make sure a given page is a thp, not a non-huge compound page.
	 */
	else if (PageTransCompound(page)) {
		struct page *head = compound_head(page);

		if (PageLRU(head) || PageAnon(head))
			u |= 1 << KPF_THP;
		else if (is_huge_zero_page(head)) {
			u |= 1 << KPF_ZERO_PAGE;
			u |= 1 << KPF_THP;
		}
	} else if (is_zero_pfn(page_to_pfn(page)))
		u |= 1 << KPF_ZERO_PAGE;

	/*
	 * Caveats on high order pages: page->_refcount will only be set
	 * to -1 on the head page; SLUB/SLQB do the same for PG_slab;
	 * SLOB won't set PG_slab at all on compound pages.
	 */
	if (PageBuddy(page))
		u |= 1 << KPF_BUDDY;
	else if (page_count(page) == 0 && is_free_buddy_page(page))
		u |= 1 << KPF_BUDDY;

	if (PageOffline(page))
		u |= 1 << KPF_OFFLINE;
	if (PageTable(page))
		u |= 1 << KPF_PGTABLE;

	if (page_is_idle(page))
		u |= 1 << KPF_IDLE;

	u |= kpf_copy_bit(k, KPF_LOCKED,	PG_locked);

	u |= kpf_copy_bit(k, KPF_SLAB,		PG_slab);
	if (PageTail(page) && PageSlab(compound_head(page)))
		u |= 1 << KPF_SLAB;

	u |= kpf_copy_bit(k, KPF_ERROR,		PG_error);
	u |= kpf_copy_bit(k, KPF_DIRTY,		PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE,	PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK,	PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU,		PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED,	PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE,	PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM,	PG_reclaim);

	if (PageSwapCache(page))
		u |= 1 << KPF_SWAPCACHE;
	u |= kpf_copy_bit(k, KPF_SWAPBACKED,	PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE,	PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED,	PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
	u |= kpf_copy_bit(k, KPF_HWPOISON,	PG_hwpoison);
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	u |= kpf_copy_bit(k, KPF_UNCACHED,	PG_uncached);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED,	PG_reserved);
	u |= kpf_copy_bit(k, KPF_MAPPEDTODISK,	PG_mappedtodisk);
	u |= kpf_copy_bit(k, KPF_PRIVATE,	PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2,	PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE,	PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH,		PG_arch_1);
#ifdef CONFIG_64BIT
	u |= kpf_copy_bit(k, KPF_ARCH_2,	PG_arch_2);
#endif

	return u;
}

static ssize_t kpageflags_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires identifying
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (put_user(stable_page_flags(ppage), out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpageflags_proc_ops = {
	.proc_lseek	= mem_lseek,
	.proc_read	= kpageflags_read,
};
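
/*
 * Userspace usage sketch (illustrative only, not part of this file).
 * The exported bits are the stable KPF_* values from the uapi header
 * <linux/kernel-page-flags.h>, not the kernel-internal PG_* bits, so
 * a reader can test them directly:
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <linux/kernel-page-flags.h>
 *
 *	static void print_flags(int fd, uint64_t pfn)
 *	{
 *		uint64_t f;
 *
 *		if (pread(fd, &f, sizeof(f), pfn * 8) != sizeof(f))
 *			return;
 *		printf("pfn %llu:%s%s%s\n", (unsigned long long)pfn,
 *		       f & (1ULL << KPF_NOPAGE) ? " nopage" : "",
 *		       f & (1ULL << KPF_LRU) ? " lru" : "",
 *		       f & (1ULL << KPF_THP) ? " thp" : "");
 *	}
 */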

#ifdef CONFIG_MEMCG
static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 ino;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires identifying
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (ppage)
			ino = page_cgroup_ino(ppage);
		else
			ino = 0;

		if (put_user(ino, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecgroup_proc_ops = {
	.proc_lseek	= mem_lseek,
	.proc_read	= kpagecgroup_read,
};
#endif /* CONFIG_MEMCG */
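
/*
 * Userspace usage sketch for /proc/kpagecgroup (illustrative only, not
 * part of this file). Each entry is the inode number of the closest
 * online memory cgroup the page is charged to, or 0 when none can be
 * determined; it can be matched against st_ino of cgroupfs directories:
 *
 *	uint64_t ino = 0;
 *	int fd = open("/proc/kpagecgroup", O_RDONLY);
 *
 *	if (fd >= 0 && pread(fd, &ino, sizeof(ino), pfn * 8) == sizeof(ino))
 *		printf("pfn %llu -> memcg inode %llu\n",
 *		       (unsigned long long)pfn, (unsigned long long)ino);
 */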

static int __init proc_page_init(void)
{
	proc_create("kpagecount", S_IRUSR, NULL, &kpagecount_proc_ops);
	proc_create("kpageflags", S_IRUSR, NULL, &kpageflags_proc_ops);
#ifdef CONFIG_MEMCG
	proc_create("kpagecgroup", S_IRUSR, NULL, &kpagecgroup_proc_ops);
#endif
	return 0;
}
fs_initcall(proc_page_init);