// SPDX-License-Identifier: GPL-2.0
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/huge_mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/kernel-page-flags.h>
#include <linux/uaccess.h>
#include "internal.h"

#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
#define KPMBITS (KPMSIZE * BITS_PER_BYTE)

static inline unsigned long get_max_dump_pfn(void)
{
#ifdef CONFIG_SPARSEMEM
	/*
	 * The memmap of early sections is completely populated and marked
	 * online even if max_pfn does not fall on a section boundary -
	 * pfn_to_online_page() will succeed on all pages. Allow inspecting
	 * these memmaps.
	 */
	return round_up(max_pfn, PAGES_PER_SECTION);
#else
	return max_pfn;
#endif
}

/* /proc/kpagecount - an array exposing page counts
 *
 * Each entry is a u64 representing the corresponding
 * physical page count.
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 pcount;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires identifying
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (!ppage || PageSlab(ppage) || page_has_type(ppage))
			pcount = 0;
		else
			pcount = page_mapcount(ppage);

		if (put_user(pcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecount_proc_ops = {
	.proc_lseek	= mem_lseek,
	.proc_read	= kpagecount_read,
};

/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */

static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}

u64 stable_page_flags(struct page *page)
{
	u64 k;
	u64 u;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;

	k = page->flags;
	u = 0;
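
	/*
	 * From here on, k carries the raw page->flags word and u
	 * accumulates the stable, user-visible KPF_* encoding that is
	 * derived from it below.
	 */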

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 *
	 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
	 * simple test in page_mapped() is not enough.
	 */
	if (!PageSlab(page) && page_mapped(page))
		u |= 1 << KPF_MMAP;
	if (PageAnon(page))
		u |= 1 << KPF_ANON;
	if (PageKsm(page))
		u |= 1 << KPF_KSM;

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (PageHead(page))
		u |= 1 << KPF_COMPOUND_HEAD;
	if (PageTail(page))
		u |= 1 << KPF_COMPOUND_TAIL;
	if (PageHuge(page))
		u |= 1 << KPF_HUGE;
	/*
	 * PageTransCompound can be true for non-huge compound pages (slab
	 * pages or pages allocated by drivers with __GFP_COMP) because it
	 * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
	 * to make sure a given page is a thp, not a non-huge compound page.
	 */
	else if (PageTransCompound(page)) {
		struct page *head = compound_head(page);

		if (PageLRU(head) || PageAnon(head))
			u |= 1 << KPF_THP;
		else if (is_huge_zero_page(head)) {
			u |= 1 << KPF_ZERO_PAGE;
			u |= 1 << KPF_THP;
		}
	} else if (is_zero_pfn(page_to_pfn(page)))
		u |= 1 << KPF_ZERO_PAGE;

	/*
	 * Caveats on high order pages: page->_refcount will only be set
	 * -1 on the head page; SLUB/SLQB do the same for PG_slab;
	 * SLOB won't set PG_slab at all on compound pages.
	 */
	if (PageBuddy(page))
		u |= 1 << KPF_BUDDY;
	else if (page_count(page) == 0 && is_free_buddy_page(page))
		u |= 1 << KPF_BUDDY;

	if (PageOffline(page))
		u |= 1 << KPF_OFFLINE;
	if (PageTable(page))
		u |= 1 << KPF_PGTABLE;

	if (page_is_idle(page))
		u |= 1 << KPF_IDLE;

	u |= kpf_copy_bit(k, KPF_LOCKED,	PG_locked);

	u |= kpf_copy_bit(k, KPF_SLAB,		PG_slab);
	if (PageTail(page) && PageSlab(compound_head(page)))
		u |= 1 << KPF_SLAB;

	u |= kpf_copy_bit(k, KPF_ERROR,		PG_error);
	u |= kpf_copy_bit(k, KPF_DIRTY,		PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE,	PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK,	PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU,		PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED,	PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE,	PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM,	PG_reclaim);

	if (PageSwapCache(page))
		u |= 1 << KPF_SWAPCACHE;
	u |= kpf_copy_bit(k, KPF_SWAPBACKED,	PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE,	PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED,	PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
	u |= kpf_copy_bit(k, KPF_HWPOISON,	PG_hwpoison);
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	u |= kpf_copy_bit(k, KPF_UNCACHED,	PG_uncached);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED,	PG_reserved);
	u |= kpf_copy_bit(k, KPF_MAPPEDTODISK,	PG_mappedtodisk);
	u |= kpf_copy_bit(k, KPF_PRIVATE,	PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2,	PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE,	PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH,		PG_arch_1);

	return u;
}

static ssize_t kpageflags_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
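		/*
		 * stable_page_flags() copes with a NULL page itself and
		 * reports memory holes as KPF_NOPAGE, so the result of
		 * pfn_to_online_page() needs no explicit check here.
		 */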
		/*
		 * TODO: ZONE_DEVICE support requires identifying
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (put_user(stable_page_flags(ppage), out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpageflags_proc_ops = {
	.proc_lseek	= mem_lseek,
	.proc_read	= kpageflags_read,
};

#ifdef CONFIG_MEMCG
static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 ino;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires identifying
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (ppage)
			ino = page_cgroup_ino(ppage);
		else
			ino = 0;

		if (put_user(ino, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecgroup_proc_ops = {
	.proc_lseek	= mem_lseek,
	.proc_read	= kpagecgroup_read,
};
#endif /* CONFIG_MEMCG */

static int __init proc_page_init(void)
{
	proc_create("kpagecount", S_IRUSR, NULL, &kpagecount_proc_ops);
	proc_create("kpageflags", S_IRUSR, NULL, &kpageflags_proc_ops);
#ifdef CONFIG_MEMCG
	proc_create("kpagecgroup", S_IRUSR, NULL, &kpagecgroup_proc_ops);
#endif
	return 0;
}
fs_initcall(proc_page_init);
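
/*
 * Illustrative sketch (not part of this file or of the kernel build):
 * a minimal user-space reader for the interfaces registered above.
 * Entries are KPMSIZE (8) bytes wide, so the file offset for a given
 * PFN is pfn * 8, which mem_lseek()/pread() accept directly. The PFN
 * below is a hypothetical example - in practice it would come from
 * e.g. /proc/<pid>/pagemap. Root access is required, since the files
 * are created with S_IRUSR and owned by root.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		uint64_t pfn = 0x1234;	// hypothetical example PFN
 *		uint64_t count, flags;
 *		int kpc = open("/proc/kpagecount", O_RDONLY);
 *		int kpf = open("/proc/kpageflags", O_RDONLY);
 *
 *		if (kpc < 0 || kpf < 0)
 *			return 1;
 *		// one u64 per PFN: seek to pfn * sizeof(u64) and read it
 *		if (pread(kpc, &count, sizeof(count),
 *			  pfn * sizeof(count)) != sizeof(count))
 *			return 1;
 *		if (pread(kpf, &flags, sizeof(flags),
 *			  pfn * sizeof(flags)) != sizeof(flags))
 *			return 1;
 *		printf("pfn %#llx: mapcount %llu flags %#llx\n",
 *		       (unsigned long long)pfn, (unsigned long long)count,
 *		       (unsigned long long)flags);
 *		return 0;
 *	}
 */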