#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/huge_mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/kernel-page-flags.h>
#include <asm/uaccess.h>
#include "internal.h"

#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
#define KPMBITS (KPMSIZE * BITS_PER_BYTE)

/* /proc/kpagecount - an array exposing page map counts
 *
 * Each entry is a u64 representing how many times the
 * corresponding physical page is mapped.
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
                               size_t count, loff_t *ppos)
{
        u64 __user *out = (u64 __user *)buf;
        struct page *ppage;
        unsigned long src = *ppos;
        unsigned long pfn;
        ssize_t ret = 0;
        u64 pcount;

        pfn = src / KPMSIZE;
        count = min_t(size_t, count, (max_pfn * KPMSIZE) - src);
        if (src & KPMMASK || count & KPMMASK)
                return -EINVAL;

        while (count > 0) {
                if (pfn_valid(pfn))
                        ppage = pfn_to_page(pfn);
                else
                        ppage = NULL;
                if (!ppage || PageSlab(ppage))
                        pcount = 0;
                else
                        pcount = page_mapcount(ppage);

                if (put_user(pcount, out)) {
                        ret = -EFAULT;
                        break;
                }

                pfn++;
                out++;
                count -= KPMSIZE;

                cond_resched();
        }

        *ppos += (char __user *)out - buf;
        if (!ret)
                ret = (char __user *)out - buf;
        return ret;
}

static const struct file_operations proc_kpagecount_operations = {
        .llseek = mem_lseek,
        .read = kpagecount_read,
};
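
/*
 * Usage sketch (illustrative only, not part of this file): a minimal
 * userspace reader for /proc/kpagecount. It relies only on the layout
 * implemented above, one u64 per PFN, so the entry for PFN n lives at
 * byte offset n * sizeof(u64), and must run as root (the file is
 * created with S_IRUSR). The PFN below is a hypothetical placeholder;
 * a real tool would obtain it from e.g. /proc/pid/pagemap.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		uint64_t pfn = 0x1000;	// hypothetical PFN to inspect
 *		uint64_t mapcount;
 *		int fd = open("/proc/kpagecount", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (pread(fd, &mapcount, sizeof(mapcount),
 *			  pfn * sizeof(uint64_t)) != sizeof(mapcount))
 *			return 1;
 *		printf("pfn 0x%llx mapped %llu time(s)\n",
 *		       (unsigned long long)pfn,
 *		       (unsigned long long)mapcount);
 *		close(fd);
 *		return 0;
 *	}
 */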

/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */

static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
        return ((kflags >> kbit) & 1) << ubit;
}

u64 stable_page_flags(struct page *page)
{
        u64 k;
        u64 u;

        /*
         * pseudo flag: KPF_NOPAGE
         * it differentiates a memory hole from a page with no flags
         */
        if (!page)
                return 1 << KPF_NOPAGE;

        k = page->flags;
        u = 0;

        /*
         * pseudo flags for the well known (anonymous) memory mapped pages
         *
         * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
         * simple test in page_mapped() is not enough.
         */
        if (!PageSlab(page) && page_mapped(page))
                u |= 1 << KPF_MMAP;
        if (PageAnon(page))
                u |= 1 << KPF_ANON;
        if (PageKsm(page))
                u |= 1 << KPF_KSM;

        /*
         * compound pages: export both head/tail info
         * they together define a compound page's start/end pos and order
         */
        if (PageHead(page))
                u |= 1 << KPF_COMPOUND_HEAD;
        if (PageTail(page))
                u |= 1 << KPF_COMPOUND_TAIL;
        if (PageHuge(page))
                u |= 1 << KPF_HUGE;
        /*
         * PageTransCompound can be true for non-huge compound pages (slab
         * pages or pages allocated by drivers with __GFP_COMP) because it
         * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
         * to make sure a given page is a thp, not a non-huge compound page.
         */
        else if (PageTransCompound(page)) {
                struct page *head = compound_head(page);

                if (PageLRU(head) || PageAnon(head))
                        u |= 1 << KPF_THP;
                else if (is_huge_zero_page(head)) {
                        u |= 1 << KPF_ZERO_PAGE;
                        u |= 1 << KPF_THP;
                }
        } else if (is_zero_pfn(page_to_pfn(page)))
                u |= 1 << KPF_ZERO_PAGE;

        /*
         * Caveats on high order pages: page->_count will only be set
         * -1 on the head page; SLUB/SLQB do the same for PG_slab;
         * SLOB won't set PG_slab at all on compound pages.
         */
        if (PageBuddy(page))
                u |= 1 << KPF_BUDDY;

        if (PageBalloon(page))
                u |= 1 << KPF_BALLOON;

        if (page_is_idle(page))
                u |= 1 << KPF_IDLE;

        u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);

        u |= kpf_copy_bit(k, KPF_SLAB, PG_slab);

        u |= kpf_copy_bit(k, KPF_ERROR, PG_error);
        u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty);
        u |= kpf_copy_bit(k, KPF_UPTODATE, PG_uptodate);
        u |= kpf_copy_bit(k, KPF_WRITEBACK, PG_writeback);

        u |= kpf_copy_bit(k, KPF_LRU, PG_lru);
        u |= kpf_copy_bit(k, KPF_REFERENCED, PG_referenced);
        u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active);
        u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim);

        u |= kpf_copy_bit(k, KPF_SWAPCACHE, PG_swapcache);
        u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked);

        u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable);
        u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
        u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
        u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached);
#endif

        u |= kpf_copy_bit(k, KPF_RESERVED, PG_reserved);
        u |= kpf_copy_bit(k, KPF_MAPPEDTODISK, PG_mappedtodisk);
        u |= kpf_copy_bit(k, KPF_PRIVATE, PG_private);
        u |= kpf_copy_bit(k, KPF_PRIVATE_2, PG_private_2);
        u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE, PG_owner_priv_1);
        u |= kpf_copy_bit(k, KPF_ARCH, PG_arch_1);

        return u;
}

static ssize_t kpageflags_read(struct file *file, char __user *buf,
                               size_t count, loff_t *ppos)
{
        u64 __user *out = (u64 __user *)buf;
        struct page *ppage;
        unsigned long src = *ppos;
        unsigned long pfn;
        ssize_t ret = 0;

        pfn = src / KPMSIZE;
        count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
        if (src & KPMMASK || count & KPMMASK)
                return -EINVAL;

        while (count > 0) {
                if (pfn_valid(pfn))
                        ppage = pfn_to_page(pfn);
                else
                        ppage = NULL;

                if (put_user(stable_page_flags(ppage), out)) {
                        ret = -EFAULT;
                        break;
                }

                pfn++;
                out++;
                count -= KPMSIZE;

                cond_resched();
        }

        *ppos += (char __user *)out - buf;
        if (!ret)
                ret = (char __user *)out - buf;
        return ret;
}

static const struct file_operations proc_kpageflags_operations = {
        .llseek = mem_lseek,
        .read = kpageflags_read,
};
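
/*
 * Usage sketch (illustrative only, not part of this file): testing a few
 * bits of a /proc/kpageflags entry from userspace. The KPF_* bit numbers
 * come from the uapi header <linux/kernel-page-flags.h>, which defines
 * the positions set by stable_page_flags() above. The PFN below is a
 * hypothetical placeholder.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <linux/kernel-page-flags.h>
 *
 *	int main(void)
 *	{
 *		uint64_t pfn = 0x1000;	// hypothetical PFN to inspect
 *		uint64_t flags;
 *		int fd = open("/proc/kpageflags", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (pread(fd, &flags, sizeof(flags),
 *			  pfn * sizeof(uint64_t)) != sizeof(flags))
 *			return 1;
 *		if (flags & (1ULL << KPF_NOPAGE))
 *			printf("memory hole or invalid PFN\n");
 *		if (flags & (1ULL << KPF_ANON))
 *			printf("anonymous page\n");
 *		if (flags & (1ULL << KPF_THP))
 *			printf("transparent huge page\n");
 *		close(fd);
 *		return 0;
 *	}
 */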

#ifdef CONFIG_MEMCG
static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
{
        u64 __user *out = (u64 __user *)buf;
        struct page *ppage;
        unsigned long src = *ppos;
        unsigned long pfn;
        ssize_t ret = 0;
        u64 ino;

        pfn = src / KPMSIZE;
        count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
        if (src & KPMMASK || count & KPMMASK)
                return -EINVAL;

        while (count > 0) {
                if (pfn_valid(pfn))
                        ppage = pfn_to_page(pfn);
                else
                        ppage = NULL;

                if (ppage)
                        ino = page_cgroup_ino(ppage);
                else
                        ino = 0;

                if (put_user(ino, out)) {
                        ret = -EFAULT;
                        break;
                }

                pfn++;
                out++;
                count -= KPMSIZE;

                cond_resched();
        }

        *ppos += (char __user *)out - buf;
        if (!ret)
                ret = (char __user *)out - buf;
        return ret;
}

static const struct file_operations proc_kpagecgroup_operations = {
        .llseek = mem_lseek,
        .read = kpagecgroup_read,
};
#endif /* CONFIG_MEMCG */

static int __init proc_page_init(void)
{
        proc_create("kpagecount", S_IRUSR, NULL, &proc_kpagecount_operations);
        proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations);
#ifdef CONFIG_MEMCG
        proc_create("kpagecgroup", S_IRUSR, NULL, &proc_kpagecgroup_operations);
#endif
        return 0;
}
fs_initcall(proc_page_init);
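
/*
 * Usage sketch (illustrative only, not part of this file): reading the
 * memory cgroup inode for a PFN from /proc/kpagecgroup and comparing it
 * against the inode of a cgroup directory, which cgroupfs reports via
 * stat(2). Both the PFN and the cgroup path are hypothetical
 * placeholders; the path assumes a mounted memory controller hierarchy.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		uint64_t pfn = 0x1000;	// hypothetical PFN to inspect
 *		uint64_t ino;
 *		struct stat st;
 *		int fd = open("/proc/kpagecgroup", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (pread(fd, &ino, sizeof(ino),
 *			  pfn * sizeof(uint64_t)) != sizeof(ino))
 *			return 1;
 *		// hypothetical cgroup path
 *		if (stat("/sys/fs/cgroup/memory/mygroup", &st) == 0 &&
 *		    ino == st.st_ino)
 *			printf("pfn 0x%llx charged to mygroup\n",
 *			       (unsigned long long)pfn);
 *		close(fd);
 *		return 0;
 *	}
 */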