#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/kernel-page-flags.h>
#include <asm/uaccess.h>
#include "internal.h"

#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)

/* /proc/kpagecount - an array exposing page mapcounts
 *
 * Each entry is a u64 representing the number of times the
 * corresponding physical page is mapped.
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 pcount;

	pfn = src / KPMSIZE;
	count = min_t(size_t, count, (max_pfn * KPMSIZE) - src);
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;

	while (count > 0) {
		if (pfn_valid(pfn))
			ppage = pfn_to_page(pfn);
		else
			ppage = NULL;
		if (!ppage || PageSlab(ppage))
			pcount = 0;
		else
			pcount = page_mapcount(ppage);

		if (put_user(pcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct file_operations proc_kpagecount_operations = {
	.llseek = mem_lseek,
	.read = kpagecount_read,
};

/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */

static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}

u64 stable_page_flags(struct page *page)
{
	u64 k;
	u64 u;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;

	k = page->flags;
	u = 0;

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 *
	 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
	 * simple test in page_mapped() is not enough.
	 */
	if (!PageSlab(page) && page_mapped(page))
		u |= 1 << KPF_MMAP;
	if (PageAnon(page))
		u |= 1 << KPF_ANON;
	if (PageKsm(page))
		u |= 1 << KPF_KSM;

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (PageHead(page))
		u |= 1 << KPF_COMPOUND_HEAD;
	if (PageTail(page))
		u |= 1 << KPF_COMPOUND_TAIL;
	if (PageHuge(page))
		u |= 1 << KPF_HUGE;
	/*
	 * PageTransCompound can be true for non-huge compound pages (slab
	 * pages or pages allocated by drivers with __GFP_COMP) because it
	 * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
	 * to make sure a given page is a THP, not a non-huge compound page.
	 */
	else if (PageTransCompound(page) && (PageLRU(compound_head(page)) ||
					     PageAnon(compound_head(page))))
		u |= 1 << KPF_THP;

	/*
	 * Caveats on high order pages: page->_count will only be set
	 * to -1 on the head page; SLUB/SLQB do the same for PG_slab;
	 * SLOB won't set PG_slab at all on compound pages.
	 */
	if (PageBuddy(page))
		u |= 1 << KPF_BUDDY;

	u |= kpf_copy_bit(k, KPF_LOCKED,	PG_locked);

	u |= kpf_copy_bit(k, KPF_SLAB,		PG_slab);

	u |= kpf_copy_bit(k, KPF_ERROR,		PG_error);
	u |= kpf_copy_bit(k, KPF_DIRTY,		PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE,	PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK,	PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU,		PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED,	PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE,	PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM,	PG_reclaim);

	u |= kpf_copy_bit(k, KPF_SWAPCACHE,	PG_swapcache);
	u |= kpf_copy_bit(k, KPF_SWAPBACKED,	PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE,	PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED,	PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
	u |= kpf_copy_bit(k, KPF_HWPOISON,	PG_hwpoison);
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	u |= kpf_copy_bit(k, KPF_UNCACHED,	PG_uncached);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED,	PG_reserved);
	u |= kpf_copy_bit(k, KPF_MAPPEDTODISK,	PG_mappedtodisk);
	u |= kpf_copy_bit(k, KPF_PRIVATE,	PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2,	PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE,	PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH,		PG_arch_1);

	return u;
}

static ssize_t kpageflags_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;

	while (count > 0) {
		if (pfn_valid(pfn))
			ppage = pfn_to_page(pfn);
		else
			ppage = NULL;

		if (put_user(stable_page_flags(ppage), out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct file_operations proc_kpageflags_operations = {
	.llseek = mem_lseek,
	.read = kpageflags_read,
};

static int __init proc_page_init(void)
{
	proc_create("kpagecount", S_IRUSR, NULL, &proc_kpagecount_operations);
	proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations);
	return 0;
}
fs_initcall(proc_page_init);
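
/*
 * Userspace usage sketch (illustrative only, not compiled as part of
 * this file): both files are arrays of little-endian u64s indexed by
 * PFN, so entry N lives at byte offset N * KPMSIZE and one pread() can
 * fetch it.  Reading requires root, since both files are created above
 * with mode S_IRUSR.  The PFN below is an arbitrary example value;
 * real tools derive PFNs from /proc/pid/pagemap.  The KPF_* bit
 * numbers are the ones exported in <linux/kernel-page-flags.h>.
 *
 *	#include <fcntl.h>
 *	#include <inttypes.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <linux/kernel-page-flags.h>
 *
 *	int main(void)
 *	{
 *		uint64_t pfn = 0x10000;		// example PFN only
 *		off_t off = pfn * sizeof(uint64_t);
 *		uint64_t pcount, pflags;
 *		int cfd = open("/proc/kpagecount", O_RDONLY);
 *		int ffd = open("/proc/kpageflags", O_RDONLY);
 *
 *		if (cfd < 0 || ffd < 0)
 *			return 1;
 *		if (pread(cfd, &pcount, sizeof(pcount), off) == sizeof(pcount) &&
 *		    pread(ffd, &pflags, sizeof(pflags), off) == sizeof(pflags))
 *			printf("pfn %#" PRIx64 ": mapcount %" PRIu64 "%s%s\n",
 *			       pfn, pcount,
 *			       (pflags & (1ULL << KPF_NOPAGE)) ? " NOPAGE" : "",
 *			       (pflags & (1ULL << KPF_LRU)) ? " LRU" : "");
 *		close(cfd);
 *		close(ffd);
 *		return 0;
 *	}
 */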