#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/mmzone.h>
#include <linux/proc_fs.h>
#include <linux/quicklist.h>
#include <linux/seq_file.h>
#include <linux/swap.h>
#include <linux/vmstat.h>
#include <linux/atomic.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_CMA
#include <linux/cma.h>
#endif
#include <asm/page.h>
#include <asm/pgtable.h>
#include "internal.h"

/* Weak default; architectures can override this to append their own lines. */
void __attribute__((weak)) arch_report_meminfo(struct seq_file *m)
{
}

/*
 * Emit one /proc/meminfo line: a 16-byte label, the page count converted
 * to kB and right-aligned in an 8-character field, then a " kB" suffix.
 */
static void show_val_kb(struct seq_file *m, const char *s, unsigned long num)
{
	char v[32];
	static const char blanks[7] = {' ', ' ', ' ', ' ', ' ', ' ', ' '};
	int len;

	len = num_to_str(v, sizeof(v), num << (PAGE_SHIFT - 10));

	seq_write(m, s, 16);

	if (len > 0) {
		if (len < 8)
			seq_write(m, blanks, 8 - len);

		seq_write(m, v, len);
	}
	seq_write(m, " kB\n", 4);
}

static int meminfo_proc_show(struct seq_file *m, void *v)
{
	struct sysinfo i;
	unsigned long committed;
	long cached;
	long available;
	unsigned long pages[NR_LRU_LISTS];
	int lru;

	si_meminfo(&i);
	si_swapinfo(&i);
	committed = percpu_counter_read_positive(&vm_committed_as);

	/*
	 * Page cache pages that are neither swap cache nor buffers.  The
	 * counters are sampled without locking, so clamp transient negative
	 * results to zero.
	 */
	cached = global_node_page_state(NR_FILE_PAGES) -
			total_swapcache_pages() - i.bufferram;
	if (cached < 0)
		cached = 0;

	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
		pages[lru] = global_node_page_state(NR_LRU_BASE + lru);

	available = si_mem_available();

	show_val_kb(m, "MemTotal:       ", i.totalram);
	show_val_kb(m, "MemFree:        ", i.freeram);
	show_val_kb(m, "MemAvailable:   ", available);
	show_val_kb(m, "Buffers:        ", i.bufferram);
	show_val_kb(m, "Cached:         ", cached);
	show_val_kb(m, "SwapCached:     ", total_swapcache_pages());
	show_val_kb(m, "Active:         ", pages[LRU_ACTIVE_ANON] +
					   pages[LRU_ACTIVE_FILE]);
	show_val_kb(m, "Inactive:       ", pages[LRU_INACTIVE_ANON] +
					   pages[LRU_INACTIVE_FILE]);
	show_val_kb(m, "Active(anon):   ", pages[LRU_ACTIVE_ANON]);
	show_val_kb(m, "Inactive(anon): ", pages[LRU_INACTIVE_ANON]);
	show_val_kb(m, "Active(file):   ", pages[LRU_ACTIVE_FILE]);
	show_val_kb(m, "Inactive(file): ", pages[LRU_INACTIVE_FILE]);
	show_val_kb(m, "Unevictable:    ", pages[LRU_UNEVICTABLE]);
	show_val_kb(m, "Mlocked:        ", global_page_state(NR_MLOCK));

#ifdef CONFIG_HIGHMEM
	show_val_kb(m, "HighTotal:      ", i.totalhigh);
	show_val_kb(m, "HighFree:       ", i.freehigh);
	show_val_kb(m, "LowTotal:       ", i.totalram - i.totalhigh);
	show_val_kb(m, "LowFree:        ", i.freeram - i.freehigh);
#endif

#ifndef CONFIG_MMU
	show_val_kb(m, "MmapCopy:       ",
		    (unsigned long)atomic_long_read(&mmap_pages_allocated));
#endif

	show_val_kb(m, "SwapTotal:      ", i.totalswap);
	show_val_kb(m, "SwapFree:       ", i.freeswap);
	show_val_kb(m, "Dirty:          ",
		    global_node_page_state(NR_FILE_DIRTY));
	show_val_kb(m, "Writeback:      ",
		    global_node_page_state(NR_WRITEBACK));
	show_val_kb(m, "AnonPages:      ",
		    global_node_page_state(NR_ANON_MAPPED));
	show_val_kb(m, "Mapped:         ",
		    global_node_page_state(NR_FILE_MAPPED));
	show_val_kb(m, "Shmem:          ", i.sharedram);
	show_val_kb(m, "Slab:           ",
		    global_node_page_state(NR_SLAB_RECLAIMABLE) +
		    global_node_page_state(NR_SLAB_UNRECLAIMABLE));

	show_val_kb(m, "SReclaimable:   ",
		    global_node_page_state(NR_SLAB_RECLAIMABLE));
	show_val_kb(m, "SUnreclaim:     ",
		    global_node_page_state(NR_SLAB_UNRECLAIMABLE));
	seq_printf(m, "KernelStack:    %8lu kB\n",
		   global_page_state(NR_KERNEL_STACK_KB));
	show_val_kb(m, "PageTables:     ",
		    global_page_state(NR_PAGETABLE));
#ifdef CONFIG_QUICKLIST
	show_val_kb(m, "Quicklists:     ", quicklist_total_size());
#endif

	show_val_kb(m, "NFS_Unstable:   ",
		    global_node_page_state(NR_UNSTABLE_NFS));
	show_val_kb(m, "Bounce:         ",
		    global_page_state(NR_BOUNCE));
	show_val_kb(m, "WritebackTmp:   ",
		    global_node_page_state(NR_WRITEBACK_TEMP));
	show_val_kb(m, "CommitLimit:    ", vm_commit_limit());
	show_val_kb(m, "Committed_AS:   ", committed);
	seq_printf(m, "VmallocTotal:   %8lu kB\n",
		   (unsigned long)VMALLOC_TOTAL >> 10);
	/*
	 * Computing these would require walking the whole vmalloc list,
	 * so 0 is reported instead; the fields are kept for compatibility.
	 */
	show_val_kb(m, "VmallocUsed:    ", 0ul);
	show_val_kb(m, "VmallocChunk:   ", 0ul);

#ifdef CONFIG_MEMORY_FAILURE
	seq_printf(m, "HardwareCorrupted: %5lu kB\n",
		   atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10));
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	show_val_kb(m, "AnonHugePages:  ",
		    global_node_page_state(NR_ANON_THPS) * HPAGE_PMD_NR);
	show_val_kb(m, "ShmemHugePages: ",
		    global_node_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR);
	show_val_kb(m, "ShmemPmdMapped: ",
		    global_node_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR);
#endif

#ifdef CONFIG_CMA
	show_val_kb(m, "CmaTotal:       ", totalcma_pages);
	show_val_kb(m, "CmaFree:        ",
		    global_page_state(NR_FREE_CMA_PAGES));
#endif

	hugetlb_report_meminfo(m);

	arch_report_meminfo(m);

	return 0;
}

static int meminfo_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, meminfo_proc_show, NULL);
}

static const struct file_operations meminfo_proc_fops = {
	.open		= meminfo_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Register /proc/meminfo during boot. */
static int __init proc_meminfo_init(void)
{
	proc_create("meminfo", 0, NULL, &meminfo_proc_fops);
	return 0;
}
fs_initcall(proc_meminfo_init);