/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <asm/facility.h>
#include <asm/page-states.h>

/* 0 = disabled, 1 = ESSA available, 2 = ESSA with no-dat states (facility 147) */
static int cmma_flag = 1;

/* Parse the "cmma=" kernel command line parameter. */
static int __init cmma(char *str)
{
	char *parm;

	parm = strstrip(str);
	if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
		cmma_flag = 1;
		return 1;
	}
	cmma_flag = 0;
	if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
		return 1;
	return 0;
}
__setup("cmma=", cmma);

/*
 * Probe for the ESSA instruction. If it is not available the exception
 * table entry leaves the preset -EOPNOTSUPP in the return register.
 */
static inline int cmma_test_essa(void)
{
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1");

	/* test ESSA_GET_STATE */
	asm volatile(
		"	.insn	rrf,0xb9ab0000,%1,%1,%2,0\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=&d" (rc), "+&d" (tmp)
		: "i" (ESSA_GET_STATE), "0" (-EOPNOTSUPP));
	return rc;
}

void __init cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	if (test_facility(147))
		cmma_flag = 2;
}

static inline unsigned char get_page_state(struct page *page)
{
	unsigned char state;

	asm volatile("	.insn	rrf,0xb9ab0000,%0,%1,%2,0"
		     : "=&d" (state)
		     : "a" (page_to_phys(page)),
		       "i" (ESSA_GET_STATE));
	return state & 0x3f;
}

static inline void set_page_unused(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}

static inline void set_page_stable_dat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}

static inline void set_page_stable_nodat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE_NODAT));
}

static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || pmd_large(*pmd))
			continue;
		page = virt_to_page(pmd_val(*pmd));
		set_bit(PG_arch_1, &page->flags);
	} while (pmd++, addr = next, addr != end);
}

static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pud_t *pud;
	int i;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || pud_large(*pud))
			continue;
		if (!pud_folded(*pud)) {
			page = virt_to_page(pud_val(*pud));
			/* a segment table spans four pages */
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pmd(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	p4d_t *p4d;
	int i;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d))
			continue;
		if (!p4d_folded(*p4d)) {
			page = virt_to_page(p4d_val(*p4d));
			/* a region table spans four pages */
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pud(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}

static void mark_kernel_pgd(void)
{
	unsigned long addr, next;
	struct page *page;
	pgd_t *pgd;
	int i;

	addr = 0;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, MODULES_END);
		if (pgd_none(*pgd))
			continue;
		if (!pgd_folded(*pgd)) {
			page = virt_to_page(pgd_val(*pgd));
			/* a region table spans four pages */
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_p4d(pgd, addr, next);
	} while (pgd++, addr = next, addr != MODULES_END);
}

void __init cmma_init_nodat(void)
{
	struct memblock_region *reg;
	struct page *page;
	unsigned long start, end, ix;

	if (cmma_flag < 2)
		return;
	/* Mark pages used in kernel page tables */
	mark_kernel_pgd();

	/* Set all kernel pages not used for page tables to stable/no-dat */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);
		end = memblock_region_memory_end_pfn(reg);
		page = pfn_to_page(start);
		for (ix = start; ix < end; ix++, page++) {
			if (__test_and_clear_bit(PG_arch_1, &page->flags))
				continue;	/* skip page table pages */
			if (!list_empty(&page->lru))
				continue;	/* skip free pages */
			set_page_stable_nodat(page, 0);
		}
	}
}

void arch_free_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_unused(page, order);
}

void arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	if (cmma_flag < 2)
		set_page_stable_dat(page, order);
	else
		set_page_stable_nodat(page, order);
}

void arch_set_page_dat(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_stable_dat(page, order);
}

void arch_set_page_nodat(struct page *page, int order)
{
	if (cmma_flag < 2)
		return;
	set_page_stable_nodat(page, order);
}

int arch_test_page_nodat(struct page *page)
{
	unsigned char state;

	if (cmma_flag < 2)
		return 0;
	state = get_page_state(page);
	return !!(state & 0x20);
}

void arch_set_page_states(int make_stable)
{
	unsigned long flags, order, t;
	struct list_head *l;
	struct page *page;
	struct zone *zone;

	if (!cmma_flag)
		return;
	if (make_stable)
		drain_local_pages(NULL);
	for_each_populated_zone(zone) {
		spin_lock_irqsave(&zone->lock, flags);
		for_each_migratetype_order(order, t) {
			list_for_each(l, &zone->free_area[order].free_list[t]) {
				page = list_entry(l, struct page, lru);
				/* convert the whole order-sized block, matching set_page_unused */
				if (make_stable)
					set_page_stable_dat(page, order);
				else
					set_page_unused(page, order);
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}