// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <asm/asm-extable.h>
#include <asm/facility.h>
#include <asm/page-states.h>

/* 0: CMMA disabled, 1: ESSA available, 2: ESSA and no-dat state available */
static int cmma_flag = 1;

static int __init cmma(char *str)
{
	bool enabled;

	if (!kstrtobool(str, &enabled))
		cmma_flag = enabled;
	return 1;
}
__setup("cmma=", cmma);

static inline int cmma_test_essa(void)
{
	unsigned long tmp = 0;
	int rc = -EOPNOTSUPP;

	/* test ESSA_GET_STATE */
	asm volatile(
		"	.insn	rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
		"0:	la	%[rc],0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: [rc] "+&d" (rc), [tmp] "+&d" (tmp)
		: [cmd] "i" (ESSA_GET_STATE));
	return rc;
}

void __init cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	/* The stable/no-dat page state requires facility 147 */
	if (test_facility(147))
		cmma_flag = 2;
}

static inline unsigned char get_page_state(struct page *page)
{
	unsigned char state;

	asm volatile("	.insn	rrf,0xb9ab0000,%0,%1,%2,0"
		     : "=&d" (state)
		     : "a" (page_to_phys(page)),
		       "i" (ESSA_GET_STATE));
	return state & 0x3f;
}

static inline void set_page_unused(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}

static inline void set_page_stable_dat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}

static inline void set_page_stable_nodat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE_NODAT));
}

static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || pmd_large(*pmd))
			continue;
		page = phys_to_page(pmd_val(*pmd));
		set_bit(PG_arch_1, &page->flags);
	} while (pmd++, addr = next, addr != end);
}

static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pud_t *pud;
	int i;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || pud_large(*pud))
			continue;
		if (!pud_folded(*pud)) {
			page = phys_to_page(pud_val(*pud));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pmd(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	p4d_t *p4d;
	int i;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d))
			continue;
		if (!p4d_folded(*p4d)) {
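			/*
			 * An unfolded entry points to a 16KB region table
			 * of its own, i.e. four pages.  Mark all of them so
			 * that cmma_init_nodat() will not move them to the
			 * stable/no-dat state.
			 */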
			page = phys_to_page(p4d_val(*p4d));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pud(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}

static void mark_kernel_pgd(void)
{
	unsigned long addr, next;
	struct page *page;
	pgd_t *pgd;
	int i;

	addr = 0;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, MODULES_END);
		if (pgd_none(*pgd))
			continue;
		if (!pgd_folded(*pgd)) {
			page = phys_to_page(pgd_val(*pgd));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_p4d(pgd, addr, next);
	} while (pgd++, addr = next, addr != MODULES_END);
}

void __init cmma_init_nodat(void)
{
	struct page *page;
	unsigned long start, end, ix;
	int i;

	if (cmma_flag < 2)
		return;
	/* Mark pages used in kernel page tables */
	mark_kernel_pgd();

	/* Set all kernel pages not used for page tables to stable/no-dat */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
		page = pfn_to_page(start);
		for (ix = start; ix < end; ix++, page++) {
			if (__test_and_clear_bit(PG_arch_1, &page->flags))
				continue;	/* skip page table pages */
			if (!list_empty(&page->lru))
				continue;	/* skip free pages */
			set_page_stable_nodat(page, 0);
		}
	}
}

void arch_free_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_unused(page, order);
}

void arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	if (cmma_flag < 2)
		set_page_stable_dat(page, order);
	else
		set_page_stable_nodat(page, order);
}

void arch_set_page_dat(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_stable_dat(page, order);
}
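
/*
 * Usage sketch (added note, not part of the original file): s390's
 * <asm/page.h> defines HAVE_ARCH_ALLOC_PAGE and HAVE_ARCH_FREE_PAGE, so
 * the page allocator invokes arch_alloc_page() and arch_free_page()
 * above on every allocation and free.  A plain
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *	__free_pages(page, 0);
 *
 * therefore moves the page to the stable (or stable/no-dat) guest state
 * on allocation and back to the unused state on free.  Booting with
 * "cmma=off" on the kernel command line disables both transitions.
 */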