// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <asm/asm-extable.h>
#include <asm/facility.h>
#include <asm/page-states.h>

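/*
 * CMMA state: 0 = disabled, 1 = ESSA page hinting is available,
 * 2 = ESSA plus the no-dat page states are available (see cmma_init()).
 */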
static int cmma_flag = 1;

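/* Parse the "cmma=" kernel command line parameter */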
static int __init cmma(char *str)
{
	bool enabled;

	if (!kstrtobool(str, &enabled))
		cmma_flag = enabled;
	return 1;
}
__setup("cmma=", cmma);

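/*
 * Execute ESSA with the ESSA_GET_STATE command to test whether the
 * instruction is available. If ESSA is not implemented the instruction
 * raises an exception and the exception table fixup resumes at label 1,
 * skipping the "la" that clears rc, so rc keeps its -EOPNOTSUPP value.
 */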
static inline int cmma_test_essa(void)
{
	unsigned long tmp = 0;
	int rc = -EOPNOTSUPP;

	/* test ESSA_GET_STATE */
	asm volatile(
		"	.insn	rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
		"0:	la	%[rc],0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: [rc] "+&d" (rc), [tmp] "+&d" (tmp)
		: [cmd] "i" (ESSA_GET_STATE));
	return rc;
}

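/*
 * Disable CMMA if the ESSA instruction is not available. If facility 147
 * is installed as well, raise cmma_flag to 2 so that the no-dat page
 * states are used in addition to the plain stable/unused states.
 */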
void __init cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	if (test_facility(147))
		cmma_flag = 2;
}

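/* Issue ESSA_SET_UNUSED for every 4K page of an order-sized block */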
static inline void set_page_unused(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}

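/* Issue ESSA_SET_STABLE for every 4K page of an order-sized block */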
static inline void set_page_stable_dat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}

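/* Issue ESSA_SET_STABLE_NODAT for every 4K page of an order-sized block */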
static inline void set_page_stable_nodat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE_NODAT));
}

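/*
 * Walk the pmd entries below @pud and set PG_arch_1 on every page that
 * holds a pte table, so that cmma_init_nodat() can skip it later.
 */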
static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || pmd_large(*pmd))
			continue;
		page = phys_to_page(pmd_val(*pmd));
		set_bit(PG_arch_1, &page->flags);
	} while (pmd++, addr = next, addr != end);
}

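/*
 * Same for the pud level. A segment table (pmd level) on s390 is 16K,
 * i.e. four 4K pages, so all four pages of a non-folded entry are marked.
 */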
static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pud_t *pud;
	int i;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || pud_large(*pud))
			continue;
		if (!pud_folded(*pud)) {
			page = phys_to_page(pud_val(*pud));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pmd(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

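/* Same for the p4d level; a non-folded entry points to a 16K pud table */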
static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	p4d_t *p4d;
	int i;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d))
			continue;
		if (!p4d_folded(*p4d)) {
			page = phys_to_page(p4d_val(*p4d));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pud(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}

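/*
 * Walk all kernel page tables from address 0 up to MODULES_END and set
 * PG_arch_1 on every page that is used as a page table.
 */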
static void mark_kernel_pgd(void)
{
	unsigned long addr, next;
	struct page *page;
	pgd_t *pgd;
	int i;

	addr = 0;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, MODULES_END);
		if (pgd_none(*pgd))
			continue;
		if (!pgd_folded(*pgd)) {
			page = phys_to_page(pgd_val(*pgd));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_p4d(pgd, addr, next);
	} while (pgd++, addr = next, addr != MODULES_END);
}

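/*
 * Convert all kernel pages that are not used as page tables to the
 * stable/no-dat state. Page table pages were marked with PG_arch_1 above
 * and keep the default stable state, since DAT still references them.
 */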
void __init cmma_init_nodat(void)
{
	struct page *page;
	unsigned long start, end, ix;
	int i;

	if (cmma_flag < 2)
		return;
	/* Mark pages used in kernel page tables */
	mark_kernel_pgd();
	page = virt_to_page(&swapper_pg_dir);
	for (i = 0; i < 4; i++)
		set_bit(PG_arch_1, &page[i].flags);
	page = virt_to_page(&invalid_pg_dir);
	for (i = 0; i < 4; i++)
		set_bit(PG_arch_1, &page[i].flags);

	/* Set all kernel pages not used for page tables to stable/no-dat */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
		page = pfn_to_page(start);
		for (ix = start; ix < end; ix++, page++) {
			if (__test_and_clear_bit(PG_arch_1, &page->flags))
				continue;	/* skip page table pages */
			if (!list_empty(&page->lru))
				continue;	/* skip free pages */
			set_page_stable_nodat(page, 0);
		}
	}
}

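/*
 * Page allocator hook: mark freed pages as unused so the hypervisor
 * knows their content does not need to be preserved.
 */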
void arch_free_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_unused(page, order);
}

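/*
 * Page allocator hook: switch allocated pages back to a stable state,
 * using the no-dat variant if the facility is available.
 */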
void arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	if (cmma_flag < 2)
		set_page_stable_dat(page, order);
	else
		set_page_stable_nodat(page, order);
}

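/* Make pages that will be accessed via DAT (e.g. page tables) stable */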
void arch_set_page_dat(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_stable_dat(page, order);
}