// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2011
 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/facility.h>
#include <asm/pgalloc.h>
#include <asm/kfence.h>
#include <asm/page.h>
#include <asm/set_memory.h>

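/*
 * Set the storage key of the 4K page frame at @addr. The SSKE is issued
 * with the multiple-block control set (the "1" in the m3 field), so the
 * key is applied to all 4K blocks up to the next 1MB boundary and the
 * updated block address is returned.
 */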
static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
{
	asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],1,0"
		     : [addr] "+a" (addr) : [skey] "d" (skey));
	return addr;
}

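/*
 * Initialize the storage keys of a physical memory range to
 * PAGE_DEFAULT_KEY. With EDAT1 whole 1MB frames are keyed at once via
 * sske_frame(), otherwise each 4K page is set individually.
 */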
void __storage_key_init_range(unsigned long start, unsigned long end)
{
	unsigned long boundary, size;

	while (start < end) {
		if (MACHINE_HAS_EDAT1) {
			/* set storage keys for a 1MB frame */
			size = 1UL << 20;
			boundary = (start + size) & ~(size - 1);
			if (boundary <= end) {
				do {
					start = sske_frame(start, PAGE_DEFAULT_KEY);
				} while (start < boundary);
				continue;
			}
		}
		page_set_storage_key(start, PAGE_DEFAULT_KEY, 1);
		start += PAGE_SIZE;
	}
}

#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

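/*
 * Report how the kernel direct mapping is split between 4K, 1M and 2G
 * pages. The shifts convert page counts to kB: <<2 for 4K pages, <<10
 * for 1M pages and <<21 for 2G pages.
 */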
void arch_report_meminfo(struct seq_file *m)
{
	seq_printf(m, "DirectMap4k:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_4K]) << 2);
	seq_printf(m, "DirectMap1M:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_1M]) << 10);
	seq_printf(m, "DirectMap2G:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_2G]) << 21);
}
#endif /* CONFIG_PROC_FS */

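/*
 * Exchange a live DAT table entry. With EDAT2 use CRDTE (compare and
 * replace DAT table entry), which requires the origin of the table that
 * contains the entry; otherwise fall back to compare and swap and purge,
 * either on the full entry (cspg) or on its lower word (csp).
 */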
static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
		    unsigned long dtt)
{
	unsigned long *table, mask;

	mask = 0;
	if (MACHINE_HAS_EDAT2) {
		switch (dtt) {
		case CRDTE_DTT_REGION3:
			mask = ~(PTRS_PER_PUD * sizeof(pud_t) - 1);
			break;
		case CRDTE_DTT_SEGMENT:
			mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
			break;
		case CRDTE_DTT_PAGE:
			mask = ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
			break;
		}
		table = (unsigned long *)((unsigned long)old & mask);
		crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce);
	} else if (MACHINE_HAS_IDTE) {
		cspg(old, *old, new);
	} else {
		csp((unsigned int *)old + 1, *old, new);
	}
}

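/*
 * Apply the SET_MEMORY_* flags to all PTEs of the range. A plain
 * SET_MEMORY_4K request is a no-op at this level, since the mapping
 * already consists of 4K pages.
 */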
static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
			  unsigned long flags)
{
	pte_t *ptep, new;

	if (flags == SET_MEMORY_4K)
		return 0;
	ptep = pte_offset_kernel(pmdp, addr);
	do {
		new = *ptep;
		if (pte_none(new))
			return -EINVAL;
		if (flags & SET_MEMORY_RO)
			new = pte_wrprotect(new);
		else if (flags & SET_MEMORY_RW)
			new = pte_mkwrite(pte_mkdirty(new));
		if (flags & SET_MEMORY_NX)
			new = set_pte_bit(new, __pgprot(_PAGE_NOEXEC));
		else if (flags & SET_MEMORY_X)
			new = clear_pte_bit(new, __pgprot(_PAGE_NOEXEC));
		pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
		ptep++;
		addr += PAGE_SIZE;
		cond_resched();
	} while (addr < end);
	return 0;
}

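/*
 * Split a 1M large pmd into a page table of 4K PTEs, carrying over the
 * protection and noexec bits of the original segment entry.
 */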
static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
{
	unsigned long pte_addr, prot;
	pte_t *pt_dir, *ptep;
	pmd_t new;
	int i, ro, nx;

	pt_dir = vmem_pte_alloc();
	if (!pt_dir)
		return -ENOMEM;
	pte_addr = pmd_pfn(*pmdp) << PAGE_SHIFT;
	ro = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT);
	nx = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_NOEXEC);
	prot = pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
	if (!nx)
		prot &= ~_PAGE_NOEXEC;
	ptep = pt_dir;
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte(ptep, __pte(pte_addr | prot));
		pte_addr += PAGE_SIZE;
		ptep++;
	}
	new = __pmd(__pa(pt_dir) | _SEGMENT_ENTRY);
	pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
	update_page_count(PG_DIRECT_MAP_4K, PTRS_PER_PTE);
	update_page_count(PG_DIRECT_MAP_1M, -1);
	return 0;
}

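/* Change the protection and executability of a 1M large pmd in place. */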
static void modify_pmd_page(pmd_t *pmdp, unsigned long addr,
			    unsigned long flags)
{
	pmd_t new = *pmdp;

	if (flags & SET_MEMORY_RO)
		new = pmd_wrprotect(new);
	else if (flags & SET_MEMORY_RW)
		new = pmd_mkwrite(pmd_mkdirty(new));
	if (flags & SET_MEMORY_NX)
		new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
	else if (flags & SET_MEMORY_X)
		new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
	pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
}

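/*
 * Walk the pmd entries of the range. A large pmd is split whenever 4K
 * mappings were requested explicitly or the range covers the segment
 * only partially; otherwise it is modified in place.
 */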
static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
			  unsigned long flags)
{
	unsigned long next;
	int need_split;
	pmd_t *pmdp;
	int rc = 0;

	pmdp = pmd_offset(pudp, addr);
	do {
		if (pmd_none(*pmdp))
			return -EINVAL;
		next = pmd_addr_end(addr, end);
		if (pmd_large(*pmdp)) {
			need_split  = !!(flags & SET_MEMORY_4K);
			need_split |= !!(addr & ~PMD_MASK);
			need_split |= !!(addr + PMD_SIZE > next);
			if (need_split) {
				rc = split_pmd_page(pmdp, addr);
				if (rc)
					return rc;
				continue;
			}
			modify_pmd_page(pmdp, addr, flags);
		} else {
			rc = walk_pte_level(pmdp, addr, next, flags);
			if (rc)
				return rc;
		}
		pmdp++;
		addr = next;
		cond_resched();
	} while (addr < end);
	return rc;
}

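/*
 * Split a 2G large pud into a segment table of 1M pmds, carrying over the
 * protection and noexec bits of the original region entry.
 */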
static int split_pud_page(pud_t *pudp, unsigned long addr)
{
	unsigned long pmd_addr, prot;
	pmd_t *pm_dir, *pmdp;
	pud_t new;
	int i, ro, nx;

	pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
	if (!pm_dir)
		return -ENOMEM;
	pmd_addr = pud_pfn(*pudp) << PAGE_SHIFT;
	ro = !!(pud_val(*pudp) & _REGION_ENTRY_PROTECT);
	nx = !!(pud_val(*pudp) & _REGION_ENTRY_NOEXEC);
	prot = pgprot_val(ro ? SEGMENT_KERNEL_RO : SEGMENT_KERNEL);
	if (!nx)
		prot &= ~_SEGMENT_ENTRY_NOEXEC;
	pmdp = pm_dir;
	for (i = 0; i < PTRS_PER_PMD; i++) {
		set_pmd(pmdp, __pmd(pmd_addr | prot));
		pmd_addr += PMD_SIZE;
		pmdp++;
	}
	new = __pud(__pa(pm_dir) | _REGION3_ENTRY);
	pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
	update_page_count(PG_DIRECT_MAP_1M, PTRS_PER_PMD);
	update_page_count(PG_DIRECT_MAP_2G, -1);
	return 0;
}

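/* Change the protection and executability of a 2G large pud in place. */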
static void modify_pud_page(pud_t *pudp, unsigned long addr,
			    unsigned long flags)
{
	pud_t new = *pudp;

	if (flags & SET_MEMORY_RO)
		new = pud_wrprotect(new);
	else if (flags & SET_MEMORY_RW)
		new = pud_mkwrite(pud_mkdirty(new));
	if (flags & SET_MEMORY_NX)
		new = set_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
	else if (flags & SET_MEMORY_X)
		new = clear_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
	pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
}

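/*
 * Walk the pud entries of the range; split 2G entries that are covered
 * only partially or that must become 4K mappings, and modify the
 * remaining large entries in place.
 */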
static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
			  unsigned long flags)
{
	unsigned long next;
	int need_split;
	pud_t *pudp;
	int rc = 0;

	pudp = pud_offset(p4d, addr);
	do {
		if (pud_none(*pudp))
			return -EINVAL;
		next = pud_addr_end(addr, end);
		if (pud_large(*pudp)) {
			need_split  = !!(flags & SET_MEMORY_4K);
			need_split |= !!(addr & ~PUD_MASK);
			need_split |= !!(addr + PUD_SIZE > next);
			if (need_split) {
				rc = split_pud_page(pudp, addr);
				if (rc)
					break;
				continue;
			}
			modify_pud_page(pudp, addr, flags);
		} else {
			rc = walk_pmd_level(pudp, addr, next, flags);
		}
		pudp++;
		addr = next;
		cond_resched();
	} while (addr < end && !rc);
	return rc;
}

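/* Walk the p4d entries of the range and descend to the pud level. */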
static int walk_p4d_level(pgd_t *pgd, unsigned long addr, unsigned long end,
			  unsigned long flags)
{
	unsigned long next;
	p4d_t *p4dp;
	int rc = 0;

	p4dp = p4d_offset(pgd, addr);
	do {
		if (p4d_none(*p4dp))
			return -EINVAL;
		next = p4d_addr_end(addr, end);
		rc = walk_pud_level(p4dp, addr, next, flags);
		p4dp++;
		addr = next;
		cond_resched();
	} while (addr < end && !rc);
	return rc;
}

DEFINE_MUTEX(cpa_mutex);

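/*
 * Walk the kernel page table and apply the requested protection to the
 * given address range. Walks are serialized by cpa_mutex, and only
 * ranges that end below MODULES_END are accepted.
 */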
static int change_page_attr(unsigned long addr, unsigned long end,
			    unsigned long flags)
{
	unsigned long next;
	int rc = -EINVAL;
	pgd_t *pgdp;

	if (addr == end)
		return 0;
	if (end >= MODULES_END)
		return -EINVAL;
	mutex_lock(&cpa_mutex);
	pgdp = pgd_offset_k(addr);
	do {
		if (pgd_none(*pgdp))
			break;
		next = pgd_addr_end(addr, end);
		rc = walk_p4d_level(pgdp, addr, next, flags);
		if (rc)
			break;
		cond_resched();
	} while (pgdp++, addr = next, addr < end && !rc);
	mutex_unlock(&cpa_mutex);
	return rc;
}

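/*
 * Common entry point of the set_memory_*() helpers. Without the NX
 * facility the executability flags are dropped, as the hardware provides
 * no noexec protection in that case.
 */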
int __set_memory(unsigned long addr, int numpages, unsigned long flags)
{
	if (!MACHINE_HAS_NX)
		flags &= ~(SET_MEMORY_NX | SET_MEMORY_X);
	if (!flags)
		return 0;
	addr &= PAGE_MASK;
	return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
}

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)

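/*
 * Invalidate a range of PTEs. With the IPTE-range facility (facility 13)
 * a single instruction invalidates all @nr pages, otherwise the pages
 * are invalidated one by one.
 */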
static void ipte_range(pte_t *pte, unsigned long address, int nr)
{
	int i;

	if (test_facility(13)) {
		__ptep_ipte_range(address, nr - 1, pte, IPTE_GLOBAL);
		return;
	}
	for (i = 0; i < nr; i++) {
		__ptep_ipte(address, pte, 0, 0, IPTE_GLOBAL);
		address += PAGE_SIZE;
		pte++;
	}
}

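/*
 * (Un)map kernel pages for CONFIG_DEBUG_PAGEALLOC and KFENCE. Mapping a
 * page clears the invalid bit of its PTE again, unmapping invalidates
 * the PTEs via ipte_range().
 */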
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long address;
	pte_t *ptep, pte;
	int nr, i, j;

	for (i = 0; i < numpages;) {
		address = (unsigned long)page_to_virt(page + i);
		ptep = virt_to_kpte(address);
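		/* limit the batch to the remaining entries of this page table */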
		nr = (unsigned long)ptep >> ilog2(sizeof(long));
		nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
		nr = min(numpages - i, nr);
		if (enable) {
			for (j = 0; j < nr; j++) {
				pte = clear_pte_bit(*ptep, __pgprot(_PAGE_INVALID));
				set_pte(ptep, pte);
				address += PAGE_SIZE;
				ptep++;
			}
		} else {
			ipte_range(ptep, address, nr);
		}
		i += nr;
	}
}

#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */