xref: /openbmc/linux/arch/arm64/mm/pageattr.c (revision bbaf1ff0)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <asm/kfence.h>

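/*
 * Bits to set and to clear in each page-table entry visited by
 * change_page_range().
 */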
struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);

bool can_set_direct_map(void)
{
	/*
	 * rodata_full and DEBUG_PAGEALLOC require the linear map to be
	 * mapped at page granularity, so that it is possible to
	 * protect/unprotect single pages.
	 *
	 * The KFENCE pool requires page-granular mapping if initialized late.
	 */
	return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
		arm64_kfence_can_set_direct_map();
}

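/* Callback for apply_to_page_range(): update the bits of one kernel PTE. */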
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = READ_ONCE(*ptep);

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
					&data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}

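/*
 * Check that the range lies entirely within a single vmalloc/vmap area,
 * update the linear-map alias of the backing pages when read-only
 * permissions are involved, then apply the requested change to the
 * vmalloc range itself.
 */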
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	struct vm_struct *area;
	int i;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)kasan_reset_tag(area->addr) + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	/*
	 * If we are manipulating read-only permissions, apply the same
	 * change to the linear mapping of the pages that back this VM area.
	 */
	if (rodata_enabled &&
	    rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
			    pgprot_val(clear_mask) == PTE_RDONLY)) {
		for (i = 0; i < area->nr_pages; i++) {
			__change_memory_common((u64)page_address(area->pages[i]),
					       PAGE_SIZE, set_mask, clear_mask);
		}
	}

	/*
	 * Get rid of potentially aliasing lazily unmapped vm areas that may
	 * have permissions set that deviate from the ones we are setting here.
	 */
	vm_unmap_aliases();

	return __change_memory_common(start, size, set_mask, clear_mask);
}

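/*
 * Illustrative sketch (not part of the original file): because of the
 * VM_ALLOC check in change_memory_common(), these helpers only accept
 * memory obtained from vmalloc()/vmap(). A hypothetical caller could
 * write-protect a page-sized buffer and make it writable again before
 * freeing it:
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *
 *	set_memory_ro((unsigned long)buf, 1);
 *	...
 *	set_memory_rw((unsigned long)buf, 1);
 *	vfree(buf);
 */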
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}

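/*
 * set_memory_nx()/set_memory_x() toggle PTE_PXN to forbid or permit
 * privileged execution. PTE_MAYBE_GP is cleared or set alongside it;
 * that is understood to be the BTI guarded-page bit on kernels built
 * with BTI support (its definition lives in the arm64 pgtable headers,
 * not in this file).
 */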
int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(PTE_MAYBE_GP));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_MAYBE_GP),
					__pgprot(PTE_PXN));
}

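/*
 * Toggle PTE_VALID directly via __change_memory_common(), bypassing the
 * vmalloc-area check above, so this works on any range that is already
 * mapped at page granularity (e.g. the linear map when
 * can_set_direct_map() is true). Used by __kernel_map_pages() below.
 */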
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
	if (enable)
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}

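/*
 * The *_noflush() helpers below update the linear-map entry of a single
 * page without flushing the TLB; callers are expected to handle any
 * necessary invalidation themselves. When can_set_direct_map() is false
 * the linear map may use block mappings, so they do nothing and report
 * success.
 */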
int set_direct_map_invalid_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(0),
		.clear_mask = __pgprot(PTE_VALID),
	};

	if (!can_set_direct_map())
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}

int set_direct_map_default_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
		.clear_mask = __pgprot(PTE_RDONLY),
	};

	if (!can_set_direct_map())
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!can_set_direct_map())
		return;

	set_memory_valid((unsigned long)page_address(page), numpages, enable);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

/*
 * This function is used to determine if a linear map page has been marked as
 * not-valid. Walk the page table and check the PTE_VALID bit.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep;
	unsigned long addr = (unsigned long)page_address(page);

	if (!can_set_direct_map())
		return true;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return false;

	p4dp = p4d_offset(pgdp, addr);
	if (p4d_none(READ_ONCE(*p4dp)))
		return false;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return false;
	if (pud_sect(pud))
		return true;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return false;
	if (pmd_sect(pmd))
		return true;

	ptep = pte_offset_kernel(pmdp, addr);
	return pte_valid(READ_ONCE(*ptep));
}