xref: /openbmc/linux/arch/arm64/mm/pageattr.c (revision 3a83e4e6)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/set_memory.h>
#include <asm/tlbflush.h>

/* Bits to set and to clear in each PTE visited by change_page_range(). */
struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

/*
 * rodata=full: the linear map is created at page granularity, so permission
 * changes made to a vmalloc'ed range can also be applied to its linear alias.
 */
bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);

/* Callback for apply_to_page_range(): update the bits of a single PTE. */
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = READ_ONCE(*ptep);

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

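/*
 * Illustrative sketch (not part of this file): a permission change is
 * expressed declaratively as a pair of masks. Making a range read-only,
 * as set_memory_ro() below does, amounts to:
 *
 *	struct page_change_data ro = {
 *		.set_mask	= __pgprot(PTE_RDONLY),
 *		.clear_mask	= __pgprot(PTE_WRITE),
 *	};
 *
 * change_page_range() then applies both masks to every PTE in the range.
 */
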
/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
					&data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}

static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	struct vm_struct *area;
	int i;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)area->addr + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	/*
	 * If we are manipulating read-only permissions, apply the same
	 * change to the linear mapping of the pages that back this VM area.
	 */
	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
			    pgprot_val(clear_mask) == PTE_RDONLY)) {
		for (i = 0; i < area->nr_pages; i++) {
			__change_memory_common((u64)page_address(area->pages[i]),
					       PAGE_SIZE, set_mask, clear_mask);
		}
	}

	/*
	 * Get rid of potentially aliasing lazily unmapped vm areas that may
	 * have permissions set that deviate from the ones we are setting here.
	 */
	vm_unmap_aliases();

	return __change_memory_common(start, size, set_mask, clear_mask);
}

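/*
 * Usage sketch for the contract above (illustrative, not from this file):
 * permission changes must target the vmalloc address; the linear alias is
 * handled internally when rodata_full is set.
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *	unsigned long lin = (unsigned long)page_address(vmalloc_to_page(buf));
 *
 *	set_memory_ro((unsigned long)buf, 1);	// OK: one VM_ALLOC area
 *	set_memory_ro(lin, 1);			// -EINVAL: linear map address,
 *						// find_vm_area() sees no area
 */
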
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}

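/*
 * Typical round trip (illustrative sketch, not taken from this file):
 *
 *	void *p = vmalloc(PAGE_SIZE);
 *
 *	memset(p, 0, PAGE_SIZE);
 *	set_memory_ro((unsigned long)p, 1);	// further writes would fault
 *	...
 *	set_memory_rw((unsigned long)p, 1);	// writable again
 *	vfree(p);
 */
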
/*
 * Execute permission travels with the BTI guarded-page attribute:
 * PTE_MAYBE_GP resolves to the GP bit when the kernel is built with BTI
 * support and the CPU implements it, and to 0 otherwise.
 */
int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(PTE_MAYBE_GP));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_MAYBE_GP),
					__pgprot(PTE_PXN));
}

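/*
 * W^X sketch (illustrative; insns/len are hypothetical): code buffers are
 * made read-only before they are made executable, never writable+executable.
 *
 *	void *tramp = vmalloc(PAGE_SIZE);
 *
 *	memcpy(tramp, insns, len);
 *	flush_icache_range((unsigned long)tramp, (unsigned long)tramp + len);
 *	set_memory_ro((unsigned long)tramp, 1);
 *	set_memory_x((unsigned long)tramp, 1);
 */
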
/*
 * Toggle PTE_VALID directly, without the vmalloc-area check above: callers
 * operate on page-mapped ranges they already own (e.g. the linear map).
 */
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
	if (enable)
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}

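/*
 * Minimal sketch (illustrative; vaddr is hypothetical): take a page offline
 * so that stray accesses fault, then bring it back, as __kernel_map_pages()
 * below does for debug_pagealloc.
 *
 *	set_memory_valid(vaddr, 1, 0);	// accesses now fault
 *	...
 *	set_memory_valid(vaddr, 1, 1);	// page usable again
 */
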
/*
 * _noflush: the caller is responsible for TLB maintenance. Without a
 * page-granular linear map (!rodata_full) this is a no-op.
 */
int set_direct_map_invalid_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(0),
		.clear_mask = __pgprot(PTE_VALID),
	};

	if (!rodata_full)
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}

/* Restore the default (valid, writable) linear map entry for @page. */
int set_direct_map_default_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
		.clear_mask = __pgprot(PTE_RDONLY),
	};

	if (!rodata_full)
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}

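/*
 * Sketch of the noflush contract (illustrative, not from this file): a stale
 * TLB entry may still allow access until the caller flushes it.
 *
 *	unsigned long va = (unsigned long)page_address(page);
 *
 *	set_direct_map_invalid_noflush(page);
 *	flush_tlb_kernel_range(va, va + PAGE_SIZE);	// caller's job
 *	...
 *	set_direct_map_default_noflush(page);		// restore RW mapping
 */
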
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled() && !rodata_full)
		return;

	set_memory_valid((unsigned long)page_address(page), numpages, enable);
}

/*
 * This function is used to determine if a linear map page has been marked as
 * not-valid. Walk the page table and check the PTE_VALID bit. This is based
 * on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep;
	unsigned long addr = (unsigned long)page_address(page);

	if (!debug_pagealloc_enabled() && !rodata_full)
		return true;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return false;

	p4dp = p4d_offset(pgdp, addr);
	if (p4d_none(READ_ONCE(*p4dp)))
		return false;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return false;
	if (pud_sect(pud))
		return true;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return false;
	if (pmd_sect(pmd))
		return true;

	ptep = pte_offset_kernel(pmdp, addr);
	return pte_valid(READ_ONCE(*ptep));
}
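
/*
 * Typical use (illustrative, modelled on hibernation's safe_copy_page()):
 * check the predicate before touching a frame that debug_pagealloc may have
 * unmapped.
 *
 *	if (kernel_page_present(s_page))
 *		copy_page(dst, page_address(s_page));
 *	else
 *		// frame is unmapped: copy it through a temporary mapping
 */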