xref: /openbmc/linux/arch/arm64/mm/pageattr.c (revision 1a59d1b8)
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);

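/*
 * apply_to_page_range() callback: update one kernel PTE in place,
 * clearing the bits in 'clear_mask' before applying 'set_mask'.
 */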
static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = READ_ONCE(*ptep);

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
					&data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}

static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	struct vm_struct *area;
	int i;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)area->addr + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	/*
	 * If we are manipulating read-only permissions, apply the same
	 * change to the linear mapping of the pages that back this VM area.
	 */
	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
			    pgprot_val(clear_mask) == PTE_RDONLY)) {
		for (i = 0; i < area->nr_pages; i++) {
			__change_memory_common((u64)page_address(area->pages[i]),
					       PAGE_SIZE, set_mask, clear_mask);
		}
	}

	/*
	 * Get rid of potentially aliasing lazily unmapped vm areas that may
	 * have permissions set that deviate from the ones we are setting here.
	 */
	vm_unmap_aliases();

	return __change_memory_common(start, size, set_mask, clear_mask);
}
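
/*
 * Illustrative sketch of the contract enforced above, written as a
 * hypothetical helper (pageattr_ro_example() is not part of this file):
 * a range backed by a single VM_ALLOC area is accepted, whereas a
 * linear map or fixmap address would fail the find_vm_area() check
 * with -EINVAL.
 */
static int __maybe_unused pageattr_ro_example(void)
{
	void *buf = vmalloc(PAGE_SIZE);
	int ret;

	if (!buf)
		return -ENOMEM;

	/* One page, fully inside one vmalloc area: accepted. */
	ret = set_memory_ro((unsigned long)buf, 1);

	/* A write to buf would fault here; restore before freeing. */
	if (!ret)
		ret = set_memory_rw((unsigned long)buf, 1);

	vfree(buf);
	return ret;
}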

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(0));
}
EXPORT_SYMBOL_GPL(set_memory_nx);

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(0),
					__pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);
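
/*
 * Illustrative sketch: callers typically pair the helpers above so a
 * region is never writable and executable at the same time (W^X).
 * pageattr_wx_example() and 'trampoline' are hypothetical; the buffer
 * is assumed to be vmalloc'ed and already filled with code.
 */
static int __maybe_unused pageattr_wx_example(void *trampoline, int numpages)
{
	int ret;

	/* Drop write permission first... */
	ret = set_memory_ro((unsigned long)trampoline, numpages);
	if (ret)
		return ret;

	/* ...then clear PTE_PXN so the kernel may execute from it. */
	return set_memory_x((unsigned long)trampoline, numpages);
}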

int set_memory_valid(unsigned long addr, int numpages, int enable)
{
	if (enable)
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}
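
/*
 * Illustrative sketch: set_memory_valid() only toggles PTE_VALID, so a
 * debugging feature can take a page out of use and later restore it
 * without rebuilding the mapping. pageattr_valid_example() and
 * 'page_addr' are hypothetical; the address must be page aligned and
 * mapped at page granularity, as __change_memory_common() requires.
 */
static void __maybe_unused pageattr_valid_example(unsigned long page_addr)
{
	/* Any access to the page now takes a translation fault. */
	set_memory_valid(page_addr, 1, 0);

	/* ... the page is quarantined here ... */

	/* Make the page accessible again. */
	set_memory_valid(page_addr, 1, 1);
}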

#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	set_memory_valid((unsigned long)page_address(page), numpages, enable);
}
#ifdef CONFIG_HIBERNATION
/*
 * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this
 * function is used to determine whether a linear map page has been
 * marked as not-valid by debug_pagealloc. Walk the page table and
 * check the PTE_VALID bit. This is based on kern_addr_valid(), which
 * almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect()
 * implies p?d_present(). When debug_pagealloc is enabled, section
 * mappings are disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgdp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep;
	unsigned long addr = (unsigned long)page_address(page);

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return false;

	pudp = pud_offset(pgdp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return false;
	if (pud_sect(pud))
		return true;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return false;
	if (pmd_sect(pmd))
		return true;

	ptep = pte_offset_kernel(pmdp, addr);
	return pte_valid(READ_ONCE(*ptep));
}
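
/*
 * Illustrative sketch of how the hibernation core can consume the
 * helper above: pages that debug_pagealloc has unmapped must be
 * skipped rather than read through the linear map. Both
 * hibernate_copy_example() and 'save_page' are hypothetical stand-ins
 * for the real snapshot logic.
 */
static void __maybe_unused hibernate_copy_example(struct page *page,
						  void (*save_page)(void *))
{
	/* Not mapped: reading it through page_address() would fault. */
	if (!kernel_page_present(page))
		return;

	save_page(page_address(page));
}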
#endif /* CONFIG_HIBERNATION */
#endif /* CONFIG_DEBUG_PAGEALLOC */