xref: /openbmc/linux/arch/arm/mm/pageattr.c (revision f9bff0e3)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */
#include <linux/mm.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/set_memory.h>

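/*
 * Protection bits to set and clear on each PTE visited by
 * change_page_range().
 */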
struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

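/*
 * apply_to_page_range() callback: update a single kernel PTE by clearing
 * the requested bits and then setting the new ones.
 */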
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = *ptep;

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte_ext(ptep, pte, 0);
	return 0;
}

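/*
 * Return true if [start, start + size) lies entirely within
 * [range_start, range_end).
 */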
static bool range_in_range(unsigned long start, unsigned long size,
	unsigned long range_start, unsigned long range_end)
{
	return start >= range_start && start < range_end &&
		size <= range_end - start;
}

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
				  &data);

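	/* Drop stale TLB entries so the new protections take effect. */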
	flush_tlb_kernel_range(start, start + size);
	return ret;
}

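/*
 * Common helper for the set_memory_*() interfaces: round the request to
 * whole pages, check that it targets a permitted region and apply the
 * protection masks. addr is expected to be page aligned.
 */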
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr & PAGE_MASK;
	unsigned long end = PAGE_ALIGN(addr) + numpages * PAGE_SIZE;
	unsigned long size = end - start;

	WARN_ON_ONCE(start != addr);

	if (!size)
		return 0;

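	/*
	 * Only the module and vmalloc areas may have their permissions
	 * changed at page granularity; anything else is rejected.
	 */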
	if (!range_in_range(start, size, MODULES_VADDR, MODULES_END) &&
	    !range_in_range(start, size, VMALLOC_START, VMALLOC_END))
		return -EINVAL;

	return __change_memory_common(start, size, set_mask, clear_mask);
}

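/* Mark [addr, addr + numpages * PAGE_SIZE) read-only. */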
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(L_PTE_RDONLY),
					__pgprot(0));
}

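/* Make [addr, addr + numpages * PAGE_SIZE) writable again. */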
int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(0),
					__pgprot(L_PTE_RDONLY));
}

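/* Mark [addr, addr + numpages * PAGE_SIZE) non-executable. */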
int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(L_PTE_XN),
					__pgprot(0));
}

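/* Mark [addr, addr + numpages * PAGE_SIZE) executable. */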
int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(0),
					__pgprot(L_PTE_XN));
}

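/*
 * Set or clear L_PTE_VALID on the numpages pages starting at addr.
 * Unlike the set_memory_ro/rw/nx/x() helpers above, this skips the
 * module/vmalloc range check and applies the change directly.
 */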
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
	if (enable)
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					      __pgprot(L_PTE_VALID),
					      __pgprot(0));
	else
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					      __pgprot(0),
					      __pgprot(L_PTE_VALID));
}