/* xref: /openbmc/linux/arch/arm64/mm/pageattr.c (revision a8fe58ce) */
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

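/*
 * Bundle of protection-bit masks handed to the per-PTE callback: bits in
 * set_mask are set on each PTE, and bits in clear_mask are cleared.
 */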
struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

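/*
 * apply_to_page_range() callback: rewrite a single PTE in place, clearing
 * then setting the requested protection bits. The pagetable token and the
 * address arguments are unused here.
 */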
static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = *ptep;

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

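/*
 * Apply set_mask/clear_mask to every PTE mapping the range
 * [addr, addr + numpages * PAGE_SIZE), then invalidate the stale TLB
 * entries for that range.
 */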
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	int ret;
	struct page_change_data data;
	struct vm_struct *area;

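	/*
	 * Tolerate a misaligned address by rounding down to the page
	 * boundary, but warn: callers are expected to pass page-aligned
	 * addresses.
	 */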
	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)area->addr + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

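	/* An empty range is trivially a success; skip the pagetable walk. */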
	if (!numpages)
		return 0;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
					&data);

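	/*
	 * The PTEs have been rewritten; flush the range so that no CPU
	 * keeps using stale TLB entries with the old permissions.
	 */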
	flush_tlb_kernel_range(start, end);
	return ret;
}

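/*
 * Callers pass a page-aligned kernel VA inside a vmalloc/vmap area. Each
 * helper flips one permission: PTE_RDONLY/PTE_WRITE for ro/rw, and PTE_PXN
 * (privileged execute-never) for nx/x. Only the nx/x variants are exported
 * to modules in this revision.
 */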
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(0));
}
EXPORT_SYMBOL_GPL(set_memory_nx);

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(0),
					__pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);
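
/*
 * Illustrative sketch (not part of the original file): how built-in code
 * might drive this API on a vmalloc'd buffer. The function name is
 * hypothetical, and the block is fenced off with #if 0 so it is never
 * compiled.
 */
#if 0
static int pageattr_usage_sketch(void)
{
	/* vmalloc memory is page-mapped, so change_memory_common accepts it. */
	int *buf = vmalloc(PAGE_SIZE);

	if (!buf)
		return -ENOMEM;

	buf[0] = 42;				/* writable while the page is RW */
	set_memory_ro((unsigned long)buf, 1);	/* further writes would fault */
	pr_info("value: %d\n", buf[0]);		/* reads remain fine */
	set_memory_rw((unsigned long)buf, 1);	/* restore before freeing */

	vfree(buf);
	return 0;
}
#endif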