/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2016 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_PROT_H
#define __ASM_PGTABLE_PROT_H

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

#include <linux/const.h>

/*
 * Software-defined PTE bit definitions.
 */
#define PTE_WRITE		(PTE_DBM)		 /* same as DBM (51) */
#define PTE_SWP_EXCLUSIVE	(_AT(pteval_t, 1) << 2)	 /* only for swp ptes */
#define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
#define PTE_DEVMAP		(_AT(pteval_t, 1) << 57)
#define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
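
/*
 * Illustrative sketch only (hypothetical names, not used by the kernel):
 * the accessors in <asm/pgtable.h> test these software bits roughly as
 * follows; hardware DBM-managed dirty state is folded in separately there.
 */
#define __example_pte_write(pte)	(!!(pte_val(pte) & PTE_WRITE))
#define __example_pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define __example_pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))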

/*
 * This bit indicates that the entry is present, i.e. pmd_page()
 * still points to a valid huge page in memory even if the pmd
 * has been invalidated.
 */
#define PMD_PRESENT_INVALID	(_AT(pteval_t, 1) << 59) /* only when !PMD_SECT_VALID */
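
/*
 * Illustrative sketch only (hypothetical name): invalidating a huge-page
 * pmd (e.g. for THP) clears the hardware valid bit but records "present"
 * in the software bit above, so pmd_present() can keep reporting the
 * entry; the real helper is pmd_mkinvalid() in <asm/pgtable.h>.
 */
#define __example_pmd_mkinvalid(pmdval)	\
	(((pmdval) & ~PMD_SECT_VALID) | PMD_PRESENT_INVALID)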

#ifndef __ASSEMBLY__

#include <asm/cpufeature.h>
#include <asm/pgtable-types.h>

extern bool arm64_use_ng_mappings;

#define _PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define _PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

#define PTE_MAYBE_NG		(arm64_use_ng_mappings ? PTE_NG : 0)
#define PMD_MAYBE_NG		(arm64_use_ng_mappings ? PMD_SECT_NG : 0)

/*
 * If we have userspace-only BTI, we don't want to mark kernel pages
 * guarded even if the system does support BTI.
 */
#ifdef CONFIG_ARM64_BTI_KERNEL
#define PTE_MAYBE_GP		(system_supports_bti() ? PTE_GP : 0)
#else
#define PTE_MAYBE_GP		0
#endif
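
/*
 * Illustrative sketch only (hypothetical helper): a caller creating an
 * executable kernel mapping can fold the conditional GP bit in like this
 * (PAGE_KERNEL_ROX is defined further down; expansion is deferred to use).
 */
#define __example_prot_kernel_text()	\
	__pgprot(pgprot_val(PAGE_KERNEL_ROX) | PTE_MAYBE_GP)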

#define PROT_DEFAULT		(_PROT_DEFAULT | PTE_MAYBE_NG)
#define PROT_SECT_DEFAULT	(_PROT_SECT_DEFAULT | PMD_MAYBE_NG)

#define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
#define PROT_NORMAL_TAGGED	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_TAGGED))
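
/*
 * Illustrative sketch only (hypothetical helper): a write-combining
 * variant of a protection can be derived by swapping the MAIR attribute
 * index, which is what pgprot_writecombine() in <asm/pgtable.h> does.
 */
static inline pgprot_t __example_writecombine(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PTE_ATTRINDX_MASK) |
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN);
}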

#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
#define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))

#define _PAGE_DEFAULT		(_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))

#define PAGE_KERNEL		__pgprot(PROT_NORMAL)
#define PAGE_KERNEL_RO		__pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
#define PAGE_KERNEL_ROX		__pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
#define PAGE_KERNEL_EXEC	__pgprot(PROT_NORMAL & ~PTE_PXN)
#define PAGE_KERNEL_EXEC_CONT	__pgprot((PROT_NORMAL & ~PTE_PXN) | PTE_CONT)
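
/*
 * Illustrative usage only (hypothetical helper): PAGE_KERNEL_EXEC simply
 * drops PXN from PROT_NORMAL, so selecting between the two is enough to
 * choose an executable or non-executable kernel mapping.
 */
static inline pgprot_t __example_kernel_prot(bool executable)
{
	return executable ? PAGE_KERNEL_EXEC : PAGE_KERNEL;
}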

#define PAGE_S2_MEMATTR(attr, has_fwb)					\
	({								\
		u64 __val;						\
		if (has_fwb)						\
			__val = PTE_S2_MEMATTR(MT_S2_FWB_ ## attr);	\
		else							\
			__val = PTE_S2_MEMATTR(MT_S2_ ## attr);		\
		__val;							\
	 })
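
/*
 * Illustrative usage only (hypothetical helper): computing the stage-2
 * attribute for normal memory; whether FWB is in use is tracked by KVM
 * and is simply passed in by the caller here.
 */
static inline u64 __example_s2_normal_attr(bool has_fwb)
{
	return PAGE_S2_MEMATTR(NORMAL, has_fwb);
}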

#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
/* shared+writable pages are clean by default, hence PTE_RDONLY|PTE_WRITE */
#define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
#define PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
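
/*
 * Illustrative sketch only (hypothetical, simplified helper): roughly how
 * VMA permissions map onto the user protections above (the authoritative
 * table is protection_map[] in arch/arm64/mm/mmap.c). Note that private
 * writable mappings start out read-only so the first write faults and
 * triggers copy-on-write.
 */
static inline pgprot_t __example_vm_prot(bool shared, bool write, bool exec)
{
	if (shared && write)
		return exec ? PAGE_SHARED_EXEC : PAGE_SHARED;
	if (exec)
		return PAGE_READONLY_EXEC;
	return PAGE_READONLY;
}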

#endif /* __ASSEMBLY__ */

#endif /* __ASM_PGTABLE_PROT_H */