/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_64_H
#define _ASM_X86_PGTABLE_64_H

#include <linux/const.h>
#include <asm/pgtable_64_types.h>

#ifndef __ASSEMBLY__

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>

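/*
 * Statically allocated early page tables.  These describe the initial
 * kernel mapping and are set up during early boot (in head_64.S).
 */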
extern p4d_t level4_kernel_pgt[512];
extern p4d_t level4_ident_pgt[512];
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512];
extern pgd_t init_top_pgt[];

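/*
 * On x86-64 there is no separate swapper page directory; the initial
 * top-level page table, init_top_pgt, serves as the kernel ("swapper")
 * page directory that generic code expects.
 */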
#define swapper_pg_dir init_top_pgt

extern void paging_init(void);
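/*
 * On 64-bit the initial page table is swapper_pg_dir itself, so there
 * is nothing to synchronize here (unlike 32-bit, which must copy
 * kernel mappings into its separate initial page table).
 */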
static inline void sync_initial_page_table(void) { }

#define pte_ERROR(e)					\
	pr_err("%s:%d: bad pte %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e)					\
	pr_err("%s:%d: bad pmd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e)					\
	pr_err("%s:%d: bad pud %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e))

#if CONFIG_PGTABLE_LEVELS >= 5
#define p4d_ERROR(e)					\
	pr_err("%s:%d: bad p4d %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), p4d_val(e))
#endif

#define pgd_ERROR(e)					\
	pr_err("%s:%d: bad pgd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e))

struct mm_struct;

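/*
 * Install @new_pte for @vaddr in the page table hierarchy rooted at
 * the given p4d/pud page.
 */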
void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);

static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	*ptep = native_make_pte(0);
}

static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	native_set_pmd(pmd, native_make_pmd(0));
}

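/*
 * On SMP the entry must be read and cleared in a single atomic
 * operation (xchg), so that a concurrent hardware update of the
 * accessed/dirty bits cannot be lost between the load and the store.
 * The same reasoning applies to the pmd and pud variants below.
 */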
static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pte(xchg(&xp->pte, 0));
#else
	/*
	 * native_local_ptep_get_and_clear,
	 * but duplicated because of cyclic dependency
	 */
	pte_t ret = *xp;
	native_pte_clear(NULL, 0, xp);
	return ret;
#endif
}

static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pmd(xchg(&xp->pmd, 0));
#else
	/*
	 * native_local_pmdp_get_and_clear,
	 * but duplicated because of cyclic dependency
	 */
	pmd_t ret = *xp;
	native_pmd_clear(xp);
	return ret;
#endif
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
}

static inline void native_pud_clear(pud_t *pud)
{
	native_set_pud(pud, native_make_pud(0));
}

static inline pud_t native_pudp_get_and_clear(pud_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pud(xchg(&xp->pud, 0));
#else
	/*
	 * native_local_pudp_get_and_clear,
	 * but duplicated because of cyclic dependency
	 */
	pud_t ret = *xp;

	native_pud_clear(xp);
	return ret;
#endif
}

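/*
 * With 4-level paging, the p4d level is folded into the pgd, so a p4d
 * write is really a top-level write and must go through the PTI
 * user/kernel page-table handling; with 5-level paging (or PTI
 * disabled) a plain store suffices.
 */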
static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	pgd_t pgd;

	if (pgtable_l5_enabled() || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) {
		*p4dp = p4d;
		return;
	}

	pgd = native_make_pgd(native_p4d_val(p4d));
	pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd);
	*p4dp = native_make_p4d(native_pgd_val(pgd));
}

static inline void native_p4d_clear(p4d_t *p4d)
{
	native_set_p4d(p4d, native_make_p4d(0));
}

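/*
 * Every top-level write goes through pti_set_user_pgtbl() so that,
 * with page-table isolation enabled, the user copy of the page tables
 * is kept up to date.
 */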
static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pti_set_user_pgtbl(pgdp, pgd);
}

static inline void native_pgd_clear(pgd_t *pgd)
{
	native_set_pgd(pgd, native_make_pgd(0));
}

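/*
 * Propagate a change in the kernel portion of the page tables (e.g. a
 * new top-level entry for the vmalloc area) to the page tables of all
 * processes.
 */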
extern void sync_global_pgds(unsigned long start, unsigned long end);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/*
 * Level 4 access.
 */
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)

/* PUD - Level 3 access */

/* PMD - Level 2 access */

/* PTE - Level 1 access */

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))	/* NOP */

/*
 * Encode and de-code a swap entry
 *
 * |     ...            | 11| 10|  9|8|7|6|5| 4| 3|2| 1|0| <- bit number
 * |     ...            |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
 * | TYPE (59-63) | ~OFFSET (9-58)  |0|0|X|X| X| X|X|SD|0| <- swp entry
 *
 * G (8) is aliased and used as a PROT_NONE indicator for
 * !present ptes.  We need to start storing swap entries above
 * there.  We also need to avoid using A and D because of an
 * erratum where they can be incorrectly set by hardware on
 * non-present PTEs.
 *
 * SD (1) in swp entry is used to store soft dirty bit, which helps us
 * remember soft dirty over page migration.
 *
 * Bit 7 in swp entry should be 0 because pmd_present checks not only P,
 * but also L and G.
 *
 * The offset is inverted by a binary not operation to make the high
 * physical bits set.
 */
#define SWP_TYPE_BITS		5

#define SWP_OFFSET_FIRST_BIT	(_PAGE_BIT_PROTNONE + 1)

/*
 * We always extract/encode the offset by shifting it all the way up,
 * and then down again.
 */
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

/* Extract the high bits for type */
#define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS))

/* Shift up (to get rid of type), then down to get value */
#define __swp_offset(x) (~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)

/*
 * Shift the offset up "too far" by TYPE bits, then down again.
 * The offset is inverted by a binary not operation to make the high
 * physical bits set.
 */
#define __swp_entry(type, offset) ((swp_entry_t) { \
	(~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
	| ((unsigned long)(type) << (64 - SWP_TYPE_BITS)) })
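
/*
 * Worked example (illustrative): with SWP_TYPE_BITS = 5 and
 * SWP_OFFSET_SHIFT = 14, __swp_entry(type, offset) places the type in
 * bits 59-63 and the bitwise-inverted offset in bits 9-58, leaving
 * bits 0-8 clear.  Decoding undoes this exactly: __swp_type() reads
 * bits 59-63 back, and __swp_offset() re-inverts bits 9-58, so
 * __swp_offset(__swp_entry(t, o)) == o for any offset that fits in
 * the 50 available bits.
 */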

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val((pmd)) })
#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })
#define __swp_entry_to_pmd(x)		((pmd_t) { .pmd = (x).val })

extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init()   do { } while (0)
#define check_pgt_cache()      do { } while (0)

#define PAGE_AGP    PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/* fs/proc/kcore.c */
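/*
 * Convert between kernel virtual addresses and /proc/kcore offsets by
 * clearing (or re-setting) the address bits above the virtual address
 * width.
 */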
#define	kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define	kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)

#define __HAVE_ARCH_PTE_SAME

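/*
 * The virtual memory map: a virtually contiguous array of struct page
 * starting at VMEMMAP_START, so that pfn <-> struct page conversion
 * is simple arithmetic.
 */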
#define vmemmap ((struct page *)VMEMMAP_START)

extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);

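/*
 * Fast GUP may only walk ranges that are well formed: reject a range
 * that wraps around, or whose end has address bits set at or above
 * __VIRTUAL_MASK_SHIFT (i.e. outside the virtual address width).
 */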
#define gup_fast_permitted gup_fast_permitted
static inline bool gup_fast_permitted(unsigned long start, int nr_pages,
		int write)
{
	unsigned long len, end;

	len = (unsigned long)nr_pages << PAGE_SHIFT;
	end = start + len;
	if (end < start)
		return false;
	if (end >> __VIRTUAL_MASK_SHIFT)
		return false;
	return true;
}

#include <asm/pgtable-invert.h>

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_64_H */