/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_64_H
#define _ASM_X86_PGTABLE_64_H

#include <linux/const.h>
#include <asm/pgtable_64_types.h>

#ifndef __ASSEMBLY__

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>

extern p4d_t level4_kernel_pgt[512];
extern p4d_t level4_ident_pgt[512];
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512];
extern pgd_t init_top_pgt[];

#define swapper_pg_dir init_top_pgt

extern void paging_init(void);

#define pte_ERROR(e)					\
	pr_err("%s:%d: bad pte %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e)					\
	pr_err("%s:%d: bad pmd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e)					\
	pr_err("%s:%d: bad pud %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e))

#if CONFIG_PGTABLE_LEVELS >= 5
#define p4d_ERROR(e)					\
	pr_err("%s:%d: bad p4d %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), p4d_val(e))
#endif

#define pgd_ERROR(e)					\
	pr_err("%s:%d: bad pgd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e))

struct mm_struct;

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);

static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	*ptep = native_make_pte(0);
}

static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	native_set_pmd(pmd, native_make_pmd(0));
}

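/*
 * Note: on SMP the xchg() in the *_get_and_clear() helpers below
 * atomically fetches the old entry and writes zero in one step, so a
 * concurrent hardware update of the Accessed/Dirty bits cannot be lost
 * between the read and the clear.
 */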
static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pte(xchg(&xp->pte, 0));
#else
	/*
	 * native_local_ptep_get_and_clear,
	 * but duplicated because of cyclic dependency
	 */
	pte_t ret = *xp;
	native_pte_clear(NULL, 0, xp);
	return ret;
#endif
}

static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pmd(xchg(&xp->pmd, 0));
#else
	/*
	 * native_local_pmdp_get_and_clear,
	 * but duplicated because of cyclic dependency
	 */
	pmd_t ret = *xp;
	native_pmd_clear(xp);
	return ret;
#endif
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
}

static inline void native_pud_clear(pud_t *pud)
{
	native_set_pud(pud, native_make_pud(0));
}

static inline pud_t native_pudp_get_and_clear(pud_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pud(xchg(&xp->pud, 0));
#else
	/*
	 * native_local_pudp_get_and_clear,
	 * but duplicated because of cyclic dependency
	 */
	pud_t ret = *xp;

	native_pud_clear(xp);
	return ret;
#endif
}

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
 * (8k-aligned and 8k in size).  The kernel one is at the beginning 4k and
 * the user one is in the last 4k.  To switch between them, you
 * just need to flip the 12th bit in their addresses.
 */
#define PTI_PGTABLE_SWITCH_BIT	PAGE_SHIFT

/*
 * This generates better code than the inline assembly in
 * __set_bit().
 */
static inline void *ptr_set_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr |= BIT(bit);
	return (void *)__ptr;
}

static inline void *ptr_clear_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr &= ~BIT(bit);
	return (void *)__ptr;
}

static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
{
	return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
{
	return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
{
	return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
{
	return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}
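
/*
 * Illustrative example (hypothetical address): the two PGD pages share
 * one 8k allocation, so the switch is a single flip of bit 12.  If the
 * kernel PGD page sat at 0xffff888001c0a000, kernel_to_user_pgdp()
 * would return 0xffff888001c0b000, and user_to_kernel_pgdp() would
 * clear bit 12 again to recover the original pointer.
 */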
#endif /* CONFIG_PAGE_TABLE_ISOLATION */

/*
 * Page table pages are page-aligned.  The lower half of the top
 * level is used for userspace and the top half for the kernel.
 *
 * Returns true for parts of the PGD that map userspace and
 * false for the parts that map the kernel.
 */
static inline bool pgdp_maps_userspace(void *__ptr)
{
	unsigned long ptr = (unsigned long)__ptr;

	return (ptr & ~PAGE_MASK) < (PAGE_SIZE / 2);
}
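
/*
 * For example: a PGD page holds 512 8-byte entries, so pointers to
 * entries 0-255 (page offsets 0x000-0x7f8) make this return true,
 * while pointers to entries 256-511 (offsets 0x800 and up), which map
 * the kernel, make it return false.
 */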

#ifdef CONFIG_PAGE_TABLE_ISOLATION
pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd);

/*
 * Take a PGD location (pgdp) and a pgd value that needs to be set there.
 * Populates the user page tables and returns the resulting PGD that must
 * be set in the kernel copy of the page tables.
 */
static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return pgd;
	return __pti_set_user_pgd(pgdp, pgd);
}
#else
static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
{
	return pgd;
}
#endif

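/*
 * With 4-level paging the p4d level is folded into the pgd, so a p4d
 * entry written here is really a top-level entry and the PTI user-copy
 * fixup must be applied at this level rather than in native_set_pgd().
 */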
static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
{
#if defined(CONFIG_PAGE_TABLE_ISOLATION) && !defined(CONFIG_X86_5LEVEL)
	p4dp->pgd = pti_set_user_pgd(&p4dp->pgd, p4d.pgd);
#else
	*p4dp = p4d;
#endif
}

static inline void native_p4d_clear(p4d_t *p4d)
{
#ifdef CONFIG_X86_5LEVEL
	native_set_p4d(p4d, native_make_p4d(0));
#else
	native_set_p4d(p4d, (p4d_t) { .pgd = native_make_pgd(0)});
#endif
}

static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	*pgdp = pti_set_user_pgd(pgdp, pgd);
#else
	*pgdp = pgd;
#endif
}

static inline void native_pgd_clear(pgd_t *pgd)
{
	native_set_pgd(pgd, native_make_pgd(0));
}

extern void sync_global_pgds(unsigned long start, unsigned long end);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/*
 * Level 4 access.
 */
static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)

/* PUD - Level 3 access */

/* PMD - Level 2 access */

/* PTE - Level 1 access */

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))	/* NOP */

/*
 * Encode and decode a swap entry
 *
 * |     ...            | 11| 10|  9|8|7|6|5| 4| 3|2| 1|0| <- bit number
 * |     ...            |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
 * | OFFSET (14->63) | TYPE (9-13)  |0|0|X|X| X| X|X|SD|0| <- swp entry
 *
 * G (8) is aliased and used as a PROT_NONE indicator for
 * !present ptes.  We need to start storing swap entries above
 * there.  We also need to avoid using A and D because of an
 * erratum where they can be incorrectly set by hardware on
 * non-present PTEs.
 *
 * SD (1) in swp entry is used to store soft dirty bit, which helps us
 * remember soft dirty over page migration.
 *
 * Bit 7 in swp entry should be 0 because pmd_present checks not only P,
 * but also L and G.
 */
#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
#define SWP_TYPE_BITS 5
/* Place the offset above the type: */
#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(x)			(((x).val >> (SWP_TYPE_FIRST_BIT)) \
					 & ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)			((x).val >> SWP_OFFSET_FIRST_BIT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					 ((type) << (SWP_TYPE_FIRST_BIT)) \
					 | ((offset) << SWP_OFFSET_FIRST_BIT) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val((pmd)) })
#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })
#define __swp_entry_to_pmd(x)		((pmd_t) { .pmd = (x).val })
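
/*
 * Worked example (assuming _PAGE_BIT_PROTNONE == 8, i.e. type in bits
 * 9-13 and offset starting at bit 14, as in the layout above):
 *
 *	__swp_entry(1, 2).val == (1 << 9) | (2 << 14) == 0x8200
 *	__swp_type(entry)     == (0x8200 >> 9) & 0x1f == 1
 *	__swp_offset(entry)   == 0x8200 >> 14         == 2
 */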

extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init()   do { } while (0)
#define check_pgt_cache()      do { } while (0)

#define PAGE_AGP    PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/* fs/proc/kcore.c */
#define	kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define	kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)

#define __HAVE_ARCH_PTE_SAME

#define vmemmap ((struct page *)VMEMMAP_START)

extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
#define gup_fast_permitted gup_fast_permitted
static inline bool gup_fast_permitted(unsigned long start, int nr_pages,
		int write)
{
	unsigned long len, end;

	len = (unsigned long)nr_pages << PAGE_SHIFT;
	end = start + len;
	if (end < start)
		return false;
	if (end >> __VIRTUAL_MASK_SHIFT)
		return false;
	return true;
}

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_64_H */