xref: /openbmc/linux/arch/x86/include/asm/pgtable.h (revision 046b212a)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_X86_PGTABLE_H
3 #define _ASM_X86_PGTABLE_H
4 
5 #include <linux/mem_encrypt.h>
6 #include <asm/page.h>
7 #include <asm/pgtable_types.h>
8 
9 /*
10  * Macro to mark a page protection value as UC-
11  */
12 #define pgprot_noncached(prot)						\
13 	((boot_cpu_data.x86 > 3)					\
14 	 ? (__pgprot(pgprot_val(prot) |					\
15 		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
16 	 : (prot))
17 
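/*
 * Usage sketch (illustrative, not part of this header's API): a driver
 * downgrading a user mapping to uncached before remapping it might do
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *
 * On family > 3 CPUs this ORs in the UC- cache mode; a plain 386 has
 * no cache-control PTE bits, so the pgprot is returned unchanged.
 */
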
18 #ifndef __ASSEMBLY__
19 #include <linux/spinlock.h>
20 #include <asm/x86_init.h>
21 #include <asm/pkru.h>
22 #include <asm/fpu/api.h>
23 #include <asm/coco.h>
24 #include <asm-generic/pgtable_uffd.h>
25 #include <linux/page_table_check.h>
26 
27 extern pgd_t early_top_pgt[PTRS_PER_PGD];
28 bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
29 
30 struct seq_file;
31 void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm);
32 void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm,
33 				   bool user);
34 void ptdump_walk_pgd_level_checkwx(void);
35 void ptdump_walk_user_pgd_level_checkwx(void);
36 
37 /*
38  * Macros to add or remove encryption attribute
39  */
40 #define pgprot_encrypted(prot)	__pgprot(cc_mkenc(pgprot_val(prot)))
41 #define pgprot_decrypted(prot)	__pgprot(cc_mkdec(pgprot_val(prot)))
42 
43 #ifdef CONFIG_DEBUG_WX
44 #define debug_checkwx()		ptdump_walk_pgd_level_checkwx()
45 #define debug_checkwx_user()	ptdump_walk_user_pgd_level_checkwx()
46 #else
47 #define debug_checkwx()		do { } while (0)
48 #define debug_checkwx_user()	do { } while (0)
49 #endif
50 
51 /*
52  * ZERO_PAGE is a global shared page that is always zero: used
53  * for zero-mapped memory areas etc.
54  */
55 extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
56 	__visible;
57 #define ZERO_PAGE(vaddr) ((void)(vaddr),virt_to_page(empty_zero_page))
58 
59 extern spinlock_t pgd_lock;
60 extern struct list_head pgd_list;
61 
62 extern struct mm_struct *pgd_page_get_mm(struct page *page);
63 
64 extern pmdval_t early_pmd_flags;
65 
66 #ifdef CONFIG_PARAVIRT_XXL
67 #include <asm/paravirt.h>
68 #else  /* !CONFIG_PARAVIRT_XXL */
69 #define set_pte(ptep, pte)		native_set_pte(ptep, pte)
70 
71 #define set_pte_atomic(ptep, pte)					\
72 	native_set_pte_atomic(ptep, pte)
73 
74 #define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)
75 
76 #ifndef __PAGETABLE_P4D_FOLDED
77 #define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
78 #define pgd_clear(pgd)			(pgtable_l5_enabled() ? native_pgd_clear(pgd) : 0)
79 #endif
80 
81 #ifndef set_p4d
82 # define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
83 #endif
84 
85 #ifndef __PAGETABLE_PUD_FOLDED
86 #define p4d_clear(p4d)			native_p4d_clear(p4d)
87 #endif
88 
89 #ifndef set_pud
90 # define set_pud(pudp, pud)		native_set_pud(pudp, pud)
91 #endif
92 
93 #ifndef __PAGETABLE_PUD_FOLDED
94 #define pud_clear(pud)			native_pud_clear(pud)
95 #endif
96 
97 #define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
98 #define pmd_clear(pmd)			native_pmd_clear(pmd)
99 
100 #define pgd_val(x)	native_pgd_val(x)
101 #define __pgd(x)	native_make_pgd(x)
102 
103 #ifndef __PAGETABLE_P4D_FOLDED
104 #define p4d_val(x)	native_p4d_val(x)
105 #define __p4d(x)	native_make_p4d(x)
106 #endif
107 
108 #ifndef __PAGETABLE_PUD_FOLDED
109 #define pud_val(x)	native_pud_val(x)
110 #define __pud(x)	native_make_pud(x)
111 #endif
112 
113 #ifndef __PAGETABLE_PMD_FOLDED
114 #define pmd_val(x)	native_pmd_val(x)
115 #define __pmd(x)	native_make_pmd(x)
116 #endif
117 
118 #define pte_val(x)	native_pte_val(x)
119 #define __pte(x)	native_make_pte(x)
120 
121 #define arch_end_context_switch(prev)	do { } while (0)
122 #endif	/* CONFIG_PARAVIRT_XXL */
123 
124 /*
125  * The following only work if pte_present() is true.
126  * Undefined behaviour if not..
127  */
128 static inline bool pte_dirty(pte_t pte)
129 {
130 	return pte_flags(pte) & _PAGE_DIRTY_BITS;
131 }
132 
133 static inline bool pte_shstk(pte_t pte)
134 {
135 	return cpu_feature_enabled(X86_FEATURE_SHSTK) &&
136 	       (pte_flags(pte) & (_PAGE_RW | _PAGE_DIRTY)) == _PAGE_DIRTY;
137 }
138 
139 static inline int pte_young(pte_t pte)
140 {
141 	return pte_flags(pte) & _PAGE_ACCESSED;
142 }
143 
144 static inline bool pmd_dirty(pmd_t pmd)
145 {
146 	return pmd_flags(pmd) & _PAGE_DIRTY_BITS;
147 }
148 
149 static inline bool pmd_shstk(pmd_t pmd)
150 {
151 	return cpu_feature_enabled(X86_FEATURE_SHSTK) &&
152 	       (pmd_flags(pmd) & (_PAGE_RW | _PAGE_DIRTY | _PAGE_PSE)) ==
153 	       (_PAGE_DIRTY | _PAGE_PSE);
154 }
155 
156 #define pmd_young pmd_young
157 static inline int pmd_young(pmd_t pmd)
158 {
159 	return pmd_flags(pmd) & _PAGE_ACCESSED;
160 }
161 
162 static inline bool pud_dirty(pud_t pud)
163 {
164 	return pud_flags(pud) & _PAGE_DIRTY_BITS;
165 }
166 
167 static inline int pud_young(pud_t pud)
168 {
169 	return pud_flags(pud) & _PAGE_ACCESSED;
170 }
171 
172 static inline int pte_write(pte_t pte)
173 {
174 	/*
175 	 * Shadow stack pages are logically writable, but do not have
176 	 * _PAGE_RW.  Check for them separately from _PAGE_RW itself.
177 	 */
178 	return (pte_flags(pte) & _PAGE_RW) || pte_shstk(pte);
179 }
180 
181 #define pmd_write pmd_write
182 static inline int pmd_write(pmd_t pmd)
183 {
184 	/*
185 	 * Shadow stack pages are logically writable, but do not have
186 	 * _PAGE_RW.  Check for them separately from _PAGE_RW itself.
187 	 */
188 	return (pmd_flags(pmd) & _PAGE_RW) || pmd_shstk(pmd);
189 }
190 
191 #define pud_write pud_write
192 static inline int pud_write(pud_t pud)
193 {
194 	return pud_flags(pud) & _PAGE_RW;
195 }
196 
197 static inline int pte_huge(pte_t pte)
198 {
199 	return pte_flags(pte) & _PAGE_PSE;
200 }
201 
202 static inline int pte_global(pte_t pte)
203 {
204 	return pte_flags(pte) & _PAGE_GLOBAL;
205 }
206 
207 static inline int pte_exec(pte_t pte)
208 {
209 	return !(pte_flags(pte) & _PAGE_NX);
210 }
211 
212 static inline int pte_special(pte_t pte)
213 {
214 	return pte_flags(pte) & _PAGE_SPECIAL;
215 }
216 
217 /* Entries that were set to PROT_NONE are inverted */
218 
219 static inline u64 protnone_mask(u64 val);
220 
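/*
 * Background note: PROT_NONE entries have their PFN bits inverted as
 * an L1TF mitigation, so a not-present entry never holds a
 * valid-looking physical address.  protnone_mask() returns all-ones
 * for such an entry and 0 otherwise; XOR-ing a value with it therefore
 * applies or undoes the inversion, as pte_pfn() below does.
 */
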
221 #define PFN_PTE_SHIFT	PAGE_SHIFT
222 
223 static inline unsigned long pte_pfn(pte_t pte)
224 {
225 	phys_addr_t pfn = pte_val(pte);
226 	pfn ^= protnone_mask(pfn);
227 	return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
228 }
229 
230 static inline unsigned long pmd_pfn(pmd_t pmd)
231 {
232 	phys_addr_t pfn = pmd_val(pmd);
233 	pfn ^= protnone_mask(pfn);
234 	return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
235 }
236 
237 static inline unsigned long pud_pfn(pud_t pud)
238 {
239 	phys_addr_t pfn = pud_val(pud);
240 	pfn ^= protnone_mask(pfn);
241 	return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
242 }
243 
244 static inline unsigned long p4d_pfn(p4d_t p4d)
245 {
246 	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
247 }
248 
249 static inline unsigned long pgd_pfn(pgd_t pgd)
250 {
251 	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
252 }
253 
254 #define p4d_leaf	p4d_large
255 static inline int p4d_large(p4d_t p4d)
256 {
257 	/* No 512 GiB pages yet */
258 	return 0;
259 }
260 
261 #define pte_page(pte)	pfn_to_page(pte_pfn(pte))
262 
263 #define pmd_leaf	pmd_large
264 static inline int pmd_large(pmd_t pte)
265 {
266 	return pmd_flags(pte) & _PAGE_PSE;
267 }
268 
269 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
270 /* NOTE: when testing for a huge page, also consider pmd_devmap(), or use pmd_large() */
271 static inline int pmd_trans_huge(pmd_t pmd)
272 {
273 	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
274 }
275 
276 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
277 static inline int pud_trans_huge(pud_t pud)
278 {
279 	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
280 }
281 #endif
282 
283 #define has_transparent_hugepage has_transparent_hugepage
284 static inline int has_transparent_hugepage(void)
285 {
286 	return boot_cpu_has(X86_FEATURE_PSE);
287 }
288 
289 #ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
290 static inline int pmd_devmap(pmd_t pmd)
291 {
292 	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
293 }
294 
295 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
296 static inline int pud_devmap(pud_t pud)
297 {
298 	return !!(pud_val(pud) & _PAGE_DEVMAP);
299 }
300 #else
301 static inline int pud_devmap(pud_t pud)
302 {
303 	return 0;
304 }
305 #endif
306 
307 static inline int pgd_devmap(pgd_t pgd)
308 {
309 	return 0;
310 }
311 #endif
312 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
313 
314 static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
315 {
316 	pteval_t v = native_pte_val(pte);
317 
318 	return native_make_pte(v | set);
319 }
320 
321 static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
322 {
323 	pteval_t v = native_pte_val(pte);
324 
325 	return native_make_pte(v & ~clear);
326 }
327 
328 /*
329  * Write protection operations can result in Dirty=1,Write=0 PTEs. But in the
330  * case of X86_FEATURE_USER_SHSTK, these PTEs denote shadow stack memory. So
331  * when creating dirty, write-protected memory, a software bit is used:
332  * _PAGE_BIT_SAVED_DIRTY. The following functions take a PTE and transition the
333  * Dirty bit to SavedDirty, and vice versa.
334  *
335  * This shifting is only done if needed. In the case of shifting
336  * Dirty->SavedDirty, the condition is if the PTE is Write=0. In the case of
337  * shifting SavedDirty->Dirty, the condition is Write=1.
338  */
339 static inline pgprotval_t mksaveddirty_shift(pgprotval_t v)
340 {
341 	pgprotval_t cond = (~v >> _PAGE_BIT_RW) & 1;
342 
343 	v |= ((v >> _PAGE_BIT_DIRTY) & cond) << _PAGE_BIT_SAVED_DIRTY;
344 	v &= ~(cond << _PAGE_BIT_DIRTY);
345 
346 	return v;
347 }
348 
349 static inline pgprotval_t clear_saveddirty_shift(pgprotval_t v)
350 {
351 	pgprotval_t cond = (v >> _PAGE_BIT_RW) & 1;
352 
353 	v |= ((v >> _PAGE_BIT_SAVED_DIRTY) & cond) << _PAGE_BIT_DIRTY;
354 	v &= ~(cond << _PAGE_BIT_SAVED_DIRTY);
355 
356 	return v;
357 }
358 
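/*
 * Worked example (illustrative): feed a value with Write=0,Dirty=1
 * through mksaveddirty_shift().  cond = (~v >> _PAGE_BIT_RW) & 1
 * evaluates to 1 because _PAGE_RW is clear, so the Dirty bit is
 * copied into SavedDirty and then cleared:
 *
 *	Write=0,Dirty=1,SavedDirty=0  ->  Write=0,Dirty=0,SavedDirty=1
 *
 * For a Write=1 value, cond is 0 and v passes through unchanged; both
 * helpers are deliberately branchless.
 */
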
359 static inline pte_t pte_mksaveddirty(pte_t pte)
360 {
361 	pteval_t v = native_pte_val(pte);
362 
363 	v = mksaveddirty_shift(v);
364 	return native_make_pte(v);
365 }
366 
367 static inline pte_t pte_clear_saveddirty(pte_t pte)
368 {
369 	pteval_t v = native_pte_val(pte);
370 
371 	v = clear_saveddirty_shift(v);
372 	return native_make_pte(v);
373 }
374 
375 static inline pte_t pte_wrprotect(pte_t pte)
376 {
377 	pte = pte_clear_flags(pte, _PAGE_RW);
378 
379 	/*
380 	 * Blindly clearing _PAGE_RW might accidentally create
381 	 * a shadow stack PTE (Write=0,Dirty=1). Move the hardware
382 	 * dirty value to the software bit, if present.
383 	 */
384 	return pte_mksaveddirty(pte);
385 }
386 
387 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
388 static inline int pte_uffd_wp(pte_t pte)
389 {
390 	bool wp = pte_flags(pte) & _PAGE_UFFD_WP;
391 
392 #ifdef CONFIG_DEBUG_VM
393 	/*
394 	 * A set write bit on a wr-protect-marked present pte is fatal,
395 	 * because it means the uffd-wp bit will be ignored and writes will
396 	 * just go through.
397 	 *
398 	 * Use any chance of pgtable walking to verify this (e.g., when a
399 	 * page is swapped out or being migrated): if it fires, something
400 	 * is already wrong.  Tell the admin even before the process
401 	 * crashes, since it also points at broken pgtable setup.
402 	 */
403 	WARN_ON_ONCE(wp && pte_write(pte));
404 #endif
405 
406 	return wp;
407 }
408 
409 static inline pte_t pte_mkuffd_wp(pte_t pte)
410 {
411 	return pte_wrprotect(pte_set_flags(pte, _PAGE_UFFD_WP));
412 }
413 
414 static inline pte_t pte_clear_uffd_wp(pte_t pte)
415 {
416 	return pte_clear_flags(pte, _PAGE_UFFD_WP);
417 }
418 #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
419 
420 static inline pte_t pte_mkclean(pte_t pte)
421 {
422 	return pte_clear_flags(pte, _PAGE_DIRTY_BITS);
423 }
424 
425 static inline pte_t pte_mkold(pte_t pte)
426 {
427 	return pte_clear_flags(pte, _PAGE_ACCESSED);
428 }
429 
430 static inline pte_t pte_mkexec(pte_t pte)
431 {
432 	return pte_clear_flags(pte, _PAGE_NX);
433 }
434 
435 static inline pte_t pte_mkdirty(pte_t pte)
436 {
437 	pte = pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
438 
439 	return pte_mksaveddirty(pte);
440 }
441 
442 static inline pte_t pte_mkwrite_shstk(pte_t pte)
443 {
444 	pte = pte_clear_flags(pte, _PAGE_RW);
445 
446 	return pte_set_flags(pte, _PAGE_DIRTY);
447 }
448 
449 static inline pte_t pte_mkyoung(pte_t pte)
450 {
451 	return pte_set_flags(pte, _PAGE_ACCESSED);
452 }
453 
454 static inline pte_t pte_mkwrite_novma(pte_t pte)
455 {
456 	return pte_set_flags(pte, _PAGE_RW);
457 }
458 
459 struct vm_area_struct;
460 pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma);
461 #define pte_mkwrite pte_mkwrite
462 
463 static inline pte_t pte_mkhuge(pte_t pte)
464 {
465 	return pte_set_flags(pte, _PAGE_PSE);
466 }
467 
468 static inline pte_t pte_clrhuge(pte_t pte)
469 {
470 	return pte_clear_flags(pte, _PAGE_PSE);
471 }
472 
473 static inline pte_t pte_mkglobal(pte_t pte)
474 {
475 	return pte_set_flags(pte, _PAGE_GLOBAL);
476 }
477 
478 static inline pte_t pte_clrglobal(pte_t pte)
479 {
480 	return pte_clear_flags(pte, _PAGE_GLOBAL);
481 }
482 
483 static inline pte_t pte_mkspecial(pte_t pte)
484 {
485 	return pte_set_flags(pte, _PAGE_SPECIAL);
486 }
487 
488 static inline pte_t pte_mkdevmap(pte_t pte)
489 {
490 	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
491 }
492 
493 static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
494 {
495 	pmdval_t v = native_pmd_val(pmd);
496 
497 	return native_make_pmd(v | set);
498 }
499 
500 static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
501 {
502 	pmdval_t v = native_pmd_val(pmd);
503 
504 	return native_make_pmd(v & ~clear);
505 }
506 
507 /* See comments above mksaveddirty_shift() */
508 static inline pmd_t pmd_mksaveddirty(pmd_t pmd)
509 {
510 	pmdval_t v = native_pmd_val(pmd);
511 
512 	v = mksaveddirty_shift(v);
513 	return native_make_pmd(v);
514 }
515 
516 /* See comments above mksaveddirty_shift() */
517 static inline pmd_t pmd_clear_saveddirty(pmd_t pmd)
518 {
519 	pmdval_t v = native_pmd_val(pmd);
520 
521 	v = clear_saveddirty_shift(v);
522 	return native_make_pmd(v);
523 }
524 
525 static inline pmd_t pmd_wrprotect(pmd_t pmd)
526 {
527 	pmd = pmd_clear_flags(pmd, _PAGE_RW);
528 
529 	/*
530 	 * Blindly clearing _PAGE_RW might accidentally create
531 	 * a shadow stack PMD (RW=0, Dirty=1). Move the hardware
532 	 * dirty value to the software bit.
533 	 */
534 	return pmd_mksaveddirty(pmd);
535 }
536 
537 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
538 static inline int pmd_uffd_wp(pmd_t pmd)
539 {
540 	return pmd_flags(pmd) & _PAGE_UFFD_WP;
541 }
542 
543 static inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
544 {
545 	return pmd_wrprotect(pmd_set_flags(pmd, _PAGE_UFFD_WP));
546 }
547 
548 static inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
549 {
550 	return pmd_clear_flags(pmd, _PAGE_UFFD_WP);
551 }
552 #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
553 
554 static inline pmd_t pmd_mkold(pmd_t pmd)
555 {
556 	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
557 }
558 
559 static inline pmd_t pmd_mkclean(pmd_t pmd)
560 {
561 	return pmd_clear_flags(pmd, _PAGE_DIRTY_BITS);
562 }
563 
564 static inline pmd_t pmd_mkdirty(pmd_t pmd)
565 {
566 	pmd = pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
567 
568 	return pmd_mksaveddirty(pmd);
569 }
570 
571 static inline pmd_t pmd_mkwrite_shstk(pmd_t pmd)
572 {
573 	pmd = pmd_clear_flags(pmd, _PAGE_RW);
574 
575 	return pmd_set_flags(pmd, _PAGE_DIRTY);
576 }
577 
578 static inline pmd_t pmd_mkdevmap(pmd_t pmd)
579 {
580 	return pmd_set_flags(pmd, _PAGE_DEVMAP);
581 }
582 
583 static inline pmd_t pmd_mkhuge(pmd_t pmd)
584 {
585 	return pmd_set_flags(pmd, _PAGE_PSE);
586 }
587 
588 static inline pmd_t pmd_mkyoung(pmd_t pmd)
589 {
590 	return pmd_set_flags(pmd, _PAGE_ACCESSED);
591 }
592 
593 static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
594 {
595 	return pmd_set_flags(pmd, _PAGE_RW);
596 }
597 
598 pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
599 #define pmd_mkwrite pmd_mkwrite
600 
601 static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
602 {
603 	pudval_t v = native_pud_val(pud);
604 
605 	return native_make_pud(v | set);
606 }
607 
608 static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
609 {
610 	pudval_t v = native_pud_val(pud);
611 
612 	return native_make_pud(v & ~clear);
613 }
614 
615 /* See comments above mksaveddirty_shift() */
616 static inline pud_t pud_mksaveddirty(pud_t pud)
617 {
618 	pudval_t v = native_pud_val(pud);
619 
620 	v = mksaveddirty_shift(v);
621 	return native_make_pud(v);
622 }
623 
624 /* See comments above mksaveddirty_shift() */
625 static inline pud_t pud_clear_saveddirty(pud_t pud)
626 {
627 	pudval_t v = native_pud_val(pud);
628 
629 	v = clear_saveddirty_shift(v);
630 	return native_make_pud(v);
631 }
632 
633 static inline pud_t pud_mkold(pud_t pud)
634 {
635 	return pud_clear_flags(pud, _PAGE_ACCESSED);
636 }
637 
638 static inline pud_t pud_mkclean(pud_t pud)
639 {
640 	return pud_clear_flags(pud, _PAGE_DIRTY_BITS);
641 }
642 
643 static inline pud_t pud_wrprotect(pud_t pud)
644 {
645 	pud = pud_clear_flags(pud, _PAGE_RW);
646 
647 	/*
648 	 * Blindly clearing _PAGE_RW might accidentally create
649 	 * a shadow stack PUD (RW=0, Dirty=1). Move the hardware
650 	 * dirty value to the software bit.
651 	 */
652 	return pud_mksaveddirty(pud);
653 }
654 
655 static inline pud_t pud_mkdirty(pud_t pud)
656 {
657 	pud = pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
658 
659 	return pud_mksaveddirty(pud);
660 }
661 
662 static inline pud_t pud_mkdevmap(pud_t pud)
663 {
664 	return pud_set_flags(pud, _PAGE_DEVMAP);
665 }
666 
667 static inline pud_t pud_mkhuge(pud_t pud)
668 {
669 	return pud_set_flags(pud, _PAGE_PSE);
670 }
671 
672 static inline pud_t pud_mkyoung(pud_t pud)
673 {
674 	return pud_set_flags(pud, _PAGE_ACCESSED);
675 }
676 
677 static inline pud_t pud_mkwrite(pud_t pud)
678 {
679 	pud = pud_set_flags(pud, _PAGE_RW);
680 
681 	return pud_clear_saveddirty(pud);
682 }
683 
684 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
685 static inline int pte_soft_dirty(pte_t pte)
686 {
687 	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
688 }
689 
690 static inline int pmd_soft_dirty(pmd_t pmd)
691 {
692 	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
693 }
694 
695 static inline int pud_soft_dirty(pud_t pud)
696 {
697 	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
698 }
699 
700 static inline pte_t pte_mksoft_dirty(pte_t pte)
701 {
702 	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
703 }
704 
705 static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
706 {
707 	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
708 }
709 
710 static inline pud_t pud_mksoft_dirty(pud_t pud)
711 {
712 	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
713 }
714 
715 static inline pte_t pte_clear_soft_dirty(pte_t pte)
716 {
717 	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
718 }
719 
720 static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
721 {
722 	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
723 }
724 
725 static inline pud_t pud_clear_soft_dirty(pud_t pud)
726 {
727 	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
728 }
729 
730 #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
731 
732 /*
733  * Mask out unsupported bits in a present pgprot.  Non-present pgprots
734  * can use those bits for other purposes, so leave them be.
735  */
736 static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
737 {
738 	pgprotval_t protval = pgprot_val(pgprot);
739 
740 	if (protval & _PAGE_PRESENT)
741 		protval &= __supported_pte_mask;
742 
743 	return protval;
744 }
745 
746 static inline pgprotval_t check_pgprot(pgprot_t pgprot)
747 {
748 	pgprotval_t massaged_val = massage_pgprot(pgprot);
749 
750 	/* mmdebug.h can not be included here because of dependencies */
751 #ifdef CONFIG_DEBUG_VM
752 	WARN_ONCE(pgprot_val(pgprot) != massaged_val,
753 		  "attempted to set unsupported pgprot: %016llx "
754 		  "bits: %016llx supported: %016llx\n",
755 		  (u64)pgprot_val(pgprot),
756 		  (u64)pgprot_val(pgprot) ^ massaged_val,
757 		  (u64)__supported_pte_mask);
758 #endif
759 
760 	return massaged_val;
761 }
762 
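/*
 * Example (illustrative sketch): on a CPU without NX support,
 * __supported_pte_mask has _PAGE_NX clear, so a present pgprot
 * carrying _PAGE_NX is silently masked here:
 *
 *	pgprotval_t val = massage_pgprot(__pgprot(_PAGE_PRESENT | _PAGE_NX));
 *	// val == _PAGE_PRESENT
 *
 * check_pgprot() does the same but also WARN_ONCE()s under
 * CONFIG_DEBUG_VM.
 */
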
763 static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
764 {
765 	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
766 	pfn ^= protnone_mask(pgprot_val(pgprot));
767 	pfn &= PTE_PFN_MASK;
768 	return __pte(pfn | check_pgprot(pgprot));
769 }
770 
771 static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
772 {
773 	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
774 	pfn ^= protnone_mask(pgprot_val(pgprot));
775 	pfn &= PHYSICAL_PMD_PAGE_MASK;
776 	return __pmd(pfn | check_pgprot(pgprot));
777 }
778 
779 static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
780 {
781 	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
782 	pfn ^= protnone_mask(pgprot_val(pgprot));
783 	pfn &= PHYSICAL_PUD_PAGE_MASK;
784 	return __pud(pfn | check_pgprot(pgprot));
785 }
786 
787 static inline pmd_t pmd_mkinvalid(pmd_t pmd)
788 {
789 	return pfn_pmd(pmd_pfn(pmd),
790 		      __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
791 }
792 
793 static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
794 
795 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
796 {
797 	pteval_t val = pte_val(pte), oldval = val;
798 	pte_t pte_result;
799 
800 	/*
801 	 * Chop off the NX bit (if present), and add the NX portion of
802 	 * the newprot (if present):
803 	 */
804 	val &= _PAGE_CHG_MASK;
805 	val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
806 	val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
807 
808 	pte_result = __pte(val);
809 
810 	/*
811 	 * To avoid creating Write=0,Dirty=1 PTEs, pte_modify() needs to avoid:
812 	 *  1. Marking Write=0 PTEs Dirty=1
813 	 *  2. Marking Dirty=1 PTEs Write=0
814 	 *
815 	 * The first case cannot happen because the _PAGE_CHG_MASK will filter
816 	 * out any Dirty bit passed in newprot. Handle the second case by
817 	 * going through the mksaveddirty exercise. Only do this if the old
818 	 * value was Write=1 to avoid doing this on Shadow Stack PTEs.
819 	 */
820 	if (oldval & _PAGE_RW)
821 		pte_result = pte_mksaveddirty(pte_result);
822 	else
823 		pte_result = pte_clear_saveddirty(pte_result);
824 
825 	return pte_result;
826 }
827 
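/*
 * Example (illustrative): write-protecting a dirty, writable PTE,
 * e.g. pte_modify(pte, PAGE_READONLY), clears Write.  Because the old
 * value had Write=1, the result goes through pte_mksaveddirty(), so
 * Dirty=1 becomes SavedDirty=1 instead of leaving a stray
 * Write=0,Dirty=1 (shadow stack) combination behind.
 */
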
828 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
829 {
830 	pmdval_t val = pmd_val(pmd), oldval = val;
831 	pmd_t pmd_result;
832 
833 	val &= (_HPAGE_CHG_MASK & ~_PAGE_DIRTY);
834 	val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
835 	val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
836 
837 	pmd_result = __pmd(val);
838 
839 	/*
840 	 * To avoid creating Write=0,Dirty=1 PMDs, pmd_modify() needs to avoid:
841 	 *  1. Marking Write=0 PMDs Dirty=1
842 	 *  2. Marking Dirty=1 PMDs Write=0
843 	 *
844 	 * The first case cannot happen because the _PAGE_CHG_MASK will filter
845 	 * out any Dirty bit passed in newprot. Handle the second case by
846 	 * going through the mksaveddirty exercise. Only do this if the old
847 	 * value was Write=1 to avoid doing this on Shadow Stack PMDs.
848 	 */
849 	if (oldval & _PAGE_RW)
850 		pmd_result = pmd_mksaveddirty(pmd_result);
851 	else
852 		pmd_result = pmd_clear_saveddirty(pmd_result);
853 
854 	return pmd_result;
855 }
856 
857 /*
858  * mprotect needs to preserve PAT and encryption bits when updating
859  * vm_page_prot
860  */
861 #define pgprot_modify pgprot_modify
862 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
863 {
864 	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
865 	pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK;
866 	return __pgprot(preservebits | addbits);
867 }
868 
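/*
 * Usage sketch (illustrative, roughly what mm/mprotect.c does):
 * because _PAGE_CHG_MASK covers the caching and encryption bits, a
 * write-combining or encrypted mapping keeps its memtype across a
 * permission change:
 *
 *	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
 *					  vm_get_page_prot(newflags));
 */
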
869 #define pte_pgprot(x) __pgprot(pte_flags(x))
870 #define pmd_pgprot(x) __pgprot(pmd_flags(x))
871 #define pud_pgprot(x) __pgprot(pud_flags(x))
872 #define p4d_pgprot(x) __pgprot(p4d_flags(x))
873 
874 #define canon_pgprot(p) __pgprot(massage_pgprot(p))
875 
876 static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
877 					 enum page_cache_mode pcm,
878 					 enum page_cache_mode new_pcm)
879 {
880 	/*
881 	 * PAT type is always WB for untracked ranges, so no need to check.
882 	 */
883 	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
884 		return 1;
885 
886 	/*
887 	 * Certain new memtypes are not allowed with certain
888 	 * requested memtype:
889 	 * - request is uncached, return cannot be write-back
890 	 * - request is write-combine, return cannot be write-back
891 	 * - request is write-through, return cannot be write-back
892 	 * - request is write-through, return cannot be write-combine
893 	 */
894 	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
895 	     new_pcm == _PAGE_CACHE_MODE_WB) ||
896 	    (pcm == _PAGE_CACHE_MODE_WC &&
897 	     new_pcm == _PAGE_CACHE_MODE_WB) ||
898 	    (pcm == _PAGE_CACHE_MODE_WT &&
899 	     new_pcm == _PAGE_CACHE_MODE_WB) ||
900 	    (pcm == _PAGE_CACHE_MODE_WT &&
901 	     new_pcm == _PAGE_CACHE_MODE_WC)) {
902 		return 0;
903 	}
904 
905 	return 1;
906 }
907 
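/*
 * In short: the rejected combinations above are exactly those where
 * the returned memtype would give weaker (more cacheable) guarantees
 * than requested; a stricter return type is tolerated.
 */
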
908 pmd_t *populate_extra_pmd(unsigned long vaddr);
909 pte_t *populate_extra_pte(unsigned long vaddr);
910 
911 #ifdef CONFIG_PAGE_TABLE_ISOLATION
912 pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd);
913 
914 /*
915  * Take a PGD location (pgdp) and a pgd value that needs to be set there.
916  * Populates the user copy and returns the resulting PGD that must be set in
917  * the kernel copy of the page tables.
918  */
919 static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
920 {
921 	if (!static_cpu_has(X86_FEATURE_PTI))
922 		return pgd;
923 	return __pti_set_user_pgtbl(pgdp, pgd);
924 }
925 #else   /* CONFIG_PAGE_TABLE_ISOLATION */
926 static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
927 {
928 	return pgd;
929 }
930 #endif  /* CONFIG_PAGE_TABLE_ISOLATION */
931 
932 #endif	/* __ASSEMBLY__ */
933 
934 
935 #ifdef CONFIG_X86_32
936 # include <asm/pgtable_32.h>
937 #else
938 # include <asm/pgtable_64.h>
939 #endif
940 
941 #ifndef __ASSEMBLY__
942 #include <linux/mm_types.h>
943 #include <linux/mmdebug.h>
944 #include <linux/log2.h>
945 #include <asm/fixmap.h>
946 
947 static inline int pte_none(pte_t pte)
948 {
949 	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
950 }
951 
952 #define __HAVE_ARCH_PTE_SAME
953 static inline int pte_same(pte_t a, pte_t b)
954 {
955 	return a.pte == b.pte;
956 }
957 
958 static inline int pte_present(pte_t a)
959 {
960 	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
961 }
962 
963 #ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
964 static inline int pte_devmap(pte_t a)
965 {
966 	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
967 }
968 #endif
969 
970 #define pte_accessible pte_accessible
971 static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
972 {
973 	if (pte_flags(a) & _PAGE_PRESENT)
974 		return true;
975 
976 	if ((pte_flags(a) & _PAGE_PROTNONE) &&
977 			atomic_read(&mm->tlb_flush_pending))
978 		return true;
979 
980 	return false;
981 }
982 
983 static inline int pmd_present(pmd_t pmd)
984 {
985 	/*
986 	 * Checking for _PAGE_PSE is needed too because
987 	 * split_huge_page will temporarily clear the present bit (but
988 	 * the _PAGE_PSE flag will remain set at all times while the
989 	 * _PAGE_PRESENT bit is clear).
990 	 */
991 	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
992 }
993 
994 #ifdef CONFIG_NUMA_BALANCING
995 /*
996  * These work without NUMA balancing but the kernel does not care. See the
997  * comment in include/linux/pgtable.h
998  */
999 static inline int pte_protnone(pte_t pte)
1000 {
1001 	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
1002 		== _PAGE_PROTNONE;
1003 }
1004 
1005 static inline int pmd_protnone(pmd_t pmd)
1006 {
1007 	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
1008 		== _PAGE_PROTNONE;
1009 }
1010 #endif /* CONFIG_NUMA_BALANCING */
1011 
1012 static inline int pmd_none(pmd_t pmd)
1013 {
1014 	/* Only check the low word on 32-bit platforms, since it might be
1015 	 * out of sync with the upper half. */
1016 	unsigned long val = native_pmd_val(pmd);
1017 	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
1018 }
1019 
1020 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
1021 {
1022 	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
1023 }
1024 
1025 /*
1026  * Currently stuck as a macro due to indirect forward reference to
1027  * linux/mmzone.h's __section_mem_map_addr() definition:
1028  */
1029 #define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))
1030 
1031 /*
1032  * Conversion functions: convert a page and protection to a page entry,
1033  * and a page entry and page directory to the page they refer to.
1034  *
1035  * (Currently stuck as a macro because of indirect forward reference
1036  * to linux/mm.h:page_to_nid())
1037  */
1038 #define mk_pte(page, pgprot)						  \
1039 ({									  \
1040 	pgprot_t __pgprot = pgprot;					  \
1041 									  \
1042 	WARN_ON_ONCE((pgprot_val(__pgprot) & (_PAGE_DIRTY | _PAGE_RW)) == \
1043 		    _PAGE_DIRTY);					  \
1044 	pfn_pte(page_to_pfn(page), __pgprot);				  \
1045 })
1046 
1047 static inline int pmd_bad(pmd_t pmd)
1048 {
1049 	return (pmd_flags(pmd) & ~(_PAGE_USER | _PAGE_ACCESSED)) !=
1050 	       (_KERNPG_TABLE & ~_PAGE_ACCESSED);
1051 }
1052 
1053 static inline unsigned long pages_to_mb(unsigned long npg)
1054 {
1055 	return npg >> (20 - PAGE_SHIFT);
1056 }
1057 
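/*
 * E.g. with 4 KiB pages (PAGE_SHIFT == 12): pages_to_mb(512) ==
 * 512 >> 8 == 2, i.e. 512 pages span 2 MiB.
 */
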
1058 #if CONFIG_PGTABLE_LEVELS > 2
1059 static inline int pud_none(pud_t pud)
1060 {
1061 	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
1062 }
1063 
1064 static inline int pud_present(pud_t pud)
1065 {
1066 	return pud_flags(pud) & _PAGE_PRESENT;
1067 }
1068 
1069 static inline pmd_t *pud_pgtable(pud_t pud)
1070 {
1071 	return (pmd_t *)__va(pud_val(pud) & pud_pfn_mask(pud));
1072 }
1073 
1074 /*
1075  * Currently stuck as a macro due to indirect forward reference to
1076  * linux/mmzone.h's __section_mem_map_addr() definition:
1077  */
1078 #define pud_page(pud)	pfn_to_page(pud_pfn(pud))
1079 
1080 #define pud_leaf	pud_large
1081 static inline int pud_large(pud_t pud)
1082 {
1083 	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
1084 		(_PAGE_PSE | _PAGE_PRESENT);
1085 }
1086 
1087 static inline int pud_bad(pud_t pud)
1088 {
1089 	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
1090 }
1091 #else
1092 #define pud_leaf	pud_large
1093 static inline int pud_large(pud_t pud)
1094 {
1095 	return 0;
1096 }
1097 #endif	/* CONFIG_PGTABLE_LEVELS > 2 */
1098 
1099 #if CONFIG_PGTABLE_LEVELS > 3
1100 static inline int p4d_none(p4d_t p4d)
1101 {
1102 	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
1103 }
1104 
1105 static inline int p4d_present(p4d_t p4d)
1106 {
1107 	return p4d_flags(p4d) & _PAGE_PRESENT;
1108 }
1109 
1110 static inline pud_t *p4d_pgtable(p4d_t p4d)
1111 {
1112 	return (pud_t *)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
1113 }
1114 
1115 /*
1116  * Currently stuck as a macro due to indirect forward reference to
1117  * linux/mmzone.h's __section_mem_map_addr() definition:
1118  */
1119 #define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))
1120 
1121 static inline int p4d_bad(p4d_t p4d)
1122 {
1123 	unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;
1124 
1125 	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
1126 		ignore_flags |= _PAGE_NX;
1127 
1128 	return (p4d_flags(p4d) & ~ignore_flags) != 0;
1129 }
1130 #endif  /* CONFIG_PGTABLE_LEVELS > 3 */
1131 
1132 static inline unsigned long p4d_index(unsigned long address)
1133 {
1134 	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
1135 }
1136 
1137 #if CONFIG_PGTABLE_LEVELS > 4
1138 static inline int pgd_present(pgd_t pgd)
1139 {
1140 	if (!pgtable_l5_enabled())
1141 		return 1;
1142 	return pgd_flags(pgd) & _PAGE_PRESENT;
1143 }
1144 
1145 static inline unsigned long pgd_page_vaddr(pgd_t pgd)
1146 {
1147 	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
1148 }
1149 
1150 /*
1151  * Currently stuck as a macro due to indirect forward reference to
1152  * linux/mmzone.h's __section_mem_map_addr() definition:
1153  */
1154 #define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))
1155 
1156 /* to find an entry in a page-table-directory. */
1157 static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
1158 {
1159 	if (!pgtable_l5_enabled())
1160 		return (p4d_t *)pgd;
1161 	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
1162 }
1163 
1164 static inline int pgd_bad(pgd_t pgd)
1165 {
1166 	unsigned long ignore_flags = _PAGE_USER;
1167 
1168 	if (!pgtable_l5_enabled())
1169 		return 0;
1170 
1171 	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
1172 		ignore_flags |= _PAGE_NX;
1173 
1174 	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
1175 }
1176 
1177 static inline int pgd_none(pgd_t pgd)
1178 {
1179 	if (!pgtable_l5_enabled())
1180 		return 0;
1181 	/*
1182 	 * There is no need to do a workaround for the KNL stray
1183 	 * A/D bit erratum here.  PGDs only point to page tables
1184 	 * except on 32-bit non-PAE which is not supported on
1185 	 * KNL.
1186 	 */
1187 	return !native_pgd_val(pgd);
1188 }
1189 #endif	/* CONFIG_PGTABLE_LEVELS > 4 */
1190 
1191 #endif	/* __ASSEMBLY__ */
1192 
1193 #define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
1194 #define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
1195 
1196 #ifndef __ASSEMBLY__
1197 
1198 extern int direct_gbpages;
1199 void init_mem_mapping(void);
1200 void early_alloc_pgt_buf(void);
1201 extern void memblock_find_dma_reserve(void);
1202 void __init poking_init(void);
1203 unsigned long init_memory_mapping(unsigned long start,
1204 				  unsigned long end, pgprot_t prot);
1205 
1206 #ifdef CONFIG_X86_64
1207 extern pgd_t trampoline_pgd_entry;
1208 #endif
1209 
1210 /* local pte updates need not use xchg for locking */
1211 static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
1212 {
1213 	pte_t res = *ptep;
1214 
1215 	/* Pure native function needs no input for mm, addr */
1216 	native_pte_clear(NULL, 0, ptep);
1217 	return res;
1218 }
1219 
1220 static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
1221 {
1222 	pmd_t res = *pmdp;
1223 
1224 	native_pmd_clear(pmdp);
1225 	return res;
1226 }
1227 
1228 static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
1229 {
1230 	pud_t res = *pudp;
1231 
1232 	native_pud_clear(pudp);
1233 	return res;
1234 }
1235 
1236 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1237 			      pmd_t *pmdp, pmd_t pmd)
1238 {
1239 	page_table_check_pmd_set(mm, pmdp, pmd);
1240 	set_pmd(pmdp, pmd);
1241 }
1242 
1243 static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
1244 			      pud_t *pudp, pud_t pud)
1245 {
1246 	page_table_check_pud_set(mm, pudp, pud);
1247 	native_set_pud(pudp, pud);
1248 }
1249 
1250 /*
1251  * We only update the dirty/accessed state if we set
1252  * the dirty bit by hand in the kernel, since the hardware
1253  * will do the accessed bit for us, and we don't want to
1254  * race with other CPU's that might be updating the dirty
1255  * bit at the same time.
1256  */
1257 struct vm_area_struct;
1258 
1259 #define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
1260 extern int ptep_set_access_flags(struct vm_area_struct *vma,
1261 				 unsigned long address, pte_t *ptep,
1262 				 pte_t entry, int dirty);
1263 
1264 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
1265 extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
1266 				     unsigned long addr, pte_t *ptep);
1267 
1268 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
1269 extern int ptep_clear_flush_young(struct vm_area_struct *vma,
1270 				  unsigned long address, pte_t *ptep);
1271 
1272 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
1273 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
1274 				       pte_t *ptep)
1275 {
1276 	pte_t pte = native_ptep_get_and_clear(ptep);
1277 	page_table_check_pte_clear(mm, pte);
1278 	return pte;
1279 }
1280 
1281 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
1282 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
1283 					    unsigned long addr, pte_t *ptep,
1284 					    int full)
1285 {
1286 	pte_t pte;
1287 	if (full) {
1288 		/*
1289 		 * Full address destruction in progress; paravirt does not
1290 		 * The full address space is being torn down; paravirt does
1291 		 * not care about the updates and native needs no locking
1292 		pte = native_local_ptep_get_and_clear(ptep);
1293 		page_table_check_pte_clear(mm, pte);
1294 	} else {
1295 		pte = ptep_get_and_clear(mm, addr, ptep);
1296 	}
1297 	return pte;
1298 }
1299 
1300 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
1301 static inline void ptep_set_wrprotect(struct mm_struct *mm,
1302 				      unsigned long addr, pte_t *ptep)
1303 {
1304 	/*
1305 	 * Avoid accidentally creating shadow stack PTEs
1306 	 * (Write=0,Dirty=1).  Use cmpxchg() to prevent races with
1307 	 * the hardware setting Dirty=1.
1308 	 */
1309 	pte_t old_pte, new_pte;
1310 
1311 	old_pte = READ_ONCE(*ptep);
1312 	do {
1313 		new_pte = pte_wrprotect(old_pte);
1314 	} while (!try_cmpxchg((long *)&ptep->pte, (long *)&old_pte, *(long *)&new_pte));
1315 }
1316 
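/*
 * Note: on failure, try_cmpxchg() refreshes old_pte with the current
 * memory value, so each retry re-derives new_pte from whatever the
 * hardware wrote in the meantime (e.g. a concurrently set Dirty bit)
 * until the swap succeeds.
 */
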
1317 #define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)
1318 
1319 #define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))
1320 
1321 #define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
1322 extern int pmdp_set_access_flags(struct vm_area_struct *vma,
1323 				 unsigned long address, pmd_t *pmdp,
1324 				 pmd_t entry, int dirty);
1325 extern int pudp_set_access_flags(struct vm_area_struct *vma,
1326 				 unsigned long address, pud_t *pudp,
1327 				 pud_t entry, int dirty);
1328 
1329 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
1330 extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
1331 				     unsigned long addr, pmd_t *pmdp);
1332 extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
1333 				     unsigned long addr, pud_t *pudp);
1334 
1335 #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
1336 extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
1337 				  unsigned long address, pmd_t *pmdp);
1338 
1339 
1340 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
1341 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
1342 				       pmd_t *pmdp)
1343 {
1344 	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
1345 
1346 	page_table_check_pmd_clear(mm, pmd);
1347 
1348 	return pmd;
1349 }
1350 
1351 #define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
1352 static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
1353 					unsigned long addr, pud_t *pudp)
1354 {
1355 	pud_t pud = native_pudp_get_and_clear(pudp);
1356 
1357 	page_table_check_pud_clear(mm, pud);
1358 
1359 	return pud;
1360 }
1361 
1362 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
1363 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
1364 				      unsigned long addr, pmd_t *pmdp)
1365 {
1366 	/*
1367 	 * Avoid accidentally creating shadow stack PTEs
1368 	 * (Write=0,Dirty=1).  Use cmpxchg() to prevent races with
1369 	 * the hardware setting Dirty=1.
1370 	 */
1371 	pmd_t old_pmd, new_pmd;
1372 
1373 	old_pmd = READ_ONCE(*pmdp);
1374 	do {
1375 		new_pmd = pmd_wrprotect(old_pmd);
1376 	} while (!try_cmpxchg((long *)pmdp, (long *)&old_pmd, *(long *)&new_pmd));
1377 }
1378 
1379 #ifndef pmdp_establish
1380 #define pmdp_establish pmdp_establish
1381 static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
1382 		unsigned long address, pmd_t *pmdp, pmd_t pmd)
1383 {
1384 	page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
1385 	if (IS_ENABLED(CONFIG_SMP)) {
1386 		return xchg(pmdp, pmd);
1387 	} else {
1388 		pmd_t old = *pmdp;
1389 		WRITE_ONCE(*pmdp, pmd);
1390 		return old;
1391 	}
1392 }
1393 #endif
1394 
1395 #define __HAVE_ARCH_PMDP_INVALIDATE_AD
1396 extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
1397 				unsigned long address, pmd_t *pmdp);
1398 
1399 /*
1400  * Page table pages are page-aligned.  The lower half of the top
1401  * level is used for userspace and the top half for the kernel.
1402  *
1403  * Returns true for parts of the PGD that map userspace and
1404  * false for the parts that map the kernel.
1405  */
1406 static inline bool pgdp_maps_userspace(void *__ptr)
1407 {
1408 	unsigned long ptr = (unsigned long)__ptr;
1409 
1410 	return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
1411 }
1412 
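/*
 * Example (illustrative): on 64-bit, PGD_KERNEL_START is half of
 * PTRS_PER_PGD, so an entry in the first half of the pgd page
 * (offset / sizeof(pgd_t) < PGD_KERNEL_START) maps userspace and the
 * helper returns true; entries in the upper half map the kernel.
 */
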
1413 #define pgd_leaf	pgd_large
1414 static inline int pgd_large(pgd_t pgd) { return 0; }
1415 
1416 #ifdef CONFIG_PAGE_TABLE_ISOLATION
1417 /*
1418  * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
1419  * (8k-aligned and 8k in size).  The kernel copy occupies the first 4k and
1420  * the user copy the last 4k.  To switch between them, you
1421  * just need to flip bit 12 (PAGE_SHIFT) in their addresses.
1422  */
1423 #define PTI_PGTABLE_SWITCH_BIT	PAGE_SHIFT
1424 
1425 /*
1426  * This generates better code than the inline assembly in
1427  * __set_bit().
1428  */
1429 static inline void *ptr_set_bit(void *ptr, int bit)
1430 {
1431 	unsigned long __ptr = (unsigned long)ptr;
1432 
1433 	__ptr |= BIT(bit);
1434 	return (void *)__ptr;
1435 }
1436 static inline void *ptr_clear_bit(void *ptr, int bit)
1437 {
1438 	unsigned long __ptr = (unsigned long)ptr;
1439 
1440 	__ptr &= ~BIT(bit);
1441 	return (void *)__ptr;
1442 }
1443 
1444 static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
1445 {
1446 	return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
1447 }
1448 
1449 static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
1450 {
1451 	return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
1452 }
1453 
1454 static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
1455 {
1456 	return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
1457 }
1458 
1459 static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
1460 {
1461 	return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
1462 }
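
/*
 * Example (illustrative): for a kernel PGD page at, say,
 * 0xffff888001234000, kernel_to_user_pgdp() yields 0xffff888001235000
 * (bit 12 set) and user_to_kernel_pgdp() flips it back; the order-1
 * allocation guarantees both 4k halves exist.
 */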
1463 #endif /* CONFIG_PAGE_TABLE_ISOLATION */
1464 
1465 /*
1466  * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
1467  *
1468  *  dst - pointer to pgd range anywhere on a pgd page
1469  *  src - ""
1470  *  count - the number of pgds to copy.
1471  *
1472  * dst and src can be on the same page, but the range must not overlap,
1473  * and must not cross a page boundary.
1474  */
1475 static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
1476 {
1477 	memcpy(dst, src, count * sizeof(pgd_t));
1478 #ifdef CONFIG_PAGE_TABLE_ISOLATION
1479 	if (!static_cpu_has(X86_FEATURE_PTI))
1480 		return;
1481 	/* Clone the user space pgd as well */
1482 	memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
1483 	       count * sizeof(pgd_t));
1484 #endif
1485 }
1486 
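/*
 * Usage sketch (illustrative, roughly how a new mm inherits the
 * kernel mappings from the reference page tables):
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 *
 * With PTI enabled, the same call clones the user-space copy of those
 * entries as well.
 */
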
1487 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
1488 static inline int page_level_shift(enum pg_level level)
1489 {
1490 	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
1491 }
1492 static inline unsigned long page_level_size(enum pg_level level)
1493 {
1494 	return 1UL << page_level_shift(level);
1495 }
1496 static inline unsigned long page_level_mask(enum pg_level level)
1497 {
1498 	return ~(page_level_size(level) - 1);
1499 }
1500 
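/*
 * Arithmetic check: with 4 KiB pages and 512-entry tables,
 * PTE_SHIFT == 9, so page_level_shift(PG_LEVEL_2M) ==
 * (12 - 9) + 2 * 9 == 21, page_level_size(PG_LEVEL_2M) == 2 MiB, and
 * ANDing an address with page_level_mask(PG_LEVEL_2M) rounds it down
 * to a 2 MiB boundary.
 */
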
1501 /*
1502  * The x86 doesn't have any external MMU info: the kernel page
1503  * tables contain all the necessary information.
1504  */
1505 static inline void update_mmu_cache(struct vm_area_struct *vma,
1506 		unsigned long addr, pte_t *ptep)
1507 {
1508 }
1509 static inline void update_mmu_cache_range(struct vm_fault *vmf,
1510 		struct vm_area_struct *vma, unsigned long addr,
1511 		pte_t *ptep, unsigned int nr)
1512 {
1513 }
1514 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
1515 		unsigned long addr, pmd_t *pmd)
1516 {
1517 }
1518 static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
1519 		unsigned long addr, pud_t *pud)
1520 {
1521 }
1522 static inline pte_t pte_swp_mkexclusive(pte_t pte)
1523 {
1524 	return pte_set_flags(pte, _PAGE_SWP_EXCLUSIVE);
1525 }
1526 
1527 static inline int pte_swp_exclusive(pte_t pte)
1528 {
1529 	return pte_flags(pte) & _PAGE_SWP_EXCLUSIVE;
1530 }
1531 
1532 static inline pte_t pte_swp_clear_exclusive(pte_t pte)
1533 {
1534 	return pte_clear_flags(pte, _PAGE_SWP_EXCLUSIVE);
1535 }
1536 
1537 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
1538 static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
1539 {
1540 	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
1541 }
1542 
1543 static inline int pte_swp_soft_dirty(pte_t pte)
1544 {
1545 	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
1546 }
1547 
1548 static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
1549 {
1550 	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
1551 }
1552 
1553 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1554 static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
1555 {
1556 	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
1557 }
1558 
1559 static inline int pmd_swp_soft_dirty(pmd_t pmd)
1560 {
1561 	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
1562 }
1563 
1564 static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
1565 {
1566 	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
1567 }
1568 #endif
1569 #endif
1570 
1571 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
1572 static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
1573 {
1574 	return pte_set_flags(pte, _PAGE_SWP_UFFD_WP);
1575 }
1576 
1577 static inline int pte_swp_uffd_wp(pte_t pte)
1578 {
1579 	return pte_flags(pte) & _PAGE_SWP_UFFD_WP;
1580 }
1581 
1582 static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
1583 {
1584 	return pte_clear_flags(pte, _PAGE_SWP_UFFD_WP);
1585 }
1586 
1587 static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
1588 {
1589 	return pmd_set_flags(pmd, _PAGE_SWP_UFFD_WP);
1590 }
1591 
1592 static inline int pmd_swp_uffd_wp(pmd_t pmd)
1593 {
1594 	return pmd_flags(pmd) & _PAGE_SWP_UFFD_WP;
1595 }
1596 
1597 static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
1598 {
1599 	return pmd_clear_flags(pmd, _PAGE_SWP_UFFD_WP);
1600 }
1601 #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
1602 
1603 static inline u16 pte_flags_pkey(unsigned long pte_flags)
1604 {
1605 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
1606 	/* ifdef to avoid doing 59-bit shift on 32-bit values */
1607 	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
1608 #else
1609 	return 0;
1610 #endif
1611 }
1612 
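/*
 * Example (illustrative): a PTE mapped with protection key 5 has
 * 0b0101 in flag bits 59..62 (_PAGE_PKEY_MASK), so
 * pte_flags_pkey(pte_flags(pte)) == 5; without
 * CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS the helper compiles to 0.
 */
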
1613 static inline bool __pkru_allows_pkey(u16 pkey, bool write)
1614 {
1615 	u32 pkru = read_pkru();
1616 
1617 	if (!__pkru_allows_read(pkru, pkey))
1618 		return false;
1619 	if (write && !__pkru_allows_write(pkru, pkey))
1620 		return false;
1621 
1622 	return true;
1623 }
1624 
1625 /*
1626  * 'pteval' can come from a PTE, PMD or PUD.  We only check
1627  * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
1628  * same value on all 3 types.
1629  */
1630 static inline bool __pte_access_permitted(unsigned long pteval, bool write)
1631 {
1632 	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;
1633 
1634 	/*
1635 	 * Write=0,Dirty=1 PTEs are shadow stack, which the kernel
1636 	 * shouldn't generally allow access to, but since they
1637 	 * are already Write=0, the below logic covers both cases.
1638 	 */
1639 	if (write)
1640 		need_pte_bits |= _PAGE_RW;
1641 
1642 	if ((pteval & need_pte_bits) != need_pte_bits)
1643 		return 0;
1644 
1645 	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
1646 }
1647 
1648 #define pte_access_permitted pte_access_permitted
1649 static inline bool pte_access_permitted(pte_t pte, bool write)
1650 {
1651 	return __pte_access_permitted(pte_val(pte), write);
1652 }
1653 
1654 #define pmd_access_permitted pmd_access_permitted
1655 static inline bool pmd_access_permitted(pmd_t pmd, bool write)
1656 {
1657 	return __pte_access_permitted(pmd_val(pmd), write);
1658 }
1659 
1660 #define pud_access_permitted pud_access_permitted
1661 static inline bool pud_access_permitted(pud_t pud, bool write)
1662 {
1663 	return __pte_access_permitted(pud_val(pud), write);
1664 }
1665 
1666 #define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
1667 extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);
1668 
1669 static inline bool arch_has_pfn_modify_check(void)
1670 {
1671 	return boot_cpu_has_bug(X86_BUG_L1TF);
1672 }
1673 
1674 #define arch_has_hw_pte_young arch_has_hw_pte_young
1675 static inline bool arch_has_hw_pte_young(void)
1676 {
1677 	return true;
1678 }
1679 
1680 #define arch_check_zapped_pte arch_check_zapped_pte
1681 void arch_check_zapped_pte(struct vm_area_struct *vma, pte_t pte);
1682 
1683 #define arch_check_zapped_pmd arch_check_zapped_pmd
1684 void arch_check_zapped_pmd(struct vm_area_struct *vma, pmd_t pmd);
1685 
1686 #ifdef CONFIG_XEN_PV
1687 #define arch_has_hw_nonleaf_pmd_young arch_has_hw_nonleaf_pmd_young
1688 static inline bool arch_has_hw_nonleaf_pmd_young(void)
1689 {
1690 	return !cpu_feature_enabled(X86_FEATURE_XENPV);
1691 }
1692 #endif
1693 
1694 #ifdef CONFIG_PAGE_TABLE_CHECK
1695 static inline bool pte_user_accessible_page(pte_t pte)
1696 {
1697 	return (pte_val(pte) & _PAGE_PRESENT) && (pte_val(pte) & _PAGE_USER);
1698 }
1699 
1700 static inline bool pmd_user_accessible_page(pmd_t pmd)
1701 {
1702 	return pmd_leaf(pmd) && (pmd_val(pmd) & _PAGE_PRESENT) && (pmd_val(pmd) & _PAGE_USER);
1703 }
1704 
1705 static inline bool pud_user_accessible_page(pud_t pud)
1706 {
1707 	return pud_leaf(pud) && (pud_val(pud) & _PAGE_PRESENT) && (pud_val(pud) & _PAGE_USER);
1708 }
1709 #endif
1710 
1711 #endif	/* __ASSEMBLY__ */
1712 
1713 #endif /* _ASM_X86_PGTABLE_H */
1714