/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e)							\
	pr_err("%s:%d: bad pte %p(%08lx%08lx)\n",			\
	       __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e)							\
	pr_err("%s:%d: bad pmd %p(%016Lx)\n",				\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e)							\
	pr_err("%s:%d: bad pgd %p(%016Lx)\n",				\
	       __FILE__, __LINE__, &(e), pgd_val(e))

/*
 * Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use ptep_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	/* Write the high half first: the low half carries the present bit. */
	WRITE_ONCE(ptep->pte_high, pte.pte_high);
	smp_wmb();
	WRITE_ONCE(ptep->pte_low, pte.pte_low);
}
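
/*
 * Illustrative sketch only (hypothetical helper, not part of the kernel
 * API): one way a caller can honour the rule above when the old PTE may
 * still be live is to atomically clear the entry first, so any concurrent
 * hardware accessed/dirty-bit update is resolved, and only then install
 * the new value while the entry is not present.  Real callers go through
 * the ptep_get_and_clear()/set_pte() wrappers and handle locking and TLB
 * flushing themselves.
 */
#if 0	/* example only, never compiled */
static inline void example_replace_pte(pte_t *ptep, pte_t new_pte)
{
	pte_t old = native_ptep_get_and_clear(ptep);	/* atomic 64-bit clear */

	/* ... flush the TLB for the old mapping here if it was present ... */
	native_set_pte(ptep, new_pte);	/* safe: entry is now not present */
	(void)old;	/* old value available for A/D bit propagation */
}
#endif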

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pud.p4d.pgd = pti_set_user_pgtbl(&pudp->p4d.pgd, pud.p4d.pgd);
#endif
	set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	WRITE_ONCE(ptep->pte_low, 0);
	smp_wmb();
	WRITE_ONCE(ptep->pte_high, 0);
}

static inline void native_pmd_clear(pmd_t *pmdp)
{
	WRITE_ONCE(pmdp->pmd_low, 0);
	smp_wmb();
	WRITE_ONCE(pmdp->pmd_high, 0);
}

static inline void native_pud_clear(pud_t *pudp)
{
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 *
	 * Currently all places where pud_clear() is called are either
	 * followed by flush_tlb_mm() or don't need a TLB flush (x86_64
	 * code or pud_clear_bad()), so we don't need a TLB flush here.
	 */
}

#define pxx_xchg64(_pxx, _ptr, _val) ({					\
	_pxx##val_t *_p = (_pxx##val_t *)_ptr;				\
	_pxx##val_t _o = *_p;						\
	do { } while (!try_cmpxchg64(_p, &_o, (_val)));			\
	native_make_##_pxx(_o);						\
})
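
/*
 * For illustration (straightforward macro expansion, nothing beyond what
 * is defined above): pxx_xchg64(pte, ptep, 0ULL) expands roughly to
 *
 *	pteval_t *_p = (pteval_t *)ptep;
 *	pteval_t _o = *_p;
 *	do { } while (!try_cmpxchg64(_p, &_o, 0ULL));
 *	native_make_pte(_o);
 *
 * try_cmpxchg64() refreshes _o with the current value on failure, so the
 * loop retries until the 64-bit cmpxchg succeeds and _o holds the value
 * that was replaced, which is then returned as a pte_t.
 */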

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
	return pxx_xchg64(pte, ptep, 0ULL);
}

static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
{
	return pxx_xchg64(pmd, pmdp, 0ULL);
}

static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
{
	return pxx_xchg64(pud, pudp, 0ULL);
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
#endif

#ifndef pmdp_establish
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old;

	/*
	 * If the pmd has the present bit cleared, we can get away without
	 * the expensive cmpxchg64: we can update pmdp half by half without
	 * racing with anybody.
	 */
	if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
		/* xchg acts as a barrier before the high half is written */
		old.pmd_low = xchg(&pmdp->pmd_low, pmd.pmd_low);
		old.pmd_high = READ_ONCE(pmdp->pmd_high);
		WRITE_ONCE(pmdp->pmd_high, pmd.pmd_high);

		return old;
	}

	return pxx_xchg64(pmd, pmdp, pmd.pmd);
}
#endif
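
/*
 * Note (based on the generic pmdp_establish contract, not on anything in
 * this file): pmdp_establish() installs a new pmd and returns the old one
 * without flushing the TLB; callers such as pmdp_invalidate() in the THP
 * code are responsible for any flush that is required.
 */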

/* Encode and de-code a swap entry */
#define SWP_TYPE_BITS		5

#define SWP_OFFSET_FIRST_BIT	(_PAGE_BIT_PROTNONE + 1)

/* We always extract/encode the offset by shifting it all the way up, and then down again */
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
#define __swp_type(x)			(((x).val) & ((1UL << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)			((x).val >> SWP_TYPE_BITS)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << SWP_TYPE_BITS})

/*
 * Normally, __swp_entry() converts a (type, offset) pair into the
 * arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the
 * result to the pte.  But here we have a 32-bit swp_entry_t and a 64-bit
 * pte, and need to use the whole 64 bits.  Thus, we shift the "real"
 * arch-dependent conversion to __swp_entry_to_pte() through the following
 * helper macro based on a 64-bit __swp_entry().
 */
#define __swp_pteval_entry(type, offset) ((pteval_t) { \
	(~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
	| ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) })

#define __swp_entry_to_pte(x)	((pte_t){ .pte = \
		__swp_pteval_entry(__swp_type(x), __swp_offset(x)) })
/*
 * Analogously, __pte_to_swp_entry() doesn't just extract the arch-dependent
 * swp_entry_t, but also has to convert it from 64-bit to the 32-bit
 * intermediate representation, using the following macros based on the
 * 64-bit __swp_type() and __swp_offset().
 */
#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))
#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT))

#define __pte_to_swp_entry(pte)	(__swp_entry(__pteval_swp_type(pte), \
					     __pteval_swp_offset(pte)))
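
/*
 * Resulting 64-bit PTE layout for a swapped-out entry (sketch; assumes
 * _PAGE_BIT_PROTNONE == 8, hence SWP_OFFSET_FIRST_BIT == 9 and
 * SWP_OFFSET_SHIFT == 14):
 *
 *	bits  0.. 8: zero (bit 0 is _PAGE_PRESENT, which must stay clear)
 *	bits  9..58: bitwise-inverted swap offset
 *	bits 59..63: swap type (SWP_TYPE_BITS == 5)
 *
 * The offset is stored inverted so that the physical-address bits of the
 * non-present PTE point at unpopulated, all-ones addresses; see
 * asm/pgtable-invert.h and the L1TF mitigation for the reasoning.
 */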

#include <asm/pgtable-invert.h>

#endif /* _ASM_X86_PGTABLE_3LEVEL_H */