#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e)							\
	pr_err("%s:%d: bad pte %p(%08lx%08lx)\n",			\
	       __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e)							\
	pr_err("%s:%d: bad pmd %p(%016Lx)\n",				\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e)							\
	pr_err("%s:%d: bad pgd %p(%016Lx)\n",				\
	       __FILE__, __LINE__, &(e), pgd_val(e))

/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use ptep_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}

#define pmd_read_atomic pmd_read_atomic
/*
 * pte_offset_map_lock() on 32-bit PAE kernels was reading the pmd_t with
 * a "*pmdp" dereference done by gcc. The problem is that in certain
 * places where pte_offset_map_lock() is called, concurrent page faults
 * are allowed if the mmap_sem is held for reading. An example is mincore
 * vs. page faults vs. MADV_DONTNEED. On the page fault side
 * pmd_populate() rightfully does a set_64bit(), but if we're reading the
 * pmd_t with a "*pmdp" on the mincore side, an SMP race can happen
 * because gcc will not read the 64 bits of the pmd atomically. To fix
 * this, all places running pte_offset_map_lock() while holding the
 * mmap_sem in read mode shall read the pmdp pointer using this
 * function, to know whether the pmd is null or not, and in turn to know
 * whether they can run pte_offset_map_lock() or pmd_trans_huge() or
 * other pmd operations.
 *
 * Without THP, if the mmap_sem is held for reading, the pmd can only
 * transition from null to not null while pmd_read_atomic() runs. So
 * we can always return atomic pmd values with this function.
 *
 * With THP, if the mmap_sem is held for reading, the pmd can become
 * trans_huge or none or point to a pte (and in turn become "stable")
 * at any time under pmd_read_atomic(). We could read it really
 * atomically here with an atomic64_read() for the THP-enabled case (and
 * it would be a whole lot simpler), but to avoid using cmpxchg8b we
 * only return an atomic pmdval if the low part of the pmdval is later
 * found stable (i.e. pointing to a pte). And we're returning a none
 * pmdval if the low part of the pmd is none. In some cases the high
 * and low parts of the pmdval returned may not be consistent if THP is
 * enabled (the low part may point to a previously mapped hugepage,
 * while the high part may point to a more recently mapped hugepage),
 * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part
 * of the pmd to be read atomically to decide if the pmd is unstable
 * or not, with the only exception of when the low part of the pmd is
 * zero, in which case we return a none pmd.
 */
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
	pmdval_t ret;
	u32 *tmp = (u32 *)pmdp;

	ret = (pmdval_t) (*tmp);
	if (ret) {
		/*
		 * If the low part is null, we must not read the high part
		 * or we can end up with a partial pmd.
		 */
		smp_rmb();
		ret |= ((pmdval_t)*(tmp + 1)) << 32;
	}

	return (pmd_t) { ret };
}
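
/*
 * Illustrative usage sketch (an assumption for documentation purposes,
 * not a definition in this header): a lockless reader that holds the
 * mmap_sem for read inspects the value returned by pmd_read_atomic()
 * before touching the page table it points to, along the lines of:
 *
 *	pmd_t pmdval = pmd_read_atomic(pmdp);
 *	if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
 *		bail out or handle the huge pmd;
 *	else
 *		proceed to pte_offset_map_lock(mm, pmdp, addr, &ptl);
 *
 * The exact call sequence is hypothetical; see
 * pmd_none_or_trans_huge_or_clear_bad(), mentioned above, for the
 * helper that relies on this function.
 */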

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = 0;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	u32 *tmp = (u32 *)pmd;
	*tmp = 0;
	smp_wmb();
	*(tmp + 1) = 0;
}

static inline void native_pud_clear(pud_t *pudp)
{
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 *
	 * Currently all places where pud_clear() is called are either
	 * followed by a flush_tlb_mm() or don't need a TLB flush
	 * (x86_64 code or pud_clear_bad()), so we don't need a TLB
	 * flush here.
	 */
}

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res;

	/* xchg acts as a barrier before the setting of the high bits */
	res.pte_low = xchg(&ptep->pte_low, 0);
	res.pte_high = ptep->pte_high;
	ptep->pte_high = 0;

	return res;
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif

#ifdef CONFIG_SMP
union split_pmd {
	struct {
		u32 pmd_low;
		u32 pmd_high;
	};
	pmd_t pmd;
};
static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
{
	union split_pmd res, *orig = (union split_pmd *)pmdp;

	/* xchg acts as a barrier before setting of the high bits */
	res.pmd_low = xchg(&orig->pmd_low, 0);
	res.pmd_high = orig->pmd_high;
	orig->pmd_high = 0;

	return res.pmd;
}
#else
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif

#ifdef CONFIG_SMP
union split_pud {
	struct {
		u32 pud_low;
		u32 pud_high;
	};
	pud_t pud;
};

static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
{
	union split_pud res, *orig = (union split_pud *)pudp;

	/* xchg acts as a barrier before setting of the high bits */
	res.pud_low = xchg(&orig->pud_low, 0);
	res.pud_high = orig->pud_high;
	orig->pud_high = 0;

	return res.pud;
}
#else
#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
#endif

/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t){ { .pte_high = (x).val } })
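
/*
 * Worked example (illustrative numbers, not taken from the original
 * source): with the 5-bit type field above, __swp_entry(2, 0x1234)
 * packs the entry as (0x1234 << 5) | 2 == 0x24682, which
 * __swp_entry_to_pte() stores in pte_high.  Decoding reverses this:
 * __swp_type() masks the low five bits (0x24682 & 0x1f == 2) and
 * __swp_offset() shifts them away (0x24682 >> 5 == 0x1234), recovering
 * the original (type, offset) pair.
 */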

#define gup_get_pte gup_get_pte
/*
 * WARNING: only to be used in the get_user_pages_fast() implementation.
 *
 * With get_user_pages_fast(), we walk down the pagetables without taking
 * any locks.  For this we would like to load the pointers atomically,
 * but that is not possible (without expensive cmpxchg8b) on PAE.  What
 * we do have is the guarantee that a PTE will only either go from not
 * present to present, or present to not present or both -- it will not
 * switch to a completely different present page without a TLB flush in
 * between; something that we are blocking by holding interrupts off.
 *
 * Setting ptes from not present to present goes:
 *
 *	ptep->pte_high = h;
 *	smp_wmb();
 *	ptep->pte_low = l;
 *
 * And present to not present goes:
 *
 *	ptep->pte_low = 0;
 *	smp_wmb();
 *	ptep->pte_high = 0;
 *
 * We must ensure here that the load of pte_low sees 'l' iff pte_high
 * sees 'h'.  We load pte_high *after* loading pte_low, which ensures we
 * don't see an older value of pte_high.  *Then* we recheck pte_low,
 * which ensures that we haven't picked up a changed pte high.  We might
 * have gotten rubbish values from pte_low and pte_high, but we are
 * guaranteed that pte_low will not have the present bit set *unless*
 * it is 'l'.  Because get_user_pages_fast() only operates on present ptes
 * we're safe.
 */
static inline pte_t gup_get_pte(pte_t *ptep)
{
	pte_t pte;

	do {
		pte.pte_low = ptep->pte_low;
		smp_rmb();
		pte.pte_high = ptep->pte_high;
		smp_rmb();
	} while (unlikely(pte.pte_low != ptep->pte_low));

	return pte;
}

#endif /* _ASM_X86_PGTABLE_3LEVEL_H */