/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm_types.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
#include <asm/invpcid.h>
#include <asm/pti.h>
#include <asm/processor-flags.h>
#include <asm/pgtable.h>

void __flush_tlb_all(void);

#define TLB_FLUSH_ALL		-1UL
#define TLB_GENERATION_INVALID	0

void cr4_update_irqsoff(unsigned long set, unsigned long clear);
unsigned long cr4_read_shadow(void);

/* Set in this cpu's CR4. */
static inline void cr4_set_bits_irqsoff(unsigned long mask)
{
	cr4_update_irqsoff(mask, 0);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits_irqsoff(unsigned long mask)
{
	cr4_update_irqsoff(0, mask);
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long flags;

	local_irq_save(flags);
	cr4_set_bits_irqsoff(mask);
	local_irq_restore(flags);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long flags;

	local_irq_save(flags);
	cr4_clear_bits_irqsoff(mask);
	local_irq_restore(flags);
}
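
/*
 * Illustrative sketch, not part of this header: a typical feature-setup
 * path sets a CR4 bit through these helpers so that the per-CPU cr4
 * shadow stays coherent with the hardware register.  The function name
 * below is hypothetical; X86_CR4_SMEP is just an example bit.
 *
 *	static void example_enable_smep(void)
 *	{
 *		if (boot_cpu_has(X86_FEATURE_SMEP))
 *			cr4_set_bits(X86_CR4_SMEP);
 *	}
 *
 * Callers that already run with interrupts disabled would use
 * cr4_set_bits_irqsoff() instead and skip the IRQ save/restore.
 */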

#ifndef MODULE
/*
 * 6 because 6 should be plenty and struct tlb_state will fit in two cache
 * lines.
 */
#define TLB_NR_DYN_ASIDS	6

struct tlb_context {
	u64 ctx_id;
	u64 tlb_gen;
};

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on.  This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 *
	 * During switch_mm_irqs_off(), loaded_mm will be set to
	 * LOADED_MM_SWITCHING during the brief interrupts-off window
	 * when CR3 and loaded_mm would otherwise be inconsistent.  This
	 * is for nmi_uaccess_okay()'s benefit.
	 */
	struct mm_struct *loaded_mm;

#define LOADED_MM_SWITCHING ((struct mm_struct *)1UL)

	/* Last user mm for optimizing IBPB */
	union {
		struct mm_struct	*last_user_mm;
		unsigned long		last_user_mm_spec;
	};

	u16 loaded_mm_asid;
	u16 next_asid;

	/*
	 * If set we changed the page tables in such a way that we
	 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
	 * This tells us to go invalidate all the non-loaded ctxs[]
	 * on the next context switch.
	 *
	 * The current ctx was kept up-to-date as it ran and does not
	 * need to be invalidated.
	 */
	bool invalidate_other;

#ifdef CONFIG_ADDRESS_MASKING
	/*
	 * Active LAM mode.
	 *
	 * X86_CR3_LAM_U57/U48 shifted right by X86_CR3_LAM_U57_BIT, or 0
	 * if LAM is disabled.
	 */
	u8 lam;
#endif

	/*
	 * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
	 * the corresponding user PCID needs a flush next time we
	 * switch to it; see SWITCH_TO_USER_CR3.
	 */
	unsigned short user_pcid_flush_mask;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;

	/*
	 * This is a list of all contexts that might exist in the TLB.
	 * There is one per ASID that we use, and the ASID (what the
	 * CPU calls PCID) is the index into ctxs[].
	 *
	 * For each context, ctx_id indicates which mm the TLB's user
	 * entries came from.  As an invariant, the TLB will never
	 * contain entries that are out-of-date with respect to the
	 * tlb_gen recorded in the list for that mm.
	 *
	 * To be clear, this means that it's legal for the TLB code to
	 * flush the TLB without updating tlb_gen.  This can happen
	 * (for now, at least) due to paravirt remote flushes.
	 *
	 * NB: context 0 is a bit special, since it's also used by
	 * various bits of init code.  This is fine -- code that
	 * isn't aware of PCID will end up harmlessly flushing
	 * context 0.
	 */
	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate);

struct tlb_state_shared {
	/*
	 * We can be in one of several states:
	 *
	 *  - Actively using an mm.  Our CPU's bit will be set in
	 *    mm_cpumask(loaded_mm) and is_lazy == false;
	 *
	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
	 *
	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
	 *    We're heuristically guessing that the CR3 load we
	 *    skipped more than makes up for the overhead added by
	 *    lazy mode.
	 */
	bool is_lazy;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);

bool nmi_uaccess_okay(void);
#define nmi_uaccess_okay nmi_uaccess_okay

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

extern void initialize_tlbstate_and_flush(void);

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_multi(cpumask, info) flushes TLBs on multiple cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
	/*
	 * We support several kinds of flushes.
	 *
	 * - Fully flush a single mm.  .mm will be set, .end will be
	 *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
	 *   which the IPI sender is trying to catch us up.
	 *
	 * - Partially flush a single mm.  .mm will be set, .start and
	 *   .end will indicate the range, and .new_tlb_gen will be set
	 *   such that the changes between generation .new_tlb_gen-1 and
	 *   .new_tlb_gen are entirely contained in the indicated range.
	 *
	 * - Fully flush all mms whose tlb_gens have been updated.  .mm
	 *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
	 *   will be zero.
	 */
	struct mm_struct	*mm;
	unsigned long		start;
	unsigned long		end;
	u64			new_tlb_gen;
	unsigned int		initiating_cpu;
	u8			stride_shift;
	u8			freed_tables;
};

void flush_tlb_local(void);
void flush_tlb_one_user(unsigned long addr);
void flush_tlb_one_kernel(unsigned long addr);
void flush_tlb_multi(const struct cpumask *cpumask,
		     const struct flush_tlb_info *info);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#define flush_tlb_mm(mm)						\
		flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)

#define flush_tlb_range(vma, start, end)				\
	flush_tlb_mm_range((vma)->vm_mm, start, end,			\
			   ((vma)->vm_flags & VM_HUGETLB)		\
				? huge_page_shift(hstate_vma(vma))	\
				: PAGE_SHIFT, false)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned int stride_shift,
				bool freed_tables);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
}
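
/*
 * Illustrative sketch, not part of this header: a caller that changed
 * protections on an existing 4k-granular mapping would flush the range
 * like this.  The function name is hypothetical.  stride_shift describes
 * the flush granularity (one flush per 1 << stride_shift bytes), and
 * freed_tables should be true whenever page-table pages themselves were
 * freed, so that paging-structure caches are invalidated as well.
 *
 *	static void example_after_protection_change(struct mm_struct *mm,
 *						    unsigned long start,
 *						    unsigned long end)
 *	{
 *		flush_tlb_mm_range(mm, start, end, PAGE_SHIFT, false);
 *	}
 */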

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	/*
	 * Bump the generation count.  This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	return atomic64_inc_return(&mm->context.tlb_gen);
}

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

static inline bool pte_flags_need_flush(unsigned long oldflags,
					unsigned long newflags,
					bool ignore_access)
{
	/*
	 * Flags that require a flush when cleared but not when they are set.
	 * Only include flags that would not trigger spurious page-faults.
	 * Non-present entries are not cached.  Hardware would set the
	 * dirty/access bit if needed without a fault.
	 */
	const pteval_t flush_on_clear = _PAGE_DIRTY | _PAGE_PRESENT |
					_PAGE_ACCESSED;
	const pteval_t software_flags = _PAGE_SOFTW1 | _PAGE_SOFTW2 |
					_PAGE_SOFTW3 | _PAGE_SOFTW4;
	const pteval_t flush_on_change = _PAGE_RW | _PAGE_USER | _PAGE_PWT |
			  _PAGE_PCD | _PAGE_PSE | _PAGE_GLOBAL | _PAGE_PAT |
			  _PAGE_PAT_LARGE | _PAGE_PKEY_BIT0 | _PAGE_PKEY_BIT1 |
			  _PAGE_PKEY_BIT2 | _PAGE_PKEY_BIT3 | _PAGE_NX;
	unsigned long diff = oldflags ^ newflags;

	BUILD_BUG_ON(flush_on_clear & software_flags);
	BUILD_BUG_ON(flush_on_clear & flush_on_change);
	BUILD_BUG_ON(flush_on_change & software_flags);

	/* Ignore software flags */
	diff &= ~software_flags;

	if (ignore_access)
		diff &= ~_PAGE_ACCESSED;

	/*
	 * Were any of the 'flush_on_clear' flags cleared between 'oldflags'
	 * and 'newflags'?
	 */
	if (diff & oldflags & flush_on_clear)
		return true;

	/* Flush on modified flags. */
	if (diff & flush_on_change)
		return true;

	/* Ensure there are no flags that were left behind */
	if (IS_ENABLED(CONFIG_DEBUG_VM) &&
	    (diff & ~(flush_on_clear | software_flags | flush_on_change))) {
		VM_WARN_ON_ONCE(1);
		return true;
	}

	return false;
}

/*
 * pte_needs_flush() checks whether permissions were demoted and require a
 * flush.  It should only be used for userspace PTEs.
 */
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	/* !PRESENT -> * ; no need for flush */
	if (!(pte_flags(oldpte) & _PAGE_PRESENT))
		return false;

	/* PFN changed ; needs flush */
	if (pte_pfn(oldpte) != pte_pfn(newpte))
		return true;

	/*
	 * Check PTE flags; ignore the access bit; see comment in
	 * ptep_clear_flush_young().
	 */
	return pte_flags_need_flush(pte_flags(oldpte), pte_flags(newpte),
				    true);
}
#define pte_needs_flush pte_needs_flush
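
/*
 * Illustrative sketch, not part of this header: how a write-protect path
 * might use pte_needs_flush().  Clearing _PAGE_RW on a present PTE is in
 * the "flush on change" set above, so the flush fires; transitions that
 * merely set _PAGE_ACCESSED or _PAGE_DIRTY are skipped, since a stale
 * TLB entry is harmless there.  The function name is hypothetical.
 *
 *	static void example_wrprotect(struct vm_area_struct *vma,
 *				      unsigned long addr, pte_t *ptep)
 *	{
 *		pte_t old = *ptep;
 *		pte_t new = pte_wrprotect(old);
 *
 *		set_pte_at(vma->vm_mm, addr, ptep, new);
 *		if (pte_needs_flush(old, new))
 *			flush_tlb_page(vma, addr);
 *	}
 */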

/*
 * huge_pmd_needs_flush() checks whether permissions were demoted and require a
 * flush.  It should only be used for userspace huge PMDs.
 */
static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	/* !PRESENT -> * ; no need for flush */
	if (!(pmd_flags(oldpmd) & _PAGE_PRESENT))
		return false;

	/* PFN changed ; needs flush */
	if (pmd_pfn(oldpmd) != pmd_pfn(newpmd))
		return true;

	/*
	 * Check PMD flags; do not ignore the access bit; see
	 * pmdp_clear_flush_young().
	 */
	return pte_flags_need_flush(pmd_flags(oldpmd), pmd_flags(newpmd),
				    false);
}
#define huge_pmd_needs_flush huge_pmd_needs_flush

#ifdef CONFIG_ADDRESS_MASKING
static inline u64 tlbstate_lam_cr3_mask(void)
{
	u64 lam = this_cpu_read(cpu_tlbstate.lam);

	return lam << X86_CR3_LAM_U57_BIT;
}

static inline void set_tlbstate_lam_mode(struct mm_struct *mm)
{
	this_cpu_write(cpu_tlbstate.lam,
		       mm->context.lam_cr3_mask >> X86_CR3_LAM_U57_BIT);
}

#else

static inline u64 tlbstate_lam_cr3_mask(void)
{
	return 0;
}

static inline void set_tlbstate_lam_mode(struct mm_struct *mm)
{
}
#endif
#endif /* !MODULE */

static inline void __native_tlb_flush_global(unsigned long cr4)
{
	/*
	 * Toggling CR4.PGE flushes the entire TLB, including global
	 * entries: clear PGE, then restore the original CR4 value.
	 */
	native_write_cr4(cr4 ^ X86_CR4_PGE);
	native_write_cr4(cr4);
}
#endif /* _ASM_X86_TLBFLUSH_H */