/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
#include <asm/invpcid.h>
#include <asm/pti.h>
#include <asm/processor-flags.h>

/*
 * The x86 feature is called PCID (Process Context IDentifier). It is similar
 * to what is traditionally called ASID on the RISC processors.
 *
 * We don't use the traditional ASID implementation, where each process/mm gets
 * its own ASID and flush/restart when we run out of ASID space.
 *
 * Instead we have a small per-cpu array of ASIDs and cache the last few mm's
 * that came by on this CPU, allowing cheaper switch_mm between processes on
 * this CPU.
 *
 * We end up with different spaces for different things. To avoid confusion we
 * use different names for each of them:
 *
 * ASID  - [0, TLB_NR_DYN_ASIDS-1]
 *         the canonical identifier for an mm
 *
 * kPCID - [1, TLB_NR_DYN_ASIDS]
 *         the value we write into the PCID part of CR3; corresponds to the
 *         ASID+1, because PCID 0 is special.
 *
 * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
 *         for KPTI each mm has two address spaces and thus needs two
 *         PCID values, but we can still do with a single ASID denomination
 *         for each mm. Corresponds to kPCID + 2048.
 *
 */

/* There are 12 bits of space for ASIDs in CR3 */
#define CR3_HW_ASID_BITS		12

/*
 * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
 * user/kernel switches
 */
#ifdef CONFIG_PAGE_TABLE_ISOLATION
# define PTI_CONSUMED_PCID_BITS	1
#else
# define PTI_CONSUMED_PCID_BITS	0
#endif

#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)

/*
 * ASIDs are zero-based: 0 -> MAX_ASID_AVAILABLE are valid. -1 below to
 * account for them being zero-based. Another -1 is because PCID 0 is
 * reserved for use by non-PCID-aware users.
 */
#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)

/*
 * 6 because 6 should be plenty and struct tlb_state will fit in two cache
 * lines.
 */
#define TLB_NR_DYN_ASIDS	6

/*
 * Given @asid, compute kPCID
 */
static inline u16 kern_pcid(u16 asid)
{
	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
	/*
	 * Make sure that the dynamic ASID space does not conflict with the
	 * bit we are using to switch between user and kernel ASIDs.
	 */
	BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));

	/*
	 * The ASID being passed in here should have respected the
	 * MAX_ASID_AVAILABLE and thus never have the switch bit set.
	 */
	VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
#endif
	/*
	 * The dynamically-assigned ASIDs that get passed in are small
	 * (<TLB_NR_DYN_ASIDS). They never have the high switch bit set,
	 * so do not bother to clear it.
	 *
	 * If PCID is on, ASID-aware code paths put the ASID+1 into the
	 * PCID bits. This serves two purposes. It prevents a nasty
	 * situation in which PCID-unaware code saves CR3, loads some other
	 * value (with PCID == 0), and then restores CR3, thus corrupting
	 * the TLB for ASID 0 if the saved ASID was nonzero. It also means
	 * that any bugs involving loading a PCID-enabled CR3 with
	 * CR4.PCIDE off will trigger deterministically.
	 */
	return asid + 1;
}
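
/*
 * Worked example (illustrative only; the values follow directly from the
 * definitions above and from user_pcid() below, assuming KPTI is enabled):
 *
 *	kern_pcid(0) == 1,  user_pcid(0) == 2049
 *	kern_pcid(1) == 2,  user_pcid(1) == 2050
 *	kern_pcid(5) == 6,  user_pcid(5) == 2054
 *
 * 2048 == 1 << X86_CR3_PTI_PCID_USER_BIT, the user/kernel switch bit;
 * PCID 0 stays reserved for PCID-unaware code.
 */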

/*
 * Given @asid, compute uPCID
 */
static inline u16 user_pcid(u16 asid)
{
	u16 ret = kern_pcid(asid);
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
#endif
	return ret;
}

static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
{
	if (static_cpu_has(X86_FEATURE_PCID)) {
		return __sme_pa(pgd) | kern_pcid(asid);
	} else {
		VM_WARN_ON_ONCE(asid != 0);
		return __sme_pa(pgd);
	}
}

static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
{
	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
	/*
	 * Use boot_cpu_has() instead of this_cpu_has() as this function
	 * might be called during early boot. This should work even after
	 * boot because all CPUs have the same capabilities:
	 */
	VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID));
	return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
}
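
/*
 * Usage sketch (illustrative only, not part of the kernel API): how a
 * context-switch path might load the new CR3 once an ASID has been chosen
 * for the incoming mm.  The real logic lives in switch_mm_irqs_off() in
 * arch/x86/mm/tlb.c; 'need_flush' stands in for its ASID-reuse decision.
 */
static inline void example_load_new_mm_cr3(struct mm_struct *next,
					   u16 new_asid, bool need_flush)
{
	if (need_flush)
		write_cr3(build_cr3(next->pgd, new_asid));
	else
		/* Keep TLB entries tagged with this PCID (needs CR4.PCIDE=1) */
		write_cr3(build_cr3_noflush(next->pgd, new_asid));
}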

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
#endif

static inline bool tlb_defer_switch_to_init_mm(void)
{
	/*
	 * If we have PCID, then switching to init_mm is reasonably
	 * fast. If we don't have PCID, then switching to init_mm is
	 * quite slow, so we try to defer it in the hopes that we can
	 * avoid it entirely. The latter approach runs the risk of
	 * receiving otherwise unnecessary IPIs.
	 *
	 * This choice is just a heuristic. The tlb code can handle this
	 * function returning true or false regardless of whether we have
	 * PCID.
	 */
	return !static_cpu_has(X86_FEATURE_PCID);
}

struct tlb_context {
	u64 ctx_id;
	u64 tlb_gen;
};

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on. This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 */
	struct mm_struct *loaded_mm;
	u16 loaded_mm_asid;
	u16 next_asid;
	/* last user mm's ctx id */
	u64 last_ctx_id;

	/*
	 * We can be in one of several states:
	 *
	 *  - Actively using an mm.  Our CPU's bit will be set in
	 *    mm_cpumask(loaded_mm) and is_lazy == false;
	 *
	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
	 *
	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
	 *    We're heuristically guessing that the CR3 load we
	 *    skipped more than makes up for the overhead added by
	 *    lazy mode.
	 */
	bool is_lazy;

	/*
	 * If set we changed the page tables in such a way that we
	 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
	 * This tells us to go invalidate all the non-loaded ctxs[]
	 * on the next context switch.
	 *
	 * The current ctx was kept up-to-date as it ran and does not
	 * need to be invalidated.
	 */
	bool invalidate_other;

	/*
	 * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
	 * the corresponding user PCID needs a flush next time we
	 * switch to it; see SWITCH_TO_USER_CR3.
	 */
	unsigned short user_pcid_flush_mask;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;

	/*
	 * This is a list of all contexts that might exist in the TLB.
	 * There is one per ASID that we use, and the ASID (what the
	 * CPU calls PCID) is the index into ctxs[].
	 *
	 * For each context, ctx_id indicates which mm the TLB's user
	 * entries came from.  As an invariant, the TLB will never
	 * contain entries that were already out-of-date as of the time
	 * the mm reached the recorded tlb_gen.
	 *
	 * To be clear, this means that it's legal for the TLB code to
	 * flush the TLB without updating tlb_gen.  This can happen
	 * (for now, at least) due to paravirt remote flushes.
	 *
	 * NB: context 0 is a bit special, since it's also used by
	 * various bits of init code.  This is fine -- code that
	 * isn't aware of PCID will end up harmlessly flushing
	 * context 0.
	 */
	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

static inline void __cr4_set(unsigned long cr4)
{
	lockdep_assert_irqs_disabled();
	this_cpu_write(cpu_tlbstate.cr4, cr4);
	__write_cr4(cr4);
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4, flags;

	local_irq_save(flags);
	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4)
		__cr4_set(cr4 | mask);
	local_irq_restore(flags);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4, flags;

	local_irq_save(flags);
	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4)
		__cr4_set(cr4 & ~mask);
	local_irq_restore(flags);
}

static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	__cr4_set(cr4 ^ mask);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}

/*
 * Mark all other ASIDs as invalid, preserving the current one.
 */
static inline void invalidate_other_asid(void)
{
	this_cpu_write(cpu_tlbstate.invalidate_other, true);
}

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags. This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}
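
/*
 * Usage sketch (illustrative only, not called anywhere): feature setup
 * code flips CR4 bits through the helpers above so that the per-cpu
 * shadow stays coherent with the hardware register, and queries the
 * shadow instead of issuing a slow, serializing CR4 read.
 */
static inline bool example_enable_smep(void)
{
	if (boot_cpu_has(X86_FEATURE_SMEP))
		cr4_set_bits(X86_CR4_SMEP);	/* shadow + HW updated together */

	/* Cheap query via the shadow -- no CR4 access: */
	return cr4_read_shadow() & X86_CR4_SMEP;
}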

extern void initialize_tlbstate_and_flush(void);

/*
 * Given an ASID, flush the corresponding user ASID.  We can delay this
 * until the next time we switch to it.
 *
 * See SWITCH_TO_USER_CR3.
 */
static inline void invalidate_user_asid(u16 asid)
{
	/* There is no user ASID if address space separation is off */
	if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		return;

	/*
	 * We only have a single ASID if PCID is off and the CR3
	 * write will have flushed it.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_PCID))
		return;

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	__set_bit(kern_pcid(asid),
		  (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
}

/*
 * flush the entire current user mapping
 */
static inline void __native_flush_tlb(void)
{
	/*
	 * Preemption or interrupts must be disabled to protect the access
	 * to the per CPU variable and to prevent being preempted between
	 * read_cr3() and write_cr3().
	 */
	WARN_ON_ONCE(preemptible());

	invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));

	/* If current->mm == NULL then the read_cr3() "borrows" an mm */
	native_write_cr3(__native_read_cr3());
}

/*
 * flush everything
 */
static inline void __native_flush_tlb_global(void)
{
	unsigned long cr4, flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 *
		 * Note, this works with CR4.PCIDE=0 or 1.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* toggle PGE */
	native_write_cr4(cr4 ^ X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);

	raw_local_irq_restore(flags);
}

/*
 * flush one page in the user mapping
 */
static inline void __native_flush_tlb_one_user(unsigned long addr)
{
	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);

	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	/*
	 * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1.
	 * Just use invalidate_user_asid() in case we are called early.
	 */
	if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE))
		invalidate_user_asid(loaded_mm_asid);
	else
		invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
}

/*
 * flush everything
 */
static inline void __flush_tlb_all(void)
{
	if (boot_cpu_has(X86_FEATURE_PGE)) {
		__flush_tlb_global();
	} else {
		/*
		 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
		 */
		__flush_tlb();
	}
}
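
/*
 * Usage sketch (illustrative only): code that rewrites kernel PTEs
 * wholesale flushes everything afterwards, e.g.
 *
 *	set_kernel_ptes_somehow(addr, nr);	(hypothetical helper)
 *	__flush_tlb_all();
 *
 * With PGE available this takes the __flush_tlb_global() path, whose
 * INVPCID or CR4.PGE toggle also zaps global mappings that a plain
 * CR3 write would leave in place.
 */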

/*
 * flush one page in the kernel mapping
 */
static inline void __flush_tlb_one_kernel(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);

	/*
	 * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its
	 * paravirt equivalent.  Even with PCID, this is sufficient: we only
	 * use PCID if we also use global PTEs for the kernel mapping, and
	 * INVLPG flushes global translations across all address spaces.
	 *
	 * If PTI is on, then the kernel is mapped with non-global PTEs, and
	 * __flush_tlb_one_user() will flush the given address for the current
	 * kernel address space and for its usermode counterpart, but it does
	 * not flush it for other address spaces.
	 */
	__flush_tlb_one_user(addr);

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	/*
	 * See above.  We need to propagate the flush to all other address
	 * spaces.  In principle, we only need to propagate it to kernelmode
	 * address spaces, but the extra bookkeeping we would need is not
	 * worth it.
	 */
	invalidate_other_asid();
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
	/*
	 * We support several kinds of flushes.
	 *
	 * - Fully flush a single mm.  .mm will be set, .end will be
	 *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
	 *   which the IPI sender is trying to catch us up.
	 *
	 * - Partially flush a single mm.  .mm will be set, .start and
	 *   .end will indicate the range, and .new_tlb_gen will be set
	 *   such that the changes between generation .new_tlb_gen-1 and
	 *   .new_tlb_gen are entirely contained in the indicated range.
	 *
	 * - Fully flush all mms whose tlb_gens have been updated.  .mm
	 *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
	 *   will be zero.
	 */
	struct mm_struct	*mm;
	unsigned long		start;
	unsigned long		end;
	u64			new_tlb_gen;
};

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);
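
/*
 * Usage sketch (illustrative only): an mprotect-style path changes the
 * PTEs for a range and then flushes it on every CPU running the mm:
 *
 *	change_protection_somehow(vma, start, end);	(hypothetical helper)
 *	flush_tlb_range(vma, start, end);
 *
 * flush_tlb_range() expands to flush_tlb_mm_range(vma->vm_mm, start, end,
 * vma->vm_flags), which IPIs other CPUs via flush_tlb_others() as needed.
 */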
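
/*
 * Ordering sketch (illustrative only) for the inc_mm_tlb_gen() barrier
 * documented below: an unmap path must publish its page-table changes
 * before deciding which CPUs to flush, i.e.
 *
 *	clear_the_ptes_somehow(mm, start, end);	(hypothetical helper)
 *	inc_mm_tlb_gen(mm);			(full barrier)
 *	read mm_cpumask(mm) and send flush IPIs
 *
 * otherwise a CPU concurrently switching to this mm could be missed.
 */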

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	/*
	 * Bump the generation count.  This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	return atomic64_inc_return(&mm->context.tlb_gen);
}

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info)	\
	native_flush_tlb_others(mask, info)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */