/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
#include <asm/invpcid.h>
#include <asm/pti.h>
#include <asm/processor-flags.h>

/*
 * The x86 feature is called PCID (Process Context IDentifier). It is similar
 * to what is traditionally called ASID on the RISC processors.
 *
 * We don't use the traditional ASID implementation, where each process/mm gets
 * its own ASID and flush/restart when we run out of ASID space.
 *
 * Instead we have a small per-cpu array of ASIDs and cache the last few mm's
 * that came by on this CPU, allowing cheaper switch_mm between processes on
 * this CPU.
 *
 * We end up with different spaces for different things. To avoid confusion we
 * use different names for each of them:
 *
 * ASID  - [0, TLB_NR_DYN_ASIDS-1]
 *         the canonical identifier for an mm
 *
 * kPCID - [1, TLB_NR_DYN_ASIDS]
 *         the value we write into the PCID part of CR3; corresponds to the
 *         ASID+1, because PCID 0 is special.
 *
 * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
 *         for KPTI each mm has two address spaces and thus needs two
 *         PCID values, but we can still do with a single ASID denomination
 *         for each mm. Corresponds to kPCID + 2048.
 */

/* There are 12 bits of space for ASIDs in CR3 */
#define CR3_HW_ASID_BITS	12

/*
 * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
 * user/kernel switches
 */
#ifdef CONFIG_PAGE_TABLE_ISOLATION
# define PTI_CONSUMED_PCID_BITS	1
#else
# define PTI_CONSUMED_PCID_BITS	0
#endif

#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)

/*
 * ASIDs are zero-based: 0->MAX_ASID_AVAILABLE are valid. -1 below to account
 * for them being zero-based. Another -1 is because PCID 0 is reserved for
 * use by non-PCID-aware users.
 */
#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)

/*
 * 6 because 6 should be plenty and struct tlb_state will fit in two cache
 * lines.
 */
#define TLB_NR_DYN_ASIDS	6

/*
 * Given @asid, compute kPCID
 */
static inline u16 kern_pcid(u16 asid)
{
        VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
        /*
         * Make sure that the dynamic ASID space does not conflict with the
         * bit we are using to switch between user and kernel ASIDs.
         */
        BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));

        /*
         * The ASID being passed in here should have respected the
         * MAX_ASID_AVAILABLE and thus never have the switch bit set.
         */
        VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
#endif
        /*
         * The dynamically-assigned ASIDs that get passed in are small
         * (<TLB_NR_DYN_ASIDS).  They never have the high switch bit set,
         * so do not bother to clear it.
         *
         * If PCID is on, ASID-aware code paths put the ASID+1 into the
         * PCID bits.  This serves two purposes.  It prevents a nasty
         * situation in which PCID-unaware code saves CR3, loads some other
         * value (with PCID == 0), and then restores CR3, thus corrupting
         * the TLB for ASID 0 if the saved ASID was nonzero.  It also means
         * that any bugs involving loading a PCID-enabled CR3 with
         * CR4.PCIDE off will trigger deterministically.
         */
        return asid + 1;
}

/*
 * Given @asid, compute uPCID
 */
static inline u16 user_pcid(u16 asid)
{
        u16 ret = kern_pcid(asid);
#ifdef CONFIG_PAGE_TABLE_ISOLATION
        ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
#endif
        return ret;
}
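/*
 * Illustrative example of the numbering above (hedged; the values simply
 * follow from kern_pcid()/user_pcid()): dynamic ASID 1 becomes kPCID 2 in
 * the PCID field of CR3 for the kernel page tables and, with PTI enabled,
 * uPCID 2 | (1 << X86_CR3_PTI_PCID_USER_BIT) == 2050 for the user page
 * tables, matching the kPCID/uPCID ranges documented at the top of this
 * file.
 */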
struct pgd_t;
static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
{
        if (static_cpu_has(X86_FEATURE_PCID)) {
                return __sme_pa(pgd) | kern_pcid(asid);
        } else {
                VM_WARN_ON_ONCE(asid != 0);
                return __sme_pa(pgd);
        }
}

static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
{
        VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
        /*
         * Use boot_cpu_has() instead of this_cpu_has() as this function
         * might be called during early boot.  This should work even after
         * boot because all CPUs have the same capabilities:
         */
        VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID));
        return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
}
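/*
 * Worked example (illustrative values, assuming a PCID-capable CPU): for
 * a pgd whose SME-adjusted physical address is 0x1234000 and ASID 1:
 *
 *	build_cr3(pgd, 1)         == 0x1234000 | 2	(kPCID in the low bits)
 *	build_cr3_noflush(pgd, 1) == 0x1234000 | 2 | CR3_NOFLUSH (bit 63)
 *
 * i.e. the same page tables, but with CR3_NOFLUSH set the CPU keeps the
 * TLB entries tagged with that PCID across the CR3 write.
 */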
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
#endif

struct tlb_context {
        u64 ctx_id;
        u64 tlb_gen;
};

struct tlb_state {
        /*
         * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
         * are on.  This means that it may not match current->active_mm,
         * which will contain the previous user mm when we're in lazy TLB
         * mode even if we've already switched back to swapper_pg_dir.
         *
         * During switch_mm_irqs_off(), loaded_mm will be set to
         * LOADED_MM_SWITCHING during the brief interrupts-off window
         * when CR3 and loaded_mm would otherwise be inconsistent.  This
         * is for nmi_uaccess_okay()'s benefit.
         */
        struct mm_struct *loaded_mm;

#define LOADED_MM_SWITCHING ((struct mm_struct *)1UL)

        /* Last user mm for optimizing IBPB */
        union {
                struct mm_struct	*last_user_mm;
                unsigned long		last_user_mm_ibpb;
        };

        u16 loaded_mm_asid;
        u16 next_asid;

        /*
         * We can be in one of several states:
         *
         *  - Actively using an mm.  Our CPU's bit will be set in
         *    mm_cpumask(loaded_mm) and is_lazy == false;
         *
         *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
         *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
         *
         *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
         *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
         *    We're heuristically guessing that the CR3 load we
         *    skipped more than makes up for the overhead added by
         *    lazy mode.
         */
        bool is_lazy;

        /*
         * If set we changed the page tables in such a way that we
         * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
         * This tells us to go invalidate all the non-loaded ctxs[]
         * on the next context switch.
         *
         * The current ctx was kept up-to-date as it ran and does not
         * need to be invalidated.
         */
        bool invalidate_other;

        /*
         * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
         * the corresponding user PCID needs a flush next time we
         * switch to it; see SWITCH_TO_USER_CR3.
         */
        unsigned short user_pcid_flush_mask;

        /*
         * Access to this CR4 shadow and to H/W CR4 is protected by
         * disabling interrupts when modifying either one.
         */
        unsigned long cr4;

        /*
         * This is a list of all contexts that might exist in the TLB.
         * There is one per ASID that we use, and the ASID (what the
         * CPU calls PCID) is the index into ctxs[].
         *
         * For each context, ctx_id indicates which mm the TLB's user
         * entries came from.  As an invariant, the TLB will never
         * contain entries that were already out of date when that mm
         * reached the tlb_gen recorded in the list.
         *
         * To be clear, this means that it's legal for the TLB code to
         * flush the TLB without updating tlb_gen.  This can happen
         * (for now, at least) due to paravirt remote flushes.
         *
         * NB: context 0 is a bit special, since it's also used by
         * various bits of init code.  This is fine -- code that
         * isn't aware of PCID will end up harmlessly flushing
         * context 0.
         */
        struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
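/*
 * How the fields above are used, as a simplified sketch (the real ASID
 * selection lives in arch/x86/mm/tlb.c and handles more cases): on a
 * context switch, the incoming mm's ctx_id is looked up in ctxs[]; a hit
 * reuses that ASID and only flushes if the cached tlb_gen is behind the
 * mm's current tlb_gen, while a miss takes the next ASID round-robin from
 * next_asid and does a flushing CR3 write.
 *
 *	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
 *		if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
 *		    next->context.ctx_id)
 *			continue;
 *		// hit: reuse @asid; flush only if ctxs[asid].tlb_gen is stale
 *	}
 *	// miss: take next_asid (advanced mod TLB_NR_DYN_ASIDS) and
 *	// switch with a flushing CR3 write
 */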
/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or
 * switching the loaded mm.  It can also be dangerous if we
 * interrupted some kernel code that was temporarily using a
 * different mm.
 */
static inline bool nmi_uaccess_okay(void)
{
        struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
        struct mm_struct *current_mm = current->mm;

        VM_WARN_ON_ONCE(!loaded_mm);

        /*
         * The condition we want to check is
         * current_mm->pgd == __va(read_cr3_pa()).  This may be slow, though,
         * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
         * is supposed to be reasonably fast.
         *
         * Instead, we check the almost equivalent but somewhat conservative
         * condition below, and we rely on the fact that switch_mm_irqs_off()
         * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
         */
        if (loaded_mm != current_mm)
                return false;

        VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));

        return true;
}

#define nmi_uaccess_okay nmi_uaccess_okay

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
        this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

static inline void __cr4_set(unsigned long cr4)
{
        lockdep_assert_irqs_disabled();
        this_cpu_write(cpu_tlbstate.cr4, cr4);
        __write_cr4(cr4);
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits_irqsoff(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        if ((cr4 | mask) != cr4)
                __cr4_set(cr4 | mask);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits_irqsoff(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        if ((cr4 & ~mask) != cr4)
                __cr4_set(cr4 & ~mask);
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
        unsigned long flags;

        local_irq_save(flags);
        cr4_set_bits_irqsoff(mask);
        local_irq_restore(flags);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
        unsigned long flags;

        local_irq_save(flags);
        cr4_clear_bits_irqsoff(mask);
        local_irq_restore(flags);
}

static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        __cr4_set(cr4 ^ mask);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
        return this_cpu_read(cpu_tlbstate.cr4);
}
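/*
 * Example usage (illustrative): CR4 feature bits are normally changed
 * through these helpers so that the per-CPU shadow stays coherent with
 * the hardware register, e.g.:
 *
 *	cr4_set_bits(X86_CR4_PCIDE);		// set a feature bit, shadow updated too
 *	if (cr4_read_shadow() & X86_CR4_PCIDE)	// cheap check, no CR4 read
 *		...;
 */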
/*
 * Mark all other ASIDs as invalid, preserving the current one.
 */
static inline void invalidate_other_asid(void)
{
        this_cpu_write(cpu_tlbstate.invalidate_other, true);
}

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags.  This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
        mmu_cr4_features |= mask;
        if (trampoline_cr4_features)
                *trampoline_cr4_features = mmu_cr4_features;
        cr4_set_bits(mask);
}

extern void initialize_tlbstate_and_flush(void);

/*
 * Given an ASID, flush the corresponding user ASID.  We can delay this
 * until the next time we switch to it.
 *
 * See SWITCH_TO_USER_CR3.
 */
static inline void invalidate_user_asid(u16 asid)
{
        /* There is no user ASID if address space separation is off */
        if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
                return;

        /*
         * We only have a single ASID if PCID is off and the CR3
         * write will have flushed it.
         */
        if (!cpu_feature_enabled(X86_FEATURE_PCID))
                return;

        if (!static_cpu_has(X86_FEATURE_PTI))
                return;

        __set_bit(kern_pcid(asid),
                  (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
}

/*
 * flush the entire current user mapping
 */
static inline void __native_flush_tlb(void)
{
        /*
         * Preemption or interrupts must be disabled to protect the access
         * to the per CPU variable and to prevent being preempted between
         * read_cr3() and write_cr3().
         */
        WARN_ON_ONCE(preemptible());

        invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));

        /* If current->mm == NULL then the read_cr3() "borrows" an mm */
        native_write_cr3(__native_read_cr3());
}

/*
 * flush everything
 */
static inline void __native_flush_tlb_global(void)
{
        unsigned long cr4, flags;

        if (static_cpu_has(X86_FEATURE_INVPCID)) {
                /*
                 * Using INVPCID is considerably faster than a pair of writes
                 * to CR4 sandwiched inside an IRQ flag save/restore.
                 *
                 * Note, this works with CR4.PCIDE=0 or 1.
                 */
                invpcid_flush_all();
                return;
        }

        /*
         * Read-modify-write to CR4 - protect it from preemption and
         * from interrupts.  (Use the raw variant because this code can
         * be called from deep inside debugging code.)
         */
        raw_local_irq_save(flags);

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        /* toggle PGE */
        native_write_cr4(cr4 ^ X86_CR4_PGE);
        /* write old PGE again and flush TLBs */
        native_write_cr4(cr4);

        raw_local_irq_restore(flags);
}

/*
 * flush one page in the user mapping
 */
static inline void __native_flush_tlb_one_user(unsigned long addr)
{
        u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);

        asm volatile("invlpg (%0)" ::"r" (addr) : "memory");

        if (!static_cpu_has(X86_FEATURE_PTI))
                return;

        /*
         * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1.
         * Just use invalidate_user_asid() in case we are called early.
         */
        if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE))
                invalidate_user_asid(loaded_mm_asid);
        else
                invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
}

/*
 * flush everything
 */
static inline void __flush_tlb_all(void)
{
        /*
         * This catches callers that have preemption enabled but, thanks to
         * the PGE feature, would otherwise never trigger the warning in
         * __native_flush_tlb().
         */
        VM_WARN_ON_ONCE(preemptible());

        if (boot_cpu_has(X86_FEATURE_PGE)) {
                __flush_tlb_global();
        } else {
                /*
                 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
                 */
                __flush_tlb();
        }
}

/*
 * flush one page in the kernel mapping
 */
static inline void __flush_tlb_one_kernel(unsigned long addr)
{
        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);

        /*
         * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its
         * paravirt equivalent.  Even with PCID, this is sufficient: we only
         * use PCID if we also use global PTEs for the kernel mapping, and
         * INVLPG flushes global translations across all address spaces.
         *
         * If PTI is on, then the kernel is mapped with non-global PTEs, and
         * __flush_tlb_one_user() will flush the given address for the current
         * kernel address space and for its usermode counterpart, but it does
         * not flush it for other address spaces.
         */
        __flush_tlb_one_user(addr);

        if (!static_cpu_has(X86_FEATURE_PTI))
                return;

        /*
         * See above.  We need to propagate the flush to all other address
         * spaces.  In principle, we only need to propagate it to kernelmode
         * address spaces, but the extra bookkeeping we would need is not
         * worth it.
         */
        invalidate_other_asid();
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
        /*
         * We support several kinds of flushes.
         *
         * - Fully flush a single mm.  .mm will be set, .end will be
         *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
         *   which the IPI sender is trying to catch us up.
         *
         * - Partially flush a single mm.  .mm will be set, .start and
         *   .end will indicate the range, and .new_tlb_gen will be set
         *   such that the changes between generation .new_tlb_gen-1 and
         *   .new_tlb_gen are entirely contained in the indicated range.
         *
         * - Fully flush all mms whose tlb_gens have been updated.  .mm
         *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
         *   will be zero.
         */
        struct mm_struct	*mm;
        unsigned long		start;
        unsigned long		end;
        u64			new_tlb_gen;
        unsigned int		stride_shift;
        bool			freed_tables;
};

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)						\
		flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)

#define flush_tlb_range(vma, start, end)				\
	flush_tlb_mm_range((vma)->vm_mm, start, end,			\
			   ((vma)->vm_flags & VM_HUGETLB)		\
				? huge_page_shift(hstate_vma(vma))	\
				: PAGE_SHIFT, false)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned int stride_shift,
				bool freed_tables);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
        flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
}
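/*
 * Illustrative call sites for the interfaces above (hypothetical
 * examples):
 *
 *	// after changing PTEs within a user VMA:
 *	flush_tlb_range(vma, start, end);
 *
 *	// after updating a single user PTE:
 *	flush_tlb_page(vma, addr);
 *
 *	// after modifying kernel mappings (e.g. after vunmap()):
 *	flush_tlb_kernel_range(start, end);
 */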
void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
        /*
         * Bump the generation count.  This also serves as a full barrier
         * that synchronizes with switch_mm(): callers are required to order
         * their read of mm_cpumask after their writes to the paging
         * structures.
         */
        return atomic64_inc_return(&mm->context.tlb_gen);
}

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
        inc_mm_tlb_gen(mm);
        cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info)	\
	native_flush_tlb_others(mask, info)

#define paravirt_tlb_remove_table(tlb, page) \
	tlb_remove_page(tlb, (void *)(page))
#endif

#endif /* _ASM_X86_TLBFLUSH_H */