xref: /openbmc/linux/arch/x86/include/asm/tlbflush.h (revision 9b93eb47)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_X86_TLBFLUSH_H
3 #define _ASM_X86_TLBFLUSH_H
4 
5 #include <linux/mm.h>
6 #include <linux/sched.h>
7 
8 #include <asm/processor.h>
9 #include <asm/cpufeature.h>
10 #include <asm/special_insns.h>
11 #include <asm/smp.h>
12 #include <asm/invpcid.h>
13 #include <asm/pti.h>
14 #include <asm/processor-flags.h>
15 
16 /*
17  * The x86 feature is called PCID (Process Context IDentifier). It is similar
18  * to what is traditionally called ASID on the RISC processors.
19  *
20  * We don't use the traditional ASID implementation, where each process/mm gets
21  * its own ASID and flush/restart when we run out of ASID space.
22  *
23  * Instead we have a small per-cpu array of ASIDs and cache the last few mm's
24  * that came by on this CPU, allowing cheaper switch_mm between processes on
25  * this CPU.
26  *
27  * We end up with different spaces for different things. To avoid confusion we
28  * use different names for each of them:
29  *
30  * ASID  - [0, TLB_NR_DYN_ASIDS-1]
31  *         the canonical identifier for an mm
32  *
33  * kPCID - [1, TLB_NR_DYN_ASIDS]
34  *         the value we write into the PCID part of CR3; corresponds to the
35  *         ASID+1, because PCID 0 is special.
36  *
37  * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
38  *         for KPTI each mm has two address spaces and thus needs two
39  *         PCID values, but we can still do with a single ASID denomination
40  *         for each mm. Corresponds to kPCID + 2048.
41  *
42  */
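
/*
 * Worked example of the three spaces, using the constants defined below
 * (TLB_NR_DYN_ASIDS == 6, uPCID == kPCID + 2048):
 *
 *   ASID 0 -> kPCID 1 -> uPCID 2049
 *   ASID 1 -> kPCID 2 -> uPCID 2050
 *   ...
 *   ASID 5 -> kPCID 6 -> uPCID 2054
 */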
43 
44 /* There are 12 bits of space for ASIDs in CR3 */
45 #define CR3_HW_ASID_BITS		12
46 
47 /*
48  * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
49  * user/kernel switches
50  */
51 #ifdef CONFIG_PAGE_TABLE_ISOLATION
52 # define PTI_CONSUMED_PCID_BITS	1
53 #else
54 # define PTI_CONSUMED_PCID_BITS	0
55 #endif
56 
57 #define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)
58 
59 /*
60  * ASIDs are zero-based: 0->MAX_ASID_AVAILABLE are valid.  -1 below to account
61  * for them being zero-based.  Another -1 is because PCID 0 is reserved for
62  * use by non-PCID-aware users.
63  */
64 #define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)
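
/*
 * Worked example, assuming X86_CR3_PCID_BITS == 12 (matching the 12 bits
 * of CR3 PCID space noted above): with PAGE_TABLE_ISOLATION,
 * CR3_AVAIL_PCID_BITS == 11 and MAX_ASID_AVAILABLE == (1 << 11) - 2 == 2046;
 * without it, MAX_ASID_AVAILABLE == (1 << 12) - 2 == 4094.
 */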
65 
66 /*
67  * 6 because 6 should be plenty and struct tlb_state will fit in two cache
68  * lines.
69  */
70 #define TLB_NR_DYN_ASIDS	6
71 
72 /*
73  * Given @asid, compute kPCID
74  */
75 static inline u16 kern_pcid(u16 asid)
76 {
77 	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
78 
79 #ifdef CONFIG_PAGE_TABLE_ISOLATION
80 	/*
81 	 * Make sure that the dynamic ASID space does not conflict with the
82 	 * bit we are using to switch between user and kernel ASIDs.
83 	 */
84 	BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));
85 
86 	/*
87 	 * The ASID being passed in here should have respected the
88 	 * MAX_ASID_AVAILABLE and thus never have the switch bit set.
89 	 */
90 	VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
91 #endif
92 	/*
93 	 * The dynamically-assigned ASIDs that get passed in are small
94 	 * (<TLB_NR_DYN_ASIDS).  They never have the high switch bit set,
95 	 * so do not bother to clear it.
96 	 *
97 	 * If PCID is on, ASID-aware code paths put the ASID+1 into the
98 	 * PCID bits.  This serves two purposes.  It prevents a nasty
99 	 * situation in which PCID-unaware code saves CR3, loads some other
100 	 * value (with PCID == 0), and then restores CR3, thus corrupting
101 	 * the TLB for ASID 0 if the saved ASID was nonzero.  It also means
102 	 * that any bugs involving loading a PCID-enabled CR3 with
103 	 * CR4.PCIDE off will trigger deterministically.
104 	 */
105 	return asid + 1;
106 }
107 
108 /*
109  * Given @asid, compute uPCID
110  */
111 static inline u16 user_pcid(u16 asid)
112 {
113 	u16 ret = kern_pcid(asid);
114 #ifdef CONFIG_PAGE_TABLE_ISOLATION
115 	ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
116 #endif
117 	return ret;
118 }
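
/*
 * For example, assuming X86_CR3_PTI_PCID_USER_BIT == 11 (so the user bit
 * adds the 2048 described at the top of this file), user_pcid(0) is
 * 1 | (1 << 11) == 2049 and user_pcid(5) is 6 | (1 << 11) == 2054,
 * matching the uPCID range documented above.
 */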
119 
120 struct pgd_t;
121 static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
122 {
123 	if (static_cpu_has(X86_FEATURE_PCID)) {
124 		return __sme_pa(pgd) | kern_pcid(asid);
125 	} else {
126 		VM_WARN_ON_ONCE(asid != 0);
127 		return __sme_pa(pgd);
128 	}
129 }
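
/*
 * Illustrative sketch of the value built here, assuming sme_me_mask == 0
 * (so __sme_pa() is just the physical address): a pgd page at physical
 * 0x1000000 used with ASID 2 yields 0x1000000 | kern_pcid(2) == 0x1000003
 * when PCID is supported; without PCID only ASID 0 is valid and the
 * result is just the physical address.
 */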
130 
131 static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
132 {
133 	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
134 	/*
135 	 * Use boot_cpu_has() instead of this_cpu_has() as this function
136 	 * might be called during early boot. This should work even after
137 	 * boot because all CPUs have the same capabilities:
138 	 */
139 	VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID));
140 	return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
141 }
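
/*
 * The only difference from build_cr3() is CR3_NOFLUSH (the CR3 no-flush
 * bit, bit 63 on 64-bit), which asks the CPU to keep TLB entries tagged
 * with this PCID instead of flushing them on the CR3 write.
 */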
142 
143 #ifdef CONFIG_PARAVIRT
144 #include <asm/paravirt.h>
145 #else
146 #define __flush_tlb() __native_flush_tlb()
147 #define __flush_tlb_global() __native_flush_tlb_global()
148 #define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
149 #endif
150 
151 struct tlb_context {
152 	u64 ctx_id;
153 	u64 tlb_gen;
154 };
155 
156 struct tlb_state {
157 	/*
158 	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
159 	 * are on.  This means that it may not match current->active_mm,
160 	 * which will contain the previous user mm when we're in lazy TLB
161 	 * mode even if we've already switched back to swapper_pg_dir.
162 	 *
163 	 * During switch_mm_irqs_off(), loaded_mm will be set to
164 	 * LOADED_MM_SWITCHING during the brief interrupts-off window
165 	 * when CR3 and loaded_mm would otherwise be inconsistent.  This
166 	 * is for nmi_uaccess_okay()'s benefit.
167 	 */
168 	struct mm_struct *loaded_mm;
169 
170 #define LOADED_MM_SWITCHING ((struct mm_struct *)1UL)
171 
172 	/* Last user mm for optimizing IBPB */
173 	union {
174 		struct mm_struct	*last_user_mm;
175 		unsigned long		last_user_mm_ibpb;
176 	};
177 
178 	u16 loaded_mm_asid;
179 	u16 next_asid;
180 
181 	/*
182 	 * We can be in one of several states:
183 	 *
184 	 *  - Actively using an mm.  Our CPU's bit will be set in
185 	 *    mm_cpumask(loaded_mm) and is_lazy == false;
186 	 *
187 	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
188 	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
189 	 *
190 	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
191 	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
192 	 *    We're heuristically guessing that the CR3 load we
193 	 *    skipped more than makes up for the overhead added by
194 	 *    lazy mode.
195 	 */
196 	bool is_lazy;
197 
198 	/*
199 	 * If set we changed the page tables in such a way that we
200 	 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
201 	 * This tells us to go invalidate all the non-loaded ctxs[]
202 	 * on the next context switch.
203 	 *
204 	 * The current ctx was kept up-to-date as it ran and does not
205 	 * need to be invalidated.
206 	 */
207 	bool invalidate_other;
208 
209 	/*
210 	 * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
211 	 * the corresponding user PCID needs a flush next time we
212 	 * switch to it; see SWITCH_TO_USER_CR3.
213 	 */
214 	unsigned short user_pcid_flush_mask;
215 
216 	/*
217 	 * Access to this CR4 shadow and to H/W CR4 is protected by
218 	 * disabling interrupts when modifying either one.
219 	 */
220 	unsigned long cr4;
221 
222 	/*
223 	 * This is a list of all contexts that might exist in the TLB.
224 	 * There is one per ASID that we use, and the ASID (what the
225 	 * CPU calls PCID) is the index into ctxs.
226 	 *
227 	 * For each context, ctx_id indicates which mm the TLB's user
228 	 * entries came from.  As an invariant, the TLB will never
229 	 * contain entries that are out-of-date relative to the point when
230 	 * that mm reached the tlb_gen recorded in the list.
231 	 *
232 	 * To be clear, this means that it's legal for the TLB code to
233 	 * flush the TLB without updating tlb_gen.  This can happen
234 	 * (for now, at least) due to paravirt remote flushes.
235 	 *
236 	 * NB: context 0 is a bit special, since it's also used by
237 	 * various bits of init code.  This is fine -- code that
238 	 * isn't aware of PCID will end up harmlessly flushing
239 	 * context 0.
240 	 */
241 	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
242 };
243 DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
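
/*
 * Sketch of the usual access pattern (as used by the flush helpers later
 * in this header); only meaningful with preemption or interrupts disabled,
 * since a context switch may change both fields:
 *
 *	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
 *	u16 asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
 */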
244 
245 /*
246  * Blindly accessing user memory from NMI context can be dangerous
247  * if we're in the middle of switching the current user task or
248  * switching the loaded mm.  It can also be dangerous if we
249  * interrupted some kernel code that was temporarily using a
250  * different mm.
251  */
252 static inline bool nmi_uaccess_okay(void)
253 {
254 	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
255 	struct mm_struct *current_mm = current->mm;
256 
257 	VM_WARN_ON_ONCE(!loaded_mm);
258 
259 	/*
260 	 * The condition we want to check is
261 	 * current_mm->pgd == __va(read_cr3_pa()).  This may be slow, though,
262 	 * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
263 	 * is supposed to be reasonably fast.
264 	 *
265 	 * Instead, we check the almost equivalent but somewhat conservative
266 	 * condition below, and we rely on the fact that switch_mm_irqs_off()
267 	 * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
268 	 */
269 	if (loaded_mm != current_mm)
270 		return false;
271 
272 	VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
273 
274 	return true;
275 }
276 
277 #define nmi_uaccess_okay nmi_uaccess_okay
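
/*
 * Usage sketch: NMI-context code (copy_from_user_nmi() is one such caller)
 * checks this before touching user memory and bails out if it fails, e.g.:
 *
 *	if (!nmi_uaccess_okay())
 *		return n;
 */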
278 
279 /* Initialize cr4 shadow for this CPU. */
280 static inline void cr4_init_shadow(void)
281 {
282 	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
283 }
284 
285 static inline void __cr4_set(unsigned long cr4)
286 {
287 	lockdep_assert_irqs_disabled();
288 	this_cpu_write(cpu_tlbstate.cr4, cr4);
289 	__write_cr4(cr4);
290 }
291 
292 /* Set in this cpu's CR4. */
293 static inline void cr4_set_bits(unsigned long mask)
294 {
295 	unsigned long cr4, flags;
296 
297 	local_irq_save(flags);
298 	cr4 = this_cpu_read(cpu_tlbstate.cr4);
299 	if ((cr4 | mask) != cr4)
300 		__cr4_set(cr4 | mask);
301 	local_irq_restore(flags);
302 }
303 
304 /* Clear in this cpu's CR4. */
305 static inline void cr4_clear_bits(unsigned long mask)
306 {
307 	unsigned long cr4, flags;
308 
309 	local_irq_save(flags);
310 	cr4 = this_cpu_read(cpu_tlbstate.cr4);
311 	if ((cr4 & ~mask) != cr4)
312 		__cr4_set(cr4 & ~mask);
313 	local_irq_restore(flags);
314 }
315 
316 static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
317 {
318 	unsigned long cr4;
319 
320 	cr4 = this_cpu_read(cpu_tlbstate.cr4);
321 	__cr4_set(cr4 ^ mask);
322 }
323 
324 /* Read the CR4 shadow. */
325 static inline unsigned long cr4_read_shadow(void)
326 {
327 	return this_cpu_read(cpu_tlbstate.cr4);
328 }
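
/*
 * Typical usage sketch: feature setup code enables a bit with, e.g.,
 * cr4_set_bits(X86_CR4_PCIDE); later code can test it cheaply with
 * cr4_read_shadow() & X86_CR4_PCIDE instead of reading the hardware
 * register, and cr4_clear_bits() removes it again.  cr4_set_bits() and
 * cr4_clear_bits() do an IRQ-safe read-modify-write of both the shadow
 * and the real CR4.
 */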
329 
330 /*
331  * Mark all other ASIDs as invalid, preserving the current one.
332  */
333 static inline void invalidate_other_asid(void)
334 {
335 	this_cpu_write(cpu_tlbstate.invalidate_other, true);
336 }
337 
338 /*
339  * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
340  * enable and PPro Global page enable), so that any CPUs that boot
341  * up after us can get the correct flags.  This should only be used
342  * during boot on the boot CPU.
343  */
344 extern unsigned long mmu_cr4_features;
345 extern u32 *trampoline_cr4_features;
346 
347 static inline void cr4_set_bits_and_update_boot(unsigned long mask)
348 {
349 	mmu_cr4_features |= mask;
350 	if (trampoline_cr4_features)
351 		*trampoline_cr4_features = mmu_cr4_features;
352 	cr4_set_bits(mask);
353 }
354 
355 extern void initialize_tlbstate_and_flush(void);
356 
357 /*
358  * Given an ASID, flush the corresponding user ASID.  We can delay this
359  * until the next time we switch to it.
360  *
361  * See SWITCH_TO_USER_CR3.
362  */
363 static inline void invalidate_user_asid(u16 asid)
364 {
365 	/* There is no user ASID if address space separation is off */
366 	if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
367 		return;
368 
369 	/*
370 	 * We only have a single ASID if PCID is off and the CR3
371 	 * write will have flushed it.
372 	 */
373 	if (!cpu_feature_enabled(X86_FEATURE_PCID))
374 		return;
375 
376 	if (!static_cpu_has(X86_FEATURE_PTI))
377 		return;
378 
379 	__set_bit(kern_pcid(asid),
380 		  (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
381 }
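
/*
 * Rough example of the deferred flush: invalidate_user_asid(2) sets bit
 * kern_pcid(2) == 3 in user_pcid_flush_mask.  On the next switch to
 * userspace with that ASID, the entry code's SWITCH_TO_USER_CR3 sees the
 * pending bit, clears it, and performs a flushing CR3 write instead of a
 * CR3_NOFLUSH one.
 */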
382 
383 /*
384  * flush the entire current user mapping
385  */
386 static inline void __native_flush_tlb(void)
387 {
388 	/*
389 	 * Preemption or interrupts must be disabled to protect the access
390 	 * to the per CPU variable and to prevent being preempted between
391 	 * read_cr3() and write_cr3().
392 	 */
393 	WARN_ON_ONCE(preemptible());
394 
395 	invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
396 
397 	/* If current->mm == NULL then the read_cr3() "borrows" an mm */
398 	native_write_cr3(__native_read_cr3());
399 }
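
/*
 * The CR3 write-back is what does the work: a MOV to CR3 with the
 * no-flush bit clear invalidates all non-global TLB entries for the PCID
 * being loaded (or all non-global entries when CR4.PCIDE is off), i.e.
 * the entire current user mapping.
 */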
400 
401 /*
402  * flush everything
403  */
404 static inline void __native_flush_tlb_global(void)
405 {
406 	unsigned long cr4, flags;
407 
408 	if (static_cpu_has(X86_FEATURE_INVPCID)) {
409 		/*
410 		 * Using INVPCID is considerably faster than a pair of writes
411 		 * to CR4 sandwiched inside an IRQ flag save/restore.
412 		 *
413 		 * Note, this works with CR4.PCIDE=0 or 1.
414 		 */
415 		invpcid_flush_all();
416 		return;
417 	}
418 
419 	/*
420 	 * Read-modify-write to CR4 - protect it from preemption and
421 	 * from interrupts. (Use the raw variant because this code can
422 	 * be called from deep inside debugging code.)
423 	 */
424 	raw_local_irq_save(flags);
425 
426 	cr4 = this_cpu_read(cpu_tlbstate.cr4);
427 	/* toggle PGE */
428 	native_write_cr4(cr4 ^ X86_CR4_PGE);
429 	/* write old PGE again and flush TLBs */
430 	native_write_cr4(cr4);
431 
432 	raw_local_irq_restore(flags);
433 }
434 
435 /*
436  * flush one page in the user mapping
437  */
438 static inline void __native_flush_tlb_one_user(unsigned long addr)
439 {
440 	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
441 
442 	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
443 
444 	if (!static_cpu_has(X86_FEATURE_PTI))
445 		return;
446 
447 	/*
448 	 * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1.
449 	 * Just use invalidate_user_asid() in case we are called early.
450 	 */
451 	if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE))
452 		invalidate_user_asid(loaded_mm_asid);
453 	else
454 		invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
455 }
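
/*
 * Example with PTI and INVPCID_SINGLE, for loaded_mm_asid == 0: the INVLPG
 * above flushes @addr in the current (kernel) address space, kPCID 1, and
 * invpcid_flush_one(user_pcid(0), addr) flushes the same address in the
 * user counterpart, uPCID 2049.
 */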
456 
457 /*
458  * flush everything
459  */
460 static inline void __flush_tlb_all(void)
461 {
462 	/*
463 	 * This is to catch users with preemption enabled and the PGE feature,
464 	 * who would otherwise not trigger the warning in __native_flush_tlb().
465 	 */
466 	VM_WARN_ON_ONCE(preemptible());
467 
468 	if (boot_cpu_has(X86_FEATURE_PGE)) {
469 		__flush_tlb_global();
470 	} else {
471 		/*
472 		 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
473 		 */
474 		__flush_tlb();
475 	}
476 }
477 
478 /*
479  * flush one page in the kernel mapping
480  */
481 static inline void __flush_tlb_one_kernel(unsigned long addr)
482 {
483 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
484 
485 	/*
486 	 * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its
487 	 * paravirt equivalent.  Even with PCID, this is sufficient: we only
488 	 * use PCID if we also use global PTEs for the kernel mapping, and
489 	 * INVLPG flushes global translations across all address spaces.
490 	 *
491 	 * If PTI is on, then the kernel is mapped with non-global PTEs, and
492 	 * __flush_tlb_one_user() will flush the given address for the current
493 	 * kernel address space and for its usermode counterpart, but it does
494 	 * not flush it for other address spaces.
495 	 */
496 	__flush_tlb_one_user(addr);
497 
498 	if (!static_cpu_has(X86_FEATURE_PTI))
499 		return;
500 
501 	/*
502 	 * See above.  We need to propagate the flush to all other address
503 	 * spaces.  In principle, we only need to propagate it to kernelmode
504 	 * address spaces, but the extra bookkeeping we would need is not
505 	 * worth it.
506 	 */
507 	invalidate_other_asid();
508 }
509 
510 #define TLB_FLUSH_ALL	-1UL
511 
512 /*
513  * TLB flushing:
514  *
515  *  - flush_tlb_all() flushes all processes' TLBs
516  *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
517  *  - flush_tlb_page(vma, vmaddr) flushes one page
518  *  - flush_tlb_range(vma, start, end) flushes a range of pages
519  *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
520  *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
521  *
522  * ..but the i386 has somewhat limited tlb flushing capabilities,
523  * and page-granular flushes are available only on i486 and up.
524  */
525 struct flush_tlb_info {
526 	/*
527 	 * We support several kinds of flushes.
528 	 *
529 	 * - Fully flush a single mm.  .mm will be set, .end will be
530 	 *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
531 	 *   which the IPI sender is trying to catch us up.
532 	 *
533 	 * - Partially flush a single mm.  .mm will be set, .start and
534 	 *   .end will indicate the range, and .new_tlb_gen will be set
535 	 *   such that the changes between generation .new_tlb_gen-1 and
536 	 *   .new_tlb_gen are entirely contained in the indicated range.
537 	 *
538 	 * - Fully flush all mms whose tlb_gens have been updated.  .mm
539 	 *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
540 	 *   will be zero.
541 	 */
542 	struct mm_struct	*mm;
543 	unsigned long		start;
544 	unsigned long		end;
545 	u64			new_tlb_gen;
546 	unsigned int		stride_shift;
547 	bool			freed_tables;
548 };
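
/*
 * Illustrative sketch of the second case above (partially flushing one
 * page of a single mm), roughly what flush_tlb_mm_range() builds:
 *
 *	struct flush_tlb_info info = {
 *		.mm		= mm,
 *		.start		= addr,
 *		.end		= addr + PAGE_SIZE,
 *		.new_tlb_gen	= inc_mm_tlb_gen(mm),
 *		.stride_shift	= PAGE_SHIFT,
 *		.freed_tables	= false,
 *	};
 *
 * The local CPU is flushed directly and flush_tlb_others() sends the same
 * info to any other CPUs in mm_cpumask(mm).
 */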
549 
550 #define local_flush_tlb() __flush_tlb()
551 
552 #define flush_tlb_mm(mm)						\
553 		flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)
554 
555 #define flush_tlb_range(vma, start, end)				\
556 	flush_tlb_mm_range((vma)->vm_mm, start, end,			\
557 			   ((vma)->vm_flags & VM_HUGETLB)		\
558 				? huge_page_shift(hstate_vma(vma))	\
559 				: PAGE_SHIFT, false)
560 
561 extern void flush_tlb_all(void);
562 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
563 				unsigned long end, unsigned int stride_shift,
564 				bool freed_tables);
565 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
566 
567 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
568 {
569 	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
570 }
571 
572 void native_flush_tlb_others(const struct cpumask *cpumask,
573 			     const struct flush_tlb_info *info);
574 
575 static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
576 {
577 	/*
578 	 * Bump the generation count.  This also serves as a full barrier
579 	 * that synchronizes with switch_mm(): callers are required to order
580 	 * their read of mm_cpumask after their writes to the paging
581 	 * structures.
582 	 */
583 	return atomic64_inc_return(&mm->context.tlb_gen);
584 }
585 
586 static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
587 					struct mm_struct *mm)
588 {
589 	inc_mm_tlb_gen(mm);
590 	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
591 }
592 
593 extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
594 
595 #ifndef CONFIG_PARAVIRT
596 #define flush_tlb_others(mask, info)	\
597 	native_flush_tlb_others(mask, info)
598 
599 #define paravirt_tlb_remove_table(tlb, page) \
600 	tlb_remove_page(tlb, (void *)(page))
601 #endif
602 
603 #endif /* _ASM_X86_TLBFLUSH_H */
604