xref: /openbmc/linux/arch/x86/include/asm/tlbflush.h (revision 2f4305b1)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
#include <asm/invpcid.h>
#include <asm/pti.h>
#include <asm/processor-flags.h>

void __flush_tlb_all(void);

#define TLB_FLUSH_ALL	-1UL

void cr4_update_irqsoff(unsigned long set, unsigned long clear);
unsigned long cr4_read_shadow(void);

/* Set in this cpu's CR4. */
static inline void cr4_set_bits_irqsoff(unsigned long mask)
{
	cr4_update_irqsoff(mask, 0);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits_irqsoff(unsigned long mask)
{
	cr4_update_irqsoff(0, mask);
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long flags;

	local_irq_save(flags);
	cr4_set_bits_irqsoff(mask);
	local_irq_restore(flags);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long flags;

	local_irq_save(flags);
	cr4_clear_bits_irqsoff(mask);
	local_irq_restore(flags);
}
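
/*
 * Usage sketch (illustrative, not part of this header): callers pass the
 * X86_CR4_* bits they want toggled on this CPU, e.g.:
 *
 *	cr4_set_bits(X86_CR4_PGE);	// saves/restores IRQ flags itself
 *	cr4_clear_bits(X86_CR4_TSD);
 *
 * The _irqsoff variants skip the IRQ save/restore and may only be called
 * when the caller already runs with interrupts disabled.
 */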

#ifndef MODULE
/*
 * 6 because 6 should be plenty and struct tlb_state will fit in two cache
 * lines.
 */
#define TLB_NR_DYN_ASIDS	6

struct tlb_context {
	u64 ctx_id;
	u64 tlb_gen;
};
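
/*
 * Staleness check, as an illustrative sketch (simplified from the logic in
 * arch/x86/mm/tlb.c): a CPU can tell whether its TLB is stale for an mm by
 * comparing its per-ASID snapshot against the mm's current generation:
 *
 *	struct tlb_context *ctx = &this_cpu_ptr(&cpu_tlbstate)->ctxs[asid];
 *	u64 mm_tlb_gen = atomic64_read(&mm->context.tlb_gen);
 *
 *	if (ctx->ctx_id != mm->context.ctx_id)
 *		// TLB holds some other mm's entries; full flush on switch
 *	else if (ctx->tlb_gen < mm_tlb_gen)
 *		// stale entries; flush, then ctx->tlb_gen = mm_tlb_gen
 */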

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on.  This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 *
	 * During switch_mm_irqs_off(), loaded_mm will be set to
	 * LOADED_MM_SWITCHING during the brief interrupts-off window
	 * when CR3 and loaded_mm would otherwise be inconsistent.  This
	 * is for nmi_uaccess_okay()'s benefit.
	 */
	struct mm_struct *loaded_mm;

#define LOADED_MM_SWITCHING ((struct mm_struct *)1UL)

	/* Last user mm for optimizing IBPB */
	union {
		struct mm_struct	*last_user_mm;
		unsigned long		last_user_mm_ibpb;
	};

	u16 loaded_mm_asid;
	u16 next_asid;

	/*
	 * If set, we changed the page tables in such a way that we
	 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
	 * This tells us to go invalidate all the non-loaded ctxs[]
	 * on the next context switch.
	 *
	 * The current ctx was kept up to date as it ran and does not
	 * need to be invalidated.
	 */
	bool invalidate_other;

	/*
	 * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
	 * the corresponding user PCID needs a flush next time we
	 * switch to it; see SWITCH_TO_USER_CR3.
	 */
	unsigned short user_pcid_flush_mask;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;

	/*
	 * This is a list of all contexts that might exist in the TLB.
	 * There is one per ASID that we use, and the ASID (what the
	 * CPU calls PCID) is the index into ctxs[].
	 *
	 * For each context, ctx_id indicates which mm the TLB's user
	 * entries came from.  As an invariant, the TLB will never
	 * contain entries that were out of date as of the tlb_gen
	 * recorded in this list.
	 *
	 * To be clear, this means that it's legal for the TLB code to
	 * flush the TLB without updating tlb_gen.  This can happen
	 * (for now, at least) due to paravirt remote flushes.
	 *
	 * NB: context 0 is a bit special, since it's also used by
	 * various bits of init code.  This is fine -- code that
	 * isn't aware of PCID will end up harmlessly flushing
	 * context 0.
	 */
	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate);
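
/*
 * Illustrative access pattern (not new API): cpu_tlbstate is per-CPU, so
 * readers and writers go through the this_cpu accessors, typically with
 * preemption or interrupts already disabled, e.g.:
 *
 *	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
 *	u16 asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
 */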

struct tlb_state_shared {
	/*
	 * We can be in one of several states:
	 *
	 *  - Actively using an mm.  Our CPU's bit will be set in
	 *    mm_cpumask(loaded_mm) and is_lazy == false.
	 *
	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
	 *
	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
	 *    We're heuristically guessing that the CR3 load we
	 *    skipped more than makes up for the overhead added by
	 *    lazy mode.
	 */
	bool is_lazy;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
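
/*
 * Sketch of how the flush path can use is_lazy (simplified from
 * arch/x86/mm/tlb.c): the IPI sender may skip CPUs that are lazily using
 * the mm, unless page tables were freed, since a lazy CPU revalidates
 * tlb_gen when it switches back:
 *
 *	if (!this_cpu_read(cpu_tlbstate_shared.is_lazy))
 *		// this CPU must be sent a flush IPI now
 */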

bool nmi_uaccess_okay(void);
#define nmi_uaccess_okay nmi_uaccess_okay

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

extern void initialize_tlbstate_and_flush(void);

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_multi(cpumask, info) flushes TLBs on multiple cpus
 *
 * ...but the i386 has somewhat limited TLB flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
	/*
	 * We support several kinds of flushes.
	 *
	 * - Fully flush a single mm.  .mm will be set, .end will be
	 *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
	 *   which the IPI sender is trying to catch us up.
	 *
	 * - Partially flush a single mm.  .mm will be set, .start and
	 *   .end will indicate the range, and .new_tlb_gen will be set
	 *   such that the changes between generation .new_tlb_gen-1 and
	 *   .new_tlb_gen are entirely contained in the indicated range.
	 *
	 * - Fully flush all mms whose tlb_gens have been updated.  .mm
	 *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
	 *   will be zero.
	 */
	struct mm_struct	*mm;
	unsigned long		start;
	unsigned long		end;
	u64			new_tlb_gen;
	unsigned int		initiating_cpu;
	u8			stride_shift;
	u8			freed_tables;
};
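
/*
 * Illustrative sketch (simplified from arch/x86/mm/tlb.c): a partial-flush
 * request for one mm might be assembled like this before being handed to
 * flush_tlb_multi():
 *
 *	struct flush_tlb_info info = {
 *		.mm		= mm,
 *		.start		= start,
 *		.end		= end,
 *		.new_tlb_gen	= inc_mm_tlb_gen(mm),
 *		.initiating_cpu	= smp_processor_id(),
 *		.stride_shift	= PAGE_SHIFT,
 *		.freed_tables	= false,
 *	};
 *	flush_tlb_multi(mm_cpumask(mm), &info);
 */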

void flush_tlb_local(void);
void flush_tlb_one_user(unsigned long addr);
void flush_tlb_one_kernel(unsigned long addr);
void flush_tlb_multi(const struct cpumask *cpumask,
		      const struct flush_tlb_info *info);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#define flush_tlb_mm(mm)						\
		flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)

#define flush_tlb_range(vma, start, end)				\
	flush_tlb_mm_range((vma)->vm_mm, start, end,			\
			   ((vma)->vm_flags & VM_HUGETLB)		\
				? huge_page_shift(hstate_vma(vma))	\
				: PAGE_SHIFT, false)
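
/*
 * Illustrative expansion: for a 2 MB hugetlb VMA, flush_tlb_range() passes
 * a stride_shift of huge_page_shift() (21 for 2 MB pages) instead of
 * PAGE_SHIFT (12), so the flusher steps through the range one huge page at
 * a time rather than one base page at a time:
 *
 *	flush_tlb_mm_range(vma->vm_mm, start, end, 21, false);
 */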

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned int stride_shift,
				bool freed_tables);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
}

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	/*
	 * Bump the generation count.  This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	return atomic64_inc_return(&mm->context.tlb_gen);
}
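
/*
 * The ordering requirement above, as a sketch: a flusher must publish its
 * page-table change before deciding which CPUs to IPI, and the full
 * barrier in atomic64_inc_return() provides exactly that:
 *
 *	clear/modify PTEs;		// writes to paging structures
 *	new_gen = inc_mm_tlb_gen(mm);	// full barrier
 *	cpus = mm_cpumask(mm);		// read ordered after the writes
 *	// ...send flush IPIs to cpus...
 */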

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
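
/*
 * Batching lifecycle, illustratively (the generic reclaim code in mm/ is
 * the real caller): each mm whose PTEs were unmapped is added to the
 * batch, and the accumulated CPU mask is flushed once at the end:
 *
 *	arch_tlbbatch_add_mm(&batch, mm1);
 *	arch_tlbbatch_add_mm(&batch, mm2);
 *	arch_tlbbatch_flush(&batch);	// one flush round for all CPUs
 */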

#endif /* !MODULE */

#endif /* _ASM_X86_TLBFLUSH_H */