xref: /openbmc/linux/arch/x86/include/asm/tlbflush.h (revision f73419bb)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm_types.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
#include <asm/invpcid.h>
#include <asm/pti.h>
#include <asm/processor-flags.h>
#include <asm/pgtable.h>

DECLARE_PER_CPU(u64, tlbstate_untag_mask);

void __flush_tlb_all(void);

#define TLB_FLUSH_ALL	-1UL
#define TLB_GENERATION_INVALID	0

void cr4_update_irqsoff(unsigned long set, unsigned long clear);
unsigned long cr4_read_shadow(void);

/* Set in this cpu's CR4. */
static inline void cr4_set_bits_irqsoff(unsigned long mask)
{
	cr4_update_irqsoff(mask, 0);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits_irqsoff(unsigned long mask)
{
	cr4_update_irqsoff(0, mask);
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long flags;

	local_irq_save(flags);
	cr4_set_bits_irqsoff(mask);
	local_irq_restore(flags);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long flags;

	local_irq_save(flags);
	cr4_clear_bits_irqsoff(mask);
	local_irq_restore(flags);
}
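
/*
 * Editor's sketch (not part of this header's API): how a caller that may
 * run with interrupts enabled would use the IRQ-safe wrappers above.
 * X86_CR4_TSD is just an illustrative bit to toggle.
 */
#if 0	/* illustration only */
static void example_toggle_cr4_tsd(void)
{
	cr4_set_bits(X86_CR4_TSD);	/* saves/restores IRQ flags itself */
	/* ... run with CR4.TSD set ... */
	cr4_clear_bits(X86_CR4_TSD);
}
#endif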

#ifndef MODULE
/*
 * 6 because 6 should be plenty and struct tlb_state will fit in two cache
 * lines.
 */
#define TLB_NR_DYN_ASIDS	6

struct tlb_context {
	u64 ctx_id;
	u64 tlb_gen;
};

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on.  This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 *
	 * During switch_mm_irqs_off(), loaded_mm will be set to
	 * LOADED_MM_SWITCHING during the brief interrupts-off window
	 * when CR3 and loaded_mm would otherwise be inconsistent.  This
	 * is for nmi_uaccess_okay()'s benefit.
	 */
	struct mm_struct *loaded_mm;

#define LOADED_MM_SWITCHING ((struct mm_struct *)1UL)

	/* Last user mm for optimizing IBPB */
	union {
		struct mm_struct	*last_user_mm;
		unsigned long		last_user_mm_spec;
	};

	u16 loaded_mm_asid;
	u16 next_asid;

	/*
	 * If set we changed the page tables in such a way that we
	 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
	 * This tells us to go invalidate all the non-loaded ctxs[]
	 * on the next context switch.
	 *
	 * The current ctx was kept up-to-date as it ran and does not
	 * need to be invalidated.
	 */
	bool invalidate_other;

#ifdef CONFIG_ADDRESS_MASKING
	/*
	 * Active LAM mode.
	 *
	 * X86_CR3_LAM_U57/U48 shifted right by X86_CR3_LAM_U57_BIT, or 0
	 * if LAM is disabled.
	 */
	u8 lam;
#endif

	/*
	 * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
	 * the corresponding user PCID needs a flush next time we
	 * switch to it; see SWITCH_TO_USER_CR3.
	 */
	unsigned short user_pcid_flush_mask;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;

	/*
	 * This is a list of all contexts that might exist in the TLB.
	 * There is one per ASID that we use, and the ASID (what the
	 * CPU calls PCID) is the index into ctxs[].
	 *
	 * For each context, ctx_id indicates which mm the TLB's user
	 * entries came from.  As an invariant, the TLB will never
	 * contain entries that are stale with respect to the recorded
	 * tlb_gen: once a context reaches a given tlb_gen, every flush
	 * required up to that generation has been performed.
	 *
	 * To be clear, this means that it's legal for the TLB code to
	 * flush the TLB without updating tlb_gen.  This can happen
	 * (for now, at least) due to paravirt remote flushes.
	 *
	 * NB: context 0 is a bit special, since it's also used by
	 * various bits of init code.  This is fine -- code that
	 * isn't aware of PCID will end up harmlessly flushing
	 * context 0.
	 */
	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate);
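
/*
 * Editor's sketch (not from the original header): how ctxs[] and tlb_gen
 * are typically consulted to decide whether this CPU's loaded context has
 * fallen behind its mm.  Assumes mm->context.tlb_gen, the counter bumped
 * by inc_mm_tlb_gen() below.
 */
#if 0	/* illustration only */
static bool example_loaded_ctx_is_stale(struct mm_struct *mm)
{
	u16 asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
	u64 mm_gen = atomic64_read(&mm->context.tlb_gen);
	u64 seen_gen = this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen);

	/* Stale if this CPU has not yet caught up to the mm's generation. */
	return seen_gen < mm_gen;
}
#endif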

struct tlb_state_shared {
	/*
	 * We can be in one of several states:
	 *
	 *  - Actively using an mm.  Our CPU's bit will be set in
	 *    mm_cpumask(loaded_mm) and is_lazy == false;
	 *
	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
	 *
	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
	 *    We're heuristically guessing that the CR3 load we
	 *    skipped more than makes up for the overhead added by
	 *    lazy mode.
	 */
	bool is_lazy;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
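
/*
 * Editor's sketch: the per-CPU lazy state above is read with an ordinary
 * this_cpu_read(), e.g.:
 */
#if 0	/* illustration only */
static bool example_this_cpu_is_lazy(void)
{
	return this_cpu_read(cpu_tlbstate_shared.is_lazy);
}
#endif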

bool nmi_uaccess_okay(void);
#define nmi_uaccess_okay nmi_uaccess_okay
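
/*
 * Editor's sketch (hypothetical caller): NMI-context code must check
 * nmi_uaccess_okay() before touching user memory, since an NMI can land
 * in the LOADED_MM_SWITCHING window where CR3 and loaded_mm disagree.
 */
#if 0	/* illustration only */
static unsigned long example_nmi_copy(void __user *dst, const void *src,
				      unsigned long len)
{
	if (!nmi_uaccess_okay())
		return len;	/* CR3 may not match loaded_mm right now */
	return copy_to_user_nmi(dst, src, len);
}
#endif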

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

extern void initialize_tlbstate_and_flush(void);

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_multi(cpumask, info) flushes TLBs on multiple cpus
 *
 * ...but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
	/*
	 * We support several kinds of flushes.
	 *
	 * - Fully flush a single mm.  .mm will be set, .end will be
	 *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
	 *   which the IPI sender is trying to catch us up.
	 *
	 * - Partially flush a single mm.  .mm will be set, .start and
	 *   .end will indicate the range, and .new_tlb_gen will be set
	 *   such that the changes between generation .new_tlb_gen-1 and
	 *   .new_tlb_gen are entirely contained in the indicated range.
	 *
	 * - Fully flush all mms whose tlb_gens have been updated.  .mm
	 *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
	 *   will be zero.
	 */
	struct mm_struct	*mm;
	unsigned long		start;
	unsigned long		end;
	u64			new_tlb_gen;
	unsigned int		initiating_cpu;
	u8			stride_shift;
	u8			freed_tables;
};
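
/*
 * Editor's sketch (hypothetical helper, not the kernel's own): what a
 * hand-built partial flush of one mm might look like.  The real
 * initiators live in arch/x86/mm/tlb.c; flush_tlb_multi() and
 * inc_mm_tlb_gen() are declared elsewhere in this header.
 */
#if 0	/* illustration only */
static void example_partial_flush(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct flush_tlb_info info = {
		.mm		= mm,
		.start		= start,
		.end		= end,
		.new_tlb_gen	= inc_mm_tlb_gen(mm),
		.initiating_cpu	= smp_processor_id(),
		.stride_shift	= PAGE_SHIFT,
		.freed_tables	= false,
	};

	flush_tlb_multi(mm_cpumask(mm), &info);
}
#endif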

void flush_tlb_local(void);
void flush_tlb_one_user(unsigned long addr);
void flush_tlb_one_kernel(unsigned long addr);
void flush_tlb_multi(const struct cpumask *cpumask,
		      const struct flush_tlb_info *info);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#define flush_tlb_mm(mm)						\
		flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)

#define flush_tlb_range(vma, start, end)				\
	flush_tlb_mm_range((vma)->vm_mm, start, end,			\
			   ((vma)->vm_flags & VM_HUGETLB)		\
				? huge_page_shift(hstate_vma(vma))	\
				: PAGE_SHIFT, false)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned int stride_shift,
				bool freed_tables);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
}
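
/*
 * Editor's sketch: the common entry points above in action.  Purely
 * illustrative; the vma and the user/kernel addresses are assumed valid.
 */
#if 0	/* illustration only */
static void example_flush_calls(struct vm_area_struct *vma,
				unsigned long uaddr, unsigned long kaddr)
{
	flush_tlb_page(vma, uaddr);			  /* one user page */
	flush_tlb_mm(vma->vm_mm);			  /* the whole mm  */
	flush_tlb_kernel_range(kaddr, kaddr + PAGE_SIZE); /* kernel pages  */
}
#endif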

static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
	bool should_defer = false;

	/* If remote CPUs need to be flushed, defer the flush so it can be batched */
	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	/*
	 * Bump the generation count.  This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	return atomic64_inc_return(&mm->context.tlb_gen);
}

static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
					     struct mm_struct *mm,
					     unsigned long uaddr)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
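
/*
 * Editor's sketch (hypothetical caller): the unmap-batching pattern the
 * three hooks above support.  Reclaim accumulates target CPUs into the
 * batch and issues one flush at the end instead of one IPI per page.
 */
#if 0	/* illustration only */
static void example_batched_unmap(struct arch_tlbflush_unmap_batch *batch,
				  struct mm_struct *mm, unsigned long uaddr)
{
	if (arch_tlbbatch_should_defer(mm)) {
		arch_tlbbatch_add_pending(batch, mm, uaddr);
		/* ... unmap more pages, accumulating into the batch ... */
		arch_tlbbatch_flush(batch);	/* one flush for the lot */
	}
}
#endif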

static inline bool pte_flags_need_flush(unsigned long oldflags,
					unsigned long newflags,
					bool ignore_access)
{
	/*
	 * Flags that require a flush when cleared but not when they are set.
	 * Only include flags that would not trigger spurious page-faults.
	 * Non-present entries are not cached. Hardware would set the
	 * dirty/access bit if needed without a fault.
	 */
	const pteval_t flush_on_clear = _PAGE_DIRTY | _PAGE_PRESENT |
					_PAGE_ACCESSED;
	const pteval_t software_flags = _PAGE_SOFTW1 | _PAGE_SOFTW2 |
					_PAGE_SOFTW3 | _PAGE_SOFTW4;
	const pteval_t flush_on_change = _PAGE_RW | _PAGE_USER | _PAGE_PWT |
			  _PAGE_PCD | _PAGE_PSE | _PAGE_GLOBAL | _PAGE_PAT |
			  _PAGE_PAT_LARGE | _PAGE_PKEY_BIT0 | _PAGE_PKEY_BIT1 |
			  _PAGE_PKEY_BIT2 | _PAGE_PKEY_BIT3 | _PAGE_NX;
	unsigned long diff = oldflags ^ newflags;

	BUILD_BUG_ON(flush_on_clear & software_flags);
	BUILD_BUG_ON(flush_on_clear & flush_on_change);
	BUILD_BUG_ON(flush_on_change & software_flags);

	/* Ignore software flags */
	diff &= ~software_flags;

	if (ignore_access)
		diff &= ~_PAGE_ACCESSED;

	/*
	 * Were any of the 'flush_on_clear' flags cleared between
	 * 'oldflags' and 'newflags'?
	 */
	if (diff & oldflags & flush_on_clear)
		return true;

	/* Flush on modified flags. */
	if (diff & flush_on_change)
		return true;

	/* Ensure there are no flags that were left behind */
	if (IS_ENABLED(CONFIG_DEBUG_VM) &&
	    (diff & ~(flush_on_clear | software_flags | flush_on_change))) {
		VM_WARN_ON_ONCE(1);
		return true;
	}

	return false;
}

/*
 * pte_needs_flush() checks whether permissions were demoted and require a
 * flush. It should only be used for userspace PTEs.
 */
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	/* !PRESENT -> * ; no need for flush */
	if (!(pte_flags(oldpte) & _PAGE_PRESENT))
		return false;

	/* PFN changed ; needs flush */
	if (pte_pfn(oldpte) != pte_pfn(newpte))
		return true;

	/*
	 * check PTE flags; ignore access-bit; see comment in
	 * ptep_clear_flush_young().
	 */
	return pte_flags_need_flush(pte_flags(oldpte), pte_flags(newpte),
				    true);
}
#define pte_needs_flush pte_needs_flush
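
/*
 * Editor's sketch: which PTE transitions demand a flush under the rules
 * above.  Assumes 'pte' is present and writable, and that the generic x86
 * helpers pte_wrprotect() and pte_mksoft_dirty() are available.
 */
#if 0	/* illustration only */
static void example_pte_needs_flush_cases(pte_t pte)
{
	/* Clearing _PAGE_RW is a 'flush_on_change' transition: flush. */
	bool demote = pte_needs_flush(pte, pte_wrprotect(pte));	  /* true  */

	/* Toggling a software-only bit is invisible to the TLB. */
	bool soft = pte_needs_flush(pte, pte_mksoft_dirty(pte));  /* false */

	(void)demote;
	(void)soft;
}
#endif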

/*
 * huge_pmd_needs_flush() checks whether permissions were demoted and require a
 * flush. It should only be used for userspace huge PMDs.
 */
static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	/* !PRESENT -> * ; no need for flush */
	if (!(pmd_flags(oldpmd) & _PAGE_PRESENT))
		return false;

	/* PFN changed ; needs flush */
	if (pmd_pfn(oldpmd) != pmd_pfn(newpmd))
		return true;

	/*
	 * check PMD flags; do not ignore access-bit; see
	 * pmdp_clear_flush_young().
	 */
	return pte_flags_need_flush(pmd_flags(oldpmd), pmd_flags(newpmd),
				    false);
}
#define huge_pmd_needs_flush huge_pmd_needs_flush

#ifdef CONFIG_ADDRESS_MASKING
static inline u64 tlbstate_lam_cr3_mask(void)
{
	u64 lam = this_cpu_read(cpu_tlbstate.lam);

	return lam << X86_CR3_LAM_U57_BIT;
}

static inline void set_tlbstate_lam_mode(struct mm_struct *mm)
{
	this_cpu_write(cpu_tlbstate.lam,
		       mm->context.lam_cr3_mask >> X86_CR3_LAM_U57_BIT);
	this_cpu_write(tlbstate_untag_mask, mm->context.untag_mask);
}
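
/*
 * Editor's sketch (hypothetical): tlbstate_lam_cr3_mask() is meant to be
 * OR'ed into a CR3 value as it is assembled; the real composition happens
 * in arch/x86/mm/tlb.c.  'pgd_pa' and 'asid_bits' are assumed inputs.
 */
#if 0	/* illustration only */
static unsigned long example_cr3_with_lam(unsigned long pgd_pa,
					  unsigned long asid_bits)
{
	return pgd_pa | asid_bits | tlbstate_lam_cr3_mask();
}
#endif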

#else

static inline u64 tlbstate_lam_cr3_mask(void)
{
	return 0;
}

static inline void set_tlbstate_lam_mode(struct mm_struct *mm)
{
}
#endif
#endif /* !MODULE */

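/*
 * Toggling CR4.PGE turns global-page support off and back on; the
 * architecture defines such a CR4 write as flushing all TLB entries,
 * including global ones.
 */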
static inline void __native_tlb_flush_global(unsigned long cr4)
{
	native_write_cr4(cr4 ^ X86_CR4_PGE);
	native_write_cr4(cr4);
}
#endif /* _ASM_X86_TLBFLUSH_H */