/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm_types.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
#include <asm/invpcid.h>
#include <asm/pti.h>
#include <asm/processor-flags.h>
#include <asm/pgtable.h>

DECLARE_PER_CPU(u64, tlbstate_untag_mask);

void __flush_tlb_all(void);

#define TLB_FLUSH_ALL	-1UL
#define TLB_GENERATION_INVALID	0

void cr4_update_irqsoff(unsigned long set, unsigned long clear);
unsigned long cr4_read_shadow(void);

/* Set in this cpu's CR4. */
static inline void cr4_set_bits_irqsoff(unsigned long mask)
{
	cr4_update_irqsoff(mask, 0);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits_irqsoff(unsigned long mask)
{
	cr4_update_irqsoff(0, mask);
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long flags;

	local_irq_save(flags);
	cr4_set_bits_irqsoff(mask);
	local_irq_restore(flags);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long flags;

	local_irq_save(flags);
	cr4_clear_bits_irqsoff(mask);
	local_irq_restore(flags);
}
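
/*
 * Usage sketch (illustrative only; X86_CR4_TSD is just an example bit):
 * these helpers touch only the *current* CPU's CR4 and its shadow, so a
 * caller that wants a machine-wide effect must run them on every CPU,
 * e.g. via on_each_cpu():
 *
 *	cr4_set_bits(X86_CR4_TSD);	// IRQ-safe: saves/restores flags
 *	...
 *	cr4_clear_bits(X86_CR4_TSD);
 */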

#ifndef MODULE
/*
 * 6 because 6 should be plenty and struct tlb_state will fit in two cache
 * lines.
 */
#define TLB_NR_DYN_ASIDS	6

struct tlb_context {
	u64 ctx_id;
	u64 tlb_gen;
};

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on.  This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 *
	 * During switch_mm_irqs_off(), loaded_mm will be set to
	 * LOADED_MM_SWITCHING during the brief interrupts-off window
	 * when CR3 and loaded_mm would otherwise be inconsistent.  This
	 * is for nmi_uaccess_okay()'s benefit.
	 */
	struct mm_struct *loaded_mm;

#define LOADED_MM_SWITCHING ((struct mm_struct *)1UL)

	/* Last user mm for optimizing IBPB */
	union {
		struct mm_struct	*last_user_mm;
		unsigned long		last_user_mm_spec;
	};

	u16 loaded_mm_asid;
	u16 next_asid;

	/*
	 * If set we changed the page tables in such a way that we
	 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
	 * This tells us to go invalidate all the non-loaded ctxs[]
	 * on the next context switch.
	 *
	 * The current ctx was kept up-to-date as it ran and does not
	 * need to be invalidated.
	 */
	bool invalidate_other;

#ifdef CONFIG_ADDRESS_MASKING
	/*
	 * Active LAM mode.
	 *
	 * X86_CR3_LAM_U57/U48 shifted right by X86_CR3_LAM_U57_BIT, or 0
	 * if LAM is disabled.
	 */
	u8 lam;
#endif

	/*
	 * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
	 * the corresponding user PCID needs a flush next time we
	 * switch to it; see SWITCH_TO_USER_CR3.
	 */
	unsigned short user_pcid_flush_mask;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;

	/*
	 * This is a list of all contexts that might exist in the TLB.
	 * There is one per ASID that we use, and the ASID (what the
	 * CPU calls PCID) is the index into ctxs[].
	 *
	 * For each context, ctx_id indicates which mm the TLB's user
	 * entries came from.  As an invariant, the TLB will never
	 * contain entries that are out-of-date with respect to the
	 * point when that mm reached the tlb_gen in the list.
	 *
	 * To be clear, this means that it's legal for the TLB code to
	 * flush the TLB without updating tlb_gen.  This can happen
	 * (for now, at least) due to paravirt remote flushes.
	 *
	 * NB: context 0 is a bit special, since it's also used by
	 * various bits of init code.  This is fine -- code that
	 * isn't aware of PCID will end up harmlessly flushing
	 * context 0.
	 */
	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate);

struct tlb_state_shared {
	/*
	 * We can be in one of several states:
	 *
	 *  - Actively using an mm.  Our CPU's bit will be set in
	 *    mm_cpumask(loaded_mm) and is_lazy == false.
	 *
	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
	 *
	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
	 *    We're heuristically guessing that the CR3 load we
	 *    skipped more than makes up for the overhead added by
	 *    lazy mode.
	 */
	bool is_lazy;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);

bool nmi_uaccess_okay(void);
#define nmi_uaccess_okay nmi_uaccess_okay
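
/*
 * Usage sketch (illustrative; modelled on NMI-context user-memory
 * readers such as copy_from_user_nmi()): code running in NMI context
 * must check nmi_uaccess_okay() before touching user memory, because
 * CR3 may not match loaded_mm while a context switch is in progress:
 *
 *	if (!nmi_uaccess_okay())
 *		return -EFAULT;		// hypothetical error handling
 *	// ... user memory accesses are safe here ...
 */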

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

extern void initialize_tlbstate_and_flush(void);

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_multi(cpumask, info) flushes TLBs on multiple cpus
 *
 * ... but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
	/*
	 * We support several kinds of flushes.
	 *
	 * - Fully flush a single mm.  .mm will be set, .end will be
	 *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
	 *   which the IPI sender is trying to catch us up.
	 *
	 * - Partially flush a single mm.  .mm will be set, .start and
	 *   .end will indicate the range, and .new_tlb_gen will be set
	 *   such that the changes between generation .new_tlb_gen-1 and
	 *   .new_tlb_gen are entirely contained in the indicated range.
	 *
	 * - Fully flush all mms whose tlb_gens have been updated.  .mm
	 *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
	 *   will be zero.
	 */
	struct mm_struct	*mm;
	unsigned long		start;
	unsigned long		end;
	u64			new_tlb_gen;
	unsigned int		initiating_cpu;
	u8			stride_shift;
	u8			freed_tables;
};
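
/*
 * Illustrative field settings (hypothetical 'mm' and 'addr', gen value
 * elided) for the "partially flush a single mm" case above, covering
 * one 4k page:
 *
 *	.mm = mm, .start = addr, .end = addr + PAGE_SIZE,
 *	.stride_shift = PAGE_SHIFT, .freed_tables = false,
 *	.new_tlb_gen = <generation covering this change>
 */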

void flush_tlb_local(void);
void flush_tlb_one_user(unsigned long addr);
void flush_tlb_one_kernel(unsigned long addr);
void flush_tlb_multi(const struct cpumask *cpumask,
		      const struct flush_tlb_info *info);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#define flush_tlb_mm(mm)						\
		flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)

#define flush_tlb_range(vma, start, end)				\
	flush_tlb_mm_range((vma)->vm_mm, start, end,			\
			   ((vma)->vm_flags & VM_HUGETLB)		\
				? huge_page_shift(hstate_vma(vma))	\
				: PAGE_SHIFT, false)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned int stride_shift,
				bool freed_tables);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
}
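
/*
 * Usage sketch (illustrative; 'vma', 'addr', 'start' and 'end' are
 * hypothetical): after changing page-table entries, pick the narrowest
 * flush that covers the change:
 *
 *	flush_tlb_page(vma, addr);		// a single page
 *	flush_tlb_range(vma, start, end);	// a range, stride from vma
 *	flush_tlb_mm(vma->vm_mm);		// the whole address space
 */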

static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
	bool should_defer = false;

	/* If remote CPUs need to be flushed then defer the flush by batching it */
	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	/*
	 * Bump the generation count.  This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	return atomic64_inc_return(&mm->context.tlb_gen);
}
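
/*
 * Ordering sketch (illustrative): a flusher relies on the full barrier
 * in inc_mm_tlb_gen() so that every CPU either observes the updated
 * page tables or is targeted by the subsequent flush:
 *
 *	// 1) modify the paging structures
 *	// 2) inc_mm_tlb_gen(mm);	full barrier
 *	// 3) read mm_cpumask(mm) and flush the CPUs found there
 */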

static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
					     struct mm_struct *mm,
					     unsigned long uaddr)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
}

static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
{
	flush_tlb_mm(mm);
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

static inline bool pte_flags_need_flush(unsigned long oldflags,
					unsigned long newflags,
					bool ignore_access)
{
	/*
	 * Flags that require a flush when cleared but not when they are set.
	 * Only include flags that would not trigger spurious page-faults.
	 * Non-present entries are not cached. Hardware would set the
	 * dirty/access bit if needed without a fault.
	 */
	const pteval_t flush_on_clear = _PAGE_DIRTY | _PAGE_PRESENT |
					_PAGE_ACCESSED;
	const pteval_t software_flags = _PAGE_SOFTW1 | _PAGE_SOFTW2 |
					_PAGE_SOFTW3 | _PAGE_SOFTW4 |
					_PAGE_SAVED_DIRTY;
	const pteval_t flush_on_change = _PAGE_RW | _PAGE_USER | _PAGE_PWT |
			  _PAGE_PCD | _PAGE_PSE | _PAGE_GLOBAL | _PAGE_PAT |
			  _PAGE_PAT_LARGE | _PAGE_PKEY_BIT0 | _PAGE_PKEY_BIT1 |
			  _PAGE_PKEY_BIT2 | _PAGE_PKEY_BIT3 | _PAGE_NX;
	unsigned long diff = oldflags ^ newflags;

	BUILD_BUG_ON(flush_on_clear & software_flags);
	BUILD_BUG_ON(flush_on_clear & flush_on_change);
	BUILD_BUG_ON(flush_on_change & software_flags);

	/* Ignore software flags */
	diff &= ~software_flags;

	if (ignore_access)
		diff &= ~_PAGE_ACCESSED;

	/*
	 * Were any of the 'flush_on_clear' flags cleared between
	 * 'oldflags' and 'newflags'?
	 */
	if (diff & oldflags & flush_on_clear)
		return true;

	/* Flush on modified flags. */
	if (diff & flush_on_change)
		return true;

	/* Ensure there are no flags that were left behind */
	if (IS_ENABLED(CONFIG_DEBUG_VM) &&
	    (diff & ~(flush_on_clear | software_flags | flush_on_change))) {
		VM_WARN_ON_ONCE(1);
		return true;
	}

	return false;
}
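
/*
 * Worked examples (hypothetical flag transitions):
 *
 *  - Write-protecting a PTE clears _PAGE_RW.  _PAGE_RW is in
 *    'flush_on_change', so pte_flags_need_flush() returns true.
 *
 *  - Setting _PAGE_ACCESSED on a previously not-accessed PTE changes a
 *    'flush_on_clear' bit, but only from 0 to 1; 'diff & oldflags' is
 *    then zero for that bit, so no flush is needed.
 */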

/*
 * pte_needs_flush() checks whether permissions were demoted and require a
 * flush. It should only be used for userspace PTEs.
 */
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	/* !PRESENT -> * ; no need for flush */
	if (!(pte_flags(oldpte) & _PAGE_PRESENT))
		return false;

	/* PFN changed ; needs flush */
	if (pte_pfn(oldpte) != pte_pfn(newpte))
		return true;

	/*
	 * Check PTE flags; ignore access-bit; see comment in
	 * ptep_clear_flush_young().
	 */
	return pte_flags_need_flush(pte_flags(oldpte), pte_flags(newpte),
				    true);
}
#define pte_needs_flush pte_needs_flush

/*
 * huge_pmd_needs_flush() checks whether permissions were demoted and require a
 * flush. It should only be used for userspace huge PMDs.
 */
static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	/* !PRESENT -> * ; no need for flush */
	if (!(pmd_flags(oldpmd) & _PAGE_PRESENT))
		return false;

	/* PFN changed ; needs flush */
	if (pmd_pfn(oldpmd) != pmd_pfn(newpmd))
		return true;

	/*
	 * Check PMD flags; do not ignore access-bit; see
	 * pmdp_clear_flush_young().
	 */
	return pte_flags_need_flush(pmd_flags(oldpmd), pmd_flags(newpmd),
				    false);
}
#define huge_pmd_needs_flush huge_pmd_needs_flush

#ifdef CONFIG_ADDRESS_MASKING
static inline u64 tlbstate_lam_cr3_mask(void)
{
	u64 lam = this_cpu_read(cpu_tlbstate.lam);

	return lam << X86_CR3_LAM_U57_BIT;
}
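
/*
 * Worked example (values from processor-flags.h): with LAM_U57 active,
 * cpu_tlbstate.lam holds X86_CR3_LAM_U57 >> X86_CR3_LAM_U57_BIT == 1,
 * so tlbstate_lam_cr3_mask() reconstructs X86_CR3_LAM_U57 (bit 61),
 * ready to be OR'ed into a CR3 value.
 */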

static inline void set_tlbstate_lam_mode(struct mm_struct *mm)
{
	this_cpu_write(cpu_tlbstate.lam,
		       mm->context.lam_cr3_mask >> X86_CR3_LAM_U57_BIT);
	this_cpu_write(tlbstate_untag_mask, mm->context.untag_mask);
}

#else

static inline u64 tlbstate_lam_cr3_mask(void)
{
	return 0;
}

static inline void set_tlbstate_lam_mode(struct mm_struct *mm)
{
}
#endif
#endif /* !MODULE */

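/*
 * Toggling CR4.PGE below and then writing the original value back
 * flushes the entire TLB: any write to CR4 that changes PGE invalidates
 * all TLB entries, including global pages, which a plain CR3 write
 * leaves alone.
 */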
static inline void __native_tlb_flush_global(unsigned long cr4)
{
	native_write_cr4(cr4 ^ X86_CR4_PGE);
	native_write_cr4(cr4);
}
#endif /* _ASM_X86_TLBFLUSH_H */