xref: /openbmc/linux/arch/x86/include/asm/tlb.h (revision 9144f784f852f9a125cabe9927b986d909bfa439)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
21965aae3SH. Peter Anvin #ifndef _ASM_X86_TLB_H
31965aae3SH. Peter Anvin #define _ASM_X86_TLB_H
4bb898558SAl Viro 
55f307be1SPeter Zijlstra #define tlb_flush tlb_flush
6a31acd3eSPeter Zijlstra static inline void tlb_flush(struct mmu_gather *tlb);
7bb898558SAl Viro 
8bb898558SAl Viro #include <asm-generic/tlb.h>
9bb898558SAl Viro 
tlb_flush(struct mmu_gather * tlb)10a31acd3eSPeter Zijlstra static inline void tlb_flush(struct mmu_gather *tlb)
11a31acd3eSPeter Zijlstra {
12a31acd3eSPeter Zijlstra 	unsigned long start = 0UL, end = TLB_FLUSH_ALL;
13a31acd3eSPeter Zijlstra 	unsigned int stride_shift = tlb_get_unmap_shift(tlb);
14a31acd3eSPeter Zijlstra 
15a31acd3eSPeter Zijlstra 	if (!tlb->fullmm && !tlb->need_flush_all) {
16a31acd3eSPeter Zijlstra 		start = tlb->start;
17a31acd3eSPeter Zijlstra 		end = tlb->end;
18a31acd3eSPeter Zijlstra 	}
19a31acd3eSPeter Zijlstra 
20016c4d92SRik van Riel 	flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables);
21a31acd3eSPeter Zijlstra }
22a31acd3eSPeter Zijlstra 
/*
 * While x86 architecture in general requires an IPI to perform TLB
 * shootdown, enablement code for several hypervisors overrides
 * .flush_tlb_others hook in pv_mmu_ops and implements it by issuing
 * a hypercall. To keep software pagetable walkers safe in this case we
 * switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the comment
 * below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h
 * for more details.
 */
/*
 * Free one page-table page on behalf of the mmu_gather table-free
 * machinery.  @table is the page to release; free_page_and_swap_cache()
 * also drops any swap-cache reference the page may still hold.
 * NOTE(review): with RCU table free (see comment above) this is
 * presumably invoked after a grace period — confirm against
 * include/asm-generic/tlb.h.
 */
static inline void __tlb_remove_table(void *table)
{
	free_page_and_swap_cache(table);
}
369e52fc2bSVitaly Kuznetsov 
/*
 * Invalidate the TLB entry for the page containing @addr on the local
 * CPU using the INVLPG instruction.  The "memory" clobber keeps the
 * compiler from reordering memory accesses across the invalidation.
 */
static inline void invlpg(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
411965aae3SH. Peter Anvin #endif /* _ASM_X86_TLB_H */
42