xref: /openbmc/linux/arch/x86/include/asm/tlbflush.h (revision 086fc8f8)
11965aae3SH. Peter Anvin #ifndef _ASM_X86_TLBFLUSH_H
21965aae3SH. Peter Anvin #define _ASM_X86_TLBFLUSH_H
3bb898558SAl Viro 
4bb898558SAl Viro #include <linux/mm.h>
5bb898558SAl Viro #include <linux/sched.h>
6bb898558SAl Viro 
7bb898558SAl Viro #include <asm/processor.h>
8f05e798aSDavid Howells #include <asm/special_insns.h>
9bb898558SAl Viro 
10bb898558SAl Viro #ifdef CONFIG_PARAVIRT
11bb898558SAl Viro #include <asm/paravirt.h>
12bb898558SAl Viro #else
13bb898558SAl Viro #define __flush_tlb() __native_flush_tlb()
14bb898558SAl Viro #define __flush_tlb_global() __native_flush_tlb_global()
15bb898558SAl Viro #define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
16bb898558SAl Viro #endif
17bb898558SAl Viro 
/*
 * Flush this CPU's non-global TLB entries by rewriting CR3 with its
 * current value.  Global (PGE) entries survive a CR3 reload.
 */
static inline void __native_flush_tlb(void)
{
	unsigned long cr3 = native_read_cr3();

	native_write_cr3(cr3);
}
22bb898558SAl Viro 
23086fc8f8SFenghua Yu static inline void __native_flush_tlb_global_irq_disabled(void)
24086fc8f8SFenghua Yu {
25086fc8f8SFenghua Yu 	unsigned long cr4;
26086fc8f8SFenghua Yu 
27086fc8f8SFenghua Yu 	cr4 = native_read_cr4();
28086fc8f8SFenghua Yu 	/* clear PGE */
29086fc8f8SFenghua Yu 	native_write_cr4(cr4 & ~X86_CR4_PGE);
30086fc8f8SFenghua Yu 	/* write old PGE again and flush TLBs */
31086fc8f8SFenghua Yu 	native_write_cr4(cr4);
32086fc8f8SFenghua Yu }
33086fc8f8SFenghua Yu 
/*
 * Full TLB flush (global entries included), callable from any context.
 *
 * The CR4 read-modify-write in the helper must not be preempted or
 * interrupted; the raw irq accessors are used because this path can be
 * reached from deep inside debugging code.
 */
static inline void __native_flush_tlb_global(void)
{
	unsigned long irq_flags;

	raw_local_irq_save(irq_flags);
	__native_flush_tlb_global_irq_disabled();
	raw_local_irq_restore(irq_flags);
}
49bb898558SAl Viro 
/*
 * Flush the single TLB entry mapping @addr on this CPU via INVLPG.
 * The "memory" clobber keeps the compiler from reordering memory
 * accesses across the flush.
 */
static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
54bb898558SAl Viro 
55bb898558SAl Viro static inline void __flush_tlb_all(void)
56bb898558SAl Viro {
57bb898558SAl Viro 	if (cpu_has_pge)
58bb898558SAl Viro 		__flush_tlb_global();
59bb898558SAl Viro 	else
60bb898558SAl Viro 		__flush_tlb();
61bb898558SAl Viro }
62bb898558SAl Viro 
/* Flush the single TLB entry covering @addr on this CPU. */
static inline void __flush_tlb_one(unsigned long addr)
{
	/* Fixed: body was double-indented; kernel style is one tab. */
	__flush_tlb_single(addr);
}
67bb898558SAl Viro 
/* Sentinel range bound meaning "flush the entire address space". */
#define TLB_FLUSH_ALL	-1UL
69bb898558SAl Viro 
70bb898558SAl Viro /*
71bb898558SAl Viro  * TLB flushing:
72bb898558SAl Viro  *
73bb898558SAl Viro  *  - flush_tlb() flushes the current mm struct TLBs
74bb898558SAl Viro  *  - flush_tlb_all() flushes all processes TLBs
75bb898558SAl Viro  *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
76bb898558SAl Viro  *  - flush_tlb_page(vma, vmaddr) flushes one page
77bb898558SAl Viro  *  - flush_tlb_range(vma, start, end) flushes a range of pages
78bb898558SAl Viro  *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
79e7b52ffdSAlex Shi  *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
80bb898558SAl Viro  *
81bb898558SAl Viro  * ..but the i386 has somewhat limited tlb flushing capabilities,
82bb898558SAl Viro  * and page-granular flushes are available only on i486 and up.
83bb898558SAl Viro  */
84bb898558SAl Viro 
85bb898558SAl Viro #ifndef CONFIG_SMP
86bb898558SAl Viro 
/*
 * UP: with no remote CPUs to shoot down, every flush flavour collapses
 * to the corresponding local flush.
 */
#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()
90bb898558SAl Viro 
91bb898558SAl Viro static inline void flush_tlb_mm(struct mm_struct *mm)
92bb898558SAl Viro {
93bb898558SAl Viro 	if (mm == current->active_mm)
94bb898558SAl Viro 		__flush_tlb();
95bb898558SAl Viro }
96bb898558SAl Viro 
97bb898558SAl Viro static inline void flush_tlb_page(struct vm_area_struct *vma,
98bb898558SAl Viro 				  unsigned long addr)
99bb898558SAl Viro {
100bb898558SAl Viro 	if (vma->vm_mm == current->active_mm)
101bb898558SAl Viro 		__flush_tlb_one(addr);
102bb898558SAl Viro }
103bb898558SAl Viro 
104bb898558SAl Viro static inline void flush_tlb_range(struct vm_area_struct *vma,
105bb898558SAl Viro 				   unsigned long start, unsigned long end)
106bb898558SAl Viro {
107bb898558SAl Viro 	if (vma->vm_mm == current->active_mm)
108bb898558SAl Viro 		__flush_tlb();
109bb898558SAl Viro }
110bb898558SAl Viro 
1117efa1c87SAlex Shi static inline void flush_tlb_mm_range(struct mm_struct *mm,
112611ae8e3SAlex Shi 	   unsigned long start, unsigned long end, unsigned long vmflag)
113611ae8e3SAlex Shi {
1147efa1c87SAlex Shi 	if (mm == current->active_mm)
115611ae8e3SAlex Shi 		__flush_tlb();
116611ae8e3SAlex Shi }
117611ae8e3SAlex Shi 
/*
 * UP stub: there are no other CPUs whose TLBs could need flushing,
 * so this is deliberately a no-op.
 */
static inline void native_flush_tlb_others(const struct cpumask *cpumask,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
}
124bb898558SAl Viro 
/* UP stub: no per-cpu lazy TLB state exists, so nothing to reset. */
static inline void reset_lazy_tlbstate(void)
{
}
128bb898558SAl Viro 
/*
 * UP: kernel-range flushes are handled with a full flush; @start and
 * @end are intentionally unused on this configuration.
 */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}
134effee4b9SAlex Shi 
135bb898558SAl Viro #else  /* SMP */
136bb898558SAl Viro 
137bb898558SAl Viro #include <asm/smp.h>
138bb898558SAl Viro 
139bb898558SAl Viro #define local_flush_tlb() __flush_tlb()
140bb898558SAl Viro 
/* On SMP, mm- and range-level flushes funnel through flush_tlb_mm_range(). */
#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
145611ae8e3SAlex Shi 
146bb898558SAl Viro extern void flush_tlb_all(void);
147bb898558SAl Viro extern void flush_tlb_current_task(void);
148bb898558SAl Viro extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
149611ae8e3SAlex Shi extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
150611ae8e3SAlex Shi 				unsigned long end, unsigned long vmflag);
151effee4b9SAlex Shi extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
152bb898558SAl Viro 
153bb898558SAl Viro #define flush_tlb()	flush_tlb_current_task()
154bb898558SAl Viro 
1554595f962SRusty Russell void native_flush_tlb_others(const struct cpumask *cpumask,
156e7b52ffdSAlex Shi 				struct mm_struct *mm,
157e7b52ffdSAlex Shi 				unsigned long start, unsigned long end);
158bb898558SAl Viro 
/* Values for tlb_state.state (0 is the reset value, see reset_lazy_tlbstate()). */
#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

/* Per-cpu TLB bookkeeping used for lazy TLB handling. */
struct tlb_state {
	struct mm_struct *active_mm;	/* mm currently tracked for this cpu */
	int state;			/* 0, TLBSTATE_OK, or TLBSTATE_LAZY */
};
1669b8de747SDavid Howells DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
167bb898558SAl Viro 
/*
 * Reset this cpu's lazy TLB bookkeeping: state back to 0 and
 * active_mm parked on init_mm.
 */
static inline void reset_lazy_tlbstate(void)
{
	this_cpu_write(cpu_tlbstate.state, 0);
	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}
173bb898558SAl Viro 
174bb898558SAl Viro #endif	/* SMP */
175bb898558SAl Viro 
176bb898558SAl Viro #ifndef CONFIG_PARAVIRT
177e7b52ffdSAlex Shi #define flush_tlb_others(mask, mm, start, end)	\
178e7b52ffdSAlex Shi 	native_flush_tlb_others(mask, mm, start, end)
179bb898558SAl Viro #endif
180bb898558SAl Viro 
1811965aae3SH. Peter Anvin #endif /* _ASM_X86_TLBFLUSH_H */
182