xref: /openbmc/linux/arch/x86/include/asm/tlbflush.h (revision 1965aae3)
11965aae3SH. Peter Anvin #ifndef _ASM_X86_TLBFLUSH_H
21965aae3SH. Peter Anvin #define _ASM_X86_TLBFLUSH_H
3bb898558SAl Viro 
4bb898558SAl Viro #include <linux/mm.h>
5bb898558SAl Viro #include <linux/sched.h>
6bb898558SAl Viro 
7bb898558SAl Viro #include <asm/processor.h>
8bb898558SAl Viro #include <asm/system.h>
9bb898558SAl Viro 
10bb898558SAl Viro #ifdef CONFIG_PARAVIRT
11bb898558SAl Viro #include <asm/paravirt.h>
12bb898558SAl Viro #else
13bb898558SAl Viro #define __flush_tlb() __native_flush_tlb()
14bb898558SAl Viro #define __flush_tlb_global() __native_flush_tlb_global()
15bb898558SAl Viro #define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
16bb898558SAl Viro #endif
17bb898558SAl Viro 
/*
 * Flush this CPU's TLB by reloading CR3 with its current value.
 * Note: this does not flush global (PGE) entries; use
 * __native_flush_tlb_global() for that.
 */
static inline void __native_flush_tlb(void)
{
	unsigned long cr3 = read_cr3();

	write_cr3(cr3);
}
22bb898558SAl Viro 
/*
 * Flush all TLB entries on this CPU, including global (PGE) ones.
 * Writing CR4 with PGE cleared invalidates every entry; writing the
 * old value back re-enables global pages.  The statement order here
 * is deliberate and must not be changed.
 */
static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;
	unsigned long cr4;

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	cr4 = read_cr4();
	/* clear PGE */
	write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	write_cr4(cr4);

	raw_local_irq_restore(flags);
}
43bb898558SAl Viro 
/*
 * Invalidate the single TLB entry mapping @addr on this CPU via the
 * INVLPG instruction.  The "memory" clobber prevents the compiler
 * from reordering memory accesses across the flush.
 */
static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
48bb898558SAl Viro 
49bb898558SAl Viro static inline void __flush_tlb_all(void)
50bb898558SAl Viro {
51bb898558SAl Viro 	if (cpu_has_pge)
52bb898558SAl Viro 		__flush_tlb_global();
53bb898558SAl Viro 	else
54bb898558SAl Viro 		__flush_tlb();
55bb898558SAl Viro }
56bb898558SAl Viro 
57bb898558SAl Viro static inline void __flush_tlb_one(unsigned long addr)
58bb898558SAl Viro {
59bb898558SAl Viro 	if (cpu_has_invlpg)
60bb898558SAl Viro 		__flush_tlb_single(addr);
61bb898558SAl Viro 	else
62bb898558SAl Viro 		__flush_tlb();
63bb898558SAl Viro }
64bb898558SAl Viro 
65bb898558SAl Viro #ifdef CONFIG_X86_32
66bb898558SAl Viro # define TLB_FLUSH_ALL	0xffffffff
67bb898558SAl Viro #else
68bb898558SAl Viro # define TLB_FLUSH_ALL	-1ULL
69bb898558SAl Viro #endif
70bb898558SAl Viro 
71bb898558SAl Viro /*
72bb898558SAl Viro  * TLB flushing:
73bb898558SAl Viro  *
74bb898558SAl Viro  *  - flush_tlb() flushes the current mm struct TLBs
75bb898558SAl Viro  *  - flush_tlb_all() flushes all processes TLBs
76bb898558SAl Viro  *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
77bb898558SAl Viro  *  - flush_tlb_page(vma, vmaddr) flushes one page
78bb898558SAl Viro  *  - flush_tlb_range(vma, start, end) flushes a range of pages
79bb898558SAl Viro  *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
80bb898558SAl Viro  *  - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
81bb898558SAl Viro  *
82bb898558SAl Viro  * ..but the i386 has somewhat limited tlb flushing capabilities,
83bb898558SAl Viro  * and page-granular flushes are available only on i486 and up.
84bb898558SAl Viro  *
85bb898558SAl Viro  * x86-64 can only flush individual pages or full VMs. For a range flush
86bb898558SAl Viro  * we always do the full VM. Might be worth trying if for a small
87bb898558SAl Viro  * range a few INVLPGs in a row are a win.
88bb898558SAl Viro  */
89bb898558SAl Viro 
90bb898558SAl Viro #ifndef CONFIG_SMP
91bb898558SAl Viro 
92bb898558SAl Viro #define flush_tlb() __flush_tlb()
93bb898558SAl Viro #define flush_tlb_all() __flush_tlb_all()
94bb898558SAl Viro #define local_flush_tlb() __flush_tlb()
95bb898558SAl Viro 
96bb898558SAl Viro static inline void flush_tlb_mm(struct mm_struct *mm)
97bb898558SAl Viro {
98bb898558SAl Viro 	if (mm == current->active_mm)
99bb898558SAl Viro 		__flush_tlb();
100bb898558SAl Viro }
101bb898558SAl Viro 
102bb898558SAl Viro static inline void flush_tlb_page(struct vm_area_struct *vma,
103bb898558SAl Viro 				  unsigned long addr)
104bb898558SAl Viro {
105bb898558SAl Viro 	if (vma->vm_mm == current->active_mm)
106bb898558SAl Viro 		__flush_tlb_one(addr);
107bb898558SAl Viro }
108bb898558SAl Viro 
109bb898558SAl Viro static inline void flush_tlb_range(struct vm_area_struct *vma,
110bb898558SAl Viro 				   unsigned long start, unsigned long end)
111bb898558SAl Viro {
112bb898558SAl Viro 	if (vma->vm_mm == current->active_mm)
113bb898558SAl Viro 		__flush_tlb();
114bb898558SAl Viro }
115bb898558SAl Viro 
/*
 * UP stub: there are no other CPUs whose TLBs could need flushing,
 * so this is deliberately a no-op.
 */
static inline void native_flush_tlb_others(const cpumask_t *cpumask,
					   struct mm_struct *mm,
					   unsigned long va)
{
}
121bb898558SAl Viro 
/* UP stub: no per-cpu lazy-TLB state exists on uniprocessor builds. */
static inline void reset_lazy_tlbstate(void)
{
}
125bb898558SAl Viro 
126bb898558SAl Viro #else  /* SMP */
127bb898558SAl Viro 
128bb898558SAl Viro #include <asm/smp.h>
129bb898558SAl Viro 
130bb898558SAl Viro #define local_flush_tlb() __flush_tlb()
131bb898558SAl Viro 
132bb898558SAl Viro extern void flush_tlb_all(void);
133bb898558SAl Viro extern void flush_tlb_current_task(void);
134bb898558SAl Viro extern void flush_tlb_mm(struct mm_struct *);
135bb898558SAl Viro extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
136bb898558SAl Viro 
137bb898558SAl Viro #define flush_tlb()	flush_tlb_current_task()
138bb898558SAl Viro 
139bb898558SAl Viro static inline void flush_tlb_range(struct vm_area_struct *vma,
140bb898558SAl Viro 				   unsigned long start, unsigned long end)
141bb898558SAl Viro {
142bb898558SAl Viro 	flush_tlb_mm(vma->vm_mm);
143bb898558SAl Viro }
144bb898558SAl Viro 
145bb898558SAl Viro void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
146bb898558SAl Viro 			     unsigned long va);
147bb898558SAl Viro 
148bb898558SAl Viro #define TLBSTATE_OK	1
149bb898558SAl Viro #define TLBSTATE_LAZY	2
150bb898558SAl Viro 
#ifdef CONFIG_X86_32
/*
 * Per-cpu lazy-TLB bookkeeping (32-bit SMP only).
 */
struct tlb_state {
	struct mm_struct *active_mm;	/* mm whose entries this CPU's TLB holds */
	int state;			/* TLBSTATE_OK or TLBSTATE_LAZY */
	/* pad to one cache line; 8 = sizeof(active_mm) + sizeof(state) on 32-bit */
	char __cacheline_padding[L1_CACHE_BYTES-8];
};
DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);

void reset_lazy_tlbstate(void);
#else
/* 64-bit keeps no such per-cpu state here, so nothing to reset. */
static inline void reset_lazy_tlbstate(void)
{
}
#endif
165bb898558SAl Viro 
166bb898558SAl Viro #endif	/* SMP */
167bb898558SAl Viro 
168bb898558SAl Viro #ifndef CONFIG_PARAVIRT
169bb898558SAl Viro #define flush_tlb_others(mask, mm, va)	native_flush_tlb_others(&mask, mm, va)
170bb898558SAl Viro #endif
171bb898558SAl Viro 
/*
 * Flush a range of kernel addresses.  No ranged primitive is used
 * here, so the entire TLB is flushed regardless of @start/@end
 * (see the "TLB flushing" comment above about range-flush trade-offs).
 */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}
177bb898558SAl Viro 
1781965aae3SH. Peter Anvin #endif /* _ASM_X86_TLBFLUSH_H */
179