xref: /openbmc/linux/arch/x86/include/asm/tlbflush.h (revision 060a402a)
11965aae3SH. Peter Anvin #ifndef _ASM_X86_TLBFLUSH_H
21965aae3SH. Peter Anvin #define _ASM_X86_TLBFLUSH_H
3bb898558SAl Viro 
4bb898558SAl Viro #include <linux/mm.h>
5bb898558SAl Viro #include <linux/sched.h>
6bb898558SAl Viro 
7bb898558SAl Viro #include <asm/processor.h>
8f05e798aSDavid Howells #include <asm/special_insns.h>
9bb898558SAl Viro 
10060a402aSAndy Lutomirski static inline void __invpcid(unsigned long pcid, unsigned long addr,
11060a402aSAndy Lutomirski 			     unsigned long type)
12060a402aSAndy Lutomirski {
13060a402aSAndy Lutomirski 	u64 desc[2] = { pcid, addr };
14060a402aSAndy Lutomirski 
15060a402aSAndy Lutomirski 	/*
16060a402aSAndy Lutomirski 	 * The memory clobber is because the whole point is to invalidate
17060a402aSAndy Lutomirski 	 * stale TLB entries and, especially if we're flushing global
18060a402aSAndy Lutomirski 	 * mappings, we don't want the compiler to reorder any subsequent
19060a402aSAndy Lutomirski 	 * memory accesses before the TLB flush.
20060a402aSAndy Lutomirski 	 *
21060a402aSAndy Lutomirski 	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
22060a402aSAndy Lutomirski 	 * invpcid (%rcx), %rax in long mode.
23060a402aSAndy Lutomirski 	 */
24060a402aSAndy Lutomirski 	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
25060a402aSAndy Lutomirski 		      : : "m" (desc), "a" (type), "c" (desc) : "memory");
26060a402aSAndy Lutomirski }
27060a402aSAndy Lutomirski 
/* INVPCID invalidation types (the "type" argument to __invpcid()). */
#define INVPCID_TYPE_INDIV_ADDR		0	/* one address in one PCID */
#define INVPCID_TYPE_SINGLE_CTXT	1	/* one PCID, globals excluded */
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2	/* everything, incl. globals */
#define INVPCID_TYPE_ALL_NON_GLOBAL	3	/* all PCIDs, globals excluded */
32060a402aSAndy Lutomirski 
/* Flush the mapping for a single address in a single PCID; globals excluded. */
static inline void invpcid_flush_one(unsigned long pcid,
				     unsigned long addr)
{
	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}
39060a402aSAndy Lutomirski 
/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
	/* the address operand is unused for this invalidation type */
	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}
45060a402aSAndy Lutomirski 
/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
	/* pcid and addr operands are unused for this invalidation type */
	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}
51060a402aSAndy Lutomirski 
/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
	/* pcid and addr operands are unused for this invalidation type */
	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}
57060a402aSAndy Lutomirski 
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
/* Without paravirt, the flush primitives map directly to the native ones. */
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif
65bb898558SAl Viro 
/* Per-CPU TLB bookkeeping, instantiated as the cpu_tlbstate variable below. */
struct tlb_state {
#ifdef CONFIG_SMP
	struct mm_struct *active_mm;	/* mm this CPU is currently running */
	int state;			/* TLBSTATE_OK / TLBSTATE_LAZY (or 0 after reset) */
#endif

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
791e02ce4cSAndy Lutomirski 
/* Initialize cr4 shadow for this CPU from the hardware CR4 register. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}
851e02ce4cSAndy Lutomirski 
86375074ccSAndy Lutomirski /* Set in this cpu's CR4. */
87375074ccSAndy Lutomirski static inline void cr4_set_bits(unsigned long mask)
88375074ccSAndy Lutomirski {
89375074ccSAndy Lutomirski 	unsigned long cr4;
90375074ccSAndy Lutomirski 
911e02ce4cSAndy Lutomirski 	cr4 = this_cpu_read(cpu_tlbstate.cr4);
921e02ce4cSAndy Lutomirski 	if ((cr4 | mask) != cr4) {
93375074ccSAndy Lutomirski 		cr4 |= mask;
941e02ce4cSAndy Lutomirski 		this_cpu_write(cpu_tlbstate.cr4, cr4);
951e02ce4cSAndy Lutomirski 		__write_cr4(cr4);
961e02ce4cSAndy Lutomirski 	}
97375074ccSAndy Lutomirski }
98375074ccSAndy Lutomirski 
99375074ccSAndy Lutomirski /* Clear in this cpu's CR4. */
100375074ccSAndy Lutomirski static inline void cr4_clear_bits(unsigned long mask)
101375074ccSAndy Lutomirski {
102375074ccSAndy Lutomirski 	unsigned long cr4;
103375074ccSAndy Lutomirski 
1041e02ce4cSAndy Lutomirski 	cr4 = this_cpu_read(cpu_tlbstate.cr4);
1051e02ce4cSAndy Lutomirski 	if ((cr4 & ~mask) != cr4) {
106375074ccSAndy Lutomirski 		cr4 &= ~mask;
1071e02ce4cSAndy Lutomirski 		this_cpu_write(cpu_tlbstate.cr4, cr4);
1081e02ce4cSAndy Lutomirski 		__write_cr4(cr4);
1091e02ce4cSAndy Lutomirski 	}
1101e02ce4cSAndy Lutomirski }
1111e02ce4cSAndy Lutomirski 
/* Read this CPU's shadow copy of CR4 (no hardware access). */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}
117375074ccSAndy Lutomirski 
/*
 * Save some of the cr4 feature set we're using (e.g. the Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags.  This should only be used
 * during boot on the boot cpu.
 */
124375074ccSAndy Lutomirski extern unsigned long mmu_cr4_features;
125375074ccSAndy Lutomirski extern u32 *trampoline_cr4_features;
126375074ccSAndy Lutomirski 
/*
 * Set CR4 bits on this CPU and record them in mmu_cr4_features (and in
 * the trampoline's copy, if it has been set up yet) so that CPUs booted
 * later inherit the same flags.  Boot-time, boot-CPU only.
 */
static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}
134375074ccSAndy Lutomirski 
/* Flush this CPU's TLB by reloading CR3; global pages survive this. */
static inline void __native_flush_tlb(void)
{
	native_write_cr3(native_read_cr3());
}
139bb898558SAl Viro 
/*
 * Flush everything, including global pages, by toggling CR4.PGE.
 * Caller must have interrupts disabled so the CR4 read-modify-write
 * cannot race with another CR4/shadow update (see struct tlb_state).
 */
static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);
}
150086fc8f8SFenghua Yu 
/* Flush all TLB entries on this CPU, including global pages. */
static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	__native_flush_tlb_global_irq_disabled();

	raw_local_irq_restore(flags);
}
166bb898558SAl Viro 
/* Flush the TLB entry for a single address using INVLPG. */
static inline void __native_flush_tlb_single(unsigned long addr)
{
	/* "memory" clobber keeps later accesses from moving before the flush */
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
171bb898558SAl Viro 
/*
 * Flush everything on this CPU.  With PGE, global pages must be flushed
 * via the global variant; otherwise a plain CR3 reload suffices.
 */
static inline void __flush_tlb_all(void)
{
	if (cpu_has_pge)
		__flush_tlb_global();
	else
		__flush_tlb();
}
179bb898558SAl Viro 
/* Flush a single address on this CPU, accounting it in the vm statistics. */
static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}
185bb898558SAl Viro 
/* Sentinel start/end value meaning "flush the entire address space". */
#define TLB_FLUSH_ALL	-1UL
187bb898558SAl Viro 
188bb898558SAl Viro /*
189bb898558SAl Viro  * TLB flushing:
190bb898558SAl Viro  *
191bb898558SAl Viro  *  - flush_tlb() flushes the current mm struct TLBs
192bb898558SAl Viro  *  - flush_tlb_all() flushes all processes TLBs
193bb898558SAl Viro  *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
194bb898558SAl Viro  *  - flush_tlb_page(vma, vmaddr) flushes one page
195bb898558SAl Viro  *  - flush_tlb_range(vma, start, end) flushes a range of pages
196bb898558SAl Viro  *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
197e7b52ffdSAlex Shi  *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
198bb898558SAl Viro  *
199bb898558SAl Viro  * ..but the i386 has somewhat limited tlb flushing capabilities,
200bb898558SAl Viro  * and page-granular flushes are available only on i486 and up.
201bb898558SAl Viro  */
202bb898558SAl Viro 
203bb898558SAl Viro #ifndef CONFIG_SMP
204bb898558SAl Viro 
/* "_up" is for UniProcessor.
 *
 * This is a helper for other header functions.  *Not* intended to be called
 * directly.  All global TLB flushes need to either call this, or to bump the
 * vm statistics themselves.
 */
static inline void __flush_tlb_up(void)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();
}
2166df46865SDave Hansen 
/* UP: flushing this CPU is flushing all CPUs; count it and flush everything. */
static inline void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb_all();
}
2226df46865SDave Hansen 
/* UP: flush the current mm's TLB entries == flush the local TLB. */
static inline void flush_tlb(void)
{
	__flush_tlb_up();
}
2276df46865SDave Hansen 
/* UP: local flush, with the vm statistics bump done by __flush_tlb_up(). */
static inline void local_flush_tlb(void)
{
	__flush_tlb_up();
}
232bb898558SAl Viro 
/* UP: only the currently active mm can have TLB entries; skip otherwise. */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb_up();
}
238bb898558SAl Viro 
/* UP: flush one page, but only if the vma's mm is the active one. */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}
245bb898558SAl Viro 
/* UP: no ranged primitive here - flush the whole local TLB if mm is active. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_up();
}
252bb898558SAl Viro 
/* UP: start/end/vmflag are ignored; flush everything if mm is active. */
static inline void flush_tlb_mm_range(struct mm_struct *mm,
	   unsigned long start, unsigned long end, unsigned long vmflag)
{
	if (mm == current->active_mm)
		__flush_tlb_up();
}
259611ae8e3SAlex Shi 
/* UP: there are no other CPUs, so a remote shootdown is a no-op. */
static inline void native_flush_tlb_others(const struct cpumask *cpumask,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
}
266bb898558SAl Viro 
/* UP: no lazy-TLB state is tracked (see the SMP variant), nothing to reset. */
static inline void reset_lazy_tlbstate(void)
{
}
270bb898558SAl Viro 
/* UP: a kernel-range flush degenerates to a full flush. */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}
276effee4b9SAlex Shi 
277bb898558SAl Viro #else  /* SMP */
278bb898558SAl Viro 
279bb898558SAl Viro #include <asm/smp.h>
280bb898558SAl Viro 
#define local_flush_tlb() __flush_tlb()

/* A whole-mm flush is a full-range flush_tlb_mm_range() with no vmflags. */
#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
287611ae8e3SAlex Shi 
288bb898558SAl Viro extern void flush_tlb_all(void);
289bb898558SAl Viro extern void flush_tlb_current_task(void);
290bb898558SAl Viro extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
291611ae8e3SAlex Shi extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
292611ae8e3SAlex Shi 				unsigned long end, unsigned long vmflag);
293effee4b9SAlex Shi extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
294bb898558SAl Viro 
295bb898558SAl Viro #define flush_tlb()	flush_tlb_current_task()
296bb898558SAl Viro 
2974595f962SRusty Russell void native_flush_tlb_others(const struct cpumask *cpumask,
298e7b52ffdSAlex Shi 				struct mm_struct *mm,
299e7b52ffdSAlex Shi 				unsigned long start, unsigned long end);
300bb898558SAl Viro 
/* Values for cpu_tlbstate.state (lazy-TLB tracking). */
#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

/* Drop this CPU back to untracked state with init_mm as its active mm. */
static inline void reset_lazy_tlbstate(void)
{
	this_cpu_write(cpu_tlbstate.state, 0);
	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}
309bb898558SAl Viro 
310bb898558SAl Viro #endif	/* SMP */
311bb898558SAl Viro 
/*
 * Bump the TLB IRQ statistic and flush the local TLB.
 * Not inlined due to inc_irq_stat not being defined yet.
 *
 * do { } while (0) so the expansion is a single statement: a bare
 * { } block followed by the caller's ';' would break an unbraced
 * if/else around flush_tlb_local();.
 */
#define flush_tlb_local() do {		\
	inc_irq_stat(irq_tlb_count);	\
	local_flush_tlb();		\
} while (0)
31772b252aeSMel Gorman 
#ifndef CONFIG_PARAVIRT
/* Without paravirt, remote TLB shootdowns go straight to the native version. */
#define flush_tlb_others(mask, mm, start, end)	\
	native_flush_tlb_others(mask, mm, start, end)
#endif
322bb898558SAl Viro 
3231965aae3SH. Peter Anvin #endif /* _ASM_X86_TLBFLUSH_H */
324