#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
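	/*
	 * PTLB purges all TLB entries of the issuing CPU. The "memory"
	 * clobber keeps the compiler from moving page-table updates
	 * across the flush.
	 */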
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	/* Global TLB flush for the mm */
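	/*
	 * The raw .insn encoding emits IDTE (opcode 0xb98e), presumably
	 * so the header still assembles with binutils that lack the
	 * mnemonic. The operand 2048 (0x800) selects the flush-by-ASCE
	 * operation: purge all TLB entries formed using the ASCE in %1
	 * rather than invalidate a single DAT entry. The trailing M4
	 * field of 0 makes the purge take effect on every CPU.
	 */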
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
		: : "a" (2048), "a" (asce) : "cc");
}

/*
 * Flush TLB entries for a specific ASCE on the local CPU.
 */
static inline void __tlb_flush_idte_local(unsigned long asce)
{
	/* Local TLB flush for the mm */
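	/*
	 * Same encoding as __tlb_flush_idte() except M4 = 1, the
	 * local-clearing control, which limits the purge to the issuing
	 * CPU and requires the local-TLB-clearing facility
	 * (MACHINE_HAS_TLB_LC).
	 */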
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,1"
		: : "a" (2048), "a" (asce) : "cc");
}

#ifdef CONFIG_SMP
void smp_ptlb_all(void);

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	unsigned int dummy = 0;

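	/*
	 * CSP (compare and swap and purge) on a zeroed dummy word: the
	 * compare of 0 always succeeds, so the instruction is used
	 * purely for its side effect of purging the TLBs of all CPUs.
	 */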
	csp(&dummy, 0, 0);
}

/*
 * Flush TLB entries for a specific mm on all CPUs (if gmap is used
 * this implies multiple ASCEs!).
 */
static inline void __tlb_flush_full(struct mm_struct *mm)
{
	preempt_disable();
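	/*
	 * While flush_count is raised, a CPU attaching this mm waits
	 * (see finish_arch_post_lock_switch() in mmu_context.h), so the
	 * cpumask reset below cannot race with a concurrent attach.
	 */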
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		/* Local TLB flush */
		__tlb_flush_local();
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
		/* Reset TLB flush mask */
		if (MACHINE_HAS_TLB_LC)
			cpumask_copy(mm_cpumask(mm),
				     &mm->context.cpu_attach_mask);
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs. Must never be used
 * when more than one ASCE (e.g. a gmap ASCE) has run on this mm.
 */
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		__tlb_flush_idte_local(asce);
	} else {
		if (MACHINE_HAS_IDTE)
			__tlb_flush_idte(asce);
		else
			__tlb_flush_global();
		/* Reset TLB flush mask */
		if (MACHINE_HAS_TLB_LC)
			cpumask_copy(mm_cpumask(mm),
				     &mm->context.cpu_attach_mask);
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}

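/*
 * Flush the TLB entries for the kernel address space (init_mm's ASCE).
 */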
static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(init_mm.context.asce);
	else
		__tlb_flush_global();
}
#else
#define __tlb_flush_global()	__tlb_flush_local()
#define __tlb_flush_full(mm)	__tlb_flush_local()

/*
 * Flush TLB entries for a specific ASCE. Without SMP there is only the
 * local CPU to flush.
 */
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
	if (MACHINE_HAS_TLB_LC)
		__tlb_flush_idte_local(asce);
	else
		__tlb_flush_local();
}

static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_TLB_LC)
		__tlb_flush_idte_local(init_mm.context.asce);
	else
		__tlb_flush_local();
}
#endif

static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	/*
	 * If the machine has IDTE we prefer to do a per mm flush
	 * on all cpus instead of doing a local flush if the mm
	 * only ran on the local cpu. If gmaps are attached to this
	 * mm, more than one ASCE is involved and __tlb_flush_asce()
	 * must not be used; fall back to __tlb_flush_full().
	 */
	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
		__tlb_flush_asce(mm, mm->context.asce);
	else
		__tlb_flush_full(mm);
}

static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
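	/*
	 * flush_mm is set by page-table helpers (e.g. ptep_set_wrprotect,
	 * ptep_get_and_clear) when they skip the flush for an mm with a
	 * single user; perform the deferred flush now.
	 */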
	if (mm->context.flush_mm) {
		__tlb_flush_mm(mm);
		mm->context.flush_mm = 0;
	}
}

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)

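/*
 * A minimal sketch of the deferred-flush protocol described above
 * (hypothetical call sequence; the helpers that set flush_mm live in
 * the page-table code):
 *
 *	ptep_set_wrprotect(mm, addr, ptep);	may just mark
 *						mm->context.flush_mm
 *	...more pte updates...
 *	flush_tlb_mm(mm);			one deferred flush via
 *						__tlb_flush_mm_lazy()
 */
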
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

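/*
 * There is no selective range flush on s390; flushing a range falls
 * back to a (lazy) flush of the whole address space via its ASCE.
 */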
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}

#endif /* _S390_TLBFLUSH_H */