/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	unsigned long opt;

	opt = IDTE_PTOA;
	if (MACHINE_HAS_TLB_GUEST)
		opt |= IDTE_GUEST_ASCE;
	/* Global TLB flush for the mm */
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
		: : "a" (opt), "a" (asce) : "cc");
}

void smp_ptlb_all(void);

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	unsigned int dummy = 0;

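	/*
	 * Compare-and-swap-and-purge on a dummy word: swapping zero with
	 * zero always succeeds, and a successful CSP purges the TLBs of
	 * all CPUs in the configuration.
	 */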
	csp(&dummy, 0, 0);
}

/*
 * Flush TLB entries for a specific mm on all CPUs (if a gmap is used
 * this may involve multiple ASCEs!).
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	unsigned long gmap_asce;

	/*
	 * If the machine has IDTE we prefer a per-mm flush on all
	 * CPUs over a local flush, even if the mm only ran on the
	 * local CPU.
	 */
	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	/* Reset TLB flush mask */
	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	barrier();
	gmap_asce = READ_ONCE(mm->context.gmap_asce);
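	/*
	 * gmap_asce is 0 when no gmap is attached; -1UL indicates the
	 * guest ASCEs cannot be flushed individually (more than one
	 * gmap), so fall back to a full global flush.
	 */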
	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
		if (gmap_asce)
			__tlb_flush_idte(gmap_asce);
		__tlb_flush_idte(mm->context.asce);
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}

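/*
 * Flush TLB entries for the kernel address space (init_mm) on all CPUs.
 */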
static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(init_mm.context.asce);
	else
		__tlb_flush_global();
}

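/*
 * Flush the mm only if a flush was deferred: the ptep helpers may set
 * mm->context.flush_mm instead of flushing immediately, and the flag is
 * tested and cleared here under the context lock.
 */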
static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	spin_lock(&mm->context.lock);
	if (mm->context.flush_mm) {
		mm->context.flush_mm = 0;
		__tlb_flush_mm(mm);
	}
	spin_unlock(&mm->context.lock);
}

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation, and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
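
/*
 * Illustrative sketch of the deferred-flush pattern described above
 * (simplified pseudo-code, not an actual call site): a PTE updater may
 * only mark the mm as needing a flush, and the final flush_tlb_mm()
 * performs it:
 *
 *	for_each_pte_in_range(mm, start, end, ptep)	// hypothetical loop
 *		ptep_set_wrprotect(mm, addr, ptep);	// may just set
 *							// mm->context.flush_mm
 *	flush_tlb_mm(mm);	// deferred flush via __tlb_flush_mm_lazy()
 */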
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

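/*
 * flush_tlb_range() does not use the start/end range; it lazily flushes
 * the whole mm of the VMA instead.
 */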
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

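/*
 * Likewise, flush_tlb_kernel_range() ignores the range and flushes the
 * whole kernel address space.
 */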
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}

#endif /* _S390_TLBFLUSH_H */