xref: /openbmc/linux/arch/s390/include/asm/tlbflush.h (revision 4eb5928d)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	unsigned long opt;

	opt = IDTE_PTOA;
	if (MACHINE_HAS_TLB_GUEST)
		opt |= IDTE_GUEST_ASCE;
	/* Global TLB flush for the mm: IDTE, opcode 0xb98e (INVALIDATE DAT TABLE ENTRY) */
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
		: : "a" (opt), "a" (asce) : "cc");
}

void smp_ptlb_all(void);

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	unsigned int dummy = 0;

	/*
	 * csp (COMPARE AND SWAP AND PURGE) on a dummy word: the swap
	 * itself is a no-op, the purge flushes the TLBs of all CPUs.
	 */
	csp(&dummy, 0, 0);
}

/*
 * Flush TLB entries for a specific mm on all CPUs (if a gmap is in use
 * this implies multiple ASCEs!).
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	unsigned long gmap_asce;

	/*
	 * If the machine has IDTE we prefer to do a per mm flush
	 * on all cpus instead of doing a local flush, even if the
	 * mm only ran on the local cpu.
	 */
	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	/* Reset TLB flush mask */
	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	barrier();
	gmap_asce = READ_ONCE(mm->context.gmap_asce);
	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
		if (gmap_asce)
			__tlb_flush_idte(gmap_asce);
		__tlb_flush_idte(mm->context.asce);
	} else {
		/*
		 * Global TLB flush: either the machine has no IDTE, or
		 * gmap_asce is -1UL and the gmap ASCEs cannot be flushed
		 * individually.
		 */
		__tlb_flush_global();
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}

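/*
 * The flush_count handling above pairs with the context switch code:
 * before relying on mm_cpumask a CPU is expected to wait for concurrent
 * flushes to finish, along the lines of (sketch only, see
 * asm/mmu_context.h for the authoritative code):
 *
 *	while (atomic_read(&mm->context.flush_count))
 *		cpu_relax();
 */
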
/*
 * Flush the TLB entries of the kernel address space (init_mm) on all CPUs.
 */
static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(init_mm.context.asce);
	else
		__tlb_flush_global();
}

/*
 * Flush the mm's TLB entries only if a flush has been deferred, i.e. if
 * mm->context.flush_mm is set.
 */
static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	spin_lock(&mm->context.lock);
	if (mm->context.flush_mm) {
		mm->context.flush_mm = 0;
		__tlb_flush_mm(mm);
	}
	spin_unlock(&mm->context.lock);
}

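/*
 * Deferral pattern (illustrative sketch, hypothetical caller): a helper
 * that must not flush immediately records the pending flush and leaves
 * the actual work to __tlb_flush_mm_lazy():
 *
 *	mm->context.flush_mm = 1;	<- in a ptep_*()/pmdp_*() helper
 *	...
 *	__tlb_flush_mm_lazy(mm);	<- flushes once, only if needed
 */
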
/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context's TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation, and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
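/*
 * Illustrative sketch (hypothetical caller, not from this header): the
 * intended pairing is many non-flushing pte updates followed by one
 * deferred flush, e.g. for write protection:
 *
 *	for (addr = start; addr < end; addr += PAGE_SIZE, ptep++)
 *		ptep_set_wrprotect(mm, addr, ptep);
 *	flush_tlb_range(vma, start, end);
 */
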
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}
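
/*
 * Illustrative usage (sketch): generic code such as vmalloc/vmap teardown
 * calls flush_tlb_kernel_range(start, end) after changing kernel page
 * tables. On s390 the range arguments are ignored and the whole kernel
 * ASCE is flushed, via IDTE if available, otherwise globally.
 */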

#endif /* _S390_TLBFLUSH_H */