xref: /openbmc/linux/arch/s390/include/asm/tlbflush.h (revision 47010c04)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	unsigned long opt;

	opt = IDTE_PTOA;
	if (MACHINE_HAS_TLB_GUEST)
		opt |= IDTE_GUEST_ASCE;
	/* Global TLB flush for the mm */
	asm volatile("idte 0,%1,%0" : : "a" (opt), "a" (asce) : "cc");
}

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	unsigned int dummy = 0;

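	/*
	 * csp() executes COMPARE AND SWAP AND PURGE on a dummy word;
	 * the instruction is used here only for its side effect of
	 * purging the TLBs of all CPUs.
	 */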
	csp(&dummy, 0, 0);
}

/*
 * Flush TLB entries for a specific mm on all CPUs (if a gmap is in use
 * this may involve multiple ASCEs!).
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	unsigned long gmap_asce;

	/*
	 * If the machine has IDTE, prefer a per-mm flush on all CPUs
	 * over a local flush, even if the mm only ran on the local CPU.
	 */
	preempt_disable();
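	/*
	 * While flush_count is raised, CPUs concurrently attaching to
	 * this mm are expected to wait before setting their bit in
	 * mm_cpumask (see finish_arch_post_lock_switch()), so the mask
	 * reset and the flush below do not race with an attach.
	 */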
	atomic_inc(&mm->context.flush_count);
	/* Reset TLB flush mask */
	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	barrier();
	gmap_asce = READ_ONCE(mm->context.gmap_asce);
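	/*
	 * A gmap_asce value of -1UL indicates that the guest ASCEs in
	 * use cannot be covered by a single IDTE flush; in that case
	 * the global fallback below is used.
	 */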
	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
		if (gmap_asce)
			__tlb_flush_idte(gmap_asce);
		__tlb_flush_idte(mm->context.asce);
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}

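/*
 * Flush TLB entries for the kernel address space (init_mm) on all CPUs.
 */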
static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(init_mm.context.asce);
	else
		__tlb_flush_global();
}

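/*
 * Flush the mm only if a deferred flush is pending, i.e. if
 * mm->context.flush_mm has been set since the last flush.
 */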
static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	spin_lock(&mm->context.lock);
	if (mm->context.flush_mm) {
		mm->context.flush_mm = 0;
		__tlb_flush_mm(mm);
	}
	spin_unlock(&mm->context.lock);
}

/*
 * TLB flushing:
 *  flush_tlb() - flushes the TLBs of the current mm
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context's TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation, and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user; at the end of the update, flush_tlb_mm and
 * flush_tlb_range have to do the flush.
 */
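/*
 * Sketch of the lazy flush scheme described above (illustration only,
 * not code from this file): when the mm has a single user the ptep
 * primitives skip the direct flush and the deferred flush is done once
 * at the end of the operation:
 *
 *	ptep_get_and_clear(mm, addr, ptep);	no direct TLB flush here
 *	...
 *	flush_tlb_range(vma, start, end);	-> __tlb_flush_mm_lazy()
 */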
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}

#endif /* _S390_TLBFLUSH_H */