/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}
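
/*
 * Note: "ptlb" only purges the TLB of the CPU that issues it. A minimal
 * usage sketch (hypothetical caller, not part of this header) that keeps
 * the flush pinned to one CPU:
 *
 *	preempt_disable();
 *	__tlb_flush_local();
 *	preempt_enable();
 */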

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	unsigned long opt;

	opt = IDTE_PTOA;
	if (MACHINE_HAS_TLB_GUEST)
		opt |= IDTE_GUEST_ASCE;
	/* Global TLB flush for the mm */
	asm volatile("idte 0,%1,%0" : : "a" (opt), "a" (asce) : "cc");
}
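
/*
 * Illustrative sketch (mirrors the callers further down in this file),
 * with mm being some mm_struct: flush everything mapped through the
 * mm's primary ASCE, falling back to a global flush without IDTE:
 *
 *	if (MACHINE_HAS_IDTE)
 *		__tlb_flush_idte(mm->context.asce);
 *	else
 *		__tlb_flush_global();
 */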

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	unsigned int dummy = 0;

	csp(&dummy, 0, 0);
}
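
/*
 * csp() is the compare-and-swap-and-purge primitive. Comparing and
 * swapping a zero against an already zeroed word always succeeds, so
 * the call above is made purely for its side effect: a TLB purge that
 * is broadcast to all CPUs in the configuration.
 */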
40c6557e7fSMartin Schwidefsky 
411b948d6cSMartin Schwidefsky /*
421b948d6cSMartin Schwidefsky  * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
431b948d6cSMartin Schwidefsky  * this implicates multiple ASCEs!).
441b948d6cSMartin Schwidefsky  */
__tlb_flush_mm(struct mm_struct * mm)4544b6cc81SMartin Schwidefsky static inline void __tlb_flush_mm(struct mm_struct *mm)
46c6557e7fSMartin Schwidefsky {
4744b6cc81SMartin Schwidefsky 	unsigned long gmap_asce;
4844b6cc81SMartin Schwidefsky 
4944b6cc81SMartin Schwidefsky 	/*
5044b6cc81SMartin Schwidefsky 	 * If the machine has IDTE we prefer to do a per mm flush
5144b6cc81SMartin Schwidefsky 	 * on all cpus instead of doing a local flush if the mm
5244b6cc81SMartin Schwidefsky 	 * only ran on the local cpu.
5344b6cc81SMartin Schwidefsky 	 */
541b948d6cSMartin Schwidefsky 	preempt_disable();
5564f31d58SMartin Schwidefsky 	atomic_inc(&mm->context.flush_count);
56b3e5dc45SMartin Schwidefsky 	/* Reset TLB flush mask */
57b3e5dc45SMartin Schwidefsky 	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
58b3e5dc45SMartin Schwidefsky 	barrier();
5944b6cc81SMartin Schwidefsky 	gmap_asce = READ_ONCE(mm->context.gmap_asce);
6044b6cc81SMartin Schwidefsky 	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
6144b6cc81SMartin Schwidefsky 		if (gmap_asce)
6244b6cc81SMartin Schwidefsky 			__tlb_flush_idte(gmap_asce);
6344b6cc81SMartin Schwidefsky 		__tlb_flush_idte(mm->context.asce);
6444b6cc81SMartin Schwidefsky 	} else {
65b3e5dc45SMartin Schwidefsky 		/* Global TLB flush */
66b3e5dc45SMartin Schwidefsky 		__tlb_flush_global();
6744b6cc81SMartin Schwidefsky 	}
6864f31d58SMartin Schwidefsky 	atomic_dec(&mm->context.flush_count);
691b948d6cSMartin Schwidefsky 	preempt_enable();
701b948d6cSMartin Schwidefsky }
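
/*
 * __tlb_flush_mm() is normally reached through __tlb_flush_mm_lazy()
 * below, which performs the flush only if a deferred pte update has
 * set mm->context.flush_mm in the meantime.
 */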

static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(init_mm.context.asce);
	else
		__tlb_flush_global();
}

static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	spin_lock(&mm->context.lock);
	if (mm->context.flush_mm) {
		mm->context.flush_mm = 0;
		__tlb_flush_mm(mm);
	}
	spin_unlock(&mm->context.lock);
}
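
/*
 * Note: the lock makes clearing mm->context.flush_mm and performing the
 * flush one atomic step with respect to concurrent flushers; the flag
 * itself is set again by the pte primitives whenever they skip a flush.
 */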

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation, and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update, flush_tlb_mm and
 * flush_tlb_range must then perform the deferred flush.
 */
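
/*
 * A minimal sketch of the deferred scheme (hypothetical caller; the real
 * users are the generic mm paths named above). The pte walk is elided:
 *
 *	for (addr = start; addr < end; addr += PAGE_SIZE)
 *		ptep_set_wrprotect(mm, addr, ptep);
 *	flush_tlb_mm(mm);
 *
 * Each ptep_set_wrprotect() may skip the hardware flush and merely mark
 * the mm; the final flush_tlb_mm() then performs one flush for the lot.
 */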
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}
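
/*
 * Note that the start/end arguments are ignored here: the entire kernel
 * ASCE is flushed. Hypothetical usage sketch, e.g. after changing a
 * kernel mapping:
 *
 *	flush_tlb_kernel_range(addr, addr + size);
 */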

#endif /* _S390_TLBFLUSH_H */