#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
        asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
        unsigned long opt;

        opt = IDTE_PTOA;
        if (MACHINE_HAS_TLB_GUEST)
                opt |= IDTE_GUEST_ASCE;
        /* Global TLB flush for the mm */
        asm volatile(
                "       .insn   rrf,0xb98e0000,0,%0,%1,0"
                : : "a" (opt), "a" (asce) : "cc");
}

#ifdef CONFIG_SMP
void smp_ptlb_all(void);

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
        unsigned int dummy = 0;

        csp(&dummy, 0, 0);
}

/*
 * Flush TLB entries for a specific mm on all CPUs (if a gmap is in use
 * this involves multiple ASCEs!).
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
        unsigned long gmap_asce;

        /*
         * If the machine has IDTE we prefer to do a per mm flush
         * on all cpus instead of doing a local flush if the mm
         * only ran on the local cpu.
         */
        preempt_disable();
        atomic_inc(&mm->context.flush_count);
        /* Reset TLB flush mask */
        cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
        barrier();
        gmap_asce = READ_ONCE(mm->context.gmap_asce);
        if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
                if (gmap_asce)
                        __tlb_flush_idte(gmap_asce);
                __tlb_flush_idte(mm->context.asce);
        } else {
                /* Global TLB flush */
                __tlb_flush_global();
        }
        atomic_dec(&mm->context.flush_count);
        preempt_enable();
}

static inline void __tlb_flush_kernel(void)
{
        if (MACHINE_HAS_IDTE)
                __tlb_flush_idte(init_mm.context.asce);
        else
                __tlb_flush_global();
}
#else
#define __tlb_flush_global()    __tlb_flush_local()

/*
 * Flush TLB entries for a specific mm; without SMP a local flush
 * of all entries is sufficient.
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
        __tlb_flush_local();
}

static inline void __tlb_flush_kernel(void)
{
        __tlb_flush_local();
}
#endif

static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
        spin_lock(&mm->context.lock);
        if (mm->context.flush_mm) {
                mm->context.flush_mm = 0;
                __tlb_flush_mm(mm);
        }
        spin_unlock(&mm->context.lock);
}
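
/*
 * Illustrative sketch, not a real kernel function: a batched page-table
 * update can skip the per-PTE flush, mark the mm via context.flush_mm,
 * and let __tlb_flush_mm_lazy() issue a single flush for the whole
 * batch. example_batch_update() is hypothetical; the real callers are
 * the ptep_* primitives described at the end of this header.
 *
 *      static void example_batch_update(struct mm_struct *mm)
 *      {
 *              // ... change a batch of PTEs without flushing each one ...
 *              mm->context.flush_mm = 1;       // defer: mm needs a flush
 *              __tlb_flush_mm_lazy(mm);        // one flush for the batch
 *      }
 */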

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation, and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
#define flush_tlb()                             do { } while (0)
#define flush_tlb_all()                         do { } while (0)
#define flush_tlb_page(vma, addr)               do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
        __tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        __tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
                                          unsigned long end)
{
        __tlb_flush_kernel();
}

#endif /* _S390_TLBFLUSH_H */
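
/*
 * Usage sketch, assuming a caller that has just unmapped a kernel
 * virtual range (the unmap helper below is hypothetical): on s390 the
 * start/end arguments are accepted for interface compatibility, but
 * the flush is not range based; all TLB entries for the kernel ASCE
 * are purged (via IDTE if available, otherwise globally):
 *
 *      example_unmap_kernel_range(start, end); // hypothetical helper
 *      flush_tlb_kernel_range(start, end);     // flushes all kernel TLBs
 */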