// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/kernel/smp_tlb.c
 *
 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
 */
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/uaccess.h>

#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

/**********************************************************************/

/*
 * TLB operations
 */
struct tlb_args {
        struct vm_area_struct *ta_vma;
        unsigned long ta_start;
        unsigned long ta_end;
};

static inline void ipi_flush_tlb_all(void *ignored)
{
        local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
        struct mm_struct *mm = (struct mm_struct *)arg;

        local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;
        unsigned int __ua_flags = uaccess_save_and_enable();

        local_flush_tlb_page(ta->ta_vma, ta->ta_start);

        uaccess_restore(__ua_flags);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;
        unsigned int __ua_flags = uaccess_save_and_enable();

        local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);

        uaccess_restore(__ua_flags);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_bp_all(void *ignored)
{
        local_flush_bp_all();
}
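/*
 * Cortex-A15 erratum 798181: "Moving a virtual page that is being
 * accessed by an active process can lead to unexpected behaviour."
 *
 * Summary of the handling below: a dummy TLBIMVAIS and a DSB are issued
 * after the TLB maintenance operation; on parts that do not carry the
 * partial in-silicon (ECO) fix, the other cores are additionally sent an
 * IPI so that they execute a DMB.
 */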
#ifdef CONFIG_ARM_ERRATA_798181
bool (*erratum_a15_798181_handler)(void);

static bool erratum_a15_798181_partial(void)
{
        asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
        dsb(ish);
        return false;
}

static bool erratum_a15_798181_broadcast(void)
{
        asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
        dsb(ish);
        return true;
}

void erratum_a15_798181_init(void)
{
        unsigned int midr = read_cpuid_id();
        unsigned int revidr = read_cpuid(CPUID_REVIDR);

        /* Brahma-B15 r0p0..r0p2 affected
         * Cortex-A15 r0p0..r3p3 w/o ECO fix affected
         * Fixes applied to A15 with respect to the revision and revidr are:
         *
         * r0p0-r2p1: No fixes applied
         * r2p2,r2p3:
         *      REVIDR[4]: 798181 Moving a virtual page that is being accessed
         *                 by an active process can lead to unexpected behavior
         *      REVIDR[9]: Not defined
         * r2p4,r3p0,r3p1,r3p2:
         *      REVIDR[4]: 798181 Moving a virtual page that is being accessed
         *                 by an active process can lead to unexpected behavior
         *      REVIDR[9]: 798181 Moving a virtual page that is being accessed
         *                 by an active process can lead to unexpected behavior
         *                 - This is an update to a previously released ECO.
         * r3p3:
         *      REVIDR[4]: Reserved
         *      REVIDR[9]: 798181 Moving a virtual page that is being accessed
         *                 by an active process can lead to unexpected behavior
         *                 - This is an update to a previously released ECO.
         *
         * Handling:
         *      REVIDR[9] set -> No WA
         *      REVIDR[4] set, REVIDR[9] cleared -> Partial WA
         *      Both cleared -> Full WA
         */

        /*
         * MIDR decode: masking with 0xff0ffff0 keeps the implementer,
         * architecture and primary part number fields while ignoring the
         * variant and revision; 0x410fc0f0 is ARM Cortex-A15 and
         * 0x420f00f0 is Broadcom Brahma-B15.  The unmasked comparisons
         * below then select the rNpM revision ranges listed above.
         */
        if ((midr & 0xff0ffff0) == 0x420f00f0 && midr <= 0x420f00f2) {
                erratum_a15_798181_handler = erratum_a15_798181_broadcast;
        } else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x412fc0f2) {
                erratum_a15_798181_handler = erratum_a15_798181_broadcast;
        } else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x412fc0f4) {
                if (revidr & 0x10)
                        erratum_a15_798181_handler =
                                erratum_a15_798181_partial;
                else
                        erratum_a15_798181_handler =
                                erratum_a15_798181_broadcast;
        } else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x413fc0f3) {
                if ((revidr & 0x210) == 0)
                        erratum_a15_798181_handler =
                                erratum_a15_798181_broadcast;
                else if (revidr & 0x10)
                        erratum_a15_798181_handler =
                                erratum_a15_798181_partial;
        } else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x414fc0f0) {
                if ((revidr & 0x200) == 0)
                        erratum_a15_798181_handler =
                                erratum_a15_798181_partial;
        }
}
#endif

static void ipi_flush_tlb_a15_erratum(void *arg)
{
        dmb();
}

static void broadcast_tlb_a15_erratum(void)
{
        if (!erratum_a15_798181())
                return;

        smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1);
}

static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
{
        int this_cpu;
        cpumask_t mask = { CPU_BITS_NONE };

        if (!erratum_a15_798181())
                return;

        this_cpu = get_cpu();
        a15_erratum_get_cpumask(this_cpu, mm, &mask);
        smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
        put_cpu();
}
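/*
 * SMP entry points.  Where the hardware does not broadcast TLB (and
 * branch predictor) maintenance (tlb_ops_need_broadcast()), the
 * operation is pushed out to the other cores by IPI; otherwise a single
 * local call is enough, as the maintenance operation is broadcast in
 * hardware.  The flush_tlb_*() paths also invoke the erratum 798181
 * handling above once the flush has been issued.
 */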
void flush_tlb_all(void)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu(ipi_flush_tlb_all, NULL, 1);
        else
                __flush_tlb_all();
        broadcast_tlb_a15_erratum();
}

void flush_tlb_mm(struct mm_struct *mm)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
        else
                __flush_tlb_mm(mm);
        broadcast_tlb_mm_a15_erratum(mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_vma = vma;
                ta.ta_start = uaddr;
                on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,
                                 &ta, 1);
        } else
                __flush_tlb_page(vma, uaddr);
        broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_start = kaddr;
                on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
        } else
                __flush_tlb_kernel_page(kaddr);
        broadcast_tlb_a15_erratum();
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_vma = vma;
                ta.ta_start = start;
                ta.ta_end = end;
                on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range,
                                 &ta, 1);
        } else
                local_flush_tlb_range(vma, start, end);
        broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_start = start;
                ta.ta_end = end;
                on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
        } else
                local_flush_tlb_kernel_range(start, end);
        broadcast_tlb_a15_erratum();
}

void flush_bp_all(void)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu(ipi_flush_bp_all, NULL, 1);
        else
                __flush_bp_all();
}