// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)

#define NUM_USER_ASIDS		ASID_FIRST_VERSION
#define asid2idx(asid)		((asid) & ~ASID_MASK)
#define idx2asid(idx)		asid2idx(idx)

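/*
 * Illustrative sketch of the encoding above, assuming asid_bits == 16
 * (the concrete values below are made-up examples):
 *
 *	ASID_FIRST_VERSION == 1UL << 16 == 0x10000
 *	ASID_MASK          == ~GENMASK(15, 0) == ~0xffffUL
 *
 * mm->context.id packs a generation count in bits [63:16] and the
 * hardware ASID in bits [15:0], e.g.
 *
 *	context.id == 0x0003002a	-> generation 0x3, hardware ASID 0x2a
 *	asid2idx(0x0003002a) == 0x2a	(generation stripped)
 *	idx2asid(0x2a)       == 0x2a	(bitmap index == hardware ASID)
 *
 * Each rollover advances asid_generation by ASID_FIRST_VERSION, so a
 * stale context.id is detected simply by comparing the bits above
 * asid_bits against the current generation.
 */
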
/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						ID_AA64MMFR0_ASID_SHIFT);

	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
			smp_processor_id(), fld);
		/* Fallthrough */
	case 0:
		asid = 8;
		break;
	case 2:
		asid = 16;
	}

	return asid;
}

/* Check if the current CPU's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
	u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot decrease the ASID size at runtime, so panic if
		 * this CPU supports fewer ASID bits than the boot CPU.
		 */
		pr_crit("CPU%d: smaller ASID size (%u) than boot CPU (%u)\n",
			smp_processor_id(), asid, asid_bits);
		cpu_panic_kernel();
	}
}

static void set_kpti_asid_bits(void)
{
	unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
	/*
	 * With KPTI, kernel/user ASIDs are allocated in pairs and the
	 * bottom bit distinguishes the two: if it is set, the ASID maps
	 * only userspace. Filling the map with 0xaa therefore marks every
	 * odd ASID as reserved, so the allocator only hands out the even
	 * ASID of each pair; its odd counterpart is then used for that
	 * task's userspace mappings.
	 */
	memset(asid_map, 0xaa, len);
}

static void set_reserved_asid_bits(void)
{
	if (arm64_kernel_unmapped_at_el0())
		set_kpti_asid_bits();
	else
		bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
}

static void flush_context(void)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	set_reserved_asid_bits();

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid2idx(asid), asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on the next
	 * context switch.
	 */
	cpumask_setall(&tlb_flush_pending);
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

static u64 new_context(struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(asid), asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes. We
	 * always count from index 1, as ASID #0 is used when setting a
	 * reserved TTBR0 for the init_mm, and under KPTI ASIDs are allocated
	 * in even/odd pairs.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context();

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return idx2asid(asid) | generation;
}

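/*
 * Worked example of the generation check used below, assuming
 * asid_bits == 16 and asid_generation == 0x30000 (example values only).
 * A task whose context.id is 0x0003002a satisfies
 *
 *	(0x0003002a ^ 0x00030000) >> 16 == 0
 *
 * so its ASID is current and the fastpath cmpxchg can be taken. A task
 * last scheduled in an older generation, say context.id == 0x0002002a,
 * gives
 *
 *	(0x0002002a ^ 0x00030000) >> 16 != 0
 *
 * and must take the slow path through new_context(), which either
 * revalidates ASID 0x2a under the new generation (returning 0x0003002a)
 * or allocates a fresh one.
 */
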
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	u64 asid, old_active_asid;

	if (system_supports_cnp())
		cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asids is non-zero and the ASID matches the current
	 * generation, then we update the active_asids entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
	if (old_active_asid &&
	    !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) &&
	    atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
				     old_active_asid, asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
		asid = new_context(mm);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:

	arm64_apply_bp_hardening();

	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
	asm(ALTERNATIVE("nop; nop; nop",
			"ic iallu; dsb nsh; isb",
			ARM64_WORKAROUND_CAVIUM_27456,
			CONFIG_CAVIUM_ERRATUM_27456));
}

static int asids_init(void)
{
	asid_bits = get_cpu_asid_bits();
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
	 */
	WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
			   GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	/*
	 * We cannot call set_reserved_asid_bits() here because CPU
	 * caps are not finalized yet, so it is safer to assume KPTI
	 * and reserve kernel ASIDs from the beginning.
	 */
	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
		set_kpti_asid_bits();

	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
	return 0;
}
early_initcall(asids_init);
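
/*
 * Rough sketch of how this allocator is reached on a context switch,
 * assuming the usual arm64 switch_mm() path (see asm/mmu_context.h; the
 * exact entry points can differ between kernel versions):
 *
 *	switch_mm()
 *	  __switch_mm()
 *	    check_and_switch_context(next, cpu)		// this file
 *	      new_context()				// only if the generation is stale
 *	      cpu_switch_mm(mm->pgd, mm)		// skipped when SW PAN defers TTBR0
 *	        cpu_do_switch_mm()			// programs the TTBRs and ASID
 *	          post_ttbr_update_workaround()
 */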