// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)
#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)
#else
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION)
#define asid2idx(asid)		((asid) & ~ASID_MASK)
#define idx2asid(idx)		asid2idx(idx)
#endif
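
/*
 * Illustrative note (not from the original source): mm->context.id packs a
 * rollover "generation" in the bits above asid_bits and the hardware ASID in
 * the bits below. For example, with asid_bits == 16 and
 * CONFIG_UNMAP_KERNEL_AT_EL0=y, ASID_FIRST_VERSION is 0x10000, NUM_USER_ASIDS
 * is 32768 and idx2asid(1) == 2: hardware ASIDs are handed out as the even
 * member of an even/odd pair, with the odd sibling of each pair reserved for
 * the kernel-unmap (KPTI) code.
 */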

/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						ID_AA64MMFR0_ASID_SHIFT);

	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
			smp_processor_id(), fld);
		/* Fallthrough */
	case 0:
		asid = 8;
		break;
	case 2:
		asid = 16;
	}

	return asid;
}

/* Check if the current cpu's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
	u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot decrease the ASID size at runtime, so panic if we
		 * support fewer ASID bits than the boot CPU.
		 */
		pr_crit("CPU%d: smaller ASID size(%u) than boot CPU (%u)\n",
			smp_processor_id(), asid, asid_bits);
		cpu_panic_kernel();
	}
}
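
/*
 * Note: in this file, flush_context() is only called from new_context(),
 * under cpu_asid_lock, once the ASID space is exhausted and the generation
 * is bumped. It rebuilds the bitmap from the ASIDs still live on each CPU
 * and asks every CPU to flush its TLB at its next context switch.
 */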

static void flush_context(void)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid2idx(asid), asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch
	 */
	cpumask_setall(&tlb_flush_pending);
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

static u64 new_context(struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(asid), asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes. We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context();

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return idx2asid(asid) | generation;
}
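
/*
 * An mm's ASID is stale when the generation bits (those above asid_bits) in
 * mm->context.id differ from asid_generation; the "(asid ^ generation) >>
 * asid_bits" tests below are non-zero exactly in that case.
 */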

void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	u64 asid, old_active_asid;

	if (system_supports_cnp())
		cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asids is non-zero and the ASID matches the current
	 * generation, then we update the active_asids entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
	if (old_active_asid &&
	    !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) &&
	    atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
				     old_active_asid, asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
		asid = new_context(mm);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:

	arm64_apply_bp_hardening();

	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
	asm(ALTERNATIVE("nop; nop; nop",
			"ic iallu; dsb nsh; isb",
			ARM64_WORKAROUND_CAVIUM_27456,
			CONFIG_CAVIUM_ERRATUM_27456));
}
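
/*
 * Note: the allocator below is sized from the ASIDBits of the CPU running
 * this early initcall (the boot CPU); a secondary CPU that supports fewer
 * ASID bits is rejected later by verify_cpu_asid_bits().
 */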

static int asids_init(void)
{
	asid_bits = get_cpu_asid_bits();
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
	 */
	WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
			   GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
	return 0;
}
early_initcall(asids_init);