// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)

#define NUM_USER_ASIDS		ASID_FIRST_VERSION
#define asid2idx(asid)		((asid) & ~ASID_MASK)
#define idx2asid(idx)		asid2idx(idx)

/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						ID_AA64MMFR0_ASID_SHIFT);

	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
					smp_processor_id(),  fld);
		/* Fallthrough */
	case 0:
		asid = 8;
		break;
	case 2:
		asid = 16;
	}

	return asid;
}

/* Check if the current cpu's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
	u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot decrease the ASID size at runtime, so panic if we support
		 * fewer ASID bits than the boot CPU.
		 */
		pr_crit("CPU%d: smaller ASID size(%u) than boot CPU (%u)\n",
				smp_processor_id(), asid, asid_bits);
		cpu_panic_kernel();
	}
}

static void set_kpti_asid_bits(void)
{
	unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
	/*
	 * In case of KPTI kernel/user ASIDs are allocated in
	 * pairs, the bottom bit distinguishes the two: if it
	 * is set, then the ASID will map only userspace. Thus
	 * mark even as reserved for kernel.
	 */
	memset(asid_map, 0xaa, len);
}

static void set_reserved_asid_bits(void)
{
	if (arm64_kernel_unmapped_at_el0())
		set_kpti_asid_bits();
	else
		bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
}

static void flush_context(void)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	set_reserved_asid_bits();

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid2idx(asid), asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch
	 */
	cpumask_setall(&tlb_flush_pending);
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

static u64 new_context(struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(asid), asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes. We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context();

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return idx2asid(asid) | generation;
}

void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	u64 asid, old_active_asid;

	if (system_supports_cnp())
		cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asids is non-zero and the ASID matches the current
	 * generation, then we update the active_asids entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
	if (old_active_asid &&
	    !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) &&
	    atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
				     old_active_asid, asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
		asid = new_context(mm);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:

	arm64_apply_bp_hardening();

	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
	asm(ALTERNATIVE("nop; nop; nop",
			"ic iallu; dsb nsh; isb",
			ARM64_WORKAROUND_CAVIUM_27456,
			CONFIG_CAVIUM_ERRATUM_27456));
}

static int asids_init(void)
{
	asid_bits = get_cpu_asid_bits();
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
	 */
	WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
			   GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	/*
	 * We cannot call set_reserved_asid_bits() here because CPU
	 * caps are not finalized yet, so it is safer to assume KPTI
	 * and reserve kernel ASID's from beginning.
	 */
	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
		set_kpti_asid_bits();

	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
	return 0;
}
early_initcall(asids_init);
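
/*
 * Editor's illustrative sketch only, kept out of the build with #if 0: a
 * minimal user-space model of the generation-plus-bitmap scheme implemented
 * above, assuming 8-bit ASIDs and no KPTI even/odd pairing. All toy_* names
 * are invented for illustration and are not kernel symbols.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_ASID_BITS		8
#define TOY_FIRST_VERSION	(1UL << TOY_ASID_BITS)	/* generation increment */
#define TOY_NUM_ASIDS		TOY_FIRST_VERSION
#define TOY_ASID_MASK		(~(TOY_FIRST_VERSION - 1))

static uint64_t toy_generation = TOY_FIRST_VERSION;
static unsigned char toy_asid_map[TOY_NUM_ASIDS];

/* Allocate a context id: current generation in the high bits, index below. */
static uint64_t toy_new_context(void)
{
	unsigned long idx;

	for (idx = 1; idx < TOY_NUM_ASIDS; idx++) {
		if (!toy_asid_map[idx]) {
			toy_asid_map[idx] = 1;
			return toy_generation | idx;
		}
	}

	/* Rollover: bump the generation and recycle every index but #0. */
	toy_generation += TOY_FIRST_VERSION;
	memset(toy_asid_map, 0, sizeof(toy_asid_map));
	toy_asid_map[1] = 1;
	return toy_generation | 1;
}

int main(void)
{
	uint64_t id = toy_new_context();

	/*
	 * A stale id is detected by comparing generations, i.e.
	 * (id ^ generation) >> TOY_ASID_BITS being non-zero, analogous to
	 * the asid_generation check in check_and_switch_context().
	 */
	printf("id=%#llx asid=%llu generation=%llu\n",
	       (unsigned long long)id,
	       (unsigned long long)(id & ~TOY_ASID_MASK),
	       (unsigned long long)(id >> TOY_ASID_BITS));
	return 0;
}
#endif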