xref: /openbmc/linux/arch/arm64/mm/context.c (revision 038dc9c6)
/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION
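
/*
 * Worked example of the layout (assuming asid_bits == 16, i.e. a CPU
 * with 16-bit ASID support):
 *
 *	NUM_USER_ASIDS		= 1UL << 16 = 65536
 *	ASID_FIRST_VERSION	= 0x10000  (first generation value)
 *	ASID_MASK		= ~0xffff  (selects the generation bits)
 *
 * mm->context.id therefore packs "generation | hardware ASID": the low
 * asid_bits go into the translation table base register, while the high
 * bits record the rollover generation used to spot stale ASIDs.
 */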

/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_field(read_cpuid(SYS_ID_AA64MMFR0_EL1),
						ID_AA64MMFR0_ASID_SHIFT);

	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
					smp_processor_id(), fld);
		/* Fallthrough */
	case 0:
		asid = 8;
		break;
	case 2:
		asid = 16;
		break;
	}

	return asid;
}
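
/*
 * Note on the switch above: ID_AA64MMFR0_EL1.ASIDBits (bits [7:4]) is
 * architecturally defined as 0b0000 for 8-bit ASIDs and 0b0010 for
 * 16-bit ASIDs; every other encoding is reserved, hence the
 * conservative 8-bit fallback in the default case.
 */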

static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);

	/*
	 * Ensure the generation bump is observed before we xchg the
	 * active_asids.
	 */
	smp_wmb();

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid & ~ASID_MASK, asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	cpumask_setall(&tlb_flush_pending);

	if (icache_is_aivivt())
		__flush_icache_all();
}
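
/*
 * After a rollover, every CPU left in tlb_flush_pending must invalidate
 * its local TLB before running user code under a new-generation ASID;
 * that flush happens in check_and_switch_context() below.
 */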

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		asid &= ~ASID_MASK;
		if (!__test_and_set_bit(asid, asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.
	 * We always count from ASID #1, as we use ASID #0 when setting a
	 * reserved TTBR0 for the init_mm.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context(cpu);

	/* We have at least 1 ASID per CPU, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return asid | generation;
}
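
/*
 * Worked example (assuming 16-bit ASIDs): a task whose mm->context.id
 * is 0x10005 (generation 0x10000, hardware ASID 5) that next runs after
 * the global generation has been bumped to 0x20000 gets newasid =
 * 0x20005, i.e. the same hardware ASID stamped with the current
 * generation, either via the reserved-ASID path or by re-claiming bit 5
 * in asid_map.
 */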

void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	u64 asid;

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle. We rely on the control
	 * dependency between the generation read and the update of
	 * active_asids to ensure that we are synchronised with a
	 * parallel rollover (i.e. this pairs with the smp_wmb() in
	 * flush_context).
	 */
	if (!((asid ^ atomic64_read(&asid_generation)) >> asid_bits)
	    && atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}
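
/*
 * Sketch of the expected caller, a simplified rendering of arm64's
 * switch_mm() from <asm/mmu_context.h> (illustrative only; details
 * such as the init_mm special case are omitted):
 *
 *	static inline void switch_mm(struct mm_struct *prev,
 *				     struct mm_struct *next,
 *				     struct task_struct *tsk)
 *	{
 *		unsigned int cpu = smp_processor_id();
 *
 *		if (prev != next)
 *			check_and_switch_context(next, cpu);
 *	}
 */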

static int asids_init(void)
{
	asid_bits = get_cpu_asid_bits();
	/* If we end up with more CPUs than ASIDs, expect things to crash */
	WARN_ON(NUM_USER_ASIDS < num_possible_cpus());
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
			   GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
	return 0;
}
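
/*
 * early_initcall() runs from do_pre_smp_initcalls(), i.e. on the boot
 * CPU before the secondaries come up, so asid_bits here reflects the
 * boot CPU's ID_AA64MMFR0_EL1 value.
 */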
early_initcall(asids_init);