xref: /openbmc/linux/arch/arm64/mm/context.c (revision 55fd7e02)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

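/*
 * Roles of the per-CPU state above: active_asids holds the ASID a CPU is
 * currently running with (zeroed while a rollover is being handled),
 * reserved_asids remembers the ASID a CPU was using when the last rollover
 * happened, and tlb_flush_pending marks CPUs that must flush their local
 * TLB before installing an ASID from the new generation.
 */
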
#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)

#define NUM_USER_ASIDS		ASID_FIRST_VERSION
#define asid2idx(asid)		((asid) & ~ASID_MASK)
#define idx2asid(idx)		asid2idx(idx)

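/*
 * Layout of mm->context.id (illustrative, assuming 16-bit ASIDs):
 *
 *   63                       16 15                       0
 *  +---------------------------+-------------------------+
 *  |        generation         |          ASID           |
 *  +---------------------------+-------------------------+
 *
 * ASID_MASK covers everything above the low asid_bits, so asid2idx()
 * extracts the bitmap slot for a context.id value, and ASID_FIRST_VERSION
 * is both the generation increment and the number of bitmap entries.
 */
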
/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						ID_AA64MMFR0_ASID_SHIFT);

	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
					smp_processor_id(), fld);
		/* Fallthrough */
	case 0:
		asid = 8;
		break;
	case 2:
		asid = 16;
	}

	return asid;
}

/* Check if the current cpu's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
	u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot decrease the ASID size at runtime, so panic if we support
		 * fewer ASID bits than the boot CPU.
		 */
		pr_crit("CPU%d: smaller ASID size(%u) than boot CPU (%u)\n",
				smp_processor_id(), asid, asid_bits);
		cpu_panic_kernel();
	}
}

static void set_kpti_asid_bits(void)
{
	unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
	/*
	 * With KPTI, kernel/user ASIDs are allocated in pairs and the
	 * bottom bit distinguishes the two: if it is set, the ASID maps
	 * only userspace. Pre-set every odd bit in the map (0xaa is
	 * 0b10101010) so the allocator only ever hands out the even ASID
	 * of each pair, leaving the odd ones for the user-only mappings.
	 */
	memset(asid_map, 0xaa, len);
}

static void set_reserved_asid_bits(void)
{
	if (arm64_kernel_unmapped_at_el0())
		set_kpti_asid_bits();
	else
		bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
}

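/*
 * An ASID value matches the current generation iff its bits above asid_bits
 * agree with asid_generation; the XOR below is zero after shifting out the
 * low asid_bits exactly in that case.
 */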
#define asid_gen_match(asid) \
	(!(((asid) ^ atomic64_read(&asid_generation)) >> asid_bits))

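/*
 * Called on rollover with cpu_asid_lock held: rebuild the ASID bitmap,
 * preserve the ASID each CPU is (or was last) running with in
 * reserved_asids, and queue a local TLB flush for every CPU.
 */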
static void flush_context(void)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	set_reserved_asid_bits();

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid2idx(asid), asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on the next
	 * context switch.
	 */
	cpumask_setall(&tlb_flush_pending);
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

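/*
 * Slow path, called with cpu_asid_lock held: try to keep the mm's old
 * ASID under the current generation, otherwise allocate a fresh one,
 * triggering a rollover (generation bump plus flush_context()) when the
 * bitmap is exhausted.
 */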
static u64 new_context(struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(asid), asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.  We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context();

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return idx2asid(asid) | generation;
}

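/*
 * Fast path: if the mm's ASID is still from the current generation and this
 * CPU's active_asids entry is non-zero, install it with a relaxed cmpxchg.
 * Otherwise fall back to the lock, refresh the ASID via new_context() if
 * needed, and perform any TLB flush deferred by a rollover before making
 * the ASID visible in active_asids.
 */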
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	u64 asid, old_active_asid;

	if (system_supports_cnp())
		cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asids is non-zero and the ASID matches the current
	 * generation, then we update the active_asids entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
	if (old_active_asid && asid_gen_match(asid) &&
	    atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
				     old_active_asid, asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if (!asid_gen_match(asid)) {
		asid = new_context(mm);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:

	arm64_apply_bp_hardening();

	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
	if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456))
		return;

	asm(ALTERNATIVE("nop; nop; nop",
			"ic iallu; dsb nsh; isb",
			ARM64_WORKAROUND_CAVIUM_27456));
}

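/*
 * Install the new translation table: the ASID lives in TTBR1_EL1 (TCR_EL1.A1
 * is set), so it is updated there before TTBR0_EL1 is pointed at the new pgd,
 * with an ISB after each write.
 */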
void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
{
	unsigned long ttbr1 = read_sysreg(ttbr1_el1);
	unsigned long asid = ASID(mm);
	unsigned long ttbr0 = phys_to_ttbr(pgd_phys);

	/* Skip CNP for the reserved ASID */
	if (system_supports_cnp() && asid)
		ttbr0 |= TTBR_CNP_BIT;

	/* SW PAN needs a copy of the ASID in TTBR0 for entry */
	if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN))
		ttbr0 |= FIELD_PREP(TTBR_ASID_MASK, asid);

	/* Set ASID in TTBR1 since TCR.A1 is set */
	ttbr1 &= ~TTBR_ASID_MASK;
	ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid);

	write_sysreg(ttbr1, ttbr1_el1);
	isb();
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	post_ttbr_update_workaround();
}

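/*
 * With KPTI each allocated ASID has a user-only twin (the odd half of the
 * pair), so only half of NUM_USER_ASIDS are actually available to the
 * allocator; the sanity check below accounts for that.
 */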
static int asids_update_limit(void)
{
	unsigned long num_available_asids = NUM_USER_ASIDS;

	if (arm64_kernel_unmapped_at_el0())
		num_available_asids /= 2;
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
	 */
	WARN_ON(num_available_asids - 1 <= num_possible_cpus());
	pr_info("ASID allocator initialised with %lu entries\n",
		num_available_asids);
	return 0;
}
arch_initcall(asids_update_limit);

static int asids_init(void)
{
	asid_bits = get_cpu_asid_bits();
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
			   GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	/*
	 * We cannot call set_reserved_asid_bits() here because CPU
	 * caps are not finalized yet, so it is safer to assume KPTI
	 * and reserve kernel ASIDs from the beginning.
	 */
	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
		set_kpti_asid_bits();
	return 0;
}
early_initcall(asids_init);