xref: /openbmc/linux/arch/mips/mm/context.c (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
// SPDX-License-Identifier: GPL-2.0
#include <linux/atomic.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(cpu_mmid_lock);

static atomic64_t mmid_version;
static unsigned int num_mmids;
static unsigned long *mmid_map;

static DEFINE_PER_CPU(u64, reserved_mmids);
static cpumask_t tlb_flush_pending;

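/*
 * Return true if the ASID/MMID version (generation) fields of @a and @b
 * match for the given CPU, i.e. both values belong to the same generation.
 */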
static bool asid_versions_eq(int cpu, u64 a, u64 b)
{
	return ((a ^ b) & asid_version_mask(cpu)) == 0;
}

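/*
 * Allocate a fresh ASID for @mm on the current CPU. When the ASID field
 * wraps around, a new generation is started: the local TLB is flushed (and
 * the icache too on CPUs with a virtually tagged icache) so that stale
 * translations from the previous cycle cannot be reused.
 */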
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu;
	u64 asid;

	/*
	 * This function is specific to ASIDs, and should not be called when
	 * MMIDs are in use.
	 */
	if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
		return;

	cpu = smp_processor_id();
	asid = asid_cache(cpu);

	if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
		local_flush_tlb_all();	/* start new asid cycle */
	}

	set_cpu_context(cpu, mm, asid);
	asid_cache(cpu) = asid;
}
EXPORT_SYMBOL_GPL(get_new_mmu_context);

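/*
 * Ensure @mm has a valid ASID for the current CPU's generation, allocating
 * a fresh one via get_new_mmu_context() if the cached ASID is stale.
 */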
void check_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * This function is specific to ASIDs, and should not be called when
	 * MMIDs are in use.
	 */
	if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
		return;

	/* Check if our ASID is of an older version and thus invalid */
	if (!asid_versions_eq(cpu, cpu_context(cpu, mm), asid_cache(cpu)))
		get_new_mmu_context(mm);
}
EXPORT_SYMBOL_GPL(check_mmu_context);

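/*
 * Handle MMID generation rollover: rebuild the MMID bitmap from the MMIDs
 * currently live on each CPU (falling back to each CPU's reserved MMID) and
 * queue a TLB invalidation on every CPU. Runs under cpu_mmid_lock, called
 * from get_new_mmid().
 */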
static void flush_context(void)
{
	u64 mmid;
	int cpu;

	/* Update the list of reserved MMIDs and the MMID bitmap */
	bitmap_zero(mmid_map, num_mmids);

	/* Reserve an MMID for kmap/wired entries */
	__set_bit(MMID_KERNEL_WIRED, mmid_map);

	for_each_possible_cpu(cpu) {
		mmid = xchg_relaxed(&cpu_data[cpu].asid_cache, 0);

		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * MMID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (mmid == 0)
			mmid = per_cpu(reserved_mmids, cpu);

		__set_bit(mmid & cpu_asid_mask(&cpu_data[cpu]), mmid_map);
		per_cpu(reserved_mmids, cpu) = mmid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch
	 */
	cpumask_setall(&tlb_flush_pending);
}

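/*
 * Check whether @mmid is one of the reserved MMIDs noted at rollover time.
 * If so, update every matching per-CPU reservation to @newmmid and return
 * true, allowing the caller to keep the same MMID in the new generation.
 */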
static bool check_update_reserved_mmid(u64 mmid, u64 newmmid)
{
	bool hit;
	int cpu;

	/*
	 * Iterate over the set of reserved MMIDs looking for a match.
	 * If we find one, then we can update our mm to use newmmid
	 * (i.e. the same MMID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old MMID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved MMID in a future
	 * generation.
	 */
	hit = false;
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_mmids, cpu) == mmid) {
			hit = true;
			per_cpu(reserved_mmids, cpu) = newmmid;
		}
	}

	return hit;
}

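/*
 * Slow path of MMID allocation, called under cpu_mmid_lock: try to re-use
 * @mm's previous MMID in the current generation, otherwise take a free bit
 * from mmid_map, bumping the global version and rolling over via
 * flush_context() once the MMID space is exhausted.
 */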
static u64 get_new_mmid(struct mm_struct *mm)
{
	static u32 cur_idx = MMID_KERNEL_WIRED + 1;
	u64 mmid, version, mmid_mask;

	mmid = cpu_context(0, mm);
	version = atomic64_read(&mmid_version);
	mmid_mask = cpu_asid_mask(&boot_cpu_data);

	if (!asid_versions_eq(0, mmid, 0)) {
		u64 newmmid = version | (mmid & mmid_mask);

		/*
		 * If our current MMID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_mmid(mmid, newmmid)) {
			mmid = newmmid;
			goto set_context;
		}

		/*
		 * We had a valid MMID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(mmid & mmid_mask, mmid_map)) {
			mmid = newmmid;
			goto set_context;
		}
	}

	/* Allocate a free MMID */
	mmid = find_next_zero_bit(mmid_map, num_mmids, cur_idx);
	if (mmid != num_mmids)
		goto reserve_mmid;

	/* We're out of MMIDs, so increment the global version */
	version = atomic64_add_return_relaxed(asid_first_version(0),
					      &mmid_version);

	/* Note currently active MMIDs & mark TLBs as requiring flushes */
	flush_context();

	/* We have more MMIDs than CPUs, so this will always succeed */
	mmid = find_first_zero_bit(mmid_map, num_mmids);

reserve_mmid:
	__set_bit(mmid, mmid_map);
	cur_idx = mmid;
	mmid |= version;
set_context:
	set_cpu_context(0, mm, mmid);
	return mmid;
}

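/*
 * Make @mm's context live on the current CPU when switching to it: on
 * ASID-based CPUs this validates the ASID and writes EntryHi, while on
 * MMID-capable CPUs it runs the lock-free fast path described below,
 * performs any pending local TLB invalidation and writes the MemoryMapID
 * register. In both cases the TLB refill handler is pointed at @mm's page
 * tables.
 */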
void check_switch_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	u64 ctx, old_active_mmid;
	unsigned long flags;

	if (!cpu_has_mmid) {
		check_mmu_context(mm);
		write_c0_entryhi(cpu_asid(cpu, mm));
		goto setup_pgd;
	}

	/*
	 * MMID switch fast-path, to avoid acquiring cpu_mmid_lock when it's
	 * unnecessary.
	 *
	 * The memory ordering here is subtle. If our active_mmids is non-zero
	 * and the MMID matches the current version, then we update the CPU's
	 * asid_cache with a relaxed cmpxchg. Racing with a concurrent rollover
	 * means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on
	 *   cpu_mmid_lock in check_mmu_context(). Taking the lock synchronises
	 *   with the rollover and so we are forced to see the updated
	 *   generation.
	 *
	 * - We get a valid MMID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	ctx = cpu_context(cpu, mm);
	old_active_mmid = READ_ONCE(cpu_data[cpu].asid_cache);
	if (!old_active_mmid ||
	    !asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)) ||
	    !cmpxchg_relaxed(&cpu_data[cpu].asid_cache, old_active_mmid, ctx)) {
		raw_spin_lock_irqsave(&cpu_mmid_lock, flags);

		ctx = cpu_context(cpu, mm);
		if (!asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)))
			ctx = get_new_mmid(mm);

		WRITE_ONCE(cpu_data[cpu].asid_cache, ctx);
		raw_spin_unlock_irqrestore(&cpu_mmid_lock, flags);
	}

	/*
	 * Invalidate the local TLB if needed. Note that we must only clear our
	 * bit in tlb_flush_pending after this is complete, so that the
	 * cpu_has_shared_ftlb_entries case below isn't misled.
	 */
	if (cpumask_test_cpu(cpu, &tlb_flush_pending)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
		local_flush_tlb_all();
		cpumask_clear_cpu(cpu, &tlb_flush_pending);
	}

	write_c0_memorymapid(ctx & cpu_asid_mask(&boot_cpu_data));

	/*
	 * If this CPU shares FTLB entries with its siblings and one or more of
	 * those siblings hasn't yet invalidated its TLB following a version
	 * increase then we need to invalidate any TLB entries for our MMID
	 * that we might otherwise pick up from a sibling.
	 *
	 * We ifdef on CONFIG_SMP because cpu_sibling_map isn't defined in
	 * CONFIG_SMP=n kernels.
	 */
#ifdef CONFIG_SMP
	if (cpu_has_shared_ftlb_entries &&
	    cpumask_intersects(&tlb_flush_pending, &cpu_sibling_map[cpu])) {
		/* Ensure we operate on the new MMID */
		mtc0_tlbw_hazard();

		/*
		 * Invalidate all TLB entries associated with the new
		 * MMID, and wait for the invalidation to complete.
		 */
		ginvt_mmid();
		sync_ginv();
	}
#endif

setup_pgd:
	TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
}
EXPORT_SYMBOL_GPL(check_switch_mmu_context);

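/*
 * Boot-time setup for the MMID allocator: size the MMID space from the
 * field width of CPU 0, allocate the allocation bitmap and reserve the MMID
 * used for kernel wired/kmap entries. A no-op on CPUs without MMID support.
 */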
static int mmid_init(void)
{
	if (!cpu_has_mmid)
		return 0;

	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more MMID than CPUs.
	 */
	num_mmids = asid_first_version(0);
	WARN_ON(num_mmids <= num_possible_cpus());

	atomic64_set(&mmid_version, asid_first_version(0));
	mmid_map = bitmap_zalloc(num_mmids, GFP_KERNEL);
	if (!mmid_map)
		panic("Failed to allocate bitmap for %u MMIDs\n", num_mmids);

	/* Reserve an MMID for kmap/wired entries */
	__set_bit(MMID_KERNEL_WIRED, mmid_map);

	pr_info("MMID allocator initialised with %u entries\n", num_mmids);
	return 0;
}
early_initcall(mmid_init);