// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *  Copyright (C) 2012 ARM Limited
 *
 *  Author: Will Deacon <will.deacon@arm.com>
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 *
 * In big endian operation, the two 32 bit words are swapped if accessed
 * by non-64-bit operations.
 */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION
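
/*
 * mm->context.id is a 64-bit value: the generation lives in the bits
 * above ASID_BITS and the hardware ASID in the bits below. As a worked
 * example (assuming the 8-bit ASID field shown above, so
 * ASID_FIRST_VERSION == 0x100): a context.id of 0x342 encodes
 * generation 0x300 and ASID 0x42. The generation check in
 * check_and_switch_context() is simply (id ^ generation) >> ASID_BITS,
 * which is zero exactly when the two carry the same generation.
 */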

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
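/*
 * active_asids tracks, for each CPU, the ASID that CPU is currently
 * running with (zeroed by a rollover until the CPU next switches);
 * reserved_asids remembers the ASID a CPU was running at rollover time,
 * so it can be kept out of the fresh bitmap; tlb_flush_pending marks
 * the CPUs that must flush their TLB before running a task tagged with
 * a new-generation ASID.
 */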

#ifdef CONFIG_ARM_ERRATA_798181
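/*
 * Build the mask of CPUs that may still be using this mm's ASID (either
 * as their active ASID or as one reserved across a rollover), so that
 * the erratum 798181 workaround can aim its IPI-based TLB maintenance
 * at just those CPUs.
 */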
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask)
{
	int cpu;
	unsigned long flags;
	u64 context_id, asid;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	context_id = mm->context.id.counter;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We only need to send an IPI if the other CPUs are
		 * running the same ASID as the one being invalidated.
		 */
		asid = per_cpu(active_asids, cpu).counter;
		if (asid == 0)
			asid = per_cpu(reserved_asids, cpu);
		if (context_id == asid)
			cpumask_set_cpu(cpu, mask);
	}
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
#endif

#ifdef CONFIG_ARM_LPAE
/*
 * With LPAE, the ASID and page tables are updated atomically, so there is
 * no need for a reserved set of tables (the active ASID tracking prevents
 * any issues across a rollover).
 */
#define cpu_set_reserved_ttbr0()
#else
static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/*
	 * Copy TTBR1 into TTBR0.
	 * This points at swapper_pg_dir, which contains only global
	 * entries so any speculative walks are perfectly safe.
	 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread_task(thread)) << ASID_BITS;
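	/*
	 * Read-modify-write of CONTEXTIDR: preserve the hardware ASID in
	 * the low ASID_BITS (owned by the ASID allocator) and install the
	 * switched-in task's PID in the PROCID field above it.
	 */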
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif

static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
	for_each_possible_cpu(i) {
		asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid & ~ASID_MASK, asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	cpumask_setall(&tlb_flush_pending);

	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		asid &= ~ASID_MASK;
		if (!__test_and_set_bit(asid, asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.
	 * We always count from ASID #1, as we reserve ASID #0 to switch
	 * via TTBR0 and to avoid speculative page table walks from hitting
	 * in any partial walk caches, which could be populated from
	 * overlapping level-1 descriptors used to map both the module
	 * area and the userspace stack.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid == NUM_USER_ASIDS) {
		generation = atomic64_add_return(ASID_FIRST_VERSION,
						 &asid_generation);
		flush_context(cpu);
		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
	}
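
	/*
	 * Example rollover (with 8-bit ASIDs, NUM_USER_ASIDS == 256 and
	 * ASID #0 reserved): once all 255 usable ASIDs of generation G
	 * are handed out, the next allocation bumps the generation to
	 * G + ASID_FIRST_VERSION, re-seeds the bitmap from the ASIDs
	 * still live on each CPU via flush_context(), and retries the
	 * search from bit 1.
	 */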

	__set_bit(asid, asid_map);
	cur_idx = asid;
	cpumask_clear(mm_cpumask(mm));
	return asid | generation;
}

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();
	u64 asid;

	check_vmalloc_seq(mm);

	/*
	 * We cannot update the pgd and the ASID atomically with classic
	 * MMU, so switch exclusively to global mappings to avoid
	 * speculative page table walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

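	/*
	 * Fast path: if our ASID is from the current generation and we can
	 * atomically mark it active (a non-zero xchg return means no
	 * rollover has zeroed it underneath us), the TLB still holds only
	 * valid entries for it and we can switch without taking the lock.
	 */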
	asid = atomic64_read(&mm->context.id);
	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

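	/*
	 * If a rollover flagged this CPU in tlb_flush_pending, perform the
	 * deferred TLB and branch-predictor invalidation now, before any
	 * stale entries can be hit under the new generation.
	 */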
	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
		local_flush_bp_all();
		local_flush_tlb_all();
	}

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}