/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>

extern bool rodata_full;

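/*
 * Advertise the scheduled-in task's PID via CONTEXTIDR_EL1 so that external
 * debug and trace tooling can identify the running context. Only active when
 * CONFIG_PID_IN_CONTEXTIDR is enabled.
 */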
static inline void contextidr_thread_switch(struct task_struct *next)
{
	if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
		return;

	write_sysreg(task_pid_nr(next), contextidr_el1);
	isb();
}

/*
 * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0_nosync(void)
{
	unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));

	write_sysreg(ttbr, ttbr0_el1);
}

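/*
 * As above, but follow the TTBR0 write with an ISB so that it is guaranteed
 * to be visible to subsequent instructions on this CPU.
 */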
static inline void cpu_set_reserved_ttbr0(void)
{
	cpu_set_reserved_ttbr0_nosync();
	isb();
}

void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);

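/*
 * swapper_pg_dir only maps kernel addresses and belongs in TTBR1_EL1;
 * catch any attempt to install it as a user (TTBR0) page table.
 */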
static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUG_ON(pgd == swapper_pg_dir);
	cpu_do_switch_mm(virt_to_phys(pgd), mm);
}

/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
 * physical memory, in which case it will be smaller.
 */
extern int idmap_t0sz;

/*
 * Ensure TCR.T0SZ is set to the provided value.
 */
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
	unsigned long tcr = read_sysreg(tcr_el1);

	if ((tcr & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET == t0sz)
		return;

	tcr &= ~TCR_T0SZ_MASK;
	tcr |= t0sz << TCR_T0SZ_OFFSET;
	write_sysreg(tcr, tcr_el1);
	isb();
}

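/*
 * Helpers for the two T0SZ values of interest: the default covers the
 * runtime VA size, while the idmap variant may need to widen TTBR0's
 * input range when the ID map sits in high physical memory.
 */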
#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual))
#define cpu_set_idmap_tcr_t0sz()	__cpu_set_tcr_t0sz(idmap_t0sz)

/*
 * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
 *
 * The idmap lives in the same VA range as userspace, but uses global entries
 * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
 * speculative TLB fetches, we must temporarily install the reserved page
 * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
 *
 * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
 * which should not be installed in TTBR0_EL1. In this case we can leave the
 * reserved page tables in place.
 */
static inline void cpu_uninstall_idmap(void)
{
	struct mm_struct *mm = current->active_mm;

	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();

	if (mm != &init_mm && !system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

static inline void __cpu_install_idmap(pgd_t *idmap)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_idmap_tcr_t0sz();

	cpu_switch_mm(lm_alias(idmap), &init_mm);
}

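/*
 * Install the boot ID map (idmap_pg_dir) in TTBR0_EL1.
 */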
static inline void cpu_install_idmap(void)
{
	__cpu_install_idmap(idmap_pg_dir);
}

/*
 * Load our new page tables. A strict BBM approach requires that we ensure that
 * TLBs are free of any entries that may overlap with the global mappings we are
 * about to install.
 *
 * For a real hibernate/resume/kexec cycle TTBR0 currently points to a zero
 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI runtime
 * services), while for a userspace-driven test_resume cycle it points to
 * userspace page tables (and we must point it at a zero page ourselves).
 *
 * We change T0SZ as part of installing the idmap. This is undone by
 * cpu_uninstall_idmap() in __cpu_suspend_exit().
 */
static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	__cpu_set_tcr_t0sz(t0sz);

	/* avoid cpu_switch_mm() and its SW-PAN and CNP interactions */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
}

/*
 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
 * avoiding the possibility of conflicting TLB entries being allocated.
 */
static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
{
	typedef void (ttbr_replace_func)(phys_addr_t);
	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
	ttbr_replace_func *replace_phys;
	unsigned long daif;

	/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));

	if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
		/*
		 * cpu_replace_ttbr1() is used when there's only a boot CPU
		 * up (i.e. the cpufeature framework is not up yet) and
		 * later only when we enable CNP via cpufeature's
		 * enable() callback.
		 * We also rely on the system_cpucaps bit being set before
		 * calling the enable() function.
		 */
		ttbr1 |= TTBR_CNP_BIT;
	}

	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);

	__cpu_install_idmap(idmap);

	/*
	 * We really don't want to take *any* exceptions while TTBR1 is
	 * in the process of being replaced so mask everything.
	 */
	daif = local_daif_save();
	replace_phys(ttbr1);
	local_daif_restore(daif);

	cpu_uninstall_idmap();
}

/*
 * It would be nice to return ASIDs back to the allocator, but unfortunately
 * that introduces a race with a generation rollover where we could erroneously
 * free an ASID allocated in a future generation. We could work around this by
 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
 * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
 * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
 * take CPU migration into account.
 */
void check_and_switch_context(struct mm_struct *mm);

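/*
 * A context.id of 0 means "no ASID allocated yet"; check_and_switch_context()
 * assigns one lazily on the first switch to this mm. The pinned count tracks
 * users (e.g. IOMMU SVA) that need the ASID to survive generation rollover.
 */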
#define init_new_context(tsk, mm) init_new_context(tsk, mm)
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	atomic64_set(&mm->context.id, 0);
	refcount_set(&mm->context.pinned, 0);
	return 0;
}

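/*
 * With software TTBR0 PAN, the kernel runs with the reserved page tables in
 * TTBR0_EL1 and the exception return path reinstates the saved user value,
 * so keep the per-thread copy in thread_info up to date here.
 */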
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
	u64 ttbr;

	if (!system_uses_ttbr0_pan())
		return;

	if (mm == &init_mm)
		ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
	else
		ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;

	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
}
#endif

#define enter_lazy_tlb enter_lazy_tlb
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/*
	 * We don't actually care about the ttbr0 mapping, so point it at the
	 * zero page.
	 */
	update_saved_ttbr0(tsk, &init_mm);
}

static inline void __switch_mm(struct mm_struct *next)
{
	/*
	 * init_mm.pgd does not contain any user mappings and it is always
	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
	 */
	if (next == &init_mm) {
		cpu_set_reserved_ttbr0();
		return;
	}

	check_and_switch_context(next);
}

static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	if (prev != next)
		__switch_mm(next);

	/*
	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
	 * value may not have been initialised yet (activate_mm caller) or the
	 * ASID may have changed since the last run (following the context
	 * switch of another thread of the same process).
	 */
	update_saved_ttbr0(tsk, next);
}

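/*
 * On systems where only a subset of CPUs implement AArch32 at EL0, restrict
 * compat tasks to the CPUs that can actually run them.
 */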
static inline const struct cpumask *
task_cpu_possible_mask(struct task_struct *p)
{
	if (!static_branch_unlikely(&arm64_mismatched_32bit_el0))
		return cpu_possible_mask;

	if (!is_compat_thread(task_thread_info(p)))
		return cpu_possible_mask;

	return system_32bit_el0_cpumask();
}
#define task_cpu_possible_mask	task_cpu_possible_mask

void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);

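/*
 * Pin and release an mm's ASID: a pinned ASID is guaranteed to stay valid
 * across generation rollover, e.g. while the mm is shared with an SMMU.
 */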
unsigned long arm64_mm_context_get(struct mm_struct *mm);
void arm64_mm_context_put(struct mm_struct *mm);

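/*
 * With top-byte-ignore (TBI), the MMU disregards bits 63:56 of a tagged
 * user address, so the untagged address is the low 56 bits.
 */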
#define mm_untag_mask mm_untag_mask
static inline unsigned long mm_untag_mask(struct mm_struct *mm)
{
	return -1UL >> 8;
}

#include <asm-generic/mmu_context.h>

#endif /* !__ASSEMBLY__ */

#endif /* !__ASM_MMU_CONTEXT_H */