/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>

extern bool rodata_full;

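/*
 * Publish the PID of the incoming task in CONTEXTIDR_EL1 so that external
 * debug and trace tools can identify it. This is a no-op unless
 * CONFIG_PID_IN_CONTEXTIDR is enabled.
 */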
static inline void contextidr_thread_switch(struct task_struct *next)
{
	if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
		return;

	write_sysreg(task_pid_nr(next), contextidr_el1);
	isb();
}

/*
 * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0_nosync(void)
{
	unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));

	write_sysreg(ttbr, ttbr0_el1);
}

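/*
 * As above, but synchronised with an ISB; the _nosync variant is for callers
 * that issue their own synchronisation before relying on the new TTBR0.
 */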
static inline void cpu_set_reserved_ttbr0(void)
{
	cpu_set_reserved_ttbr0_nosync();
	isb();
}

void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);

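/*
 * swapper_pg_dir covers the kernel mappings and must never be installed in
 * TTBR0_EL1; only user page tables (or the reserved tables) belong there.
 */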
static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUG_ON(pgd == swapper_pg_dir);
	cpu_do_switch_mm(virt_to_phys(pgd), mm);
}

/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
 * physical memory, in which case it will be smaller.
 */
extern int idmap_t0sz;

/*
 * Ensure TCR.T0SZ is set to the provided value.
 */
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
	unsigned long tcr = read_sysreg(tcr_el1);

	if ((tcr & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET == t0sz)
		return;

	tcr &= ~TCR_T0SZ_MASK;
	tcr |= t0sz << TCR_T0SZ_OFFSET;
	write_sysreg(tcr, tcr_el1);
	isb();
}

#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual))
#define cpu_set_idmap_tcr_t0sz()	__cpu_set_tcr_t0sz(idmap_t0sz)

/*
 * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
 *
 * The idmap lives in the same VA range as userspace, but uses global entries
 * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
 * speculative TLB fetches, we must temporarily install the reserved page
 * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
 *
 * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
 * which should not be installed in TTBR0_EL1. In this case we can leave the
 * reserved page tables in place.
 */
static inline void cpu_uninstall_idmap(void)
{
	struct mm_struct *mm = current->active_mm;

	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();

	if (mm != &init_mm && !system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

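/*
 * Install the given identity map in TTBR0_EL1, switching TCR_EL1.T0SZ to the
 * value required by the ID map. The reserved tables are installed and the
 * TLBs invalidated first so that no conflicting entries can be allocated.
 */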
static inline void __cpu_install_idmap(pgd_t *idmap)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_idmap_tcr_t0sz();

	cpu_switch_mm(lm_alias(idmap), &init_mm);
}

static inline void cpu_install_idmap(void)
{
	__cpu_install_idmap(idmap_pg_dir);
}

/*
 * Load our new page tables. A strict BBM approach requires that we ensure that
 * TLBs are free of any entries that may overlap with the global mappings we are
 * about to install.
 *
 * For a real hibernate/resume/kexec cycle TTBR0 currently points to a zero
 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI runtime
 * services), while for a userspace-driven test_resume cycle it points to
 * userspace page tables (and we must point it at a zero page ourselves).
 *
 * We change T0SZ as part of installing the idmap. This is undone by
 * cpu_uninstall_idmap() in __cpu_suspend_exit().
 */
static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	__cpu_set_tcr_t0sz(t0sz);

	/* avoid cpu_switch_mm() and its SW-PAN and CNP interactions */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
}

/*
 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
 * avoiding the possibility of conflicting TLB entries being allocated.
 */
static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
{
	typedef void (ttbr_replace_func)(phys_addr_t);
	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
	ttbr_replace_func *replace_phys;
	unsigned long daif;

	/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));

	if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
		/*
		 * cpu_replace_ttbr1() is used when there's a boot CPU
		 * up (i.e. cpufeature framework is not up yet) and
		 * later only when we enable CNP via cpufeature's
		 * enable() callback.
		 * Also we rely on the system_cpucaps bit being set before
		 * calling the enable() function.
		 */
		ttbr1 |= TTBR_CNP_BIT;
	}

	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);

	__cpu_install_idmap(idmap);

	/*
	 * We really don't want to take *any* exceptions while TTBR1 is
	 * in the process of being replaced so mask everything.
	 */
	daif = local_daif_save();
	replace_phys(ttbr1);
	local_daif_restore(daif);

	cpu_uninstall_idmap();
}

/*
 * It would be nice to return ASIDs back to the allocator, but unfortunately
 * that introduces a race with a generation rollover where we could erroneously
 * free an ASID allocated in a future generation. We could work around this by
 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
 * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
 * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
 * take CPU migration into account.
 */
void check_and_switch_context(struct mm_struct *mm);

#define init_new_context(tsk, mm) init_new_context(tsk, mm)
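/*
 * A new mm starts out with no ASID; check_and_switch_context() allocates one
 * the first time the mm is switched in.
 */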
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	atomic64_set(&mm->context.id, 0);
	refcount_set(&mm->context.pinned, 0);
	return 0;
}

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
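/*
 * With software PAN the kernel normally runs with the reserved TTBR0, and the
 * exception return path reinstalls the user value saved in thread_info->ttbr0,
 * so keep that saved copy in sync with the task's mm here.
 */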
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
	u64 ttbr;

	if (!system_uses_ttbr0_pan())
		return;

	if (mm == &init_mm)
		ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
	else
		ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;

	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
}
#endif

#define enter_lazy_tlb enter_lazy_tlb
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/*
	 * We don't actually care about the ttbr0 mapping, so point it at the
	 * reserved page tables.
	 */
	update_saved_ttbr0(tsk, &init_mm);
}

static inline void __switch_mm(struct mm_struct *next)
{
	/*
	 * init_mm.pgd does not contain any user mappings and it is always
	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
	 */
	if (next == &init_mm) {
		cpu_set_reserved_ttbr0();
		return;
	}

	check_and_switch_context(next);
}

static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	if (prev != next)
		__switch_mm(next);

	/*
	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
	 * value may not have been initialised yet (activate_mm caller) or the
	 * ASID has changed since the last run (following the context switch
	 * of another thread of the same process).
	 */
	update_saved_ttbr0(tsk, next);
}

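/*
 * On systems where only some CPUs can execute 32-bit EL0 code, compat tasks
 * must be restricted to that subset of CPUs; all other tasks may run anywhere.
 */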
static inline const struct cpumask *
task_cpu_possible_mask(struct task_struct *p)
{
	if (!static_branch_unlikely(&arm64_mismatched_32bit_el0))
		return cpu_possible_mask;

	if (!is_compat_thread(task_thread_info(p)))
		return cpu_possible_mask;

	return system_32bit_el0_cpumask();
}
#define task_cpu_possible_mask	task_cpu_possible_mask

void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);

unsigned long arm64_mm_context_get(struct mm_struct *mm);
void arm64_mm_context_put(struct mm_struct *mm);

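/*
 * With Top Byte Ignore, bits 63:56 of a user address are a tag, so the untag
 * mask preserves bits 55:0.
 */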
#define mm_untag_mask mm_untag_mask
static inline unsigned long mm_untag_mask(struct mm_struct *mm)
{
	return -1UL >> 8;
}

#include <asm-generic/mmu_context.h>

#endif /* !__ASSEMBLY__ */

#endif /* !__ASM_MMU_CONTEXT_H */