/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>

extern bool rodata_full;

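/*
 * Publish the PID of the incoming task in CONTEXTIDR_EL1 so that external
 * debug and trace tools can identify the currently running thread. This is
 * a no-op unless the kernel was built with CONFIG_PID_IN_CONTEXTIDR.
 */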
static inline void contextidr_thread_switch(struct task_struct *next)
{
	if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
		return;

	write_sysreg(task_pid_nr(next), contextidr_el1);
	isb();
}

/*
 * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));

	write_sysreg(ttbr, ttbr0_el1);
	isb();
}

void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);

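/*
 * Install the page tables of @mm in TTBR0_EL1. TTBR0 is parked on the
 * reserved tables first so that no stale translations can be formed while
 * cpu_do_switch_mm() updates the table base and ASID. swapper_pg_dir must
 * never be installed via this path: kernel mappings live in TTBR1 only.
 */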
static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUG_ON(pgd == swapper_pg_dir);
	cpu_set_reserved_ttbr0();
	cpu_do_switch_mm(virt_to_phys(pgd), mm);
}

/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
 * physical memory, in which case it will be smaller.
 */
extern u64 idmap_t0sz;
extern u64 idmap_ptrs_per_pgd;

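/*
 * True when the ID map uses a T0SZ that differs from the regular
 * TCR_T0SZ(VA_BITS) value (see the comment above idmap_t0sz). This is
 * never the case for 52-bit VA configurations.
 */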
static inline bool __cpu_uses_extended_idmap(void)
{
	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52))
		return false;

	return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
}

/*
 * True if the extended ID map requires an extra level of translation table
 * to be configured.
 */
static inline bool __cpu_uses_extended_idmap_level(void)
{
	return ARM64_HW_PGTABLE_LEVELS(64 - idmap_t0sz) > CONFIG_PGTABLE_LEVELS;
}

/*
 * Ensure TCR.T0SZ is set to the provided value.
 */
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
	unsigned long tcr = read_sysreg(tcr_el1);

	if ((tcr & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET == t0sz)
		return;

	tcr &= ~TCR_T0SZ_MASK;
	tcr |= t0sz << TCR_T0SZ_OFFSET;
	write_sysreg(tcr, tcr_el1);
	isb();
}

#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual))
#define cpu_set_idmap_tcr_t0sz()	__cpu_set_tcr_t0sz(idmap_t0sz)

/*
 * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
 *
 * The idmap lives in the same VA range as userspace, but uses global entries
 * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
 * speculative TLB fetches, we must temporarily install the reserved page
 * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
 *
 * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
 * which should not be installed in TTBR0_EL1. In this case we can leave the
 * reserved page tables in place.
 */
static inline void cpu_uninstall_idmap(void)
{
	struct mm_struct *mm = current->active_mm;

	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();

	if (mm != &init_mm && !system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

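/*
 * Install the idmap in TTBR0_EL1: park TTBR0 on the reserved tables,
 * invalidate stale local TLB entries, switch TCR_EL1.T0SZ to the value the
 * ID map requires and finally install idmap_pg_dir on behalf of init_mm.
 */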
static inline void cpu_install_idmap(void)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_idmap_tcr_t0sz();

	cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
}

/*
 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
 * avoiding the possibility of conflicting TLB entries being allocated.
 */
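/*
 * A typical caller switches onto a fully populated set of kernel page
 * tables, roughly along the lines of (illustrative sketch only; the real
 * call sites live under arch/arm64/mm):
 *
 *	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 */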
static inline void cpu_replace_ttbr1(pgd_t *pgdp)
{
	typedef void (ttbr_replace_func)(phys_addr_t);
	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
	ttbr_replace_func *replace_phys;

	/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));

	if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
		/*
		 * cpu_replace_ttbr1() is used either when a boot CPU is being
		 * brought up (i.e. the cpufeature framework is not up yet) or
		 * later, once CNP is enabled via cpufeature's enable()
		 * callback. We also rely on the cpu_hwcap bit being set
		 * before the enable() function is called.
		 */
		ttbr1 |= TTBR_CNP_BIT;
	}

	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);

	cpu_install_idmap();
	replace_phys(ttbr1);
	cpu_uninstall_idmap();
}

/*
 * It would be nice to return ASIDs back to the allocator, but unfortunately
 * that introduces a race with a generation rollover where we could erroneously
 * free an ASID allocated in a future generation. We could work around this by
 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
 * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
 * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
 * take CPU migration into account.
 */
void check_and_switch_context(struct mm_struct *mm);

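/*
 * A freshly created mm starts out with ASID 0 (meaning "no ASID allocated
 * yet") and no pinned users; check_and_switch_context() hands out a real
 * ASID the first time the mm is switched to.
 */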
#define init_new_context(tsk, mm) init_new_context(tsk, mm)
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	atomic64_set(&mm->context.id, 0);
	refcount_set(&mm->context.pinned, 0);
	return 0;
}

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
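/*
 * With the software PAN emulation, the kernel normally runs with the
 * reserved tables in TTBR0_EL1 and only installs a task's real TTBR0 value
 * around user accesses. Stash that value in thread_info->ttbr0: the mm's
 * pgd with its ASID in bits 63:48, or reserved_pg_dir when the task is
 * running on init_mm.
 */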
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
	u64 ttbr;

	if (!system_uses_ttbr0_pan())
		return;

	if (mm == &init_mm)
		ttbr = __pa_symbol(reserved_pg_dir);
	else
		ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;

	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
}
#endif

#define enter_lazy_tlb enter_lazy_tlb
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/*
	 * We don't actually care about the ttbr0 mapping, so point it at the
	 * reserved page tables used for init_mm.
	 */
	update_saved_ttbr0(tsk, &init_mm);
}

static inline void __switch_mm(struct mm_struct *next)
{
	/*
	 * init_mm.pgd does not contain any user mappings and it is always
	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
	 */
	if (next == &init_mm) {
		cpu_set_reserved_ttbr0();
		return;
	}

	check_and_switch_context(next);
}

static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	if (prev != next)
		__switch_mm(next);

	/*
	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
	 * value may not have been initialised yet (activate_mm caller) or the
	 * ASID has changed since the last run (following the context switch
	 * of another thread of the same process).
	 */
	update_saved_ttbr0(tsk, next);
}

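/*
 * verify_cpu_asid_bits() checks that a newly onlined CPU implements at
 * least as many ASID bits as the boot CPU, since the allocator was sized
 * from the boot CPU's value. post_ttbr_update_workaround() applies any
 * erratum workaround that has to run after a TTBR change.
 */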
void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);

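/*
 * Pin/unpin the ASID of an mm so that it survives rollover. A stable ASID
 * is needed when it is shared with another agent, e.g. an SMMU using
 * shared (SVA) page tables; the pin count lives in mm->context.pinned.
 */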
unsigned long arm64_mm_context_get(struct mm_struct *mm);
void arm64_mm_context_put(struct mm_struct *mm);

#include <asm-generic/mmu_context.h>

#endif /* !__ASSEMBLY__ */

#endif /* !__ASM_MMU_CONTEXT_H */