/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>

#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/pgtable.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>

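/*
 * When CONFIG_PID_IN_CONTEXTIDR is enabled, publish the PID of the
 * incoming task in CONTEXTIDR_EL1 so that external debuggers and trace
 * tools can identify the thread that is currently running.
 */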
static inline void contextidr_thread_switch(struct task_struct *next)
{
	if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
		return;

	write_sysreg(task_pid_nr(next), contextidr_el1);
	isb();
}

/*
 * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbr = phys_to_ttbr(__pa_symbol(empty_zero_page));

	write_sysreg(ttbr, ttbr0_el1);
	isb();
}

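/*
 * Switch TTBR0_EL1 to the page tables of @mm. Going via the reserved
 * TTBR0 first ensures that speculative walks can never allocate TLB
 * entries from a stale combination of ASID and translation table base.
 * The kernel page tables (swapper_pg_dir) live in TTBR1_EL1 and must
 * never be installed here, hence the BUG_ON().
 */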
static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUG_ON(pgd == swapper_pg_dir);
	cpu_set_reserved_ttbr0();
	cpu_do_switch_mm(virt_to_phys(pgd), mm);
}

/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
 * physical memory, in which case it will be smaller.
 */
extern u64 idmap_t0sz;
extern u64 idmap_ptrs_per_pgd;

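/*
 * True if the ID map needs more VA bits than the kernel default, i.e.
 * idmap_t0sz was reduced at boot because system RAM lies above the
 * range covered by TCR_T0SZ(VA_BITS).
 */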
static inline bool __cpu_uses_extended_idmap(void)
{
	return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
}

/*
 * True if the extended ID map requires an extra level of translation table
 * to be configured.
 */
static inline bool __cpu_uses_extended_idmap_level(void)
{
	return ARM64_HW_PGTABLE_LEVELS(64 - idmap_t0sz) > CONFIG_PGTABLE_LEVELS;
}

/*
 * Set TCR.T0SZ to the given value. This is only needed when the CPU
 * uses an extended ID map; otherwise T0SZ never deviates from its
 * default value, TCR_T0SZ(VA_BITS).
 */
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
	unsigned long tcr;

	if (!__cpu_uses_extended_idmap())
		return;

	tcr = read_sysreg(tcr_el1);
	tcr &= ~TCR_T0SZ_MASK;
	tcr |= t0sz << TCR_T0SZ_OFFSET;
	write_sysreg(tcr, tcr_el1);
	isb();
}

#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS))
#define cpu_set_idmap_tcr_t0sz()	__cpu_set_tcr_t0sz(idmap_t0sz)

/*
 * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
 *
 * The idmap lives in the same VA range as userspace, but uses global entries
 * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
 * speculative TLB fetches, we must temporarily install the reserved page
 * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
 *
 * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
 * which should not be installed in TTBR0_EL1. In this case we can leave the
 * reserved page tables in place.
 */
static inline void cpu_uninstall_idmap(void)
{
	struct mm_struct *mm = current->active_mm;

	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();

	if (mm != &init_mm && !system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

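/*
 * Install the idmap in TTBR0_EL1, the mirror image of
 * cpu_uninstall_idmap() above: the reserved page tables are in place
 * while the TLBs are invalidated, and T0SZ is widened to idmap_t0sz if
 * the ID map needs more VA bits than the default.
 */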
static inline void cpu_install_idmap(void)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_idmap_tcr_t0sz();

	cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
}

/*
 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
 * avoiding the possibility of conflicting TLB entries being allocated.
 */
static inline void cpu_replace_ttbr1(pgd_t *pgdp)
{
	typedef void (ttbr_replace_func)(phys_addr_t);
	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
	ttbr_replace_func *replace_phys;

	/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));

	if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
		/*
		 * cpu_replace_ttbr1() is used either before the cpufeature
		 * framework is up (i.e. while only the boot CPU is running),
		 * or later, when CNP is enabled via the cpufeature enable()
		 * callback. In the latter case we rely on the cpu_hwcap bit
		 * for CNP being set before the enable() callback is invoked.
		 */
		ttbr1 |= TTBR_CNP_BIT;
	}

	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);

	cpu_install_idmap();
	replace_phys(ttbr1);
	cpu_uninstall_idmap();
}
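
/*
 * The replacement routine runs from the idmap'd physical alias of
 * idmap_cpu_replace_ttbr1 so that TTBR1_EL1 can be rewritten without
 * executing from a mapping that is about to disappear. Typical callers
 * swap in swapper_pg_dir once boot-time page tables (e.g. the KASAN
 * shadow) have been finalised.
 */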

/*
 * It would be nice to return ASIDs back to the allocator, but unfortunately
 * that introduces a race with a generation rollover where we could erroneously
 * free an ASID allocated in a future generation. We could work around this by
 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
 * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
 * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
 * take CPU migration into account.
 */
#define destroy_context(mm)		do { } while (0)
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);

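/*
 * A context.id of zero can never be a valid ASID/generation pair, so
 * this forces check_and_switch_context() to allocate a fresh ASID the
 * first time the new mm is scheduled in.
 */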
#define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
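/*
 * For the software PAN emulation, stash the TTBR0 value to use while
 * userspace accesses are permitted in thread_info, where the
 * uaccess_enable()/uaccess_disable() paths can restore it from. Kernel
 * threads get the zero page instead of a real translation table.
 */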
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
	u64 ttbr;

	if (!system_uses_ttbr0_pan())
		return;

	if (mm == &init_mm)
		ttbr = __pa_symbol(empty_zero_page);
	else
		ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;

	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
}
#endif

static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/*
	 * We don't actually care about the ttbr0 mapping, so point it at the
	 * zero page.
	 */
	update_saved_ttbr0(tsk, &init_mm);
}

static inline void __switch_mm(struct mm_struct *next)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * init_mm.pgd does not contain any user mappings and it is always
	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
	 */
	if (next == &init_mm) {
		cpu_set_reserved_ttbr0();
		return;
	}

	check_and_switch_context(next, cpu);
}

static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	if (prev != next)
		__switch_mm(next);

	/*
	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
	 * value may not have been initialised yet (activate_mm caller) or the
	 * ASID has changed since the last run (following the context switch
	 * of another thread of the same process).
	 */
	update_saved_ttbr0(tsk, next);
}

#define deactivate_mm(tsk,mm)	do { } while (0)
#define activate_mm(prev,next)	switch_mm(prev, next, current)

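/*
 * verify_cpu_asid_bits() checks that a newly onlined CPU implements at
 * least as many ASID bits as the boot CPU, and
 * post_ttbr_update_workaround() applies any erratum workarounds (e.g.
 * Cavium erratum 27456) that are required after a TTBR change.
 */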
void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);

#endif /* !__ASSEMBLY__ */

#endif /* !__ASM_MMU_CONTEXT_H */