/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/hwcap.h>
#include <asm/kernel-pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/smp.h>
#include <asm/sysreg.h>

#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
#elif defined(CONFIG_ARM64_16K_PAGES)
#define TCR_TG_FLAGS	TCR_TG0_16K | TCR_TG1_16K
#else /* CONFIG_ARM64_4K_PAGES */
#define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
#endif

#ifdef CONFIG_RANDOMIZE_BASE
#define TCR_KASLR_FLAGS	TCR_NFD1
#else
#define TCR_KASLR_FLAGS	0
#endif

#define TCR_SMP_FLAGS	TCR_SHARED

/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA

#ifdef CONFIG_KASAN_SW_TAGS
#define TCR_KASAN_SW_FLAGS TCR_TBI1 | TCR_TBID1
#else
#define TCR_KASAN_SW_FLAGS 0
#endif

#ifdef CONFIG_KASAN_HW_TAGS
#define TCR_MTE_FLAGS TCR_TCMA1 | TCR_TBI1 | TCR_TBID1
#elif defined(CONFIG_ARM64_MTE)
/*
 * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
 * TBI being enabled at EL1.
 */
#define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1
#else
#define TCR_MTE_FLAGS 0
#endif

/*
 * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
 * changed during mte_cpu_setup to Normal Tagged if the system supports MTE.
 */
#define MAIR_EL1_SET							\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))

#ifdef CONFIG_CPU_PM
/**
 * cpu_do_suspend - save CPU register context
 *
 * x0: virtual address of context pointer
 *
 * This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>.
 */
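/*
 * Summary of the layout written by the stp/str sequence below; struct
 * cpu_suspend_ctx in <asm/suspend.h> remains the authoritative definition.
 * Offsets in bytes:
 *   #0  tpidr_el0        #8  tpidrro_el0
 *   #16 contextidr_el1   #24 osdlr_el1
 *   #32 cpacr_el1        #40 tcr_el1
 *   #48 vbar_el1         #56 mdscr_el1
 *   #64 oslsr_el1        #72 sctlr_el1
 *   #80 per-CPU offset   #88 sp_el0
 *   #96 x18
 */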
SYM_FUNC_START(cpu_do_suspend)
	mrs	x2, tpidr_el0
	mrs	x3, tpidrro_el0
	mrs	x4, contextidr_el1
	mrs	x5, osdlr_el1
	mrs	x6, cpacr_el1
	mrs	x7, tcr_el1
	mrs	x8, vbar_el1
	mrs	x9, mdscr_el1
	mrs	x10, oslsr_el1
	mrs	x11, sctlr_el1
	get_this_cpu_offset x12
	mrs	x13, sp_el0
	stp	x2, x3, [x0]
	stp	x4, x5, [x0, #16]
	stp	x6, x7, [x0, #32]
	stp	x8, x9, [x0, #48]
	stp	x10, x11, [x0, #64]
	stp	x12, x13, [x0, #80]
	/*
	 * Save x18 as it may be used as a platform register, e.g. by shadow
	 * call stack.
	 */
	str	x18, [x0, #96]
	ret
SYM_FUNC_END(cpu_do_suspend)

/**
 * cpu_do_resume - restore CPU register context
 *
 * x0: Address of context pointer
 */
	.pushsection ".idmap.text", "awx"
SYM_FUNC_START(cpu_do_resume)
	ldp	x2, x3, [x0]
	ldp	x4, x5, [x0, #16]
	ldp	x6, x8, [x0, #32]
	ldp	x9, x10, [x0, #48]
	ldp	x11, x12, [x0, #64]
	ldp	x13, x14, [x0, #80]
	/*
	 * Restore x18, as it may be used as a platform register, and clear
	 * the buffer to minimize the risk of exposure when used for shadow
	 * call stack.
	 */
	ldr	x18, [x0, #96]
	str	xzr, [x0, #96]
	msr	tpidr_el0, x2
	msr	tpidrro_el0, x3
	msr	contextidr_el1, x4
	msr	cpacr_el1, x6

	/* Don't change t0sz here, mask those bits when restoring */
	mrs	x7, tcr_el1
	bfi	x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH

	msr	tcr_el1, x8
	msr	vbar_el1, x9

	/*
	 * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
	 * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
	 * exception. Mask them until local_daif_restore() in cpu_suspend()
	 * resets them.
	 */
	disable_daif
	msr	mdscr_el1, x10

	msr	sctlr_el1, x12
	set_this_cpu_offset x13
	msr	sp_el0, x14
	/*
	 * Restore oslsr_el1 by writing oslar_el1
	 */
	msr	osdlr_el1, x5
	ubfx	x11, x11, #1, #1
	msr	oslar_el1, x11
	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
	reset_amuserenr_el0 x0			// Disable AMU access from EL0

alternative_if ARM64_HAS_RAS_EXTN
	msr_s	SYS_DISR_EL1, xzr
alternative_else_nop_endif

	ptrauth_keys_install_kernel_nosync x14, x1, x2, x3
	isb
	ret
SYM_FUNC_END(cpu_do_resume)
	.popsection
#endif

	.pushsection ".idmap.text", "awx"

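/*
 * Point TTBR1_EL1 at the empty reserved_pg_dir and invalidate the TLBs, so
 * that the swapper page tables are no longer live on this CPU and can be
 * switched or rewritten safely.
 */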
.macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
	adrp	\tmp1, reserved_pg_dir
	phys_to_ttbr \tmp2, \tmp1
	offset_ttbr1 \tmp2, \tmp1
	msr	ttbr1_el1, \tmp2
	isb
	tlbi	vmalle1
	dsb	nsh
	isb
.endm

/*
 * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
 *
 * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
 * called by anything else. It can only be executed from a TTBR0 mapping.
 */
SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1)
	save_and_disable_daif flags=x2

	__idmap_cpu_set_reserved_ttbr1 x1, x3

	offset_ttbr1 x0, x3
	msr	ttbr1_el1, x0
	isb

	restore_daif x2

	ret
SYM_FUNC_END(idmap_cpu_replace_ttbr1)
	.popsection

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0

#define KPTI_NG_PTE_FLAGS	(PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)

	.pushsection ".idmap.text", "awx"

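	/*
	 * Walk \num_entries descriptors starting at cur_\type\()p, setting the
	 * nG bit (PTE_NG) in every valid descriptor that does not already have
	 * it. For levels above the PTE, a valid table descriptor (bit 1 set)
	 * is followed via .Lderef_\type so the next level gets processed too.
	 */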
	.macro	kpti_mk_tbl_ng, type, num_entries
	add	end_\type\()p, cur_\type\()p, #\num_entries * 8
.Ldo_\type:
	ldr	\type, [cur_\type\()p]		// Load the entry
	tbz	\type, #0, .Lnext_\type		// Skip invalid and
	tbnz	\type, #11, .Lnext_\type	// non-global entries
	orr	\type, \type, #PTE_NG		// Same bit for blocks and pages
	str	\type, [cur_\type\()p]		// Update the entry
	.ifnc	\type, pte
	tbnz	\type, #1, .Lderef_\type
	.endif
.Lnext_\type:
	add	cur_\type\()p, cur_\type\()p, #8
	cmp	cur_\type\()p, end_\type\()p
	b.ne	.Ldo_\type
	.endm

	/*
	 * Dereference the current table entry and map it into the temporary
	 * fixmap slot associated with the current level.
	 */
	.macro	kpti_map_pgtbl, type, level
	str	xzr, [temp_pte, #8 * (\level + 1)]	// break before make
	dsb	nshst
	add	pte, temp_pte, #PAGE_SIZE * (\level + 1)
	lsr	pte, pte, #12
	tlbi	vaae1, pte
	dsb	nsh
	isb

	phys_to_pte pte, cur_\type\()p
	add	cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 1)
	orr	pte, pte, pte_flags
	str	pte, [temp_pte, #8 * (\level + 1)]
	dsb	nshst
	.endm

/*
 * void __kpti_install_ng_mappings(int cpu, int num_secondaries, phys_addr_t temp_pgd,
 *				   unsigned long temp_pte_va)
 *
 * Called exactly once from stop_machine context by each CPU found during boot.
 */
	.pushsection	".data", "aw", %progbits
SYM_DATA(__idmap_kpti_flag, .long 1)
	.popsection
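/*
 * __idmap_kpti_flag is the rendezvous counter for the rewrite: it starts at 1
 * to account for the boot CPU, each secondary increments it on arrival, and
 * the boot CPU waits until it reaches the num_cpus argument before touching
 * swapper. Writing it back to zero releases the secondaries again.
 */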

SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
	cpu		.req	w0
	temp_pte	.req	x0
	num_cpus	.req	w1
	pte_flags	.req	x1
	temp_pgd_phys	.req	x2
	swapper_ttb	.req	x3
	flag_ptr	.req	x4
	cur_pgdp	.req	x5
	end_pgdp	.req	x6
	pgd		.req	x7
	cur_pudp	.req	x8
	end_pudp	.req	x9
	cur_pmdp	.req	x11
	end_pmdp	.req	x12
	cur_ptep	.req	x14
	end_ptep	.req	x15
	pte		.req	x16
	valid		.req	x17

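	/*
	 * The temp_pte VA argument arrives in x3, which doubles as
	 * swapper_ttb, and temp_pte itself aliases the cpu argument in x0, so
	 * stash the value in x5 (not yet live as cur_pgdp) until the boot CPU
	 * has passed the rendezvous and can move it into temp_pte.
	 */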
	mov	x5, x3				// preserve temp_pte arg
	mrs	swapper_ttb, ttbr1_el1
	adr_l	flag_ptr, __idmap_kpti_flag

	cbnz	cpu, __idmap_kpti_secondary

	/* We're the boot CPU. Wait for the others to catch up */
	sevl
1:	wfe
	ldaxr	w17, [flag_ptr]
	eor	w17, w17, num_cpus
	cbnz	w17, 1b

	/* Switch to the temporary page tables on this CPU only */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	offset_ttbr1 temp_pgd_phys, x8
	msr	ttbr1_el1, temp_pgd_phys
	isb

	mov	temp_pte, x5
	mov	pte_flags, #KPTI_NG_PTE_FLAGS

	/* Everybody is enjoying the idmap, so we can rewrite swapper. */
	/* PGD */
	adrp		cur_pgdp, swapper_pg_dir
	kpti_map_pgtbl	pgd, 0
	kpti_mk_tbl_ng	pgd, PTRS_PER_PGD

	/* Ensure all the updated entries are visible to secondary CPUs */
	dsb	ishst

	/* We're done: fire up swapper_pg_dir again */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	msr	ttbr1_el1, swapper_ttb
	isb

	/* Set the flag to zero to indicate that we're all done */
	str	wzr, [flag_ptr]
	ret

.Lderef_pgd:
	/* PUD */
	.if		CONFIG_PGTABLE_LEVELS > 3
	pud		.req	x10
	pte_to_phys	cur_pudp, pgd
	kpti_map_pgtbl	pud, 1
	kpti_mk_tbl_ng	pud, PTRS_PER_PUD
	b		.Lnext_pgd
	.else		/* CONFIG_PGTABLE_LEVELS <= 3 */
	pud		.req	pgd
	.set		.Lnext_pud, .Lnext_pgd
	.endif

.Lderef_pud:
	/* PMD */
	.if		CONFIG_PGTABLE_LEVELS > 2
	pmd		.req	x13
	pte_to_phys	cur_pmdp, pud
	kpti_map_pgtbl	pmd, 2
	kpti_mk_tbl_ng	pmd, PTRS_PER_PMD
	b		.Lnext_pud
	.else		/* CONFIG_PGTABLE_LEVELS <= 2 */
	pmd		.req	pgd
	.set		.Lnext_pmd, .Lnext_pgd
	.endif

.Lderef_pmd:
	/* PTE */
	pte_to_phys	cur_ptep, pmd
	kpti_map_pgtbl	pte, 3
	kpti_mk_tbl_ng	pte, PTRS_PER_PTE
	b		.Lnext_pmd

	.unreq	cpu
	.unreq	temp_pte
	.unreq	num_cpus
	.unreq	pte_flags
	.unreq	temp_pgd_phys
	.unreq	cur_pgdp
	.unreq	end_pgdp
	.unreq	pgd
	.unreq	cur_pudp
	.unreq	end_pudp
	.unreq	pud
	.unreq	cur_pmdp
	.unreq	end_pmdp
	.unreq	pmd
	.unreq	cur_ptep
	.unreq	end_ptep
	.unreq	pte
	.unreq	valid

	/* Secondary CPUs end up here */
__idmap_kpti_secondary:
	/* Uninstall swapper before surgery begins */
	__idmap_cpu_set_reserved_ttbr1 x16, x17
	/* Increment the flag to let the boot CPU know we're ready */
1:	ldxr	w16, [flag_ptr]
	add	w16, w16, #1
	stxr	w17, w16, [flag_ptr]
	cbnz	w17, 1b

	/* Wait for the boot CPU to finish messing around with swapper */
	sevl
1:	wfe
	ldxr	w16, [flag_ptr]
	cbnz	w16, 1b

	/* All done, act like nothing happened */
	msr	ttbr1_el1, swapper_ttb
	isb
	ret

	.unreq	swapper_ttb
	.unreq	flag_ptr
SYM_FUNC_END(idmap_kpti_install_ng_mappings)
	.popsection
#endif

/*
 *	__cpu_setup
 *
 *	Initialise the processor for turning the MMU on.
 *
 * Input:
 *	x0 - actual number of VA bits (ignored unless VA_BITS > 48)
 * Output:
 *	Return in x0 the value of the SCTLR_EL1 register.
 */
	.pushsection ".idmap.text", "awx"
SYM_FUNC_START(__cpu_setup)
	tlbi	vmalle1				// Invalidate local TLB
	dsb	nsh

	mov	x1, #3 << 20
	msr	cpacr_el1, x1			// Enable FP/ASIMD
	mov	x1, #1 << 12			// Reset mdscr_el1 and disable
	msr	mdscr_el1, x1			// access to the DCC from EL0
	isb					// Unmask debug exceptions now,
	enable_dbg				// since this is per-cpu
	reset_pmuserenr_el0 x1			// Disable PMU access from EL0
	reset_amuserenr_el0 x1			// Disable AMU access from EL0

	/*
	 * Default values for VMSA control registers. These will be adjusted
	 * below depending on detected CPU features.
	 */
	mair	.req	x17
	tcr	.req	x16
	mov_q	mair, MAIR_EL1_SET
	mov_q	tcr, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
			TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS

	tcr_clear_errata_bits tcr, x9, x5

#ifdef CONFIG_ARM64_VA_BITS_52
	sub		x9, xzr, x0		// x9 = -(number of VA bits)
	add		x9, x9, #64		// x9 = 64 - VA bits, i.e. the TxSZ value
	tcr_set_t1sz	tcr, x9
#else
	idmap_get_t0sz	x9
#endif
	tcr_set_t0sz	tcr, x9

	/*
	 * Set the IPS bits in TCR_EL1.
	 */
	tcr_compute_pa_size tcr, #TCR_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM
	/*
	 * Enable hardware update of the Access Flags bit.
	 * Hardware dirty bit management is enabled later,
	 * via capabilities.
	 */
	mrs	x9, ID_AA64MMFR1_EL1
	and	x9, x9, #0xf
	cbz	x9, 1f
	orr	tcr, tcr, #TCR_HA		// hardware Access flag update
1:
#endif	/* CONFIG_ARM64_HW_AFDBM */
	msr	mair_el1, mair
	msr	tcr_el1, tcr
	/*
	 * Prepare SCTLR
	 */
	mov_q	x0, INIT_SCTLR_EL1_MMU_ON
	ret					// return to head.S

	.unreq	mair
	.unreq	tcr
SYM_FUNC_END(__cpu_setup)