/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm-generic/export.h>

#include <asm/alternative.h>
#include <asm/asm-bug.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

	/*
	 * Provide a wxN alias for each wN register so that we can paste an xN
	 * reference after a 'w' to obtain the 32-bit version.
	 */
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
	wx\n	.req	w\n
	.endr

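	/*
	 * Illustrative use of the wxN aliases (hypothetical macro, not part
	 * of this header): for a numeric argument \n, "wx\n" names the 32-bit
	 * view of register x\n.
	 *
	 *	.macro	zero_reg, n
	 *	mov	wx\n, wzr		// also clears the upper 32 bits of x\n
	 *	.endm
	 */
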
	.macro disable_daif
	msr	daifset, #0xf
	.endm

	.macro enable_daif
	msr	daifclr, #0xf
	.endm

/*
 * Save/restore interrupts.
 */
	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #3
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm

	.macro	enable_dbg
	msr	daifclr, #8
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm

	/* call with daif masked */
	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
9990:
	.endm

/*
 * RAS Error Synchronization barrier
 */
	.macro  esb
#ifdef CONFIG_ARM64_RAS_EXTN
	hint    #16
#else
	nop
#endif
	.endm

/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm

/*
 * Clear Branch History instruction
 */
	.macro clearbhb
	hint	#22
	.endm

/*
 * Speculation barrier
 */
	.macro	sb
alternative_if_not ARM64_HAS_SB
	dsb	nsh
	isb
alternative_else
	SB_BARRIER_INSN
	nop
alternative_endif
	.endm

/*
 * NOP sequence
 */
	.macro	nops, num
	.rept	\num
	nop
	.endr
	.endm

/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Vector entry
 */
	.macro	ventry	label
	.align	7
	b	\label
	.endm

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm

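/*
 * Example (illustrative only): merge two 32-bit halves, held in w0 (low)
 * and w1 (high) on a little-endian kernel, into a single 64-bit value in
 * x0. The endian-dependent parameter order above makes the same
 * invocation correct for both endiannesses.
 *
 *	regs_to_64	x0, x0, x1
 */
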
/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 */
	.macro	adr_l, dst, sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm

	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm

	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register used to calculate the
	 *       address, since <src> must be preserved
	 */
	.macro	str_l, src, sym, tmp
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
	.endm

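	/*
	 * Example (illustrative only; memstart_addr is a real kernel symbol,
	 * the surrounding code is hypothetical):
	 *
	 *	adr_l	x0, memstart_addr		// x0 = &memstart_addr
	 *	ldr_l	x1, memstart_addr		// x1 = memstart_addr
	 *	str_l	x1, memstart_addr, x2		// store x1 back, x2 is scratch
	 */
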
	/*
	 * @dst: destination register
	 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
	.macro	get_this_cpu_offset, dst
	mrs	\dst, tpidr_el2
	.endm
#else
	.macro	get_this_cpu_offset, dst
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\dst, tpidr_el1
alternative_else
	mrs	\dst, tpidr_el2
alternative_endif
	.endm

	.macro	set_this_cpu_offset, src
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	msr	tpidr_el1, \src
alternative_else
	msr	tpidr_el2, \src
alternative_endif
	.endm
#endif

	/*
	 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro adr_this_cpu, dst, sym, tmp
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
	get_this_cpu_offset \tmp
	add	\dst, \dst, \tmp
	.endm

	/*
	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
	get_this_cpu_offset \tmp
	ldr	\dst, [\dst, \tmp]
	.endm

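	/*
	 * Example (illustrative; "foo_counter" is a hypothetical per-cpu
	 * variable):
	 *
	 *	adr_this_cpu	x0, foo_counter, x1	// x0 = this_cpu_ptr(&foo_counter)
	 *	ldr_this_cpu	x2, foo_counter, x1	// x2 = __this_cpu_read(foo_counter)
	 */
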
/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * read_ctr - read CTR_EL0. If the system has mismatched register fields,
 * provide the system-wide safe value from arm64_ftr_reg_ctrel0.sys_val.
 */
	.macro	read_ctr, reg
#ifndef __KVM_NVHE_HYPERVISOR__
alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
#else
alternative_if_not ARM64_KVM_PROTECTED_MODE
	ASM_BUG()
alternative_else_nop_endif
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_compute_final_ctr_el0
	movz	\reg, #0
	movk	\reg, #0, lsl #16
	movk	\reg, #0, lsl #32
	movk	\reg, #0, lsl #48
alternative_cb_end
#endif
	.endm

/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm		\tmp, \tmp, #16, #19	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and		\tmp, \tmp, #0xf	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

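/*
 * Example (illustrative): fetch the system-safe line sizes before
 * hand-rolling a by-line maintenance loop.
 *
 *	dcache_line_size x2, x3		// x2 = safe D-cache line size, x3 clobbered
 *	icache_line_size x4, x3		// x4 = safe I-cache line size
 */
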
/*
 * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_t0sz, valreg, t0sz
	bfi	\valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * tcr_set_t1sz - update TCR.T1SZ
 */
	.macro	tcr_set_t1sz, valreg, t1sz
	bfi	\valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * idmap_get_t0sz - get the T0SZ value needed to cover the ID map
 *
 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
 * this number conveniently equals the number of leading zeroes in
 * the physical address of _end.
 */
	.macro	idmap_get_t0sz, reg
	adrp	\reg, _end
	orr	\reg, \reg, #(1 << VA_BITS_MIN) - 1
	clz	\reg, \reg
	.endm

/*
 * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
 * ID_AA64MMFR0_EL1.PARange value
 *
 *	tcr:		register with the TCR_ELx value to be updated
 *	pos:		IPS or PS bitfield position
 *	tmp{0,1}:	temporary registers
 */
	.macro	tcr_compute_pa_size, tcr, pos, tmp0, tmp1
	mrs	\tmp0, ID_AA64MMFR0_EL1
	// Narrow PARange to fit the PS field in TCR_ELx
	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
	mov	\tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
	cmp	\tmp0, \tmp1
	csel	\tmp0, \tmp1, \tmp0, hi
	bfi	\tcr, \tmp0, \pos, #3
	.endm

	.macro __dcache_op_workaround_clean_cache, op, addr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \addr
alternative_else
	dc	civac, \addr
alternative_endif
	.endm

/*
 * Macro to perform data cache maintenance for the interval
 * [start, end) with the dcache line size explicitly provided.
 *
 * 	op:		operation passed to dc instruction
 * 	domain:		domain used in dsb instruction
 * 	start:          starting virtual address of the region
 * 	end:            end virtual address of the region
 *	linesz:		dcache line size
 * 	fixup:		optional label to branch to on user fault
 * 	Corrupts:       start, end, tmp
 */
	.macro dcache_by_myline_op op, domain, start, end, linesz, tmp, fixup
	sub	\tmp, \linesz, #1
	bic	\start, \start, \tmp
.Ldcache_op\@:
	.ifc	\op, cvau
	__dcache_op_workaround_clean_cache \op, \start
	.else
	.ifc	\op, cvac
	__dcache_op_workaround_clean_cache \op, \start
	.else
	.ifc	\op, cvap
	sys	3, c7, c12, 1, \start	// dc cvap
	.else
	.ifc	\op, cvadp
	sys	3, c7, c13, 1, \start	// dc cvadp
	.else
	dc	\op, \start
	.endif
	.endif
	.endif
	.endif
	add	\start, \start, \linesz
	cmp	\start, \end
	b.lo	.Ldcache_op\@
	dsb	\domain

	_cond_uaccess_extable .Ldcache_op\@, \fixup
	.endm

/*
 * Macro to perform data cache maintenance for the interval
 * [start, end)
 *
 * 	op:		operation passed to dc instruction
 * 	domain:		domain used in dsb instruction
 * 	start:          starting virtual address of the region
 * 	end:            end virtual address of the region
 * 	fixup:		optional label to branch to on user fault
 * 	Corrupts:       start, end, tmp1, tmp2
 */
	.macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
	dcache_line_size \tmp1, \tmp2
	dcache_by_myline_op \op, \domain, \start, \end, \tmp1, \tmp2, \fixup
	.endm

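/*
 * Example (illustrative): clean and invalidate the buffer [x0, x1) to the
 * point of coherency, with no user-fault fixup label.
 *
 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
 */
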
4453036ec59SPasha Tatashin /*
4464fee9473SMarc Zyngier  * Macro to perform an instruction cache maintenance for the interval
4474fee9473SMarc Zyngier  * [start, end)
4484fee9473SMarc Zyngier  *
4494fee9473SMarc Zyngier  * 	start, end:	virtual addresses describing the region
450d11b1877SMark Rutland  *	fixup:		optional label to branch to on user fault
4514fee9473SMarc Zyngier  * 	Corrupts:	tmp1, tmp2
4524fee9473SMarc Zyngier  */
453d11b1877SMark Rutland 	.macro invalidate_icache_by_line start, end, tmp1, tmp2, fixup
4544fee9473SMarc Zyngier 	icache_line_size \tmp1, \tmp2
4554fee9473SMarc Zyngier 	sub	\tmp2, \tmp1, #1
4564fee9473SMarc Zyngier 	bic	\tmp2, \start, \tmp2
457d11b1877SMark Rutland .Licache_op\@:
458d11b1877SMark Rutland 	ic	ivau, \tmp2			// invalidate I line PoU
4594fee9473SMarc Zyngier 	add	\tmp2, \tmp2, \tmp1
4604fee9473SMarc Zyngier 	cmp	\tmp2, \end
461d11b1877SMark Rutland 	b.lo	.Licache_op\@
4624fee9473SMarc Zyngier 	dsb	ish
4634fee9473SMarc Zyngier 	isb
464d11b1877SMark Rutland 
465e4208e80STong Tiangen 	_cond_uaccess_extable .Licache_op\@, \fixup
4664fee9473SMarc Zyngier 	.endm
4674fee9473SMarc Zyngier 
/*
 * load_ttbr1 - install @pgtbl as a TTBR1 page table
 * pgtbl preserved
 * tmp1/tmp2 clobbered, either may overlap with pgtbl
 */
	.macro		load_ttbr1, pgtbl, tmp1, tmp2
	phys_to_ttbr	\tmp1, \pgtbl
	offset_ttbr1	\tmp1, \tmp2
	msr		ttbr1_el1, \tmp1
	isb
	.endm

/*
 * To prevent the possibility of old and new partial table walks being visible
 * in the TLB, switch the TTBR to a zero page when we invalidate the old
 * records. See D4.7.1 'General TLB maintenance requirements' in ARM DDI 0487A.i.
 * Even switching to our copied tables will cause a changed output address at
 * each stage of the walk.
 */
	.macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
	phys_to_ttbr \tmp, \zero_page
	msr	ttbr1_el1, \tmp
	isb
	tlbi	vmalle1
	dsb	nsh
	load_ttbr1 \page_table, \tmp, \tmp2
	.endm

4963744b528SPasha Tatashin /*
4977b7293aeSGeoff Levand  * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
4987b7293aeSGeoff Levand  */
4997b7293aeSGeoff Levand 	.macro	reset_pmuserenr_el0, tmpreg
500f6e56435SAlexandru Elisei 	mrs	\tmpreg, id_aa64dfr0_el1
501fcf37b38SMark Brown 	sbfx	\tmpreg, \tmpreg, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
5027b7293aeSGeoff Levand 	cmp	\tmpreg, #1			// Skip if no PMU present
5037b7293aeSGeoff Levand 	b.lt	9000f
5047b7293aeSGeoff Levand 	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
5057b7293aeSGeoff Levand 9000:
5067b7293aeSGeoff Levand 	.endm
5077b7293aeSGeoff Levand 
5087b7293aeSGeoff Levand /*
50987a1f063SIonela Voinescu  * reset_amuserenr_el0 - reset AMUSERENR_EL0 if AMUv1 present
51087a1f063SIonela Voinescu  */
51187a1f063SIonela Voinescu 	.macro	reset_amuserenr_el0, tmpreg
51287a1f063SIonela Voinescu 	mrs	\tmpreg, id_aa64pfr0_el1	// Check ID_AA64PFR0_EL1
51355adc08dSMark Brown 	ubfx	\tmpreg, \tmpreg, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
51487a1f063SIonela Voinescu 	cbz	\tmpreg, .Lskip_\@		// Skip if no AMU present
51587a1f063SIonela Voinescu 	msr_s	SYS_AMUSERENR_EL0, xzr		// Disable AMU access from EL0
51687a1f063SIonela Voinescu .Lskip_\@:
51787a1f063SIonela Voinescu 	.endm
51887a1f063SIonela Voinescu /*
5195003dbdeSGeoff Levand  * copy_page - copy src to dest using temp registers t1-t8
5205003dbdeSGeoff Levand  */
5215003dbdeSGeoff Levand 	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
5225003dbdeSGeoff Levand 9998:	ldp	\t1, \t2, [\src]
5235003dbdeSGeoff Levand 	ldp	\t3, \t4, [\src, #16]
5245003dbdeSGeoff Levand 	ldp	\t5, \t6, [\src, #32]
5255003dbdeSGeoff Levand 	ldp	\t7, \t8, [\src, #48]
5265003dbdeSGeoff Levand 	add	\src, \src, #64
5275003dbdeSGeoff Levand 	stnp	\t1, \t2, [\dest]
5285003dbdeSGeoff Levand 	stnp	\t3, \t4, [\dest, #16]
5295003dbdeSGeoff Levand 	stnp	\t5, \t6, [\dest, #32]
5305003dbdeSGeoff Levand 	stnp	\t7, \t8, [\dest, #48]
5315003dbdeSGeoff Levand 	add	\dest, \dest, #64
5325003dbdeSGeoff Levand 	tst	\src, #(PAGE_SIZE - 1)
5335003dbdeSGeoff Levand 	b.ne	9998b
5345003dbdeSGeoff Levand 	.endm
5355003dbdeSGeoff Levand 
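/*
 * Example (illustrative): copy one page from the address in x1 to the
 * address in x0, using x2-x9 as scratch. Both pointers are advanced to
 * the end of their respective pages.
 *
 *	copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 */
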
/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define EXPORT_SYMBOL_NOKASAN(name)
#else
#define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
#endif

	/*
	 * Emit a 64-bit absolute little endian symbol reference in a way that
	 * ensures that it will be resolved at build time, even when building a
	 * PIE binary. This requires cooperation from the linker script, which
	 * must emit the lo32/hi32 halves individually.
	 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm

	/*
	 * mov_q - move an immediate constant into a 64-bit register using
	 *         between 2 and 4 movz/movk instructions (depending on the
	 *         magnitude and sign of the operand)
	 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm

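	/*
	 * Example (illustrative): a constant that needs all four 16-bit
	 * chunks expands to one movz plus three movk instructions here.
	 *
	 *	mov_q	x0, 0x0123456789abcdef
	 */
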
/*
 * Return the current task_struct.
 */
	.macro	get_current_task, rd
	mrs	\rd, sp_el0
	.endm

/*
 * Offset ttbr1 to allow for 48-bit kernel VAs set with 52-bit PTRS_PER_PGD.
 * orr is used as it can cover the immediate value (and is idempotent).
 * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
 * 	ttbr: Value of ttbr to set, modified.
 */
	.macro	offset_ttbr1, ttbr, tmp
#ifdef CONFIG_ARM64_VA_BITS_52
	mrs_s	\tmp, SYS_ID_AA64MMFR2_EL1
	and	\tmp, \tmp, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT)
	cbnz	\tmp, .Lskipoffs_\@
	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
.Lskipoffs_\@:
#endif
	.endm

/*
 * Arrange a physical address in a TTBR register, taking care of 52-bit
 * addresses.
 *
 * 	phys:	physical address, preserved
 * 	ttbr:	returns the TTBR value
 */
	.macro	phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\ttbr, \phys, \phys, lsr #46
	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
#else
	mov	\ttbr, \phys
#endif
	.endm

	.macro	phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	/*
	 * We assume \phys is 64K aligned and this is guaranteed by only
	 * supporting this configuration with 64K pages.
	 */
	orr	\pte, \phys, \phys, lsr #36
	and	\pte, \pte, #PTE_ADDR_MASK
#else
	mov	\pte, \phys
#endif
	.endm

	.macro	pte_to_phys, phys, pte
	and	\phys, \pte, #PTE_ADDR_MASK
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\phys, \phys, \phys, lsl #PTE_ADDR_HIGH_SHIFT
	and	\phys, \phys, GENMASK_ULL(PHYS_MASK_SHIFT - 1, PAGE_SHIFT)
#endif
	.endm

/*
 * tcr_clear_errata_bits - Clear TCR bits that trigger an erratum on this CPU.
 */
	.macro	tcr_clear_errata_bits, tcr, tmp1, tmp2
#ifdef CONFIG_FUJITSU_ERRATUM_010001
	mrs	\tmp1, midr_el1

	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK
	and	\tmp1, \tmp1, \tmp2
	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001
	cmp	\tmp1, \tmp2
	b.ne	10f

	mov_q	\tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001
	bic	\tcr, \tcr, \tmp2
10:
#endif /* CONFIG_FUJITSU_ERRATUM_010001 */
	.endm

/**
 * Errata workaround prior to disabling the MMU. Insert an ISB immediately
 * prior to executing the MSR that will change SCTLR_ELn[M] from a value of 1
 * to 0.
 */
	.macro pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
#endif
	.endm

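	/*
	 * Example (illustrative): the workaround is placed immediately before
	 * the write that clears SCTLR_ELx.M.
	 *
	 *	pre_disable_mmu_workaround
	 *	msr	sctlr_el1, x0		// x0 has the M bit clear
	 *	isb
	 */
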
	/*
	 * frame_push - Push @regcount callee saved registers to the stack,
	 *              starting at x19, as well as x29/x30, and set x29 to
	 *              the new value of sp. Add @extra bytes of stack space
	 *              for locals.
	 */
	.macro		frame_push, regcount:req, extra
	__frame		st, \regcount, \extra
	.endm

	/*
	 * frame_pop  - Pop the callee saved registers from the stack that were
	 *              pushed in the most recent call to frame_push, as well
	 *              as x29/x30 and any extra stack space that may have been
	 *              allocated.
	 */
	.macro		frame_pop
	__frame		ld
	.endm

	.macro		__frame_regs, reg1, reg2, op, num
	.if		.Lframe_regcount == \num
	\op\()r		\reg1, [sp, #(\num + 1) * 8]
	.elseif		.Lframe_regcount > \num
	\op\()p		\reg1, \reg2, [sp, #(\num + 1) * 8]
	.endif
	.endm

	.macro		__frame, op, regcount, extra=0
	.ifc		\op, st
	.if		(\regcount) < 0 || (\regcount) > 10
	.error		"regcount should be in the range [0 ... 10]"
	.endif
	.if		((\extra) % 16) != 0
	.error		"extra should be a multiple of 16 bytes"
	.endif
	.ifdef		.Lframe_regcount
	.if		.Lframe_regcount != -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	.endif
	.set		.Lframe_regcount, \regcount
	.set		.Lframe_extra, \extra
	.set		.Lframe_local_offset, ((\regcount + 3) / 2) * 16
	stp		x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
	mov		x29, sp
	.endif

	__frame_regs	x19, x20, \op, 1
	__frame_regs	x21, x22, \op, 3
	__frame_regs	x23, x24, \op, 5
	__frame_regs	x25, x26, \op, 7
	__frame_regs	x27, x28, \op, 9

	.ifc		\op, ld
	.if		.Lframe_regcount == -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	ldp		x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
	.set		.Lframe_regcount, -1
	.endif
	.endm

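	/*
	 * Example (hypothetical function, not part of this header): preserve
	 * x19-x21 plus x29/x30 and reserve 16 bytes of local stack space,
	 * then tear it all down before returning.
	 *
	 *	SYM_FUNC_START(example_func)
	 *	frame_push	3, 16
	 *	...				// x19-x21 and the locals are usable here
	 *	frame_pop
	 *	ret
	 *	SYM_FUNC_END(example_func)
	 */
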
/*
 * Set SCTLR_ELx to the @reg value, and invalidate the local icache
 * in the process. This is used when turning the MMU on.
 */
.macro set_sctlr, sreg, reg
	msr	\sreg, \reg
	isb
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh
	isb
.endm

.macro set_sctlr_el1, reg
	set_sctlr sctlr_el1, \reg
.endm

.macro set_sctlr_el2, reg
	set_sctlr sctlr_el2, \reg
.endm

	/*
	 * Check whether preempt/bh-disabled asm code should yield as soon as
	 * it is able. This is the case if we are currently running in task
	 * context, and either a softirq is pending, or the TIF_NEED_RESCHED
	 * flag is set and re-enabling preemption a single time would result in
	 * a preempt count of zero. (Note that the TIF_NEED_RESCHED flag is
	 * stored negated in the top word of the thread_info::preempt_count
	 * field)
	 */
	.macro		cond_yield, lbl:req, tmp:req, tmp2:req
	get_current_task \tmp
	ldr		\tmp, [\tmp, #TSK_TI_PREEMPT]
	/*
	 * If we are serving a softirq, there is no point in yielding: the
	 * softirq will not be preempted no matter what we do, so we should
	 * run to completion as quickly as we can.
	 */
	tbnz		\tmp, #SOFTIRQ_SHIFT, .Lnoyield_\@
#ifdef CONFIG_PREEMPTION
	sub		\tmp, \tmp, #PREEMPT_DISABLE_OFFSET
	cbz		\tmp, \lbl
#endif
	adr_l		\tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
	get_this_cpu_offset	\tmp2
	ldr		w\tmp, [\tmp, \tmp2]
	cbnz		w\tmp, \lbl	// yield on pending softirq in task context
.Lnoyield_\@:
	.endm

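	/*
	 * Example (illustrative, modelled on the arm64 crypto walkers):
	 * between blocks of work, branch to a state-saving label if a yield
	 * is due, using x8/x9 as scratch.
	 *
	 *	cond_yield	3f, x8, x9	// 3f saves state and reschedules
	 */
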
/*
 * Branch Target Identifier (BTI)
 */
	.macro  bti, targets
	.equ	.L__bti_targets_c, 34
	.equ	.L__bti_targets_j, 36
	.equ	.L__bti_targets_jc, 38
	hint	#.L__bti_targets_\targets
	.endm

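	/*
	 * Example (illustrative): mark a branch target that is entered via an
	 * indirect call ("bti c" emits hint #34).
	 *
	 *	SYM_CODE_START(example_entry)
	 *	bti	c
	 *	...
	 */
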
/*
 * This macro emits a program property note section identifying
 * architecture features which require special handling, mainly for
 * use in assembly files included in the VDSO.
 */

#define NT_GNU_PROPERTY_TYPE_0  5
#define GNU_PROPERTY_AARCH64_FEATURE_1_AND      0xc0000000

#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI      (1U << 0)
#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC      (1U << 1)

#ifdef CONFIG_ARM64_BTI_KERNEL
#define GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT		\
		((GNU_PROPERTY_AARCH64_FEATURE_1_BTI |	\
		  GNU_PROPERTY_AARCH64_FEATURE_1_PAC))
#endif

#ifdef GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
.macro emit_aarch64_feature_1_and, feat=GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
	.pushsection .note.gnu.property, "a"
	.align  3
	.long   2f - 1f
	.long   6f - 3f
	.long   NT_GNU_PROPERTY_TYPE_0
1:      .string "GNU"
2:
	.align  3
3:      .long   GNU_PROPERTY_AARCH64_FEATURE_1_AND
	.long   5f - 4f
4:
	/*
	 * This is described with an array of char in the Linux API
	 * spec but the text and all other usage (including binutils,
	 * clang and GCC) treat this as a 32 bit value so no swizzling
	 * is required for big endian.
	 */
	.long   \feat
5:
	.align  3
6:
	.popsection
.endm

#else
.macro emit_aarch64_feature_1_and, feat=0
.endm

#endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */

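/*
 * Example (illustrative): an assembly file included in the VDSO would
 * typically end with the default note so the resulting object advertises
 * BTI/PAC support when the kernel is built with them.
 *
 *	emit_aarch64_feature_1_and
 */
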
	.macro __mitigate_spectre_bhb_loop      tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_iter
	mov	\tmp, #32		// Patched to correct the immediate
alternative_cb_end
.Lspectre_bhb_loop\@:
	b	. + 4
	subs	\tmp, \tmp, #1
	b.ne	.Lspectre_bhb_loop\@
	sb
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

	.macro mitigate_spectre_bhb_loop	tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_mitigation_enable
	b	.L_spectre_bhb_loop_done\@	// Patched to NOP
alternative_cb_end
	__mitigate_spectre_bhb_loop	\tmp
.L_spectre_bhb_loop_done\@:
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

	/* Saves/restores x0-x3 to the stack */
	.macro __mitigate_spectre_bhb_fw
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	stp	x0, x1, [sp, #-16]!
	stp	x2, x3, [sp, #-16]!
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_3
alternative_cb ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
	ldp	x2, x3, [sp], #16
	ldp	x0, x1, [sp], #16
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

	.macro mitigate_spectre_bhb_clear_insn
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_clearbhb
	/* Patched to NOP when not supported */
	clearbhb
	isb
alternative_cb_end
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm
#endif	/* __ASM_ASSEMBLER_H */