/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm-generic/export.h>

#include <asm/asm-offsets.h>
#include <asm/alternative.h>
#include <asm/asm-bug.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

	/*
	 * Provide a wxN alias for each wN register so that we can paste an xN
	 * reference after a 'w' to obtain the 32-bit version.
	 */
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
	wx\n	.req	w\n
	.endr

	.macro save_and_disable_daif, flags
	mrs	\flags, daif
	msr	daifset, #0xf
	.endm

	.macro disable_daif
	msr	daifset, #0xf
	.endm

	.macro enable_daif
	msr	daifclr, #0xf
	.endm

	.macro	restore_daif, flags:req
	msr	daif, \flags
	.endm
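
	/*
	 * Illustrative usage sketch (not part of the original header): mask
	 * all DAIF exceptions around a critical section and restore the
	 * previous state afterwards. The choice of x2 as the scratch register
	 * is arbitrary.
	 *
	 *	save_and_disable_daif x2	// x2 := old DAIF, D/A/I/F masked
	 *	// ...critical section...
	 *	restore_daif x2			// put the saved flags back
	 */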

	/* IRQ/FIQ are the lowest priority flags; unconditionally unmask the rest. */
	.macro enable_da
	msr	daifclr, #(8 | 4)
	.endm

/*
 * Save/restore interrupts.
 */
	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #3
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm

	.macro	enable_dbg
	msr	daifclr, #8
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm

	/* call with daif masked */
	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
9990:
	.endm

/*
 * RAS Error Synchronization barrier
 */
	.macro  esb
#ifdef CONFIG_ARM64_RAS_EXTN
	hint    #16
#else
	nop
#endif
	.endm

/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm

/*
 * Speculation barrier
 */
	.macro	sb
alternative_if_not ARM64_HAS_SB
	dsb	nsh
	isb
alternative_else
	SB_BARRIER_INSN
	nop
alternative_endif
	.endm

/*
 * NOP sequence
 */
	.macro	nops, num
	.rept	\num
	nop
	.endr
	.endm

/*
 * Emit an entry into the exception table
 */
	.macro		_asm_extable, from, to
	.pushsection	__ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
	.endm

#define USER(l, x...)				\
9999:	x;					\
	_asm_extable	9999b, l
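
	/*
	 * Illustrative usage sketch (not part of the original header): wrap a
	 * single user-space access so that a fault branches to a local fixup
	 * label. The label, registers and fixup body are placeholders.
	 *
	 *	USER(9f, ldtrb w2, [x1])	// faulting load jumps to 9f
	 *	// ...normal path...
	 * 9:	// hypothetical fixup path, e.g. return -EFAULT
	 */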

/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Vector entry
 */
	.macro	ventry	label
	.align	7
	b	\label
	.endm
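
	/*
	 * Illustrative usage sketch (not part of the original header): build
	 * one slot of an exception vector table. The table itself must be
	 * suitably aligned; the table and handler labels are placeholders.
	 *
	 *	.align	11
	 * my_vectors:
	 *	ventry	my_sync_handler		// one 128-byte vector slot
	 */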

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm
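
	/*
	 * Illustrative usage sketch (not part of the original header):
	 * combine a low/high 32-bit pair into one 64-bit register. Register
	 * choices are arbitrary; on big-endian kernels the macro swaps the
	 * argument order for the caller.
	 *
	 *	regs_to_64 x2, w0, w1	// x2 = ((u64)w1 << 32) | w0 on LE
	 */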

/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 */
	.macro	adr_l, dst, sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm

	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm

	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register to calculate the address
	 *       while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
	.endm
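
	/*
	 * Illustrative usage sketch (not part of the original header): the
	 * symbol name 'my_var' and the registers are placeholders.
	 *
	 *	adr_l	x0, my_var		// x0 = &my_var
	 *	ldr_l	w1, my_var, x2		// w1 = my_var, x2 clobbered
	 *	str_l	w1, my_var, x2		// my_var = w1, x2 clobbered
	 */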

	/*
	 * @dst: destination register
	 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
	.macro	this_cpu_offset, dst
	mrs	\dst, tpidr_el2
	.endm
#else
	.macro	this_cpu_offset, dst
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\dst, tpidr_el1
alternative_else
	mrs	\dst, tpidr_el2
alternative_endif
	.endm
#endif

	/*
	 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro adr_this_cpu, dst, sym, tmp
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
	this_cpu_offset \tmp
	add	\dst, \dst, \tmp
	.endm

	/*
	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
	this_cpu_offset \tmp
	ldr	\dst, [\dst, \tmp]
	.endm
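
	/*
	 * Illustrative usage sketch (not part of the original header):
	 * 'my_pcpu_counter' is a hypothetical per-CPU variable; register
	 * choices are arbitrary.
	 *
	 *	adr_this_cpu x0, my_pcpu_counter, x1	// x0 = this CPU's copy
	 *	ldr_this_cpu x2, my_pcpu_counter, x3	// x2 = its current value
	 */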

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * read_ctr - read CTR_EL0. If the system has mismatched register fields,
 * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
#ifndef __KVM_NVHE_HYPERVISOR__
alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
#else
alternative_if_not ARM64_KVM_PROTECTED_MODE
	ASM_BUG()
alternative_else_nop_endif
alternative_cb kvm_compute_final_ctr_el0
	movz	\reg, #0
	movk	\reg, #0, lsl #16
	movk	\reg, #0, lsl #32
	movk	\reg, #0, lsl #48
alternative_cb_end
#endif
	.endm


/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm		\tmp, \tmp, #16, #19	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and		\tmp, \tmp, #0xf	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm
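
	/*
	 * Illustrative usage sketch (not part of the original header): obtain
	 * the system-safe cache line sizes before a by-line loop. Register
	 * choices are arbitrary.
	 *
	 *	dcache_line_size x2, x3		// x2 = D-line size, x3 clobbered
	 *	icache_line_size x4, x5		// x4 = I-line size, x5 clobbered
	 */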

/*
 * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_t0sz, valreg, t0sz
	bfi	\valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * tcr_set_t1sz - update TCR.T1SZ
 */
	.macro	tcr_set_t1sz, valreg, t1sz
	bfi	\valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
 * ID_AA64MMFR0_EL1.PARange value
 *
 *	tcr:		register with the TCR_ELx value to be updated
 *	pos:		IPS or PS bitfield position
 *	tmp{0,1}:	temporary registers
 */
	.macro	tcr_compute_pa_size, tcr, pos, tmp0, tmp1
	mrs	\tmp0, ID_AA64MMFR0_EL1
	// Narrow PARange to fit the PS field in TCR_ELx
	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
	mov	\tmp1, #ID_AA64MMFR0_PARANGE_MAX
	cmp	\tmp0, \tmp1
	csel	\tmp0, \tmp1, \tmp0, hi
	bfi	\tcr, \tmp0, \pos, #3
	.endm
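
	/*
	 * Illustrative usage sketch (not part of the original header): clamp
	 * the IPS field of a TCR_EL1 value held in x10. The field position
	 * and scratch registers below are placeholders for whatever the
	 * caller uses.
	 *
	 *	tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
	 */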

/*
 * Macro to perform a data cache maintenance operation for the interval
 * [kaddr, kaddr + size)
 *
 * 	op:		operation passed to dc instruction
 * 	domain:		domain used in dsb instruction
 * 	kaddr:		starting virtual address of the region
 * 	size:		size of the region
 * 	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro __dcache_op_workaround_clean_cache, op, kaddr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \kaddr
alternative_else
	dc	civac, \kaddr
alternative_endif
	.endm

	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2
9998:
	.ifc	\op, cvau
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvac
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvap
	sys	3, c7, c12, 1, \kaddr	// dc cvap
	.else
	.ifc	\op, cvadp
	sys	3, c7, c13, 1, \kaddr	// dc cvadp
	.else
	dc	\op, \kaddr
	.endif
	.endif
	.endif
	.endif
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm
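
	/*
	 * Illustrative usage sketch (not part of the original header): clean
	 * the buffer at [x0, x0 + x1) to the PoC. Both the address and size
	 * registers are corrupted, as noted above; x2/x3 are scratch.
	 *
	 *	dcache_by_line_op cvac, sy, x0, x1, x2, x3
	 */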

/*
 * Macro to perform an instruction cache maintenance operation for the
 * interval [start, end)
 *
 * 	start, end:	virtual addresses describing the region
 *	label:		A label to branch to on user fault.
 * 	Corrupts:	tmp1, tmp2
 */
	.macro invalidate_icache_by_line start, end, tmp1, tmp2, label
	icache_line_size \tmp1, \tmp2
	sub	\tmp2, \tmp1, #1
	bic	\tmp2, \start, \tmp2
9997:
USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
	add	\tmp2, \tmp2, \tmp1
	cmp	\tmp2, \end
	b.lo	9997b
	dsb	ish
	isb
	.endm
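
	/*
	 * Illustrative usage sketch (not part of the original header):
	 * invalidate the I-cache for [x0, x1), branching to a local fixup
	 * label if a user address faults. Labels and registers are
	 * placeholders.
	 *
	 *	invalidate_icache_by_line x0, x1, x2, x3, 9f
	 */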

/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1
	sbfx	\tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm

/*
 * reset_amuserenr_el0 - reset AMUSERENR_EL0 if AMUv1 present
 */
	.macro	reset_amuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64pfr0_el1	// Check ID_AA64PFR0_EL1
	ubfx	\tmpreg, \tmpreg, #ID_AA64PFR0_AMU_SHIFT, #4
	cbz	\tmpreg, .Lskip_\@		// Skip if no AMU present
	msr_s	SYS_AMUSERENR_EL0, xzr		// Disable AMU access from EL0
.Lskip_\@:
	.endm
/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm
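
	/*
	 * Illustrative usage sketch (not part of the original header): copy
	 * one page from [x1] to [x0]. Both pointers advance by PAGE_SIZE and
	 * the temporaries x2-x9 are clobbered.
	 *
	 *	copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
	 */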

/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define EXPORT_SYMBOL_NOKASAN(name)
#else
#define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
#endif

	/*
	 * Emit a 64-bit absolute little endian symbol reference in a way that
	 * ensures that it will be resolved at build time, even when building a
	 * PIE binary. This requires cooperation from the linker script, which
	 * must emit the lo32/hi32 halves individually.
	 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm

	/*
	 * mov_q - move an immediate constant into a 64-bit register using
	 *         between 2 and 4 movz/movk instructions (depending on the
	 *         magnitude and sign of the operand)
	 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm
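
	/*
	 * Illustrative usage sketch (not part of the original header): load an
	 * arbitrary 64-bit constant; mov_q picks the shortest movz/movk
	 * sequence for it at assembly time.
	 *
	 *	mov_q	x0, 0x00ff00ff00ff00ff
	 */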

/*
 * Return the current task_struct.
 */
	.macro	get_current_task, rd
	mrs	\rd, sp_el0
	.endm

/*
 * Offset ttbr1 to allow for 48-bit kernel VAs set with 52-bit PTRS_PER_PGD.
 * orr is used as it can cover the immediate value (and is idempotent).
 * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
 * 	ttbr: Value of ttbr to set, modified.
 */
	.macro	offset_ttbr1, ttbr, tmp
#ifdef CONFIG_ARM64_VA_BITS_52
	mrs_s	\tmp, SYS_ID_AA64MMFR2_EL1
	and	\tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
	cbnz	\tmp, .Lskipoffs_\@
	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
.Lskipoffs_\@ :
#endif
	.endm

/*
 * Perform the reverse of offset_ttbr1.
 * bic is used as it can cover the immediate value and, in future, won't need
 * to be nop'ed out when dealing with 52-bit kernel VAs.
 */
	.macro	restore_ttbr1, ttbr
#ifdef CONFIG_ARM64_VA_BITS_52
	bic	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
#endif
	.endm

/*
 * Arrange a physical address in a TTBR register, taking care of 52-bit
 * addresses.
 *
 * 	phys:	physical address, preserved
 * 	ttbr:	returns the TTBR value
 */
	.macro	phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\ttbr, \phys, \phys, lsr #46
	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
#else
	mov	\ttbr, \phys
#endif
	.endm

	.macro	phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	/*
	 * We assume \phys is 64K aligned and this is guaranteed by only
	 * supporting this configuration with 64K pages.
	 */
	orr	\pte, \phys, \phys, lsr #36
	and	\pte, \pte, #PTE_ADDR_MASK
#else
	mov	\pte, \phys
#endif
	.endm

	.macro	pte_to_phys, phys, pte
#ifdef CONFIG_ARM64_PA_BITS_52
	ubfiz	\phys, \pte, #(48 - 16 - 12), #16
	bfxil	\phys, \pte, #16, #32
	lsl	\phys, \phys, #16
#else
	and	\phys, \pte, #PTE_ADDR_MASK
#endif
	.endm

/*
 * tcr_clear_errata_bits - Clear TCR bits that trigger an erratum on this CPU.
 */
	.macro	tcr_clear_errata_bits, tcr, tmp1, tmp2
#ifdef CONFIG_FUJITSU_ERRATUM_010001
	mrs	\tmp1, midr_el1

	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK
	and	\tmp1, \tmp1, \tmp2
	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001
	cmp	\tmp1, \tmp2
	b.ne	10f

	mov_q	\tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001
	bic	\tcr, \tcr, \tmp2
10:
#endif /* CONFIG_FUJITSU_ERRATUM_010001 */
	.endm

/*
 * Errata workaround prior to disabling the MMU. Insert an ISB immediately
 * prior to executing the MSR that will change SCTLR_ELn[M] from a value of 1
 * to 0.
 */
	.macro pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
#endif
	.endm

	/*
	 * frame_push - Push @regcount callee saved registers to the stack,
	 *              starting at x19, as well as x29/x30, and set x29 to
	 *              the new value of sp. Add @extra bytes of stack space
	 *              for locals.
	 */
	.macro		frame_push, regcount:req, extra
	__frame		st, \regcount, \extra
	.endm

	/*
	 * frame_pop  - Pop the callee saved registers from the stack that were
	 *              pushed in the most recent call to frame_push, as well
	 *              as x29/x30 and any extra stack space that may have been
	 *              allocated.
	 */
	.macro		frame_pop
	__frame		ld
	.endm

	.macro		__frame_regs, reg1, reg2, op, num
	.if		.Lframe_regcount == \num
	\op\()r		\reg1, [sp, #(\num + 1) * 8]
	.elseif		.Lframe_regcount > \num
	\op\()p		\reg1, \reg2, [sp, #(\num + 1) * 8]
	.endif
	.endm

	.macro		__frame, op, regcount, extra=0
	.ifc		\op, st
	.if		(\regcount) < 0 || (\regcount) > 10
	.error		"regcount should be in the range [0 ... 10]"
	.endif
	.if		((\extra) % 16) != 0
	.error		"extra should be a multiple of 16 bytes"
	.endif
	.ifdef		.Lframe_regcount
	.if		.Lframe_regcount != -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	.endif
	.set		.Lframe_regcount, \regcount
	.set		.Lframe_extra, \extra
	.set		.Lframe_local_offset, ((\regcount + 3) / 2) * 16
	stp		x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
	mov		x29, sp
	.endif

	__frame_regs	x19, x20, \op, 1
	__frame_regs	x21, x22, \op, 3
	__frame_regs	x23, x24, \op, 5
	__frame_regs	x25, x26, \op, 7
	__frame_regs	x27, x28, \op, 9

	.ifc		\op, ld
	.if		.Lframe_regcount == -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	ldp		x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
	.set		.Lframe_regcount, -1
	.endif
	.endm
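
	/*
	 * Illustrative usage sketch (not part of the original header): the
	 * counts below are arbitrary. Save x19-x22 plus x29/x30 and reserve
	 * 16 bytes of local stack space, then undo it before returning.
	 *
	 *	frame_push 4, 16
	 *	// ...body may clobber x19-x22; locals start at sp + .Lframe_local_offset...
	 *	frame_pop
	 *	ret
	 */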

/*
 * Set SCTLR_ELx to the @reg value, and invalidate the local icache
 * in the process. This is called when setting the MMU on.
 */
.macro set_sctlr, sreg, reg
	msr	\sreg, \reg
	isb
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh
	isb
.endm

.macro set_sctlr_el1, reg
	set_sctlr sctlr_el1, \reg
.endm

.macro set_sctlr_el2, reg
	set_sctlr sctlr_el2, \reg
.endm

	/*
	 * Check whether preempt/bh-disabled asm code should yield as soon as
	 * it is able. This is the case if we are currently running in task
	 * context, and either a softirq is pending, or the TIF_NEED_RESCHED
	 * flag is set and re-enabling preemption a single time would result in
	 * a preempt count of zero. (Note that the TIF_NEED_RESCHED flag is
	 * stored negated in the top word of the thread_info::preempt_count
	 * field)
	 */
	.macro		cond_yield, lbl:req, tmp:req, tmp2:req
	get_current_task \tmp
	ldr		\tmp, [\tmp, #TSK_TI_PREEMPT]
	/*
	 * If we are serving a softirq, there is no point in yielding: the
	 * softirq will not be preempted no matter what we do, so we should
	 * run to completion as quickly as we can.
	 */
	tbnz		\tmp, #SOFTIRQ_SHIFT, .Lnoyield_\@
#ifdef CONFIG_PREEMPTION
	sub		\tmp, \tmp, #PREEMPT_DISABLE_OFFSET
	cbz		\tmp, \lbl
#endif
	adr_l		\tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
	this_cpu_offset	\tmp2
	ldr		w\tmp, [\tmp, \tmp2]
	cbnz		w\tmp, \lbl	// yield on pending softirq in task context
.Lnoyield_\@:
	.endm
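
	/*
	 * Illustrative usage sketch (not part of the original header): inside
	 * a long-running loop, jump to a local label where the caller can
	 * save its state and yield when a reschedule is due. The label and
	 * registers are placeholders.
	 *
	 *	cond_yield 3f, x8, x9
	 *	// ...keep going if no yield was needed...
	 */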

/*
 * This macro emits a program property note section identifying
 * architecture features which require special handling, mainly for
 * use in assembly files included in the VDSO.
 */

#define NT_GNU_PROPERTY_TYPE_0  5
#define GNU_PROPERTY_AARCH64_FEATURE_1_AND      0xc0000000

#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI      (1U << 0)
#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC      (1U << 1)

#ifdef CONFIG_ARM64_BTI_KERNEL
#define GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT		\
		((GNU_PROPERTY_AARCH64_FEATURE_1_BTI |	\
		  GNU_PROPERTY_AARCH64_FEATURE_1_PAC))
#endif

#ifdef GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
.macro emit_aarch64_feature_1_and, feat=GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
	.pushsection .note.gnu.property, "a"
	.align  3
	.long   2f - 1f
	.long   6f - 3f
	.long   NT_GNU_PROPERTY_TYPE_0
1:      .string "GNU"
2:
	.align  3
3:      .long   GNU_PROPERTY_AARCH64_FEATURE_1_AND
	.long   5f - 4f
4:
	/*
	 * This is described with an array of char in the Linux API
	 * spec but the text and all other usage (including binutils,
	 * clang and GCC) treat this as a 32 bit value so no swizzling
	 * is required for big endian.
	 */
	.long   \feat
5:
	.align  3
6:
	.popsection
.endm

#else
.macro emit_aarch64_feature_1_and, feat=0
.endm

#endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */
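
	/*
	 * Illustrative usage sketch (not part of the original header): place
	 * this at the end of a .S file (for example one built into the VDSO)
	 * so the object carries the GNU property note for the configured
	 * default features, or pass an explicit feature mask.
	 *
	 *	emit_aarch64_feature_1_and
	 *	emit_aarch64_feature_1_and GNU_PROPERTY_AARCH64_FEATURE_1_BTI
	 */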

#endif	/* __ASM_ASSEMBLER_H */