/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm-generic/export.h>

#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

	.macro save_and_disable_daif, flags
	mrs	\flags, daif
	msr	daifset, #0xf
	.endm

	.macro disable_daif
	msr	daifset, #0xf
	.endm

	.macro enable_daif
	msr	daifclr, #0xf
	.endm

	.macro	restore_daif, flags:req
	msr	daif, \flags
	.endm
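
	/*
	 * Illustrative pairing (hypothetical caller, not part of this file):
	 *
	 *	save_and_disable_daif x3	// x3 := DAIF, then mask D, A, I and F
	 *	// ... critical section ...
	 *	restore_daif x3			// put DAIF back as it was
	 */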

	/* Only valid for the AArch64 PSTATE; PSR_D_BIT differs on AArch32 */
	.macro	inherit_daif, pstate:req, tmp:req
	and	\tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
	msr	daif, \tmp
	.endm

	/* IRQ is the lowest priority flag; unconditionally unmask the rest. */
	.macro enable_da_f
	msr	daifclr, #(8 | 4 | 1)
	.endm

/*
 * Save/restore interrupts.
 */
	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #2
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm

	.macro	enable_dbg
	msr	daifclr, #8
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm

	/* call with daif masked */
	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
9990:
	.endm
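
	/*
	 * Illustrative use (register choice is hypothetical): callers are
	 * expected to load the thread flags first, e.g.
	 *
	 *	ldr	x1, [x0, #TSK_TI_FLAGS]	// x0 = current task_struct
	 *	disable_step_tsk x1, x2
	 */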

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb, opt
	dmb	\opt
	.endm

/*
 * RAS Error Synchronization barrier
 */
	.macro  esb
	hint    #16
	.endm

/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm

/*
 * Speculation barrier
 */
	.macro	sb
alternative_if_not ARM64_HAS_SB
	dsb	nsh
	isb
alternative_else
	SB_BARRIER_INSN
	nop
alternative_endif
	.endm

/*
 * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
 * of bounds.
 */
	.macro	mask_nospec64, idx, limit, tmp
	sub	\tmp, \idx, \limit
	bic	\tmp, \tmp, \idx
	and	\idx, \idx, \tmp, asr #63
	csdb
	.endm
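
	/*
	 * Worked example (illustrative): with idx = 3 and limit = 8,
	 * tmp = 3 - 8 is negative, bit 63 survives the bic, the asr #63
	 * mask is all ones and idx is preserved. With idx >= limit (or
	 * idx having bit 63 set) the mask is zero and idx is forced to 0.
	 */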

/*
 * NOP sequence
 */
	.macro	nops, num
	.rept	\num
	nop
	.endr
	.endm

/*
 * Emit an entry into the exception table
 */
	.macro		_asm_extable, from, to
	.pushsection	__ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
	.endm

#define USER(l, x...)				\
9999:	x;					\
	_asm_extable	9999b, l
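
/*
 * Illustrative use (label and instruction are hypothetical): wrap a user
 * access so that a fault in it branches to a local fixup label:
 *
 *	USER(9998f, ldtr x0, [x1])	// load that may fault
 *	...
 * 9998:				// fixup code for the faulting access
 */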

/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Vector entry
 */
	 .macro	ventry	label
	.align	7
	b	\label
	.endm

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm
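
/*
 * Example (illustrative): with x1 and x2 each holding a 32-bit half,
 *
 *	regs_to_64 x0, x1, x2
 *
 * yields x0 = x1 | (x2 << 32) on little-endian kernels; on big-endian
 * kernels the two source operands swap roles.
 */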

/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 */
	.macro	adr_l, dst, sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm

	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm

	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register used to calculate the
	 *       address, since <src> must be preserved.
	 */
	.macro	str_l, src, sym, tmp
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
	.endm
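
	/*
	 * Illustrative calls (the symbol names are just examples):
	 *
	 *	adr_l	x0, vectors			// x0 = &vectors
	 *	ldr_l	x1, memstart_addr		// x1 = memstart_addr
	 *	str_l	x2, memstart_addr, x3		// memstart_addr = x2, x3 clobbered
	 */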

	/*
	 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro adr_this_cpu, dst, sym, tmp
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	add	\dst, \dst, \tmp
	.endm

	/*
	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	ldr	\dst, [\dst, \tmp]
	.endm
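
	/*
	 * Example (illustrative; the per-cpu variable name is hypothetical):
	 *
	 *	adr_this_cpu x0, irq_stack_ptr, x1	// x0 = this_cpu_ptr(&irq_stack_ptr)
	 *	ldr_this_cpu x2, irq_stack_ptr, x1	// x2 = *this_cpu_ptr(&irq_stack_ptr)
	 */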

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
	.endm
/*
 * read_ctr - read CTR_EL0. If the system has mismatched register fields,
 * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
	.endm


/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm		\tmp, \tmp, #16, #19	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm
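
/*
 * Worked example (illustrative): CTR_EL0.DminLine (bits [19:16]) holds
 * log2(words per line), so DminLine = 4 gives 4 << 4 = 64 bytes.
 */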

/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and		\tmp, \tmp, #0xf	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_t0sz, valreg, t0sz
	bfi	\valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
 * ID_AA64MMFR0_EL1.PARange value
 *
 *	tcr:		register with the TCR_ELx value to be updated
 *	pos:		IPS or PS bitfield position
 *	tmp{0,1}:	temporary registers
 */
	.macro	tcr_compute_pa_size, tcr, pos, tmp0, tmp1
	mrs	\tmp0, ID_AA64MMFR0_EL1
	// Narrow PARange to fit the PS field in TCR_ELx
	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
	mov	\tmp1, #ID_AA64MMFR0_PARANGE_MAX
	cmp	\tmp0, \tmp1
	csel	\tmp0, \tmp1, \tmp0, hi
	bfi	\tcr, \tmp0, \pos, #3
	.endm
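
/*
 * Illustrative invocation (register choice is hypothetical): cap the IPS
 * field of a TCR_EL1 value held in x10:
 *
 *	tcr_compute_pa_size x10, #32, x5, x6	// IPS is TCR_EL1[34:32]
 */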

/*
 * Macro to perform data cache maintenance for the interval
 * [kaddr, kaddr + size)
 *
 * 	op:		operation passed to dc instruction
 * 	domain:		domain used in dsb instruction
 * 	kaddr:		starting virtual address of the region
 * 	size:		size of the region
 * 	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro __dcache_op_workaround_clean_cache, op, kaddr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \kaddr
alternative_else
	dc	civac, \kaddr
alternative_endif
	.endm

	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2
9998:
	.ifc	\op, cvau
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvac
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvap
	sys	3, c7, c12, 1, \kaddr	// dc cvap
	.else
	.ifc	\op, cvadp
	sys	3, c7, c13, 1, \kaddr	// dc cvadp
	.else
	dc	\op, \kaddr
	.endif
	.endif
	.endif
	.endif
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm
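
/*
 * Illustrative invocation (register choice is hypothetical): clean and
 * invalidate [x0, x0 + x1) to the point of coherency, with a full-system
 * barrier afterwards; x0, x1, x2 and x3 are clobbered:
 *
 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
 */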

/*
 * Macro to perform an instruction cache maintenance for the interval
 * [start, end)
 *
 * 	start, end:	virtual addresses describing the region
 *	label:		A label to branch to on user fault.
 * 	Corrupts:	tmp1, tmp2
 */
	.macro invalidate_icache_by_line start, end, tmp1, tmp2, label
	icache_line_size \tmp1, \tmp2
	sub	\tmp2, \tmp1, #1
	bic	\tmp2, \start, \tmp2
9997:
USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
	add	\tmp2, \tmp2, \tmp1
	cmp	\tmp2, \end
	b.lo	9997b
	dsb	ish
	isb
	.endm

/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1
	sbfx	\tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm

/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm

/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)			\
	.globl	__pi_##x;		\
	.type 	__pi_##x, %function;	\
	.set	__pi_##x, x;		\
	.size	__pi_##x, . - x;	\
	ENDPROC(x)

/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif

#ifdef CONFIG_KASAN
#define EXPORT_SYMBOL_NOKASAN(name)
#else
#define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
#endif

	/*
	 * Emit a 64-bit absolute little endian symbol reference in a way that
	 * ensures that it will be resolved at build time, even when building a
	 * PIE binary. This requires cooperation from the linker script, which
	 * must emit the lo32/hi32 halves individually.
	 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm

	/*
	 * mov_q - move an immediate constant into a 64-bit register using
	 *         between 2 and 4 movz/movk instructions (depending on the
	 *         magnitude and sign of the operand)
	 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm
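
	/*
	 * Example (illustrative): load a 64-bit constant without a literal pool:
	 *
	 *	mov_q	x0, 0xffff000011223344	// expands to a movz/movk sequence
	 */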

/*
 * Return the current task_struct.
 */
	.macro	get_current_task, rd
	mrs	\rd, sp_el0
	.endm

/*
 * Offset ttbr1 to allow for 48-bit kernel VAs set with 52-bit PTRS_PER_PGD.
 * orr is used as it can cover the immediate value (and is idempotent).
 * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
 * 	ttbr: Value of ttbr to set, modified.
 */
	.macro	offset_ttbr1, ttbr
#ifdef CONFIG_ARM64_USER_VA_BITS_52
	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
#endif
	.endm

/*
 * Perform the reverse of offset_ttbr1.
 * bic is used as it can cover the immediate value and, in future, won't need
 * to be nop'ed out when dealing with 52-bit kernel VAs.
 */
	.macro	restore_ttbr1, ttbr
#ifdef CONFIG_ARM64_USER_VA_BITS_52
	bic	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
#endif
	.endm

/*
 * Arrange a physical address in a TTBR register, taking care of 52-bit
 * addresses.
 *
 * 	phys:	physical address, preserved
 * 	ttbr:	returns the TTBR value
 */
	.macro	phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\ttbr, \phys, \phys, lsr #46
	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
#else
	mov	\ttbr, \phys
#endif
	.endm

	.macro	phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	/*
	 * We assume \phys is 64K aligned and this is guaranteed by only
	 * supporting this configuration with 64K pages.
	 */
	orr	\pte, \phys, \phys, lsr #36
	and	\pte, \pte, #PTE_ADDR_MASK
#else
	mov	\pte, \phys
#endif
	.endm

	.macro	pte_to_phys, phys, pte
#ifdef CONFIG_ARM64_PA_BITS_52
	ubfiz	\phys, \pte, #(48 - 16 - 12), #16
	bfxil	\phys, \pte, #16, #32
	lsl	\phys, \phys, #16
#else
	and	\phys, \pte, #PTE_ADDR_MASK
#endif
	.endm
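
/*
 * Worked example (illustrative): with 52-bit PAs and 64K pages, PA bits
 * [51:48] are kept in PTE bits [15:12]; pte_to_phys extracts that nibble
 * and PTE bits [47:16], then shifts left by 16 to rebuild the address.
 */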

/*
 * tcr_clear_errata_bits - Clear TCR bits that trigger an erratum on this CPU.
 */
	.macro	tcr_clear_errata_bits, tcr, tmp1, tmp2
#ifdef CONFIG_FUJITSU_ERRATUM_010001
	mrs	\tmp1, midr_el1

	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK
	and	\tmp1, \tmp1, \tmp2
	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001
	cmp	\tmp1, \tmp2
	b.ne	10f

	mov_q	\tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001
	bic	\tcr, \tcr, \tmp2
10:
#endif /* CONFIG_FUJITSU_ERRATUM_010001 */
	.endm

/**
 * Errata workaround prior to disabling the MMU. Insert an ISB immediately
 * before executing the MSR that will change SCTLR_ELn[M] from 1 to 0.
 */
	.macro pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
#endif
	.endm

	/*
	 * frame_push - Push @regcount callee saved registers to the stack,
	 *              starting at x19, as well as x29/x30, and set x29 to
	 *              the new value of sp. Add @extra bytes of stack space
	 *              for locals.
	 */
	.macro		frame_push, regcount:req, extra
	__frame		st, \regcount, \extra
	.endm

	/*
	 * frame_pop  - Pop the callee saved registers from the stack that were
	 *              pushed in the most recent call to frame_push, as well
	 *              as x29/x30 and any extra stack space that may have been
	 *              allocated.
	 */
	.macro		frame_pop
	__frame		ld
	.endm

	.macro		__frame_regs, reg1, reg2, op, num
	.if		.Lframe_regcount == \num
	\op\()r		\reg1, [sp, #(\num + 1) * 8]
	.elseif		.Lframe_regcount > \num
	\op\()p		\reg1, \reg2, [sp, #(\num + 1) * 8]
	.endif
	.endm

	.macro		__frame, op, regcount, extra=0
	.ifc		\op, st
	.if		(\regcount) < 0 || (\regcount) > 10
	.error		"regcount should be in the range [0 ... 10]"
	.endif
	.if		((\extra) % 16) != 0
	.error		"extra should be a multiple of 16 bytes"
	.endif
	.ifdef		.Lframe_regcount
	.if		.Lframe_regcount != -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	.endif
	.set		.Lframe_regcount, \regcount
	.set		.Lframe_extra, \extra
	.set		.Lframe_local_offset, ((\regcount + 3) / 2) * 16
	stp		x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
	mov		x29, sp
	.endif

	__frame_regs	x19, x20, \op, 1
	__frame_regs	x21, x22, \op, 3
	__frame_regs	x23, x24, \op, 5
	__frame_regs	x25, x26, \op, 7
	__frame_regs	x27, x28, \op, 9

	.ifc		\op, ld
	.if		.Lframe_regcount == -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	ldp		x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
	.set		.Lframe_regcount, -1
	.endif
	.endm
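
	/*
	 * Illustrative use (values are hypothetical): save x19-x22 plus
	 * x29/x30 and 16 bytes of extra stack space, then unwind:
	 *
	 *	frame_push	4, 16
	 *	// ... function body ...
	 *	frame_pop
	 */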

/*
 * Check whether to yield to another runnable task from kernel mode NEON code
 * (which runs with preemption disabled).
 *
 * if_will_cond_yield_neon
 *        // pre-yield patchup code
 * do_cond_yield_neon
 *        // post-yield patchup code
 * endif_yield_neon    <label>
 *
 * where <label> is optional, and marks the point where execution will resume
 * after a yield has been performed. If omitted, execution resumes right after
 * the endif_yield_neon invocation. Note that the entire sequence, including
 * the provided patchup code, will be omitted from the image if CONFIG_PREEMPT
 * is not defined.
 *
 * As a convenience, in the case where no patchup code is required, the above
 * sequence may be abbreviated to
 *
 * cond_yield_neon <label>
 *
 * Note that the patchup code does not support assembler directives that change
 * the output section; any use of such directives is undefined.
 *
 * The yield itself consists of the following:
 * - Check whether the preempt count is exactly 1 and a reschedule is also
 *   needed. If so, the call to preempt_enable() in kernel_neon_end() will
 *   trigger a reschedule. Otherwise, yielding is pointless.
 * - Disable and re-enable kernel mode NEON, and branch to the yield fixup
 *   code.
 *
 * This macro sequence may clobber all CPU state that is not guaranteed by the
 * AAPCS to be preserved across an ordinary function call.
 */

	.macro		cond_yield_neon, lbl
	if_will_cond_yield_neon
	do_cond_yield_neon
	endif_yield_neon	\lbl
	.endm

	.macro		if_will_cond_yield_neon
#ifdef CONFIG_PREEMPT
	get_current_task	x0
	ldr		x0, [x0, #TSK_TI_PREEMPT]
	sub		x0, x0, #PREEMPT_DISABLE_OFFSET
	cbz		x0, .Lyield_\@
	/* fall through to endif_yield_neon */
	.subsection	1
.Lyield_\@ :
#else
	.section	".discard.cond_yield_neon", "ax"
#endif
	.endm

	.macro		do_cond_yield_neon
	bl		kernel_neon_end
	bl		kernel_neon_begin
	.endm

	.macro		endif_yield_neon, lbl
	.ifnb		\lbl
	b		\lbl
	.else
	b		.Lyield_out_\@
	.endif
	.previous
.Lyield_out_\@ :
	.endm

#endif	/* __ASM_ASSEMBLER_H */