xref: /openbmc/linux/arch/arm64/include/asm/assembler.h (revision 88aa7ae6)
/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm-generic/export.h>

#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

	.macro save_and_disable_daif, flags
	mrs	\flags, daif
	msr	daifset, #0xf
	.endm

	.macro disable_daif
	msr	daifset, #0xf
	.endm

	.macro enable_daif
	msr	daifclr, #0xf
	.endm

	.macro	restore_daif, flags:req
	msr	daif, \flags
	.endm
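
	/*
	 * For example, a caller might bracket a sequence that must run with
	 * all DAIF exceptions masked like this (x3 is an arbitrary scratch
	 * register, chosen here purely for illustration):
	 *
	 *	save_and_disable_daif x3
	 *	// ... code that must not be interrupted ...
	 *	restore_daif x3
	 */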

	/* Only valid for an AArch64 pstate value; PSR_D_BIT is different for AArch32 */
	.macro	inherit_daif, pstate:req, tmp:req
	and	\tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
	msr	daif, \tmp
	.endm

	/* IRQ is the lowest priority flag, unconditionally unmask the rest. */
	.macro enable_da_f
	msr	daifclr, #(8 | 4 | 1)
	.endm

/*
 * Save/restore interrupts.
 */
	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #2
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm

	.macro	enable_dbg
	msr	daifclr, #8
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm

	/* call with daif masked */
	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
9990:
	.endm
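
	/*
	 * Illustrative use: once the current thread's flag word has been
	 * loaded (register choices below are arbitrary), single-step can be
	 * switched off and back on around a region that must not be stepped:
	 *
	 *	disable_step_tsk x19, x20
	 *	// ... region executed with MDSCR_EL1.SS cleared ...
	 *	enable_step_tsk x19, x20	// call with daif masked
	 */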

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb, opt
	dmb	\opt
	.endm

/*
 * RAS Error Synchronization barrier
 */
	.macro  esb
	hint    #16
	.endm

/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm

/*
 * Speculation barrier
 */
	.macro	sb
alternative_if_not ARM64_HAS_SB
	dsb	nsh
	isb
alternative_else
	SB_BARRIER_INSN
	nop
alternative_endif
	.endm

/*
 * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
 * of bounds.
 */
	.macro	mask_nospec64, idx, limit, tmp
	sub	\tmp, \idx, \limit
	bic	\tmp, \tmp, \idx
	and	\idx, \idx, \tmp, asr #63
	csdb
	.endm
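
/*
 * Informal sketch of why this works: for a valid index (0 <= idx < limit,
 * top bit of idx clear) the subtraction produces a negative value whose sign
 * bit survives the bic, so "asr #63" yields all-ones and the final and leaves
 * idx untouched. If idx >= limit, or idx has its top bit set, the computed
 * mask is zero and idx is forced to zero. A typical (illustrative) call:
 *
 *	mask_nospec64 x0, x1, x2	// x0 = index, x1 = array size
 */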

/*
 * NOP sequence
 */
	.macro	nops, num
	.rept	\num
	nop
	.endr
	.endm

/*
 * Emit an entry into the exception table
 */
	.macro		_asm_extable, from, to
	.pushsection	__ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
	.endm

#define USER(l, x...)				\
9999:	x;					\
	_asm_extable	9999b, l
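
/*
 * For instance, a user-space access that should branch to a local fixup
 * label on a fault could be written as (purely illustrative):
 *
 *	USER(9f, ldtr x2, [x1])		// a faulting load branches to 9:
 *	...
 * 9:	// fault handling
 */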

/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Vector entry
 */
	.macro	ventry	label
	.align	7
	b	\label
	.endm

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm
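
/*
 * Example (register choice is illustrative): x1 and x2 hold the two 32-bit
 * halves as they were loaded from memory, and x0 receives the combined
 * 64-bit value regardless of endianness:
 *
 *	regs_to_64 x0, x1, x2
 */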

/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 */
	.macro	adr_l, dst, sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm

	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm

	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register used to calculate the
	 *       address, since <src> must be preserved.
	 */
	.macro	str_l, src, sym, tmp
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
	.endm
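
	/*
	 * Typical use (the symbol name below is illustrative): load, update
	 * and store a kernel variable without going through a literal pool:
	 *
	 *	ldr_l	x0, some_kernel_symbol		// x0 = current value
	 *	add	x0, x0, #1
	 *	str_l	x0, some_kernel_symbol, x1	// x1 used as scratch
	 */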

	/*
	 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro adr_this_cpu, dst, sym, tmp
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	add	\dst, \dst, \tmp
	.endm

	/*
	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	ldr	\dst, [\dst, \tmp]
	.endm
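
	/*
	 * For example (the variable name is illustrative), reading a per-CPU
	 * counter for the current CPU might look like:
	 *
	 *	ldr_this_cpu x0, some_percpu_counter, x1
	 *
	 * adr_this_cpu would be used instead when the address itself is
	 * needed, e.g. to write the variable back afterwards.
	 */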

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
	.endm

/*
 * read_ctr - read CTR_EL0. If the system has mismatched register fields,
 * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
	.endm

/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm		\tmp, \tmp, #16, #19	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and		\tmp, \tmp, #0xf	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm
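
/*
 * A cache maintenance loop would typically fetch the safe line size once and
 * then advance by that stride, for example (registers are arbitrary):
 *
 *	dcache_line_size x2, x3		// x2 = safe D-cache line size
 *	// ... walk the buffer in x2-sized steps ...
 */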

/*
 * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_t0sz, valreg, t0sz
	bfi	\valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
 * ID_AA64MMFR0_EL1.PARange value
 *
 *	tcr:		register with the TCR_ELx value to be updated
 *	pos:		IPS or PS bitfield position
 *	tmp{0,1}:	temporary registers
 */
	.macro	tcr_compute_pa_size, tcr, pos, tmp0, tmp1
	mrs	\tmp0, ID_AA64MMFR0_EL1
	// Narrow PARange to fit the PS field in TCR_ELx
	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
	mov	\tmp1, #ID_AA64MMFR0_PARANGE_MAX
	cmp	\tmp0, \tmp1
	csel	\tmp0, \tmp1, \tmp0, hi
	bfi	\tcr, \tmp0, \pos, #3
	.endm
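
/*
 * MMU setup code would typically apply this once per translation regime,
 * e.g. (the IPS field position constant is shown for illustration):
 *
 *	tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
 */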

/*
 * Macro to perform a data cache maintenance for the interval
 * [kaddr, kaddr + size)
 *
 * 	op:		operation passed to dc instruction
 * 	domain:		domain used in dsb instruction
 * 	kaddr:		starting virtual address of the region
 * 	size:		size of the region
 * 	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro __dcache_op_workaround_clean_cache, op, kaddr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \kaddr
alternative_else
	dc	civac, \kaddr
alternative_endif
	.endm

	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2
9998:
	.ifc	\op, cvau
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvac
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvap
	sys	3, c7, c12, 1, \kaddr	// dc cvap
	.else
	dc	\op, \kaddr
	.endif
	.endif
	.endif
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm
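
/*
 * For example, cleaning and invalidating a buffer to the point of coherency
 * might look like this (register choice is illustrative; note that the
 * address and size registers are clobbered):
 *
 *	// x0 = start address, x1 = size in bytes
 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
 */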

/*
 * Macro to perform an instruction cache maintenance for the interval
 * [start, end)
 *
 * 	start, end:	virtual addresses describing the region
 *	label:		A label to branch to on user fault.
 * 	Corrupts:	tmp1, tmp2
 */
	.macro invalidate_icache_by_line start, end, tmp1, tmp2, label
	icache_line_size \tmp1, \tmp2
	sub	\tmp2, \tmp1, #1
	bic	\tmp2, \start, \tmp2
9997:
USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
	add	\tmp2, \tmp2, \tmp1
	cmp	\tmp2, \end
	b.lo	9997b
	dsb	ish
	isb
	.endm
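
/*
 * A routine that has just written instructions to [x0, x1) and may fault on
 * user addresses could invalidate the I-cache like so (the fixup label 9f is
 * illustrative):
 *
 *	invalidate_icache_by_line x0, x1, x2, x3, 9f
 */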

/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	\tmpreg, \tmpreg, #8, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm

/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm
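
/*
 * Example invocation (register allocation is arbitrary as long as eight
 * scratch registers are available; dest and src are advanced past the page
 * by the macro):
 *
 *	copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 */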

/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)			\
	.globl	__pi_##x;		\
	.type	__pi_##x, %function;	\
	.set	__pi_##x, x;		\
	.size	__pi_##x, . - x;	\
	ENDPROC(x)

/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif

#ifdef CONFIG_KASAN
#define EXPORT_SYMBOL_NOKASAN(name)
#else
#define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
#endif

	/*
	 * Emit a 64-bit absolute little endian symbol reference in a way that
	 * ensures that it will be resolved at build time, even when building a
	 * PIE binary. This requires cooperation from the linker script, which
	 * must emit the lo32/hi32 halves individually.
	 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm

	/*
	 * mov_q - move an immediate constant into a 64-bit register using
	 *         between 2 and 4 movz/movk instructions (depending on the
	 *         magnitude and sign of the operand)
	 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm
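
	/*
	 * For instance (the constant is chosen for illustration), a value
	 * that does not sign-extend from 32 or 48 bits takes the full
	 * four-instruction movz/movk path:
	 *
	 *	mov_q	x0, 0x1234567890abcdef
	 */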

/*
 * Return the current task_struct.
 */
	.macro	get_current_task, rd
	mrs	\rd, sp_el0
	.endm

/*
 * Offset ttbr1 to allow for 48-bit kernel VAs set with 52-bit PTRS_PER_PGD.
 * orr is used as it can cover the immediate value (and is idempotent).
 * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
 * 	ttbr: Value of ttbr to set, modified.
 */
	.macro	offset_ttbr1, ttbr
#ifdef CONFIG_ARM64_USER_VA_BITS_52
	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
#endif
	.endm

/*
 * Perform the reverse of offset_ttbr1.
 * bic is used as it can cover the immediate value and, in future, won't need
 * to be nop'ed out when dealing with 52-bit kernel VAs.
 */
	.macro	restore_ttbr1, ttbr
#ifdef CONFIG_ARM64_USER_VA_BITS_52
	bic	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
#endif
	.endm

/*
 * Arrange a physical address in a TTBR register, taking care of 52-bit
 * addresses.
 *
 * 	phys:	physical address, preserved
 * 	ttbr:	returns the TTBR value
 */
	.macro	phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\ttbr, \phys, \phys, lsr #46
	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
#else
	mov	\ttbr, \phys
#endif
	.endm

	.macro	phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	/*
	 * We assume \phys is 64K aligned and this is guaranteed by only
	 * supporting this configuration with 64K pages.
	 */
	orr	\pte, \phys, \phys, lsr #36
	and	\pte, \pte, #PTE_ADDR_MASK
#else
	mov	\pte, \phys
#endif
	.endm

	.macro	pte_to_phys, phys, pte
#ifdef CONFIG_ARM64_PA_BITS_52
	ubfiz	\phys, \pte, #(48 - 16 - 12), #16
	bfxil	\phys, \pte, #16, #32
	lsl	\phys, \phys, #16
#else
	and	\phys, \pte, #PTE_ADDR_MASK
#endif
	.endm
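
	/*
	 * Page table code would typically use these as, for example
	 * (registers are illustrative):
	 *
	 *	phys_to_ttbr x2, x1	// x1 = PA of a table, x2 -> TTBRn_EL1
	 *	pte_to_phys x3, x4	// x4 = PTE value, x3 = physical address
	 */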

/*
 * tcr_clear_errata_bits - Clear TCR bits that trigger an erratum on this CPU.
 */
	.macro	tcr_clear_errata_bits, tcr, tmp1, tmp2
#ifdef CONFIG_FUJITSU_ERRATUM_010001
	mrs	\tmp1, midr_el1

	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK
	and	\tmp1, \tmp1, \tmp2
	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001
	cmp	\tmp1, \tmp2
	b.ne	10f

	mov_q	\tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001
	bic	\tcr, \tcr, \tmp2
10:
#endif /* CONFIG_FUJITSU_ERRATUM_010001 */
	.endm

/**
 * Errata workaround prior to disabling the MMU. Insert an ISB immediately prior
 * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0.
 */
	.macro pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
#endif
	.endm

	/*
	 * frame_push - Push @regcount callee saved registers to the stack,
	 *              starting at x19, as well as x29/x30, and set x29 to
	 *              the new value of sp. Add @extra bytes of stack space
	 *              for locals.
	 */
	.macro		frame_push, regcount:req, extra
	__frame		st, \regcount, \extra
	.endm

	/*
	 * frame_pop  - Pop the callee saved registers from the stack that were
	 *              pushed in the most recent call to frame_push, as well
	 *              as x29/x30 and any extra stack space that may have been
	 *              allocated.
	 */
	.macro		frame_pop
	__frame		ld
	.endm
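
	/*
	 * Sketch of a routine that needs four callee saved registers plus
	 * 16 bytes of local storage (numbers are illustrative):
	 *
	 *	frame_push	4, 16		// saves x19-x22 and x29/x30
	 *	// ... body; the 16 extra bytes sit above the saved registers ...
	 *	frame_pop
	 *	ret
	 */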

	.macro		__frame_regs, reg1, reg2, op, num
	.if		.Lframe_regcount == \num
	\op\()r		\reg1, [sp, #(\num + 1) * 8]
	.elseif		.Lframe_regcount > \num
	\op\()p		\reg1, \reg2, [sp, #(\num + 1) * 8]
	.endif
	.endm

	.macro		__frame, op, regcount, extra=0
	.ifc		\op, st
	.if		(\regcount) < 0 || (\regcount) > 10
	.error		"regcount should be in the range [0 ... 10]"
	.endif
	.if		((\extra) % 16) != 0
	.error		"extra should be a multiple of 16 bytes"
	.endif
	.ifdef		.Lframe_regcount
	.if		.Lframe_regcount != -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	.endif
	.set		.Lframe_regcount, \regcount
	.set		.Lframe_extra, \extra
	.set		.Lframe_local_offset, ((\regcount + 3) / 2) * 16
	stp		x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
	mov		x29, sp
	.endif

	__frame_regs	x19, x20, \op, 1
	__frame_regs	x21, x22, \op, 3
	__frame_regs	x23, x24, \op, 5
	__frame_regs	x25, x26, \op, 7
	__frame_regs	x27, x28, \op, 9

	.ifc		\op, ld
	.if		.Lframe_regcount == -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	ldp		x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
	.set		.Lframe_regcount, -1
	.endif
	.endm

/*
 * Check whether to yield to another runnable task from kernel mode NEON code
 * (which runs with preemption disabled).
 *
 * if_will_cond_yield_neon
 *        // pre-yield patchup code
 * do_cond_yield_neon
 *        // post-yield patchup code
 * endif_yield_neon    <label>
 *
 * where <label> is optional, and marks the point where execution will resume
 * after a yield has been performed. If omitted, execution resumes right after
 * the endif_yield_neon invocation. Note that the entire sequence, including
 * the provided patchup code, will be omitted from the image if CONFIG_PREEMPT
 * is not defined.
 *
 * As a convenience, in the case where no patchup code is required, the above
 * sequence may be abbreviated to
 *
 * cond_yield_neon <label>
 *
 * Note that the patchup code does not support assembler directives that change
 * the output section; any use of such directives is undefined.
 *
 * The yield itself consists of the following:
 * - Check whether the preempt count is exactly 1, in which case disabling
 *   preemption once will make the task preemptible. If this is not the case,
 *   yielding is pointless.
 * - Check whether TIF_NEED_RESCHED is set, and if so, disable and re-enable
 *   kernel mode NEON (which will trigger a reschedule), and branch to the
 *   yield fixup code.
 *
 * This macro sequence may clobber all CPU state that is not guaranteed by the
 * AAPCS to be preserved across an ordinary function call.
 */

	.macro		cond_yield_neon, lbl
	if_will_cond_yield_neon
	do_cond_yield_neon
	endif_yield_neon	\lbl
	.endm

	.macro		if_will_cond_yield_neon
#ifdef CONFIG_PREEMPT
	get_current_task	x0
	ldr		x0, [x0, #TSK_TI_PREEMPT]
	sub		x0, x0, #PREEMPT_DISABLE_OFFSET
	cbz		x0, .Lyield_\@
	/* fall through to endif_yield_neon */
	.subsection	1
.Lyield_\@ :
#else
	.section	".discard.cond_yield_neon", "ax"
#endif
	.endm

	.macro		do_cond_yield_neon
	bl		kernel_neon_end
	bl		kernel_neon_begin
	.endm

	.macro		endif_yield_neon, lbl
	.ifnb		\lbl
	b		\lbl
	.else
	b		.Lyield_out_\@
	.endif
	.previous
.Lyield_out_\@ :
	.endm

#endif	/* __ASM_ASSEMBLER_H */