/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm-generic/export.h>

#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

	.macro save_and_disable_daif, flags
	mrs	\flags, daif
	msr	daifset, #0xf
	.endm

	.macro disable_daif
	msr	daifset, #0xf
	.endm

	.macro enable_daif
	msr	daifclr, #0xf
	.endm

	.macro	restore_daif, flags:req
	msr	daif, \flags
	.endm

	/* Only valid for an AArch64 PSTATE value; PSR_D_BIT differs for AArch32 */
	.macro	inherit_daif, pstate:req, tmp:req
	and	\tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
	msr	daif, \tmp
	.endm

	/* IRQ is the lowest-priority flag; unconditionally unmask the rest. */
	.macro enable_da_f
	msr	daifclr, #(8 | 4 | 1)
	.endm
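
	/*
	 * Illustrative only (not part of the original file): a typical
	 * critical section that masks all DAIF exceptions and restores the
	 * previous mask afterwards might look like
	 *
	 *	save_and_disable_daif x3	// x3 is an arbitrary scratch reg
	 *	... code that must not be interrupted ...
	 *	restore_daif x3
	 */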

/*
 * Save/restore interrupts.
 */
	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #2
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm
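
	/*
	 * Illustrative only: unlike the DAIF variants above, these mask just
	 * IRQs (daifset #2 sets only the I bit), e.g.
	 *
	 *	save_and_disable_irq x9		// x9 is an arbitrary scratch reg
	 *	... IRQ-free section ...
	 *	restore_irq x9
	 */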

	.macro	enable_dbg
	msr	daifclr, #8
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm

	/* call with daif masked */
	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
9990:
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb, opt
	dmb	\opt
	.endm

/*
 * RAS Error Synchronization barrier
 */
	.macro  esb
	hint    #16
	.endm

/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm

/*
 * Speculation barrier
 */
	.macro	sb
alternative_if_not ARM64_HAS_SB
	dsb	nsh
	isb
alternative_else
	SB_BARRIER_INSN
	nop
alternative_endif
	.endm

/*
 * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
 * of bounds.
 */
	.macro	mask_nospec64, idx, limit, tmp
	sub	\tmp, \idx, \limit
	bic	\tmp, \tmp, \idx
	and	\idx, \idx, \tmp, asr #63
	csdb
	.endm
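
	/*
	 * Worked example (illustrative, not from the original source): for
	 * limit = 8 and an out-of-bounds idx = 9, tmp = 9 - 8 = 1 is
	 * non-negative, so (tmp asr #63) is 0 and idx becomes 0. For an
	 * in-bounds idx = 3, tmp = 3 - 8 is negative (and the bic leaves its
	 * sign bit set because bit 63 of idx is clear), so the mask is all
	 * ones and idx is preserved. The bic also forces the mask to zero
	 * when idx itself has bit 63 set.
	 */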

/*
 * NOP sequence
 */
	.macro	nops, num
	.rept	\num
	nop
	.endr
	.endm

/*
 * Emit an entry into the exception table
 */
	.macro		_asm_extable, from, to
	.pushsection	__ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
	.endm

#define USER(l, x...)				\
9999:	x;					\
	_asm_extable	9999b, l
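
/*
 * Illustrative note: USER() wraps a single (pseudo-)instruction so that a
 * fault in it is fixed up by branching to the supplied label; for example,
 * invalidate_icache_by_line below relies on
 *
 *	USER(\label, ic	ivau, \tmp2)
 *
 * to redirect faults on user addresses to \label.
 */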

/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Vector entry
 */
	 .macro	ventry	label
	.align	7
	b	\label
	.endm

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm
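
	/*
	 * Illustrative only: with the two 32-bit halves zero-extended into
	 * X registers, "regs_to_64 x0, x0, x1" expands (on a little-endian
	 * build) to "orr x0, x0, x1, lsl #32", i.e. x1 supplies the high
	 * word and x0 the low word of the result.
	 */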

/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 */
	.macro	adr_l, dst, sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm

	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm

	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register to calculate the address
	 *       while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
	.endm
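
	/*
	 * Illustrative only (the symbol names below are hypothetical):
	 *
	 *	adr_l	x0, some_symbol		// x0 = &some_symbol
	 *	ldr_l	w1, some_u32, x2	// w1 = some_u32, x2 clobbered
	 *	str_l	w1, some_u32, x2	// some_u32 = w1
	 */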

	/*
	 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro adr_this_cpu, dst, sym, tmp
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	add	\dst, \dst, \tmp
	.endm

	/*
	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	ldr	\dst, [\dst, \tmp]
	.endm
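
	/*
	 * Illustrative only (the per-cpu symbol names are hypothetical):
	 *
	 *	adr_this_cpu	x0, my_percpu_buf, x1	// roughly this_cpu_ptr(&my_percpu_buf)
	 *	ldr_this_cpu	x0, my_percpu_val, x1	// roughly __this_cpu_read(my_percpu_val)
	 */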

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
	.endm
/*
 * read_ctr - read CTR_EL0. If the system has mismatched register fields,
 * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
	.endm


/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm		\tmp, \tmp, #16, #19	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and		\tmp, \tmp, #0xf	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm
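
/*
 * Illustrative note: CTR_EL0.DminLine (bits [19:16]) and IminLine (bits
 * [3:0]) encode the line size as log2(words). A DminLine value of 4, for
 * example, gives 4 bytes << 4 = 64 bytes, which is what dcache_line_size
 * would return in \reg.
 */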

/*
 * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_t0sz, valreg, t0sz
	bfi	\valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
 * ID_AA64MMFR0_EL1.PARange value
 *
 *	tcr:		register with the TCR_ELx value to be updated
 *	pos:		IPS or PS bitfield position
 *	tmp{0,1}:	temporary registers
 */
	.macro	tcr_compute_pa_size, tcr, pos, tmp0, tmp1
	mrs	\tmp0, ID_AA64MMFR0_EL1
	// Narrow PARange to fit the PS field in TCR_ELx
	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
	mov	\tmp1, #ID_AA64MMFR0_PARANGE_MAX
	cmp	\tmp0, \tmp1
	csel	\tmp0, \tmp1, \tmp0, hi
	bfi	\tcr, \tmp0, \pos, #3
	.endm

/*
 * Macro to perform data cache maintenance for the interval
 * [kaddr, kaddr + size)
 *
 * 	op:		operation passed to dc instruction
 * 	domain:		domain used in dsb instruction
 * 	kaddr:		starting virtual address of the region
 * 	size:		size of the region
 * 	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro __dcache_op_workaround_clean_cache, op, kaddr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \kaddr
alternative_else
	dc	civac, \kaddr
alternative_endif
	.endm

	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2
9998:
	.ifc	\op, cvau
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvac
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvap
	sys	3, c7, c12, 1, \kaddr	// dc cvap
	.else
	.ifc	\op, cvadp
	sys	3, c7, c13, 1, \kaddr	// dc cvadp
	.else
	dc	\op, \kaddr
	.endif
	.endif
	.endif
	.endif
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm
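
	/*
	 * Illustrative only: to clean and invalidate [x0, x0 + x1) to the
	 * point of coherency, a caller might use
	 *
	 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
	 *
	 * Remember that kaddr (x0) and size (x1) are corrupted on return.
	 */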

/*
 * Macro to perform instruction cache maintenance for the interval
 * [start, end)
 *
 * 	start, end:	virtual addresses describing the region
 *	label:		A label to branch to on user fault.
 * 	Corrupts:	tmp1, tmp2
 */
	.macro invalidate_icache_by_line start, end, tmp1, tmp2, label
	icache_line_size \tmp1, \tmp2
	sub	\tmp2, \tmp1, #1
	bic	\tmp2, \start, \tmp2
9997:
USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
	add	\tmp2, \tmp2, \tmp1
	cmp	\tmp2, \end
	b.lo	9997b
	dsb	ish
	isb
	.endm
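
	/*
	 * Illustrative only: invalidating the I-cache for a user range
	 * [x0, x1) with faults redirected to a local fixup label might look
	 * like
	 *
	 *	invalidate_icache_by_line x0, x1, x2, x3, 9f
	 *	...
	 * 9:	// handle the user-space fault
	 */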

/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1
	sbfx	\tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm

/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm
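
	/*
	 * Illustrative only: with dest in x0, src in x1 and x2-x9 free as
	 * scratch, one page can be copied with
	 *
	 *	copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
	 *
	 * Both pointers are advanced past the copied page (the loop assumes
	 * a page-aligned src).
	 */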

/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)			\
	.globl	__pi_##x;		\
	.type 	__pi_##x, %function;	\
	.set	__pi_##x, x;		\
	.size	__pi_##x, . - x;	\
	ENDPROC(x)

/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif

#ifdef CONFIG_KASAN
#define EXPORT_SYMBOL_NOKASAN(name)
#else
#define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
#endif

	/*
	 * Emit a 64-bit absolute little endian symbol reference in a way that
	 * ensures that it will be resolved at build time, even when building a
	 * PIE binary. This requires cooperation from the linker script, which
	 * must emit the lo32/hi32 halves individually.
	 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm

	/*
	 * mov_q - move an immediate constant into a 64-bit register using
	 *         between 2 and 4 movz/movk instructions (depending on the
	 *         magnitude and sign of the operand)
	 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm
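
	/*
	 * Illustrative only: mov_q builds a constant without a literal pool
	 * load, e.g.
	 *
	 *	mov_q	x0, 0x0000ffffffffffff	// expands to a movz + movk sequence
	 */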

/*
 * Return the current task_struct.
 */
	.macro	get_current_task, rd
	mrs	\rd, sp_el0
	.endm

/*
 * Offset ttbr1 to allow for 48-bit kernel VAs set with 52-bit PTRS_PER_PGD.
 * orr is used as it can cover the immediate value (and is idempotent).
 * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
 * 	ttbr: Value of ttbr to set, modified.
 */
	.macro	offset_ttbr1, ttbr
#ifdef CONFIG_ARM64_USER_VA_BITS_52
	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
#endif
	.endm

/*
 * Perform the reverse of offset_ttbr1.
 * bic is used as it can cover the immediate value and, in future, won't need
 * to be nop'ed out when dealing with 52-bit kernel VAs.
 */
	.macro	restore_ttbr1, ttbr
#ifdef CONFIG_ARM64_USER_VA_BITS_52
	bic	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
#endif
	.endm

/*
 * Arrange a physical address in a TTBR register, taking care of 52-bit
 * addresses.
 *
 * 	phys:	physical address, preserved
 * 	ttbr:	returns the TTBR value
 */
	.macro	phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\ttbr, \phys, \phys, lsr #46
	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
#else
	mov	\ttbr, \phys
#endif
	.endm

	.macro	phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	/*
	 * We assume \phys is 64K aligned and this is guaranteed by only
	 * supporting this configuration with 64K pages.
	 */
	orr	\pte, \phys, \phys, lsr #36
	and	\pte, \pte, #PTE_ADDR_MASK
#else
	mov	\pte, \phys
#endif
	.endm

	.macro	pte_to_phys, phys, pte
#ifdef CONFIG_ARM64_PA_BITS_52
	ubfiz	\phys, \pte, #(48 - 16 - 12), #16
	bfxil	\phys, \pte, #16, #32
	lsl	\phys, \phys, #16
#else
	and	\phys, \pte, #PTE_ADDR_MASK
#endif
	.endm
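
	/*
	 * Illustrative note on the 52-bit case above: physical address bits
	 * [51:48] cannot live in their natural position in a TTBR or PTE,
	 * so "\phys, lsr #46" (resp. #36) moves them down into bits [5:2]
	 * of the TTBR (resp. [15:12] of the PTE), and the subsequent mask
	 * drops the bits that do not belong in the descriptor.
	 */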

/*
 * tcr_clear_errata_bits - Clear TCR bits that trigger an erratum on this CPU.
 */
	.macro	tcr_clear_errata_bits, tcr, tmp1, tmp2
#ifdef CONFIG_FUJITSU_ERRATUM_010001
	mrs	\tmp1, midr_el1

	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK
	and	\tmp1, \tmp1, \tmp2
	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001
	cmp	\tmp1, \tmp2
	b.ne	10f

	mov_q	\tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001
	bic	\tcr, \tcr, \tmp2
10:
#endif /* CONFIG_FUJITSU_ERRATUM_010001 */
	.endm

/**
 * Errata workaround prior to disabling the MMU. Insert an ISB immediately
 * prior to executing the MSR that will change SCTLR_ELn[M] from 1 to 0.
 */
	.macro pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
#endif
	.endm

	/*
	 * frame_push - Push @regcount callee saved registers to the stack,
	 *              starting at x19, as well as x29/x30, and set x29 to
	 *              the new value of sp. Add @extra bytes of stack space
	 *              for locals.
	 */
	.macro		frame_push, regcount:req, extra
	__frame		st, \regcount, \extra
	.endm

	/*
	 * frame_pop  - Pop the callee saved registers from the stack that were
	 *              pushed in the most recent call to frame_push, as well
	 *              as x29/x30 and any extra stack space that may have been
	 *              allocated.
	 */
	.macro		frame_pop
	__frame		ld
	.endm

	.macro		__frame_regs, reg1, reg2, op, num
	.if		.Lframe_regcount == \num
	\op\()r		\reg1, [sp, #(\num + 1) * 8]
	.elseif		.Lframe_regcount > \num
	\op\()p		\reg1, \reg2, [sp, #(\num + 1) * 8]
	.endif
	.endm

	.macro		__frame, op, regcount, extra=0
	.ifc		\op, st
	.if		(\regcount) < 0 || (\regcount) > 10
	.error		"regcount should be in the range [0 ... 10]"
	.endif
	.if		((\extra) % 16) != 0
	.error		"extra should be a multiple of 16 bytes"
	.endif
	.ifdef		.Lframe_regcount
	.if		.Lframe_regcount != -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	.endif
	.set		.Lframe_regcount, \regcount
	.set		.Lframe_extra, \extra
	.set		.Lframe_local_offset, ((\regcount + 3) / 2) * 16
	stp		x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
	mov		x29, sp
	.endif

	__frame_regs	x19, x20, \op, 1
	__frame_regs	x21, x22, \op, 3
	__frame_regs	x23, x24, \op, 5
	__frame_regs	x25, x26, \op, 7
	__frame_regs	x27, x28, \op, 9

	.ifc		\op, ld
	.if		.Lframe_regcount == -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	ldp		x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
	.set		.Lframe_regcount, -1
	.endif
	.endm
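
	/*
	 * Illustrative only: a function that wants x19-x22 as callee-saved
	 * scratch plus 32 bytes of locals might be structured as
	 *
	 *	ENTRY(my_asm_func)		// hypothetical function name
	 *		frame_push	4, 32
	 *		...
	 *		frame_pop
	 *		ret
	 *	ENDPROC(my_asm_func)
	 */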

/*
 * Check whether to yield to another runnable task from kernel mode NEON code
 * (which runs with preemption disabled).
 *
 * if_will_cond_yield_neon
 *        // pre-yield patchup code
 * do_cond_yield_neon
 *        // post-yield patchup code
 * endif_yield_neon    <label>
 *
 * where <label> is optional, and marks the point where execution will resume
 * after a yield has been performed. If omitted, execution resumes right after
 * the endif_yield_neon invocation. Note that the entire sequence, including
 * the provided patchup code, will be omitted from the image if CONFIG_PREEMPT
 * is not defined.
 *
 * As a convenience, in the case where no patchup code is required, the above
 * sequence may be abbreviated to
 *
 * cond_yield_neon <label>
 *
 * Note that the patchup code does not support assembler directives that change
 * the output section; any use of such directives is undefined.
 *
 * The yield itself consists of the following:
 * - Check whether the preempt count is exactly 1, in which case re-enabling
 *   preemption once will make the task preemptible. If this is not the case,
 *   yielding is pointless.
 * - Check whether TIF_NEED_RESCHED is set, and if so, disable and re-enable
 *   kernel mode NEON (which will trigger a reschedule), and branch to the
 *   yield fixup code.
 *
 * This macro sequence may clobber all CPU state that is not guaranteed by the
 * AAPCS to be preserved across an ordinary function call.
 */

	.macro		cond_yield_neon, lbl
	if_will_cond_yield_neon
	do_cond_yield_neon
	endif_yield_neon	\lbl
	.endm

	.macro		if_will_cond_yield_neon
#ifdef CONFIG_PREEMPT
	get_current_task	x0
	ldr		x0, [x0, #TSK_TI_PREEMPT]
	sub		x0, x0, #PREEMPT_DISABLE_OFFSET
	cbz		x0, .Lyield_\@
	/* fall through to endif_yield_neon */
	.subsection	1
.Lyield_\@ :
#else
	.section	".discard.cond_yield_neon", "ax"
#endif
	.endm

	.macro		do_cond_yield_neon
	bl		kernel_neon_end
	bl		kernel_neon_begin
	.endm

	.macro		endif_yield_neon, lbl
	.ifnb		\lbl
	b		\lbl
	.else
	b		.Lyield_out_\@
	.endif
	.previous
.Lyield_out_\@ :
	.endm
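
	/*
	 * Illustrative only: a NEON loop that must save and reload its live
	 * vector state around a potential yield could use the full form,
	 * e.g. (x8 and the registers saved are hypothetical)
	 *
	 *	if_will_cond_yield_neon
	 *	st1	{v0.16b-v3.16b}, [x8]	// pre-yield patchup
	 *	do_cond_yield_neon
	 *	ld1	{v0.16b-v3.16b}, [x8]	// post-yield patchup
	 *	endif_yield_neon
	 */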

#endif	/* __ASM_ASSEMBLER_H */