/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * FP/SIMD state saving and restoring macros
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <asm/assembler.h>

.macro fpsimd_save state, tmpnr
	stp	q0, q1, [\state, #16 * 0]
	stp	q2, q3, [\state, #16 * 2]
	stp	q4, q5, [\state, #16 * 4]
	stp	q6, q7, [\state, #16 * 6]
	stp	q8, q9, [\state, #16 * 8]
	stp	q10, q11, [\state, #16 * 10]
	stp	q12, q13, [\state, #16 * 12]
	stp	q14, q15, [\state, #16 * 14]
	stp	q16, q17, [\state, #16 * 16]
	stp	q18, q19, [\state, #16 * 18]
	stp	q20, q21, [\state, #16 * 20]
	stp	q22, q23, [\state, #16 * 22]
	stp	q24, q25, [\state, #16 * 24]
	stp	q26, q27, [\state, #16 * 26]
	stp	q28, q29, [\state, #16 * 28]
	stp	q30, q31, [\state, #16 * 30]!
	mrs	x\tmpnr, fpsr
	str	w\tmpnr, [\state, #16 * 2]
	mrs	x\tmpnr, fpcr
	str	w\tmpnr, [\state, #16 * 2 + 4]
.endm
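
/*
 * fpsimd_save stores Q0-Q31 followed by FPSR/FPCR: the pre-index
 * writeback on the final stp leaves \state pointing at the q30/q31
 * slot, so the two status words land directly after q31. Illustrative
 * (hypothetical) use, with the buffer address in x0 and register
 * number 8 free for scratch:
 *
 *	fpsimd_save x0, 8	// clobbers x8 and x0 (writeback)
 */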

.macro fpsimd_restore_fpcr state, tmp
	/*
	 * Writes to fpcr may be self-synchronising, so avoid restoring
	 * the register if it hasn't changed.
	 */
	mrs	\tmp, fpcr
	cmp	\tmp, \state
	b.eq	9999f
	msr	fpcr, \state
9999:
.endm
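
/*
 * Illustrative (hypothetical) use, assuming x2 holds the FPCR value
 * to restore and x3 is free for scratch:
 *
 *	fpsimd_restore_fpcr x2, x3	// writes fpcr only if it changed
 */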

/* Clobbers \state */
.macro fpsimd_restore state, tmpnr
	ldp	q0, q1, [\state, #16 * 0]
	ldp	q2, q3, [\state, #16 * 2]
	ldp	q4, q5, [\state, #16 * 4]
	ldp	q6, q7, [\state, #16 * 6]
	ldp	q8, q9, [\state, #16 * 8]
	ldp	q10, q11, [\state, #16 * 10]
	ldp	q12, q13, [\state, #16 * 12]
	ldp	q14, q15, [\state, #16 * 14]
	ldp	q16, q17, [\state, #16 * 16]
	ldp	q18, q19, [\state, #16 * 18]
	ldp	q20, q21, [\state, #16 * 20]
	ldp	q22, q23, [\state, #16 * 22]
	ldp	q24, q25, [\state, #16 * 24]
	ldp	q26, q27, [\state, #16 * 26]
	ldp	q28, q29, [\state, #16 * 28]
	ldp	q30, q31, [\state, #16 * 30]!
	ldr	w\tmpnr, [\state, #16 * 2]
	msr	fpsr, x\tmpnr
	ldr	w\tmpnr, [\state, #16 * 2 + 4]
	fpsimd_restore_fpcr x\tmpnr, \state
.endm
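
/*
 * Illustrative (hypothetical) use, mirroring fpsimd_save above, with
 * the saved state at x0 and register number 8 as scratch:
 *
 *	fpsimd_restore x0, 8	// clobbers x8 and x0
 *
 * \state is clobbered both by the writeback on the q30/q31 load and by
 * its reuse as the scratch register for fpsimd_restore_fpcr.
 */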

/* Sanity-check macros to help avoid encoding garbage instructions */

.macro _check_general_reg nr
	.if (\nr) < 0 || (\nr) > 30
		.error "Bad register number \nr."
	.endif
.endm

.macro _sve_check_zreg znr
	.if (\znr) < 0 || (\znr) > 31
		.error "Bad Scalable Vector Extension vector register number \znr."
	.endif
.endm

.macro _sve_check_preg pnr
	.if (\pnr) < 0 || (\pnr) > 15
		.error "Bad Scalable Vector Extension predicate register number \pnr."
	.endif
.endm

.macro _check_num n, min, max
	.if (\n) < (\min) || (\n) > (\max)
		.error "Number \n out of range [\min,\max]"
	.endif
.endm
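
/*
 * For example (hypothetical misuse), `_sve_check_preg 16' would fail
 * at assembly time with "Bad Scalable Vector Extension predicate
 * register number 16" instead of letting a bogus .inst be emitted.
 */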

/* SVE instruction encodings for non-SVE-capable assemblers */
/* (pre binutils 2.28; all kernel-capable clang versions support SVE) */

/* STR (vector): STR Z\nz, [X\nxbase, #\offset, MUL VL] */
.macro _sve_str_v nz, nxbase, offset=0
	_sve_check_zreg \nz
	_check_general_reg \nxbase
	_check_num (\offset), -0x100, 0xff
	.inst	0xe5804000			\
		| (\nz)				\
		| ((\nxbase) << 5)		\
		| (((\offset) & 7) << 10)	\
		| (((\offset) & 0x1f8) << 13)
.endm
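
/*
 * Sketch of the encoding above, read off the bitmasks rather than an
 * authoritative decode: the signed 9-bit VL-scaled offset is split
 * into offset[2:0] at bits [12:10] and offset[8:3] at bits [21:16],
 * with \nz in bits [4:0] and the base register in bits [9:5]. The
 * LDR/STR vector and predicate variants below differ only in their
 * opcode bits.
 */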

/* LDR (vector): LDR Z\nz, [X\nxbase, #\offset, MUL VL] */
.macro _sve_ldr_v nz, nxbase, offset=0
	_sve_check_zreg \nz
	_check_general_reg \nxbase
	_check_num (\offset), -0x100, 0xff
	.inst	0x85804000			\
		| (\nz)				\
		| ((\nxbase) << 5)		\
		| (((\offset) & 7) << 10)	\
		| (((\offset) & 0x1f8) << 13)
.endm

/* STR (predicate): STR P\np, [X\nxbase, #\offset, MUL VL] */
.macro _sve_str_p np, nxbase, offset=0
	_sve_check_preg \np
	_check_general_reg \nxbase
	_check_num (\offset), -0x100, 0xff
	.inst	0xe5800000			\
		| (\np)				\
		| ((\nxbase) << 5)		\
		| (((\offset) & 7) << 10)	\
		| (((\offset) & 0x1f8) << 13)
.endm

/* LDR (predicate): LDR P\np, [X\nxbase, #\offset, MUL VL] */
.macro _sve_ldr_p np, nxbase, offset=0
	_sve_check_preg \np
	_check_general_reg \nxbase
	_check_num (\offset), -0x100, 0xff
	.inst	0x85800000			\
		| (\np)				\
		| ((\nxbase) << 5)		\
		| (((\offset) & 7) << 10)	\
		| (((\offset) & 0x1f8) << 13)
.endm

/* RDVL X\nx, #\imm */
.macro _sve_rdvl nx, imm
	_check_general_reg \nx
	_check_num (\imm), -0x20, 0x1f
	.inst	0x04bf5000			\
		| (\nx)				\
		| (((\imm) & 0x3f) << 5)
.endm
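
/*
 * RDVL multiplies the current SVE vector length in bytes by the
 * immediate, so an illustrative (hypothetical) use is:
 *
 *	_sve_rdvl 0, 1		// x0 = vector length in bytes
 */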

/* RDFFR (unpredicated): RDFFR P\np.B */
.macro _sve_rdffr np
	_sve_check_preg \np
	.inst	0x2519f000			\
		| (\np)
.endm

/* WRFFR P\np.B */
.macro _sve_wrffr np
	_sve_check_preg \np
	.inst	0x25289000			\
		| ((\np) << 5)
.endm

/* PFALSE P\np.B */
.macro _sve_pfalse np
	_sve_check_preg \np
	.inst	0x2518e400			\
		| (\np)
.endm
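
/*
 * Together these three let FFR be staged through a predicate register:
 * _sve_rdffr/_sve_wrffr copy FFR out of and back in via P\np, and
 * _sve_pfalse followed by _sve_wrffr (see sve_flush_p_ffr below)
 * zeroes it.
 */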

.macro __for from:req, to:req
	.if (\from) == (\to)
		_for__body %\from
	.else
		__for %\from, %((\from) + ((\to) - (\from)) / 2)
		__for %((\from) + ((\to) - (\from)) / 2 + 1), %\to
	.endif
.endm

.macro _for var:req, from:req, to:req, insn:vararg
	.macro _for__body \var:req
		.noaltmacro
		\insn
		.altmacro
	.endm

	.altmacro
	__for \from, \to
	.noaltmacro

	.purgem _for__body
.endm
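
/*
 * Hedged reading of the two macros above: _for defines a one-shot
 * _for__body wrapper around \insn, then __for halves the range on each
 * recursion, keeping the assembler's macro nesting depth logarithmic
 * in the trip count. Altmacro mode evaluates the %-prefixed arguments
 * to literal numbers, so e.g.
 *
 *	_for n, 0, 3, _sve_pfalse \n
 *
 * expands to _sve_pfalse 0 through _sve_pfalse 3.
 */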

/* Update ZCR_EL1.LEN with the new VQ */
.macro sve_load_vq xvqminus1, xtmp, xtmp2
		mrs_s		\xtmp, SYS_ZCR_EL1
		bic		\xtmp2, \xtmp, ZCR_ELx_LEN_MASK
		orr		\xtmp2, \xtmp2, \xvqminus1
		cmp		\xtmp2, \xtmp
		b.eq		921f
		msr_s		SYS_ZCR_EL1, \xtmp2	// self-synchronising
921:
.endm
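
/*
 * Illustrative (hypothetical) use, assuming x0 holds the desired
 * vector quadword count minus one (the ZCR_EL1.LEN value):
 *
 *	sve_load_vq x0, x1, x2	// x1 and x2 are scratch
 *
 * The compare skips the msr when LEN is already correct, avoiding the
 * self-synchronising write noted above.
 */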

/* Preserve the first 128 bits of Znz and zero the rest. */
.macro _sve_flush_z nz
	_sve_check_zreg \nz
	mov	v\nz\().16b, v\nz\().16b
.endm

.macro sve_flush_z
 _for n, 0, 31, _sve_flush_z	\n
.endm
.macro sve_flush_p_ffr
 _for n, 0, 15, _sve_pfalse	\n
		_sve_wrffr	0
.endm
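
/*
 * The self-move in _sve_flush_z relies on an AdvSIMD write to Vn
 * zeroing the bits of Zn above 128. sve_flush_p_ffr then clears every
 * predicate register and, by writing the freshly zeroed P0 to FFR,
 * clears FFR as well.
 */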

.macro sve_save nxbase, xpfpsr, nxtmp
 _for n, 0, 31,	_sve_str_v	\n, \nxbase, \n - 34
 _for n, 0, 15,	_sve_str_p	\n, \nxbase, \n - 16
		_sve_rdffr	0
		_sve_str_p	0, \nxbase
		_sve_ldr_p	0, \nxbase, -16

		mrs		x\nxtmp, fpsr
		str		w\nxtmp, [\xpfpsr]
		mrs		x\nxtmp, fpcr
		str		w\nxtmp, [\xpfpsr, #4]
.endm
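
/*
 * Layout sketch, read off the offsets above (all relative to
 * X\nxbase, MUL VL): Z0-Z31 occupy slots -34..-3, P0-P15 slots
 * -16..-1, and the FFR image slot 0. FFR is staged through P0, which
 * is then reloaded from its own slot so the caller sees it unchanged.
 * FPSR and FPCR go to the separate buffer at \xpfpsr.
 */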

.macro __sve_load nxbase, xpfpsr, nxtmp
 _for n, 0, 31,	_sve_ldr_v	\n, \nxbase, \n - 34
		_sve_ldr_p	0, \nxbase
		_sve_wrffr	0
 _for n, 0, 15,	_sve_ldr_p	\n, \nxbase, \n - 16

		ldr		w\nxtmp, [\xpfpsr]
		msr		fpsr, x\nxtmp
		ldr		w\nxtmp, [\xpfpsr, #4]
		msr		fpcr, x\nxtmp
.endm
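
/*
 * Note the restore order: P0 is first loaded from slot 0 (the saved
 * FFR image) and written to FFR, and only then are P0-P15 loaded from
 * slots -16..-1, so P0 gets its real value back last.
 */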

.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2
		sve_load_vq	\xvqminus1, x\nxtmp, \xtmp2
		__sve_load	\nxbase, \xpfpsr, \nxtmp
.endm