/*
 * Support for Vector Instructions
 *
 * Assembler macros to generate .byte/.word code for particular
 * vector instructions that are supported by recent binutils (>= 2.26) only.
 *
 * Copyright IBM Corp. 2015
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */

#ifndef __ASM_S390_VX_INSN_H
#define __ASM_S390_VX_INSN_H

#ifdef __ASSEMBLY__


/* Macros to generate vector instruction byte code */

/* GR_NUM - Retrieve general-purpose register number
 *
 * @opd:	Operand to store the register number
 * @gr:		String designating the register in the format "%rN"
 */
.macro	GR_NUM	opd gr
	\opd = 255
	.ifc \gr,%r0
		\opd = 0
	.endif
	.ifc \gr,%r1
		\opd = 1
	.endif
	.ifc \gr,%r2
		\opd = 2
	.endif
	.ifc \gr,%r3
		\opd = 3
	.endif
	.ifc \gr,%r4
		\opd = 4
	.endif
	.ifc \gr,%r5
		\opd = 5
	.endif
	.ifc \gr,%r6
		\opd = 6
	.endif
	.ifc \gr,%r7
		\opd = 7
	.endif
	.ifc \gr,%r8
		\opd = 8
	.endif
	.ifc \gr,%r9
		\opd = 9
	.endif
	.ifc \gr,%r10
		\opd = 10
	.endif
	.ifc \gr,%r11
		\opd = 11
	.endif
	.ifc \gr,%r12
		\opd = 12
	.endif
	.ifc \gr,%r13
		\opd = 13
	.endif
	.ifc \gr,%r14
		\opd = 14
	.endif
	.ifc \gr,%r15
		\opd = 15
	.endif
	.if \opd == 255
		\opd = \gr
	.endif
.endm
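
/*
 * Example (illustrative): invoked as GR_NUM b2, "%r1" the macro sets the
 * assembler symbol b2 to 1; a plain numeric argument falls through to the
 * final .if and is used unchanged.
 */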

/* VX_NUM - Retrieve vector register number
 *
 * @opd:	Operand to store the register number
 * @vxr:	String designating the register in the format "%vN"
 *
 * The vector register number is used as an input operand to the
 * instruction and to compute the RXB field of the instruction.
 */
.macro	VX_NUM	opd vxr
	\opd = 255
	.ifc \vxr,%v0
		\opd = 0
	.endif
	.ifc \vxr,%v1
		\opd = 1
	.endif
	.ifc \vxr,%v2
		\opd = 2
	.endif
	.ifc \vxr,%v3
		\opd = 3
	.endif
	.ifc \vxr,%v4
		\opd = 4
	.endif
	.ifc \vxr,%v5
		\opd = 5
	.endif
	.ifc \vxr,%v6
		\opd = 6
	.endif
	.ifc \vxr,%v7
		\opd = 7
	.endif
	.ifc \vxr,%v8
		\opd = 8
	.endif
	.ifc \vxr,%v9
		\opd = 9
	.endif
	.ifc \vxr,%v10
		\opd = 10
	.endif
	.ifc \vxr,%v11
		\opd = 11
	.endif
	.ifc \vxr,%v12
		\opd = 12
	.endif
	.ifc \vxr,%v13
		\opd = 13
	.endif
	.ifc \vxr,%v14
		\opd = 14
	.endif
	.ifc \vxr,%v15
		\opd = 15
	.endif
	.ifc \vxr,%v16
		\opd = 16
	.endif
	.ifc \vxr,%v17
		\opd = 17
	.endif
	.ifc \vxr,%v18
		\opd = 18
	.endif
	.ifc \vxr,%v19
		\opd = 19
	.endif
	.ifc \vxr,%v20
		\opd = 20
	.endif
	.ifc \vxr,%v21
		\opd = 21
	.endif
	.ifc \vxr,%v22
		\opd = 22
	.endif
	.ifc \vxr,%v23
		\opd = 23
	.endif
	.ifc \vxr,%v24
		\opd = 24
	.endif
	.ifc \vxr,%v25
		\opd = 25
	.endif
	.ifc \vxr,%v26
		\opd = 26
	.endif
	.ifc \vxr,%v27
		\opd = 27
	.endif
	.ifc \vxr,%v28
		\opd = 28
	.endif
	.ifc \vxr,%v29
		\opd = 29
	.endif
	.ifc \vxr,%v30
		\opd = 30
	.endif
	.ifc \vxr,%v31
		\opd = 31
	.endif
	.if \opd == 255
		\opd = \vxr
	.endif
.endm
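
/*
 * Example (illustrative): invoked as VX_NUM v1, "%v31" the macro sets v1
 * to 31; only the low four bits of the number go into the register field
 * of the instruction, the fifth bit is encoded via the RXB field below.
 */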

/* RXB - Compute the RXB field of the designated vector registers
 *
 * @rxb:	Operand to store the computed RXB value
 * @v1:		First vector register designated operand
 * @v2:		Second vector register designated operand
 * @v3:		Third vector register designated operand
 * @v4:		Fourth vector register designated operand
 *
 * The RXB field collects the most significant bit of each vector register
 * number, which is required to address vector registers above %v15.
 */
.macro	RXB	rxb v1 v2=0 v3=0 v4=0
	\rxb = 0
	.if \v1 & 0x10
		\rxb = \rxb | 0x08
	.endif
	.if \v2 & 0x10
		\rxb = \rxb | 0x04
	.endif
	.if \v3 & 0x10
		\rxb = \rxb | 0x02
	.endif
	.if \v4 & 0x10
		\rxb = \rxb | 0x01
	.endif
.endm
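
/*
 * Worked example (illustrative): for register numbers v1=17, v2=2, v3=31,
 * v4=0 only v1 and v3 have bit 0x10 set, so rxb = 0x08 | 0x02 = 0x0A.
 */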

/* MRXB - Generate Element Size Control and RXB value
 *
 * @m:		Element size control
 * @v1:		First vector register designated operand (for RXB)
 * @v2:		Second vector register designated operand (for RXB)
 * @v3:		Third vector register designated operand (for RXB)
 * @v4:		Fourth vector register designated operand (for RXB)
 */
.macro	MRXB	m v1 v2=0 v3=0 v4=0
	rxb = 0
	RXB	rxb, \v1, \v2, \v3, \v4
	.byte	(\m << 4) | rxb
.endm

/* MRXBOPC - Generate Element Size Control, RXB, and final Opcode fields
 *
 * @m:		Element size control
 * @opc:	Opcode
 * @v1:		First vector register designated operand (for RXB)
 * @v2:		Second vector register designated operand (for RXB)
 * @v3:		Third vector register designated operand (for RXB)
 * @v4:		Fourth vector register designated operand (for RXB)
 */
.macro	MRXBOPC	m opc v1 v2=0 v3=0 v4=0
	MRXB	\m, \v1, \v2, \v3, \v4
	.byte	\opc
.endm
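
/*
 * Note (illustrative): each instruction macro below emits two .word
 * directives (opcode high byte plus register and displacement fields)
 * followed by MRXBOPC, which appends the element-size/RXB byte and the
 * final opcode byte of the 6-byte E7xx instruction.
 */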

/* Vector support instructions */

/* VECTOR GENERATE BYTE MASK */
.macro	VGBM	vr imm2
	VX_NUM	v1, \vr
	.word	(0xE700 | ((v1&15) << 4))
	.word	\imm2
	MRXBOPC	0, 0x44, v1
.endm
.macro	VZERO	vxr
	VGBM	\vxr, 0
.endm
.macro	VONE	vxr
	VGBM	\vxr, 0xFFFF
.endm
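
/*
 * Example (illustrative): "VZERO %v24" expands to VGBM with a zero mask
 * and clears all bits of %v24, while "VONE %v24" sets all bits.
 */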

/* VECTOR LOAD VR ELEMENT FROM GR */
.macro	VLVG	v, gr, disp, m
	VX_NUM	v1, \v
	GR_NUM	b2, "%r0"
	GR_NUM	r3, \gr
	.word	0xE700 | ((v1&15) << 4) | r3
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m, 0x22, v1
.endm
.macro	VLVGB	v, gr, index
	VLVG	\v, \gr, \index, 0
.endm
.macro	VLVGH	v, gr, index
	VLVG	\v, \gr, \index, 1
.endm
.macro	VLVGF	v, gr, index
	VLVG	\v, \gr, \index, 2
.endm
.macro	VLVGG	v, gr, index
	VLVG	\v, \gr, \index, 3
.endm
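
/*
 * Example (illustrative): "VLVGG %v16,%r2,1" stores the contents of %r2
 * into doubleword element 1 of %v16; the element index is encoded in the
 * displacement field with %r0 as base.
 */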

/* VECTOR LOAD */
.macro	VL	v, disp, index="%r0", base
	VX_NUM	v1, \v
	GR_NUM	x2, \index
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | x2
	.word	(b2 << 12) | (\disp)
	MRXBOPC 0, 0x06, v1
.endm
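
/*
 * Example (illustrative): "VL %v16,0,%r0,%r3" loads the 16 bytes at the
 * address in %r3 into %v16.
 */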

/* VECTOR LOAD ELEMENT */
.macro	VLEx	vr1, disp, index="%r0", base, m3, opc
	VX_NUM	v1, \vr1
	GR_NUM	x2, \index
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | x2
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m3, \opc, v1
.endm
.macro	VLEB	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x00
.endm
.macro	VLEH	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x01
.endm
.macro	VLEF	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x03
.endm
.macro	VLEG	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x02
.endm
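
/*
 * Example (illustrative): "VLEF %v16,0,%r0,%r3,2" loads the word at the
 * address in %r3 into word element 2 of %v16, leaving the other elements
 * unchanged.
 */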

/* VECTOR LOAD ELEMENT IMMEDIATE */
.macro	VLEIx	vr1, imm2, m3, opc
	VX_NUM	v1, \vr1
	.word	0xE700 | ((v1&15) << 4)
	.word	\imm2
	MRXBOPC	\m3, \opc, v1
.endm
.macro	VLEIB	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x40
.endm
.macro	VLEIH	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x41
.endm
.macro	VLEIF	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x43
.endm
.macro	VLEIG	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x42
.endm
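
/*
 * Example (illustrative): "VLEIG %v16,1,0" places the sign-extended
 * 16-bit immediate 1 into doubleword element 0 of %v16.
 */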

/* VECTOR LOAD GR FROM VR ELEMENT */
.macro	VLGV	gr, vr, disp, base="%r0", m
	GR_NUM	r1, \gr
	GR_NUM	b2, \base
	VX_NUM	v3, \vr
	.word	0xE700 | (r1 << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m, 0x21, v3
.endm
.macro	VLGVB	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 0
.endm
.macro	VLGVH	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 1
.endm
.macro	VLGVF	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 2
.endm
.macro	VLGVG	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 3
.endm
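
/*
 * Example (illustrative): "VLGVF %r2,%v16,3" copies word element 3 of
 * %v16 into %r2.
 */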

/* VECTOR LOAD MULTIPLE */
.macro	VLM	vfrom, vto, disp, base
	VX_NUM	v1, \vfrom
	VX_NUM	v3, \vto
	GR_NUM	b2, \base	    /* Base register */
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	0, 0x36, v1, v3
.endm

/* VECTOR STORE MULTIPLE */
.macro	VSTM	vfrom, vto, disp, base
	VX_NUM	v1, \vfrom
	VX_NUM	v3, \vto
	GR_NUM	b2, \base	    /* Base register */
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	0, 0x3E, v1, v3
.endm
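
/*
 * Example (illustrative): "VLM %v16,%v23,0,%r1" loads the eight registers
 * %v16-%v23 from 128 consecutive bytes at the address in %r1; VSTM with
 * the same operands stores them back.
 */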

/* VECTOR PERMUTE */
.macro	VPERM	vr1, vr2, vr3, vr4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	VX_NUM	v4, \vr4
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	(v4&15), 0x8C, v1, v2, v3, v4
.endm
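
/*
 * Example (illustrative): "VPERM %v0,%v16,%v17,%v18" builds %v0 from bytes
 * of the concatenation of %v16 and %v17, selected by the byte indexes held
 * in %v18.
 */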

/* VECTOR UNPACK LOGICAL LOW */
.macro	VUPLL	vr1, vr2, m3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	0x0000
	MRXBOPC	\m3, 0xD4, v1, v2
.endm
.macro	VUPLLB	vr1, vr2
	VUPLL	\vr1, \vr2, 0
.endm
.macro	VUPLLH	vr1, vr2
	VUPLL	\vr1, \vr2, 1
.endm
.macro	VUPLLF	vr1, vr2
	VUPLL	\vr1, \vr2, 2
.endm
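
/*
 * Example (illustrative): "VUPLLB %v1,%v2" zero-extends each byte of the
 * rightmost half of %v2 into the halfword elements of %v1.
 */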


/* Vector integer instructions */

/* VECTOR EXCLUSIVE OR */
.macro	VX	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x6D, v1, v2, v3
.endm
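
/*
 * Example (illustrative): "VX %v1,%v2,%v3" computes the bitwise exclusive
 * or of %v2 and %v3 and stores the result in %v1; "VX %v1,%v1,%v1" is
 * another way to clear a register.
 */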

/* VECTOR GALOIS FIELD MULTIPLY SUM */
.macro	VGFM	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0xB4, v1, v2, v3
.endm
.macro	VGFMB	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 0
.endm
.macro	VGFMH	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 1
.endm
.macro	VGFMF	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 2
.endm
.macro	VGFMG	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 3
.endm
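
/*
 * Example (illustrative): "VGFMG %v16,%v17,%v18" performs carry-less
 * (Galois field) multiplications of the doubleword elements of %v17 and
 * %v18 and exclusive-ors the resulting products into %v16, a primitive
 * used, for instance, by CRC folding implementations.
 */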

/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */
.macro	VGFMA	vr1, vr2, vr3, vr4, m5
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	VX_NUM	v4, \vr4
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12) | (\m5 << 8)
	MRXBOPC	(v4&15), 0xBC, v1, v2, v3, v4
.endm
.macro	VGFMAB	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 0
.endm
.macro	VGFMAH	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 1
.endm
.macro	VGFMAF	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 2
.endm
.macro	VGFMAG	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 3
.endm
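
/*
 * Example (illustrative): "VGFMAG %v16,%v17,%v18,%v19" works like VGFMG
 * but additionally exclusive-ors the fourth operand %v19 into the result
 * placed in %v16.
 */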

/* VECTOR SHIFT RIGHT LOGICAL BY BYTE */
.macro	VSRLB	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x7D, v1, v2, v3
.endm
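
/*
 * Example (illustrative): "VSRLB %v1,%v2,%v3" shifts the 128-bit contents
 * of %v2 right by a whole number of bytes taken from %v3 and stores the
 * result in %v1.
 */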


#endif	/* __ASSEMBLY__ */
#endif	/* __ASM_S390_VX_INSN_H */