/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Support for Vector Instructions
 *
 * Assembler macros to generate .byte/.word code for particular
 * vector instructions that are only supported by binutils >= 2.26.
 *
 * Copyright IBM Corp. 2015
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */

#ifndef __ASM_S390_VX_INSN_H
#define __ASM_S390_VX_INSN_H

#ifdef __ASSEMBLY__


/* Macros to generate vector instruction byte code */

/* GR_NUM - Retrieve general-purpose register number
 *
 * @opd:	Operand to store register number
 * @gr:		Register designation string in the format "%rN"
 */
.macro	GR_NUM	opd gr
	\opd = 255
	.ifc \gr,%r0
		\opd = 0
	.endif
	.ifc \gr,%r1
		\opd = 1
	.endif
	.ifc \gr,%r2
		\opd = 2
	.endif
	.ifc \gr,%r3
		\opd = 3
	.endif
	.ifc \gr,%r4
		\opd = 4
	.endif
	.ifc \gr,%r5
		\opd = 5
	.endif
	.ifc \gr,%r6
		\opd = 6
	.endif
	.ifc \gr,%r7
		\opd = 7
	.endif
	.ifc \gr,%r8
		\opd = 8
	.endif
	.ifc \gr,%r9
		\opd = 9
	.endif
	.ifc \gr,%r10
		\opd = 10
	.endif
	.ifc \gr,%r11
		\opd = 11
	.endif
	.ifc \gr,%r12
		\opd = 12
	.endif
	.ifc \gr,%r13
		\opd = 13
	.endif
	.ifc \gr,%r14
		\opd = 14
	.endif
	.ifc \gr,%r15
		\opd = 15
	.endif
	.if \opd == 255
		\opd = \gr
	.endif
.endm
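
/*
 * Usage sketch (illustrative only, nothing is assembled here): GR_NUM
 * resolves a register designation string to a plain number at assembly
 * time, so it can be spliced into the instruction fields below, e.g.
 *
 *	GR_NUM	b2, "%r11"	# sets the symbol b2 = 11
 *
 * A designation that matches none of %r0-%r15 falls through to the
 * final .if and is assigned as-is, so a bare register number works too.
 */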

/* VX_NUM - Retrieve vector register number
 *
 * @opd:	Operand to store register number
 * @vxr:	Register designation string in the format "%vN"
 *
 * The vector register number is used as input to the instruction and
 * to compute the RXB field of the instruction.
 */
.macro	VX_NUM	opd vxr
	\opd = 255
	.ifc \vxr,%v0
		\opd = 0
	.endif
	.ifc \vxr,%v1
		\opd = 1
	.endif
	.ifc \vxr,%v2
		\opd = 2
	.endif
	.ifc \vxr,%v3
		\opd = 3
	.endif
	.ifc \vxr,%v4
		\opd = 4
	.endif
	.ifc \vxr,%v5
		\opd = 5
	.endif
	.ifc \vxr,%v6
		\opd = 6
	.endif
	.ifc \vxr,%v7
		\opd = 7
	.endif
	.ifc \vxr,%v8
		\opd = 8
	.endif
	.ifc \vxr,%v9
		\opd = 9
	.endif
	.ifc \vxr,%v10
		\opd = 10
	.endif
	.ifc \vxr,%v11
		\opd = 11
	.endif
	.ifc \vxr,%v12
		\opd = 12
	.endif
	.ifc \vxr,%v13
		\opd = 13
	.endif
	.ifc \vxr,%v14
		\opd = 14
	.endif
	.ifc \vxr,%v15
		\opd = 15
	.endif
	.ifc \vxr,%v16
		\opd = 16
	.endif
	.ifc \vxr,%v17
		\opd = 17
	.endif
	.ifc \vxr,%v18
		\opd = 18
	.endif
	.ifc \vxr,%v19
		\opd = 19
	.endif
	.ifc \vxr,%v20
		\opd = 20
	.endif
	.ifc \vxr,%v21
		\opd = 21
	.endif
	.ifc \vxr,%v22
		\opd = 22
	.endif
	.ifc \vxr,%v23
		\opd = 23
	.endif
	.ifc \vxr,%v24
		\opd = 24
	.endif
	.ifc \vxr,%v25
		\opd = 25
	.endif
	.ifc \vxr,%v26
		\opd = 26
	.endif
	.ifc \vxr,%v27
		\opd = 27
	.endif
	.ifc \vxr,%v28
		\opd = 28
	.endif
	.ifc \vxr,%v29
		\opd = 29
	.endif
	.ifc \vxr,%v30
		\opd = 30
	.endif
	.ifc \vxr,%v31
		\opd = 31
	.endif
	.if \opd == 255
		\opd = \vxr
	.endif
.endm
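
/*
 * Usage sketch (illustrative): registers %v16-%v31 do not fit into the
 * 4-bit register fields; only the low four bits are encoded there and
 * bit 0x10 is carried over into the RXB field (see below), e.g.
 *
 *	VX_NUM	v1, "%v31"	# v1 = 31; v1&15 = 15, v1&0x10 != 0
 */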

/* RXB - Compute RXB field from vector register operand numbers
 *
 * @rxb:	Operand to store computed RXB value
 * @v1:		First vector register designated operand
 * @v2:		Second vector register designated operand
 * @v3:		Third vector register designated operand
 * @v4:		Fourth vector register designated operand
 *
 * The RXB field consists of the most significant bit of each register
 * number and extends the encodable register range from %v0-%v15 to
 * %v0-%v31.
 */
.macro	RXB	rxb v1 v2=0 v3=0 v4=0
	\rxb = 0
	.if \v1 & 0x10
		\rxb = \rxb | 0x08
	.endif
	.if \v2 & 0x10
		\rxb = \rxb | 0x04
	.endif
	.if \v3 & 0x10
		\rxb = \rxb | 0x02
	.endif
	.if \v4 & 0x10
		\rxb = \rxb | 0x01
	.endif
.endm
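
/*
 * Worked example (illustrative): for an instruction with operands
 * %v16 and %v20 both register numbers have bit 0x10 set, so
 *
 *	RXB	rxb, 16, 20	# rxb = 0x08 | 0x04 = 0x0C
 */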

/* MRXB - Generate Element Size Control and RXB value
 *
 * @m:		Element size control
 * @v1:		First vector register designated operand (for RXB)
 * @v2:		Second vector register designated operand (for RXB)
 * @v3:		Third vector register designated operand (for RXB)
 * @v4:		Fourth vector register designated operand (for RXB)
 */
.macro	MRXB	m v1 v2=0 v3=0 v4=0
	rxb = 0
	RXB	rxb, \v1, \v2, \v3, \v4
	.byte	(\m << 4) | rxb
.endm

/* MRXBOPC - Generate Element Size Control, RXB, and final Opcode fields
 *
 * @m:		Element size control
 * @opc:	Opcode
 * @v1:		First vector register designated operand (for RXB)
 * @v2:		Second vector register designated operand (for RXB)
 * @v3:		Third vector register designated operand (for RXB)
 * @v4:		Fourth vector register designated operand (for RXB)
 */
.macro	MRXBOPC	m opc v1 v2=0 v3=0 v4=0
	MRXB	\m, \v1, \v2, \v3, \v4
	.byte	\opc
.endm
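
/*
 * Worked example (illustrative): each instruction macro below emits two
 * .word values carrying the opcode and register/displacement fields;
 * MRXBOPC then appends the m/RXB byte and the trailing opcode byte.
 * "VLR %v16, %v20" (see below) thus assembles to the 6-byte sequence
 *
 *	0xE7 0x04 0x00 0x00 0x0C 0x56
 *
 * where 0x04 holds the low register nibbles and 0x0C is the RXB value
 * from the example above.
 */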

/* Vector support instructions */

/* VECTOR GENERATE BYTE MASK */
.macro	VGBM	vr imm2
	VX_NUM	v1, \vr
	.word	(0xE700 | ((v1&15) << 4))
	.word	\imm2
	MRXBOPC	0, 0x44, v1
.endm
.macro	VZERO	vxr
	VGBM	\vxr, 0
.endm
.macro	VONE	vxr
	VGBM	\vxr, 0xFFFF
.endm
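
/*
 * Usage sketch (illustrative): VGBM expands each of the 16 immediate
 * bits to a byte of all ones or all zeros, hence
 *
 *	VZERO	%v24	# v24 = 0
 *	VONE	%v25	# v25 = all ones
 */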

/* VECTOR LOAD VR ELEMENT FROM GR */
.macro	VLVG	v, gr, disp, m
	VX_NUM	v1, \v
	GR_NUM	b2, "%r0"
	GR_NUM	r3, \gr
	.word	0xE700 | ((v1&15) << 4) | r3
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m, 0x22, v1
.endm
.macro	VLVGB	v, gr, index
	VLVG	\v, \gr, \index, 0
.endm
.macro	VLVGH	v, gr, index
	VLVG	\v, \gr, \index, 1
.endm
.macro	VLVGF	v, gr, index
	VLVG	\v, \gr, \index, 2
.endm
.macro	VLVGG	v, gr, index
	VLVG	\v, \gr, \index, 3
.endm
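
/*
 * Usage sketch (illustrative): load a general register into one element
 * of a vector register; the index selects the element, e.g.
 *
 *	VLVGG	%v1, %r2, 0	# doubleword element 0 of %v1 = %r2
 */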

/* VECTOR LOAD REGISTER */
.macro	VLR	v1, v2
	VX_NUM	v1, \v1
	VX_NUM	v2, \v2
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	0
	MRXBOPC	0, 0x56, v1, v2
.endm

/* VECTOR LOAD */
.macro	VL	v, disp, index="%r0", base
	VX_NUM	v1, \v
	GR_NUM	x2, \index
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | x2
	.word	(b2 << 12) | (\disp)
	MRXBOPC 0, 0x06, v1
.endm
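
/*
 * Usage sketch (illustrative): load 16 bytes from the address in a base
 * register, with no index register:
 *
 *	VL	%v16, 0, "%r0", %r3	# v16 = 16 bytes at 0(%r3)
 */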

/* VECTOR LOAD ELEMENT */
.macro	VLEx	vr1, disp, index="%r0", base, m3, opc
	VX_NUM	v1, \vr1
	GR_NUM	x2, \index
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | x2
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m3, \opc, v1
.endm
.macro	VLEB	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x00
.endm
.macro	VLEH	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x01
.endm
.macro	VLEF	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x03
.endm
.macro	VLEG	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x02
.endm

/* VECTOR LOAD ELEMENT IMMEDIATE */
.macro	VLEIx	vr1, imm2, m3, opc
	VX_NUM	v1, \vr1
	.word	0xE700 | ((v1&15) << 4)
	.word	\imm2
	MRXBOPC	\m3, \opc, v1
.endm
.macro	VLEIB	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x40
.endm
.macro	VLEIH	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x41
.endm
.macro	VLEIF	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x43
.endm
.macro	VLEIG	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x42
.endm

/* VECTOR LOAD GR FROM VR ELEMENT */
.macro	VLGV	gr, vr, disp, base="%r0", m
	GR_NUM	r1, \gr
	GR_NUM	b2, \base
	VX_NUM	v3, \vr
	.word	0xE700 | (r1 << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m, 0x21, v3
.endm
.macro	VLGVB	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 0
.endm
.macro	VLGVH	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 1
.endm
.macro	VLGVF	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 2
.endm
.macro	VLGVG	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 3
.endm
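
/*
 * Usage sketch (illustrative): the inverse of VLVG, copying one vector
 * element into a general register, e.g.
 *
 *	VLGVF	%r2, %v16, 3	# %r2 = word element 3 of %v16
 */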

/* VECTOR LOAD MULTIPLE */
.macro	VLM	vfrom, vto, disp, base, hint=3
	VX_NUM	v1, \vfrom
	VX_NUM	v3, \vto
	GR_NUM	b2, \base	    /* Base register */
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\hint, 0x36, v1, v3
.endm

/* VECTOR STORE */
.macro	VST	vr1, disp, index="%r0", base
	VX_NUM	v1, \vr1
	GR_NUM	x2, \index
	GR_NUM	b2, \base	    /* Base register */
	.word	0xE700 | ((v1&15) << 4) | (x2&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	0, 0x0E, v1
.endm

/* VECTOR STORE MULTIPLE */
.macro	VSTM	vfrom, vto, disp, base, hint=3
	VX_NUM	v1, \vfrom
	VX_NUM	v3, \vto
	GR_NUM	b2, \base	    /* Base register */
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\hint, 0x3E, v1, v3
.endm
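
/*
 * Usage sketch (illustrative): spill and reload a contiguous register
 * range, e.g. a 128-byte save area for %v16-%v23:
 *
 *	VSTM	%v16, %v23, 0, %r15	# store v16..v23 at 0(%r15)
 *	VLM	%v16, %v23, 0, %r15	# load them back
 *
 * The displacement must stay within the 12-bit field.
 */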

/* VECTOR PERMUTE */
.macro	VPERM	vr1, vr2, vr3, vr4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	VX_NUM	v4, \vr4
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	(v4&15), 0x8C, v1, v2, v3, v4
.endm
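
/*
 * Usage sketch (illustrative): each byte of the result is selected from
 * the 32-byte concatenation of the second and third operands, indexed
 * by the low five bits of the corresponding byte of the fourth operand:
 *
 *	VPERM	%v1, %v2, %v3, %v4	# v1[i] = (v2:v3)[v4[i] & 31]
 */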

/* VECTOR UNPACK LOGICAL LOW */
.macro	VUPLL	vr1, vr2, m3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	0x0000
	MRXBOPC	\m3, 0xD4, v1, v2
.endm
.macro	VUPLLB	vr1, vr2
	VUPLL	\vr1, \vr2, 0
.endm
.macro	VUPLLH	vr1, vr2
	VUPLL	\vr1, \vr2, 1
.endm
.macro	VUPLLF	vr1, vr2
	VUPLL	\vr1, \vr2, 2
.endm

/* VECTOR PERMUTE DOUBLEWORD IMMEDIATE */
.macro	VPDI	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0x84, v1, v2, v3
.endm

/* VECTOR REPLICATE */
.macro	VREP	vr1, vr3, imm2, m4
	VX_NUM	v1, \vr1
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	\imm2
	MRXBOPC	\m4, 0x4D, v1, v3
.endm
.macro	VREPB	vr1, vr3, imm2
	VREP	\vr1, \vr3, \imm2, 0
.endm
.macro	VREPH	vr1, vr3, imm2
	VREP	\vr1, \vr3, \imm2, 1
.endm
.macro	VREPF	vr1, vr3, imm2
	VREP	\vr1, \vr3, \imm2, 2
.endm
.macro	VREPG	vr1, vr3, imm2
	VREP	\vr1, \vr3, \imm2, 3
.endm
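
/*
 * Usage sketch (illustrative): broadcast a single source element into
 * every element of the target, e.g.
 *
 *	VREPF	%v1, %v2, 0	# all four words of %v1 = word 0 of %v2
 */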

/* VECTOR MERGE HIGH */
.macro	VMRH	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0x61, v1, v2, v3
.endm
.macro	VMRHB	vr1, vr2, vr3
	VMRH	\vr1, \vr2, \vr3, 0
.endm
.macro	VMRHH	vr1, vr2, vr3
	VMRH	\vr1, \vr2, \vr3, 1
.endm
.macro	VMRHF	vr1, vr2, vr3
	VMRH	\vr1, \vr2, \vr3, 2
.endm
.macro	VMRHG	vr1, vr2, vr3
	VMRH	\vr1, \vr2, \vr3, 3
.endm

/* VECTOR MERGE LOW */
.macro	VMRL	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0x60, v1, v2, v3
.endm
.macro	VMRLB	vr1, vr2, vr3
	VMRL	\vr1, \vr2, \vr3, 0
.endm
.macro	VMRLH	vr1, vr2, vr3
	VMRL	\vr1, \vr2, \vr3, 1
.endm
.macro	VMRLF	vr1, vr2, vr3
	VMRL	\vr1, \vr2, \vr3, 2
.endm
.macro	VMRLG	vr1, vr2, vr3
	VMRL	\vr1, \vr2, \vr3, 3
.endm


/* Vector integer instructions */

/* VECTOR AND */
.macro	VN	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x68, v1, v2, v3
.endm

/* VECTOR EXCLUSIVE OR */
.macro	VX	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x6D, v1, v2, v3
.endm
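
/*
 * Usage sketch (illustrative): plain bitwise operations on the full
 * 128 bits, e.g.
 *
 *	VX	%v1, %v2, %v3	# v1 = v2 ^ v3
 */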

/* VECTOR GALOIS FIELD MULTIPLY SUM */
.macro	VGFM	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0xB4, v1, v2, v3
.endm
.macro	VGFMB	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 0
.endm
.macro	VGFMH	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 1
.endm
.macro	VGFMF	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 2
.endm
.macro	VGFMG	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 3
.endm
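
/*
 * Usage note (illustrative): VGFM performs a carry-less (polynomial)
 * multiplication of corresponding elements and XORs the double-wide
 * products of the even/odd element pairs, e.g. the doubleword form
 * typically used for CRC folding:
 *
 *	VGFMG	%v1, %v2, %v3	# v1 = (v2[0] pmul v3[0]) ^ (v2[1] pmul v3[1])
 */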

/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */
.macro	VGFMA	vr1, vr2, vr3, vr4, m5
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	VX_NUM	v4, \vr4
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12) | (\m5 << 8)
	MRXBOPC	(v4&15), 0xBC, v1, v2, v3, v4
.endm
.macro	VGFMAB	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 0
.endm
.macro	VGFMAH	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 1
.endm
.macro	VGFMAF	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 2
.endm
.macro	VGFMAG	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 3
.endm

/* VECTOR SHIFT RIGHT LOGICAL BY BYTE */
.macro	VSRLB	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x7D, v1, v2, v3
.endm

/* VECTOR REPLICATE IMMEDIATE */
.macro	VREPI	vr1, imm2, m3
	VX_NUM	v1, \vr1
	.word	0xE700 | ((v1&15) << 4)
	.word	\imm2
	MRXBOPC	\m3, 0x45, v1
.endm
.macro	VREPIB	vr1, imm2
	VREPI	\vr1, \imm2, 0
.endm
.macro	VREPIH	vr1, imm2
	VREPI	\vr1, \imm2, 1
.endm
.macro	VREPIF	vr1, imm2
	VREPI	\vr1, \imm2, 2
.endm
.macro	VREPIG	vr1, imm2
	VREPI	\vr1, \imm2, 3
.endm

/* VECTOR ADD */
.macro	VA	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0xF3, v1, v2, v3
.endm
.macro	VAB	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 0
.endm
.macro	VAH	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 1
.endm
.macro	VAF	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 2
.endm
.macro	VAG	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 3
.endm
.macro	VAQ	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 4
.endm
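
/*
 * Usage sketch (illustrative): element-wise addition with the element
 * size selected by the suffix, e.g.
 *
 *	VAG	%v1, %v2, %v3	# doubleword-wise v1 = v2 + v3
 *	VAQ	%v1, %v2, %v3	# 128-bit v1 = v2 + v3
 */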

/* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */
.macro	VESRAV	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC \m4, 0x7A, v1, v2, v3
.endm

.macro	VESRAVB	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 0
.endm
.macro	VESRAVH	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 1
.endm
.macro	VESRAVF	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 2
.endm
.macro	VESRAVG	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 3
.endm

/* VECTOR ELEMENT ROTATE LEFT LOGICAL */
.macro	VERLL	vr1, vr3, disp, base="%r0", m4
	VX_NUM	v1, \vr1
	VX_NUM	v3, \vr3
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m4, 0x33, v1, v3
.endm
.macro	VERLLB	vr1, vr3, disp, base="%r0"
	VERLL	\vr1, \vr3, \disp, \base, 0
.endm
.macro	VERLLH	vr1, vr3, disp, base="%r0"
	VERLL	\vr1, \vr3, \disp, \base, 1
.endm
.macro	VERLLF	vr1, vr3, disp, base="%r0"
	VERLL	\vr1, \vr3, \disp, \base, 2
.endm
.macro	VERLLG	vr1, vr3, disp, base="%r0"
	VERLL	\vr1, \vr3, \disp, \base, 3
.endm
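
/*
 * Usage sketch (illustrative): with the default "%r0" base the
 * displacement acts as an immediate rotate count, e.g.
 *
 *	VERLLG	%v1, %v3, 1	# rotate each doubleword of %v3 left by 1
 */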

/* VECTOR SHIFT LEFT DOUBLE BY BYTE */
.macro	VSLDB	vr1, vr2, vr3, imm4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12) | (\imm4)
	MRXBOPC	0, 0x77, v1, v2, v3
.endm
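
/*
 * Usage sketch (illustrative): the 32-byte concatenation of the second
 * and third operands is shifted left by imm4 bytes and the leftmost 16
 * bytes become the result, e.g.
 *
 *	VSLDB	%v1, %v2, %v3, 4	# v1 = bytes 4..19 of v2:v3
 */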

#endif	/* __ASSEMBLY__ */
#endif	/* __ASM_S390_VX_INSN_H */