/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Support for Vector Instructions
 *
 * Assembler macros to generate .byte/.word code for particular
 * vector instructions that are supported by recent binutils (>= 2.26) only.
 *
 * Copyright IBM Corp. 2015
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */

#ifndef __ASM_S390_VX_INSN_INTERNAL_H
#define __ASM_S390_VX_INSN_INTERNAL_H

#ifndef __ASM_S390_VX_INSN_H
#error only <asm/vx-insn.h> can be included directly
#endif

#ifdef __ASSEMBLY__

/* Macros to generate vector instruction byte code */

/* GR_NUM - Retrieve general-purpose register number
 *
 * @opd: Operand to store register number
 * @gr: String designation of the register in the format "%rN"
 */
.macro	GR_NUM	opd gr
	\opd = 255
	.ifc	\gr,%r0
	\opd = 0
	.endif
	.ifc	\gr,%r1
	\opd = 1
	.endif
	.ifc	\gr,%r2
	\opd = 2
	.endif
	.ifc	\gr,%r3
	\opd = 3
	.endif
	.ifc	\gr,%r4
	\opd = 4
	.endif
	.ifc	\gr,%r5
	\opd = 5
	.endif
	.ifc	\gr,%r6
	\opd = 6
	.endif
	.ifc	\gr,%r7
	\opd = 7
	.endif
	.ifc	\gr,%r8
	\opd = 8
	.endif
	.ifc	\gr,%r9
	\opd = 9
	.endif
	.ifc	\gr,%r10
	\opd = 10
	.endif
	.ifc	\gr,%r11
	\opd = 11
	.endif
	.ifc	\gr,%r12
	\opd = 12
	.endif
	.ifc	\gr,%r13
	\opd = 13
	.endif
	.ifc	\gr,%r14
	\opd = 14
	.endif
	.ifc	\gr,%r15
	\opd = 15
	.endif
	.if	\opd == 255
	\opd = \gr
	.endif
.endm
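
/* Usage sketch (illustrative, arbitrary register): GR_NUM resolves a
 * register string to its number for use in an instruction field:
 *
 *	GR_NUM	b2, %r11	# b2 = 11
 *
 * A quoted argument such as "%r11" behaves the same, since the quotes
 * are stripped during macro argument substitution.
 */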

/* VX_NUM - Retrieve vector register number
 *
 * @opd: Operand to store register number
 * @vxr: String designation of the register in the format "%vN"
 *
 * The vector register number is used as an input to the instruction
 * and to compute the RXB field of the instruction.
 */
.macro	VX_NUM	opd vxr
	\opd = 255
	.ifc	\vxr,%v0
	\opd = 0
	.endif
	.ifc	\vxr,%v1
	\opd = 1
	.endif
	.ifc	\vxr,%v2
	\opd = 2
	.endif
	.ifc	\vxr,%v3
	\opd = 3
	.endif
	.ifc	\vxr,%v4
	\opd = 4
	.endif
	.ifc	\vxr,%v5
	\opd = 5
	.endif
	.ifc	\vxr,%v6
	\opd = 6
	.endif
	.ifc	\vxr,%v7
	\opd = 7
	.endif
	.ifc	\vxr,%v8
	\opd = 8
	.endif
	.ifc	\vxr,%v9
	\opd = 9
	.endif
	.ifc	\vxr,%v10
	\opd = 10
	.endif
	.ifc	\vxr,%v11
	\opd = 11
	.endif
	.ifc	\vxr,%v12
	\opd = 12
	.endif
	.ifc	\vxr,%v13
	\opd = 13
	.endif
	.ifc	\vxr,%v14
	\opd = 14
	.endif
	.ifc	\vxr,%v15
	\opd = 15
	.endif
	.ifc	\vxr,%v16
	\opd = 16
	.endif
	.ifc	\vxr,%v17
	\opd = 17
	.endif
	.ifc	\vxr,%v18
	\opd = 18
	.endif
	.ifc	\vxr,%v19
	\opd = 19
	.endif
	.ifc	\vxr,%v20
	\opd = 20
	.endif
	.ifc	\vxr,%v21
	\opd = 21
	.endif
	.ifc	\vxr,%v22
	\opd = 22
	.endif
	.ifc	\vxr,%v23
	\opd = 23
	.endif
	.ifc	\vxr,%v24
	\opd = 24
	.endif
	.ifc	\vxr,%v25
	\opd = 25
	.endif
	.ifc	\vxr,%v26
	\opd = 26
	.endif
	.ifc	\vxr,%v27
	\opd = 27
	.endif
	.ifc	\vxr,%v28
	\opd = 28
	.endif
	.ifc	\vxr,%v29
	\opd = 29
	.endif
	.ifc	\vxr,%v30
	\opd = 30
	.endif
	.ifc	\vxr,%v31
	\opd = 31
	.endif
	.if	\opd == 255
	\opd = \vxr
	.endif
.endm
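
/* Usage sketch (illustrative): registers %v16-%v31 yield numbers with
 * bit 0x10 set; that bit does not fit into the 4-bit register fields
 * and is instead encoded via the RXB field (see below):
 *
 *	VX_NUM	v1, %v17	# v1 = 17; v1&15 = 1, bit 0x10 -> RXB
 */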

/* RXB - Compute the RXB field from the most significant bit of each
 * vector register number
 *
 * @rxb: Operand to store computed RXB value
 * @v1: First vector register designated operand
 * @v2: Second vector register designated operand
 * @v3: Third vector register designated operand
 * @v4: Fourth vector register designated operand
 */
.macro	RXB	rxb v1 v2=0 v3=0 v4=0
	\rxb = 0
	.if	\v1 & 0x10
	\rxb = \rxb | 0x08
	.endif
	.if	\v2 & 0x10
	\rxb = \rxb | 0x04
	.endif
	.if	\v3 & 0x10
	\rxb = \rxb | 0x02
	.endif
	.if	\v4 & 0x10
	\rxb = \rxb | 0x01
	.endif
.endm
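
/* Usage sketch (illustrative): with the first and fourth operands in
 * the high register bank, RXB sets bits 0x08 and 0x01:
 *
 *	RXB	rxb, 17, 0, 0, 31	# rxb = 0x08 | 0x01 = 0x09
 */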

/* MRXB - Generate Element Size Control and RXB value
 *
 * @m: Element size control
 * @v1: First vector register designated operand (for RXB)
 * @v2: Second vector register designated operand (for RXB)
 * @v3: Third vector register designated operand (for RXB)
 * @v4: Fourth vector register designated operand (for RXB)
 */
.macro	MRXB	m v1 v2=0 v3=0 v4=0
	rxb = 0
	RXB	rxb, \v1, \v2, \v3, \v4
	.byte	(\m << 4) | rxb
.endm

/* MRXBOPC - Generate Element Size Control, RXB, and final Opcode fields
 *
 * @m: Element size control
 * @opc: Opcode
 * @v1: First vector register designated operand (for RXB)
 * @v2: Second vector register designated operand (for RXB)
 * @v3: Third vector register designated operand (for RXB)
 * @v4: Fourth vector register designated operand (for RXB)
 */
.macro	MRXBOPC	m opc v1 v2=0 v3=0 v4=0
	MRXB	\m, \v1, \v2, \v3, \v4
	.byte	\opc
.endm
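
/* Usage sketch (illustrative): the emitters below build the six bytes
 * of an E7xx instruction from two .words plus MRXBOPC, which appends
 * the M/RXB byte and the second opcode byte.  For a first operand of
 * %v16, MRXBOPC m, opc, 16 emits:
 *
 *	.byte	(m << 4) | 0x08		# M field, RXB bit for V1 >= 16
 *	.byte	opc			# second opcode byte
 */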

/* Vector support instructions */

/* VECTOR GENERATE BYTE MASK */
.macro	VGBM	vr imm2
	VX_NUM	v1, \vr
	.word	(0xE700 | ((v1&15) << 4))
	.word	\imm2
	MRXBOPC	0, 0x44, v1
.endm
.macro	VZERO	vxr
	VGBM	\vxr, 0
.endm
.macro	VONE	vxr
	VGBM	\vxr, 0xFFFF
.endm
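
/* Usage sketch (illustrative): each of the 16 immediate bits selects
 * one byte of the result:
 *
 *	VGBM	%v2, 0x00FF	# bytes 8-15 of v2 = 0xFF, bytes 0-7 = 0
 *	VZERO	%v0		# v0 = 0
 *	VONE	%v1		# v1 = all ones
 */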

/* VECTOR LOAD VR ELEMENT FROM GR */
.macro	VLVG	v, gr, disp, m
	VX_NUM	v1, \v
	GR_NUM	b2, "%r0"
	GR_NUM	r3, \gr
	.word	0xE700 | ((v1&15) << 4) | r3
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m, 0x22, v1
.endm
.macro	VLVGB	v, gr, index
	VLVG	\v, \gr, \index, 0
.endm
.macro	VLVGH	v, gr, index
	VLVG	\v, \gr, \index, 1
.endm
.macro	VLVGF	v, gr, index
	VLVG	\v, \gr, \index, 2
.endm
.macro	VLVGG	v, gr, index
	VLVG	\v, \gr, \index, 3
.endm
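
/* Usage sketch (illustrative): load a general-purpose register into
 * one element of a vector register:
 *
 *	VLVGG	%v16, %r3, 1	# doubleword element 1 of v16 = r3
 *	VLVGF	%v16, %r3, 0	# word element 0 of v16 = low 32 bits of r3
 */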

/* VECTOR LOAD REGISTER */
.macro	VLR	v1, v2
	VX_NUM	v1, \v1
	VX_NUM	v2, \v2
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	0
	MRXBOPC	0, 0x56, v1, v2
.endm

/* VECTOR LOAD */
.macro	VL	v, disp, index="%r0", base
	VX_NUM	v1, \v
	GR_NUM	x2, \index
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | x2
	.word	(b2 << 12) | (\disp)
	MRXBOPC	0, 0x06, v1
.endm
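
/* Usage sketch (illustrative): load 16 bytes from memory:
 *
 *	VL	%v24, 0, "%r0", %r5	# v24 = 16 bytes at 0(%r5)
 *	VL	%v24, 16, %r2, %r5	# v24 = 16 bytes at 16(%r2,%r5)
 */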

/* VECTOR LOAD ELEMENT */
.macro	VLEx	vr1, disp, index="%r0", base, m3, opc
	VX_NUM	v1, \vr1
	GR_NUM	x2, \index
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | x2
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m3, \opc, v1
.endm
.macro	VLEB	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x00
.endm
.macro	VLEH	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x01
.endm
.macro	VLEF	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x03
.endm
.macro	VLEG	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x02
.endm

/* VECTOR LOAD ELEMENT IMMEDIATE */
.macro	VLEIx	vr1, imm2, m3, opc
	VX_NUM	v1, \vr1
	.word	0xE700 | ((v1&15) << 4)
	.word	\imm2
	MRXBOPC	\m3, \opc, v1
.endm
.macro	VLEIB	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x40
.endm
.macro	VLEIH	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x41
.endm
.macro	VLEIF	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x43
.endm
.macro	VLEIG	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x42
.endm

/* VECTOR LOAD GR FROM VR ELEMENT */
.macro	VLGV	gr, vr, disp, base="%r0", m
	GR_NUM	r1, \gr
	GR_NUM	b2, \base
	VX_NUM	v3, \vr
	.word	0xE700 | (r1 << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m, 0x21, v3
.endm
.macro	VLGVB	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 0
.endm
.macro	VLGVH	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 1
.endm
.macro	VLGVF	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 2
.endm
.macro	VLGVG	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 3
.endm
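
/* Usage sketch (illustrative): extract one element of a vector
 * register into a general-purpose register:
 *
 *	VLGVG	%r2, %v20, 0	# r2 = doubleword element 0 of v20
 *	VLGVB	%r2, %v20, 7	# r2 = byte element 7 of v20, zero-extended
 */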

/* VECTOR LOAD MULTIPLE */
.macro	VLM	vfrom, vto, disp, base, hint=3
	VX_NUM	v1, \vfrom
	VX_NUM	v3, \vto
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\hint, 0x36, v1, v3
.endm

/* VECTOR STORE */
.macro	VST	vr1, disp, index="%r0", base
	VX_NUM	v1, \vr1
	GR_NUM	x2, \index
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | (x2&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	0, 0x0E, v1
.endm

/* VECTOR STORE MULTIPLE */
.macro	VSTM	vfrom, vto, disp, base, hint=3
	VX_NUM	v1, \vfrom
	VX_NUM	v3, \vto
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\hint, 0x3E, v1, v3
.endm
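
/* Usage sketch (illustrative): spill and reload a range of vector
 * registers, for example around code that clobbers them:
 *
 *	VSTM	%v16, %v23, 0, %r15	# store v16-v23 at 0(%r15)
 *	...
 *	VLM	%v16, %v23, 0, %r15	# restore v16-v23
 */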

/* VECTOR PERMUTE */
.macro	VPERM	vr1, vr2, vr3, vr4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	VX_NUM	v4, \vr4
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	(v4&15), 0x8C, v1, v2, v3, v4
.endm
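
/* Usage sketch (illustrative): the fourth operand holds 16 byte
 * indexes into the 32-byte concatenation of the second and third
 * operands; each result byte is the byte selected by its index:
 *
 *	VPERM	%v0, %v1, %v2, %v3	# v0[i] = (v1:v2)[v3[i] & 31]
 */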

/* VECTOR UNPACK LOGICAL LOW */
.macro	VUPLL	vr1, vr2, m3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	0x0000
	MRXBOPC	\m3, 0xD4, v1, v2
.endm
.macro	VUPLLB	vr1, vr2
	VUPLL	\vr1, \vr2, 0
.endm
.macro	VUPLLH	vr1, vr2
	VUPLL	\vr1, \vr2, 1
.endm
.macro	VUPLLF	vr1, vr2
	VUPLL	\vr1, \vr2, 2
.endm

/* VECTOR PERMUTE DOUBLEWORD IMMEDIATE */
.macro	VPDI	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0x84, v1, v2, v3
.endm

/* VECTOR REPLICATE */
.macro	VREP	vr1, vr3, imm2, m4
	VX_NUM	v1, \vr1
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	\imm2
	MRXBOPC	\m4, 0x4D, v1, v3
.endm
.macro	VREPB	vr1, vr3, imm2
	VREP	\vr1, \vr3, \imm2, 0
.endm
.macro	VREPH	vr1, vr3, imm2
	VREP	\vr1, \vr3, \imm2, 1
.endm
.macro	VREPF	vr1, vr3, imm2
	VREP	\vr1, \vr3, \imm2, 2
.endm
.macro	VREPG	vr1, vr3, imm2
	VREP	\vr1, \vr3, \imm2, 3
.endm
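
/* Usage sketch (illustrative): broadcast one source element to all
 * elements of the result:
 *
 *	VREPF	%v4, %v5, 2	# all words of v4 = word element 2 of v5
 *	VREPG	%v4, %v5, 0	# both doublewords of v4 = doubleword 0 of v5
 */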

/* VECTOR MERGE HIGH */
.macro	VMRH	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0x61, v1, v2, v3
.endm
.macro	VMRHB	vr1, vr2, vr3
	VMRH	\vr1, \vr2, \vr3, 0
.endm
.macro	VMRHH	vr1, vr2, vr3
	VMRH	\vr1, \vr2, \vr3, 1
.endm
.macro	VMRHF	vr1, vr2, vr3
	VMRH	\vr1, \vr2, \vr3, 2
.endm
.macro	VMRHG	vr1, vr2, vr3
	VMRH	\vr1, \vr2, \vr3, 3
.endm

/* VECTOR MERGE LOW */
.macro	VMRL	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0x60, v1, v2, v3
.endm
.macro	VMRLB	vr1, vr2, vr3
	VMRL	\vr1, \vr2, \vr3, 0
.endm
.macro	VMRLH	vr1, vr2, vr3
	VMRL	\vr1, \vr2, \vr3, 1
.endm
.macro	VMRLF	vr1, vr2, vr3
	VMRL	\vr1, \vr2, \vr3, 2
.endm
.macro	VMRLG	vr1, vr2, vr3
	VMRL	\vr1, \vr2, \vr3, 3
.endm

/* Vector integer instructions */

/* VECTOR AND */
.macro	VN	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x68, v1, v2, v3
.endm

/* VECTOR EXCLUSIVE OR */
.macro	VX	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x6D, v1, v2, v3
.endm

/* VECTOR GALOIS FIELD MULTIPLY SUM */
.macro	VGFM	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0xB4, v1, v2, v3
.endm
.macro	VGFMB	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 0
.endm
.macro	VGFMH	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 1
.endm
.macro	VGFMF	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 2
.endm
.macro	VGFMG	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 3
.endm

/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */
.macro	VGFMA	vr1, vr2, vr3, vr4, m5
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	VX_NUM	v4, \vr4
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12) | (\m5 << 8)
	MRXBOPC	(v4&15), 0xBC, v1, v2, v3, v4
.endm
.macro	VGFMAB	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 0
.endm
.macro	VGFMAH	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 1
.endm
.macro	VGFMAF	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 2
.endm
.macro	VGFMAG	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 3
.endm
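
/* Usage sketch (illustrative): carry-less multiply-accumulate, used
 * for example in GHASH/CRC-style folding; roughly, the carry-less
 * products of corresponding element pairs are summed (XORed) and then
 * XORed with the fourth operand:
 *
 *	VGFMAG	%v0, %v1, %v2, %v3
 */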

/* VECTOR SHIFT RIGHT LOGICAL BY BYTE */
.macro	VSRLB	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x7D, v1, v2, v3
.endm

/* VECTOR REPLICATE IMMEDIATE */
.macro	VREPI	vr1, imm2, m3
	VX_NUM	v1, \vr1
	.word	0xE700 | ((v1&15) << 4)
	.word	\imm2
	MRXBOPC	\m3, 0x45, v1
.endm
.macro	VREPIB	vr1, imm2
	VREPI	\vr1, \imm2, 0
.endm
.macro	VREPIH	vr1, imm2
	VREPI	\vr1, \imm2, 1
.endm
.macro	VREPIF	vr1, imm2
	VREPI	\vr1, \imm2, 2
.endm
.macro	VREPIG	vr1, imm2
	VREPI	\vr1, \imm2, 3
.endm

/* VECTOR ADD */
.macro	VA	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0xF3, v1, v2, v3
.endm
.macro	VAB	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 0
.endm
.macro	VAH	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 1
.endm
.macro	VAF	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 2
.endm
.macro	VAG	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 3
.endm
.macro	VAQ	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 4
.endm
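
/* Usage sketch (illustrative): element-wise and full 128-bit addition:
 *
 *	VAG	%v0, %v1, %v2	# each doubleword: v0 = v1 + v2
 *	VAQ	%v0, %v1, %v2	# single 128-bit add
 */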

/* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */
.macro	VESRAV	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0x7A, v1, v2, v3
.endm

.macro	VESRAVB	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 0
.endm
.macro	VESRAVH	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 1
.endm
.macro	VESRAVF	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 2
.endm
.macro	VESRAVG	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 3
.endm
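
/* Usage sketch (illustrative): each element of the second operand is
 * shifted right arithmetically by the count in the corresponding
 * element of the third operand:
 *
 *	VESRAVG	%v0, %v1, %v2
 */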

/* VECTOR ELEMENT ROTATE LEFT LOGICAL */
.macro	VERLL	vr1, vr3, disp, base="%r0", m4
	VX_NUM	v1, \vr1
	VX_NUM	v3, \vr3
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m4, 0x33, v1, v3
.endm
.macro	VERLLB	vr1, vr3, disp, base="%r0"
	VERLL	\vr1, \vr3, \disp, \base, 0
.endm
.macro	VERLLH	vr1, vr3, disp, base="%r0"
	VERLL	\vr1, \vr3, \disp, \base, 1
.endm
.macro	VERLLF	vr1, vr3, disp, base="%r0"
	VERLL	\vr1, \vr3, \disp, \base, 2
.endm
.macro	VERLLG	vr1, vr3, disp, base="%r0"
	VERLL	\vr1, \vr3, \disp, \base, 3
.endm
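
/* Usage sketch (illustrative): the rotate count is taken from the
 * second-operand address (base plus displacement), so with the
 * default base %r0 the count is simply the displacement:
 *
 *	VERLLF	%v6, %v7, 3	# rotate each word of v7 left by 3 bits
 */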

/* VECTOR SHIFT LEFT DOUBLE BY BYTE */
.macro	VSLDB	vr1, vr2, vr3, imm4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12) | (\imm4)
	MRXBOPC	0, 0x77, v1, v2, v3
.endm
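
/* Usage sketch (illustrative): shift the 32-byte concatenation of the
 * second and third operands left by imm4 bytes and keep the leftmost
 * 16 bytes:
 *
 *	VSLDB	%v0, %v1, %v2, 4	# v0 = bytes 4-19 of v1:v2
 */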

#endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_VX_INSN_INTERNAL_H */