/*
 * vxeh2_vlstr: vector-enhancements facility 2 vector load/store reversed
 */
#include <stdint.h>
#include "vx.h"

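/* Fail the test (return 1 from the caller) if the two vectors are not equal. */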
#define vtst(v1, v2) \
    if (v1.d[0] != v2.d[0] || v1.d[1] != v2.d[1]) { \
        return 1;     \
    }

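/* VECTOR LOAD ELEMENTS REVERSED: load the vector at va into v1 with its
 * elements (of size m3) in reversed order. */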
static inline void vler(S390Vector *v1, const void *va, uint8_t m3)
{
    asm volatile("vler %[v1], 0(%[va]), %[m3]\n"
                : [v1] "+v" (v1->v)
                : [va]  "a" (va)
                , [m3]  "i" (m3)
                : "memory");
}

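/* VECTOR STORE ELEMENTS REVERSED: store v1 to va with its elements
 * (of size m3) in reversed order. */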
static inline void vster(S390Vector *v1, const void *va, uint8_t m3)
{
    asm volatile("vster %[v1], 0(%[va]), %[m3]\n"
                : [va] "+a" (va)
                : [v1]  "v" (v1->v)
                , [m3]  "i" (m3)
                : "memory");
}

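/* VECTOR LOAD BYTE REVERSED ELEMENTS: load the vector at va into v1 with the
 * bytes of each element (of size m3) reversed. */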
static inline void vlbr(S390Vector *v1, void *va, const uint8_t m3)
{
    asm volatile("vlbr %[v1], 0(%[va]), %[m3]\n"
                : [v1] "+v" (v1->v)
                : [va]  "a" (va)
                , [m3]  "i" (m3)
                : "memory");
}

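/* VECTOR STORE BYTE REVERSED ELEMENTS: store v1 to va with the bytes of each
 * element (of size m3) reversed. */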
static inline void vstbr(S390Vector *v1, void *va, const uint8_t m3)
{
    asm volatile("vstbr %[v1], 0(%[va]), %[m3]\n"
                : [va] "+a" (va)
                : [v1]  "v" (v1->v)
                , [m3]  "i" (m3)
                : "memory");
}

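/* VECTOR LOAD BYTE REVERSED ELEMENT (16): load one byte-reversed halfword
 * from va into element m3 of v1; the other elements are left unchanged. */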
static inline void vlebrh(S390Vector *v1, void *va, const uint8_t m3)
{
    asm volatile("vlebrh %[v1], 0(%[va]), %[m3]\n"
                : [v1] "+v" (v1->v)
                : [va]  "a" (va)
                , [m3]  "i" (m3)
                : "memory");
}

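/* VECTOR STORE BYTE REVERSED ELEMENT (16): store halfword element m3 of v1
 * to va with its bytes reversed. */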
static inline void vstebrh(S390Vector *v1, void *va, const uint8_t m3)
{
    asm volatile("vstebrh %[v1], 0(%[va]), %[m3]\n"
                : [va] "+a" (va)
                : [v1]  "v" (v1->v)
                , [m3]  "i" (m3)
                : "memory");
}

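/* VECTOR LOAD BYTE REVERSED ELEMENT AND ZERO: load one byte-reversed element
 * (size selected by m3) into the leftmost doubleword of v1 and zero all other
 * bytes of the vector. */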
static inline void vllebrz(S390Vector *v1, void *va, const uint8_t m3)
{
    asm volatile("vllebrz %[v1], 0(%[va]), %[m3]\n"
                : [v1] "+v" (v1->v)
                : [va]  "a" (va)
                , [m3]  "i" (m3)
                : "memory");
}

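/* VECTOR LOAD BYTE REVERSED ELEMENT AND REPLICATE: load one byte-reversed
 * element (size selected by m3) and replicate it across all elements of v1. */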
static inline void vlbrrep(S390Vector *v1, void *va, const uint8_t m3)
{
    asm volatile("vlbrrep %[v1], 0(%[va]), %[m3]\n"
                : [v1] "+v" (v1->v)
                : [va]  "a" (va)
                , [m3]  "i" (m3)
                : "memory");
}

int main(int argc, char *argv[])
{
    S390Vector vd = { .d[0] = 0, .d[1] = 0 };
    S390Vector vs = { .d[0] = 0x8FEEDDCCBBAA9988ull,
                      .d[1] = 0x7766554433221107ull };

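    /*
     * Expected images of vs: vt_v_er16 after a halfword element-reversed
     * access, vt_v_br16 after a byte-reversed access of each halfword.
     */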
    const S390Vector vt_v_er16 = {
        .h[0] = 0x1107, .h[1] = 0x3322, .h[2] = 0x5544, .h[3] = 0x7766,
        .h[4] = 0x9988, .h[5] = 0xBBAA, .h[6] = 0xDDCC, .h[7] = 0x8FEE };

    const S390Vector vt_v_br16 = {
        .h[0] = 0xEE8F, .h[1] = 0xCCDD, .h[2] = 0xAABB, .h[3] = 0x8899,
        .h[4] = 0x6677, .h[5] = 0x4455, .h[6] = 0x2233, .h[7] = 0x0711 };

    int ix;
    uint64_t ss64 = 0xFEEDFACE0BADBEEFull, sd64 = 0;

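    /* Full-vector checks: element-reversed load/store and byte-reversed
     * load/store of halfwords, each compared against the expected image. */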
    vler(&vd, &vs, ES16);
    vtst(vd, vt_v_er16);

    vster(&vs, &vd, ES16);
    vtst(vd, vt_v_er16);

    vlbr(&vd, &vs, ES16);
    vtst(vd, vt_v_br16);

    vstbr(&vs, &vd, ES16);
    vtst(vd, vt_v_br16);

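    /* Load the first halfword of ss64 (0xFEED) byte-reversed into element 5. */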
    vlebrh(&vd, &ss64, 5);
    if (0xEDFE != vd.h[5]) {
        return 1;
    }

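    /* Store halfword element 7 of vs (0x1107) byte-reversed at byte offset 4
     * of sd64. */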
    vstebrh(&vs, (uint8_t *)&sd64 + 4, 7);
    if (0x0000000007110000ull != sd64) {
        return 1;
    }

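    /* Load the word at byte offset 3 of ss64 byte-reversed (0xBEAD0BCE) into
     * word element 1, zeroing the rest of vd. */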
    vllebrz(&vd, (uint8_t *)&ss64 + 3, 2);
    for (ix = 0; ix < 4; ix++) {
        if (vd.w[ix] != (ix != 1 ? 0 : 0xBEAD0BCE)) {
            return 1;
        }
    }

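    /* Replicate the byte-reversed halfword at byte offset 4 of ss64 (0xAD0B)
     * across all halfword elements of vd. */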
    vlbrrep(&vd, (uint8_t *)&ss64 + 4, 1);
    for (ix = 0; ix < 8; ix++) {
        if (0xAD0B != vd.h[ix]) {
            return 1;
        }
    }

    return 0;
}