xref: /openbmc/qemu/target/riscv/vcrypto_helper.c (revision 06028472)
1 /*
2  * RISC-V Vector Crypto Extension Helpers for QEMU.
3  *
4  * Copyright (C) 2023 SiFive, Inc.
5  * Written by Codethink Ltd and SiFive.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/host-utils.h"
22 #include "qemu/bitops.h"
23 #include "qemu/bswap.h"
24 #include "cpu.h"
25 #include "exec/memop.h"
26 #include "exec/exec-all.h"
27 #include "exec/helper-proto.h"
28 #include "internals.h"
29 #include "vector_internals.h"
30 
/*
 * Carry-less multiply, low half: XOR together copies of @x shifted
 * left by the position of every set bit of @y.  Bits shifted past
 * bit 63 (the upper half of the 128-bit carry-less product) are
 * discarded; clmulh64() computes those.
 */
static uint64_t clmul64(uint64_t y, uint64_t x)
{
    uint64_t product = 0;

    for (int bit = 0; bit < 64; bit++) {
        if (y & (UINT64_C(1) << bit)) {
            product ^= x << bit;
        }
    }
    return product;
}
41 
/*
 * Carry-less multiply, high half: bits 64..127 of the 128-bit
 * carry-less product of @x and @y.  Bit j of @y (for j >= 1)
 * contributes the top j bits of @x, i.e. x >> (64 - j); bit 0
 * contributes nothing to the upper word, so the loop starts at 1.
 */
static uint64_t clmulh64(uint64_t y, uint64_t x)
{
    uint64_t product = 0;

    for (int bit = 1; bit <= 63; bit++) {
        if ((y >> bit) & 1) {
            product ^= x >> (64 - bit);
        }
    }
    return product;
}
52 
/*
 * Zvbc: vector carry-less multiply (low and high halves), 64-bit
 * elements only (H8 / OP_UUU_D).  RVVCALL and GEN_VEXT_VV/VX are
 * helper-generator macros (presumably from vector_internals.h /
 * internals.h — their expansion is not visible in this file); they
 * wire the scalar clmul64/clmulh64 ops above into the vector-vector
 * and vector-scalar helper entry points.
 */
RVVCALL(OPIVV2, vclmul_vv, OP_UUU_D, H8, H8, H8, clmul64)
GEN_VEXT_VV(vclmul_vv, 8)
RVVCALL(OPIVX2, vclmul_vx, OP_UUU_D, H8, H8, clmul64)
GEN_VEXT_VX(vclmul_vx, 8)
RVVCALL(OPIVV2, vclmulh_vv, OP_UUU_D, H8, H8, H8, clmulh64)
GEN_VEXT_VV(vclmulh_vv, 8)
RVVCALL(OPIVX2, vclmulh_vx, OP_UUU_D, H8, H8, clmulh64)
GEN_VEXT_VX(vclmulh_vx, 8)
61 
/*
 * Zvbb/Zvkb: vector rotate right, one helper per element width
 * (8/16/32/64 bits).  ror8/ror16/ror32/ror64 presumably come from
 * qemu/bitops.h (included above) — confirm there.
 */
RVVCALL(OPIVV2, vror_vv_b, OP_UUU_B, H1, H1, H1, ror8)
RVVCALL(OPIVV2, vror_vv_h, OP_UUU_H, H2, H2, H2, ror16)
RVVCALL(OPIVV2, vror_vv_w, OP_UUU_W, H4, H4, H4, ror32)
RVVCALL(OPIVV2, vror_vv_d, OP_UUU_D, H8, H8, H8, ror64)
GEN_VEXT_VV(vror_vv_b, 1)
GEN_VEXT_VV(vror_vv_h, 2)
GEN_VEXT_VV(vror_vv_w, 4)
GEN_VEXT_VV(vror_vv_d, 8)

RVVCALL(OPIVX2, vror_vx_b, OP_UUU_B, H1, H1, ror8)
RVVCALL(OPIVX2, vror_vx_h, OP_UUU_H, H2, H2, ror16)
RVVCALL(OPIVX2, vror_vx_w, OP_UUU_W, H4, H4, ror32)
RVVCALL(OPIVX2, vror_vx_d, OP_UUU_D, H8, H8, ror64)
GEN_VEXT_VX(vror_vx_b, 1)
GEN_VEXT_VX(vror_vx_h, 2)
GEN_VEXT_VX(vror_vx_w, 4)
GEN_VEXT_VX(vror_vx_d, 8)
79 
/*
 * Zvbb/Zvkb: vector rotate left, one helper per element width
 * (8/16/32/64 bits), mirroring the vror helpers above.
 */
RVVCALL(OPIVV2, vrol_vv_b, OP_UUU_B, H1, H1, H1, rol8)
RVVCALL(OPIVV2, vrol_vv_h, OP_UUU_H, H2, H2, H2, rol16)
RVVCALL(OPIVV2, vrol_vv_w, OP_UUU_W, H4, H4, H4, rol32)
RVVCALL(OPIVV2, vrol_vv_d, OP_UUU_D, H8, H8, H8, rol64)
GEN_VEXT_VV(vrol_vv_b, 1)
GEN_VEXT_VV(vrol_vv_h, 2)
GEN_VEXT_VV(vrol_vv_w, 4)
GEN_VEXT_VV(vrol_vv_d, 8)

RVVCALL(OPIVX2, vrol_vx_b, OP_UUU_B, H1, H1, rol8)
RVVCALL(OPIVX2, vrol_vx_h, OP_UUU_H, H2, H2, rol16)
RVVCALL(OPIVX2, vrol_vx_w, OP_UUU_W, H4, H4, rol32)
RVVCALL(OPIVX2, vrol_vx_d, OP_UUU_D, H8, H8, rol64)
GEN_VEXT_VX(vrol_vx_b, 1)
GEN_VEXT_VX(vrol_vx_h, 2)
GEN_VEXT_VX(vrol_vx_w, 4)
GEN_VEXT_VX(vrol_vx_d, 8)
97 
/*
 * Reverse the bit order within each byte of @val; byte positions are
 * left unchanged.  Each byte is reversed by swapping adjacent bits,
 * then bit pairs, then nibbles.
 */
static uint64_t brev8(uint64_t val)
{
    uint64_t result = 0;

    for (int shift = 0; shift < 64; shift += 8) {
        uint8_t b = val >> shift;

        b = ((b & 0x55) << 1) | ((b & 0xAA) >> 1); /* swap odd/even bits */
        b = ((b & 0x33) << 2) | ((b & 0xCC) >> 2); /* swap bit pairs */
        b = (b << 4) | (b >> 4);                   /* swap nibbles */
        result |= (uint64_t)b << shift;
    }
    return result;
}
109 
/*
 * Zvbb/Zvkb vbrev8.v: reverse the bits within each byte of every
 * element; all element widths reuse the 64-bit brev8() above since
 * it operates byte-wise.
 */
RVVCALL(OPIVV1, vbrev8_v_b, OP_UU_B, H1, H1, brev8)
RVVCALL(OPIVV1, vbrev8_v_h, OP_UU_H, H2, H2, brev8)
RVVCALL(OPIVV1, vbrev8_v_w, OP_UU_W, H4, H4, brev8)
RVVCALL(OPIVV1, vbrev8_v_d, OP_UU_D, H8, H8, brev8)
GEN_VEXT_V(vbrev8_v_b, 1)
GEN_VEXT_V(vbrev8_v_h, 2)
GEN_VEXT_V(vbrev8_v_w, 4)
GEN_VEXT_V(vbrev8_v_d, 8)
118 
/*
 * Zvbb/Zvkb vrev8.v: reverse the byte order within each element.
 * For 8-bit elements a byte swap is a no-op, hence DO_IDENTITY;
 * wider elements use the bswap helpers from qemu/bswap.h.
 */
#define DO_IDENTITY(a) (a)
RVVCALL(OPIVV1, vrev8_v_b, OP_UU_B, H1, H1, DO_IDENTITY)
RVVCALL(OPIVV1, vrev8_v_h, OP_UU_H, H2, H2, bswap16)
RVVCALL(OPIVV1, vrev8_v_w, OP_UU_W, H4, H4, bswap32)
RVVCALL(OPIVV1, vrev8_v_d, OP_UU_D, H8, H8, bswap64)
GEN_VEXT_V(vrev8_v_b, 1)
GEN_VEXT_V(vrev8_v_h, 2)
GEN_VEXT_V(vrev8_v_w, 4)
GEN_VEXT_V(vrev8_v_d, 8)
128 
/*
 * Zvbb/Zvkb vandn: bitwise AND-NOT, DO_ANDN(a, b) == a & ~b.
 * NOTE(review): which operand is negated depends on the argument
 * order OPIVV2/OPIVX2 use when invoking the op — presumably
 * OP(s2, s1) so this computes vs2 & ~vs1 / vs2 & ~rs1; confirm
 * against vector_internals.h.
 */
#define DO_ANDN(a, b) ((a) & ~(b))
RVVCALL(OPIVV2, vandn_vv_b, OP_UUU_B, H1, H1, H1, DO_ANDN)
RVVCALL(OPIVV2, vandn_vv_h, OP_UUU_H, H2, H2, H2, DO_ANDN)
RVVCALL(OPIVV2, vandn_vv_w, OP_UUU_W, H4, H4, H4, DO_ANDN)
RVVCALL(OPIVV2, vandn_vv_d, OP_UUU_D, H8, H8, H8, DO_ANDN)
GEN_VEXT_VV(vandn_vv_b, 1)
GEN_VEXT_VV(vandn_vv_h, 2)
GEN_VEXT_VV(vandn_vv_w, 4)
GEN_VEXT_VV(vandn_vv_d, 8)

RVVCALL(OPIVX2, vandn_vx_b, OP_UUU_B, H1, H1, DO_ANDN)
RVVCALL(OPIVX2, vandn_vx_h, OP_UUU_H, H2, H2, DO_ANDN)
RVVCALL(OPIVX2, vandn_vx_w, OP_UUU_W, H4, H4, DO_ANDN)
RVVCALL(OPIVX2, vandn_vx_d, OP_UUU_D, H8, H8, DO_ANDN)
GEN_VEXT_VX(vandn_vx_b, 1)
GEN_VEXT_VX(vandn_vx_h, 2)
GEN_VEXT_VX(vandn_vx_w, 4)
GEN_VEXT_VX(vandn_vx_d, 8)
147 
/*
 * Zvbb unary bit-manipulation ops, one helper per element width:
 *   vbrev.v - reverse all bits of each element
 *   vclz.v  - count leading zeros per element
 *   vctz.v  - count trailing zeros per element
 *   vcpop.v - population count per element
 * The revbit*/clz*/ctz*/ctpop* scalar ops presumably come from
 * qemu/host-utils.h and qemu/bitops.h (included above).
 */
RVVCALL(OPIVV1, vbrev_v_b, OP_UU_B, H1, H1, revbit8)
RVVCALL(OPIVV1, vbrev_v_h, OP_UU_H, H2, H2, revbit16)
RVVCALL(OPIVV1, vbrev_v_w, OP_UU_W, H4, H4, revbit32)
RVVCALL(OPIVV1, vbrev_v_d, OP_UU_D, H8, H8, revbit64)
GEN_VEXT_V(vbrev_v_b, 1)
GEN_VEXT_V(vbrev_v_h, 2)
GEN_VEXT_V(vbrev_v_w, 4)
GEN_VEXT_V(vbrev_v_d, 8)

RVVCALL(OPIVV1, vclz_v_b, OP_UU_B, H1, H1, clz8)
RVVCALL(OPIVV1, vclz_v_h, OP_UU_H, H2, H2, clz16)
RVVCALL(OPIVV1, vclz_v_w, OP_UU_W, H4, H4, clz32)
RVVCALL(OPIVV1, vclz_v_d, OP_UU_D, H8, H8, clz64)
GEN_VEXT_V(vclz_v_b, 1)
GEN_VEXT_V(vclz_v_h, 2)
GEN_VEXT_V(vclz_v_w, 4)
GEN_VEXT_V(vclz_v_d, 8)

RVVCALL(OPIVV1, vctz_v_b, OP_UU_B, H1, H1, ctz8)
RVVCALL(OPIVV1, vctz_v_h, OP_UU_H, H2, H2, ctz16)
RVVCALL(OPIVV1, vctz_v_w, OP_UU_W, H4, H4, ctz32)
RVVCALL(OPIVV1, vctz_v_d, OP_UU_D, H8, H8, ctz64)
GEN_VEXT_V(vctz_v_b, 1)
GEN_VEXT_V(vctz_v_h, 2)
GEN_VEXT_V(vctz_v_w, 4)
GEN_VEXT_V(vctz_v_d, 8)

RVVCALL(OPIVV1, vcpop_v_b, OP_UU_B, H1, H1, ctpop8)
RVVCALL(OPIVV1, vcpop_v_h, OP_UU_H, H2, H2, ctpop16)
RVVCALL(OPIVV1, vcpop_v_w, OP_UU_W, H4, H4, ctpop32)
RVVCALL(OPIVV1, vcpop_v_d, OP_UU_D, H8, H8, ctpop64)
GEN_VEXT_V(vcpop_v_b, 1)
GEN_VEXT_V(vcpop_v_h, 2)
GEN_VEXT_V(vcpop_v_w, 4)
GEN_VEXT_V(vcpop_v_d, 8)
183 
184 #define DO_SLL(N, M) (N << (M & (sizeof(N) * 8 - 1)))
185 RVVCALL(OPIVV2, vwsll_vv_b, WOP_UUU_B, H2, H1, H1, DO_SLL)
186 RVVCALL(OPIVV2, vwsll_vv_h, WOP_UUU_H, H4, H2, H2, DO_SLL)
187 RVVCALL(OPIVV2, vwsll_vv_w, WOP_UUU_W, H8, H4, H4, DO_SLL)
188 GEN_VEXT_VV(vwsll_vv_b, 2)
189 GEN_VEXT_VV(vwsll_vv_h, 4)
190 GEN_VEXT_VV(vwsll_vv_w, 8)
191 
192 RVVCALL(OPIVX2, vwsll_vx_b, WOP_UUU_B, H2, H1, DO_SLL)
193 RVVCALL(OPIVX2, vwsll_vx_h, WOP_UUU_H, H4, H2, DO_SLL)
194 RVVCALL(OPIVX2, vwsll_vx_w, WOP_UUU_W, H8, H4, DO_SLL)
195 GEN_VEXT_VX(vwsll_vx_b, 2)
196 GEN_VEXT_VX(vwsll_vx_h, 4)
197 GEN_VEXT_VX(vwsll_vx_w, 8)
198