/*
 * RISC-V Crypto Emulation Helpers for QEMU.
 *
 * Copyright (c) 2021 Ruibo Lu, luruibo2000@163.com
 * Copyright (c) 2021 Zewen Ye, lustrew@foxmail.com
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "crypto/aes.h"
#include "crypto/sm4.h"

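/*
 * GF(2^8) helpers for the AES MixColumns arithmetic.
 *
 * AES_XTIME multiplies a byte by x (i.e. by 2) in GF(2^8), reducing by the
 * AES polynomial x^8 + x^4 + x^3 + x + 1.  AES_GFMUL multiplies a byte by a
 * small constant (1..15) by combining xtime steps; the final "& 0xFF" keeps
 * the product within a byte.  For example, AES_GFMUL(0x80, 2) == 0x1b,
 * since x^8 reduces to x^4 + x^3 + x + 1.
 */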
#define AES_XTIME(a) \
    ((a << 1) ^ ((a & 0x80) ? 0x1b : 0))

#define AES_GFMUL(a, b) (( \
    (((b) & 0x1) ? (a) : 0) ^ \
    (((b) & 0x2) ? AES_XTIME(a) : 0) ^ \
    (((b) & 0x4) ? AES_XTIME(AES_XTIME(a)) : 0) ^ \
    (((b) & 0x8) ? AES_XTIME(AES_XTIME(AES_XTIME(a))) : 0)) & 0xFF)

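/*
 * Expand a single byte into the 32-bit column it contributes after
 * (Inv)MixColumns.  The forward direction uses the coefficients
 * {2, 1, 1, 3} and the inverse direction {0xe, 0x9, 0xd, 0xb}, from
 * least to most significant byte of the result.
 */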
static inline uint32_t aes_mixcolumn_byte(uint8_t x, bool fwd)
{
    uint32_t u;

    if (fwd) {
        u = (AES_GFMUL(x, 3) << 24) | (x << 16) | (x << 8) |
            (AES_GFMUL(x, 2) << 0);
    } else {
        u = (AES_GFMUL(x, 0xb) << 24) | (AES_GFMUL(x, 0xd) << 16) |
            (AES_GFMUL(x, 0x9) << 8) | (AES_GFMUL(x, 0xe) << 0);
    }
    return u;
}

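/* The 32-bit AES and SM4 helpers return their result sign-extended to XLEN. */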
#define sext32_xlen(x) (target_ulong)(int32_t)(x)

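/*
 * Common body of the RV32 aes32* instructions: pick the byte of rs2
 * selected by shamt (0, 8, 16 or 24), push it through the forward or
 * inverse S-box, optionally expand it into its (Inv)MixColumns column,
 * rotate the result back into the byte's position and XOR it into rs1.
 */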
static inline target_ulong aes32_operation(target_ulong shamt,
                                           target_ulong rs1, target_ulong rs2,
                                           bool enc, bool mix)
{
    uint8_t si = rs2 >> shamt;
    uint8_t so;
    uint32_t mixed;
    target_ulong res;

    if (enc) {
        so = AES_sbox[si];
        if (mix) {
            mixed = aes_mixcolumn_byte(so, true);
        } else {
            mixed = so;
        }
    } else {
        so = AES_isbox[si];
        if (mix) {
            mixed = aes_mixcolumn_byte(so, false);
        } else {
            mixed = so;
        }
    }
    mixed = rol32(mixed, shamt);
    res = rs1 ^ mixed;

    return sext32_xlen(res);
}

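/*
 * aes32esmi/aes32esi implement the encrypt middle-round and final-round
 * quarter steps, aes32dsmi/aes32dsi the corresponding decrypt steps.
 * Software builds one 32-bit word of the next round state by chaining four
 * of these, one per byte position.  An illustrative sketch (variable names
 * are ours, not from the ISA manual; col0..col3 are the current state
 * columns and rk is the round-key word):
 *
 *     t = aes32esmi(rk, col0, 0);
 *     t = aes32esmi(t,  col1, 8);
 *     t = aes32esmi(t,  col2, 16);
 *     t = aes32esmi(t,  col3, 24);
 *
 * This yields output column 0; the other columns use the same pattern with
 * the source columns rotated.
 */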
target_ulong HELPER(aes32esmi)(target_ulong rs1, target_ulong rs2,
                               target_ulong shamt)
{
    return aes32_operation(shamt, rs1, rs2, true, true);
}

target_ulong HELPER(aes32esi)(target_ulong rs1, target_ulong rs2,
                              target_ulong shamt)
{
    return aes32_operation(shamt, rs1, rs2, true, false);
}

target_ulong HELPER(aes32dsmi)(target_ulong rs1, target_ulong rs2,
                               target_ulong shamt)
{
    return aes32_operation(shamt, rs1, rs2, false, true);
}

target_ulong HELPER(aes32dsi)(target_ulong rs1, target_ulong rs2,
                              target_ulong shamt)
{
    return aes32_operation(shamt, rs1, rs2, false, false);
}

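/*
 * Helpers for the RV64 scalar AES instructions.  The 128-bit AES state is
 * split across two 64-bit registers: rs1 holds columns 0-1 and rs2 holds
 * columns 2-3.  AES_SHIFROWS_LO / AES_INVSHIFROWS_LO gather, byte by byte,
 * the low 64 bits of the (Inv)ShiftRows output from those two halves;
 * BY(X, I) extracts byte I of X.
 */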
#define BY(X, I) ((X >> (8 * I)) & 0xFF)

#define AES_SHIFROWS_LO(RS1, RS2) ( \
    (((RS1 >> 24) & 0xFF) << 56) | (((RS2 >> 48) & 0xFF) << 48) | \
    (((RS2 >> 8) & 0xFF) << 40) | (((RS1 >> 32) & 0xFF) << 32) | \
    (((RS2 >> 56) & 0xFF) << 24) | (((RS2 >> 16) & 0xFF) << 16) | \
    (((RS1 >> 40) & 0xFF) << 8) | (((RS1 >> 0) & 0xFF) << 0))

#define AES_INVSHIFROWS_LO(RS1, RS2) ( \
    (((RS2 >> 24) & 0xFF) << 56) | (((RS2 >> 48) & 0xFF) << 48) | \
    (((RS1 >> 8) & 0xFF) << 40) | (((RS1 >> 32) & 0xFF) << 32) | \
    (((RS1 >> 56) & 0xFF) << 24) | (((RS2 >> 16) & 0xFF) << 16) | \
    (((RS2 >> 40) & 0xFF) << 8) | (((RS1 >> 0) & 0xFF) << 0))

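/*
 * Whole-column (Inv)MixColumns.  Each output byte is the GF(2^8) dot
 * product of one matrix row with the column: coefficients {2, 3, 1, 1} for
 * the forward transform and {0xe, 0xb, 0xd, 0x9} for the inverse, with the
 * byte indices rotated for each successive row.
 */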
#define AES_MIXBYTE(COL, B0, B1, B2, B3) ( \
    BY(COL, B3) ^ BY(COL, B2) ^ AES_GFMUL(BY(COL, B1), 3) ^ \
    AES_GFMUL(BY(COL, B0), 2))

#define AES_MIXCOLUMN(COL) ( \
    AES_MIXBYTE(COL, 3, 0, 1, 2) << 24 | \
    AES_MIXBYTE(COL, 2, 3, 0, 1) << 16 | \
    AES_MIXBYTE(COL, 1, 2, 3, 0) << 8 | AES_MIXBYTE(COL, 0, 1, 2, 3) << 0)

#define AES_INVMIXBYTE(COL, B0, B1, B2, B3) ( \
    AES_GFMUL(BY(COL, B3), 0x9) ^ AES_GFMUL(BY(COL, B2), 0xd) ^ \
    AES_GFMUL(BY(COL, B1), 0xb) ^ AES_GFMUL(BY(COL, B0), 0xe))

#define AES_INVMIXCOLUMN(COL) ( \
    AES_INVMIXBYTE(COL, 3, 0, 1, 2) << 24 | \
    AES_INVMIXBYTE(COL, 2, 3, 0, 1) << 16 | \
    AES_INVMIXBYTE(COL, 1, 2, 3, 0) << 8 | \
    AES_INVMIXBYTE(COL, 0, 1, 2, 3) << 0)

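/*
 * Common body of the RV64 aes64 round instructions: apply (Inv)ShiftRows
 * to the state held in rs1 (columns 0-1) and rs2 (columns 2-3), run every
 * byte of the resulting low half through the forward or inverse S-box and,
 * for the middle-round forms, apply (Inv)MixColumns to the two columns.
 * Only the low 64 bits of the round output are produced; software issues
 * the instruction a second time with the source operands swapped to obtain
 * the high half.
 */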
static inline target_ulong aes64_operation(target_ulong rs1, target_ulong rs2,
                                           bool enc, bool mix)
{
    uint64_t RS1 = rs1;
    uint64_t RS2 = rs2;
    uint64_t result;
    uint64_t temp;
    uint32_t col_0;
    uint32_t col_1;

    if (enc) {
        temp = AES_SHIFROWS_LO(RS1, RS2);
        temp = (((uint64_t)AES_sbox[(temp >> 0) & 0xFF] << 0) |
                ((uint64_t)AES_sbox[(temp >> 8) & 0xFF] << 8) |
                ((uint64_t)AES_sbox[(temp >> 16) & 0xFF] << 16) |
                ((uint64_t)AES_sbox[(temp >> 24) & 0xFF] << 24) |
                ((uint64_t)AES_sbox[(temp >> 32) & 0xFF] << 32) |
                ((uint64_t)AES_sbox[(temp >> 40) & 0xFF] << 40) |
                ((uint64_t)AES_sbox[(temp >> 48) & 0xFF] << 48) |
                ((uint64_t)AES_sbox[(temp >> 56) & 0xFF] << 56));
        if (mix) {
            col_0 = temp & 0xFFFFFFFF;
            col_1 = temp >> 32;

            col_0 = AES_MIXCOLUMN(col_0);
            col_1 = AES_MIXCOLUMN(col_1);

            result = ((uint64_t)col_1 << 32) | col_0;
        } else {
            result = temp;
        }
    } else {
        temp = AES_INVSHIFROWS_LO(RS1, RS2);
        temp = (((uint64_t)AES_isbox[(temp >> 0) & 0xFF] << 0) |
                ((uint64_t)AES_isbox[(temp >> 8) & 0xFF] << 8) |
                ((uint64_t)AES_isbox[(temp >> 16) & 0xFF] << 16) |
                ((uint64_t)AES_isbox[(temp >> 24) & 0xFF] << 24) |
                ((uint64_t)AES_isbox[(temp >> 32) & 0xFF] << 32) |
                ((uint64_t)AES_isbox[(temp >> 40) & 0xFF] << 40) |
                ((uint64_t)AES_isbox[(temp >> 48) & 0xFF] << 48) |
                ((uint64_t)AES_isbox[(temp >> 56) & 0xFF] << 56));
        if (mix) {
            col_0 = temp & 0xFFFFFFFF;
            col_1 = temp >> 32;

            col_0 = AES_INVMIXCOLUMN(col_0);
            col_1 = AES_INVMIXCOLUMN(col_1);

            result = ((uint64_t)col_1 << 32) | col_0;
        } else {
            result = temp;
        }
    }

    return result;
}

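/*
 * aes64esm and aes64dsm compute one half of an encrypt/decrypt middle
 * round; aes64es and aes64ds compute one half of the final round (no
 * MixColumns).  A sketch of one encryption middle round (variable names
 * are ours; the round-key XOR is a separate step):
 *
 *     next_lo = aes64esm(cur_lo, cur_hi) ^ rk_lo;
 *     next_hi = aes64esm(cur_hi, cur_lo) ^ rk_hi;
 */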
target_ulong HELPER(aes64esm)(target_ulong rs1, target_ulong rs2)
{
    return aes64_operation(rs1, rs2, true, true);
}

target_ulong HELPER(aes64es)(target_ulong rs1, target_ulong rs2)
{
    return aes64_operation(rs1, rs2, true, false);
}

target_ulong HELPER(aes64ds)(target_ulong rs1, target_ulong rs2)
{
    return aes64_operation(rs1, rs2, false, false);
}

target_ulong HELPER(aes64dsm)(target_ulong rs1, target_ulong rs2)
{
    return aes64_operation(rs1, rs2, false, true);
}

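/*
 * aes64ks2: second half of an AES key-schedule step.  The upper word of
 * rs1 is the value produced by aes64ks1i; XOR-chain it with the two
 * previous round-key words held in rs2 to produce the next two round-key
 * words.
 */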
target_ulong HELPER(aes64ks2)(target_ulong rs1, target_ulong rs2)
{
    uint64_t RS1 = rs1;
    uint64_t RS2 = rs2;
    uint32_t rs1_hi = RS1 >> 32;
    uint32_t rs2_lo = RS2;
    uint32_t rs2_hi = RS2 >> 32;

    uint32_t r_lo = (rs1_hi ^ rs2_lo);
    uint32_t r_hi = (rs1_hi ^ rs2_lo ^ rs2_hi);
    target_ulong result = ((uint64_t)r_hi << 32) | r_lo;

    return result;
}

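/*
 * aes64ks1i: first half of an AES key-schedule step.  Take the upper word
 * of rs1, rotate it right by 8 bits (RotWord) unless rnum == 0xA, pass
 * each byte through the forward S-box (SubWord), then XOR in the round
 * constant selected by rnum (zero for rnum == 0xA, the SubWord-only form
 * used by the AES-256 schedule).  The word is replicated into both result
 * halves.  One AES-128 expansion step built from these two helpers might
 * look like (illustrative only; k0/k1 hold the previous round key):
 *
 *     t  = aes64ks1i(k1, rnum);
 *     n0 = aes64ks2(t, k0);   // next round-key words 0-1
 *     n1 = aes64ks2(n0, k1);  // next round-key words 2-3
 */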
target_ulong HELPER(aes64ks1i)(target_ulong rs1, target_ulong rnum)
{
    uint64_t RS1 = rs1;
    static const uint8_t round_consts[10] = {
        0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36
    };

    uint8_t enc_rnum = rnum;
    uint32_t temp = (RS1 >> 32) & 0xFFFFFFFF;
    uint8_t rcon_ = 0;
    target_ulong result;

    if (enc_rnum != 0xA) {
        temp = ror32(temp, 8); /* Rotate right by 8 */
        rcon_ = round_consts[enc_rnum];
    }

    temp = ((uint32_t)AES_sbox[(temp >> 24) & 0xFF] << 24) |
           ((uint32_t)AES_sbox[(temp >> 16) & 0xFF] << 16) |
           ((uint32_t)AES_sbox[(temp >> 8) & 0xFF] << 8) |
           ((uint32_t)AES_sbox[(temp >> 0) & 0xFF] << 0);

    temp ^= rcon_;

    result = ((uint64_t)temp << 32) | temp;

    return result;
}

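/*
 * aes64im: apply InvMixColumns to the two columns held in rs1.  Used when
 * converting encryption round keys into the form needed by the equivalent
 * inverse cipher.
 */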
target_ulong HELPER(aes64im)(target_ulong rs1)
{
    uint64_t RS1 = rs1;
    uint32_t col_0 = RS1 & 0xFFFFFFFF;
    uint32_t col_1 = RS1 >> 32;
    target_ulong result;

    col_0 = AES_INVMIXCOLUMN(col_0);
    col_1 = AES_INVMIXCOLUMN(col_1);

    result = ((uint64_t)col_1 << 32) | col_0;

    return result;
}

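/*
 * sm4ed: one byte's slice of the SM4 round function.  The byte of rs2
 * selected by shamt goes through the SM4 S-box and the round's linear
 * transform; the result is rotated back into the byte's position and
 * XORed into rs1, with the final value sign-extended to XLEN.
 */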
target_ulong HELPER(sm4ed)(target_ulong rs1, target_ulong rs2,
                           target_ulong shamt)
{
    uint32_t sb_in = (uint8_t)(rs2 >> shamt);
    uint32_t sb_out = (uint32_t)sm4_sbox[sb_in];

    uint32_t x = sb_out ^ (sb_out << 8) ^ (sb_out << 2) ^ (sb_out << 18) ^
                 ((sb_out & 0x3f) << 26) ^ ((sb_out & 0xC0) << 10);

    uint32_t rotl = rol32(x, shamt);

    return sext32_xlen(rotl ^ (uint32_t)rs1);
}

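/*
 * sm4ks: one byte's slice of the SM4 key-schedule step.  Same structure as
 * sm4ed, but the S-box output is fed through the key-expansion linear
 * transform instead of the round transform.
 */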
target_ulong HELPER(sm4ks)(target_ulong rs1, target_ulong rs2,
                           target_ulong shamt)
{
    uint32_t sb_in = (uint8_t)(rs2 >> shamt);
    uint32_t sb_out = sm4_sbox[sb_in];

    uint32_t x = sb_out ^ ((sb_out & 0x07) << 29) ^ ((sb_out & 0xFE) << 7) ^
                 ((sb_out & 0x01) << 23) ^ ((sb_out & 0xF8) << 13);

    uint32_t rotl = rol32(x, shamt);

    return sext32_xlen(rotl ^ (uint32_t)rs1);
}
#undef sext32_xlen