1 /*
2 * ASPEED Hash and Crypto Engine
3 *
4 * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
5 * Copyright (C) 2021 IBM Corp.
6 *
7 * Joel Stanley <joel@jms.id.au>
8 *
9 * SPDX-License-Identifier: GPL-2.0-or-later
10 */
11
12 #include "qemu/osdep.h"
13 #include "qemu/log.h"
14 #include "qemu/error-report.h"
15 #include "hw/misc/aspeed_hace.h"
16 #include "qapi/error.h"
17 #include "migration/vmstate.h"
18 #include "crypto/hash.h"
19 #include "hw/qdev-properties.h"
20 #include "hw/irq.h"
21
22 #define R_CRYPT_CMD (0x10 / 4)
23
24 #define R_STATUS (0x1c / 4)
25 #define HASH_IRQ BIT(9)
26 #define CRYPT_IRQ BIT(12)
27 #define TAG_IRQ BIT(15)
28
29 #define R_HASH_SRC (0x20 / 4)
30 #define R_HASH_DEST (0x24 / 4)
31 #define R_HASH_KEY_BUFF (0x28 / 4)
32 #define R_HASH_SRC_LEN (0x2c / 4)
33
34 #define R_HASH_CMD (0x30 / 4)
35 /* Hash algorithm selection */
36 #define HASH_ALGO_MASK (BIT(4) | BIT(5) | BIT(6))
37 #define HASH_ALGO_MD5 0
38 #define HASH_ALGO_SHA1 BIT(5)
39 #define HASH_ALGO_SHA224 BIT(6)
40 #define HASH_ALGO_SHA256 (BIT(4) | BIT(6))
41 #define HASH_ALGO_SHA512_SERIES (BIT(5) | BIT(6))
42 /* SHA512 algorithm selection */
43 #define SHA512_HASH_ALGO_MASK (BIT(10) | BIT(11) | BIT(12))
44 #define HASH_ALGO_SHA512_SHA512 0
45 #define HASH_ALGO_SHA512_SHA384 BIT(10)
46 #define HASH_ALGO_SHA512_SHA256 BIT(11)
47 #define HASH_ALGO_SHA512_SHA224 (BIT(10) | BIT(11))
48 /* HMAC modes */
49 #define HASH_HMAC_MASK (BIT(7) | BIT(8))
50 #define HASH_DIGEST 0
51 #define HASH_DIGEST_HMAC BIT(7)
52 #define HASH_DIGEST_ACCUM BIT(8)
53 #define HASH_HMAC_KEY (BIT(7) | BIT(8))
54 /* Cascaded operation modes */
55 #define HASH_ONLY 0
56 #define HASH_ONLY2 BIT(0)
57 #define HASH_CRYPT_THEN_HASH BIT(1)
58 #define HASH_HASH_THEN_CRYPT (BIT(0) | BIT(1))
59 /* Other cmd bits */
60 #define HASH_IRQ_EN BIT(9)
61 #define HASH_SG_EN BIT(18)
62 /* Scatter-gather data list */
63 #define SG_LIST_LEN_SIZE 4
64 #define SG_LIST_LEN_MASK 0x0FFFFFFF
65 #define SG_LIST_LEN_LAST BIT(31)
66 #define SG_LIST_ADDR_SIZE 4
67 #define SG_LIST_ADDR_MASK 0x7FFFFFFF
68 #define SG_LIST_ENTRY_SIZE (SG_LIST_LEN_SIZE + SG_LIST_ADDR_SIZE)
69
/*
 * Map of HASH_CMD algorithm-selection bit patterns to QEMU crypto hash
 * algorithms. The SHA-512 series entries combine the base selector
 * (HASH_ALGO_SHA512_SERIES in HASH_ALGO_MASK) with the sub-selection
 * bits covered by SHA512_HASH_ALGO_MASK.
 */
static const struct {
    uint32_t mask;           /* masked command-register value to match */
    QCryptoHashAlgo algo;    /* corresponding qcrypto algorithm */
} hash_algo_map[] = {
    { HASH_ALGO_MD5, QCRYPTO_HASH_ALGO_MD5 },
    { HASH_ALGO_SHA1, QCRYPTO_HASH_ALGO_SHA1 },
    { HASH_ALGO_SHA224, QCRYPTO_HASH_ALGO_SHA224 },
    { HASH_ALGO_SHA256, QCRYPTO_HASH_ALGO_SHA256 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA512, QCRYPTO_HASH_ALGO_SHA512 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA384, QCRYPTO_HASH_ALGO_SHA384 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA256, QCRYPTO_HASH_ALGO_SHA256 },
};
82
/*
 * Translate the algorithm-selection bits of a HASH_CMD value into a
 * QCryptoHashAlgo, or -1 when no supported algorithm matches.
 */
static int hash_algo_lookup(uint32_t reg)
{
    const uint32_t sel = reg & (HASH_ALGO_MASK | SHA512_HASH_ALGO_MASK);
    size_t n;

    for (n = 0; n < ARRAY_SIZE(hash_algo_map); n++) {
        if (hash_algo_map[n].mask == sel) {
            return hash_algo_map[n].algo;
        }
    }

    return -1;
}
97
98 /**
99 * Check whether the request contains padding message.
100 *
101 * @param s aspeed hace state object
102 * @param iov iov of current request
103 * @param req_len length of the current request
104 * @param total_msg_len length of all acc_mode requests(excluding padding msg)
105 * @param pad_offset start offset of padding message
106 */
has_padding(AspeedHACEState * s,struct iovec * iov,hwaddr req_len,uint32_t * total_msg_len,uint32_t * pad_offset)107 static bool has_padding(AspeedHACEState *s, struct iovec *iov,
108 hwaddr req_len, uint32_t *total_msg_len,
109 uint32_t *pad_offset)
110 {
111 *total_msg_len = (uint32_t)(ldq_be_p(iov->iov_base + req_len - 8) / 8);
112 /*
113 * SG_LIST_LEN_LAST asserted in the request length doesn't mean it is the
114 * last request. The last request should contain padding message.
115 * We check whether message contains padding by
116 * 1. Get total message length. If the current message contains
117 * padding, the last 8 bytes are total message length.
118 * 2. Check whether the total message length is valid.
119 * If it is valid, the value should less than or equal to
120 * total_req_len.
121 * 3. Current request len - padding_size to get padding offset.
122 * The padding message's first byte should be 0x80
123 */
124 if (*total_msg_len <= s->total_req_len) {
125 uint32_t padding_size = s->total_req_len - *total_msg_len;
126 uint8_t *padding = iov->iov_base;
127 *pad_offset = req_len - padding_size;
128 if (padding[*pad_offset] == 0x80) {
129 return true;
130 }
131 }
132
133 return false;
134 }
135
/*
 * Rebuild the caller's iovec from the accumulated cache: append the
 * non-padding head of the current request (entry @id, length
 * *@pad_offset) to the cache, copy the whole cache back into @iov,
 * then reset the accumulation state.
 *
 * Returns the number of valid entries now in @iov.
 */
static int reconstruct_iov(AspeedHACEState *s, struct iovec *iov, int id,
                           uint32_t *pad_offset)
{
    int count;
    int n;

    /* Cache the portion of the current request that precedes the padding. */
    if (*pad_offset != 0) {
        s->iov_cache[s->iov_count].iov_base = iov[id].iov_base;
        s->iov_cache[s->iov_count].iov_len = *pad_offset;
        s->iov_count++;
    }

    /* Replay every cached entry into the caller's vector. */
    for (n = 0; n < s->iov_count; n++) {
        iov[n] = s->iov_cache[n];
    }

    count = s->iov_count;
    s->iov_count = 0;
    s->total_req_len = 0;

    return count;
}
154
/*
 * Execute one hash command.
 *
 * Gathers the input either from a guest scatter-gather list (sg_mode)
 * or from a single [R_HASH_SRC, R_HASH_SRC_LEN) region, runs the digest
 * (one-shot, or incrementally via s->hash_ctx in acc_mode), writes the
 * digest to R_HASH_DEST and sets the HASH_IRQ status bit.
 *
 * @param s        hace state
 * @param algo     QCryptoHashAlgo value from hash_algo_lookup()
 * @param sg_mode  true when HASH_SG_EN was set in the command
 * @param acc_mode true for accumulative (multi-request) hashing
 */
static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
                              bool acc_mode)
{
    struct iovec iov[ASPEED_HACE_MAX_SG];
    uint32_t total_msg_len;
    uint32_t pad_offset;
    g_autofree uint8_t *digest_buf = NULL;
    size_t digest_len = 0;
    bool sg_acc_mode_final_request = false;
    int i;
    void *haddr;
    Error *local_err = NULL;

    /* Accumulative mode keeps one qcrypto context alive across requests. */
    if (acc_mode && s->hash_ctx == NULL) {
        s->hash_ctx = qcrypto_hash_new(algo, &local_err);
        if (s->hash_ctx == NULL) {
            qemu_log_mask(LOG_GUEST_ERROR, "qcrypto hash failed : %s",
                          error_get_pretty(local_err));
            error_free(local_err);
            return;
        }
    }

    if (sg_mode) {
        uint32_t len = 0;

        /* Walk the guest SG list until an entry with SG_LIST_LEN_LAST. */
        for (i = 0; !(len & SG_LIST_LEN_LAST); i++) {
            uint32_t addr, src;
            hwaddr plen;

            if (i == ASPEED_HACE_MAX_SG) {
                qemu_log_mask(LOG_GUEST_ERROR,
                        "aspeed_hace: guest failed to set end of sg list marker\n");
                break;
            }

            /* Each SG entry is { u32 len, u32 addr } in guest DRAM. */
            src = s->regs[R_HASH_SRC] + (i * SG_LIST_ENTRY_SIZE);

            len = address_space_ldl_le(&s->dram_as, src,
                                       MEMTXATTRS_UNSPECIFIED, NULL);

            addr = address_space_ldl_le(&s->dram_as, src + SG_LIST_LEN_SIZE,
                                        MEMTXATTRS_UNSPECIFIED, NULL);
            addr &= SG_LIST_ADDR_MASK;

            /*
             * NOTE(review): address_space_map() may shrink plen on a
             * partial mapping; that shortened length is not checked
             * against the requested one here — confirm intended.
             */
            plen = len & SG_LIST_LEN_MASK;
            haddr = address_space_map(&s->dram_as, addr, &plen, false,
                                      MEMTXATTRS_UNSPECIFIED);
            if (haddr == NULL) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__);
                return;
            }
            iov[i].iov_base = haddr;
            if (acc_mode) {
                s->total_req_len += plen;

                if (has_padding(s, &iov[i], plen, &total_msg_len,
                                &pad_offset)) {
                    /* Padding being present indicates the final request */
                    sg_acc_mode_final_request = true;
                    /* Hash only up to the padding; exclude the pad bytes. */
                    iov[i].iov_len = pad_offset;
                } else {
                    iov[i].iov_len = plen;
                }
            } else {
                iov[i].iov_len = plen;
            }
        }
    } else {
        /* Direct (non-SG) mode: a single contiguous source buffer. */
        hwaddr len = s->regs[R_HASH_SRC_LEN];

        haddr = address_space_map(&s->dram_as, s->regs[R_HASH_SRC],
                                  &len, false, MEMTXATTRS_UNSPECIFIED);
        if (haddr == NULL) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__);
            return;
        }
        iov[0].iov_base = haddr;
        iov[0].iov_len = len;
        i = 1;

        if (s->iov_count) {
            /*
             * In aspeed sdk kernel driver, sg_mode is disabled in hash_final().
             * Thus if we received a request with sg_mode disabled, it is
             * required to check whether cache is empty. If no, we should
             * combine cached iov and the current iov.
             */
            s->total_req_len += len;
            if (has_padding(s, iov, len, &total_msg_len, &pad_offset)) {
                /*
                 * NOTE(review): i is overwritten with the cached entry
                 * count; the unmap loop below then uses the rebuilt
                 * iov lengths — confirm each cached mapping is released
                 * with the length it was mapped with.
                 */
                i = reconstruct_iov(s, iov, 0, &pad_offset);
            }
        }
    }

    if (acc_mode) {
        /* Feed this request into the long-lived digest context. */
        if (qcrypto_hash_updatev(s->hash_ctx, iov, i, &local_err) < 0) {
            qemu_log_mask(LOG_GUEST_ERROR, "qcrypto hash update failed : %s",
                          error_get_pretty(local_err));
            error_free(local_err);
            return;
        }

        if (sg_acc_mode_final_request) {
            if (qcrypto_hash_finalize_bytes(s->hash_ctx, &digest_buf,
                                            &digest_len, &local_err)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "qcrypto hash finalize failed : %s",
                              error_get_pretty(local_err));
                error_free(local_err);
                local_err = NULL;
            }

            /* The accumulation cycle is over: drop context and state. */
            qcrypto_hash_free(s->hash_ctx);

            s->hash_ctx = NULL;
            s->iov_count = 0;
            s->total_req_len = 0;
        }
    } else if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf,
                                   &digest_len, &local_err) < 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "qcrypto hash bytesv failed : %s",
                      error_get_pretty(local_err));
        error_free(local_err);
        return;
    }

    /* digest_len stays 0 on non-final acc_mode requests: nothing written. */
    if (address_space_write(&s->dram_as, s->regs[R_HASH_DEST],
                            MEMTXATTRS_UNSPECIFIED,
                            digest_buf, digest_len)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "aspeed_hace: address space write failed\n");
    }

    /* Release every mapping gathered above, in reverse order. */
    for (; i > 0; i--) {
        address_space_unmap(&s->dram_as, iov[i - 1].iov_base,
                            iov[i - 1].iov_len, false,
                            iov[i - 1].iov_len);
    }

    /*
     * Set status bits to indicate completion. Testing shows hardware sets
     * these irrespective of HASH_IRQ_EN.
     */
    s->regs[R_STATUS] |= HASH_IRQ;
}
301
aspeed_hace_read(void * opaque,hwaddr addr,unsigned int size)302 static uint64_t aspeed_hace_read(void *opaque, hwaddr addr, unsigned int size)
303 {
304 AspeedHACEState *s = ASPEED_HACE(opaque);
305
306 addr >>= 2;
307
308 if (addr >= ASPEED_HACE_NR_REGS) {
309 qemu_log_mask(LOG_GUEST_ERROR,
310 "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n",
311 __func__, addr << 2);
312 return 0;
313 }
314
315 return s->regs[addr];
316 }
317
/*
 * MMIO write handler. Per-register masking/side effects are applied in
 * the switch; the (possibly modified) value is then stored into
 * s->regs[addr] at the bottom for every in-range register.
 */
static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
                              unsigned int size)
{
    AspeedHACEState *s = ASPEED_HACE(opaque);
    AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);

    /* Byte offset -> 32-bit register index. */
    addr >>= 2;

    if (addr >= ASPEED_HACE_NR_REGS) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n",
                      __func__, addr << 2);
        return;
    }

    switch (addr) {
    case R_STATUS:
        /* HASH_IRQ is write-1-to-clear: drop it from the stored value
         * and lower the IRQ line if it was pending. */
        if (data & HASH_IRQ) {
            data &= ~HASH_IRQ;

            if (s->regs[addr] & HASH_IRQ) {
                qemu_irq_lower(s->irq);
            }
        }
        break;
    case R_HASH_SRC:
        /* Address registers are masked per SoC generation. */
        data &= ahc->src_mask;
        break;
    case R_HASH_DEST:
        data &= ahc->dest_mask;
        break;
    case R_HASH_KEY_BUFF:
        data &= ahc->key_mask;
        break;
    case R_HASH_SRC_LEN:
        data &= 0x0FFFFFFF;
        break;
    case R_HASH_CMD: {
        /* Writing the command register kicks off a hash operation. */
        int algo;
        data &= ahc->hash_mask;

        if ((data & HASH_DIGEST_HMAC)) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: HMAC mode not implemented\n",
                          __func__);
        }
        if (data & BIT(1)) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: Cascaded mode not implemented\n",
                          __func__);
        }
        algo = hash_algo_lookup(data);
        if (algo < 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                    "%s: Invalid hash algorithm selection 0x%"PRIx64"\n",
                    __func__, data & ahc->hash_mask);
            break;
        }
        /* Hashing is performed synchronously within the write. */
        do_hash_operation(s, algo, data & HASH_SG_EN,
                ((data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM));

        if (data & HASH_IRQ_EN) {
            qemu_irq_raise(s->irq);
        }
        break;
    }
    case R_CRYPT_CMD:
        qemu_log_mask(LOG_UNIMP, "%s: Crypt commands not implemented\n",
                       __func__);
        break;
    default:
        break;
    }

    s->regs[addr] = data;
}
394
/* MMIO dispatch table: little-endian, 1..4-byte accesses accepted. */
static const MemoryRegionOps aspeed_hace_ops = {
    .read = aspeed_hace_read,
    .write = aspeed_hace_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};
404
/* Device reset: drop any in-flight accumulated hash and clear all state. */
static void aspeed_hace_reset(DeviceState *dev)
{
    struct AspeedHACEState *s = ASPEED_HACE(dev);

    /* An interrupted accumulative hash must not survive reset. */
    if (s->hash_ctx) {
        qcrypto_hash_free(s->hash_ctx);
        s->hash_ctx = NULL;
    }

    memset(s->regs, 0, sizeof(s->regs));
    s->total_req_len = 0;
    s->iov_count = 0;
}
418
aspeed_hace_realize(DeviceState * dev,Error ** errp)419 static void aspeed_hace_realize(DeviceState *dev, Error **errp)
420 {
421 AspeedHACEState *s = ASPEED_HACE(dev);
422 SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
423
424 sysbus_init_irq(sbd, &s->irq);
425
426 memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_hace_ops, s,
427 TYPE_ASPEED_HACE, 0x1000);
428
429 if (!s->dram_mr) {
430 error_setg(errp, TYPE_ASPEED_HACE ": 'dram' link not set");
431 return;
432 }
433
434 address_space_init(&s->dram_as, s->dram_mr, "dram");
435
436 sysbus_init_mmio(sbd, &s->iomem);
437 }
438
static Property aspeed_hace_properties[] = {
    /* Link to the DRAM region used for DMA; must be set by the SoC. */
    DEFINE_PROP_LINK("dram", AspeedHACEState, dram_mr,
                     TYPE_MEMORY_REGION, MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};
444
445
/*
 * Migration state: registers plus the accumulative-hash bookkeeping.
 * NOTE(review): the qcrypto hash_ctx and iov_cache contents are not
 * migrated — an in-progress accumulative hash would not survive
 * migration; confirm this limitation is acceptable/known.
 */
static const VMStateDescription vmstate_aspeed_hace = {
    .name = TYPE_ASPEED_HACE,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, AspeedHACEState, ASPEED_HACE_NR_REGS),
        VMSTATE_UINT32(total_req_len, AspeedHACEState),
        VMSTATE_UINT32(iov_count, AspeedHACEState),
        VMSTATE_END_OF_LIST(),
    }
};
457
aspeed_hace_class_init(ObjectClass * klass,void * data)458 static void aspeed_hace_class_init(ObjectClass *klass, void *data)
459 {
460 DeviceClass *dc = DEVICE_CLASS(klass);
461
462 dc->realize = aspeed_hace_realize;
463 device_class_set_legacy_reset(dc, aspeed_hace_reset);
464 device_class_set_props(dc, aspeed_hace_properties);
465 dc->vmsd = &vmstate_aspeed_hace;
466 }
467
468 static const TypeInfo aspeed_hace_info = {
469 .name = TYPE_ASPEED_HACE,
470 .parent = TYPE_SYS_BUS_DEVICE,
471 .instance_size = sizeof(AspeedHACEState),
472 .class_init = aspeed_hace_class_init,
473 .class_size = sizeof(AspeedHACEClass)
474 };
475
aspeed_ast2400_hace_class_init(ObjectClass * klass,void * data)476 static void aspeed_ast2400_hace_class_init(ObjectClass *klass, void *data)
477 {
478 DeviceClass *dc = DEVICE_CLASS(klass);
479 AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);
480
481 dc->desc = "AST2400 Hash and Crypto Engine";
482
483 ahc->src_mask = 0x0FFFFFFF;
484 ahc->dest_mask = 0x0FFFFFF8;
485 ahc->key_mask = 0x0FFFFFC0;
486 ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
487 }
488
489 static const TypeInfo aspeed_ast2400_hace_info = {
490 .name = TYPE_ASPEED_AST2400_HACE,
491 .parent = TYPE_ASPEED_HACE,
492 .class_init = aspeed_ast2400_hace_class_init,
493 };
494
aspeed_ast2500_hace_class_init(ObjectClass * klass,void * data)495 static void aspeed_ast2500_hace_class_init(ObjectClass *klass, void *data)
496 {
497 DeviceClass *dc = DEVICE_CLASS(klass);
498 AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);
499
500 dc->desc = "AST2500 Hash and Crypto Engine";
501
502 ahc->src_mask = 0x3fffffff;
503 ahc->dest_mask = 0x3ffffff8;
504 ahc->key_mask = 0x3FFFFFC0;
505 ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
506 }
507
508 static const TypeInfo aspeed_ast2500_hace_info = {
509 .name = TYPE_ASPEED_AST2500_HACE,
510 .parent = TYPE_ASPEED_HACE,
511 .class_init = aspeed_ast2500_hace_class_init,
512 };
513
aspeed_ast2600_hace_class_init(ObjectClass * klass,void * data)514 static void aspeed_ast2600_hace_class_init(ObjectClass *klass, void *data)
515 {
516 DeviceClass *dc = DEVICE_CLASS(klass);
517 AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);
518
519 dc->desc = "AST2600 Hash and Crypto Engine";
520
521 ahc->src_mask = 0x7FFFFFFF;
522 ahc->dest_mask = 0x7FFFFFF8;
523 ahc->key_mask = 0x7FFFFFF8;
524 ahc->hash_mask = 0x00147FFF;
525 }
526
527 static const TypeInfo aspeed_ast2600_hace_info = {
528 .name = TYPE_ASPEED_AST2600_HACE,
529 .parent = TYPE_ASPEED_HACE,
530 .class_init = aspeed_ast2600_hace_class_init,
531 };
532
aspeed_ast1030_hace_class_init(ObjectClass * klass,void * data)533 static void aspeed_ast1030_hace_class_init(ObjectClass *klass, void *data)
534 {
535 DeviceClass *dc = DEVICE_CLASS(klass);
536 AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);
537
538 dc->desc = "AST1030 Hash and Crypto Engine";
539
540 ahc->src_mask = 0x7FFFFFFF;
541 ahc->dest_mask = 0x7FFFFFF8;
542 ahc->key_mask = 0x7FFFFFF8;
543 ahc->hash_mask = 0x00147FFF;
544 }
545
546 static const TypeInfo aspeed_ast1030_hace_info = {
547 .name = TYPE_ASPEED_AST1030_HACE,
548 .parent = TYPE_ASPEED_HACE,
549 .class_init = aspeed_ast1030_hace_class_init,
550 };
551
/* Register the base type and every SoC-specific subtype with QOM. */
static void aspeed_hace_register_types(void)
{
    type_register_static(&aspeed_ast2400_hace_info);
    type_register_static(&aspeed_ast2500_hace_info);
    type_register_static(&aspeed_ast2600_hace_info);
    type_register_static(&aspeed_ast1030_hace_info);
    type_register_static(&aspeed_hace_info);
}

type_init(aspeed_hace_register_types);
562