xref: /openbmc/qemu/hw/misc/aspeed_hace.c (revision f05cc69c6ce0242e2eeae3cd1513454006b8f040)
1 /*
2  * ASPEED Hash and Crypto Engine
3  *
4  * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
5  * Copyright (C) 2021 IBM Corp.
6  *
7  * Joel Stanley <joel@jms.id.au>
8  *
9  * SPDX-License-Identifier: GPL-2.0-or-later
10  */
11 
12 #include "qemu/osdep.h"
13 #include "qemu/log.h"
14 #include "qemu/error-report.h"
15 #include "hw/misc/aspeed_hace.h"
16 #include "qapi/error.h"
17 #include "migration/vmstate.h"
18 #include "crypto/hash.h"
19 #include "hw/qdev-properties.h"
20 #include "hw/irq.h"
21 
22 #define R_CRYPT_CMD     (0x10 / 4)
23 
24 #define R_STATUS        (0x1c / 4)
25 #define HASH_IRQ        BIT(9)
26 #define CRYPT_IRQ       BIT(12)
27 #define TAG_IRQ         BIT(15)
28 
29 #define R_HASH_SRC      (0x20 / 4)
30 #define R_HASH_DEST     (0x24 / 4)
31 #define R_HASH_KEY_BUFF (0x28 / 4)
32 #define R_HASH_SRC_LEN  (0x2c / 4)
33 
34 #define R_HASH_CMD      (0x30 / 4)
35 /* Hash algorithm selection */
36 #define  HASH_ALGO_MASK                 (BIT(4) | BIT(5) | BIT(6))
37 #define  HASH_ALGO_MD5                  0
38 #define  HASH_ALGO_SHA1                 BIT(5)
39 #define  HASH_ALGO_SHA224               BIT(6)
40 #define  HASH_ALGO_SHA256               (BIT(4) | BIT(6))
41 #define  HASH_ALGO_SHA512_SERIES        (BIT(5) | BIT(6))
42 /* SHA512 algorithm selection */
43 #define  SHA512_HASH_ALGO_MASK          (BIT(10) | BIT(11) | BIT(12))
44 #define  HASH_ALGO_SHA512_SHA512        0
45 #define  HASH_ALGO_SHA512_SHA384        BIT(10)
46 #define  HASH_ALGO_SHA512_SHA256        BIT(11)
47 #define  HASH_ALGO_SHA512_SHA224        (BIT(10) | BIT(11))
48 /* HMAC modes */
49 #define  HASH_HMAC_MASK                 (BIT(7) | BIT(8))
50 #define  HASH_DIGEST                    0
51 #define  HASH_DIGEST_HMAC               BIT(7)
52 #define  HASH_DIGEST_ACCUM              BIT(8)
53 #define  HASH_HMAC_KEY                  (BIT(7) | BIT(8))
54 /* Cascaded operation modes */
55 #define  HASH_ONLY                      0
56 #define  HASH_ONLY2                     BIT(0)
57 #define  HASH_CRYPT_THEN_HASH           BIT(1)
58 #define  HASH_HASH_THEN_CRYPT           (BIT(0) | BIT(1))
59 /* Other cmd bits */
60 #define  HASH_IRQ_EN                    BIT(9)
61 #define  HASH_SG_EN                     BIT(18)
62 #define  CRYPT_IRQ_EN                   BIT(12)
63 /* Scatter-gather data list */
64 #define SG_LIST_LEN_SIZE                4
65 #define SG_LIST_LEN_MASK                0x0FFFFFFF
66 #define SG_LIST_LEN_LAST                BIT(31)
67 #define SG_LIST_ADDR_SIZE               4
68 #define SG_LIST_ADDR_MASK               0x7FFFFFFF
69 #define SG_LIST_ENTRY_SIZE              (SG_LIST_LEN_SIZE + SG_LIST_ADDR_SIZE)
70 
/*
 * Map from the guest-programmed HASH_CMD algorithm-selection bits to QEMU
 * crypto API algorithm identifiers.  Entries combine the primary algorithm
 * field (HASH_ALGO_MASK, bits 4-6) with the SHA512-series sub-field
 * (SHA512_HASH_ALGO_MASK, bits 10-12) for the SHA512 family.
 *
 * NOTE(review): there is no entry for HASH_ALGO_SHA512_SHA224, so a guest
 * selecting SHA-512/224 fails hash_algo_lookup() — presumably unsupported
 * (SHA-512/224 is not the same algorithm as SHA-224); confirm intended.
 */
static const struct {
    uint32_t mask;          /* selection bits as written to R_HASH_CMD */
    QCryptoHashAlgo algo;   /* corresponding QEMU crypto algorithm */
} hash_algo_map[] = {
    { HASH_ALGO_MD5, QCRYPTO_HASH_ALGO_MD5 },
    { HASH_ALGO_SHA1, QCRYPTO_HASH_ALGO_SHA1 },
    { HASH_ALGO_SHA224, QCRYPTO_HASH_ALGO_SHA224 },
    { HASH_ALGO_SHA256, QCRYPTO_HASH_ALGO_SHA256 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA512,
      QCRYPTO_HASH_ALGO_SHA512 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA384,
      QCRYPTO_HASH_ALGO_SHA384 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA256,
      QCRYPTO_HASH_ALGO_SHA256 },
};
86 
87 static int hash_algo_lookup(uint32_t reg)
88 {
89     int i;
90 
91     reg &= HASH_ALGO_MASK | SHA512_HASH_ALGO_MASK;
92 
93     for (i = 0; i < ARRAY_SIZE(hash_algo_map); i++) {
94         if (reg == hash_algo_map[i].mask) {
95             return hash_algo_map[i].algo;
96         }
97     }
98 
99     return -1;
100 }
101 
102 /**
103  * Check whether the request contains padding message.
104  *
105  * @param s             aspeed hace state object
106  * @param iov           iov of current request
107  * @param req_len       length of the current request
108  * @param total_msg_len length of all acc_mode requests(excluding padding msg)
109  * @param pad_offset    start offset of padding message
110  */
111 static bool has_padding(AspeedHACEState *s, struct iovec *iov,
112                         hwaddr req_len, uint32_t *total_msg_len,
113                         uint32_t *pad_offset)
114 {
115     *total_msg_len = (uint32_t)(ldq_be_p(iov->iov_base + req_len - 8) / 8);
116     /*
117      * SG_LIST_LEN_LAST asserted in the request length doesn't mean it is the
118      * last request. The last request should contain padding message.
119      * We check whether message contains padding by
120      *   1. Get total message length. If the current message contains
121      *      padding, the last 8 bytes are total message length.
122      *   2. Check whether the total message length is valid.
123      *      If it is valid, the value should less than or equal to
124      *      total_req_len.
125      *   3. Current request len - padding_size to get padding offset.
126      *      The padding message's first byte should be 0x80
127      */
128     if (*total_msg_len <= s->total_req_len) {
129         uint32_t padding_size = s->total_req_len - *total_msg_len;
130         uint8_t *padding = iov->iov_base;
131 
132         if (padding_size > req_len) {
133             return false;
134         }
135 
136         *pad_offset = req_len - padding_size;
137         if (padding[*pad_offset] == 0x80) {
138             return true;
139         }
140     }
141 
142     return false;
143 }
144 
145 static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
146                               bool acc_mode)
147 {
148     struct iovec iov[ASPEED_HACE_MAX_SG];
149     uint32_t total_msg_len;
150     uint32_t pad_offset;
151     g_autofree uint8_t *digest_buf = NULL;
152     size_t digest_len = 0;
153     bool sg_acc_mode_final_request = false;
154     int i;
155     void *haddr;
156     Error *local_err = NULL;
157 
158     if (acc_mode && s->hash_ctx == NULL) {
159         s->hash_ctx = qcrypto_hash_new(algo, &local_err);
160         if (s->hash_ctx == NULL) {
161             qemu_log_mask(LOG_GUEST_ERROR, "qcrypto hash failed : %s",
162                           error_get_pretty(local_err));
163             error_free(local_err);
164             return;
165         }
166     }
167 
168     if (sg_mode) {
169         uint32_t len = 0;
170 
171         for (i = 0; !(len & SG_LIST_LEN_LAST); i++) {
172             uint32_t addr, src;
173             hwaddr plen;
174 
175             if (i == ASPEED_HACE_MAX_SG) {
176                 qemu_log_mask(LOG_GUEST_ERROR,
177                         "aspeed_hace: guest failed to set end of sg list marker\n");
178                 break;
179             }
180 
181             src = s->regs[R_HASH_SRC] + (i * SG_LIST_ENTRY_SIZE);
182 
183             len = address_space_ldl_le(&s->dram_as, src,
184                                        MEMTXATTRS_UNSPECIFIED, NULL);
185 
186             addr = address_space_ldl_le(&s->dram_as, src + SG_LIST_LEN_SIZE,
187                                         MEMTXATTRS_UNSPECIFIED, NULL);
188             addr &= SG_LIST_ADDR_MASK;
189 
190             plen = len & SG_LIST_LEN_MASK;
191             haddr = address_space_map(&s->dram_as, addr, &plen, false,
192                                       MEMTXATTRS_UNSPECIFIED);
193             if (haddr == NULL) {
194                 qemu_log_mask(LOG_GUEST_ERROR,
195                               "%s: qcrypto failed\n", __func__);
196                 return;
197             }
198             iov[i].iov_base = haddr;
199             if (acc_mode) {
200                 s->total_req_len += plen;
201 
202                 if (has_padding(s, &iov[i], plen, &total_msg_len,
203                                 &pad_offset)) {
204                     /* Padding being present indicates the final request */
205                     sg_acc_mode_final_request = true;
206                     iov[i].iov_len = pad_offset;
207                 } else {
208                     iov[i].iov_len = plen;
209                 }
210             } else {
211                 iov[i].iov_len = plen;
212             }
213         }
214     } else {
215         hwaddr len = s->regs[R_HASH_SRC_LEN];
216 
217         haddr = address_space_map(&s->dram_as, s->regs[R_HASH_SRC],
218                                   &len, false, MEMTXATTRS_UNSPECIFIED);
219         if (haddr == NULL) {
220             qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__);
221             return;
222         }
223         iov[0].iov_base = haddr;
224         iov[0].iov_len = len;
225         i = 1;
226     }
227 
228     if (acc_mode) {
229         if (qcrypto_hash_updatev(s->hash_ctx, iov, i, &local_err) < 0) {
230             qemu_log_mask(LOG_GUEST_ERROR, "qcrypto hash update failed : %s",
231                           error_get_pretty(local_err));
232             error_free(local_err);
233             return;
234         }
235 
236         if (sg_acc_mode_final_request) {
237             if (qcrypto_hash_finalize_bytes(s->hash_ctx, &digest_buf,
238                                             &digest_len, &local_err)) {
239                 qemu_log_mask(LOG_GUEST_ERROR,
240                               "qcrypto hash finalize failed : %s",
241                               error_get_pretty(local_err));
242                 error_free(local_err);
243                 local_err = NULL;
244             }
245 
246             qcrypto_hash_free(s->hash_ctx);
247 
248             s->hash_ctx = NULL;
249             s->total_req_len = 0;
250         }
251     } else if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf,
252                                    &digest_len, &local_err) < 0) {
253         qemu_log_mask(LOG_GUEST_ERROR, "qcrypto hash bytesv failed : %s",
254                       error_get_pretty(local_err));
255         error_free(local_err);
256         return;
257     }
258 
259     if (address_space_write(&s->dram_as, s->regs[R_HASH_DEST],
260                             MEMTXATTRS_UNSPECIFIED,
261                             digest_buf, digest_len)) {
262         qemu_log_mask(LOG_GUEST_ERROR,
263                       "aspeed_hace: address space write failed\n");
264     }
265 
266     for (; i > 0; i--) {
267         address_space_unmap(&s->dram_as, iov[i - 1].iov_base,
268                             iov[i - 1].iov_len, false,
269                             iov[i - 1].iov_len);
270     }
271 
272     /*
273      * Set status bits to indicate completion. Testing shows hardware sets
274      * these irrespective of HASH_IRQ_EN.
275      */
276     s->regs[R_STATUS] |= HASH_IRQ;
277 }
278 
279 static uint64_t aspeed_hace_read(void *opaque, hwaddr addr, unsigned int size)
280 {
281     AspeedHACEState *s = ASPEED_HACE(opaque);
282 
283     addr >>= 2;
284 
285     if (addr >= ASPEED_HACE_NR_REGS) {
286         qemu_log_mask(LOG_GUEST_ERROR,
287                       "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n",
288                       __func__, addr << 2);
289         return 0;
290     }
291 
292     return s->regs[addr];
293 }
294 
/*
 * MMIO write handler.
 *
 * Converts the byte offset to a register index, applies per-register
 * side effects (W1C interrupt acknowledgement, address masking, command
 * dispatch), then stores the possibly-modified @data into s->regs[addr].
 */
static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
                              unsigned int size)
{
    AspeedHACEState *s = ASPEED_HACE(opaque);
    AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);

    /* 32-bit registers: byte offset -> register index */
    addr >>= 2;

    if (addr >= ASPEED_HACE_NR_REGS) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n",
                      __func__, addr << 2);
        return;
    }

    switch (addr) {
    case R_STATUS:
        /*
         * Interrupt bits are write-1-to-clear: strip them from @data so
         * the store below clears them, and drop the IRQ line if the bit
         * was previously pending.
         */
        if (data & HASH_IRQ) {
            data &= ~HASH_IRQ;

            if (s->regs[addr] & HASH_IRQ) {
                qemu_irq_lower(s->irq);
            }
        }
        /* CRYPT_IRQ is only modelled on SoCs using the crypt workaround */
        if (ahc->raise_crypt_interrupt_workaround) {
            if (data & CRYPT_IRQ) {
                data &= ~CRYPT_IRQ;

                if (s->regs[addr] & CRYPT_IRQ) {
                    qemu_irq_lower(s->irq);
                }
            }
        }
        break;
    case R_HASH_SRC:
        /* Mask to the SoC's DRAM addressing width */
        data &= ahc->src_mask;
        break;
    case R_HASH_DEST:
        data &= ahc->dest_mask;
        break;
    case R_HASH_KEY_BUFF:
        data &= ahc->key_mask;
        break;
    case R_HASH_SRC_LEN:
        data &= 0x0FFFFFFF;
        break;
    case R_HASH_CMD: {
        int algo;
        data &= ahc->hash_mask;

        /* Unimplemented features: log but fall through to plain hashing */
        if ((data & HASH_DIGEST_HMAC)) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: HMAC mode not implemented\n",
                          __func__);
        }
        if (data & BIT(1)) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: Cascaded mode not implemented\n",
                          __func__);
        }
        algo = hash_algo_lookup(data);
        if (algo < 0) {
                qemu_log_mask(LOG_GUEST_ERROR,
                        "%s: Invalid hash algorithm selection 0x%"PRIx64"\n",
                        __func__, data & ahc->hash_mask);
                break;
        }
        /* Run the hash; accumulative mode when HMAC field selects ACCUM */
        do_hash_operation(s, algo, data & HASH_SG_EN,
                ((data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM));

        /* do_hash_operation() set the status bit; raise IRQ if enabled */
        if (data & HASH_IRQ_EN) {
            qemu_irq_raise(s->irq);
        }
        break;
    }
    case R_CRYPT_CMD:
        qemu_log_mask(LOG_UNIMP, "%s: Crypt commands not implemented\n",
                       __func__);
        /* Workaround: pretend completion so guest firmware makes progress */
        if (ahc->raise_crypt_interrupt_workaround) {
            s->regs[R_STATUS] |= CRYPT_IRQ;
            if (data & CRYPT_IRQ_EN) {
                qemu_irq_raise(s->irq);
            }
        }
        break;
    default:
        break;
    }

    s->regs[addr] = data;
}
386 
/* MMIO access table: little-endian, 1- to 4-byte accesses accepted. */
static const MemoryRegionOps aspeed_hace_ops = {
    .read = aspeed_hace_read,
    .write = aspeed_hace_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};
396 
397 static void aspeed_hace_reset(DeviceState *dev)
398 {
399     struct AspeedHACEState *s = ASPEED_HACE(dev);
400 
401     if (s->hash_ctx != NULL) {
402         qcrypto_hash_free(s->hash_ctx);
403         s->hash_ctx = NULL;
404     }
405 
406     memset(s->regs, 0, sizeof(s->regs));
407     s->total_req_len = 0;
408 }
409 
410 static void aspeed_hace_realize(DeviceState *dev, Error **errp)
411 {
412     AspeedHACEState *s = ASPEED_HACE(dev);
413     SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
414 
415     sysbus_init_irq(sbd, &s->irq);
416 
417     memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_hace_ops, s,
418             TYPE_ASPEED_HACE, 0x1000);
419 
420     if (!s->dram_mr) {
421         error_setg(errp, TYPE_ASPEED_HACE ": 'dram' link not set");
422         return;
423     }
424 
425     address_space_init(&s->dram_as, s->dram_mr, "dram");
426 
427     sysbus_init_mmio(sbd, &s->iomem);
428 }
429 
/* Board-supplied link to the DRAM memory region the engine DMAs against. */
static const Property aspeed_hace_properties[] = {
    DEFINE_PROP_LINK("dram", AspeedHACEState, dram_mr,
                     TYPE_MEMORY_REGION, MemoryRegion *),
};
434 
435 
/*
 * Migration state: the register file plus the accumulative-mode byte
 * counter.  Version 2; the in-progress qcrypto hash context itself is not
 * migrated — NOTE(review): an acc-mode request spanning a migration would
 * presumably restart from an empty context; confirm acceptable.
 */
static const VMStateDescription vmstate_aspeed_hace = {
    .name = TYPE_ASPEED_HACE,
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, AspeedHACEState, ASPEED_HACE_NR_REGS),
        VMSTATE_UINT32(total_req_len, AspeedHACEState),
        VMSTATE_END_OF_LIST(),
    }
};
446 
447 static void aspeed_hace_class_init(ObjectClass *klass, const void *data)
448 {
449     DeviceClass *dc = DEVICE_CLASS(klass);
450 
451     dc->realize = aspeed_hace_realize;
452     device_class_set_legacy_reset(dc, aspeed_hace_reset);
453     device_class_set_props(dc, aspeed_hace_properties);
454     dc->vmsd = &vmstate_aspeed_hace;
455 }
456 
/* Abstract-ish base type; SoC-specific subclasses set the mask fields. */
static const TypeInfo aspeed_hace_info = {
    .name = TYPE_ASPEED_HACE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(AspeedHACEState),
    .class_init = aspeed_hace_class_init,
    .class_size = sizeof(AspeedHACEClass)
};
464 
465 static void aspeed_ast2400_hace_class_init(ObjectClass *klass, const void *data)
466 {
467     DeviceClass *dc = DEVICE_CLASS(klass);
468     AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);
469 
470     dc->desc = "AST2400 Hash and Crypto Engine";
471 
472     ahc->src_mask = 0x0FFFFFFF;
473     ahc->dest_mask = 0x0FFFFFF8;
474     ahc->key_mask = 0x0FFFFFC0;
475     ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
476 }
477 
478 static const TypeInfo aspeed_ast2400_hace_info = {
479     .name = TYPE_ASPEED_AST2400_HACE,
480     .parent = TYPE_ASPEED_HACE,
481     .class_init = aspeed_ast2400_hace_class_init,
482 };
483 
/* AST2500: wider (30-bit) address masks, same limited hash command set. */
static void aspeed_ast2500_hace_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2500 Hash and Crypto Engine";

    ahc->src_mask = 0x3fffffff;
    ahc->dest_mask = 0x3ffffff8;
    ahc->key_mask = 0x3FFFFFC0;
    ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
}

static const TypeInfo aspeed_ast2500_hace_info = {
    .name = TYPE_ASPEED_AST2500_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2500_hace_class_init,
};
502 
503 static void aspeed_ast2600_hace_class_init(ObjectClass *klass, const void *data)
504 {
505     DeviceClass *dc = DEVICE_CLASS(klass);
506     AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);
507 
508     dc->desc = "AST2600 Hash and Crypto Engine";
509 
510     ahc->src_mask = 0x7FFFFFFF;
511     ahc->dest_mask = 0x7FFFFFF8;
512     ahc->key_mask = 0x7FFFFFF8;
513     ahc->hash_mask = 0x00147FFF;
514 }
515 
516 static const TypeInfo aspeed_ast2600_hace_info = {
517     .name = TYPE_ASPEED_AST2600_HACE,
518     .parent = TYPE_ASPEED_HACE,
519     .class_init = aspeed_ast2600_hace_class_init,
520 };
521 
/* AST1030: same register masks as the AST2600. */
static void aspeed_ast1030_hace_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST1030 Hash and Crypto Engine";

    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;
}

static const TypeInfo aspeed_ast1030_hace_info = {
    .name = TYPE_ASPEED_AST1030_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast1030_hace_class_init,
};
540 
/* AST2700: AST2600-compatible masks plus the crypt-IRQ workaround. */
static void aspeed_ast2700_hace_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2700 Hash and Crypto Engine";

    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;

    /*
     * Currently, it does not support the CRYPT command. Instead, it only
     * sends an interrupt to notify the firmware that the crypt command
     * has completed. It is a temporary workaround.
     */
    ahc->raise_crypt_interrupt_workaround = true;
}

static const TypeInfo aspeed_ast2700_hace_info = {
    .name = TYPE_ASPEED_AST2700_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2700_hace_class_init,
};
566 
567 static void aspeed_hace_register_types(void)
568 {
569     type_register_static(&aspeed_ast2400_hace_info);
570     type_register_static(&aspeed_ast2500_hace_info);
571     type_register_static(&aspeed_ast2600_hace_info);
572     type_register_static(&aspeed_ast1030_hace_info);
573     type_register_static(&aspeed_ast2700_hace_info);
574     type_register_static(&aspeed_hace_info);
575 }
576 
577 type_init(aspeed_hace_register_types);
578