xref: /openbmc/qemu/hw/misc/aspeed_hace.c (revision 7e65aa39b37cb189c4d0bc923d4d778bdd626f4b)
1 /*
2  * ASPEED Hash and Crypto Engine
3  *
4  * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
5  * Copyright (C) 2021 IBM Corp.
6  *
7  * Joel Stanley <joel@jms.id.au>
8  *
9  * SPDX-License-Identifier: GPL-2.0-or-later
10  */
11 
12 #include "qemu/osdep.h"
13 #include "qemu/log.h"
14 #include "qemu/error-report.h"
15 #include "hw/misc/aspeed_hace.h"
16 #include "qapi/error.h"
17 #include "migration/vmstate.h"
18 #include "crypto/hash.h"
19 #include "hw/qdev-properties.h"
20 #include "hw/irq.h"
21 
/*
 * Register offsets (expressed as 32-bit word indices into s->regs[]) and
 * bit definitions for the HACE MMIO window.  The *_HI registers exist only
 * on SoCs with 64-bit DMA support (see AspeedHACEClass::has_dma64).
 */
#define R_CRYPT_CMD     (0x10 / 4)

#define R_STATUS        (0x1c / 4)
#define HASH_IRQ        BIT(9)
#define CRYPT_IRQ       BIT(12)
#define TAG_IRQ         BIT(15)

#define R_HASH_SRC      (0x20 / 4)
#define R_HASH_DIGEST   (0x24 / 4)
#define R_HASH_KEY_BUFF (0x28 / 4)
#define R_HASH_SRC_LEN  (0x2c / 4)
#define R_HASH_SRC_HI       (0x90 / 4)
#define R_HASH_DIGEST_HI    (0x94 / 4)
#define R_HASH_KEY_BUFF_HI  (0x98 / 4)

#define R_HASH_CMD      (0x30 / 4)
/* Hash algorithm selection */
#define  HASH_ALGO_MASK                 (BIT(4) | BIT(5) | BIT(6))
#define  HASH_ALGO_MD5                  0
#define  HASH_ALGO_SHA1                 BIT(5)
#define  HASH_ALGO_SHA224               BIT(6)
#define  HASH_ALGO_SHA256               (BIT(4) | BIT(6))
#define  HASH_ALGO_SHA512_SERIES        (BIT(5) | BIT(6))
/* SHA512 algorithm selection (sub-mode, valid with HASH_ALGO_SHA512_SERIES) */
#define  SHA512_HASH_ALGO_MASK          (BIT(10) | BIT(11) | BIT(12))
#define  HASH_ALGO_SHA512_SHA512        0
#define  HASH_ALGO_SHA512_SHA384        BIT(10)
#define  HASH_ALGO_SHA512_SHA256        BIT(11)
#define  HASH_ALGO_SHA512_SHA224        (BIT(10) | BIT(11))
/* HMAC modes */
#define  HASH_HMAC_MASK                 (BIT(7) | BIT(8))
#define  HASH_DIGEST                    0
#define  HASH_DIGEST_HMAC               BIT(7)
#define  HASH_DIGEST_ACCUM              BIT(8)
#define  HASH_HMAC_KEY                  (BIT(7) | BIT(8))
/* Cascaded operation modes */
#define  HASH_ONLY                      0
#define  HASH_ONLY2                     BIT(0)
#define  HASH_CRYPT_THEN_HASH           BIT(1)
#define  HASH_HASH_THEN_CRYPT           (BIT(0) | BIT(1))
/* Other cmd bits */
#define  HASH_IRQ_EN                    BIT(9)
#define  HASH_SG_EN                     BIT(18)
#define  CRYPT_IRQ_EN                   BIT(12)
/* Scatter-gather data list: each entry is a (length, address) pair */
#define SG_LIST_LEN_SIZE                4
#define SG_LIST_LEN_MASK                0x0FFFFFFF
#define SG_LIST_LEN_LAST                BIT(31)
#define SG_LIST_ADDR_SIZE               4
#define SG_LIST_ADDR_MASK               0x7FFFFFFF
#define SG_LIST_ENTRY_SIZE              (SG_LIST_LEN_SIZE + SG_LIST_ADDR_SIZE)
73 
/*
 * Mapping from the HASH_CMD algorithm-selection bit patterns (masked with
 * HASH_ALGO_MASK | SHA512_HASH_ALGO_MASK) to the QEMU crypto-layer hash
 * algorithm identifiers.  Looked up by hash_algo_lookup().
 */
static const struct {
    uint32_t mask;          /* masked HASH_CMD selection bits */
    QCryptoHashAlgo algo;   /* corresponding qcrypto algorithm */
} hash_algo_map[] = {
    { HASH_ALGO_MD5, QCRYPTO_HASH_ALGO_MD5 },
    { HASH_ALGO_SHA1, QCRYPTO_HASH_ALGO_SHA1 },
    { HASH_ALGO_SHA224, QCRYPTO_HASH_ALGO_SHA224 },
    { HASH_ALGO_SHA256, QCRYPTO_HASH_ALGO_SHA256 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA512,
      QCRYPTO_HASH_ALGO_SHA512 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA384,
      QCRYPTO_HASH_ALGO_SHA384 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA256,
      QCRYPTO_HASH_ALGO_SHA256 },
};
89 
90 static int hash_algo_lookup(uint32_t reg)
91 {
92     int i;
93 
94     reg &= HASH_ALGO_MASK | SHA512_HASH_ALGO_MASK;
95 
96     for (i = 0; i < ARRAY_SIZE(hash_algo_map); i++) {
97         if (reg == hash_algo_map[i].mask) {
98             return hash_algo_map[i].algo;
99         }
100     }
101 
102     return -1;
103 }
104 
105 /**
106  * Check whether the request contains padding message.
107  *
108  * @param s             aspeed hace state object
109  * @param iov           iov of current request
110  * @param req_len       length of the current request
111  * @param total_msg_len length of all acc_mode requests(excluding padding msg)
112  * @param pad_offset    start offset of padding message
113  */
114 static bool has_padding(AspeedHACEState *s, struct iovec *iov,
115                         hwaddr req_len, uint32_t *total_msg_len,
116                         uint32_t *pad_offset)
117 {
118     *total_msg_len = (uint32_t)(ldq_be_p(iov->iov_base + req_len - 8) / 8);
119     /*
120      * SG_LIST_LEN_LAST asserted in the request length doesn't mean it is the
121      * last request. The last request should contain padding message.
122      * We check whether message contains padding by
123      *   1. Get total message length. If the current message contains
124      *      padding, the last 8 bytes are total message length.
125      *   2. Check whether the total message length is valid.
126      *      If it is valid, the value should less than or equal to
127      *      total_req_len.
128      *   3. Current request len - padding_size to get padding offset.
129      *      The padding message's first byte should be 0x80
130      */
131     if (*total_msg_len <= s->total_req_len) {
132         uint32_t padding_size = s->total_req_len - *total_msg_len;
133         uint8_t *padding = iov->iov_base;
134 
135         if (padding_size > req_len) {
136             return false;
137         }
138 
139         *pad_offset = req_len - padding_size;
140         if (padding[*pad_offset] == 0x80) {
141             return true;
142         }
143     }
144 
145     return false;
146 }
147 
148 static uint64_t hash_get_source_addr(AspeedHACEState *s)
149 {
150     AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);
151     uint64_t src_addr = 0;
152 
153     src_addr = deposit64(src_addr, 0, 32, s->regs[R_HASH_SRC]);
154     if (ahc->has_dma64) {
155         src_addr = deposit64(src_addr, 32, 32, s->regs[R_HASH_SRC_HI]);
156     }
157 
158     return src_addr;
159 }
160 
/*
 * Map the hash source buffer described by R_HASH_SRC/R_HASH_SRC_LEN into
 * iov[0] for a non-scatter-gather request.
 *
 * In accumulative mode the mapped length is added to s->total_req_len and
 * the chunk is scanned for the final padding; when padding is found,
 * *acc_final_request is set and iov[0].iov_len is trimmed so the padding
 * bytes are not fed to the hash.
 *
 * Returns the number of iov entries prepared (always 1 on success), or -1
 * when the guest address cannot be mapped.
 *
 * NOTE(review): address_space_map() may shrink plen below the requested
 * R_HASH_SRC_LEN; the shortened length is used as-is here — confirm this
 * matches real hardware behaviour for requests crossing region boundaries.
 */
static int hash_prepare_direct_iov(AspeedHACEState *s, struct iovec *iov,
                                   bool acc_mode, bool *acc_final_request)
{
    uint32_t total_msg_len;
    uint32_t pad_offset;
    uint64_t src;
    void *haddr;
    hwaddr plen;
    int iov_idx;

    plen = s->regs[R_HASH_SRC_LEN];
    src = hash_get_source_addr(s);
    haddr = address_space_map(&s->dram_as, src, &plen, false,
                              MEMTXATTRS_UNSPECIFIED);
    if (haddr == NULL) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Unable to map address, addr=0x%" HWADDR_PRIx
                      " ,plen=0x%" HWADDR_PRIx "\n",
                      __func__, src, plen);
        return -1;
    }

    iov[0].iov_base = haddr;
    iov_idx = 1;

    if (acc_mode) {
        /* Track the total bytes streamed so far for padding detection */
        s->total_req_len += plen;

        if (has_padding(s, &iov[0], plen, &total_msg_len,
                        &pad_offset)) {
            /* Padding being present indicates the final request */
            *acc_final_request = true;
            iov[0].iov_len = pad_offset;
        } else {
            iov[0].iov_len = plen;
        }
    } else {
        iov[0].iov_len = plen;
    }

    return iov_idx;
}
203 
204 static int hash_prepare_sg_iov(AspeedHACEState *s, struct iovec *iov,
205                                bool acc_mode, bool *acc_final_request)
206 {
207     uint32_t total_msg_len;
208     uint32_t pad_offset;
209     uint32_t len = 0;
210     uint32_t sg_addr;
211     uint64_t src;
212     int iov_idx;
213     hwaddr plen;
214     void *haddr;
215 
216     src = hash_get_source_addr(s);
217     for (iov_idx = 0; !(len & SG_LIST_LEN_LAST); iov_idx++) {
218         if (iov_idx == ASPEED_HACE_MAX_SG) {
219             qemu_log_mask(LOG_GUEST_ERROR,
220                           "%s: Failed to set end of sg list marker\n",
221                           __func__);
222             return -1;
223         }
224 
225         len = address_space_ldl_le(&s->dram_as, src,
226                                    MEMTXATTRS_UNSPECIFIED, NULL);
227         sg_addr = address_space_ldl_le(&s->dram_as, src + SG_LIST_LEN_SIZE,
228                                        MEMTXATTRS_UNSPECIFIED, NULL);
229         sg_addr &= SG_LIST_ADDR_MASK;
230         /*
231          * To maintain compatibility with older SoCs such as the AST2600,
232          * the AST2700 HW automatically set bit 34 of the 64-bit sg_addr.
233          * As a result, the firmware only needs to provide a 32-bit sg_addr
234          * containing bits [31:0]. This is sufficient for the AST2700, as
235          * it uses a DRAM offset rather than a DRAM address.
236          */
237         plen = len & SG_LIST_LEN_MASK;
238         haddr = address_space_map(&s->dram_as, sg_addr, &plen, false,
239                                   MEMTXATTRS_UNSPECIFIED);
240 
241         if (haddr == NULL) {
242             qemu_log_mask(LOG_GUEST_ERROR,
243                           "%s: Unable to map address, sg_addr=0x%x, "
244                           "plen=0x%" HWADDR_PRIx "\n",
245                           __func__, sg_addr, plen);
246             return -1;
247         }
248 
249         src += SG_LIST_ENTRY_SIZE;
250 
251         iov[iov_idx].iov_base = haddr;
252         if (acc_mode) {
253             s->total_req_len += plen;
254 
255             if (has_padding(s, &iov[iov_idx], plen, &total_msg_len,
256                             &pad_offset)) {
257                 /* Padding being present indicates the final request */
258                 *acc_final_request = true;
259                 iov[iov_idx].iov_len = pad_offset;
260             } else {
261                 iov[iov_idx].iov_len = plen;
262             }
263         } else {
264             iov[iov_idx].iov_len = plen;
265         }
266     }
267 
268     return iov_idx;
269 }
270 
271 static uint64_t hash_get_digest_addr(AspeedHACEState *s)
272 {
273     AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);
274     uint64_t digest_addr = 0;
275 
276     digest_addr = deposit64(digest_addr, 0, 32, s->regs[R_HASH_DIGEST]);
277     if (ahc->has_dma64) {
278         digest_addr = deposit64(digest_addr, 32, 32, s->regs[R_HASH_DIGEST_HI]);
279     }
280 
281     return digest_addr;
282 }
283 
284 static void hash_write_digest_and_unmap_iov(AspeedHACEState *s,
285                                             struct iovec *iov,
286                                             int iov_idx,
287                                             uint8_t *digest_buf,
288                                             size_t digest_len)
289 {
290     uint64_t digest_addr = 0;
291 
292     digest_addr = hash_get_digest_addr(s);
293     if (address_space_write(&s->dram_as, digest_addr,
294                             MEMTXATTRS_UNSPECIFIED,
295                             digest_buf, digest_len)) {
296         qemu_log_mask(LOG_GUEST_ERROR,
297                       "%s: Failed to write digest to 0x%" HWADDR_PRIx "\n",
298                       __func__, digest_addr);
299     }
300 
301     for (; iov_idx > 0; iov_idx--) {
302         address_space_unmap(&s->dram_as, iov[iov_idx - 1].iov_base,
303                             iov[iov_idx - 1].iov_len, false,
304                             iov[iov_idx - 1].iov_len);
305     }
306 }
307 
308 static void hash_execute_non_acc_mode(AspeedHACEState *s, int algo,
309                                       struct iovec *iov, int iov_idx)
310 {
311     g_autofree uint8_t *digest_buf = NULL;
312     Error *local_err = NULL;
313     size_t digest_len = 0;
314 
315     if (qcrypto_hash_bytesv(algo, iov, iov_idx, &digest_buf,
316                             &digest_len, &local_err) < 0) {
317         qemu_log_mask(LOG_GUEST_ERROR,
318                       "%s: qcrypto hash bytesv failed : %s",
319                       __func__, error_get_pretty(local_err));
320         error_free(local_err);
321         return;
322     }
323 
324     hash_write_digest_and_unmap_iov(s, iov, iov_idx, digest_buf, digest_len);
325 }
326 
/*
 * Accumulative-mode hashing: feed the current request into a persistent
 * qcrypto hash context (created lazily on the first chunk).  When
 * final_request is set — padding was detected in the request — the digest
 * is finalized, the context freed and the accumulated length reset.
 * The digest (empty for intermediate requests) is then written back and
 * all iov mappings are released.
 */
static void hash_execute_acc_mode(AspeedHACEState *s, int algo,
                                  struct iovec *iov, int iov_idx,
                                  bool final_request)
{
    g_autofree uint8_t *digest_buf = NULL;
    Error *local_err = NULL;
    size_t digest_len = 0;

    /* Lazily create the streaming hash context on the first chunk */
    if (s->hash_ctx == NULL) {
        s->hash_ctx = qcrypto_hash_new(algo, &local_err);
        if (s->hash_ctx == NULL) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto hash new failed : %s",
                          __func__, error_get_pretty(local_err));
            error_free(local_err);
            /* NOTE(review): iov mappings are not released on this path */
            return;
        }
    }

    if (qcrypto_hash_updatev(s->hash_ctx, iov, iov_idx, &local_err) < 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto hash updatev failed : %s",
                      __func__, error_get_pretty(local_err));
        error_free(local_err);
        /* NOTE(review): iov mappings are not released on this path */
        return;
    }

    if (final_request) {
        if (qcrypto_hash_finalize_bytes(s->hash_ctx, &digest_buf,
                                        &digest_len, &local_err)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: qcrypto hash finalize bytes failed : %s",
                          __func__, error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
        }

        qcrypto_hash_free(s->hash_ctx);

        /* Ready for the next accumulative sequence */
        s->hash_ctx = NULL;
        s->total_req_len = 0;
    }

    /* digest_len is 0 for intermediate requests: nothing is written then */
    hash_write_digest_and_unmap_iov(s, iov, iov_idx, digest_buf, digest_len);
}
370 
371 static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
372                               bool acc_mode)
373 {
374     struct iovec iov[ASPEED_HACE_MAX_SG];
375     bool acc_final_request = false;
376     int iov_idx = -1;
377 
378     /* Prepares the iov for hashing operations based on the selected mode */
379     if (sg_mode) {
380         iov_idx = hash_prepare_sg_iov(s, iov, acc_mode, &acc_final_request);
381     } else {
382         iov_idx = hash_prepare_direct_iov(s, iov, acc_mode,
383                                           &acc_final_request);
384     }
385 
386     if (iov_idx <= 0) {
387         qemu_log_mask(LOG_GUEST_ERROR,
388                       "%s: Failed to prepare iov\n", __func__);
389          return;
390     }
391 
392     /* Executes the hash operation */
393     if (acc_mode) {
394         hash_execute_acc_mode(s, algo, iov, iov_idx, acc_final_request);
395     } else {
396         hash_execute_non_acc_mode(s, algo, iov, iov_idx);
397     }
398 }
399 
400 static uint64_t aspeed_hace_read(void *opaque, hwaddr addr, unsigned int size)
401 {
402     AspeedHACEState *s = ASPEED_HACE(opaque);
403 
404     addr >>= 2;
405 
406     return s->regs[addr];
407 }
408 
/*
 * MMIO write handler for the HACE register window.
 *
 * Masks the incoming value with the per-SoC register masks, starts a hash
 * operation on writes to R_HASH_CMD, and finally stores the (possibly
 * modified) value into the backing register array.
 */
static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
                              unsigned int size)
{
    AspeedHACEState *s = ASPEED_HACE(opaque);
    AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);

    /* Byte offset -> 32-bit register index */
    addr >>= 2;

    switch (addr) {
    case R_STATUS:
        /* Interrupt status bits are write-one-to-clear */
        if (data & HASH_IRQ) {
            data &= ~HASH_IRQ;

            if (s->regs[addr] & HASH_IRQ) {
                qemu_irq_lower(s->irq);
            }
        }
        if (ahc->raise_crypt_interrupt_workaround) {
            if (data & CRYPT_IRQ) {
                data &= ~CRYPT_IRQ;

                if (s->regs[addr] & CRYPT_IRQ) {
                    qemu_irq_lower(s->irq);
                }
            }
        }
        break;
    case R_HASH_SRC:
        data &= ahc->src_mask;
        break;
    case R_HASH_DIGEST:
        data &= ahc->dest_mask;
        break;
    case R_HASH_KEY_BUFF:
        data &= ahc->key_mask;
        break;
    case R_HASH_SRC_LEN:
        data &= 0x0FFFFFFF;
        break;
    case R_HASH_CMD: {
        int algo;
        data &= ahc->hash_mask;

        if ((data & HASH_DIGEST_HMAC)) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: HMAC mode not implemented\n",
                          __func__);
        }
        if (data & BIT(1)) {
            /* BIT(1) selects one of the cascaded (hash+crypt) modes */
            qemu_log_mask(LOG_UNIMP,
                          "%s: Cascaded mode not implemented\n",
                          __func__);
        }
        algo = hash_algo_lookup(data);
        if (algo < 0) {
                qemu_log_mask(LOG_GUEST_ERROR,
                        "%s: Invalid hash algorithm selection 0x%"PRIx64"\n",
                        __func__, data & ahc->hash_mask);
        } else {
            /* Accumulative mode is requested through the HMAC field */
            do_hash_operation(s, algo, data & HASH_SG_EN,
                    ((data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM));
        }

        /*
         * Set status bits to indicate completion. Testing shows hardware sets
         * these irrespective of HASH_IRQ_EN.
         */
        s->regs[R_STATUS] |= HASH_IRQ;

        if (data & HASH_IRQ_EN) {
            qemu_irq_raise(s->irq);
        }
        break;
    }
    case R_CRYPT_CMD:
        qemu_log_mask(LOG_UNIMP, "%s: Crypt commands not implemented\n",
                       __func__);
        if (ahc->raise_crypt_interrupt_workaround) {
            /* Pretend the crypt command completed (see AST2700 class init) */
            s->regs[R_STATUS] |= CRYPT_IRQ;
            if (data & CRYPT_IRQ_EN) {
                qemu_irq_raise(s->irq);
            }
        }
        break;
    case R_HASH_SRC_HI:
        data &= ahc->src_hi_mask;
        break;
    case R_HASH_DIGEST_HI:
        data &= ahc->dest_hi_mask;
        break;
    case R_HASH_KEY_BUFF_HI:
        data &= ahc->key_hi_mask;
        break;
    default:
        break;
    }

    s->regs[addr] = data;
}
508 
/* MMIO access descriptor: little-endian, byte to word sized accesses */
static const MemoryRegionOps aspeed_hace_ops = {
    .read = aspeed_hace_read,
    .write = aspeed_hace_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};
518 
519 static void aspeed_hace_reset(DeviceState *dev)
520 {
521     struct AspeedHACEState *s = ASPEED_HACE(dev);
522     AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);
523 
524     if (s->hash_ctx != NULL) {
525         qcrypto_hash_free(s->hash_ctx);
526         s->hash_ctx = NULL;
527     }
528 
529     memset(s->regs, 0, ahc->nr_regs << 2);
530     s->total_req_len = 0;
531 }
532 
533 static void aspeed_hace_realize(DeviceState *dev, Error **errp)
534 {
535     AspeedHACEState *s = ASPEED_HACE(dev);
536     SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
537     AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);
538 
539     sysbus_init_irq(sbd, &s->irq);
540 
541     s->regs = g_new(uint32_t, ahc->nr_regs);
542     memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_hace_ops, s,
543                           TYPE_ASPEED_HACE, ahc->nr_regs << 2);
544 
545     if (!s->dram_mr) {
546         error_setg(errp, TYPE_ASPEED_HACE ": 'dram' link not set");
547         return;
548     }
549 
550     address_space_init(&s->dram_as, s->dram_mr, "dram");
551 
552     sysbus_init_mmio(sbd, &s->iomem);
553 }
554 
/* 'dram' link must be set by the SoC so the engine can DMA guest memory */
static const Property aspeed_hace_properties[] = {
    DEFINE_PROP_LINK("dram", AspeedHACEState, dram_mr,
                     TYPE_MEMORY_REGION, MemoryRegion *),
};
559 
560 
/*
 * Migration state: only total_req_len is transferred.
 * NOTE(review): s->regs and the live qcrypto hash context are not
 * migrated here — confirm whether an in-flight accumulative hash is
 * expected to survive migration.
 */
static const VMStateDescription vmstate_aspeed_hace = {
    .name = TYPE_ASPEED_HACE,
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(total_req_len, AspeedHACEState),
        VMSTATE_END_OF_LIST(),
    }
};
570 
571 static void aspeed_hace_unrealize(DeviceState *dev)
572 {
573     AspeedHACEState *s = ASPEED_HACE(dev);
574 
575     g_free(s->regs);
576     s->regs = NULL;
577 }
578 
/* Base class init: wires realize/unrealize/reset, props and migration */
static void aspeed_hace_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = aspeed_hace_realize;
    dc->unrealize = aspeed_hace_unrealize;
    device_class_set_legacy_reset(dc, aspeed_hace_reset);
    device_class_set_props(dc, aspeed_hace_properties);
    dc->vmsd = &vmstate_aspeed_hace;
}
589 
/* Abstract base type; per-SoC subclasses fill in the register masks */
static const TypeInfo aspeed_hace_info = {
    .name = TYPE_ASPEED_HACE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(AspeedHACEState),
    .class_init = aspeed_hace_class_init,
    .class_size = sizeof(AspeedHACEClass)
};
597 
/* AST2400: 28-bit DMA address masks, no SG and no SHA512 command bits */
static void aspeed_ast2400_hace_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2400 Hash and Crypto Engine";

    ahc->nr_regs = 0x64 >> 2;
    ahc->src_mask = 0x0FFFFFFF;
    ahc->dest_mask = 0x0FFFFFF8;
    ahc->key_mask = 0x0FFFFFC0;
    ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
}

static const TypeInfo aspeed_ast2400_hace_info = {
    .name = TYPE_ASPEED_AST2400_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2400_hace_class_init,
};
617 
/* AST2500: 30-bit DMA address masks, no SG and no SHA512 command bits */
static void aspeed_ast2500_hace_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2500 Hash and Crypto Engine";

    ahc->nr_regs = 0x64 >> 2;
    ahc->src_mask = 0x3fffffff;
    ahc->dest_mask = 0x3ffffff8;
    ahc->key_mask = 0x3FFFFFC0;
    ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
}

static const TypeInfo aspeed_ast2500_hace_info = {
    .name = TYPE_ASPEED_AST2500_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2500_hace_class_init,
};
637 
/* AST2600: 31-bit DMA address masks, SG and SHA512 command bits enabled */
static void aspeed_ast2600_hace_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2600 Hash and Crypto Engine";

    ahc->nr_regs = 0x64 >> 2;
    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;
}

static const TypeInfo aspeed_ast2600_hace_info = {
    .name = TYPE_ASPEED_AST2600_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2600_hace_class_init,
};
657 
/* AST1030: same register layout and masks as the AST2600 */
static void aspeed_ast1030_hace_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST1030 Hash and Crypto Engine";

    ahc->nr_regs = 0x64 >> 2;
    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;
}

static const TypeInfo aspeed_ast1030_hace_info = {
    .name = TYPE_ASPEED_AST1030_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast1030_hace_class_init,
};
677 
/* AST2700: extended register file with *_HI registers and 64-bit DMA */
static void aspeed_ast2700_hace_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2700 Hash and Crypto Engine";

    ahc->nr_regs = 0x9C >> 2;
    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;

    /*
     * The AST2700 supports a maximum DRAM size of 8 GB, with a DRAM
     * addressable range from 0x0_0000_0000 to 0x1_FFFF_FFFF. Since this range
     * fits within 34 bits, only bits [33:0] are needed to store the DRAM
     * offset. To optimize address storage, the high physical address bits
     * [1:0] of the source, digest and key buffer addresses are stored as
     * dram_offset bits [33:32].
     *
     * This approach eliminates the need to reduce the high part of the DRAM
     * physical address for DMA operations. Previously, this was calculated as
     * (high physical address bits [7:0] - 4), since the DRAM start address is
     * 0x4_00000000, making the high part address [7:0] - 4.
     */
    ahc->src_hi_mask = 0x00000003;
    ahc->dest_hi_mask = 0x00000003;
    ahc->key_hi_mask = 0x00000003;

    /*
     * Currently, it does not support the CRYPT command. Instead, it only
     * sends an interrupt to notify the firmware that the crypt command
     * has completed. It is a temporary workaround.
     */
    ahc->raise_crypt_interrupt_workaround = true;
    ahc->has_dma64 = true;
}

static const TypeInfo aspeed_ast2700_hace_info = {
    .name = TYPE_ASPEED_AST2700_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2700_hace_class_init,
};
722 
/* Register the base type and all per-SoC variants with QOM */
static void aspeed_hace_register_types(void)
{
    type_register_static(&aspeed_ast2400_hace_info);
    type_register_static(&aspeed_ast2500_hace_info);
    type_register_static(&aspeed_ast2600_hace_info);
    type_register_static(&aspeed_ast1030_hace_info);
    type_register_static(&aspeed_ast2700_hace_info);
    type_register_static(&aspeed_hace_info);
}

type_init(aspeed_hace_register_types);
734