/*
 * ASPEED Hash and Crypto Engine
 *
 * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
 * Copyright (C) 2021 IBM Corp.
 *
 * Joel Stanley <joel@jms.id.au>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "hw/misc/aspeed_hace.h"
#include "qapi/error.h"
#include "migration/vmstate.h"
#include "crypto/hash.h"
#include "hw/qdev-properties.h"
#include "hw/irq.h"
#include "trace.h"

/* #define DEBUG_HACE 1 */

#define R_CRYPT_CMD     (0x10 / 4)

#define R_STATUS        (0x1c / 4)
#define HASH_IRQ        BIT(9)
#define CRYPT_IRQ       BIT(12)
#define TAG_IRQ         BIT(15)
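
/*
 * The IRQ bits in R_STATUS are write-one-to-clear: the guest acknowledges
 * a completion interrupt by writing the bit back, which also lowers the
 * IRQ line (see aspeed_hace_write()).
 */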

#define R_HASH_SRC      (0x20 / 4)
#define R_HASH_DEST     (0x24 / 4)
#define R_HASH_KEY_BUFF (0x28 / 4)
#define R_HASH_SRC_LEN  (0x2c / 4)
#define R_HASH_SRC_HI      (0x90 / 4)
#define R_HASH_DEST_HI     (0x94 / 4)
#define R_HASH_KEY_BUFF_HI (0x98 / 4)

#define R_HASH_CMD      (0x30 / 4)
/* Hash algorithm selection */
#define HASH_ALGO_MASK          (BIT(4) | BIT(5) | BIT(6))
#define HASH_ALGO_MD5           0
#define HASH_ALGO_SHA1          BIT(5)
#define HASH_ALGO_SHA224        BIT(6)
#define HASH_ALGO_SHA256        (BIT(4) | BIT(6))
#define HASH_ALGO_SHA512_SERIES (BIT(5) | BIT(6))
/* SHA512 algorithm selection */
#define SHA512_HASH_ALGO_MASK   (BIT(10) | BIT(11) | BIT(12))
#define HASH_ALGO_SHA512_SHA512 0
#define HASH_ALGO_SHA512_SHA384 BIT(10)
#define HASH_ALGO_SHA512_SHA256 BIT(11)
#define HASH_ALGO_SHA512_SHA224 (BIT(10) | BIT(11))
/* HMAC modes */
#define HASH_HMAC_MASK          (BIT(7) | BIT(8))
#define HASH_DIGEST             0
#define HASH_DIGEST_HMAC        BIT(7)
#define HASH_DIGEST_ACCUM       BIT(8)
#define HASH_HMAC_KEY           (BIT(7) | BIT(8))
/* Cascaded operation modes */
#define HASH_ONLY               0
#define HASH_ONLY2              BIT(0)
#define HASH_CRYPT_THEN_HASH    BIT(1)
#define HASH_HASH_THEN_CRYPT    (BIT(0) | BIT(1))
/* Other cmd bits */
#define HASH_IRQ_EN             BIT(9)
#define HASH_SG_EN              BIT(18)
#define CRYPT_IRQ_EN            BIT(12)
/* Scatter-gather data list */
#define SG_LIST_LEN_SIZE        4
#define SG_LIST_LEN_MASK        0x0FFFFFFF
#define SG_LIST_LEN_LAST        BIT(31)
#define SG_LIST_ADDR_SIZE       4
#define SG_LIST_ADDR_MASK       0x7FFFFFFF
#define SG_LIST_ENTRY_SIZE      (SG_LIST_LEN_SIZE + SG_LIST_ADDR_SIZE)
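
/*
 * Each scatter-gather descriptor is two little-endian 32-bit words in DRAM:
 * word 0 holds the fragment length in bits [27:0] plus the LAST flag in
 * bit 31; word 1 holds the fragment address, of which bits [30:0] are used.
 */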

static const struct {
    uint32_t mask;
    QCryptoHashAlgo algo;
} hash_algo_map[] = {
    { HASH_ALGO_MD5, QCRYPTO_HASH_ALGO_MD5 },
    { HASH_ALGO_SHA1, QCRYPTO_HASH_ALGO_SHA1 },
    { HASH_ALGO_SHA224, QCRYPTO_HASH_ALGO_SHA224 },
    { HASH_ALGO_SHA256, QCRYPTO_HASH_ALGO_SHA256 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA512,
      QCRYPTO_HASH_ALGO_SHA512 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA384,
      QCRYPTO_HASH_ALGO_SHA384 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA256,
      QCRYPTO_HASH_ALGO_SHA256 },
};

static int hash_algo_lookup(uint32_t reg)
{
    int i;

    reg &= HASH_ALGO_MASK | SHA512_HASH_ALGO_MASK;

    for (i = 0; i < ARRAY_SIZE(hash_algo_map); i++) {
        if (reg == hash_algo_map[i].mask) {
            return hash_algo_map[i].algo;
        }
    }

    return -1;
}
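
/*
 * For example, a HASH_CMD value with HASH_ALGO_SHA512_SERIES and
 * HASH_ALGO_SHA512_SHA384 set maps to QCRYPTO_HASH_ALGO_SHA384; any
 * combination not listed in hash_algo_map[] is rejected with -1.
 */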
109
110 /**
111 * Check whether the request contains padding message.
112 *
113 * @param s aspeed hace state object
114 * @param iov iov of current request
115 * @param req_len length of the current request
116 * @param total_msg_len length of all acc_mode requests(excluding padding msg)
117 * @param pad_offset start offset of padding message
118 */
has_padding(AspeedHACEState * s,struct iovec * iov,hwaddr req_len,uint32_t * total_msg_len,uint32_t * pad_offset)119 static bool has_padding(AspeedHACEState *s, struct iovec *iov,
120 hwaddr req_len, uint32_t *total_msg_len,
121 uint32_t *pad_offset)
122 {
123 *total_msg_len = (uint32_t)(ldq_be_p(iov->iov_base + req_len - 8) / 8);
124 /*
125 * SG_LIST_LEN_LAST asserted in the request length doesn't mean it is the
126 * last request. The last request should contain padding message.
127 * We check whether message contains padding by
128 * 1. Get total message length. If the current message contains
129 * padding, the last 8 bytes are total message length.
130 * 2. Check whether the total message length is valid.
131 * If it is valid, the value should less than or equal to
132 * total_req_len.
133 * 3. Current request len - padding_size to get padding offset.
134 * The padding message's first byte should be 0x80
135 */
136 if (*total_msg_len <= s->total_req_len) {
137 uint32_t padding_size = s->total_req_len - *total_msg_len;
138 uint8_t *padding = iov->iov_base;
139
140 if (padding_size > req_len) {
141 return false;
142 }
143
144 *pad_offset = req_len - padding_size;
145 if (padding[*pad_offset] == 0x80) {
146 return true;
147 }
148 }
149
150 return false;
151 }
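
/*
 * Illustration: hashing a 3-byte message with SHA-256 in accumulative mode,
 * the final 64-byte block carries the message bytes, a 0x80 marker at
 * offset 3, zero fill, and a big-endian 64-bit bit count (24) in the last
 * 8 bytes, so has_padding() reports total_msg_len = 3 and pad_offset = 3.
 */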

static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
                              bool acc_mode)
{
    AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);
    bool sg_acc_mode_final_request = false;
    g_autofree uint8_t *digest_buf = NULL;
    struct iovec iov[ASPEED_HACE_MAX_SG];
    uint64_t digest_addr = 0;
    Error *local_err = NULL;
    uint32_t total_msg_len;
    size_t digest_len = 0;
    uint32_t sg_addr = 0;
    uint32_t pad_offset;
    uint32_t len = 0;
    uint64_t src = 0;
    void *haddr;
    hwaddr plen;
    int i;

    if (acc_mode && s->hash_ctx == NULL) {
        s->hash_ctx = qcrypto_hash_new(algo, &local_err);
        if (s->hash_ctx == NULL) {
            qemu_log_mask(LOG_GUEST_ERROR, "qcrypto hash failed : %s",
                          error_get_pretty(local_err));
            error_free(local_err);
            return;
        }
    }
181
182 if (sg_mode) {
183 for (i = 0; !(len & SG_LIST_LEN_LAST); i++) {
184 if (i == ASPEED_HACE_MAX_SG) {
185 qemu_log_mask(LOG_GUEST_ERROR,
186 "aspeed_hace: guest failed to set end of sg list marker\n");
187 break;
188 }
189
190 src = deposit64(src, 0, 32, s->regs[R_HASH_SRC]);
191 if (ahc->has_dma64) {
192 src = deposit64(src, 32, 32, s->regs[R_HASH_SRC_HI]);
193 }
194 trace_aspeed_hace_addr("src", src);
195 src += i * SG_LIST_ENTRY_SIZE;
196
197 len = address_space_ldl_le(&s->dram_as, src,
198 MEMTXATTRS_UNSPECIFIED, NULL);
199
200 sg_addr = address_space_ldl_le(&s->dram_as, src + SG_LIST_LEN_SIZE,
201 MEMTXATTRS_UNSPECIFIED, NULL);
202 sg_addr &= SG_LIST_ADDR_MASK;
203 trace_aspeed_hace_sg(i, sg_addr, len);
204 /*
205 * Ideally, sg_addr should be 64-bit for the AST2700, using the
206 * following program to obtain the 64-bit sg_addr and convert it
207 * to a DRAM offset:
208 * sg_addr = deposit64(sg_addr, 32, 32,
209 * address_space_ldl_le(&s->dram_as, src + SG_ADDR_LEN_SIZE,
210 * MEMTXATTRS_UNSPECIFIED, NULL);
211 * sg_addr -= 0x400000000;
212 *
213 * To maintain compatibility with older SoCs such as the AST2600,
214 * the AST2700 HW automatically set bit 34 of the 64-bit sg_addr.
215 * As a result, the firmware only needs to provide a 32-bit sg_addr
216 * containing bits [31:0]. This is sufficient for the AST2700, as
217 * it uses a DRAM offset rather than a DRAM address.
218 */
219
220 plen = len & SG_LIST_LEN_MASK;
221 haddr = address_space_map(&s->dram_as, sg_addr, &plen, false,
222 MEMTXATTRS_UNSPECIFIED);
223 if (haddr == NULL) {
224 qemu_log_mask(LOG_GUEST_ERROR,
225 "%s: qcrypto failed\n", __func__);
226 return;
227 }
228 iov[i].iov_base = haddr;
229 if (acc_mode) {
230 s->total_req_len += plen;
231
232 if (has_padding(s, &iov[i], plen, &total_msg_len,
233 &pad_offset)) {
234 /* Padding being present indicates the final request */
235 sg_acc_mode_final_request = true;
236 iov[i].iov_len = pad_offset;
237 } else {
238 iov[i].iov_len = plen;
239 }
240 } else {
241 iov[i].iov_len = plen;
242 }
243 }
    } else {
        plen = s->regs[R_HASH_SRC_LEN];
        src = deposit64(src, 0, 32, s->regs[R_HASH_SRC]);
        if (ahc->has_dma64) {
            src = deposit64(src, 32, 32, s->regs[R_HASH_SRC_HI]);
        }
        trace_aspeed_hace_addr("src", src);
        haddr = address_space_map(&s->dram_as, src,
                                  &plen, false, MEMTXATTRS_UNSPECIFIED);
        if (haddr == NULL) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map dram\n",
                          __func__);
            return;
        }
        iov[0].iov_base = haddr;
        i = 1;
        if (acc_mode) {
            s->total_req_len += plen;

            if (has_padding(s, &iov[0], plen, &total_msg_len,
                            &pad_offset)) {
                /* Padding being present indicates the final request */
                sg_acc_mode_final_request = true;
                iov[0].iov_len = pad_offset;
            } else {
                iov[0].iov_len = plen;
            }
        } else {
            iov[0].iov_len = plen;
        }
    }

#ifdef DEBUG_HACE
    iov_hexdump(iov, i, stdout, "plaintext", 0xa000);
#endif
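
    /*
     * At this point iov[0..i-1] spans the mapped guest buffers. In
     * accumulative mode they are fed into the persistent hash context;
     * otherwise the whole message is hashed in a single
     * qcrypto_hash_bytesv() call.
     */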

    if (acc_mode) {
        if (qcrypto_hash_updatev(s->hash_ctx, iov, i, &local_err) < 0) {
            qemu_log_mask(LOG_GUEST_ERROR, "qcrypto hash update failed : %s",
                          error_get_pretty(local_err));
            error_free(local_err);
            return;
        }

        if (sg_acc_mode_final_request) {
            if (qcrypto_hash_finalize_bytes(s->hash_ctx, &digest_buf,
                                            &digest_len, &local_err)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "qcrypto hash finalize failed : %s",
                              error_get_pretty(local_err));
                error_free(local_err);
                local_err = NULL;
            }

            qcrypto_hash_free(s->hash_ctx);

            s->hash_ctx = NULL;
            s->total_req_len = 0;
        }
    } else if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf,
                                   &digest_len, &local_err) < 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "qcrypto hash bytesv failed : %s",
                      error_get_pretty(local_err));
        error_free(local_err);
        return;
    }

    digest_addr = deposit64(digest_addr, 0, 32, s->regs[R_HASH_DEST]);
    if (ahc->has_dma64) {
        digest_addr = deposit64(digest_addr, 32, 32, s->regs[R_HASH_DEST_HI]);
    }
    trace_aspeed_hace_addr("digest", digest_addr);
    if (address_space_write(&s->dram_as, digest_addr,
                            MEMTXATTRS_UNSPECIFIED,
                            digest_buf, digest_len)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "aspeed_hace: address space write failed\n");
    }

#ifdef DEBUG_HACE
    qemu_hexdump(stdout, "digest", digest_buf, digest_len);
#endif

    for (; i > 0; i--) {
        address_space_unmap(&s->dram_as, iov[i - 1].iov_base,
                            iov[i - 1].iov_len, false,
                            iov[i - 1].iov_len);
    }
}
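
/*
 * A minimal guest-side sequence for a one-shot hash, as this model expects
 * it (illustrative sketch; writel() stands in for a 32-bit MMIO write,
 * register offsets per the defines above):
 *
 *   writel(src, base + 0x20);                             // R_HASH_SRC
 *   writel(dst, base + 0x24);                             // R_HASH_DEST
 *   writel(len, base + 0x2c);                             // R_HASH_SRC_LEN
 *   writel(HASH_ALGO_SHA256 | HASH_IRQ_EN, base + 0x30);  // R_HASH_CMD
 *   // wait for HASH_IRQ in R_STATUS (0x1c), then write it back to clear
 */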

static uint64_t aspeed_hace_read(void *opaque, hwaddr addr, unsigned int size)
{
    AspeedHACEState *s = ASPEED_HACE(opaque);

    addr >>= 2;

    if (addr >= ASPEED_HACE_NR_REGS) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n",
                      __func__, addr << 2);
        return 0;
    }

    trace_aspeed_hace_read(addr << 2, s->regs[addr]);
    return s->regs[addr];
}

static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
                              unsigned int size)
{
    AspeedHACEState *s = ASPEED_HACE(opaque);
    AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);

    addr >>= 2;

    if (addr >= ASPEED_HACE_NR_REGS) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n",
                      __func__, addr << 2);
        return;
    }

    trace_aspeed_hace_write(addr << 2, data);

    switch (addr) {
    case R_STATUS:
        if (data & HASH_IRQ) {
            data &= ~HASH_IRQ;

            if (s->regs[addr] & HASH_IRQ) {
                qemu_irq_lower(s->irq);
            }
        }
        if (ahc->raise_crypt_interrupt_workaround) {
            if (data & CRYPT_IRQ) {
                data &= ~CRYPT_IRQ;

                if (s->regs[addr] & CRYPT_IRQ) {
                    qemu_irq_lower(s->irq);
                }
            }
        }
        break;
    case R_HASH_SRC:
        data &= ahc->src_mask;
        break;
    case R_HASH_DEST:
        data &= ahc->dest_mask;
        break;
    case R_HASH_KEY_BUFF:
        data &= ahc->key_mask;
        break;
    case R_HASH_SRC_LEN:
        data &= 0x0FFFFFFF;
        break;
    case R_HASH_CMD: {
        int algo;
        data &= ahc->hash_mask;

        if ((data & HASH_DIGEST_HMAC)) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: HMAC mode not implemented\n",
                          __func__);
        }
        if (data & BIT(1)) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: Cascaded mode not implemented\n",
                          __func__);
        }
        algo = hash_algo_lookup(data);
        if (algo < 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Invalid hash algorithm selection 0x%"PRIx64"\n",
                          __func__, data & ahc->hash_mask);
        } else {
            do_hash_operation(s, algo, data & HASH_SG_EN,
                              ((data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM));
        }

        /*
         * Set status bits to indicate completion. Testing shows hardware sets
         * these irrespective of HASH_IRQ_EN.
         */
        s->regs[R_STATUS] |= HASH_IRQ;

        if (data & HASH_IRQ_EN) {
            qemu_irq_raise(s->irq);
        }
        break;
    }
    case R_CRYPT_CMD:
        qemu_log_mask(LOG_UNIMP, "%s: Crypt commands not implemented\n",
                      __func__);
        if (ahc->raise_crypt_interrupt_workaround) {
            s->regs[R_STATUS] |= CRYPT_IRQ;
            if (data & CRYPT_IRQ_EN) {
                qemu_irq_raise(s->irq);
            }
        }
        break;
    case R_HASH_SRC_HI:
        data &= ahc->src_hi_mask;
        break;
    case R_HASH_DEST_HI:
        data &= ahc->dest_hi_mask;
        break;
    case R_HASH_KEY_BUFF_HI:
        data &= ahc->key_hi_mask;
        break;
    default:
        break;
    }

    s->regs[addr] = data;
}

static const MemoryRegionOps aspeed_hace_ops = {
    .read = aspeed_hace_read,
    .write = aspeed_hace_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};

static void aspeed_hace_reset(DeviceState *dev)
{
    struct AspeedHACEState *s = ASPEED_HACE(dev);

    if (s->hash_ctx != NULL) {
        qcrypto_hash_free(s->hash_ctx);
        s->hash_ctx = NULL;
    }

    memset(s->regs, 0, sizeof(s->regs));
    s->total_req_len = 0;
}

static void aspeed_hace_realize(DeviceState *dev, Error **errp)
{
    AspeedHACEState *s = ASPEED_HACE(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);

    sysbus_init_irq(sbd, &s->irq);

    memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_hace_ops, s,
                          TYPE_ASPEED_HACE, ahc->mem_size);

    if (!s->dram_mr) {
        error_setg(errp, TYPE_ASPEED_HACE ": 'dram' link not set");
        return;
    }

    address_space_init(&s->dram_as, s->dram_mr, "dram");

    sysbus_init_mmio(sbd, &s->iomem);
}

static const Property aspeed_hace_properties[] = {
    DEFINE_PROP_LINK("dram", AspeedHACEState, dram_mr,
                     TYPE_MEMORY_REGION, MemoryRegion *),
};

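/*
 * total_req_len is part of the migrated state because accumulative-mode
 * padding detection in has_padding() depends on the running total across
 * requests.
 */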
static const VMStateDescription vmstate_aspeed_hace = {
    .name = TYPE_ASPEED_HACE,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, AspeedHACEState, ASPEED_HACE_NR_REGS),
        VMSTATE_UINT32(total_req_len, AspeedHACEState),
        VMSTATE_END_OF_LIST(),
    }
};

static void aspeed_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = aspeed_hace_realize;
    device_class_set_legacy_reset(dc, aspeed_hace_reset);
    device_class_set_props(dc, aspeed_hace_properties);
    dc->vmsd = &vmstate_aspeed_hace;
}

static const TypeInfo aspeed_hace_info = {
    .name = TYPE_ASPEED_HACE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(AspeedHACEState),
    .class_init = aspeed_hace_class_init,
    .class_size = sizeof(AspeedHACEClass)
};

static void aspeed_ast2400_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2400 Hash and Crypto Engine";

    ahc->mem_size = 0x1000;
    ahc->src_mask = 0x0FFFFFFF;
    ahc->dest_mask = 0x0FFFFFF8;
    ahc->key_mask = 0x0FFFFFC0;
    ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
}

static const TypeInfo aspeed_ast2400_hace_info = {
    .name = TYPE_ASPEED_AST2400_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2400_hace_class_init,
};

static void aspeed_ast2500_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2500 Hash and Crypto Engine";

    ahc->mem_size = 0x1000;
    ahc->src_mask = 0x3fffffff;
    ahc->dest_mask = 0x3ffffff8;
    ahc->key_mask = 0x3FFFFFC0;
    ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
}

static const TypeInfo aspeed_ast2500_hace_info = {
    .name = TYPE_ASPEED_AST2500_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2500_hace_class_init,
};

static void aspeed_ast2600_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2600 Hash and Crypto Engine";

    ahc->mem_size = 0x10000;
    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;
}

static const TypeInfo aspeed_ast2600_hace_info = {
    .name = TYPE_ASPEED_AST2600_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2600_hace_class_init,
};

static void aspeed_ast1030_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST1030 Hash and Crypto Engine";

    ahc->mem_size = 0x10000;
    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;
}

static const TypeInfo aspeed_ast1030_hace_info = {
    .name = TYPE_ASPEED_AST1030_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast1030_hace_class_init,
};

static void aspeed_ast2700_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2700 Hash and Crypto Engine";

    ahc->mem_size = 0x100;
    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;

    /*
     * The AST2700 supports a maximum DRAM size of 8 GB, with a DRAM
     * addressable range from 0x0_0000_0000 to 0x1_FFFF_FFFF. Since this range
     * fits within 34 bits, only bits [33:0] are needed to store the DRAM
     * offset. To optimize address storage, the high physical address bits
     * [1:0] of the source, digest and key buffer addresses are stored as
     * dram_offset bits [33:32].
     *
     * This approach eliminates the need to reduce the high part of the DRAM
     * physical address for DMA operations. Previously, this was calculated as
     * (high physical address bits [7:0] - 4), since the DRAM start address is
     * 0x4_00000000, making the high part address [7:0] - 4.
     */
    ahc->src_hi_mask = 0x00000003;
    ahc->dest_hi_mask = 0x00000003;
    ahc->key_hi_mask = 0x00000003;
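
    /*
     * For example (illustration): a buffer at DRAM offset 0x1_2345_6780
     * is programmed as R_HASH_SRC = 0x23456780 and R_HASH_SRC_HI = 0x1.
     */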

    /*
     * The CRYPT command is currently not supported. Instead, the model
     * only raises an interrupt to notify the firmware that the crypt
     * command has completed. This is a temporary workaround.
     */
    ahc->raise_crypt_interrupt_workaround = true;
    ahc->has_dma64 = true;
}

static const TypeInfo aspeed_ast2700_hace_info = {
    .name = TYPE_ASPEED_AST2700_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2700_hace_class_init,
};

static void aspeed_hace_register_types(void)
{
    type_register_static(&aspeed_ast2400_hace_info);
    type_register_static(&aspeed_ast2500_hace_info);
    type_register_static(&aspeed_ast2600_hace_info);
    type_register_static(&aspeed_ast1030_hace_info);
    type_register_static(&aspeed_ast2700_hace_info);
    type_register_static(&aspeed_hace_info);
}

type_init(aspeed_hace_register_types);