/*
 * ASPEED Hash and Crypto Engine
 *
 * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
 * Copyright (C) 2021 IBM Corp.
 *
 * Joel Stanley <joel@jms.id.au>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "hw/misc/aspeed_hace.h"
#include "qapi/error.h"
#include "migration/vmstate.h"
#include "crypto/hash.h"
#include "hw/qdev-properties.h"
#include "hw/irq.h"

#define R_CRYPT_CMD     (0x10 / 4)

#define R_STATUS        (0x1c / 4)
#define HASH_IRQ        BIT(9)
#define CRYPT_IRQ       BIT(12)
#define TAG_IRQ         BIT(15)

#define R_HASH_SRC      (0x20 / 4)
#define R_HASH_DIGEST   (0x24 / 4)
#define R_HASH_KEY_BUFF (0x28 / 4)
#define R_HASH_SRC_LEN  (0x2c / 4)
#define R_HASH_SRC_HI       (0x90 / 4)
#define R_HASH_DIGEST_HI    (0x94 / 4)
#define R_HASH_KEY_BUFF_HI  (0x98 / 4)

#define R_HASH_CMD      (0x30 / 4)
/* Hash algorithm selection */
#define HASH_ALGO_MASK          (BIT(4) | BIT(5) | BIT(6))
#define HASH_ALGO_MD5           0
#define HASH_ALGO_SHA1          BIT(5)
#define HASH_ALGO_SHA224        BIT(6)
#define HASH_ALGO_SHA256        (BIT(4) | BIT(6))
#define HASH_ALGO_SHA512_SERIES (BIT(5) | BIT(6))
/* SHA512 algorithm selection */
#define SHA512_HASH_ALGO_MASK   (BIT(10) | BIT(11) | BIT(12))
#define HASH_ALGO_SHA512_SHA512 0
#define HASH_ALGO_SHA512_SHA384 BIT(10)
#define HASH_ALGO_SHA512_SHA256 BIT(11)
#define HASH_ALGO_SHA512_SHA224 (BIT(10) | BIT(11))
/* HMAC modes */
#define HASH_HMAC_MASK          (BIT(7) | BIT(8))
#define HASH_DIGEST             0
#define HASH_DIGEST_HMAC        BIT(7)
#define HASH_DIGEST_ACCUM       BIT(8)
#define HASH_HMAC_KEY           (BIT(7) | BIT(8))
/* Cascaded operation modes */
#define HASH_ONLY               0
#define HASH_ONLY2              BIT(0)
#define HASH_CRYPT_THEN_HASH    BIT(1)
#define HASH_HASH_THEN_CRYPT    (BIT(0) | BIT(1))
/* Other cmd bits */
#define HASH_IRQ_EN             BIT(9)
#define HASH_SG_EN              BIT(18)
#define CRYPT_IRQ_EN            BIT(12)
/* Scatter-gather data list */
#define SG_LIST_LEN_SIZE        4
#define SG_LIST_LEN_MASK        0x0FFFFFFF
#define SG_LIST_LEN_LAST        BIT(31)
#define SG_LIST_ADDR_SIZE       4
#define SG_LIST_ADDR_MASK       0x7FFFFFFF
#define SG_LIST_ENTRY_SIZE      (SG_LIST_LEN_SIZE + SG_LIST_ADDR_SIZE)
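
/*
 * Layout of one scatter-gather descriptor, as parsed by
 * hash_prepare_sg_iov(): a 32-bit little-endian length word whose bits
 * [27:0] hold the byte count and whose bit 31 marks the final entry,
 * followed by a 32-bit little-endian buffer address (bits [30:0]).
 */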

static const struct {
    uint32_t mask;
    QCryptoHashAlgo algo;
} hash_algo_map[] = {
    { HASH_ALGO_MD5, QCRYPTO_HASH_ALGO_MD5 },
    { HASH_ALGO_SHA1, QCRYPTO_HASH_ALGO_SHA1 },
    { HASH_ALGO_SHA224, QCRYPTO_HASH_ALGO_SHA224 },
    { HASH_ALGO_SHA256, QCRYPTO_HASH_ALGO_SHA256 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA512,
      QCRYPTO_HASH_ALGO_SHA512 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA384,
      QCRYPTO_HASH_ALGO_SHA384 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA256,
      QCRYPTO_HASH_ALGO_SHA256 },
};

static int hash_algo_lookup(uint32_t reg)
{
    int i;

    reg &= HASH_ALGO_MASK | SHA512_HASH_ALGO_MASK;

    for (i = 0; i < ARRAY_SIZE(hash_algo_map); i++) {
        if (reg == hash_algo_map[i].mask) {
            return hash_algo_map[i].algo;
        }
    }

    return -1;
}

/**
 * Check whether the request contains the padding message.
 *
 * @param s aspeed hace state object
 * @param iov iov of the current request
 * @param req_len length of the current request
 * @param total_msg_len total length of all acc_mode requests (excluding the
 *                      padding message)
 * @param pad_offset start offset of the padding message
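 *
 * Example: for a single 64-byte SHA-256 request carrying the padded
 * message "abc", the final 8 bytes hold the big-endian bit count 0x18,
 * giving *total_msg_len = 3, padding_size = 64 - 3 = 61 and
 * *pad_offset = 3, with the 0x80 padding marker at byte 3.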
 */
static bool has_padding(AspeedHACEState *s, struct iovec *iov,
                        hwaddr req_len, uint32_t *total_msg_len,
                        uint32_t *pad_offset)
{
    if (req_len < 8) {
        /* Too short to hold the 64-bit length field of the padding */
        return false;
    }

    *total_msg_len = (uint32_t)(ldq_be_p(iov->iov_base + req_len - 8) / 8);
    /*
     * SG_LIST_LEN_LAST asserted in the request length does not mean this is
     * the last request; only the last request carries the padding message.
     * We detect the padding as follows:
     * 1. Get the total message length: if the current request contains
     *    padding, its last 8 bytes hold the total message length in bits
     *    (converted to bytes above by dividing by 8).
     * 2. Check that the total message length is valid, i.e. less than or
     *    equal to total_req_len.
     * 3. Compute the padding offset as the current request length minus
     *    the padding size; the first byte of the padding must be 0x80.
     */
    if (*total_msg_len <= s->total_req_len) {
        uint32_t padding_size = s->total_req_len - *total_msg_len;
        uint8_t *padding = iov->iov_base;

        if (padding_size > req_len) {
            return false;
        }

        *pad_offset = req_len - padding_size;
        if (padding[*pad_offset] == 0x80) {
            return true;
        }
    }

    return false;
}

static uint64_t hash_get_source_addr(AspeedHACEState *s)
{
    uint64_t src_addr = 0;

    src_addr = deposit64(src_addr, 0, 32, s->regs[R_HASH_SRC]);

    return src_addr;
}

static int hash_prepare_direct_iov(AspeedHACEState *s, struct iovec *iov,
                                   bool acc_mode, bool *acc_final_request)
{
    uint32_t total_msg_len;
    uint32_t pad_offset;
    uint64_t src;
    void *haddr;
    hwaddr plen;
    int iov_idx;

    plen = s->regs[R_HASH_SRC_LEN];
    src = hash_get_source_addr(s);
    haddr = address_space_map(&s->dram_as, src, &plen, false,
                              MEMTXATTRS_UNSPECIFIED);
    if (haddr == NULL) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Unable to map address, addr=0x%" HWADDR_PRIx
                      ", plen=0x%" HWADDR_PRIx "\n",
                      __func__, src, plen);
        return -1;
    }

    iov[0].iov_base = haddr;
    iov_idx = 1;

    if (acc_mode) {
        s->total_req_len += plen;

        if (has_padding(s, &iov[0], plen, &total_msg_len,
                        &pad_offset)) {
            /* Padding being present indicates the final request */
            *acc_final_request = true;
            iov[0].iov_len = pad_offset;
        } else {
            iov[0].iov_len = plen;
        }
    } else {
        iov[0].iov_len = plen;
    }

    return iov_idx;
}

static int hash_prepare_sg_iov(AspeedHACEState *s, struct iovec *iov,
                               bool acc_mode, bool *acc_final_request)
{
    uint32_t total_msg_len;
    uint32_t pad_offset;
    uint32_t len = 0;
    uint32_t sg_addr;
    uint64_t src;
    int iov_idx;
    hwaddr plen;
    void *haddr;

    src = hash_get_source_addr(s);
    for (iov_idx = 0; !(len & SG_LIST_LEN_LAST); iov_idx++) {
        if (iov_idx == ASPEED_HACE_MAX_SG) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Failed to set end of sg list marker\n",
                          __func__);
            return -1;
        }

        len = address_space_ldl_le(&s->dram_as, src,
                                   MEMTXATTRS_UNSPECIFIED, NULL);
        sg_addr = address_space_ldl_le(&s->dram_as, src + SG_LIST_LEN_SIZE,
                                       MEMTXATTRS_UNSPECIFIED, NULL);
        sg_addr &= SG_LIST_ADDR_MASK;

        plen = len & SG_LIST_LEN_MASK;
        haddr = address_space_map(&s->dram_as, sg_addr, &plen, false,
                                  MEMTXATTRS_UNSPECIFIED);

        if (haddr == NULL) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Unable to map address, sg_addr=0x%x, "
                          "plen=0x%" HWADDR_PRIx "\n",
                          __func__, sg_addr, plen);
            return -1;
        }

        src += SG_LIST_ENTRY_SIZE;

        iov[iov_idx].iov_base = haddr;
        if (acc_mode) {
            s->total_req_len += plen;

            if (has_padding(s, &iov[iov_idx], plen, &total_msg_len,
                            &pad_offset)) {
                /* Padding being present indicates the final request */
                *acc_final_request = true;
                iov[iov_idx].iov_len = pad_offset;
            } else {
                iov[iov_idx].iov_len = plen;
            }
        } else {
            iov[iov_idx].iov_len = plen;
        }
    }

    return iov_idx;
}

static uint64_t hash_get_digest_addr(AspeedHACEState *s)
{
    uint64_t digest_addr = 0;

    digest_addr = deposit64(digest_addr, 0, 32, s->regs[R_HASH_DIGEST]);

    return digest_addr;
}

static void hash_write_digest_and_unmap_iov(AspeedHACEState *s,
                                            struct iovec *iov,
                                            int iov_idx,
                                            uint8_t *digest_buf,
                                            size_t digest_len)
{
    uint64_t digest_addr = 0;

    digest_addr = hash_get_digest_addr(s);
    if (address_space_write(&s->dram_as, digest_addr,
                            MEMTXATTRS_UNSPECIFIED,
                            digest_buf, digest_len)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Failed to write digest to 0x%" HWADDR_PRIx "\n",
                      __func__, digest_addr);
    }

    for (; iov_idx > 0; iov_idx--) {
        address_space_unmap(&s->dram_as, iov[iov_idx - 1].iov_base,
                            iov[iov_idx - 1].iov_len, false,
                            iov[iov_idx - 1].iov_len);
    }
}

static void hash_execute_non_acc_mode(AspeedHACEState *s, int algo,
                                      struct iovec *iov, int iov_idx)
{
    g_autofree uint8_t *digest_buf = NULL;
    Error *local_err = NULL;
    size_t digest_len = 0;

    if (qcrypto_hash_bytesv(algo, iov, iov_idx, &digest_buf,
                            &digest_len, &local_err) < 0) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: qcrypto hash bytesv failed : %s\n",
                      __func__, error_get_pretty(local_err));
        error_free(local_err);
        return;
    }

    hash_write_digest_and_unmap_iov(s, iov, iov_idx, digest_buf, digest_len);
}

static void hash_execute_acc_mode(AspeedHACEState *s, int algo,
                                  struct iovec *iov, int iov_idx,
                                  bool final_request)
{
    g_autofree uint8_t *digest_buf = NULL;
    Error *local_err = NULL;
    size_t digest_len = 0;

    if (s->hash_ctx == NULL) {
        s->hash_ctx = qcrypto_hash_new(algo, &local_err);
        if (s->hash_ctx == NULL) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: qcrypto hash new failed : %s\n",
                          __func__, error_get_pretty(local_err));
            error_free(local_err);
            return;
        }
    }

    if (qcrypto_hash_updatev(s->hash_ctx, iov, iov_idx, &local_err) < 0) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: qcrypto hash updatev failed : %s\n",
                      __func__, error_get_pretty(local_err));
        error_free(local_err);
        return;
    }

    if (final_request) {
        if (qcrypto_hash_finalize_bytes(s->hash_ctx, &digest_buf,
                                        &digest_len, &local_err)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: qcrypto hash finalize bytes failed : %s\n",
                          __func__, error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
        }

        qcrypto_hash_free(s->hash_ctx);

        s->hash_ctx = NULL;
        s->total_req_len = 0;
    }

    hash_write_digest_and_unmap_iov(s, iov, iov_idx, digest_buf, digest_len);
}
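
/*
 * In accumulative mode the guest streams one message across several
 * requests, so a long-lived qcrypto hash context is kept in s->hash_ctx
 * between calls and is only finalized and freed once has_padding() sees
 * the padding that terminates the stream.
 */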
static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
                              bool acc_mode)
{
    struct iovec iov[ASPEED_HACE_MAX_SG];
    bool acc_final_request = false;
    int iov_idx = -1;

    /* Prepare the iov for the hashing operation based on the selected mode */
    if (sg_mode) {
        iov_idx = hash_prepare_sg_iov(s, iov, acc_mode, &acc_final_request);
    } else {
        iov_idx = hash_prepare_direct_iov(s, iov, acc_mode,
                                          &acc_final_request);
    }

    if (iov_idx <= 0) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Failed to prepare iov\n", __func__);
        return;
    }

    /* Execute the hash operation */
    if (acc_mode) {
        hash_execute_acc_mode(s, algo, iov, iov_idx, acc_final_request);
    } else {
        hash_execute_non_acc_mode(s, algo, iov, iov_idx);
    }
}

static uint64_t aspeed_hace_read(void *opaque, hwaddr addr, unsigned int size)
{
    AspeedHACEState *s = ASPEED_HACE(opaque);
    AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);

    addr >>= 2;

    if (addr >= ahc->nr_regs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n",
                      __func__, addr << 2);
        return 0;
    }

    return s->regs[addr];
}

static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
                              unsigned int size)
{
    AspeedHACEState *s = ASPEED_HACE(opaque);
    AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);

    addr >>= 2;

    if (addr >= ahc->nr_regs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n",
                      __func__, addr << 2);
        return;
    }

    switch (addr) {
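    /*
     * The interrupt status bits behave as write-one-to-clear: writing a 1
     * clears the bit and, if it was set, lowers the IRQ line.
     */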
    case R_STATUS:
        if (data & HASH_IRQ) {
            data &= ~HASH_IRQ;

            if (s->regs[addr] & HASH_IRQ) {
                qemu_irq_lower(s->irq);
            }
        }
        if (ahc->raise_crypt_interrupt_workaround) {
            if (data & CRYPT_IRQ) {
                data &= ~CRYPT_IRQ;

                if (s->regs[addr] & CRYPT_IRQ) {
                    qemu_irq_lower(s->irq);
                }
            }
        }
        break;
    case R_HASH_SRC:
        data &= ahc->src_mask;
        break;
    case R_HASH_DIGEST:
        data &= ahc->dest_mask;
        break;
    case R_HASH_KEY_BUFF:
        data &= ahc->key_mask;
        break;
    case R_HASH_SRC_LEN:
        data &= 0x0FFFFFFF;
        break;
    case R_HASH_CMD: {
        int algo;
        data &= ahc->hash_mask;

        if ((data & HASH_DIGEST_HMAC)) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: HMAC mode not implemented\n",
                          __func__);
        }
        if (data & BIT(1)) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: Cascaded mode not implemented\n",
                          __func__);
        }
        algo = hash_algo_lookup(data);
        if (algo < 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Invalid hash algorithm selection 0x%"PRIx64"\n",
                          __func__, data & ahc->hash_mask);
        } else {
            do_hash_operation(s, algo, data & HASH_SG_EN,
                              ((data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM));
        }

        /*
         * Set status bits to indicate completion. Testing shows hardware sets
         * these irrespective of HASH_IRQ_EN.
         */
        s->regs[R_STATUS] |= HASH_IRQ;

        if (data & HASH_IRQ_EN) {
            qemu_irq_raise(s->irq);
        }
        break;
    }
    case R_CRYPT_CMD:
        qemu_log_mask(LOG_UNIMP, "%s: Crypt commands not implemented\n",
                      __func__);
        if (ahc->raise_crypt_interrupt_workaround) {
            s->regs[R_STATUS] |= CRYPT_IRQ;
            if (data & CRYPT_IRQ_EN) {
                qemu_irq_raise(s->irq);
            }
        }
        break;
    case R_HASH_SRC_HI:
        data &= ahc->src_hi_mask;
        break;
    case R_HASH_DIGEST_HI:
        data &= ahc->dest_hi_mask;
        break;
    case R_HASH_KEY_BUFF_HI:
        data &= ahc->key_hi_mask;
        break;
    default:
        break;
    }

    s->regs[addr] = data;
}

static const MemoryRegionOps aspeed_hace_ops = {
    .read = aspeed_hace_read,
    .write = aspeed_hace_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};

static void aspeed_hace_reset(DeviceState *dev)
{
    AspeedHACEState *s = ASPEED_HACE(dev);
    AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);

    if (s->hash_ctx != NULL) {
        qcrypto_hash_free(s->hash_ctx);
        s->hash_ctx = NULL;
    }

    memset(s->regs, 0, ahc->nr_regs << 2);
    s->total_req_len = 0;
}

static void aspeed_hace_realize(DeviceState *dev, Error **errp)
{
    AspeedHACEState *s = ASPEED_HACE(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);

    sysbus_init_irq(sbd, &s->irq);

    s->regs = g_new(uint32_t, ahc->nr_regs);
    memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_hace_ops, s,
                          TYPE_ASPEED_HACE, ahc->nr_regs << 2);

    if (!s->dram_mr) {
        error_setg(errp, TYPE_ASPEED_HACE ": 'dram' link not set");
        return;
    }

    address_space_init(&s->dram_as, s->dram_mr, "dram");

    sysbus_init_mmio(sbd, &s->iomem);
}

static const Property aspeed_hace_properties[] = {
    DEFINE_PROP_LINK("dram", AspeedHACEState, dram_mr,
                     TYPE_MEMORY_REGION, MemoryRegion *),
};
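
/*
 * Note that s->hash_ctx is not part of the migration state: an
 * accumulative hash that is in flight when migration happens is lost,
 * and only the running total_req_len is preserved.
 */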
static const VMStateDescription vmstate_aspeed_hace = {
    .name = TYPE_ASPEED_HACE,
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(total_req_len, AspeedHACEState),
        VMSTATE_END_OF_LIST(),
    }
};

static void aspeed_hace_unrealize(DeviceState *dev)
{
    AspeedHACEState *s = ASPEED_HACE(dev);

    g_free(s->regs);
    s->regs = NULL;
}

static void aspeed_hace_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = aspeed_hace_realize;
    dc->unrealize = aspeed_hace_unrealize;
    device_class_set_legacy_reset(dc, aspeed_hace_reset);
    device_class_set_props(dc, aspeed_hace_properties);
    dc->vmsd = &vmstate_aspeed_hace;
}

static const TypeInfo aspeed_hace_info = {
    .name = TYPE_ASPEED_HACE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(AspeedHACEState),
    .class_init = aspeed_hace_class_init,
    .class_size = sizeof(AspeedHACEClass)
};

static void aspeed_ast2400_hace_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2400 Hash and Crypto Engine";

    ahc->nr_regs = 0x64 >> 2;
    ahc->src_mask = 0x0FFFFFFF;
    ahc->dest_mask = 0x0FFFFFF8;
    ahc->key_mask = 0x0FFFFFC0;
    ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
}

static const TypeInfo aspeed_ast2400_hace_info = {
    .name = TYPE_ASPEED_AST2400_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2400_hace_class_init,
};

static void aspeed_ast2500_hace_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2500 Hash and Crypto Engine";

    ahc->nr_regs = 0x64 >> 2;
    ahc->src_mask = 0x3FFFFFFF;
    ahc->dest_mask = 0x3FFFFFF8;
    ahc->key_mask = 0x3FFFFFC0;
    ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
}

static const TypeInfo aspeed_ast2500_hace_info = {
    .name = TYPE_ASPEED_AST2500_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2500_hace_class_init,
};

static void aspeed_ast2600_hace_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2600 Hash and Crypto Engine";
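
    /*
     * Unlike the AST2400/2500, the hash command mask below also passes
     * through the scatter-gather enable (bit 18) and the SHA-512
     * sub-algorithm selectors (bits [12:10]).
     */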
    ahc->nr_regs = 0x64 >> 2;
    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;
}

static const TypeInfo aspeed_ast2600_hace_info = {
    .name = TYPE_ASPEED_AST2600_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2600_hace_class_init,
};

static void aspeed_ast1030_hace_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST1030 Hash and Crypto Engine";

    ahc->nr_regs = 0x64 >> 2;
    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;
}

static const TypeInfo aspeed_ast1030_hace_info = {
    .name = TYPE_ASPEED_AST1030_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast1030_hace_class_init,
};

static void aspeed_ast2700_hace_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2700 Hash and Crypto Engine";

    ahc->nr_regs = 0x9C >> 2;
    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;

    /*
     * The AST2700 supports a maximum DRAM size of 8 GB, with a DRAM
     * addressable range from 0x0_0000_0000 to 0x1_FFFF_FFFF. Since this range
     * fits within 34 bits, only bits [33:0] are needed to store the DRAM
     * offset. To optimize address storage, the high physical address bits
     * [1:0] of the source, digest and key buffer addresses are stored as
     * dram_offset bits [33:32].
     *
     * This approach eliminates the need to reduce the high part of the DRAM
     * physical address for DMA operations. Previously, this was calculated as
     * (high physical address bits [7:0] - 4), since the DRAM start address is
     * 0x4_0000_0000, making the high part address [7:0] - 4.
     */
    ahc->src_hi_mask = 0x00000003;
    ahc->dest_hi_mask = 0x00000003;
    ahc->key_hi_mask = 0x00000003;

    /*
     * The CRYPT command is not supported yet. As a temporary workaround,
     * the model only raises an interrupt to notify the firmware that the
     * crypt command has completed.
     */
    ahc->raise_crypt_interrupt_workaround = true;
}

static const TypeInfo aspeed_ast2700_hace_info = {
    .name = TYPE_ASPEED_AST2700_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2700_hace_class_init,
};

static void aspeed_hace_register_types(void)
{
    type_register_static(&aspeed_ast2400_hace_info);
    type_register_static(&aspeed_ast2500_hace_info);
    type_register_static(&aspeed_ast2600_hace_info);
    type_register_static(&aspeed_ast1030_hace_info);
    type_register_static(&aspeed_ast2700_hace_info);
    type_register_static(&aspeed_hace_info);
}

type_init(aspeed_hace_register_types);