// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/ctype.h>
#include <linux/firmware.h>
#include "otx_cpt_common.h"
#include "otx_cptpf_ucode.h"
#include "otx_cptpf.h"

#define CSR_DELAY 30
/* Tar archive defines */
#define TAR_MAGIC "ustar"
#define TAR_MAGIC_LEN 6
#define TAR_BLOCK_LEN 512
#define REGTYPE '0'
#define AREGTYPE '\0'

/* tar header as defined in POSIX 1003.1-1990. */
struct tar_hdr_t {
	char name[100];
	char mode[8];
	char uid[8];
	char gid[8];
	char size[12];
	char mtime[12];
	char chksum[8];
	char typeflag;
	char linkname[100];
	char magic[6];
	char version[2];
	char uname[32];
	char gname[32];
	char devmajor[8];
	char devminor[8];
	char prefix[155];
};

struct tar_blk_t {
	union {
		struct tar_hdr_t hdr;
		char block[TAR_BLOCK_LEN];
	};
};

struct tar_arch_info_t {
	struct list_head ucodes;
	const struct firmware *fw;
};

static struct otx_cpt_bitmap get_cores_bmap(struct device *dev,
					struct otx_cpt_eng_grp_info *eng_grp)
{
	struct otx_cpt_bitmap bmap = { {0} };
	bool found = false;
	int i;

	if (eng_grp->g->engs_num > OTX_CPT_MAX_ENGINES) {
		dev_err(dev, "unsupported number of engines %d on octeontx\n",
			eng_grp->g->engs_num);
		return bmap;
	}

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (eng_grp->engs[i].type) {
			bitmap_or(bmap.bits, bmap.bits,
				  eng_grp->engs[i].bmap,
				  eng_grp->g->engs_num);
			bmap.size = eng_grp->g->engs_num;
			found = true;
		}
	}

	if (!found)
		dev_err(dev, "No engines reserved for engine group %d\n",
			eng_grp->idx);
	return bmap;
}

static int is_eng_type(int val, int eng_type)
{
	return val & (1 << eng_type);
}

static int dev_supports_eng_type(struct otx_cpt_eng_grps *eng_grps,
				 int eng_type)
{
	return is_eng_type(eng_grps->eng_types_supported, eng_type);
}

static void set_ucode_filename(struct otx_cpt_ucode *ucode,
			       const char *filename)
{
	strscpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH);
}

static char *get_eng_type_str(int eng_type)
{
	char *str = "unknown";

	switch (eng_type) {
	case OTX_CPT_SE_TYPES:
		str = "SE";
		break;

	case OTX_CPT_AE_TYPES:
		str = "AE";
		break;
	}
	return str;
}

static char *get_ucode_type_str(int ucode_type)
{
	char *str = "unknown";

	switch (ucode_type) {
	case (1 << OTX_CPT_SE_TYPES):
		str = "SE";
		break;

	case (1 << OTX_CPT_AE_TYPES):
		str = "AE";
		break;
	}
	return str;
}
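
/*
 * Derive the microcode type from the image header: the lowercased
 * version string must contain "se-" or "ae" and the nn field of the
 * version number must match one of the known SE/AE microcode revisions.
 * An image claiming to support both engine types is rejected.
 */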
static int get_ucode_type(struct otx_cpt_ucode_hdr *ucode_hdr, int *ucode_type)
{
	char tmp_ver_str[OTX_CPT_UCODE_VER_STR_SZ];
	u32 i, val = 0;
	u8 nn;

	strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
	for (i = 0; i < strlen(tmp_ver_str); i++)
		tmp_ver_str[i] = tolower(tmp_ver_str[i]);

	nn = ucode_hdr->ver_num.nn;
	if (strnstr(tmp_ver_str, "se-", OTX_CPT_UCODE_VER_STR_SZ) &&
	    (nn == OTX_CPT_SE_UC_TYPE1 || nn == OTX_CPT_SE_UC_TYPE2 ||
	     nn == OTX_CPT_SE_UC_TYPE3))
		val |= 1 << OTX_CPT_SE_TYPES;
	if (strnstr(tmp_ver_str, "ae", OTX_CPT_UCODE_VER_STR_SZ) &&
	    nn == OTX_CPT_AE_UC_TYPE)
		val |= 1 << OTX_CPT_AE_TYPES;

	*ucode_type = val;

	if (!val)
		return -EINVAL;
	if (is_eng_type(val, OTX_CPT_AE_TYPES) &&
	    is_eng_type(val, OTX_CPT_SE_TYPES))
		return -EINVAL;
	return 0;
}

static int is_mem_zero(const char *ptr, int size)
{
	int i;

	for (i = 0; i < size; i++) {
		if (ptr[i])
			return 0;
	}
	return 1;
}

static int cpt_set_ucode_base(struct otx_cpt_eng_grp_info *eng_grp, void *obj)
{
	struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
	dma_addr_t dma_addr;
	struct otx_cpt_bitmap bmap;
	int i;

	bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	if (eng_grp->mirror.is_ena)
		dma_addr =
		       eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].align_dma;
	else
		dma_addr = eng_grp->ucode[0].align_dma;

	/*
	 * Set UCODE_BASE only for the cores which are not used,
	 * other cores should already have a valid UCODE_BASE set
	 */
	for_each_set_bit(i, bmap.bits, bmap.size)
		if (!eng_grp->g->eng_ref_cnt[i])
			writeq((u64) dma_addr, cpt->reg_base +
				OTX_CPT_PF_ENGX_UCODE_BASE(i));
	return 0;
}

static int cpt_detach_and_disable_cores(struct otx_cpt_eng_grp_info *eng_grp,
					void *obj)
{
	struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
	struct otx_cpt_bitmap bmap = { {0} };
	int timeout = 10;
	int i, busy;
	u64 reg;

	bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	/* Detach the cores from group */
	reg = readq(cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
	for_each_set_bit(i, bmap.bits, bmap.size) {
		if (reg & (1ull << i)) {
			eng_grp->g->eng_ref_cnt[i]--;
			reg &= ~(1ull << i);
		}
	}
	writeq(reg, cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));

	/* Wait for cores to become idle */
	do {
		busy = 0;
		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;

		reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
		for_each_set_bit(i, bmap.bits, bmap.size)
			if (reg & (1ull << i)) {
				busy = 1;
				break;
			}
	} while (busy);

	/* Disable the cores only if they are not used anymore */
	reg = readq(cpt->reg_base + OTX_CPT_PF_EXE_CTL);
	for_each_set_bit(i, bmap.bits, bmap.size)
		if (!eng_grp->g->eng_ref_cnt[i])
			reg &= ~(1ull << i);
	writeq(reg, cpt->reg_base + OTX_CPT_PF_EXE_CTL);

	return 0;
}

static int cpt_attach_and_enable_cores(struct otx_cpt_eng_grp_info *eng_grp,
				       void *obj)
{
	struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
	struct otx_cpt_bitmap bmap;
	u64 reg;
	int i;

	bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	/* Attach the cores to the group */
	reg = readq(cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
	for_each_set_bit(i, bmap.bits, bmap.size) {
		if (!(reg & (1ull << i))) {
			eng_grp->g->eng_ref_cnt[i]++;
			reg |= 1ull << i;
		}
	}
	writeq(reg, cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));

	/* Enable the cores */
	reg = readq(cpt->reg_base + OTX_CPT_PF_EXE_CTL);
	for_each_set_bit(i, bmap.bits, bmap.size)
		reg |= 1ull << i;
	writeq(reg, cpt->reg_base + OTX_CPT_PF_EXE_CTL);

	return 0;
}

static int process_tar_file(struct device *dev,
			    struct tar_arch_info_t *tar_arch, char *filename,
			    const u8 *data, u32 size)
{
	struct tar_ucode_info_t *tar_info;
	struct otx_cpt_ucode_hdr *ucode_hdr;
	int ucode_type, ucode_size;
	unsigned int code_length;

	/*
	 * If size is less than microcode header size then don't report
	 * an error because it might not be a microcode file, just process
	 * the next file from the archive
	 */
	if (size < sizeof(struct otx_cpt_ucode_hdr))
		return 0;

	ucode_hdr = (struct otx_cpt_ucode_hdr *) data;
	/*
	 * If the microcode version can't be found don't report an error
	 * because it might not be a microcode file, just process the next
	 * file
	 */
	if (get_ucode_type(ucode_hdr, &ucode_type))
		return 0;

	code_length = ntohl(ucode_hdr->code_length);
	if (code_length >= INT_MAX / 2) {
		dev_err(dev, "Invalid code_length %u\n", code_length);
		return -EINVAL;
	}

	ucode_size = code_length * 2;
	if (!ucode_size || (size < round_up(ucode_size, 16) +
	    sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
		dev_err(dev, "Ucode %s invalid size\n", filename);
		return -EINVAL;
	}

	tar_info = kzalloc(sizeof(struct tar_ucode_info_t), GFP_KERNEL);
	if (!tar_info)
		return -ENOMEM;

	tar_info->ucode_ptr = data;
	set_ucode_filename(&tar_info->ucode, filename);
	memcpy(tar_info->ucode.ver_str, ucode_hdr->ver_str,
	       OTX_CPT_UCODE_VER_STR_SZ);
	tar_info->ucode.ver_num = ucode_hdr->ver_num;
	tar_info->ucode.type = ucode_type;
	tar_info->ucode.size = ucode_size;
	list_add_tail(&tar_info->list, &tar_arch->ucodes);

	return 0;
}

static void release_tar_archive(struct tar_arch_info_t *tar_arch)
{
	struct tar_ucode_info_t *curr, *temp;

	if (!tar_arch)
		return;

	list_for_each_entry_safe(curr, temp, &tar_arch->ucodes, list) {
		list_del(&curr->list);
		kfree(curr);
	}

	if (tar_arch->fw)
		release_firmware(tar_arch->fw);
	kfree(tar_arch);
}

static struct tar_ucode_info_t *get_uc_from_tar_archive(
					struct tar_arch_info_t *tar_arch,
					int ucode_type)
{
	struct tar_ucode_info_t *curr, *uc_found = NULL;

	list_for_each_entry(curr, &tar_arch->ucodes, list) {
		if (!is_eng_type(curr->ucode.type, ucode_type))
			continue;

		if (!uc_found) {
			uc_found = curr;
			continue;
		}

		switch (ucode_type) {
		case OTX_CPT_AE_TYPES:
			break;

		case OTX_CPT_SE_TYPES:
			if (uc_found->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE2 ||
			    (uc_found->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE3
			     && curr->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE1))
				uc_found = curr;
			break;
		}
	}

	return uc_found;
}

static void print_tar_dbg_info(struct tar_arch_info_t *tar_arch,
			       char *tar_filename)
{
	struct tar_ucode_info_t *curr;

	pr_debug("Tar archive filename %s\n", tar_filename);
	pr_debug("Tar archive pointer %p, size %zu\n", tar_arch->fw->data,
		 tar_arch->fw->size);
	list_for_each_entry(curr, &tar_arch->ucodes, list) {
		pr_debug("Ucode filename %s\n", curr->ucode.filename);
		pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
		pr_debug("Ucode version %d.%d.%d.%d\n",
			 curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
			 curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
		pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
			 get_ucode_type_str(curr->ucode.type));
		pr_debug("Ucode size %d\n", curr->ucode.size);
		pr_debug("Ucode ptr %p\n", curr->ucode_ptr);
	}
}
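
/*
 * Walk a POSIX (ustar) archive: each member consists of a 512-byte
 * header block with an octal ASCII size field, followed by the member
 * data padded to a multiple of 512 bytes. The archive is terminated by
 * two all-zero blocks. Only regular-file members are handed to
 * process_tar_file().
 */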
static struct tar_arch_info_t *load_tar_archive(struct device *dev,
						char *tar_filename)
{
	struct tar_arch_info_t *tar_arch = NULL;
	struct tar_blk_t *tar_blk;
	unsigned int cur_size;
	size_t tar_offs = 0;
	size_t tar_size;
	int ret;

	tar_arch = kzalloc(sizeof(struct tar_arch_info_t), GFP_KERNEL);
	if (!tar_arch)
		return NULL;

	INIT_LIST_HEAD(&tar_arch->ucodes);

	/* Load tar archive */
	ret = request_firmware(&tar_arch->fw, tar_filename, dev);
	if (ret)
		goto release_tar_arch;

	if (tar_arch->fw->size < TAR_BLOCK_LEN) {
		dev_err(dev, "Invalid tar archive %s\n", tar_filename);
		goto release_tar_arch;
	}

	tar_size = tar_arch->fw->size;
	tar_blk = (struct tar_blk_t *) tar_arch->fw->data;
	if (strncmp(tar_blk->hdr.magic, TAR_MAGIC, TAR_MAGIC_LEN - 1)) {
		dev_err(dev, "Unsupported format of tar archive %s\n",
			tar_filename);
		goto release_tar_arch;
	}

	while (1) {
		/* Read current file size */
		ret = kstrtouint(tar_blk->hdr.size, 8, &cur_size);
		if (ret)
			goto release_tar_arch;

		if (tar_offs + cur_size > tar_size ||
		    tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
			dev_err(dev, "Invalid tar archive %s\n", tar_filename);
			goto release_tar_arch;
		}

		tar_offs += TAR_BLOCK_LEN;
		if (tar_blk->hdr.typeflag == REGTYPE ||
		    tar_blk->hdr.typeflag == AREGTYPE) {
			ret = process_tar_file(dev, tar_arch,
					       tar_blk->hdr.name,
					       &tar_arch->fw->data[tar_offs],
					       cur_size);
			if (ret)
				goto release_tar_arch;
		}

		tar_offs += (cur_size/TAR_BLOCK_LEN) * TAR_BLOCK_LEN;
		if (cur_size % TAR_BLOCK_LEN)
			tar_offs += TAR_BLOCK_LEN;

		/* Check for the end of the archive */
		if (tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
			dev_err(dev, "Invalid tar archive %s\n", tar_filename);
			goto release_tar_arch;
		}

		if (is_mem_zero(&tar_arch->fw->data[tar_offs],
		    2*TAR_BLOCK_LEN))
			break;

		/* Read next block from tar archive */
		tar_blk = (struct tar_blk_t *) &tar_arch->fw->data[tar_offs];
	}

	print_tar_dbg_info(tar_arch, tar_filename);
	return tar_arch;
release_tar_arch:
	release_tar_archive(tar_arch);
	return NULL;
}

static struct otx_cpt_engs_rsvd *find_engines_by_type(
					struct otx_cpt_eng_grp_info *eng_grp,
					int eng_type)
{
	int i;

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!eng_grp->engs[i].type)
			continue;

		if (eng_grp->engs[i].type == eng_type)
			return &eng_grp->engs[i];
	}
	return NULL;
}

int otx_cpt_uc_supports_eng_type(struct otx_cpt_ucode *ucode, int eng_type)
{
	return is_eng_type(ucode->type, eng_type);
}
EXPORT_SYMBOL_GPL(otx_cpt_uc_supports_eng_type);

int otx_cpt_eng_grp_has_eng_type(struct otx_cpt_eng_grp_info *eng_grp,
				 int eng_type)
{
	struct otx_cpt_engs_rsvd *engs;

	engs = find_engines_by_type(eng_grp, eng_type);

	return (engs != NULL ? 1 : 0);
}
EXPORT_SYMBOL_GPL(otx_cpt_eng_grp_has_eng_type);

static void print_ucode_info(struct otx_cpt_eng_grp_info *eng_grp,
			     char *buf, int size)
{
	if (eng_grp->mirror.is_ena) {
		scnprintf(buf, size, "%s (shared with engine_group%d)",
			  eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].ver_str,
			  eng_grp->mirror.idx);
	} else {
		scnprintf(buf, size, "%s", eng_grp->ucode[0].ver_str);
	}
}

static void print_engs_info(struct otx_cpt_eng_grp_info *eng_grp,
			    char *buf, int size, int idx)
{
	struct otx_cpt_engs_rsvd *mirrored_engs = NULL;
	struct otx_cpt_engs_rsvd *engs;
	int len, i;

	buf[0] = '\0';
	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;
		if (idx != -1 && idx != i)
			continue;

		if (eng_grp->mirror.is_ena)
			mirrored_engs = find_engines_by_type(
					&eng_grp->g->grp[eng_grp->mirror.idx],
					engs->type);
		if (i > 0 && idx == -1) {
			len = strlen(buf);
			scnprintf(buf+len, size-len, ", ");
		}

		len = strlen(buf);
		scnprintf(buf+len, size-len, "%d %s ", mirrored_engs ?
			  engs->count + mirrored_engs->count : engs->count,
			  get_eng_type_str(engs->type));
		if (mirrored_engs) {
			len = strlen(buf);
			scnprintf(buf+len, size-len,
				  "(%d shared with engine_group%d) ",
				  engs->count <= 0 ? engs->count +
				  mirrored_engs->count : mirrored_engs->count,
				  eng_grp->mirror.idx);
		}
	}
}

static void print_ucode_dbg_info(struct otx_cpt_ucode *ucode)
{
	pr_debug("Ucode info\n");
	pr_debug("Ucode version string %s\n", ucode->ver_str);
	pr_debug("Ucode version %d.%d.%d.%d\n", ucode->ver_num.nn,
		 ucode->ver_num.xx, ucode->ver_num.yy, ucode->ver_num.zz);
	pr_debug("Ucode type %s\n", get_ucode_type_str(ucode->type));
	pr_debug("Ucode size %d\n", ucode->size);
	pr_debug("Ucode virt address %16.16llx\n", (u64)ucode->align_va);
	pr_debug("Ucode phys address %16.16llx\n", ucode->align_dma);
}

static void cpt_print_engines_mask(struct otx_cpt_eng_grp_info *eng_grp,
				   struct device *dev, char *buf, int size)
{
	struct otx_cpt_bitmap bmap;
	u32 mask[2];

	bmap = get_cores_bmap(dev, eng_grp);
	if (!bmap.size) {
		scnprintf(buf, size, "unknown");
		return;
	}
	bitmap_to_arr32(mask, bmap.bits, bmap.size);
	scnprintf(buf, size, "%8.8x %8.8x", mask[1], mask[0]);
}

static void print_dbg_info(struct device *dev,
			   struct otx_cpt_eng_grps *eng_grps)
{
	char engs_info[2*OTX_CPT_UCODE_NAME_LENGTH];
	struct otx_cpt_eng_grp_info *mirrored_grp;
	char engs_mask[OTX_CPT_UCODE_NAME_LENGTH];
	struct otx_cpt_eng_grp_info *grp;
	struct otx_cpt_engs_rsvd *engs;
	u32 mask[4];
	int i, j;

	pr_debug("Engine groups global info\n");
	pr_debug("max SE %d, max AE %d\n",
		 eng_grps->avail.max_se_cnt, eng_grps->avail.max_ae_cnt);
	pr_debug("free SE %d\n", eng_grps->avail.se_cnt);
	pr_debug("free AE %d\n", eng_grps->avail.ae_cnt);

	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		pr_debug("engine_group%d, state %s\n", i, grp->is_enabled ?
			 "enabled" : "disabled");
		if (grp->is_enabled) {
			mirrored_grp = &eng_grps->grp[grp->mirror.idx];
			pr_debug("Ucode0 filename %s, version %s\n",
				 grp->mirror.is_ena ?
				 mirrored_grp->ucode[0].filename :
				 grp->ucode[0].filename,
				 grp->mirror.is_ena ?
				 mirrored_grp->ucode[0].ver_str :
				 grp->ucode[0].ver_str);
		}

		for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
			engs = &grp->engs[j];
			if (engs->type) {
				print_engs_info(grp, engs_info,
						2*OTX_CPT_UCODE_NAME_LENGTH, j);
				pr_debug("Slot%d: %s\n", j, engs_info);
				bitmap_to_arr32(mask, engs->bmap,
						eng_grps->engs_num);
				pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x\n",
					 mask[3], mask[2], mask[1], mask[0]);
			} else
				pr_debug("Slot%d not used\n", j);
		}
		if (grp->is_enabled) {
			cpt_print_engines_mask(grp, dev, engs_mask,
					       OTX_CPT_UCODE_NAME_LENGTH);
			pr_debug("Cmask: %s\n", engs_mask);
		}
	}
}

static int update_engines_avail_count(struct device *dev,
				      struct otx_cpt_engs_available *avail,
				      struct otx_cpt_engs_rsvd *engs, int val)
{
	switch (engs->type) {
	case OTX_CPT_SE_TYPES:
		avail->se_cnt += val;
		break;

	case OTX_CPT_AE_TYPES:
		avail->ae_cnt += val;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", engs->type);
		return -EINVAL;
	}

	return 0;
}

static int update_engines_offset(struct device *dev,
				 struct otx_cpt_engs_available *avail,
				 struct otx_cpt_engs_rsvd *engs)
{
	switch (engs->type) {
	case OTX_CPT_SE_TYPES:
		engs->offset = 0;
		break;

	case OTX_CPT_AE_TYPES:
		engs->offset = avail->max_se_cnt;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", engs->type);
		return -EINVAL;
	}

	return 0;
}

static int release_engines(struct device *dev, struct otx_cpt_eng_grp_info *grp)
{
	int i, ret = 0;

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!grp->engs[i].type)
			continue;

		if (grp->engs[i].count > 0) {
			ret = update_engines_avail_count(dev, &grp->g->avail,
							 &grp->engs[i],
							 grp->engs[i].count);
			if (ret)
				return ret;
		}

		grp->engs[i].type = 0;
		grp->engs[i].count = 0;
		grp->engs[i].offset = 0;
		grp->engs[i].ucode = NULL;
		bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
	}

	return 0;
}

static int do_reserve_engines(struct device *dev,
			      struct otx_cpt_eng_grp_info *grp,
			      struct otx_cpt_engines *req_engs)
{
	struct otx_cpt_engs_rsvd *engs = NULL;
	int i, ret;

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!grp->engs[i].type) {
			engs = &grp->engs[i];
			break;
		}
	}

	if (!engs)
		return -ENOMEM;

	engs->type = req_engs->type;
	engs->count = req_engs->count;

	ret = update_engines_offset(dev, &grp->g->avail, engs);
	if (ret)
		return ret;

	if (engs->count > 0) {
		ret = update_engines_avail_count(dev, &grp->g->avail, engs,
						 -engs->count);
		if (ret)
			return ret;
	}

	return 0;
}

static int check_engines_availability(struct device *dev,
				      struct otx_cpt_eng_grp_info *grp,
				      struct otx_cpt_engines *req_eng)
{
	int avail_cnt = 0;

	switch (req_eng->type) {
	case OTX_CPT_SE_TYPES:
		avail_cnt = grp->g->avail.se_cnt;
		break;

	case OTX_CPT_AE_TYPES:
		avail_cnt = grp->g->avail.ae_cnt;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", req_eng->type);
		return -EINVAL;
	}

	if (avail_cnt < req_eng->count) {
		dev_err(dev,
			"Error available %s engines %d < requested %d\n",
			get_eng_type_str(req_eng->type),
			avail_cnt, req_eng->count);
		return -EBUSY;
	}

	return 0;
}
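
/*
 * Engines are reserved in two passes: the requested counts for all
 * engine types are validated against what is available before any
 * engines are actually reserved.
 */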
static int reserve_engines(struct device *dev, struct otx_cpt_eng_grp_info *grp,
			   struct otx_cpt_engines *req_engs, int req_cnt)
{
	int i, ret;

	/* Validate that the requested number of engines is available */
	for (i = 0; i < req_cnt; i++) {
		ret = check_engines_availability(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
	}

	/* Reserve requested engines for this engine group */
	for (i = 0; i < req_cnt; i++) {
		ret = do_reserve_engines(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
	}
	return 0;
}

static ssize_t eng_grp_info_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	char ucode_info[2*OTX_CPT_UCODE_NAME_LENGTH];
	char engs_info[2*OTX_CPT_UCODE_NAME_LENGTH];
	char engs_mask[OTX_CPT_UCODE_NAME_LENGTH];
	struct otx_cpt_eng_grp_info *eng_grp;
	int ret;

	eng_grp = container_of(attr, struct otx_cpt_eng_grp_info, info_attr);
	mutex_lock(&eng_grp->g->lock);

	print_engs_info(eng_grp, engs_info, 2*OTX_CPT_UCODE_NAME_LENGTH, -1);
	print_ucode_info(eng_grp, ucode_info, 2*OTX_CPT_UCODE_NAME_LENGTH);
	cpt_print_engines_mask(eng_grp, dev, engs_mask,
			       OTX_CPT_UCODE_NAME_LENGTH);
	ret = scnprintf(buf, PAGE_SIZE,
			"Microcode : %s\nEngines: %s\nEngines mask: %s\n",
			ucode_info, engs_info, engs_mask);

	mutex_unlock(&eng_grp->g->lock);
	return ret;
}

static int create_sysfs_eng_grps_info(struct device *dev,
				      struct otx_cpt_eng_grp_info *eng_grp)
{
	eng_grp->info_attr.show = eng_grp_info_show;
	eng_grp->info_attr.store = NULL;
	eng_grp->info_attr.attr.name = eng_grp->sysfs_info_name;
	eng_grp->info_attr.attr.mode = 0440;
	sysfs_attr_init(&eng_grp->info_attr.attr);
	return device_create_file(dev, &eng_grp->info_attr);
}

static void ucode_unload(struct device *dev, struct otx_cpt_ucode *ucode)
{
	if (ucode->va) {
		dma_free_coherent(dev, ucode->size + OTX_CPT_UCODE_ALIGNMENT,
				  ucode->va, ucode->dma);
		ucode->va = NULL;
		ucode->align_va = NULL;
		ucode->dma = 0;
		ucode->align_dma = 0;
		ucode->size = 0;
	}

	memset(&ucode->ver_str, 0, OTX_CPT_UCODE_VER_STR_SZ);
	memset(&ucode->ver_num, 0, sizeof(struct otx_cpt_ucode_ver_num));
	set_ucode_filename(ucode, "");
	ucode->type = 0;
}

static int copy_ucode_to_dma_mem(struct device *dev,
				 struct otx_cpt_ucode *ucode,
				 const u8 *ucode_data)
{
	u32 i;

	/* Allocate DMAable space */
	ucode->va = dma_alloc_coherent(dev, ucode->size +
				       OTX_CPT_UCODE_ALIGNMENT,
				       &ucode->dma, GFP_KERNEL);
	if (!ucode->va) {
		dev_err(dev, "Unable to allocate space for microcode\n");
		return -ENOMEM;
	}
	ucode->align_va = PTR_ALIGN(ucode->va, OTX_CPT_UCODE_ALIGNMENT);
	ucode->align_dma = PTR_ALIGN(ucode->dma, OTX_CPT_UCODE_ALIGNMENT);

	memcpy((void *) ucode->align_va, (void *) ucode_data +
	       sizeof(struct otx_cpt_ucode_hdr), ucode->size);

	/* Byte swap 64-bit */
	for (i = 0; i < (ucode->size / 8); i++)
		((__be64 *)ucode->align_va)[i] =
				cpu_to_be64(((u64 *)ucode->align_va)[i]);
	/* Ucode needs 16-bit swap */
	for (i = 0; i < (ucode->size / 2); i++)
		((__be16 *)ucode->align_va)[i] =
				cpu_to_be16(((u16 *)ucode->align_va)[i]);
	return 0;
}
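
/*
 * A microcode image is laid out as struct otx_cpt_ucode_hdr followed by
 * the code itself and a trailing signature. code_length in the header
 * is big-endian and counts 16-bit words, so the code occupies
 * code_length * 2 bytes, rounded up to 16 bytes before the signature.
 */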
static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
		      const char *ucode_filename)
{
	struct otx_cpt_ucode_hdr *ucode_hdr;
	const struct firmware *fw;
	unsigned int code_length;
	int ret;

	set_ucode_filename(ucode, ucode_filename);
	ret = request_firmware(&fw, ucode->filename, dev);
	if (ret)
		return ret;

	ucode_hdr = (struct otx_cpt_ucode_hdr *) fw->data;
	memcpy(ucode->ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
	ucode->ver_num = ucode_hdr->ver_num;
	code_length = ntohl(ucode_hdr->code_length);
	if (code_length >= INT_MAX / 2) {
		dev_err(dev, "Ucode invalid code_length %u\n", code_length);
		ret = -EINVAL;
		goto release_fw;
	}
	ucode->size = code_length * 2;
	if (!ucode->size || (fw->size < round_up(ucode->size, 16)
	    + sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
		dev_err(dev, "Ucode %s invalid size\n", ucode_filename);
		ret = -EINVAL;
		goto release_fw;
	}

	ret = get_ucode_type(ucode_hdr, &ucode->type);
	if (ret) {
		dev_err(dev, "Microcode %s unknown type 0x%x\n",
			ucode->filename, ucode->type);
		goto release_fw;
	}

	ret = copy_ucode_to_dma_mem(dev, ucode, fw->data);
	if (ret)
		goto release_fw;

	print_ucode_dbg_info(ucode);
release_fw:
	release_firmware(fw);
	return ret;
}

static int enable_eng_grp(struct otx_cpt_eng_grp_info *eng_grp,
			  void *obj)
{
	int ret;

	ret = cpt_set_ucode_base(eng_grp, obj);
	if (ret)
		return ret;

	ret = cpt_attach_and_enable_cores(eng_grp, obj);
	return ret;
}

static int disable_eng_grp(struct device *dev,
			   struct otx_cpt_eng_grp_info *eng_grp,
			   void *obj)
{
	int i, ret;

	ret = cpt_detach_and_disable_cores(eng_grp, obj);
	if (ret)
		return ret;

	/* Unload ucode used by this engine group */
	ucode_unload(dev, &eng_grp->ucode[0]);

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!eng_grp->engs[i].type)
			continue;

		eng_grp->engs[i].ucode = &eng_grp->ucode[0];
	}

	ret = cpt_set_ucode_base(eng_grp, obj);

	return ret;
}

static void setup_eng_grp_mirroring(struct otx_cpt_eng_grp_info *dst_grp,
				    struct otx_cpt_eng_grp_info *src_grp)
{
	/* Setup fields for engine group which is mirrored */
	src_grp->mirror.is_ena = false;
	src_grp->mirror.idx = 0;
	src_grp->mirror.ref_count++;

	/* Setup fields for mirroring engine group */
	dst_grp->mirror.is_ena = true;
	dst_grp->mirror.idx = src_grp->idx;
	dst_grp->mirror.ref_count = 0;
}

static void remove_eng_grp_mirroring(struct otx_cpt_eng_grp_info *dst_grp)
{
	struct otx_cpt_eng_grp_info *src_grp;

	if (!dst_grp->mirror.is_ena)
		return;

	src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];

	src_grp->mirror.ref_count--;
	dst_grp->mirror.is_ena = false;
	dst_grp->mirror.idx = 0;
	dst_grp->mirror.ref_count = 0;
}
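
/*
 * Mirroring lets two engine groups run the same microcode version: the
 * new (mirroring) group shares the ucode copy and the engines of the
 * existing (mirrored) group, and only reserves additional engines if it
 * requests more than the mirrored group already holds.
 */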
static void update_requested_engs(struct otx_cpt_eng_grp_info *mirrored_eng_grp,
				  struct otx_cpt_engines *engs, int engs_cnt)
{
	struct otx_cpt_engs_rsvd *mirrored_engs;
	int i;

	for (i = 0; i < engs_cnt; i++) {
		mirrored_engs = find_engines_by_type(mirrored_eng_grp,
						     engs[i].type);
		if (!mirrored_engs)
			continue;

		/*
		 * If the mirrored group has this type of engines attached then
		 * there are 3 scenarios possible:
		 * 1) mirrored_engs.count == engs[i].count then all engines
		 * from the mirrored engine group will be shared with this
		 * engine group
		 * 2) mirrored_engs.count > engs[i].count then only a subset of
		 * engines from the mirrored engine group will be shared with
		 * this engine group
		 * 3) mirrored_engs.count < engs[i].count then all engines
		 * from the mirrored engine group will be shared with this
		 * group and additional engines will be reserved for exclusive
		 * use by this engine group
		 */
		engs[i].count -= mirrored_engs->count;
	}
}

static struct otx_cpt_eng_grp_info *find_mirrored_eng_grp(
					struct otx_cpt_eng_grp_info *grp)
{
	struct otx_cpt_eng_grps *eng_grps = grp->g;
	int i;

	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
		if (!eng_grps->grp[i].is_enabled)
			continue;
		if (eng_grps->grp[i].ucode[0].type)
			continue;
		if (grp->idx == i)
			continue;
		if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
				 grp->ucode[0].ver_str,
				 OTX_CPT_UCODE_VER_STR_SZ))
			return &eng_grps->grp[i];
	}

	return NULL;
}

static struct otx_cpt_eng_grp_info *find_unused_eng_grp(
					struct otx_cpt_eng_grps *eng_grps)
{
	int i;

	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
		if (!eng_grps->grp[i].is_enabled)
			return &eng_grps->grp[i];
	}
	return NULL;
}

static int eng_grp_update_masks(struct device *dev,
				struct otx_cpt_eng_grp_info *eng_grp)
{
	struct otx_cpt_engs_rsvd *engs, *mirrored_engs;
	struct otx_cpt_bitmap tmp_bmap = { {0} };
	int i, j, cnt, max_cnt;
	int bit;

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;
		if (engs->count <= 0)
			continue;

		switch (engs->type) {
		case OTX_CPT_SE_TYPES:
			max_cnt = eng_grp->g->avail.max_se_cnt;
			break;

		case OTX_CPT_AE_TYPES:
			max_cnt = eng_grp->g->avail.max_ae_cnt;
			break;

		default:
			dev_err(dev, "Invalid engine type %d\n", engs->type);
			return -EINVAL;
		}

		cnt = engs->count;
		WARN_ON(engs->offset + max_cnt > OTX_CPT_MAX_ENGINES);
		bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
		for (j = engs->offset; j < engs->offset + max_cnt; j++) {
			if (!eng_grp->g->eng_ref_cnt[j]) {
				bitmap_set(tmp_bmap.bits, j, 1);
				cnt--;
				if (!cnt)
					break;
			}
		}

		if (cnt)
			return -ENOSPC;

		bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
	}

	if (!eng_grp->mirror.is_ena)
		return 0;

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;

		mirrored_engs = find_engines_by_type(
					&eng_grp->g->grp[eng_grp->mirror.idx],
					engs->type);
		WARN_ON(!mirrored_engs && engs->count <= 0);
		if (!mirrored_engs)
			continue;

		bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
			    eng_grp->g->engs_num);
		if (engs->count < 0) {
			bit = find_first_bit(mirrored_engs->bmap,
					     eng_grp->g->engs_num);
			bitmap_clear(tmp_bmap.bits, bit, -engs->count);
		}
		bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
			  eng_grp->g->engs_num);
	}
	return 0;
}

static int delete_engine_group(struct device *dev,
			       struct otx_cpt_eng_grp_info *eng_grp)
{
	int i, ret;

	if (!eng_grp->is_enabled)
		return -EINVAL;

	if (eng_grp->mirror.ref_count) {
		dev_err(dev, "Can't delete engine_group%d as it is used by engine_group(s):",
			eng_grp->idx);
		for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
			if (eng_grp->g->grp[i].mirror.is_ena &&
			    eng_grp->g->grp[i].mirror.idx == eng_grp->idx)
				pr_cont(" %d", i);
		}
		pr_cont("\n");
		return -EINVAL;
	}

	/* Remove engine group mirroring if enabled */
	remove_eng_grp_mirroring(eng_grp);

	/* Disable engine group */
	ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
	if (ret)
		return ret;

	/* Release all engines held by this engine group */
	ret = release_engines(dev, eng_grp);
	if (ret)
		return ret;

	device_remove_file(dev, &eng_grp->info_attr);
	eng_grp->is_enabled = false;

	return 0;
}

static int validate_1_ucode_scenario(struct device *dev,
				     struct otx_cpt_eng_grp_info *eng_grp,
				     struct otx_cpt_engines *engs, int engs_cnt)
{
	int i;

	/* Verify that ucode loaded supports requested engine types */
	for (i = 0; i < engs_cnt; i++) {
		if (!otx_cpt_uc_supports_eng_type(&eng_grp->ucode[0],
						  engs[i].type)) {
			dev_err(dev,
				"Microcode %s does not support %s engines\n",
				eng_grp->ucode[0].filename,
				get_eng_type_str(engs[i].type));
			return -EINVAL;
		}
	}
	return 0;
}

static void update_ucode_ptrs(struct otx_cpt_eng_grp_info *eng_grp)
{
	struct otx_cpt_ucode *ucode;

	if (eng_grp->mirror.is_ena)
		ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
	else
		ucode = &eng_grp->ucode[0];
	WARN_ON(!eng_grp->engs[0].type);
	eng_grp->engs[0].ucode = ucode;
}

static int create_engine_group(struct device *dev,
			       struct otx_cpt_eng_grps *eng_grps,
			       struct otx_cpt_engines *engs, int engs_cnt,
			       void *ucode_data[], int ucodes_cnt,
			       bool use_uc_from_tar_arch)
{
	struct otx_cpt_eng_grp_info *mirrored_eng_grp;
	struct tar_ucode_info_t *tar_info;
	struct otx_cpt_eng_grp_info *eng_grp;
	int i, ret = 0;

	if (ucodes_cnt > OTX_CPT_MAX_ETYPES_PER_GRP)
		return -EINVAL;

	/* Validate that the requested engine types are supported by this
	 * device
	 */
	for (i = 0; i < engs_cnt; i++)
		if (!dev_supports_eng_type(eng_grps, engs[i].type)) {
			dev_err(dev, "Device does not support %s engines\n",
				get_eng_type_str(engs[i].type));
			return -EPERM;
		}

	/* Find engine group which is not used */
	eng_grp = find_unused_eng_grp(eng_grps);
	if (!eng_grp) {
		dev_err(dev, "Error all engine groups are being used\n");
		return -ENOSPC;
	}

	/* Load ucode */
	for (i = 0; i < ucodes_cnt; i++) {
		if (use_uc_from_tar_arch) {
			tar_info = (struct tar_ucode_info_t *) ucode_data[i];
			eng_grp->ucode[i] = tar_info->ucode;
			ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
						    tar_info->ucode_ptr);
		} else
			ret = ucode_load(dev, &eng_grp->ucode[i],
					 (char *) ucode_data[i]);
		if (ret)
			goto err_ucode_unload;
	}

	/* Validate scenario where 1 ucode is used */
	ret = validate_1_ucode_scenario(dev, eng_grp, engs, engs_cnt);
	if (ret)
		goto err_ucode_unload;

	/* Check if this group mirrors another existing engine group */
	mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
	if (mirrored_eng_grp) {
		/* Setup mirroring */
		setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);

		/*
		 * Update count of requested engines because some
		 * of them might be shared with the mirrored group
		 */
		update_requested_engs(mirrored_eng_grp, engs, engs_cnt);
	}

	/* Reserve engines */
	ret = reserve_engines(dev, eng_grp, engs, engs_cnt);
	if (ret)
		goto err_ucode_unload;

	/* Update ucode pointers used by engines */
	update_ucode_ptrs(eng_grp);

	/* Update engine masks used by this group */
	ret = eng_grp_update_masks(dev, eng_grp);
	if (ret)
		goto err_release_engs;

	/* Create sysfs entry for engine group info */
	ret = create_sysfs_eng_grps_info(dev, eng_grp);
	if (ret)
		goto err_release_engs;

	/* Enable engine group */
	ret = enable_eng_grp(eng_grp, eng_grps->obj);
	if (ret)
		goto err_release_engs;

	/*
	 * If this engine group mirrors another engine group
	 * then we need to unload its ucode as the ucode
	 * from the mirrored engine group will be used
	 */
	if (eng_grp->mirror.is_ena)
		ucode_unload(dev, &eng_grp->ucode[0]);

	eng_grp->is_enabled = true;
	if (eng_grp->mirror.is_ena)
		dev_info(dev,
			 "Engine_group%d: reuse microcode %s from group %d\n",
			 eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
			 mirrored_eng_grp->idx);
	else
		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[0].ver_str);

	return 0;

err_release_engs:
	release_engines(dev, eng_grp);
err_ucode_unload:
	ucode_unload(dev, &eng_grp->ucode[0]);
	return ret;
}
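
/*
 * sysfs "ucode_load" write handler. The input is a semicolon-separated
 * list of "<eng_type>:<count>" tokens ("se" or "ae") followed by the
 * microcode file name(s), for example (name and count illustrative):
 *
 *   echo "se:48;microcode-se.out" > ucode_load
 *
 * Writing "engine_group<idx>:null" instead deletes engine group <idx>.
 */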
static ssize_t ucode_load_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = { {0} };
	char *ucode_filename[OTX_CPT_MAX_ETYPES_PER_GRP];
	char tmp_buf[OTX_CPT_UCODE_NAME_LENGTH] = { 0 };
	char *start, *val, *err_msg, *tmp;
	struct otx_cpt_eng_grps *eng_grps;
	int grp_idx = 0, ret = -EINVAL;
	bool has_se, has_ae;
	int del_grp_idx = -1;
	int ucode_idx = 0;

	if (strlen(buf) > OTX_CPT_UCODE_NAME_LENGTH)
		return -EINVAL;

	eng_grps = container_of(attr, struct otx_cpt_eng_grps, ucode_load_attr);
	err_msg = "Invalid engine group format";
	strscpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH);
	start = tmp_buf;

	has_se = has_ae = false;

	for (;;) {
		val = strsep(&start, ";");
		if (!val)
			break;
		val = strim(val);
		if (!*val)
			continue;

		if (!strncasecmp(val, "engine_group", 12)) {
			if (del_grp_idx != -1)
				goto err_print;
			tmp = strim(strsep(&val, ":"));
			if (!val)
				goto err_print;
			if (strlen(tmp) != 13)
				goto err_print;
			if (kstrtoint((tmp + 12), 10, &del_grp_idx))
				goto err_print;
			val = strim(val);
			if (strncasecmp(val, "null", 4))
				goto err_print;
			if (strlen(val) != 4)
				goto err_print;
		} else if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
			if (has_se || ucode_idx)
				goto err_print;
			tmp = strim(strsep(&val, ":"));
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX_CPT_SE_TYPES;
			has_se = true;
		} else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
			if (has_ae || ucode_idx)
				goto err_print;
			tmp = strim(strsep(&val, ":"));
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX_CPT_AE_TYPES;
			has_ae = true;
		} else {
			if (ucode_idx > 1)
				goto err_print;
			if (!strlen(val))
				goto err_print;
			if (strnstr(val, " ", strlen(val)))
				goto err_print;
			ucode_filename[ucode_idx++] = val;
		}
	}

	/* Validate input parameters */
	if (del_grp_idx == -1) {
		if (!(grp_idx && ucode_idx))
			goto err_print;

		if (ucode_idx > 1 && grp_idx < 2)
			goto err_print;

		if (grp_idx > OTX_CPT_MAX_ETYPES_PER_GRP) {
			err_msg = "Error max 2 engine types can be attached";
			goto err_print;
		}

	} else {
		if (del_grp_idx < 0 ||
		    del_grp_idx >= OTX_CPT_MAX_ENGINE_GROUPS) {
			dev_err(dev, "Invalid engine group index %d\n",
				del_grp_idx);
			ret = -EINVAL;
			return ret;
		}

		if (!eng_grps->grp[del_grp_idx].is_enabled) {
			dev_err(dev, "Error engine_group%d is not configured\n",
				del_grp_idx);
			ret = -EINVAL;
			return ret;
		}

		if (grp_idx || ucode_idx)
			goto err_print;
	}

	mutex_lock(&eng_grps->lock);

	if (eng_grps->is_rdonly) {
		dev_err(dev, "Disable VFs before modifying engine groups\n");
		ret = -EACCES;
		goto err_unlock;
	}

	if (del_grp_idx == -1)
		/* create engine group */
		ret = create_engine_group(dev, eng_grps, engs, grp_idx,
					  (void **) ucode_filename,
					  ucode_idx, false);
	else
		/* delete engine group */
		ret = delete_engine_group(dev, &eng_grps->grp[del_grp_idx]);
	if (ret)
		goto err_unlock;

	print_dbg_info(dev, eng_grps);
err_unlock:
	mutex_unlock(&eng_grps->lock);
	return ret ? ret : count;
err_print:
	dev_err(dev, "%s\n", err_msg);

	return ret;
}
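
/*
 * Create the default engine group(s) for kernel crypto from the
 * microcode tar archive. This is attempted only once, when VFs are
 * first enabled, and only if no engine groups have been configured
 * via sysfs yet.
 */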
int otx_cpt_try_create_default_eng_grps(struct pci_dev *pdev,
					struct otx_cpt_eng_grps *eng_grps,
					int pf_type)
{
	struct tar_ucode_info_t *tar_info[OTX_CPT_MAX_ETYPES_PER_GRP] = {};
	struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = {};
	struct tar_arch_info_t *tar_arch = NULL;
	char *tar_filename;
	int i, ret = 0;

	mutex_lock(&eng_grps->lock);

	/*
	 * Don't create an engine group for kernel crypto if an attempt to
	 * create it was already made (when the user enabled VFs for the
	 * first time)
	 */
	if (eng_grps->is_first_try)
		goto unlock_mutex;
	eng_grps->is_first_try = true;

	/* Create a group for kernel crypto only if no groups are configured */
	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
		if (eng_grps->grp[i].is_enabled)
			goto unlock_mutex;

	switch (pf_type) {
	case OTX_CPT_AE:
	case OTX_CPT_SE:
		tar_filename = OTX_CPT_UCODE_TAR_FILE_NAME;
		break;

	default:
		dev_err(&pdev->dev, "Unknown PF type %d\n", pf_type);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	tar_arch = load_tar_archive(&pdev->dev, tar_filename);
	if (!tar_arch)
		goto unlock_mutex;

	/*
	 * If the device supports SE engines and there is SE microcode in the
	 * tar archive, try to create an engine group with SE engines for
	 * kernel crypto functionality (symmetric crypto)
	 */
	tar_info[0] = get_uc_from_tar_archive(tar_arch, OTX_CPT_SE_TYPES);
	if (tar_info[0] &&
	    dev_supports_eng_type(eng_grps, OTX_CPT_SE_TYPES)) {

		engs[0].type = OTX_CPT_SE_TYPES;
		engs[0].count = eng_grps->avail.max_se_cnt;

		ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
					  (void **) tar_info, 1, true);
		if (ret)
			goto release_tar_arch;
	}
	/*
	 * If the device supports AE engines and there is AE microcode in the
	 * tar archive, try to create an engine group with AE engines for
	 * asymmetric crypto functionality.
	 */
	tar_info[0] = get_uc_from_tar_archive(tar_arch, OTX_CPT_AE_TYPES);
	if (tar_info[0] &&
	    dev_supports_eng_type(eng_grps, OTX_CPT_AE_TYPES)) {

		engs[0].type = OTX_CPT_AE_TYPES;
		engs[0].count = eng_grps->avail.max_ae_cnt;

		ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
					  (void **) tar_info, 1, true);
		if (ret)
			goto release_tar_arch;
	}

	print_dbg_info(&pdev->dev, eng_grps);
release_tar_arch:
	release_tar_archive(tar_arch);
unlock_mutex:
	mutex_unlock(&eng_grps->lock);
	return ret;
}

void otx_cpt_set_eng_grps_is_rdonly(struct otx_cpt_eng_grps *eng_grps,
				    bool is_rdonly)
{
	mutex_lock(&eng_grps->lock);

	eng_grps->is_rdonly = is_rdonly;

	mutex_unlock(&eng_grps->lock);
}

void otx_cpt_disable_all_cores(struct otx_cpt_device *cpt)
{
	int grp, timeout = 100;
	u64 reg;

	/* Disengage the cores from groups */
	for (grp = 0; grp < OTX_CPT_MAX_ENGINE_GROUPS; grp++) {
		writeq(0, cpt->reg_base + OTX_CPT_PF_GX_EN(grp));
		udelay(CSR_DELAY);
	}

	reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
	while (reg) {
		udelay(CSR_DELAY);
		reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
		if (!timeout--) {
			dev_warn(&cpt->pdev->dev, "Cores still busy\n");
			break;
		}
	}

	/* Disable the cores */
	writeq(0, cpt->reg_base + OTX_CPT_PF_EXE_CTL);
}

void otx_cpt_cleanup_eng_grps(struct pci_dev *pdev,
			      struct otx_cpt_eng_grps *eng_grps)
{
	struct otx_cpt_eng_grp_info *grp;
	int i, j;

	mutex_lock(&eng_grps->lock);
	if (eng_grps->is_ucode_load_created) {
		device_remove_file(&pdev->dev,
				   &eng_grps->ucode_load_attr);
		eng_grps->is_ucode_load_created = false;
	}

	/* First delete all mirroring engine groups */
	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
		if (eng_grps->grp[i].mirror.is_ena)
			delete_engine_group(&pdev->dev, &eng_grps->grp[i]);

	/* Delete remaining engine groups */
	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
		delete_engine_group(&pdev->dev, &eng_grps->grp[i]);

	/* Release memory */
	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
			kfree(grp->engs[j].bmap);
			grp->engs[j].bmap = NULL;
		}
	}

	mutex_unlock(&eng_grps->lock);
}

int otx_cpt_init_eng_grps(struct pci_dev *pdev,
			  struct otx_cpt_eng_grps *eng_grps, int pf_type)
{
	struct otx_cpt_eng_grp_info *grp;
	int i, j, ret = 0;

	mutex_init(&eng_grps->lock);
	eng_grps->obj = pci_get_drvdata(pdev);
	eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
	eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;

	eng_grps->engs_num = eng_grps->avail.max_se_cnt +
			     eng_grps->avail.max_ae_cnt;
	if (eng_grps->engs_num > OTX_CPT_MAX_ENGINES) {
		dev_err(&pdev->dev,
			"Number of engines %d > max supported %d\n",
			eng_grps->engs_num, OTX_CPT_MAX_ENGINES);
		ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		grp->g = eng_grps;
		grp->idx = i;

		snprintf(grp->sysfs_info_name, OTX_CPT_UCODE_NAME_LENGTH,
			 "engine_group%d", i);
		for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
			grp->engs[j].bmap =
				kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
					sizeof(long), GFP_KERNEL);
			if (!grp->engs[j].bmap) {
				ret = -ENOMEM;
				goto err;
			}
		}
	}

	switch (pf_type) {
	case OTX_CPT_SE:
		/* OcteonTX 83XX SE CPT PF has only SE engines attached */
		eng_grps->eng_types_supported = 1 << OTX_CPT_SE_TYPES;
		break;

	case OTX_CPT_AE:
		/* OcteonTX 83XX AE CPT PF has only AE engines attached */
		eng_grps->eng_types_supported = 1 << OTX_CPT_AE_TYPES;
		break;

	default:
		dev_err(&pdev->dev, "Unknown PF type %d\n", pf_type);
		ret = -EINVAL;
		goto err;
	}

	eng_grps->ucode_load_attr.show = NULL;
	eng_grps->ucode_load_attr.store = ucode_load_store;
	eng_grps->ucode_load_attr.attr.name = "ucode_load";
	eng_grps->ucode_load_attr.attr.mode = 0220;
	sysfs_attr_init(&eng_grps->ucode_load_attr.attr);
	ret = device_create_file(&pdev->dev,
				 &eng_grps->ucode_load_attr);
	if (ret)
		goto err;
	eng_grps->is_ucode_load_created = true;

	print_dbg_info(&pdev->dev, eng_grps);
	return ret;
err:
	otx_cpt_cleanup_eng_grps(pdev, eng_grps);
	return ret;
}