1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2019 HiSilicon Limited. */ 3 4 #include <linux/acpi.h> 5 #include <linux/aer.h> 6 #include <linux/bitops.h> 7 #include <linux/debugfs.h> 8 #include <linux/init.h> 9 #include <linux/io.h> 10 #include <linux/iommu.h> 11 #include <linux/kernel.h> 12 #include <linux/module.h> 13 #include <linux/pci.h> 14 #include <linux/seq_file.h> 15 #include <linux/topology.h> 16 #include <linux/uacce.h> 17 18 #include "sec.h" 19 20 #define SEC_VF_NUM 63 21 #define SEC_QUEUE_NUM_V1 4096 22 #define SEC_PF_PCI_DEVICE_ID 0xa255 23 #define SEC_VF_PCI_DEVICE_ID 0xa256 24 25 #define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF 26 #define SEC_BD_ERR_CHK_EN1 0x7ffff7fd 27 #define SEC_BD_ERR_CHK_EN3 0xffffbfff 28 29 #define SEC_SQE_SIZE 128 30 #define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH) 31 #define SEC_PF_DEF_Q_NUM 256 32 #define SEC_PF_DEF_Q_BASE 0 33 #define SEC_CTX_Q_NUM_DEF 2 34 #define SEC_CTX_Q_NUM_MAX 32 35 36 #define SEC_CTRL_CNT_CLR_CE 0x301120 37 #define SEC_CTRL_CNT_CLR_CE_BIT BIT(0) 38 #define SEC_CORE_INT_SOURCE 0x301010 39 #define SEC_CORE_INT_MASK 0x301000 40 #define SEC_CORE_INT_STATUS 0x301008 41 #define SEC_CORE_SRAM_ECC_ERR_INFO 0x301C14 42 #define SEC_ECC_NUM 16 43 #define SEC_ECC_MASH 0xFF 44 #define SEC_CORE_INT_DISABLE 0x0 45 #define SEC_CORE_INT_ENABLE 0x7c1ff 46 #define SEC_CORE_INT_CLEAR 0x7c1ff 47 #define SEC_SAA_ENABLE 0x17f 48 49 #define SEC_RAS_CE_REG 0x301050 50 #define SEC_RAS_FE_REG 0x301054 51 #define SEC_RAS_NFE_REG 0x301058 52 #define SEC_RAS_CE_ENB_MSK 0x88 53 #define SEC_RAS_FE_ENB_MSK 0x0 54 #define SEC_RAS_NFE_ENB_MSK 0x7c177 55 #define SEC_RAS_DISABLE 0x0 56 #define SEC_MEM_START_INIT_REG 0x301100 57 #define SEC_MEM_INIT_DONE_REG 0x301104 58 59 #define SEC_CONTROL_REG 0x301200 60 #define SEC_TRNG_EN_SHIFT 8 61 #define SEC_CLK_GATE_ENABLE BIT(3) 62 #define SEC_CLK_GATE_DISABLE (~BIT(3)) 63 #define SEC_AXI_SHUTDOWN_ENABLE BIT(12) 64 #define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF 65 66 #define SEC_INTERFACE_USER_CTRL0_REG 0x301220 67 #define SEC_INTERFACE_USER_CTRL1_REG 0x301224 68 #define SEC_SAA_EN_REG 0x301270 69 #define SEC_BD_ERR_CHK_EN_REG0 0x301380 70 #define SEC_BD_ERR_CHK_EN_REG1 0x301384 71 #define SEC_BD_ERR_CHK_EN_REG3 0x30138c 72 73 #define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15)) 74 #define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7)) 75 #define SEC_USER1_ENABLE_CONTEXT_SSV BIT(24) 76 #define SEC_USER1_ENABLE_DATA_SSV BIT(16) 77 #define SEC_USER1_WB_CONTEXT_SSV BIT(8) 78 #define SEC_USER1_WB_DATA_SSV BIT(0) 79 #define SEC_USER1_SVA_SET (SEC_USER1_ENABLE_CONTEXT_SSV | \ 80 SEC_USER1_ENABLE_DATA_SSV | \ 81 SEC_USER1_WB_CONTEXT_SSV | \ 82 SEC_USER1_WB_DATA_SSV) 83 #define SEC_USER1_SMMU_SVA (SEC_USER1_SMMU_NORMAL | SEC_USER1_SVA_SET) 84 #define SEC_USER1_SMMU_MASK (~SEC_USER1_SVA_SET) 85 #define SEC_CORE_INT_STATUS_M_ECC BIT(2) 86 87 #define SEC_DELAY_10_US 10 88 #define SEC_POLL_TIMEOUT_US 1000 89 #define SEC_DBGFS_VAL_MAX_LEN 20 90 #define SEC_SINGLE_PORT_MAX_TRANS 0x2060 91 92 #define SEC_SQE_MASK_OFFSET 64 93 #define SEC_SQE_MASK_LEN 48 94 95 struct sec_hw_error { 96 u32 int_msk; 97 const char *msg; 98 }; 99 100 struct sec_dfx_item { 101 const char *name; 102 u32 offset; 103 }; 104 105 static const char sec_name[] = "hisi_sec2"; 106 static struct dentry *sec_debugfs_root; 107 108 static struct hisi_qm_list sec_devices = { 109 .register_to_crypto = sec_register_to_crypto, 110 .unregister_from_crypto = sec_unregister_from_crypto, 111 }; 112 113 static const struct sec_hw_error sec_hw_errors[] = { 114 { 115 .int_msk = BIT(0), 116 .msg = "sec_axi_rresp_err_rint" 117 }, 118 { 119 .int_msk = BIT(1), 120 .msg = "sec_axi_bresp_err_rint" 121 }, 122 { 123 .int_msk = BIT(2), 124 .msg = "sec_ecc_2bit_err_rint" 125 }, 126 { 127 .int_msk = BIT(3), 128 .msg = "sec_ecc_1bit_err_rint" 129 }, 130 { 131 .int_msk = BIT(4), 132 .msg = "sec_req_trng_timeout_rint" 133 }, 134 { 135 .int_msk = BIT(5), 136 .msg = "sec_fsm_hbeat_rint" 137 }, 138 { 139 .int_msk = BIT(6), 140 .msg = "sec_channel_req_rng_timeout_rint" 141 }, 142 { 143 .int_msk = BIT(7), 144 .msg = "sec_bd_err_rint" 145 }, 146 { 147 .int_msk = BIT(8), 148 .msg = "sec_chain_buff_err_rint" 149 }, 150 { 151 .int_msk = BIT(14), 152 .msg = "sec_no_secure_access" 153 }, 154 { 155 .int_msk = BIT(15), 156 .msg = "sec_wrapping_key_auth_err" 157 }, 158 { 159 .int_msk = BIT(16), 160 .msg = "sec_km_key_crc_fail" 161 }, 162 { 163 .int_msk = BIT(17), 164 .msg = "sec_axi_poison_err" 165 }, 166 { 167 .int_msk = BIT(18), 168 .msg = "sec_sva_err" 169 }, 170 {} 171 }; 172 173 static const char * const sec_dbg_file_name[] = { 174 [SEC_CLEAR_ENABLE] = "clear_enable", 175 }; 176 177 static struct sec_dfx_item sec_dfx_labels[] = { 178 {"send_cnt", offsetof(struct sec_dfx, send_cnt)}, 179 {"recv_cnt", offsetof(struct sec_dfx, recv_cnt)}, 180 {"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)}, 181 {"recv_busy_cnt", offsetof(struct sec_dfx, recv_busy_cnt)}, 182 {"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)}, 183 {"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)}, 184 {"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)}, 185 }; 186 187 static const struct debugfs_reg32 sec_dfx_regs[] = { 188 {"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010}, 189 {"SEC_SAA_EN ", 0x301270}, 190 {"SEC_BD_LATENCY_MIN ", 0x301600}, 191 {"SEC_BD_LATENCY_MAX ", 0x301608}, 192 {"SEC_BD_LATENCY_AVG ", 0x30160C}, 193 {"SEC_BD_NUM_IN_SAA0 ", 0x301670}, 194 {"SEC_BD_NUM_IN_SAA1 ", 0x301674}, 195 {"SEC_BD_NUM_IN_SEC ", 0x301680}, 196 {"SEC_ECC_1BIT_CNT ", 0x301C00}, 197 {"SEC_ECC_1BIT_INFO ", 0x301C04}, 198 {"SEC_ECC_2BIT_CNT ", 0x301C10}, 199 {"SEC_ECC_2BIT_INFO ", 0x301C14}, 200 {"SEC_BD_SAA0 ", 0x301C20}, 201 {"SEC_BD_SAA1 ", 0x301C24}, 202 {"SEC_BD_SAA2 ", 0x301C28}, 203 {"SEC_BD_SAA3 ", 0x301C2C}, 204 {"SEC_BD_SAA4 ", 0x301C30}, 205 {"SEC_BD_SAA5 ", 0x301C34}, 206 {"SEC_BD_SAA6 ", 0x301C38}, 207 {"SEC_BD_SAA7 ", 0x301C3C}, 208 {"SEC_BD_SAA8 ", 0x301C40}, 209 }; 210 211 static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp) 212 { 213 return q_num_set(val, kp, SEC_PF_PCI_DEVICE_ID); 214 } 215 216 static const struct kernel_param_ops sec_pf_q_num_ops = { 217 .set = sec_pf_q_num_set, 218 .get = param_get_int, 219 }; 220 221 static u32 pf_q_num = SEC_PF_DEF_Q_NUM; 222 module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444); 223 MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 2-4096, v2 2-1024)"); 224 225 static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp) 226 { 227 u32 ctx_q_num; 228 int ret; 229 230 if (!val) 231 return -EINVAL; 232 233 ret = kstrtou32(val, 10, &ctx_q_num); 234 if (ret) 235 return -EINVAL; 236 237 if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) { 238 pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num); 239 return -EINVAL; 240 } 241 242 return param_set_int(val, kp); 243 } 244 245 static const struct kernel_param_ops sec_ctx_q_num_ops = { 246 .set = sec_ctx_q_num_set, 247 .get = param_get_int, 248 }; 249 static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF; 250 module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444); 251 MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (2 default, 2, 4, ..., 32)"); 252 253 static const struct kernel_param_ops vfs_num_ops = { 254 .set = vfs_num_set, 255 .get = param_get_int, 256 }; 257 258 static u32 vfs_num; 259 module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444); 260 MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)"); 261 262 void sec_destroy_qps(struct hisi_qp **qps, int qp_num) 263 { 264 hisi_qm_free_qps(qps, qp_num); 265 kfree(qps); 266 } 267 268 struct hisi_qp **sec_create_qps(void) 269 { 270 int node = cpu_to_node(smp_processor_id()); 271 u32 ctx_num = ctx_q_num; 272 struct hisi_qp **qps; 273 int ret; 274 275 qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL); 276 if (!qps) 277 return NULL; 278 279 ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, 0, node, qps); 280 if (!ret) 281 return qps; 282 283 kfree(qps); 284 return NULL; 285 } 286 287 static const struct kernel_param_ops sec_uacce_mode_ops = { 288 .set = uacce_mode_set, 289 .get = param_get_int, 290 }; 291 292 /* 293 * uacce_mode = 0 means sec only register to crypto, 294 * uacce_mode = 1 means sec both register to crypto and uacce. 295 */ 296 static u32 uacce_mode = UACCE_MODE_NOUACCE; 297 module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444); 298 MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC); 299 300 static const struct pci_device_id sec_dev_ids[] = { 301 { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) }, 302 { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) }, 303 { 0, } 304 }; 305 MODULE_DEVICE_TABLE(pci, sec_dev_ids); 306 307 static u8 sec_get_endian(struct hisi_qm *qm) 308 { 309 u32 reg; 310 311 /* 312 * As for VF, it is a wrong way to get endian setting by 313 * reading a register of the engine 314 */ 315 if (qm->pdev->is_virtfn) { 316 dev_err_ratelimited(&qm->pdev->dev, 317 "cannot access a register in VF!\n"); 318 return SEC_LE; 319 } 320 reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); 321 /* BD little endian mode */ 322 if (!(reg & BIT(0))) 323 return SEC_LE; 324 325 /* BD 32-bits big endian mode */ 326 else if (!(reg & BIT(1))) 327 return SEC_32BE; 328 329 /* BD 64-bits big endian mode */ 330 else 331 return SEC_64BE; 332 } 333 334 static int sec_engine_init(struct hisi_qm *qm) 335 { 336 int ret; 337 u32 reg; 338 339 /* disable clock gate control */ 340 reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); 341 reg &= SEC_CLK_GATE_DISABLE; 342 writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); 343 344 writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG); 345 346 ret = readl_relaxed_poll_timeout(qm->io_base + SEC_MEM_INIT_DONE_REG, 347 reg, reg & 0x1, SEC_DELAY_10_US, 348 SEC_POLL_TIMEOUT_US); 349 if (ret) { 350 pci_err(qm->pdev, "fail to init sec mem\n"); 351 return ret; 352 } 353 354 reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); 355 reg |= (0x1 << SEC_TRNG_EN_SHIFT); 356 writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); 357 358 reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL0_REG); 359 reg |= SEC_USER0_SMMU_NORMAL; 360 writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL0_REG); 361 362 reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL1_REG); 363 reg &= SEC_USER1_SMMU_MASK; 364 if (qm->use_sva && qm->ver == QM_HW_V2) 365 reg |= SEC_USER1_SMMU_SVA; 366 else 367 reg |= SEC_USER1_SMMU_NORMAL; 368 writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG); 369 370 writel(SEC_SINGLE_PORT_MAX_TRANS, 371 qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS); 372 373 writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG); 374 375 /* Enable sm4 extra mode, as ctr/ecb */ 376 writel_relaxed(SEC_BD_ERR_CHK_EN0, 377 qm->io_base + SEC_BD_ERR_CHK_EN_REG0); 378 /* Enable sm4 xts mode multiple iv */ 379 writel_relaxed(SEC_BD_ERR_CHK_EN1, 380 qm->io_base + SEC_BD_ERR_CHK_EN_REG1); 381 writel_relaxed(SEC_BD_ERR_CHK_EN3, 382 qm->io_base + SEC_BD_ERR_CHK_EN_REG3); 383 384 /* config endian */ 385 reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); 386 reg |= sec_get_endian(qm); 387 writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); 388 389 return 0; 390 } 391 392 static int sec_set_user_domain_and_cache(struct hisi_qm *qm) 393 { 394 /* qm user domain */ 395 writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1); 396 writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE); 397 writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1); 398 writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE); 399 writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE); 400 401 /* qm cache */ 402 writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG); 403 writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE); 404 405 /* disable FLR triggered by BME(bus master enable) */ 406 writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG); 407 writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE); 408 409 /* enable sqc,cqc writeback */ 410 writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE | 411 CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) | 412 FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL); 413 414 return sec_engine_init(qm); 415 } 416 417 /* sec_debug_regs_clear() - clear the sec debug regs */ 418 static void sec_debug_regs_clear(struct hisi_qm *qm) 419 { 420 int i; 421 422 /* clear sec dfx regs */ 423 writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE); 424 for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) 425 readl(qm->io_base + sec_dfx_regs[i].offset); 426 427 /* clear rdclr_en */ 428 writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE); 429 430 hisi_qm_debug_regs_clear(qm); 431 } 432 433 static void sec_hw_error_enable(struct hisi_qm *qm) 434 { 435 u32 val; 436 437 if (qm->ver == QM_HW_V1) { 438 writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK); 439 pci_info(qm->pdev, "V1 not support hw error handle\n"); 440 return; 441 } 442 443 val = readl(qm->io_base + SEC_CONTROL_REG); 444 445 /* clear SEC hw error source if having */ 446 writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE); 447 448 /* enable SEC hw error interrupts */ 449 writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK); 450 451 /* enable RAS int */ 452 writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG); 453 writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG); 454 writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG); 455 456 /* enable SEC block master OOO when m-bit error occur */ 457 val = val | SEC_AXI_SHUTDOWN_ENABLE; 458 459 writel(val, qm->io_base + SEC_CONTROL_REG); 460 } 461 462 static void sec_hw_error_disable(struct hisi_qm *qm) 463 { 464 u32 val; 465 466 val = readl(qm->io_base + SEC_CONTROL_REG); 467 468 /* disable RAS int */ 469 writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG); 470 writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG); 471 writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG); 472 473 /* disable SEC hw error interrupts */ 474 writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK); 475 476 /* disable SEC block master OOO when m-bit error occur */ 477 val = val & SEC_AXI_SHUTDOWN_DISABLE; 478 479 writel(val, qm->io_base + SEC_CONTROL_REG); 480 } 481 482 static u32 sec_clear_enable_read(struct sec_debug_file *file) 483 { 484 struct hisi_qm *qm = file->qm; 485 486 return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) & 487 SEC_CTRL_CNT_CLR_CE_BIT; 488 } 489 490 static int sec_clear_enable_write(struct sec_debug_file *file, u32 val) 491 { 492 struct hisi_qm *qm = file->qm; 493 u32 tmp; 494 495 if (val != 1 && val) 496 return -EINVAL; 497 498 tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) & 499 ~SEC_CTRL_CNT_CLR_CE_BIT) | val; 500 writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE); 501 502 return 0; 503 } 504 505 static ssize_t sec_debug_read(struct file *filp, char __user *buf, 506 size_t count, loff_t *pos) 507 { 508 struct sec_debug_file *file = filp->private_data; 509 char tbuf[SEC_DBGFS_VAL_MAX_LEN]; 510 u32 val; 511 int ret; 512 513 spin_lock_irq(&file->lock); 514 515 switch (file->index) { 516 case SEC_CLEAR_ENABLE: 517 val = sec_clear_enable_read(file); 518 break; 519 default: 520 spin_unlock_irq(&file->lock); 521 return -EINVAL; 522 } 523 524 spin_unlock_irq(&file->lock); 525 ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val); 526 527 return simple_read_from_buffer(buf, count, pos, tbuf, ret); 528 } 529 530 static ssize_t sec_debug_write(struct file *filp, const char __user *buf, 531 size_t count, loff_t *pos) 532 { 533 struct sec_debug_file *file = filp->private_data; 534 char tbuf[SEC_DBGFS_VAL_MAX_LEN]; 535 unsigned long val; 536 int len, ret; 537 538 if (*pos != 0) 539 return 0; 540 541 if (count >= SEC_DBGFS_VAL_MAX_LEN) 542 return -ENOSPC; 543 544 len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1, 545 pos, buf, count); 546 if (len < 0) 547 return len; 548 549 tbuf[len] = '\0'; 550 if (kstrtoul(tbuf, 0, &val)) 551 return -EFAULT; 552 553 spin_lock_irq(&file->lock); 554 555 switch (file->index) { 556 case SEC_CLEAR_ENABLE: 557 ret = sec_clear_enable_write(file, val); 558 if (ret) 559 goto err_input; 560 break; 561 default: 562 ret = -EINVAL; 563 goto err_input; 564 } 565 566 spin_unlock_irq(&file->lock); 567 568 return count; 569 570 err_input: 571 spin_unlock_irq(&file->lock); 572 return ret; 573 } 574 575 static const struct file_operations sec_dbg_fops = { 576 .owner = THIS_MODULE, 577 .open = simple_open, 578 .read = sec_debug_read, 579 .write = sec_debug_write, 580 }; 581 582 static int sec_debugfs_atomic64_get(void *data, u64 *val) 583 { 584 *val = atomic64_read((atomic64_t *)data); 585 586 return 0; 587 } 588 589 static int sec_debugfs_atomic64_set(void *data, u64 val) 590 { 591 if (val) 592 return -EINVAL; 593 594 atomic64_set((atomic64_t *)data, 0); 595 596 return 0; 597 } 598 599 DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get, 600 sec_debugfs_atomic64_set, "%lld\n"); 601 602 static int sec_core_debug_init(struct hisi_qm *qm) 603 { 604 struct sec_dev *sec = container_of(qm, struct sec_dev, qm); 605 struct device *dev = &qm->pdev->dev; 606 struct sec_dfx *dfx = &sec->debug.dfx; 607 struct debugfs_regset32 *regset; 608 struct dentry *tmp_d; 609 int i; 610 611 tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root); 612 613 regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); 614 if (!regset) 615 return -ENOMEM; 616 617 regset->regs = sec_dfx_regs; 618 regset->nregs = ARRAY_SIZE(sec_dfx_regs); 619 regset->base = qm->io_base; 620 621 if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) 622 debugfs_create_regset32("regs", 0444, tmp_d, regset); 623 624 for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) { 625 atomic64_t *data = (atomic64_t *)((uintptr_t)dfx + 626 sec_dfx_labels[i].offset); 627 debugfs_create_file(sec_dfx_labels[i].name, 0644, 628 tmp_d, data, &sec_atomic64_ops); 629 } 630 631 return 0; 632 } 633 634 static int sec_debug_init(struct hisi_qm *qm) 635 { 636 struct sec_dev *sec = container_of(qm, struct sec_dev, qm); 637 int i; 638 639 if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) { 640 for (i = SEC_CLEAR_ENABLE; i < SEC_DEBUG_FILE_NUM; i++) { 641 spin_lock_init(&sec->debug.files[i].lock); 642 sec->debug.files[i].index = i; 643 sec->debug.files[i].qm = qm; 644 645 debugfs_create_file(sec_dbg_file_name[i], 0600, 646 qm->debug.debug_root, 647 sec->debug.files + i, 648 &sec_dbg_fops); 649 } 650 } 651 652 return sec_core_debug_init(qm); 653 } 654 655 static int sec_debugfs_init(struct hisi_qm *qm) 656 { 657 struct device *dev = &qm->pdev->dev; 658 int ret; 659 660 qm->debug.debug_root = debugfs_create_dir(dev_name(dev), 661 sec_debugfs_root); 662 qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET; 663 qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN; 664 hisi_qm_debug_init(qm); 665 666 ret = sec_debug_init(qm); 667 if (ret) 668 goto failed_to_create; 669 670 return 0; 671 672 failed_to_create: 673 debugfs_remove_recursive(sec_debugfs_root); 674 return ret; 675 } 676 677 static void sec_debugfs_exit(struct hisi_qm *qm) 678 { 679 debugfs_remove_recursive(qm->debug.debug_root); 680 } 681 682 static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts) 683 { 684 const struct sec_hw_error *errs = sec_hw_errors; 685 struct device *dev = &qm->pdev->dev; 686 u32 err_val; 687 688 while (errs->msg) { 689 if (errs->int_msk & err_sts) { 690 dev_err(dev, "%s [error status=0x%x] found\n", 691 errs->msg, errs->int_msk); 692 693 if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) { 694 err_val = readl(qm->io_base + 695 SEC_CORE_SRAM_ECC_ERR_INFO); 696 dev_err(dev, "multi ecc sram num=0x%x\n", 697 ((err_val) >> SEC_ECC_NUM) & 698 SEC_ECC_MASH); 699 } 700 } 701 errs++; 702 } 703 } 704 705 static u32 sec_get_hw_err_status(struct hisi_qm *qm) 706 { 707 return readl(qm->io_base + SEC_CORE_INT_STATUS); 708 } 709 710 static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) 711 { 712 writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE); 713 } 714 715 static void sec_open_axi_master_ooo(struct hisi_qm *qm) 716 { 717 u32 val; 718 719 val = readl(qm->io_base + SEC_CONTROL_REG); 720 writel(val & SEC_AXI_SHUTDOWN_DISABLE, qm->io_base + SEC_CONTROL_REG); 721 writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG); 722 } 723 724 static void sec_err_info_init(struct hisi_qm *qm) 725 { 726 struct hisi_qm_err_info *err_info = &qm->err_info; 727 728 err_info->ce = QM_BASE_CE; 729 err_info->fe = 0; 730 err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC; 731 err_info->dev_ce_mask = SEC_RAS_CE_ENB_MSK; 732 err_info->msi_wr_port = BIT(0); 733 err_info->acpi_rst = "SRST"; 734 err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT | 735 QM_ACC_WB_NOT_READY_TIMEOUT; 736 } 737 738 static const struct hisi_qm_err_ini sec_err_ini = { 739 .hw_init = sec_set_user_domain_and_cache, 740 .hw_err_enable = sec_hw_error_enable, 741 .hw_err_disable = sec_hw_error_disable, 742 .get_dev_hw_err_status = sec_get_hw_err_status, 743 .clear_dev_hw_err_status = sec_clear_hw_err_status, 744 .log_dev_hw_err = sec_log_hw_error, 745 .open_axi_master_ooo = sec_open_axi_master_ooo, 746 .err_info_init = sec_err_info_init, 747 }; 748 749 static int sec_pf_probe_init(struct sec_dev *sec) 750 { 751 struct hisi_qm *qm = &sec->qm; 752 int ret; 753 754 qm->err_ini = &sec_err_ini; 755 qm->err_ini->err_info_init(qm); 756 757 ret = sec_set_user_domain_and_cache(qm); 758 if (ret) 759 return ret; 760 761 hisi_qm_dev_err_init(qm); 762 sec_debug_regs_clear(qm); 763 764 return 0; 765 } 766 767 static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) 768 { 769 int ret; 770 771 qm->pdev = pdev; 772 qm->ver = pdev->revision; 773 qm->algs = "cipher\ndigest\naead"; 774 qm->mode = uacce_mode; 775 qm->sqe_size = SEC_SQE_SIZE; 776 qm->dev_name = sec_name; 777 778 qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ? 779 QM_HW_PF : QM_HW_VF; 780 if (qm->fun_type == QM_HW_PF) { 781 qm->qp_base = SEC_PF_DEF_Q_BASE; 782 qm->qp_num = pf_q_num; 783 qm->debug.curr_qm_qp_num = pf_q_num; 784 qm->qm_list = &sec_devices; 785 } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { 786 /* 787 * have no way to get qm configure in VM in v1 hardware, 788 * so currently force PF to uses SEC_PF_DEF_Q_NUM, and force 789 * to trigger only one VF in v1 hardware. 790 * v2 hardware has no such problem. 791 */ 792 qm->qp_base = SEC_PF_DEF_Q_NUM; 793 qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM; 794 } 795 796 /* 797 * WQ_HIGHPRI: SEC request must be low delayed, 798 * so need a high priority workqueue. 799 * WQ_UNBOUND: SEC task is likely with long 800 * running CPU intensive workloads. 801 */ 802 qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | 803 WQ_UNBOUND, num_online_cpus(), 804 pci_name(qm->pdev)); 805 if (!qm->wq) { 806 pci_err(qm->pdev, "fail to alloc workqueue\n"); 807 return -ENOMEM; 808 } 809 810 ret = hisi_qm_init(qm); 811 if (ret) 812 destroy_workqueue(qm->wq); 813 814 return ret; 815 } 816 817 static void sec_qm_uninit(struct hisi_qm *qm) 818 { 819 hisi_qm_uninit(qm); 820 } 821 822 static int sec_probe_init(struct sec_dev *sec) 823 { 824 struct hisi_qm *qm = &sec->qm; 825 int ret; 826 827 if (qm->fun_type == QM_HW_PF) { 828 ret = sec_pf_probe_init(sec); 829 if (ret) 830 return ret; 831 } 832 833 return 0; 834 } 835 836 static void sec_probe_uninit(struct hisi_qm *qm) 837 { 838 hisi_qm_dev_err_uninit(qm); 839 840 destroy_workqueue(qm->wq); 841 } 842 843 static void sec_iommu_used_check(struct sec_dev *sec) 844 { 845 struct iommu_domain *domain; 846 struct device *dev = &sec->qm.pdev->dev; 847 848 domain = iommu_get_domain_for_dev(dev); 849 850 /* Check if iommu is used */ 851 sec->iommu_used = false; 852 if (domain) { 853 if (domain->type & __IOMMU_DOMAIN_PAGING) 854 sec->iommu_used = true; 855 dev_info(dev, "SMMU Opened, the iommu type = %u\n", 856 domain->type); 857 } 858 } 859 860 static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) 861 { 862 struct sec_dev *sec; 863 struct hisi_qm *qm; 864 int ret; 865 866 sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL); 867 if (!sec) 868 return -ENOMEM; 869 870 qm = &sec->qm; 871 ret = sec_qm_init(qm, pdev); 872 if (ret) { 873 pci_err(pdev, "Failed to init SEC QM (%d)!\n", ret); 874 return ret; 875 } 876 877 sec->ctx_q_num = ctx_q_num; 878 sec_iommu_used_check(sec); 879 880 ret = sec_probe_init(sec); 881 if (ret) { 882 pci_err(pdev, "Failed to probe!\n"); 883 goto err_qm_uninit; 884 } 885 886 ret = hisi_qm_start(qm); 887 if (ret) { 888 pci_err(pdev, "Failed to start sec qm!\n"); 889 goto err_probe_uninit; 890 } 891 892 ret = sec_debugfs_init(qm); 893 if (ret) 894 pci_warn(pdev, "Failed to init debugfs!\n"); 895 896 if (qm->qp_num >= ctx_q_num) { 897 ret = hisi_qm_alg_register(qm, &sec_devices); 898 if (ret < 0) { 899 pr_err("Failed to register driver to crypto.\n"); 900 goto err_qm_stop; 901 } 902 } else { 903 pci_warn(qm->pdev, 904 "Failed to use kernel mode, qp not enough!\n"); 905 } 906 907 if (qm->uacce) { 908 ret = uacce_register(qm->uacce); 909 if (ret) { 910 pci_err(pdev, "failed to register uacce (%d)!\n", ret); 911 goto err_alg_unregister; 912 } 913 } 914 915 if (qm->fun_type == QM_HW_PF && vfs_num) { 916 ret = hisi_qm_sriov_enable(pdev, vfs_num); 917 if (ret < 0) 918 goto err_alg_unregister; 919 } 920 921 return 0; 922 923 err_alg_unregister: 924 hisi_qm_alg_unregister(qm, &sec_devices); 925 err_qm_stop: 926 sec_debugfs_exit(qm); 927 hisi_qm_stop(qm, QM_NORMAL); 928 err_probe_uninit: 929 sec_probe_uninit(qm); 930 err_qm_uninit: 931 sec_qm_uninit(qm); 932 return ret; 933 } 934 935 static void sec_remove(struct pci_dev *pdev) 936 { 937 struct hisi_qm *qm = pci_get_drvdata(pdev); 938 939 hisi_qm_wait_task_finish(qm, &sec_devices); 940 if (qm->qp_num >= ctx_q_num) 941 hisi_qm_alg_unregister(qm, &sec_devices); 942 943 if (qm->fun_type == QM_HW_PF && qm->vfs_num) 944 hisi_qm_sriov_disable(pdev, true); 945 946 sec_debugfs_exit(qm); 947 948 (void)hisi_qm_stop(qm, QM_NORMAL); 949 950 if (qm->fun_type == QM_HW_PF) 951 sec_debug_regs_clear(qm); 952 953 sec_probe_uninit(qm); 954 955 sec_qm_uninit(qm); 956 } 957 958 static const struct pci_error_handlers sec_err_handler = { 959 .error_detected = hisi_qm_dev_err_detected, 960 .slot_reset = hisi_qm_dev_slot_reset, 961 .reset_prepare = hisi_qm_reset_prepare, 962 .reset_done = hisi_qm_reset_done, 963 }; 964 965 static struct pci_driver sec_pci_driver = { 966 .name = "hisi_sec2", 967 .id_table = sec_dev_ids, 968 .probe = sec_probe, 969 .remove = sec_remove, 970 .err_handler = &sec_err_handler, 971 .sriov_configure = hisi_qm_sriov_configure, 972 .shutdown = hisi_qm_dev_shutdown, 973 }; 974 975 static void sec_register_debugfs(void) 976 { 977 if (!debugfs_initialized()) 978 return; 979 980 sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL); 981 } 982 983 static void sec_unregister_debugfs(void) 984 { 985 debugfs_remove_recursive(sec_debugfs_root); 986 } 987 988 static int __init sec_init(void) 989 { 990 int ret; 991 992 hisi_qm_init_list(&sec_devices); 993 sec_register_debugfs(); 994 995 ret = pci_register_driver(&sec_pci_driver); 996 if (ret < 0) { 997 sec_unregister_debugfs(); 998 pr_err("Failed to register pci driver.\n"); 999 return ret; 1000 } 1001 1002 return 0; 1003 } 1004 1005 static void __exit sec_exit(void) 1006 { 1007 pci_unregister_driver(&sec_pci_driver); 1008 sec_unregister_debugfs(); 1009 } 1010 1011 module_init(sec_init); 1012 module_exit(sec_exit); 1013 1014 MODULE_LICENSE("GPL v2"); 1015 MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>"); 1016 MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>"); 1017 MODULE_AUTHOR("Kai Ye <yekai13@huawei.com>"); 1018 MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>"); 1019 MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator"); 1020