// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
#include <linux/uacce.h>

#include "sec.h"

#define SEC_VF_NUM			63
#define SEC_QUEUE_NUM_V1		4096
#define SEC_PF_PCI_DEVICE_ID		0xa255
#define SEC_VF_PCI_DEVICE_ID		0xa256

#define SEC_BD_ERR_CHK_EN0		0xEFFFFFFF
#define SEC_BD_ERR_CHK_EN1		0x7ffff7fd
#define SEC_BD_ERR_CHK_EN3		0xffffbfff

#define SEC_SQE_SIZE			128
#define SEC_SQ_SIZE			(SEC_SQE_SIZE * QM_Q_DEPTH)
#define SEC_PF_DEF_Q_NUM		256
#define SEC_PF_DEF_Q_BASE		0
#define SEC_CTX_Q_NUM_DEF		2
#define SEC_CTX_Q_NUM_MAX		32

#define SEC_CTRL_CNT_CLR_CE		0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT		BIT(0)
#define SEC_CORE_INT_SOURCE		0x301010
#define SEC_CORE_INT_MASK		0x301000
#define SEC_CORE_INT_STATUS		0x301008
#define SEC_CORE_SRAM_ECC_ERR_INFO	0x301C14
#define SEC_ECC_NUM			16
#define SEC_ECC_MASH			0xFF
#define SEC_CORE_INT_DISABLE		0x0
#define SEC_CORE_INT_ENABLE		0x7c1ff
#define SEC_CORE_INT_CLEAR		0x7c1ff
#define SEC_SAA_ENABLE			0x17f

#define SEC_RAS_CE_REG			0x301050
#define SEC_RAS_FE_REG			0x301054
#define SEC_RAS_NFE_REG			0x301058
#define SEC_RAS_CE_ENB_MSK		0x88
#define SEC_RAS_FE_ENB_MSK		0x0
#define SEC_RAS_NFE_ENB_MSK		0x7c177
#define SEC_OOO_SHUTDOWN_SEL		0x301014
#define SEC_RAS_DISABLE			0x0
#define SEC_MEM_START_INIT_REG		0x301100
#define SEC_MEM_INIT_DONE_REG		0x301104

#define SEC_CONTROL_REG			0x301200
#define SEC_TRNG_EN_SHIFT		8
#define SEC_CLK_GATE_ENABLE		BIT(3)
#define SEC_CLK_GATE_DISABLE		(~BIT(3))
#define SEC_AXI_SHUTDOWN_ENABLE		BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE	0xFFFFEFFF

#define SEC_INTERFACE_USER_CTRL0_REG	0x301220
#define SEC_INTERFACE_USER_CTRL1_REG	0x301224
#define SEC_SAA_EN_REG			0x301270
#define SEC_BD_ERR_CHK_EN_REG0		0x301380
#define SEC_BD_ERR_CHK_EN_REG1		0x301384
#define SEC_BD_ERR_CHK_EN_REG3		0x30138c

#define SEC_USER0_SMMU_NORMAL		(BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL		(BIT(31) | BIT(23) | BIT(15) | BIT(7))
#define SEC_USER1_ENABLE_CONTEXT_SSV	BIT(24)
#define SEC_USER1_ENABLE_DATA_SSV	BIT(16)
#define SEC_USER1_WB_CONTEXT_SSV	BIT(8)
#define SEC_USER1_WB_DATA_SSV		BIT(0)
#define SEC_USER1_SVA_SET		(SEC_USER1_ENABLE_CONTEXT_SSV | \
					 SEC_USER1_ENABLE_DATA_SSV | \
					 SEC_USER1_WB_CONTEXT_SSV | \
					 SEC_USER1_WB_DATA_SSV)
#define SEC_USER1_SMMU_SVA		(SEC_USER1_SMMU_NORMAL | SEC_USER1_SVA_SET)
#define SEC_USER1_SMMU_MASK		(~SEC_USER1_SVA_SET)
#define SEC_CORE_INT_STATUS_M_ECC	BIT(2)

#define SEC_PREFETCH_CFG		0x301130
#define SEC_SVA_TRANS			0x301EC4
#define SEC_PREFETCH_ENABLE		(~(BIT(0) | BIT(1) | BIT(11)))
#define SEC_PREFETCH_DISABLE		BIT(1)
#define SEC_SVA_DISABLE_READY		(BIT(7) | BIT(11))

#define SEC_DELAY_10_US			10
#define SEC_POLL_TIMEOUT_US		1000
#define SEC_DBGFS_VAL_MAX_LEN		20
#define SEC_SINGLE_PORT_MAX_TRANS	0x2060

#define SEC_SQE_MASK_OFFSET		64
#define SEC_SQE_MASK_LEN		48
#define SEC_SHAPER_TYPE_RATE		128

struct sec_hw_error {
	u32 int_msk;
	const char *msg;
};

struct sec_dfx_item {
	const char *name;
	u32 offset;
};

static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;

static struct hisi_qm_list sec_devices = {
	.register_to_crypto = sec_register_to_crypto,
	.unregister_from_crypto = sec_unregister_from_crypto,
};

static const struct sec_hw_error sec_hw_errors[] = {
	{
		.int_msk = BIT(0),
		.msg = "sec_axi_rresp_err_rint"
	},
	{
		.int_msk = BIT(1),
		.msg = "sec_axi_bresp_err_rint"
	},
	{
		.int_msk = BIT(2),
		.msg = "sec_ecc_2bit_err_rint"
	},
	{
		.int_msk = BIT(3),
		.msg = "sec_ecc_1bit_err_rint"
	},
	{
		.int_msk = BIT(4),
		.msg = "sec_req_trng_timeout_rint"
	},
	{
		.int_msk = BIT(5),
		.msg = "sec_fsm_hbeat_rint"
	},
	{
		.int_msk = BIT(6),
		.msg = "sec_channel_req_rng_timeout_rint"
	},
	{
		.int_msk = BIT(7),
		.msg = "sec_bd_err_rint"
	},
	{
		.int_msk = BIT(8),
		.msg = "sec_chain_buff_err_rint"
	},
	{
		.int_msk = BIT(14),
		.msg = "sec_no_secure_access"
	},
	{
		.int_msk = BIT(15),
		.msg = "sec_wrapping_key_auth_err"
	},
	{
		.int_msk = BIT(16),
		.msg = "sec_km_key_crc_fail"
	},
	{
		.int_msk = BIT(17),
		.msg = "sec_axi_poison_err"
	},
	{
		.int_msk = BIT(18),
		.msg = "sec_sva_err"
	},
	{}
};

static const char * const sec_dbg_file_name[] = {
	[SEC_CLEAR_ENABLE] = "clear_enable",
};

static struct sec_dfx_item sec_dfx_labels[] = {
	{"send_cnt", offsetof(struct sec_dfx, send_cnt)},
	{"recv_cnt", offsetof(struct sec_dfx, recv_cnt)},
	{"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)},
	{"recv_busy_cnt", offsetof(struct sec_dfx, recv_busy_cnt)},
	{"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)},
	{"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)},
	{"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)},
};

static const struct debugfs_reg32 sec_dfx_regs[] = {
	{"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
	{"SEC_SAA_EN ", 0x301270},
	{"SEC_BD_LATENCY_MIN ", 0x301600},
	{"SEC_BD_LATENCY_MAX ", 0x301608},
	{"SEC_BD_LATENCY_AVG ", 0x30160C},
	{"SEC_BD_NUM_IN_SAA0 ", 0x301670},
	{"SEC_BD_NUM_IN_SAA1 ", 0x301674},
	{"SEC_BD_NUM_IN_SEC ", 0x301680},
	{"SEC_ECC_1BIT_CNT ", 0x301C00},
	{"SEC_ECC_1BIT_INFO ", 0x301C04},
	{"SEC_ECC_2BIT_CNT ", 0x301C10},
	{"SEC_ECC_2BIT_INFO ", 0x301C14},
	{"SEC_BD_SAA0 ", 0x301C20},
	{"SEC_BD_SAA1 ", 0x301C24},
	{"SEC_BD_SAA2 ", 0x301C28},
	{"SEC_BD_SAA3 ", 0x301C2C},
	{"SEC_BD_SAA4 ", 0x301C30},
	{"SEC_BD_SAA5 ", 0x301C34},
	{"SEC_BD_SAA6 ", 0x301C38},
	{"SEC_BD_SAA7 ", 0x301C3C},
	{"SEC_BD_SAA8 ", 0x301C40},
};

static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	return q_num_set(val, kp, SEC_PF_PCI_DEVICE_ID);
}

static const struct kernel_param_ops sec_pf_q_num_ops = {
	.set = sec_pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 2-4096, v2 2-1024)");

static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
{
	u32 ctx_q_num;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &ctx_q_num);
	if (ret)
		return -EINVAL;

	if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
		pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
invalid!\n", ctx_q_num); 247 return -EINVAL; 248 } 249 250 return param_set_int(val, kp); 251 } 252 253 static const struct kernel_param_ops sec_ctx_q_num_ops = { 254 .set = sec_ctx_q_num_set, 255 .get = param_get_int, 256 }; 257 static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF; 258 module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444); 259 MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (2 default, 2, 4, ..., 32)"); 260 261 static const struct kernel_param_ops vfs_num_ops = { 262 .set = vfs_num_set, 263 .get = param_get_int, 264 }; 265 266 static u32 vfs_num; 267 module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444); 268 MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)"); 269 270 void sec_destroy_qps(struct hisi_qp **qps, int qp_num) 271 { 272 hisi_qm_free_qps(qps, qp_num); 273 kfree(qps); 274 } 275 276 struct hisi_qp **sec_create_qps(void) 277 { 278 int node = cpu_to_node(smp_processor_id()); 279 u32 ctx_num = ctx_q_num; 280 struct hisi_qp **qps; 281 int ret; 282 283 qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL); 284 if (!qps) 285 return NULL; 286 287 ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, 0, node, qps); 288 if (!ret) 289 return qps; 290 291 kfree(qps); 292 return NULL; 293 } 294 295 static const struct kernel_param_ops sec_uacce_mode_ops = { 296 .set = uacce_mode_set, 297 .get = param_get_int, 298 }; 299 300 /* 301 * uacce_mode = 0 means sec only register to crypto, 302 * uacce_mode = 1 means sec both register to crypto and uacce. 303 */ 304 static u32 uacce_mode = UACCE_MODE_NOUACCE; 305 module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444); 306 MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC); 307 308 static const struct pci_device_id sec_dev_ids[] = { 309 { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) }, 310 { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) }, 311 { 0, } 312 }; 313 MODULE_DEVICE_TABLE(pci, sec_dev_ids); 314 315 static u8 sec_get_endian(struct hisi_qm *qm) 316 { 317 u32 reg; 318 319 /* 320 * As for VF, it is a wrong way to get endian setting by 321 * reading a register of the engine 322 */ 323 if (qm->pdev->is_virtfn) { 324 dev_err_ratelimited(&qm->pdev->dev, 325 "cannot access a register in VF!\n"); 326 return SEC_LE; 327 } 328 reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); 329 /* BD little endian mode */ 330 if (!(reg & BIT(0))) 331 return SEC_LE; 332 333 /* BD 32-bits big endian mode */ 334 else if (!(reg & BIT(1))) 335 return SEC_32BE; 336 337 /* BD 64-bits big endian mode */ 338 else 339 return SEC_64BE; 340 } 341 342 static void sec_open_sva_prefetch(struct hisi_qm *qm) 343 { 344 u32 val; 345 int ret; 346 347 if (qm->ver < QM_HW_V3) 348 return; 349 350 /* Enable prefetch */ 351 val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG); 352 val &= SEC_PREFETCH_ENABLE; 353 writel(val, qm->io_base + SEC_PREFETCH_CFG); 354 355 ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG, 356 val, !(val & SEC_PREFETCH_DISABLE), 357 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US); 358 if (ret) 359 pci_err(qm->pdev, "failed to open sva prefetch\n"); 360 } 361 362 static void sec_close_sva_prefetch(struct hisi_qm *qm) 363 { 364 u32 val; 365 int ret; 366 367 val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG); 368 val |= SEC_PREFETCH_DISABLE; 369 writel(val, qm->io_base + SEC_PREFETCH_CFG); 370 371 ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS, 372 val, !(val & SEC_SVA_DISABLE_READY), 373 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US); 374 if (ret) 375 pci_err(qm->pdev, "failed to close sva 
prefetch\n"); 376 } 377 378 static int sec_engine_init(struct hisi_qm *qm) 379 { 380 int ret; 381 u32 reg; 382 383 /* disable clock gate control */ 384 reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); 385 reg &= SEC_CLK_GATE_DISABLE; 386 writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); 387 388 writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG); 389 390 ret = readl_relaxed_poll_timeout(qm->io_base + SEC_MEM_INIT_DONE_REG, 391 reg, reg & 0x1, SEC_DELAY_10_US, 392 SEC_POLL_TIMEOUT_US); 393 if (ret) { 394 pci_err(qm->pdev, "fail to init sec mem\n"); 395 return ret; 396 } 397 398 reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); 399 reg |= (0x1 << SEC_TRNG_EN_SHIFT); 400 writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); 401 402 reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL0_REG); 403 reg |= SEC_USER0_SMMU_NORMAL; 404 writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL0_REG); 405 406 reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL1_REG); 407 reg &= SEC_USER1_SMMU_MASK; 408 if (qm->use_sva && qm->ver == QM_HW_V2) 409 reg |= SEC_USER1_SMMU_SVA; 410 else 411 reg |= SEC_USER1_SMMU_NORMAL; 412 writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG); 413 414 writel(SEC_SINGLE_PORT_MAX_TRANS, 415 qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS); 416 417 writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG); 418 419 /* Enable sm4 extra mode, as ctr/ecb */ 420 writel_relaxed(SEC_BD_ERR_CHK_EN0, 421 qm->io_base + SEC_BD_ERR_CHK_EN_REG0); 422 /* Enable sm4 xts mode multiple iv */ 423 writel_relaxed(SEC_BD_ERR_CHK_EN1, 424 qm->io_base + SEC_BD_ERR_CHK_EN_REG1); 425 writel_relaxed(SEC_BD_ERR_CHK_EN3, 426 qm->io_base + SEC_BD_ERR_CHK_EN_REG3); 427 428 /* config endian */ 429 reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); 430 reg |= sec_get_endian(qm); 431 writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); 432 433 return 0; 434 } 435 436 static int sec_set_user_domain_and_cache(struct hisi_qm *qm) 437 { 438 /* qm user domain */ 439 writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1); 440 writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE); 441 writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1); 442 writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE); 443 writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE); 444 445 /* qm cache */ 446 writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG); 447 writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE); 448 449 /* disable FLR triggered by BME(bus master enable) */ 450 writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG); 451 writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE); 452 453 /* enable sqc,cqc writeback */ 454 writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE | 455 CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) | 456 FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL); 457 458 return sec_engine_init(qm); 459 } 460 461 /* sec_debug_regs_clear() - clear the sec debug regs */ 462 static void sec_debug_regs_clear(struct hisi_qm *qm) 463 { 464 int i; 465 466 /* clear sec dfx regs */ 467 writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE); 468 for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) 469 readl(qm->io_base + sec_dfx_regs[i].offset); 470 471 /* clear rdclr_en */ 472 writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE); 473 474 hisi_qm_debug_regs_clear(qm); 475 } 476 477 static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable) 478 { 479 u32 val1, val2; 480 481 val1 = readl(qm->io_base + SEC_CONTROL_REG); 482 if (enable) 
		val1 |= SEC_AXI_SHUTDOWN_ENABLE;
		val2 = SEC_RAS_NFE_ENB_MSK;
	} else {
		val1 &= SEC_AXI_SHUTDOWN_DISABLE;
		val2 = 0x0;
	}

	if (qm->ver > QM_HW_V2)
		writel(val2, qm->io_base + SEC_OOO_SHUTDOWN_SEL);

	writel(val1, qm->io_base + SEC_CONTROL_REG);
}

static void sec_hw_error_enable(struct hisi_qm *qm)
{
	if (qm->ver == QM_HW_V1) {
		writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
		pci_info(qm->pdev, "V1 not support hw error handle\n");
		return;
	}

	/* clear any pending SEC hw error source */
	writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);

	/* enable RAS int */
	writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);

	/* enable SEC block master OOO when nfe occurs on Kunpeng930 */
	sec_master_ooo_ctrl(qm, true);

	/* enable SEC hw error interrupts */
	writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
}

static void sec_hw_error_disable(struct hisi_qm *qm)
{
	/* disable SEC hw error interrupts */
	writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);

	/* disable SEC block master OOO when nfe occurs on Kunpeng930 */
	sec_master_ooo_ctrl(qm, false);

	/* disable RAS int */
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
}

static u32 sec_clear_enable_read(struct sec_debug_file *file)
{
	struct hisi_qm *qm = file->qm;

	return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
	       SEC_CTRL_CNT_CLR_CE_BIT;
}

static int sec_clear_enable_write(struct sec_debug_file *file, u32 val)
{
	struct hisi_qm *qm = file->qm;
	u32 tmp;

	if (val != 1 && val)
		return -EINVAL;

	tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
	       ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	return 0;
}

static ssize_t sec_debug_read(struct file *filp, char __user *buf,
			      size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	u32 val;
	int ret;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CLEAR_ENABLE:
		val = sec_clear_enable_read(file);
		break;
	default:
		spin_unlock_irq(&file->lock);
		return -EINVAL;
	}

	spin_unlock_irq(&file->lock);
	ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);

	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= SEC_DBGFS_VAL_MAX_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
				     pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CLEAR_ENABLE:
		ret = sec_clear_enable_write(file, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}

	spin_unlock_irq(&file->lock);

	return count;

err_input:
	spin_unlock_irq(&file->lock);
	return ret;
}

static const struct file_operations sec_dbg_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = sec_debug_read,
	.write = sec_debug_write,
};

static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);

	return 0;
}

static int sec_debugfs_atomic64_set(void *data, u64 val)
{
	if (val)
		return -EINVAL;

	atomic64_set((atomic64_t *)data, 0);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
			 sec_debugfs_atomic64_set, "%lld\n");

static int sec_core_debug_init(struct hisi_qm *qm)
{
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	struct device *dev = &qm->pdev->dev;
	struct sec_dfx *dfx = &sec->debug.dfx;
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	int i;

	tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root);

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = sec_dfx_regs;
	regset->nregs = ARRAY_SIZE(sec_dfx_regs);
	regset->base = qm->io_base;

	if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID)
		debugfs_create_regset32("regs", 0444, tmp_d, regset);

	for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
		atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
						  sec_dfx_labels[i].offset);
		debugfs_create_file(sec_dfx_labels[i].name, 0644,
				    tmp_d, data, &sec_atomic64_ops);
	}

	return 0;
}

static int sec_debug_init(struct hisi_qm *qm)
{
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	int i;

	if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
		for (i = SEC_CLEAR_ENABLE; i < SEC_DEBUG_FILE_NUM; i++) {
			spin_lock_init(&sec->debug.files[i].lock);
			sec->debug.files[i].index = i;
			sec->debug.files[i].qm = qm;

			debugfs_create_file(sec_dbg_file_name[i], 0600,
					    qm->debug.debug_root,
					    sec->debug.files + i,
					    &sec_dbg_fops);
		}
	}

	return sec_core_debug_init(qm);
}

static int sec_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  sec_debugfs_root);
	qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
	hisi_qm_debug_init(qm);

	ret = sec_debug_init(qm);
	if (ret)
		goto failed_to_create;

	return 0;

failed_to_create:
	debugfs_remove_recursive(sec_debugfs_root);
	return ret;
}

static void sec_debugfs_exit(struct hisi_qm *qm)
{
	debugfs_remove_recursive(qm->debug.debug_root);
}

static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct sec_hw_error *errs = sec_hw_errors;
	struct device *dev = &qm->pdev->dev;
	u32 err_val;

	while (errs->msg) {
		if (errs->int_msk & err_sts) {
			dev_err(dev, "%s [error status=0x%x] found\n",
				errs->msg, errs->int_msk);

			if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
				err_val = readl(qm->io_base +
						SEC_CORE_SRAM_ECC_ERR_INFO);
				dev_err(dev, "multi ecc sram num=0x%x\n",
					((err_val) >> SEC_ECC_NUM) &
					SEC_ECC_MASH);
			}
		}
		errs++;
	}
}

static u32 sec_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + SEC_CORE_INT_STATUS);
}

static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
}

static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + SEC_CONTROL_REG);
	writel(val & SEC_AXI_SHUTDOWN_DISABLE, qm->io_base + SEC_CONTROL_REG);
	writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG);
}

static void sec_err_info_init(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;

	err_info->ce = QM_BASE_CE;
	err_info->fe = 0;
	err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
	err_info->dev_ce_mask = SEC_RAS_CE_ENB_MSK;
	err_info->msi_wr_port = BIT(0);
	err_info->acpi_rst = "SRST";
	err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
			QM_ACC_WB_NOT_READY_TIMEOUT;
}

static const struct hisi_qm_err_ini sec_err_ini = {
	.hw_init = sec_set_user_domain_and_cache,
	.hw_err_enable = sec_hw_error_enable,
	.hw_err_disable = sec_hw_error_disable,
	.get_dev_hw_err_status = sec_get_hw_err_status,
	.clear_dev_hw_err_status = sec_clear_hw_err_status,
	.log_dev_hw_err = sec_log_hw_error,
	.open_axi_master_ooo = sec_open_axi_master_ooo,
	.open_sva_prefetch = sec_open_sva_prefetch,
	.close_sva_prefetch = sec_close_sva_prefetch,
	.err_info_init = sec_err_info_init,
};

static int sec_pf_probe_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	int ret;

	qm->err_ini = &sec_err_ini;
	qm->err_ini->err_info_init(qm);

	ret = sec_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	sec_open_sva_prefetch(qm);
	hisi_qm_dev_err_init(qm);
	sec_debug_regs_clear(qm);

	return 0;
}

static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	int ret;

	qm->pdev = pdev;
	qm->ver = pdev->revision;
	qm->algs = "cipher\ndigest\naead";
	qm->mode = uacce_mode;
	qm->sqe_size = SEC_SQE_SIZE;
	qm->dev_name = sec_name;

	qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = SEC_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &sec_devices;
	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
		/*
		 * There is no way to get the qm configuration from a VM on
		 * v1 hardware, so the PF is forced to use SEC_PF_DEF_Q_NUM
		 * and only one VF is triggered on v1 hardware.
		 * v2 hardware has no such problem.
		 */
		qm->qp_base = SEC_PF_DEF_Q_NUM;
		qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
	}

	/*
	 * WQ_HIGHPRI: SEC requests must have low latency,
	 * so a high-priority workqueue is needed.
	 * WQ_UNBOUND: SEC tasks are likely to be long-running,
	 * CPU-intensive workloads.
	 */
	qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
				 WQ_UNBOUND, num_online_cpus(),
				 pci_name(qm->pdev));
	if (!qm->wq) {
		pci_err(qm->pdev, "fail to alloc workqueue\n");
		return -ENOMEM;
	}

	ret = hisi_qm_init(qm);
	if (ret)
		destroy_workqueue(qm->wq);

	return ret;
}

static void sec_qm_uninit(struct hisi_qm *qm)
{
	hisi_qm_uninit(qm);
}

static int sec_probe_init(struct sec_dev *sec)
{
	u32 type_rate = SEC_SHAPER_TYPE_RATE;
	struct hisi_qm *qm = &sec->qm;
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = sec_pf_probe_init(sec);
		if (ret)
			return ret;
		/* enable shaper type 0 */
		if (qm->ver >= QM_HW_V3) {
			type_rate |= QM_SHAPER_ENABLE;
			qm->type_rate = type_rate;
		}
	}

	return 0;
}

static void sec_probe_uninit(struct hisi_qm *qm)
{
	hisi_qm_dev_err_uninit(qm);

	destroy_workqueue(qm->wq);
}

static void sec_iommu_used_check(struct sec_dev *sec)
{
	struct iommu_domain *domain;
	struct device *dev = &sec->qm.pdev->dev;

	domain = iommu_get_domain_for_dev(dev);

	/* Check if iommu is used */
	sec->iommu_used = false;
	if (domain) {
		if (domain->type & __IOMMU_DOMAIN_PAGING)
			sec->iommu_used = true;
		dev_info(dev, "SMMU Opened, the iommu type = %u\n",
			 domain->type);
	}
}

static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct sec_dev *sec;
	struct hisi_qm *qm;
	int ret;

	sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
	if (!sec)
		return -ENOMEM;

	qm = &sec->qm;
	ret = sec_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init SEC QM (%d)!\n", ret);
		return ret;
	}

	sec->ctx_q_num = ctx_q_num;
	sec_iommu_used_check(sec);

	ret = sec_probe_init(sec);
	if (ret) {
		pci_err(pdev, "Failed to probe!\n");
		goto err_qm_uninit;
	}

	ret = hisi_qm_start(qm);
	if (ret) {
		pci_err(pdev, "Failed to start sec qm!\n");
		goto err_probe_uninit;
	}

	ret = sec_debugfs_init(qm);
	if (ret)
		pci_warn(pdev, "Failed to init debugfs!\n");

	if (qm->qp_num >= ctx_q_num) {
		ret = hisi_qm_alg_register(qm, &sec_devices);
		if (ret < 0) {
			pr_err("Failed to register driver to crypto.\n");
			goto err_qm_stop;
		}
	} else {
		pci_warn(qm->pdev,
			 "Failed to use kernel mode, qp not enough!\n");
	}

	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
		if (ret) {
			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
			goto err_alg_unregister;
		}
	}

	if (qm->fun_type == QM_HW_PF && vfs_num) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_alg_unregister;
	}

	return 0;

err_alg_unregister:
	hisi_qm_alg_unregister(qm, &sec_devices);
err_qm_stop:
	sec_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);
err_probe_uninit:
	sec_probe_uninit(qm);
err_qm_uninit:
	sec_qm_uninit(qm);
	return ret;
}

static void sec_remove(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	hisi_qm_wait_task_finish(qm, &sec_devices);
	if (qm->qp_num >= ctx_q_num)
		hisi_qm_alg_unregister(qm, &sec_devices);

	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev, true);

	sec_debugfs_exit(qm);

	(void)hisi_qm_stop(qm, QM_NORMAL);

	if (qm->fun_type == QM_HW_PF)
		sec_debug_regs_clear(qm);

	sec_probe_uninit(qm);

	sec_qm_uninit(qm);
}

static const struct pci_error_handlers sec_err_handler = {
	.error_detected = hisi_qm_dev_err_detected,
	.slot_reset = hisi_qm_dev_slot_reset,
	.reset_prepare = hisi_qm_reset_prepare,
	.reset_done = hisi_qm_reset_done,
};

static struct pci_driver sec_pci_driver = {
	.name = "hisi_sec2",
	.id_table = sec_dev_ids,
	.probe = sec_probe,
	.remove = sec_remove,
	.err_handler = &sec_err_handler,
	.sriov_configure = hisi_qm_sriov_configure,
	.shutdown = hisi_qm_dev_shutdown,
};

static void sec_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
}

static void sec_unregister_debugfs(void)
{
	debugfs_remove_recursive(sec_debugfs_root);
}

static int __init sec_init(void)
{
	int ret;

	hisi_qm_init_list(&sec_devices);
	sec_register_debugfs();

	ret = pci_register_driver(&sec_pci_driver);
	if (ret < 0) {
		sec_unregister_debugfs();
		pr_err("Failed to register pci driver.\n");
		return ret;
	}

	return 0;
}

static void __exit sec_exit(void)
{
	pci_unregister_driver(&sec_pci_driver);
	sec_unregister_debugfs();
}

module_init(sec_init);
module_exit(sec_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>");
MODULE_AUTHOR("Kai Ye <yekai13@huawei.com>");
MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");